Revert godep updates, needs a fix in influxdb repo
parent 17e165382f
commit 6cb0f2d392
Godeps/Godeps.json
@@ -97,8 +97,8 @@
 		},
 		{
 			"ImportPath": "github.com/influxdb/influxdb",
-			"Comment": "v0.9.4-rc1-84-g6d4319d",
-			"Rev": "6d4319d244b47db94b79c505a16e00e7ac02ebed"
+			"Comment": "v0.9.3",
+			"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
 		},
 		{
 			"ImportPath": "github.com/lib/pq",
Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore
@@ -20,10 +20,6 @@ cmd/influxd/version.go
 # executables
 
 influx_stress
-**/influx_stress
-!**/influx_stress/
 
 influxd
-**/influxd
-!**/influxd/
 
@@ -69,6 +65,3 @@ integration/migration_data/
 
 # goconvey config files
 *.goconvey
-
-// Ingnore SourceGraph directory
-.srclib-store/
Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md
@@ -1,78 +1,9 @@
-## v0.9.5 [unreleased]
-
-### Features
-- [#4065](https://github.com/influxdb/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex
-
-### Bugfixes
-- [#3457](https://github.com/influxdb/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name
-- [#4111](https://github.com/influxdb/influxdb/pull/4111): Update pre-commit hook for go vet composites
-
-## v0.9.4 [2015-09-14]
-
-### Release Notes
-With this release InfluxDB is moving to Go 1.5.
-
-### Features
-- [#4050](https://github.com/influxdb/influxdb/pull/4050): Add stats to collectd
-- [#3771](https://github.com/influxdb/influxdb/pull/3771): Close idle Graphite TCP connections
-- [#3755](https://github.com/influxdb/influxdb/issues/3755): Add option to build script. Thanks @fg2it
-- [#3863](https://github.com/influxdb/influxdb/pull/3863): Move to Go 1.5
-- [#3892](https://github.com/influxdb/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE
-- [#3916](https://github.com/influxdb/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented.
-- [#3901](https://github.com/influxdb/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki
-- [#4048](https://github.com/influxdb/influxdb/pull/4048): Add statistics to Continuous Query service
-- [#4049](https://github.com/influxdb/influxdb/pull/4049): Add stats to the UDP input
-- [#3876](https://github.com/influxdb/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT
-- [#3975](https://github.com/influxdb/influxdb/pull/3975): Add shard copy service
-- [#3986](https://github.com/influxdb/influxdb/pull/3986): Support sorting by time desc
-- [#3930](https://github.com/influxdb/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdb/influxdb/issues/1821)
-- [#4045](https://github.com/influxdb/influxdb/pull/4045): Instrument cluster-level points writer
-- [#3996](https://github.com/influxdb/influxdb/pull/3996): Add statistics to httpd package
-- [#4003](https://github.com/influxdb/influxdb/pull/4033): Add logrotate configuration.
-- [#4043](https://github.com/influxdb/influxdb/pull/4043): Add stats and batching to openTSDB input
-- [#4042](https://github.com/influxdb/influxdb/pull/4042): Add pending batches control to batcher
-- [#4006](https://github.com/influxdb/influxdb/pull/4006): Add basic statistics for shards
-- [#4072](https://github.com/influxdb/influxdb/pull/4072): Add statistics for the WAL.
-
-### Bugfixes
-- [#4042](https://github.com/influxdb/influxdb/pull/4042): Set UDP input batching defaults as needed.
-- [#3785](https://github.com/influxdb/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic
-- [#3804](https://github.com/influxdb/influxdb/pull/3804): init.d script fixes, fixes issue 3803.
-- [#3823](https://github.com/influxdb/influxdb/pull/3823): Deterministic ordering for first() and last()
-- [#3869](https://github.com/influxdb/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin
-- [#3856](https://github.com/influxdb/influxdb/pull/3856): Minor changes to retention enforcement.
-- [#3884](https://github.com/influxdb/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup
-- [#3868](https://github.com/influxdb/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset.
-- [#3886](https://github.com/influxdb/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL
-- [#3574](https://github.com/influxdb/influxdb/issues/3574): Querying data node causes panic
-- [#3913](https://github.com/influxdb/influxdb/issues/3913): Convert meta shard owners to objects
-- [#4026](https://github.com/influxdb/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdb/influxdb/issues/3636)
-- [#3927](https://github.com/influxdb/influxdb/issues/3927): Add WAL lock to prevent timing lock contention
-- [#3928](https://github.com/influxdb/influxdb/issues/3928): Write fails for multiple points when tag starts with quote
-- [#3901](https://github.com/influxdb/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki!
-- [#3950](https://github.com/influxdb/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI
-- [#3977](https://github.com/influxdb/influxdb/pull/3977): Silence wal logging during testing
-- [#3931](https://github.com/influxdb/influxdb/pull/3931): Don't precreate shard groups entirely in the past
-- [#3960](https://github.com/influxdb/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster
-- [#3980](https://github.com/influxdb/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548.
-- [#4016](https://github.com/influxdb/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM.
-- [#4034](https://github.com/influxdb/influxdb/pull/4034): Rollback bolt tx on mapper open error
-- [#3848](https://github.com/influxdb/influxdb/issues/3848): restart influxdb causing panic
-- [#3881](https://github.com/influxdb/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference
-- [#3926](https://github.com/influxdb/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdb/influxdb/pull/4038)
-- [#4053](https://github.com/influxdb/influxdb/pull/4053): Prohibit dropping default retention policy.
-- [#4060](https://github.com/influxdb/influxdb/pull/4060): Don't log EOF error in openTSDB input.
-- [#3978](https://github.com/influxdb/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause
-- [#4058](https://github.com/influxdb/influxdb/pull/4058): Disable bz1 recompression
-- [#3902](https://github.com/influxdb/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time"
-- [#3718](https://github.com/influxdb/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse
-
-## v0.9.3 [2015-08-26]
+## v0.9.3 [unreleased]
 
 ### Release Notes
 
 There are breaking changes in this release.
-- To store data points as integers you must now append `i` to the number if using the line protocol.
+- To store data points as integers you must now append i to the number if using the line protocol.
 - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs.
 - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) for more details.
 - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query.
Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md
@@ -79,7 +79,7 @@ second to sign our CLA, which can be found
 
 Installing Go
 -------------
-InfluxDB requires Go 1.5 or greater.
+InfluxDB requires Go 1.4 or greater.
 
 At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
 on how to install it see [the gvm page on github](https://github.com/moovweb/gvm).
@@ -87,40 +87,29 @@ on how to install it see [the gvm page on github](https://github.com/moovweb/gvm
 After installing gvm you can install and set the default go version by
 running the following:
 
-    gvm install go1.5
-    gvm use go1.5 --default
+    gvm install go1.4
+    gvm use go1.4 --default
 
 Revision Control Systems
--------------
+------
 Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following rcs software to your system.
 Currently the project only depends on `git` and `mercurial`.
 
 * [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)
 * [Install Mercurial](http://mercurial.selenic.com/wiki/Download)
 
-Getting the source
-------
-Setup the project structure and fetch the repo like so:
-
-    mkdir $HOME/gocodez
-    export GOPATH=$HOME/gocodez
-    go get github.com/influxdb/influxdb
-
-You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it everytime.
-
-Cloning a fork
--------------
-If you wish to work with fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, instead clone your fork. Follow the steps below to work with a fork:
+Project structure
+-----------------
+First you need to setup the project structure:
 
     export GOPATH=$HOME/gocodez
     mkdir -p $GOPATH/src/github.com/influxdb
     cd $GOPATH/src/github.com/influxdb
-    git clone git@github.com:<username>/influxdb
+    git clone git@github.com:influxdb/influxdb
 
-Retaining the directory structure `$GOPATH/src/github.com/influxdb` is necessary so that Go imports work correctly.
-
-Pre-commit checks
--------------
+You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh
+file to be set for every shell instead of having to manually run it
+everytime.
 
 We have a pre commit hook to make sure code is formatted properly
 and vetted before you commit any changes. We strongly recommend using the pre
@@ -167,16 +156,11 @@ go install ./...
 To set the version and commit flags during the build pass the following to the build command:
 
 ```bash
--ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT"
+-ldflags="-X main.version $VERSION -X main.branch $BRANCH -X main.commit $COMMIT"
 ```
 
 where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash.
 
-If you want to build packages, see `package.sh` help:
-```bash
-package.sh -h
-```
-
 To run the tests, execute the following command:
 
 ```bash
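As context for the pair of `-ldflags` lines above: `-X` overrides a package-level string variable at link time, and the revert swaps Go 1.5's `name=value` form back to Go 1.4's `name value` form. A minimal hypothetical `main.go` showing the variables such a flag pair would populate:

```go
package main

import "fmt"

// Overridable at link time, e.g.:
//   go build -ldflags="-X main.version=1.2.3 -X main.branch=master -X main.commit=abc123"
// (Go 1.5+ syntax; Go 1.4 used "-X main.version 1.2.3" with a space instead of "=".)
var (
	version = "dev"
	branch  = "unknown"
	commit  = "unknown"
)

func main() {
	fmt.Printf("build v%s (git: %s %s)\n", version, branch, commit)
}
```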
Godeps/_workspace/src/github.com/influxdb/influxdb/README.md
@@ -37,8 +37,8 @@ The `HOST_IP` env variable should be your host IP if running under linux or the
 ```
 $ export HOST_IP=<your host/VM IP>
-$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -hostname $HOST_IP:8088
-$ docker run -it -p 8186:8086 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088
-$ docker run -it -p 8286:8086 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088
+$ docker run -it -p 8086:8088 -p 8088:8088 influxdb -hostname $HOST_IP:8088
+$ docker run -it -p 8186:8088 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088
+$ docker run -it -p 8286:8088 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088
 ```
@@ -1,12 +0,0 @@
-FROM 32bit/ubuntu:14.04
-
-RUN apt-get update && apt-get install -y python-software-properties software-properties-common git
-RUN add-apt-repository ppa:evarlast/golang1.5
-RUN apt-get update && apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go
-
-ENV GOPATH=/root/go
-RUN mkdir -p /root/go/src/github.com/influxdb/influxdb
-RUN mkdir -p /tmp/artifacts
-
-VOLUME /root/go/src/github.com/influxdb/influxdb
-VOLUME /tmp/artifacts
@@ -1,19 +0,0 @@
-# List
-
-- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
-- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
-- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
-- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
-- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
-- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
-- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
-- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
-- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
-- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)
-- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
-- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
-- glyphicons [LICENSE](http://glyphicons.com/license/)
-- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
-- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
-- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
-- golang.org/x/crypto/bcrypt [BSD LICENSE](https://go.googlesource.com/crypto/+/master/LICENSE)
@@ -32,7 +32,6 @@ For those adventurous enough, you can
 
 ### Starting InfluxDB
 * `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package.
-* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later.
 * `$GOPATH/bin/influxd` if you have built InfluxDB from source.
 
 ### Creating your first database
@@ -1,9 +1,7 @@
-#!/bin/sh
-
-set -e -x
+#!/bin/sh -x -e
 
 GO_VER=${GO_VER:-1.5}
 
-docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd'
+docker run -it -v "$GOPATH":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd'
 
 docker build -t influxdb .
Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh
@@ -5,7 +5,7 @@
 # build process for InfluxDB.
 
 BUILD_DIR=$HOME/influxdb-build
-GO_VERSION=go1.5
+GO_VERSION=go1.4.2
 PARALLELISM="-parallel 256"
 TIMEOUT="-timeout 480s"
 
@@ -21,25 +21,6 @@ function exit_if_fail {
     fi
 }
 
-# Check that go fmt has been run.
-function check_go_fmt {
-    fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l`
-    if [ $fmtcount -gt 0 ]; then
-        echo "run 'go fmt ./...' to format your source code."
-        exit 1
-    fi
-}
-
-# Check that go vet passes.
-function check_go_vet {
-    # Due to the way composites work, vet will fail for some of our tests so we ignore it
-    vetcount=`go tool vet --composites=false ./ 2>&1 | wc -l`
-    if [ $vetcount -gt 0 ]; then
-        echo "run 'go tool vet --composites=false ./' to see the errors it flags and correct your source code."
-        exit 1
-    fi
-}
-
 source $HOME/.gvm/scripts/gvm
 exit_if_fail gvm use $GO_VERSION
 
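For readers who prefer Go to shell, the removed `check_go_fmt` logic can be sketched as a small Go program (assumes `gofmt` is on PATH; the directory argument is illustrative): `gofmt -l` prints the files whose formatting differs from gofmt's, so any output means the check fails.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// gofmt -l recurses into the directory and lists unformatted files.
	out, err := exec.Command("gofmt", "-l", ".").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "gofmt failed:", err)
		os.Exit(1)
	}
	if files := strings.TrimSpace(string(out)); files != "" {
		fmt.Println("run 'go fmt ./...' to format your source code:")
		fmt.Println(files)
		os.Exit(1)
	}
}
```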
@@ -64,29 +45,16 @@ exit_if_fail git branch --set-upstream-to=origin/$CIRCLE_BRANCH $CIRCLE_BRANCH
 exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb
 exit_if_fail go get -t -d -v ./...
 exit_if_fail git checkout $CIRCLE_BRANCH # 'go get' switches to master. Who knew? Switch back.
-check_go_fmt
-check_go_vet
 exit_if_fail go build -v ./...
 
 # Run the tests.
+exit_if_fail go tool vet --composites=false .
 case $CIRCLE_NODE_INDEX in
 0)
     go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt
     rc=${PIPESTATUS[0]}
     ;;
 1)
-    # 32bit tests.
-    if [[ -e ~/docker/image.tar ]]; then docker load -i ~/docker/image.tar; fi
-    docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test .
-    mkdir -p ~/docker; docker save ubuntu-32-influxdb-test > ~/docker/image.tar
+    exit_if_fail docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test .
     docker run -v $(pwd):/root/go/src/github.com/influxdb/influxdb -e "CI=${CI}" \
         -v ${CIRCLE_ARTIFACTS}:/tmp/artifacts \
         -t ubuntu-32-influxdb-test bash \
         -c "cd /root/go/src/github.com/influxdb/influxdb && go get -t -d -v ./... && go build -v ./... && go test ${PARALLELISM} ${TIMEOUT} -v ./... 2>&1 | tee /tmp/artifacts/test_logs_i386.txt && exit \${PIPESTATUS[0]}"
     rc=$?
     ;;
 2)
     GORACE="halt_on_error=1" go test $PARALLELISM $TIMEOUT -v -race ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs_race.txt
     rc=${PIPESTATUS[0]}
     ;;
Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml
@@ -1,15 +1,11 @@
 machine:
-  services:
-    - docker
   pre:
     - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
-    - source $HOME/.gvm/scripts/gvm; gvm install go1.5 --binary
+    - source $HOME/.gvm/scripts/gvm; gvm install go1.4.2 --binary
 
 dependencies:
   override:
-    - mkdir -p ~/docker
-  cache_directories:
-    - "~/docker"
+    - echo "Dummy override, so no Circle dependencies execute"
 test:
   override:
     - bash circle-test.sh:
Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
@@ -45,12 +45,7 @@ the configuration below.
 package main
 
 import "github.com/influxdb/influxdb/client"
-import (
-	"net/url"
-	"fmt"
-	"log"
-	"os"
-)
+import "net/url"
 
 const (
     MyHost = "localhost"
Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go
@@ -79,7 +79,6 @@ type Config struct {
 	Password  string
 	UserAgent string
 	Timeout   time.Duration
-	Precision string
 }
 
 // NewConfig will create a config to be used in connecting to the client
@@ -96,7 +95,6 @@ type Client struct {
 	password   string
 	httpClient *http.Client
 	userAgent  string
-	precision  string
 }
 
 const (
@@ -114,7 +112,6 @@ func NewClient(c Config) (*Client, error) {
 		password:   c.Password,
 		httpClient: &http.Client{Timeout: c.Timeout},
 		userAgent:  c.UserAgent,
-		precision:  c.Precision,
 	}
 	if client.userAgent == "" {
 		client.userAgent = "InfluxDBClient"
@@ -128,11 +125,6 @@ func (c *Client) SetAuth(u, p string) {
 	c.password = p
 }
 
-// SetPrecision will update the precision
-func (c *Client) SetPrecision(precision string) {
-	c.precision = precision
-}
-
 // Query sends a command to the server and returns the Response
 func (c *Client) Query(q Query) (*Response, error) {
 	u := c.url
@@ -141,9 +133,6 @@ func (c *Client) Query(q Query) (*Response, error) {
 	values := u.Query()
 	values.Set("q", q.Command)
 	values.Set("db", q.Database)
-	if c.precision != "" {
-		values.Set("epoch", c.precision)
-	}
 	u.RawQuery = values.Encode()
 
 	req, err := http.NewRequest("GET", u.String(), nil)
@@ -460,11 +449,7 @@ func (p *Point) MarshalJSON() ([]byte, error) {
 }
 
 func (p *Point) MarshalString() string {
-	pt := tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
-	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
-		return pt.String()
-	}
-	return pt.PrecisionString(p.Precision)
+	return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
 }
 
 // UnmarshalJSON decodes the data into the Point struct
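For context, the precision support being reverted amounted to one extra query parameter on the HTTP API; a self-contained `net/url` sketch of the effect (values are illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("http://localhost:8086/query")
	values := u.Query()
	values.Set("q", "SELECT * FROM cpu")
	values.Set("db", "mydb")
	precision := "s" // what Client.precision held before the revert
	if precision != "" {
		values.Set("epoch", precision) // timestamps come back as epoch seconds
	}
	u.RawQuery = values.Encode()
	fmt.Println(u.String())
}
```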
@@ -498,12 +498,13 @@ func TestBatchPoints_Normal(t *testing.T) {
 }
 
 func TestClient_Timeout(t *testing.T) {
-	done := make(chan bool)
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		<-done
+		time.Sleep(1 * time.Second)
 		var data client.Response
 		w.WriteHeader(http.StatusOK)
 		_ = json.NewEncoder(w).Encode(data)
 	}))
 	defer ts.Close()
-	defer func() { done <- true }()
 
 	u, _ := url.Parse(ts.URL)
 	config := client.Config{URL: *u, Timeout: 500 * time.Millisecond}
@@ -516,33 +517,13 @@ func TestClient_Timeout(t *testing.T) {
 	_, err = c.Query(query)
 	if err == nil {
 		t.Fatalf("unexpected success.  expected timeout error")
-	} else if !strings.Contains(err.Error(), "request canceled") &&
-		!strings.Contains(err.Error(), "use of closed network connection") {
-		t.Fatalf("unexpected error.  expected 'request canceled' error, got %v", err)
-	}
-}
-
-func TestClient_NoTimeout(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping in short mode")
-	}
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		time.Sleep(1 * time.Second)
-		var data client.Response
-		w.WriteHeader(http.StatusOK)
-		_ = json.NewEncoder(w).Encode(data)
-	}))
-	defer ts.Close()
-
-	u, _ := url.Parse(ts.URL)
-	config := client.Config{URL: *u}
-	c, err := client.NewClient(config)
-	if err != nil {
-		t.Fatalf("unexpected error.  expected %v,  actual %v", nil, err)
+	} else if !strings.Contains(err.Error(), "use of closed network connection") {
+		t.Fatalf("unexpected error.  expected 'use of closed network connection' error, got %v", err)
 	}
 
-	query := client.Query{}
-	_, err = c.Query(query)
+	confignotimeout := client.Config{URL: *u}
+	cnotimeout, err := client.NewClient(confignotimeout)
+	_, err = cnotimeout.Query(query)
 	if err != nil {
 		t.Fatalf("unexpected error.  expected %v,  actual %v", nil, err)
 	}
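Background for the test rewrite above: `http.Client.Timeout` bounds the whole request, so a handler that sleeps past it yields the canceled-request/closed-connection errors the test matches on. A minimal self-contained sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(1 * time.Second) // outlast the client timeout below
	}))
	defer ts.Close()

	c := &http.Client{Timeout: 100 * time.Millisecond}
	_, err := c.Get(ts.URL)
	fmt.Println(err) // e.g. "... Client.Timeout exceeded while awaiting headers"
}
```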
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go (generated, vendored; 135 lines changed)
@@ -10,6 +10,9 @@ It is generated from these files:
 
 It has these top-level messages:
 	WriteShardRequest
+	Field
+	Tag
+	Point
 	WriteShardResponse
 	MapShardRequest
 	MapShardResponse
@@ -25,7 +28,7 @@ var _ = math.Inf
 
 type WriteShardRequest struct {
 	ShardID          *uint64  `protobuf:"varint,1,req" json:"ShardID,omitempty"`
-	Points           [][]byte `protobuf:"bytes,2,rep" json:"Points,omitempty"`
+	Points           []*Point `protobuf:"bytes,2,rep" json:"Points,omitempty"`
 	XXX_unrecognized []byte   `json:"-"`
 }
 
@@ -40,13 +43,141 @@ func (m *WriteShardRequest) GetShardID() uint64 {
 	return 0
 }
 
-func (m *WriteShardRequest) GetPoints() [][]byte {
+func (m *WriteShardRequest) GetPoints() []*Point {
 	if m != nil {
 		return m.Points
 	}
 	return nil
 }
 
+type Field struct {
+	Name             *string  `protobuf:"bytes,1,req" json:"Name,omitempty"`
+	Int32            *int32   `protobuf:"varint,2,opt" json:"Int32,omitempty"`
+	Int64            *int64   `protobuf:"varint,3,opt" json:"Int64,omitempty"`
+	Float64          *float64 `protobuf:"fixed64,4,opt" json:"Float64,omitempty"`
+	Bool             *bool    `protobuf:"varint,5,opt" json:"Bool,omitempty"`
+	String_          *string  `protobuf:"bytes,6,opt" json:"String,omitempty"`
+	Bytes            []byte   `protobuf:"bytes,7,opt" json:"Bytes,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *Field) Reset()         { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage()    {}
+
+func (m *Field) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Field) GetInt32() int32 {
+	if m != nil && m.Int32 != nil {
+		return *m.Int32
+	}
+	return 0
+}
+
+func (m *Field) GetInt64() int64 {
+	if m != nil && m.Int64 != nil {
+		return *m.Int64
+	}
+	return 0
+}
+
+func (m *Field) GetFloat64() float64 {
+	if m != nil && m.Float64 != nil {
+		return *m.Float64
+	}
+	return 0
+}
+
+func (m *Field) GetBool() bool {
+	if m != nil && m.Bool != nil {
+		return *m.Bool
+	}
+	return false
+}
+
+func (m *Field) GetString_() string {
+	if m != nil && m.String_ != nil {
+		return *m.String_
+	}
+	return ""
+}
+
+func (m *Field) GetBytes() []byte {
+	if m != nil {
+		return m.Bytes
+	}
+	return nil
+}
+
+type Tag struct {
+	Key              *string `protobuf:"bytes,1,req" json:"Key,omitempty"`
+	Value            *string `protobuf:"bytes,2,req" json:"Value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Tag) Reset()         { *m = Tag{} }
+func (m *Tag) String() string { return proto.CompactTextString(m) }
+func (*Tag) ProtoMessage()    {}
+
+func (m *Tag) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *Tag) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+type Point struct {
+	Name             *string  `protobuf:"bytes,1,req" json:"Name,omitempty"`
+	Time             *int64   `protobuf:"varint,2,req" json:"Time,omitempty"`
+	Fields           []*Field `protobuf:"bytes,3,rep" json:"Fields,omitempty"`
+	Tags             []*Tag   `protobuf:"bytes,4,rep" json:"Tags,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *Point) Reset()         { *m = Point{} }
+func (m *Point) String() string { return proto.CompactTextString(m) }
+func (*Point) ProtoMessage()    {}
+
+func (m *Point) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Point) GetTime() int64 {
+	if m != nil && m.Time != nil {
+		return *m.Time
+	}
+	return 0
+}
+
+func (m *Point) GetFields() []*Field {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+func (m *Point) GetTags() []*Tag {
+	if m != nil {
+		return m.Tags
+	}
+	return nil
+}
+
 type WriteShardResponse struct {
 	Code             *int32  `protobuf:"varint,1,req" json:"Code,omitempty"`
 	Message          *string `protobuf:"bytes,2,opt" json:"Message,omitempty"`
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto (generated, vendored; 26 lines changed)
@@ -2,7 +2,31 @@ package internal;
 
 message WriteShardRequest {
     required uint64 ShardID = 1;
-    repeated bytes  Points  = 2;
+    repeated Point  Points  = 2;
 }
 
+message Field {
+    required string Name = 1;
+    oneof Value {
+        int32  Int32   = 2;
+        int64  Int64   = 3;
+        double Float64 = 4;
+        bool   Bool    = 5;
+        string String  = 6;
+        bytes  Bytes   = 7;
+    }
+}
+
+message Tag {
+    required string Key   = 1;
+    required string Value = 2;
+}
+
+message Point {
+    required string Name   = 1;
+    required int64  Time   = 2;
+    repeated Field  Fields = 3;
+    repeated Tag    Tags   = 4;
+}
+
 message WriteShardResponse {
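To make the restored message types concrete, here is a hedged sketch of one Point round-trip through gogo/protobuf, written as it might appear alongside the generated code (it could not be imported from outside the `cluster` tree because of Go's internal-package rule; all values are illustrative):

```go
package cluster

import (
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/influxdb/influxdb/cluster/internal"
)

// examplePointRoundTrip marshals and unmarshals a single Point, showing how
// the generated optional fields stand in for the proto "oneof Value" arms.
func examplePointRoundTrip() (*internal.Point, error) {
	pt := &internal.Point{
		Name: proto.String("cpu"),
		Time: proto.Int64(time.Now().UnixNano()),
		Fields: []*internal.Field{
			{Name: proto.String("value"), Float64: proto.Float64(0.64)},
		},
		Tags: []*internal.Tag{
			{Key: proto.String("host"), Value: proto.String("server01")},
		},
	}

	buf, err := proto.Marshal(pt)
	if err != nil {
		return nil, err
	}

	out := &internal.Point{}
	if err := proto.Unmarshal(buf, out); err != nil {
		return nil, err
	}
	return out, nil
}
```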
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go
@@ -2,7 +2,6 @@ package cluster
 
 import (
 	"errors"
-	"expvar"
 	"fmt"
 	"log"
 	"os"
@@ -19,19 +18,6 @@ import (
 // be returned as successful
 type ConsistencyLevel int
 
-// The statistics generated by the "write" mdoule
-const (
-	statWriteReq            = "req"
-	statPointWriteReq       = "point_req"
-	statPointWriteReqLocal  = "point_req_local"
-	statPointWriteReqRemote = "point_req_remote"
-	statWriteOK             = "write_ok"
-	statWritePartial        = "write_partial"
-	statWriteTimeout        = "write_timeout"
-	statWriteErr            = "write_error"
-	statWritePointReqHH     = "point_req_hh"
-)
-
 const (
 	// ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet
 	ConsistencyLevelAny ConsistencyLevel = iota
@@ -104,8 +90,6 @@ type PointsWriter struct {
 	HintedHandoff interface {
 		WriteShard(shardID, ownerID uint64, points []tsdb.Point) error
 	}
-
-	statMap *expvar.Map
 }
 
 // NewPointsWriter returns a new instance of PointsWriter for a node.
@@ -114,7 +98,6 @@ func NewPointsWriter() *PointsWriter {
 		closing:      make(chan struct{}),
 		WriteTimeout: DefaultWriteTimeout,
 		Logger:       log.New(os.Stderr, "[write] ", log.LstdFlags),
-		statMap:      influxdb.NewStatistics("write", "write", nil),
 	}
 }
 
@@ -199,9 +182,6 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error)
 
 // WritePoints writes across multiple local and remote data nodes according the consistency level.
 func (w *PointsWriter) WritePoints(p *WritePointsRequest) error {
-	w.statMap.Add(statWriteReq, 1)
-	w.statMap.Add(statPointWriteReq, int64(len(p.Points)))
-
 	if p.RetentionPolicy == "" {
 		db, err := w.MetaStore.Database(p.Database)
 		if err != nil {
@@ -244,7 +224,7 @@ func (w *PointsWriter) WritePoints(p *WritePointsRequest) error {
 func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string,
 	consistency ConsistencyLevel, points []tsdb.Point) error {
 	// The required number of writes to achieve the requested consistency level
-	required := len(shard.Owners)
+	required := len(shard.OwnerIDs)
 	switch consistency {
 	case ConsistencyLevelAny, ConsistencyLevelOne:
 		required = 1
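The hunk above shows only the Any/One arm of the switch; assuming the usual quorum arithmetic for the remaining levels, the acknowledgement requirement can be sketched standalone as:

```go
package main

import "fmt"

type ConsistencyLevel int

const (
	ConsistencyLevelAny ConsistencyLevel = iota
	ConsistencyLevelOne
	ConsistencyLevelQuorum
	ConsistencyLevelAll
)

// requiredAcks mirrors the switch at the top of writeToShard: how many owner
// acknowledgements a write needs before it counts as successful.
// The quorum formula is the conventional majority and is an assumption here.
func requiredAcks(level ConsistencyLevel, owners int) int {
	switch level {
	case ConsistencyLevelAny, ConsistencyLevelOne:
		return 1
	case ConsistencyLevelQuorum:
		return owners/2 + 1
	default: // ConsistencyLevelAll
		return owners
	}
}

func main() {
	for _, l := range []ConsistencyLevel{ConsistencyLevelOne, ConsistencyLevelQuorum, ConsistencyLevelAll} {
		fmt.Printf("level %d of 3 owners needs %d acks\n", l, requiredAcks(l, 3))
	}
}
```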
@@ -253,88 +233,76 @@ func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPo
 	}
 
 	// response channel for each shard writer go routine
-	type AsyncWriteResult struct {
-		Owner meta.ShardOwner
-		Err   error
-	}
-	ch := make(chan *AsyncWriteResult, len(shard.Owners))
-
-	for _, owner := range shard.Owners {
-		go func(shardID uint64, owner meta.ShardOwner, points []tsdb.Point) {
-			if w.MetaStore.NodeID() == owner.NodeID {
-				w.statMap.Add(statPointWriteReqLocal, int64(len(points)))
+	ch := make(chan error, len(shard.OwnerIDs))
+
+	for _, nodeID := range shard.OwnerIDs {
+		go func(shardID, nodeID uint64, points []tsdb.Point) {
+			if w.MetaStore.NodeID() == nodeID {
 				err := w.TSDBStore.WriteToShard(shardID, points)
 				// If we've written to shard that should exist on the current node, but the store has
 				// not actually created this shard, tell it to create it and retry the write
 				if err == tsdb.ErrShardNotFound {
 					err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID)
 					if err != nil {
-						ch <- &AsyncWriteResult{owner, err}
+						ch <- err
 						return
 					}
 					err = w.TSDBStore.WriteToShard(shardID, points)
 				}
-				ch <- &AsyncWriteResult{owner, err}
+				ch <- err
 				return
 			}
 
-			w.statMap.Add(statPointWriteReqRemote, int64(len(points)))
-			err := w.ShardWriter.WriteShard(shardID, owner.NodeID, points)
+			err := w.ShardWriter.WriteShard(shardID, nodeID, points)
 			if err != nil && tsdb.IsRetryable(err) {
 				// The remote write failed so queue it via hinted handoff
-				w.statMap.Add(statWritePointReqHH, int64(len(points)))
-				hherr := w.HintedHandoff.WriteShard(shardID, owner.NodeID, points)
+				hherr := w.HintedHandoff.WriteShard(shardID, nodeID, points)
 
 				// If the write consistency level is ANY, then a successful hinted handoff can
 				// be considered a successful write so send nil to the response channel
 				// otherwise, let the original error propogate to the response channel
 				if hherr == nil && consistency == ConsistencyLevelAny {
-					ch <- &AsyncWriteResult{owner, nil}
+					ch <- nil
 					return
 				}
 			}
-			ch <- &AsyncWriteResult{owner, err}
+			ch <- err
 
-		}(shard.ID, owner, points)
+		}(shard.ID, nodeID, points)
 	}
 
 	var wrote int
 	timeout := time.After(w.WriteTimeout)
 	var writeError error
-	for range shard.Owners {
+	for _, nodeID := range shard.OwnerIDs {
 		select {
 		case <-w.closing:
 			return ErrWriteFailed
 		case <-timeout:
-			w.statMap.Add(statWriteTimeout, 1)
 			// return timeout error to caller
 			return ErrTimeout
-		case result := <-ch:
+		case err := <-ch:
 			// If the write returned an error, continue to the next response
-			if result.Err != nil {
-				w.statMap.Add(statWriteErr, 1)
-				w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, result.Owner.NodeID, result.Err)
+			if err != nil {
+				w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, nodeID, err)
 
 				// Keep track of the first error we see to return back to the client
 				if writeError == nil {
-					writeError = result.Err
+					writeError = err
 				}
 				continue
 			}
 
 			wrote += 1
 		}
 
 		// We wrote the required consistency level
		if wrote >= required {
-			w.statMap.Add(statWriteOK, 1)
 			return nil
 		}
 	}
 
 	if wrote > 0 {
-		w.statMap.Add(statWritePartial, 1)
 		return ErrPartialWrite
 	}
 
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go (generated, vendored; 52 lines changed)
@@ -51,16 +51,8 @@ func TestPointsWriter_MapShards_One(t *testing.T) {
 func TestPointsWriter_MapShards_Multiple(t *testing.T) {
 	ms := MetaStore{}
 	rp := NewRetentionPolicy("myp", time.Hour, 3)
-	AttachShardGroupInfo(rp, []meta.ShardOwner{
-		{NodeID: 1},
-		{NodeID: 2},
-		{NodeID: 3},
-	})
-	AttachShardGroupInfo(rp, []meta.ShardOwner{
-		{NodeID: 1},
-		{NodeID: 2},
-		{NodeID: 3},
-	})
+	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
+	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
 
 	ms.NodeIDFn = func() uint64 { return 1 }
 	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
@@ -257,25 +249,13 @@ func TestPointsWriter_WritePoints(t *testing.T) {
 		theTest := test
 		sm := cluster.NewShardMapping()
 		sm.MapPoint(
-			&meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{
-				{NodeID: 1},
-				{NodeID: 2},
-				{NodeID: 3},
-			}},
+			&meta.ShardInfo{ID: uint64(1), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
 			pr.Points[0])
 		sm.MapPoint(
-			&meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{
-				{NodeID: 1},
-				{NodeID: 2},
-				{NodeID: 3},
-			}},
+			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
 			pr.Points[1])
 		sm.MapPoint(
-			&meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{
-				{NodeID: 1},
-				{NodeID: 2},
-				{NodeID: 3},
-			}},
+			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
 			pr.Points[2])
 
 		// Local cluster.Node ShardWriter
@@ -354,16 +334,8 @@ func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64
 func NewMetaStore() *MetaStore {
 	ms := &MetaStore{}
 	rp := NewRetentionPolicy("myp", time.Hour, 3)
-	AttachShardGroupInfo(rp, []meta.ShardOwner{
-		{NodeID: 1},
-		{NodeID: 2},
-		{NodeID: 3},
-	})
-	AttachShardGroupInfo(rp, []meta.ShardOwner{
-		{NodeID: 1},
-		{NodeID: 2},
-		{NodeID: 3},
-	})
+	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
+	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
 
 	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
 		return rp, nil
@@ -408,15 +380,15 @@ func (m MetaStore) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupI
 
 func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo {
 	shards := []meta.ShardInfo{}
-	owners := []meta.ShardOwner{}
+	ownerIDs := []uint64{}
 	for i := 1; i <= nodeCount; i++ {
-		owners = append(owners, meta.ShardOwner{NodeID: uint64(i)})
+		ownerIDs = append(ownerIDs, uint64(i))
 	}
 
 	// each node is fully replicated with each other
 	shards = append(shards, meta.ShardInfo{
 		ID:       nextShardID(),
-		Owners:   owners,
+		OwnerIDs: ownerIDs,
 	})
 
 	rp := &meta.RetentionPolicyInfo{
@@ -436,7 +408,7 @@ func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *met
 	return rp
 }
 
-func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) {
+func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, ownerIDs []uint64) {
 	var startTime, endTime time.Time
 	if len(rp.ShardGroups) == 0 {
 		startTime = time.Unix(0, 0)
@@ -452,7 +424,7 @@ func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner
 		Shards: []meta.ShardInfo{
 			meta.ShardInfo{
 				ID:       nextShardID(),
-				Owners:   owners,
+				OwnerIDs: ownerIDs,
 			},
 		},
 	}
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go
@@ -1,7 +1,6 @@
 package cluster
 
 import (
-	"fmt"
 	"time"
 
 	"github.com/gogo/protobuf/proto"
@@ -111,9 +110,7 @@ func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp t
 }
 
 func (w *WriteShardRequest) AddPoints(points []tsdb.Point) {
-	for _, p := range points {
-		w.pb.Points = append(w.pb.Points, []byte(p.String()))
-	}
+	w.pb.Points = append(w.pb.Points, w.marshalPoints(points)...)
 }
 
 // MarshalBinary encodes the object to a binary format.
@@ -121,6 +118,55 @@ func (w *WriteShardRequest) MarshalBinary() ([]byte, error) {
 	return proto.Marshal(&w.pb)
 }
 
+func (w *WriteShardRequest) marshalPoints(points []tsdb.Point) []*internal.Point {
+	pts := make([]*internal.Point, len(points))
+	for i, p := range points {
+		fields := []*internal.Field{}
+		for k, v := range p.Fields() {
+			name := k
+			f := &internal.Field{
+				Name: &name,
+			}
+			switch t := v.(type) {
+			case int:
+				f.Int64 = proto.Int64(int64(t))
+			case int32:
+				f.Int32 = proto.Int32(t)
+			case int64:
+				f.Int64 = proto.Int64(t)
+			case float64:
+				f.Float64 = proto.Float64(t)
+			case bool:
+				f.Bool = proto.Bool(t)
+			case string:
+				f.String_ = proto.String(t)
+			case []byte:
+				f.Bytes = t
+			}
+			fields = append(fields, f)
+		}
+
+		tags := []*internal.Tag{}
+		for k, v := range p.Tags() {
+			key := k
+			value := v
+			tags = append(tags, &internal.Tag{
+				Key:   &key,
+				Value: &value,
+			})
+		}
+		name := p.Name()
+		pts[i] = &internal.Point{
+			Name:   &name,
+			Time:   proto.Int64(p.Time().UnixNano()),
+			Fields: fields,
+			Tags:   tags,
+		}
+
+	}
+	return pts
+}
+
 // UnmarshalBinary populates WritePointRequest from a binary format.
 func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error {
 	if err := proto.Unmarshal(buf, &w.pb); err != nil {
@@ -132,14 +178,33 @@ func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error {
 func (w *WriteShardRequest) unmarshalPoints() []tsdb.Point {
 	points := make([]tsdb.Point, len(w.pb.GetPoints()))
 	for i, p := range w.pb.GetPoints() {
-		pt, err := tsdb.ParsePoints(p)
-		if err != nil {
-			// A error here means that one node parsed the point correctly but sent an
-			// unparseable version to another node. We could log and drop the point and allow
-			// anti-entropy to resolve the discrepancy but this shouldn't ever happen.
-			panic(fmt.Sprintf("failed to parse point: `%v`: %v", string(p), err))
+		pt := tsdb.NewPoint(
+			p.GetName(), map[string]string{},
+			map[string]interface{}{}, time.Unix(0, p.GetTime()))
+
+		for _, f := range p.GetFields() {
+			n := f.GetName()
+			if f.Int32 != nil {
+				pt.AddField(n, f.GetInt32())
+			} else if f.Int64 != nil {
+				pt.AddField(n, f.GetInt64())
+			} else if f.Float64 != nil {
+				pt.AddField(n, f.GetFloat64())
+			} else if f.Bool != nil {
+				pt.AddField(n, f.GetBool())
+			} else if f.String_ != nil {
+				pt.AddField(n, f.GetString_())
+			} else {
+				pt.AddField(n, f.GetBytes())
+			}
 		}
-		points[i] = pt[0]
+
+		tags := tsdb.Tags{}
+		for _, t := range p.GetTags() {
+			tags[t.GetKey()] = t.GetValue()
+		}
+		pt.SetTags(tags)
+		points[i] = pt
 	}
 	return points
 }
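The restored `marshalPoints` dispatches on each field value's dynamic type; a stripped-down, self-contained sketch of that dispatch (field values are illustrative):

```go
package main

import "fmt"

// describe classifies a field value the same way marshalPoints does,
// minus the protobuf plumbing.
func describe(v interface{}) string {
	switch t := v.(type) {
	case int, int32, int64:
		return fmt.Sprintf("integer %v", t)
	case float64:
		return fmt.Sprintf("float %v", t)
	case bool:
		return fmt.Sprintf("bool %v", t)
	case string:
		return fmt.Sprintf("string %q", t)
	case []byte:
		return fmt.Sprintf("bytes %v", t)
	default:
		return fmt.Sprintf("unsupported %T", t)
	}
}

func main() {
	for _, v := range []interface{}{int64(42), 0.64, true, "idle", []byte{0x1}} {
		fmt.Println(describe(v))
	}
}
```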
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go
@@ -11,7 +11,6 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/influxdb/influxdb/influxql"
 	"github.com/influxdb/influxdb/meta"
 	"github.com/influxdb/influxdb/tsdb"
 )
@@ -38,7 +37,7 @@ type Service struct {
 	TSDBStore interface {
 		CreateShard(database, policy string, shardID uint64) error
 		WriteToShard(shardID uint64, points []tsdb.Point) error
-		CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error)
+		CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error)
 	}
 
 	Logger *log.Logger
@@ -108,7 +107,7 @@ func (s *Service) Close() error {
 
 	// Shut down all handlers.
 	close(s.closing)
-	s.wg.Wait()
+	// s.wg.Wait() // FIXME(benbjohnson)
 
 	return nil
 }
@@ -185,7 +184,7 @@ func (s *Service) processWriteShardRequest(buf []byte) error {
 		// If we can't find it, then we need to drop this request
 		// as it is no longer valid.  This could happen if writes were queued via
 		// hinted handoff and delivered after a shard group was deleted.
-		s.Logger.Printf("drop write request: shard=%d. shard group does not exist or was deleted", req.ShardID())
+		s.Logger.Printf("drop write request: shard=%d", req.ShardID())
 		return nil
 	}
 
@@ -233,15 +232,7 @@ func (s *Service) processMapShardRequest(w io.Writer, buf []byte) error {
 		return err
 	}
 
-	// Parse the statement.
-	q, err := influxql.ParseQuery(req.Query())
-	if err != nil {
-		return fmt.Errorf("processing map shard: %s", err)
-	} else if len(q.Statements) != 1 {
-		return fmt.Errorf("processing map shard: expected 1 statement but got %d", len(q.Statements))
-	}
-
-	m, err := s.TSDBStore.CreateMapper(req.ShardID(), q.Statements[0], int(req.ChunkSize()))
+	m, err := s.TSDBStore.CreateMapper(req.ShardID(), req.Query(), int(req.ChunkSize()))
 	if err != nil {
 		return fmt.Errorf("create mapper: %s", err)
 	}
@@ -268,10 +259,6 @@ func (s *Service) processMapShardRequest(w io.Writer, buf []byte) error {
 		if err != nil {
 			return fmt.Errorf("next chunk: %s", err)
 		}
-
-		// NOTE: Even if the chunk is nil, we still need to send one
-		// empty response to let the other side know we're out of data.
-
 		if chunk != nil {
 			b, err := json.Marshal(chunk)
 			if err != nil {
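The deleted parse step above validated that a mapped query holds exactly one statement before creating a mapper. A standalone sketch of that check using the vendored `influxql` package (query text is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdb/influxdb/influxql"
)

func main() {
	q, err := influxql.ParseQuery("SELECT mean(value) FROM cpu GROUP BY time(1m)")
	if err != nil {
		log.Fatalf("processing map shard: %s", err)
	}
	if len(q.Statements) != 1 {
		log.Fatalf("expected 1 statement but got %d", len(q.Statements))
	}
	fmt.Println(q.Statements[0].String())
}
```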
@@ -6,7 +6,6 @@ import (
 	"time"
 
 	"github.com/influxdb/influxdb/cluster"
-	"github.com/influxdb/influxdb/influxql"
 	"github.com/influxdb/influxdb/meta"
 	"github.com/influxdb/influxdb/tcp"
 	"github.com/influxdb/influxdb/tsdb"
@@ -29,7 +28,7 @@ type testService struct {
 	muxln            net.Listener
 	writeShardFunc   func(shardID uint64, points []tsdb.Point) error
 	createShardFunc  func(database, policy string, shardID uint64) error
-	createMapperFunc func(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error)
+	createMapperFunc func(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error)
 }
 
 func newTestWriteService(f func(shardID uint64, points []tsdb.Point) error) testService {
@@ -70,8 +69,8 @@ func (t testService) CreateShard(database, policy string, shardID uint64) error
 	return t.createShardFunc(database, policy, shardID)
 }
 
-func (t testService) CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) {
-	return t.createMapperFunc(shardID, stmt, chunkSize)
+func (t testService) CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error) {
+	return t.createMapperFunc(shardID, query, chunkSize)
 }
 
 func writeShardSuccess(shardID uint64, points []tsdb.Point) error {
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go
@@ -1,14 +1,16 @@
 package cluster
 
 import (
+	"encoding/json"
 	"fmt"
+	"io"
 	"math/rand"
 	"net"
 	"time"
 
-	"github.com/influxdb/influxdb/influxql"
 	"github.com/influxdb/influxdb/meta"
 	"github.com/influxdb/influxdb/tsdb"
+	"gopkg.in/fatih/pool.v2"
 )
 
 // ShardMapper is responsible for providing mappers for requested shards. It is
@@ -23,7 +25,7 @@ type ShardMapper struct {
 	}
 
 	TSDBStore interface {
-		CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error)
+		CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error)
 	}
 
 	timeout time.Duration
@@ -39,58 +41,67 @@ func NewShardMapper(timeout time.Duration) *ShardMapper {
 }
 
 // CreateMapper returns a Mapper for the given shard ID.
-func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) {
-	m, err := s.TSDBStore.CreateMapper(sh.ID, stmt, chunkSize)
-	if err != nil {
-		return nil, err
-	}
-
-	if !sh.OwnedBy(s.MetaStore.NodeID()) || s.ForceRemoteMapping {
+func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt string, chunkSize int) (tsdb.Mapper, error) {
+	var err error
+	var m tsdb.Mapper
+	if sh.OwnedBy(s.MetaStore.NodeID()) && !s.ForceRemoteMapping {
+		m, err = s.TSDBStore.CreateMapper(sh.ID, stmt, chunkSize)
+		if err != nil {
+			return nil, err
+		}
+	} else {
 		// Pick a node in a pseudo-random manner.
-		conn, err := s.dial(sh.Owners[rand.Intn(len(sh.Owners))].NodeID)
+		conn, err := s.dial(sh.OwnerIDs[rand.Intn(len(sh.OwnerIDs))])
 		if err != nil {
 			return nil, err
 		}
 		conn.SetDeadline(time.Now().Add(s.timeout))
 
-		m.SetRemote(NewRemoteMapper(conn, sh.ID, stmt, chunkSize))
+		rm := NewRemoteMapper(conn.(*pool.PoolConn), sh.ID, stmt, chunkSize)
+		m = rm
 	}
 
 	return m, nil
 }
 
 func (s *ShardMapper) dial(nodeID uint64) (net.Conn, error) {
-	ni, err := s.MetaStore.Node(nodeID)
-	if err != nil {
-		return nil, err
+	// If we don't have a connection pool for that addr yet, create one
+	_, ok := s.pool.getPool(nodeID)
+	if !ok {
+		factory := &connFactory{nodeID: nodeID, clientPool: s.pool, timeout: s.timeout}
+		factory.metaStore = s.MetaStore
+
+		p, err := pool.NewChannelPool(1, 3, factory.dial)
+		if err != nil {
+			return nil, err
+		}
+		s.pool.setPool(nodeID, p)
 	}
-	conn, err := net.Dial("tcp", ni.Host)
-	if err != nil {
-		return nil, err
-	}
-
-	// Write the cluster multiplexing header byte
-	conn.Write([]byte{MuxHeader})
-
-	return conn, nil
+	return s.pool.conn(nodeID)
+}
+
+type remoteShardConn interface {
+	io.ReadWriter
+	Close() error
+	MarkUnusable()
 }
 
 // RemoteMapper implements the tsdb.Mapper interface. It connects to a remote node,
 // sends a query, and interprets the stream of data that comes back.
 type RemoteMapper struct {
 	shardID   uint64
-	stmt      influxql.Statement
+	stmt      string
 	chunkSize int
 
 	tagsets []string
 	fields  []string
 
-	conn             net.Conn
+	conn             remoteShardConn
 	bufferedResponse *MapShardResponse
 }
 
 // NewRemoteMapper returns a new remote mapper using the given connection.
-func NewRemoteMapper(c net.Conn, shardID uint64, stmt influxql.Statement, chunkSize int) *RemoteMapper {
+func NewRemoteMapper(c remoteShardConn, shardID uint64, stmt string, chunkSize int) *RemoteMapper {
 	return &RemoteMapper{
 		conn:      c,
 		shardID:   shardID,
@@ -109,7 +120,7 @@ func (r *RemoteMapper) Open() (err error) {
 	// Build Map request.
 	var request MapShardRequest
 	request.SetShardID(r.shardID)
-	request.SetQuery(r.stmt.String())
+	request.SetQuery(r.stmt)
 	request.SetChunkSize(int32(r.chunkSize))
 
 	// Marshal into protocol buffers.
@@ -120,12 +131,14 @@ func (r *RemoteMapper) Open() (err error) {
 
 	// Write request.
 	if err := WriteTLV(r.conn, mapShardRequestMessage, buf); err != nil {
+		r.conn.MarkUnusable()
 		return err
 	}
 
 	// Read the response.
 	_, buf, err = ReadTLV(r.conn)
 	if err != nil {
+		r.conn.MarkUnusable()
 		return err
 	}
 
@@ -141,15 +154,10 @@ func (r *RemoteMapper) Open() (err error) {
 
 	// Decode the first response to get the TagSets.
 	r.tagsets = r.bufferedResponse.TagSets()
 	r.fields = r.bufferedResponse.Fields()
 
 	return nil
 }
 
-func (r *RemoteMapper) SetRemote(m tsdb.Mapper) error {
-	return fmt.Errorf("cannot set remote mapper on a remote mapper")
-}
-
 func (r *RemoteMapper) TagSets() []string {
 	return r.tagsets
 }
@@ -160,7 +168,9 @@ func (r *RemoteMapper) Fields() []string {
 
 // NextChunk returns the next chunk read from the remote node to the client.
 func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) {
+	output := &tsdb.MapperOutput{}
 	var response *MapShardResponse
+
 	if r.bufferedResponse != nil {
 		response = r.bufferedResponse
 		r.bufferedResponse = nil
@@ -170,6 +180,7 @@ func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) {
 		// Read the response.
 		_, buf, err := ReadTLV(r.conn)
 		if err != nil {
+			r.conn.MarkUnusable()
 			return nil, err
 		}
 
@@ -186,8 +197,8 @@ func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) {
 	if response.Data() == nil {
 		return nil, nil
 	}
 
-	return response.Data(), err
+	err = json.Unmarshal(response.Data(), output)
+	return output, err
 }
 
 // Close the Mapper
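The restored `dial` keeps one channel-backed connection pool per node via `gopkg.in/fatih/pool.v2`. A minimal standalone sketch of that library's pattern, assuming a reachable address (the address itself is illustrative):

```go
package main

import (
	"log"
	"net"

	"gopkg.in/fatih/pool.v2"
)

func main() {
	factory := func() (net.Conn, error) { return net.Dial("tcp", "127.0.0.1:8088") }

	// Initial capacity 1, maximum 3 idle connections, matching the diff.
	p, err := pool.NewChannelPool(1, 3, factory)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	conn, err := p.Get() // concretely a *pool.PoolConn wrapping net.Conn
	if err != nil {
		log.Fatal(err)
	}
	// Close returns the connection to the pool unless MarkUnusable was called,
	// which is exactly how RemoteMapper discards broken connections above.
	defer conn.Close()
}
```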
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go (generated, vendored; 24 lines changed)
@@ -3,18 +3,14 @@ package cluster
 
 import (
 	"bytes"
-	"encoding/json"
-	"fmt"
 	"io"
 	"net"
 	"testing"
 
-	"github.com/influxdb/influxdb/influxql"
 	"github.com/influxdb/influxdb/tsdb"
 )
 
+// remoteShardResponder implements the remoteShardConn interface.
 type remoteShardResponder struct {
-	net.Conn
 	t       *testing.T
 	rxBytes []byte
@@ -43,6 +39,7 @@ func newRemoteShardResponder(outputs []*tsdb.MapperOutput, tagsets []string) *re
 	return r
 }
 
+func (r remoteShardResponder) MarkUnusable() { return }
 func (r remoteShardResponder) Close() error  { return nil }
 func (r remoteShardResponder) Read(p []byte) (n int, err error) {
 	return io.ReadFull(r.buffer, p)
@@ -66,7 +63,7 @@ func TestShardWriter_RemoteMapper_Success(t *testing.T) {
 
 	c := newRemoteShardResponder([]*tsdb.MapperOutput{expOutput, nil}, expTagSets)
 
-	r := NewRemoteMapper(c, 1234, mustParseStmt("SELECT * FROM CPU"), 10)
+	r := NewRemoteMapper(c, 1234, "SELECT * FROM CPU", 10)
 	if err := r.Open(); err != nil {
 		t.Fatalf("failed to open remote mapper: %s", err.Error())
 	}
@@ -80,14 +77,10 @@ func TestShardWriter_RemoteMapper_Success(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to get next chunk from mapper: %s", err.Error())
 	}
-	b, ok := chunk.([]byte)
+	output, ok := chunk.(*tsdb.MapperOutput)
 	if !ok {
 		t.Fatal("chunk is not of expected type")
 	}
-	output := &tsdb.MapperOutput{}
-	if err := json.Unmarshal(b, output); err != nil {
-		t.Fatal(err)
-	}
 	if output.Name != "cpu" {
 		t.Fatalf("received output incorrect, exp: %v, got %v", expOutput, output)
 	}
@@ -101,14 +94,3 @@ func TestShardWriter_RemoteMapper_Success(t *testing.T) {
 		t.Fatal("received more chunks when none expected")
 	}
 }
-
-// mustParseStmt parses a single statement or panics.
-func mustParseStmt(stmt string) influxql.Statement {
-	q, err := influxql.ParseQuery(stmt)
-	if err != nil {
-		panic(err)
-	} else if len(q.Statements) != 1 {
-		panic(fmt.Sprintf("expected 1 statement but got %d", len(q.Statements)))
-	}
-	return q.Statements[0]
-}
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go
@@ -17,7 +17,6 @@ import (
 	"text/tabwriter"
 
 	"github.com/influxdb/influxdb/client"
-	"github.com/influxdb/influxdb/cluster"
 	"github.com/influxdb/influxdb/importer/v8"
 	"github.com/peterh/liner"
 )
@@ -31,9 +30,6 @@ const (
 	// defaultFormat is the default format of the results when issuing queries
 	defaultFormat = "column"
 
-	// defaultPrecision is the default timestamp format of the results when issuing queries
-	defaultPrecision = "ns"
-
 	// defaultPPS is the default points per second that the import will throttle at
 	// by default it's 0, which means it will not throttle
 	defaultPPS = 0
@@ -52,8 +48,6 @@ type CommandLine struct {
 	Version          string
 	Pretty           bool   // controls pretty print for json
 	Format           string // controls the output format. Valid values are json, csv, or column
-	Precision        string
-	WriteConsistency string
 	Execute          string
 	ShowVersion      bool
 	Import           bool
@@ -73,8 +67,6 @@ func main() {
 	fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.")
 	fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.")
 	fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.")
-	fs.StringVar(&c.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.")
-	fs.StringVar(&c.WriteConsistency, "consistency", "any", "Set write consistency level: any, one, quorum, or all.")
 	fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.")
 	fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.")
 	fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.")
@@ -104,10 +96,6 @@ func main() {
 	Execute command and quit.
   -format 'json|csv|column'
 	Format specifies the format of the server responses: json, csv, or column.
-  -precision 'rfc3339|h|m|s|ms|u|ns'
-	Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns.
-  -consistency 'any|one|quorum|all'
-	Set write consistency level: any, one, quorum, or all
   -pretty
 	Turns on pretty print for the json format.
   -import
@@ -165,8 +153,6 @@ Examples:
 	}
 
 	if c.Execute != "" {
-		// Modify precision before executing query
-		c.SetPrecision(c.Precision)
 		if err := c.ExecuteQuery(c.Execute); err != nil {
 			c.Line.Close()
 			os.Exit(1)
@ -193,7 +179,6 @@ Examples:
|
|||
config.URL = u
|
||||
config.Compressed = c.Compressed
|
||||
config.PPS = c.PPS
|
||||
config.Precision = c.Precision
|
||||
|
||||
i := v8.NewImporter(config)
|
||||
if err := i.Import(); err != nil {
|
||||
|
@ -259,10 +244,6 @@ func (c *CommandLine) ParseCommand(cmd string) bool {
|
|||
c.help()
|
||||
case strings.HasPrefix(lcmd, "format"):
|
||||
c.SetFormat(cmd)
|
||||
case strings.HasPrefix(lcmd, "precision"):
|
||||
c.SetPrecision(cmd)
|
||||
case strings.HasPrefix(lcmd, "consistency"):
|
||||
c.SetWriteConsistency(cmd)
|
||||
case strings.HasPrefix(lcmd, "settings"):
|
||||
c.Settings()
|
||||
case strings.HasPrefix(lcmd, "pretty"):
|
||||
|
@ -307,7 +288,6 @@ func (c *CommandLine) connect(cmd string) error {
|
|||
config.Username = c.Username
|
||||
config.Password = c.Password
|
||||
config.UserAgent = "InfluxDBShell/" + version
|
||||
config.Precision = c.Precision
|
||||
cl, err := client.NewClient(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not create client %s", err)
|
||||
|
@ -364,24 +344,6 @@ func (c *CommandLine) use(cmd string) {
|
|||
fmt.Printf("Using database %s\n", d)
|
||||
}
|
||||
|
||||
func (c *CommandLine) SetPrecision(cmd string) {
|
||||
// Remove the "precision" keyword if it exists
|
||||
cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1))
|
||||
// normalize cmd
|
||||
cmd = strings.ToLower(cmd)
|
||||
|
||||
switch cmd {
|
||||
case "h", "m", "s", "ms", "u", "ns":
|
||||
c.Precision = cmd
|
||||
c.Client.SetPrecision(c.Precision)
|
||||
case "rfc3339":
|
||||
c.Precision = ""
|
||||
c.Client.SetPrecision(c.Precision)
|
||||
default:
|
||||
fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CommandLine) SetFormat(cmd string) {
|
||||
// Remove the "format" keyword if it exists
|
||||
cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1))
|
||||
|
@ -396,20 +358,6 @@ func (c *CommandLine) SetFormat(cmd string) {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *CommandLine) SetWriteConsistency(cmd string) {
|
||||
// Remove the "consistency" keyword if it exists
|
||||
cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1))
|
||||
// normalize cmd
|
||||
cmd = strings.ToLower(cmd)
|
||||
|
||||
_, err := cluster.ParseConsistencyLevel(cmd)
|
||||
if err != nil {
|
||||
fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd)
|
||||
return
|
||||
}
|
||||
c.WriteConsistency = cmd
|
||||
}
|
||||
|
||||
// isWhitespace returns true if the rune is a space, tab, or newline.
|
||||
func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
|
||||
|
||||
|
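Both setters removed above follow one parsing idiom: strip the command keyword, trim, lower-case, then validate against a small whitelist. A standalone sketch of that idiom with hypothetical names — normalizeSetting is not a function in the repo:

package main

import (
	"fmt"
	"strings"
)

// normalizeSetting strips a leading keyword such as "precision" or
// "consistency" from a shell command and lower-cases the remainder.
// Lower-casing before stripping keeps mixed-case input like
// "Consistency one" simple to handle.
func normalizeSetting(cmd, keyword string) string {
	cmd = strings.ToLower(strings.TrimSpace(cmd))
	return strings.TrimSpace(strings.TrimPrefix(cmd, keyword))
}

func main() {
	fmt.Println(normalizeSetting("precision NS", "precision"))           // "ns"
	fmt.Println(normalizeSetting(" Consistency quorum ", "consistency")) // "quorum"
}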
@@ -496,7 +444,7 @@ func (c *CommandLine) Insert(stmt string) error {
 		Database:         c.Database,
 		RetentionPolicy:  c.RetentionPolicy,
 		Precision:        "n",
-		WriteConsistency: c.WriteConsistency,
+		WriteConsistency: client.ConsistencyAny,
 	})
 	if err != nil {
 		fmt.Printf("ERR: %s\n", err)
 
@@ -693,7 +641,6 @@ func (c *CommandLine) Settings() {
 	fmt.Fprintf(w, "Database\t%s\n", c.Database)
 	fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty)
 	fmt.Fprintf(w, "Format\t%s\n", c.Format)
-	fmt.Fprintf(w, "Write Consistency\t%s\n", c.WriteConsistency)
 	fmt.Fprintln(w)
 	w.Flush()
 }
 
@@ -705,8 +652,6 @@ func (c *CommandLine) help() {
         pretty                toggle pretty print
         use <db_name>         set current databases
         format <format>       set the output format: json, csv, or column
-        precision <format>    set the timestamp format: h,m,s,ms,u,ns
-        consistency <level>   set write consistency level: any, one, quorum, or all
         settings              output the current settings for the shell
         exit                  quit the influx shell
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go (generated, vendored)

@@ -91,31 +91,6 @@ func TestParseCommand_Use(t *testing.T) {
 	}
 }
 
-func TestParseCommand_Consistency(t *testing.T) {
-	t.Parallel()
-	c := main.CommandLine{}
-	tests := []struct {
-		cmd string
-	}{
-		{cmd: "consistency one"},
-		{cmd: " consistency one"},
-		{cmd: "consistency one "},
-		{cmd: "consistency one;"},
-		{cmd: "consistency one; "},
-		{cmd: "Consistency one"},
-	}
-
-	for _, test := range tests {
-		if !c.ParseCommand(test.cmd) {
-			t.Fatalf(`Command "consistency" failed for %q.`, test.cmd)
-		}
-
-		if c.WriteConsistency != "one" {
-			t.Fatalf(`Command "consistency" changed consistency to %q. Expected one`, c.WriteConsistency)
-		}
-	}
-}
-
 func TestParseCommand_Insert(t *testing.T) {
 	t.Parallel()
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
139  Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go (generated, vendored)
@@ -3,11 +3,14 @@ package main
 import (
 	"flag"
 	"fmt"
+	"math/rand"
+	"net/url"
 	"runtime"
+	"sort"
+	"sync"
 	"time"
 
-	"github.com/influxdb/influxdb/stress"
+	"github.com/influxdb/influxdb/client"
 )
 
 var (
@@ -18,50 +21,134 @@ var (
 	batchInterval = flag.Duration("batchinterval", 0*time.Second, "duration between batches")
 	database      = flag.String("database", "stress", "name of database")
 	address       = flag.String("addr", "localhost:8086", "IP address and port of database (e.g., localhost:8086)")
-	precision     = flag.String("precision", "n", "The precision that points in the database will be with")
 )
 
-var ms runner.Measurements
-
-func init() {
-	flag.Var(&ms, "m", "comma-separated list of intervals to use between events")
-}
-
 func main() {
 	flag.Parse()
 	runtime.GOMAXPROCS(runtime.NumCPU())
 
-	if len(ms) == 0 {
-		ms = append(ms, "cpu")
-	}
+	startTime := time.Now()
+	counter := NewConcurrencyLimiter(*concurrency)
 
-	cfg := &runner.Config{
-		BatchSize:     *batchSize,
-		Measurements:  ms,
-		SeriesCount:   *seriesCount,
-		PointCount:    *pointCount,
-		Concurrency:   *concurrency,
-		BatchInterval: *batchInterval,
-		Database:      *database,
-		Address:       *address,
-		Precision:     *precision,
-	}
+	u, _ := url.Parse(fmt.Sprintf("http://%s", *address))
+	c, err := client.NewClient(client.Config{URL: *u})
+	if err != nil {
+		panic(err)
+	}
+
+	var mu sync.Mutex
+	var wg sync.WaitGroup
+	responseTimes := make([]int, 0)
+
+	totalPoints := 0
+
+	batch := &client.BatchPoints{
+		Database:         *database,
+		WriteConsistency: "any",
+		Time:             time.Now(),
+		Precision:        "n",
+	}
+	for i := 1; i <= *pointCount; i++ {
+		for j := 1; j <= *seriesCount; j++ {
+			p := client.Point{
+				Measurement: "cpu",
+				Tags:        map[string]string{"region": "uswest", "host": fmt.Sprintf("host-%d", j)},
+				Fields:      map[string]interface{}{"value": rand.Float64()},
+			}
+			batch.Points = append(batch.Points, p)
+			if len(batch.Points) >= *batchSize {
+				wg.Add(1)
+				counter.Increment()
+				totalPoints += len(batch.Points)
+				go func(b *client.BatchPoints, total int) {
+					st := time.Now()
+					if _, err := c.Write(*b); err != nil {
+						fmt.Println("ERROR: ", err.Error())
+					} else {
+						mu.Lock()
+						responseTimes = append(responseTimes, int(time.Since(st).Nanoseconds()))
+						mu.Unlock()
+					}
+					wg.Done()
+					counter.Decrement()
+					if total%500000 == 0 {
+						fmt.Printf("%d total points. %d in %s\n", total, *batchSize, time.Since(st))
+					}
+				}(batch, totalPoints)
+
+				batch = &client.BatchPoints{
+					Database:         *database,
+					WriteConsistency: "any",
+					Precision:        "n",
+					Time:             time.Now(),
+				}
+			}
+		}
+	}
 
-	totalPoints, failedRequests, responseTimes, timer := runner.Run(cfg)
-
-	sort.Sort(sort.Reverse(sort.Interface(responseTimes)))
+	wg.Wait()
+	sort.Sort(sort.Reverse(sort.IntSlice(responseTimes)))
 
 	total := int64(0)
 	for _, t := range responseTimes {
-		total += int64(t.Value)
+		total += int64(t)
 	}
 	mean := total / int64(len(responseTimes))
 
-	fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/timer.Elapsed().Seconds())
-	fmt.Printf("%d requests failed for %d total points that didn't get posted.\n", failedRequests, failedRequests**batchSize)
+	fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/time.Since(startTime).Seconds())
 	fmt.Println("Average response time: ", time.Duration(mean))
 	fmt.Println("Slowest response times:")
 	for _, r := range responseTimes[:100] {
-		fmt.Println(time.Duration(r.Value))
+		fmt.Println(time.Duration(r))
 	}
 }
+
+// ConcurrencyLimiter is a go routine safe struct that can be used to
+// ensure that no more than a specifid max number of goroutines are
+// executing.
+type ConcurrencyLimiter struct {
+	inc   chan chan struct{}
+	dec   chan struct{}
+	max   int
+	count int
+}
+
+// NewConcurrencyLimiter returns a configured limiter that will
+// ensure that calls to Increment will block if the max is hit.
+func NewConcurrencyLimiter(max int) *ConcurrencyLimiter {
+	c := &ConcurrencyLimiter{
+		inc: make(chan chan struct{}),
+		dec: make(chan struct{}, max),
+		max: max,
+	}
+	go c.handleLimits()
+	return c
+}
+
+// Increment will increase the count of running goroutines by 1.
+// if the number is currently at the max, the call to Increment
+// will block until another goroutine decrements.
+func (c *ConcurrencyLimiter) Increment() {
+	r := make(chan struct{})
+	c.inc <- r
+	<-r
+}
+
+// Decrement will reduce the count of running goroutines by 1
+func (c *ConcurrencyLimiter) Decrement() {
+	c.dec <- struct{}{}
+}
+
+// handleLimits runs in a goroutine to manage the count of
+// running goroutines.
+func (c *ConcurrencyLimiter) handleLimits() {
+	for {
+		r := <-c.inc
		if c.count >= c.max {
+			<-c.dec
+			c.count--
+		}
+		c.count++
+		r <- struct{}{}
+	}
+}
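The restored ConcurrencyLimiter above is self-contained, so a usage sketch is short: bracket each goroutine with Increment and Decrement to cap in-flight work, exactly as the write loop above does. The worker body here is a stand-in:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// Assumes the ConcurrencyLimiter type from the file above is in scope.
	limiter := NewConcurrencyLimiter(4) // at most 4 workers in flight
	var wg sync.WaitGroup

	for i := 0; i < 20; i++ {
		wg.Add(1)
		limiter.Increment() // blocks once 4 goroutines are running
		go func(n int) {
			defer wg.Done()
			defer limiter.Decrement()
			time.Sleep(10 * time.Millisecond) // stand-in for c.Write(batch)
			fmt.Println("finished job", n)
		}(i)
	}
	wg.Wait()
}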
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go (generated, vendored)

@@ -66,15 +66,14 @@ func (cmd *Command) Run(args ...string) error {
 	// Print sweet InfluxDB logo.
 	fmt.Print(logo)
 
-	// Mark start-up in log.
-	log.Printf("InfluxDB starting, version %s, branch %s, commit %s", cmd.Version, cmd.Branch, cmd.Commit)
-	log.Printf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0))
-
 	// Write the PID file.
 	if err := cmd.writePIDFile(options.PIDFile); err != nil {
 		return fmt.Errorf("write pid file: %s", err)
 	}
 
+	// Set parallelism.
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
 	// Turn on block profiling to debug stuck databases
 	runtime.SetBlockProfileRate(int(1 * time.Second))
 
@@ -104,8 +103,7 @@ func (cmd *Command) Run(args ...string) error {
 	}
 
 	// Create server from config and start it.
-	buildInfo := &BuildInfo{Version: cmd.Version, Commit: cmd.Commit, Branch: cmd.Branch}
-	s, err := NewServer(config, buildInfo)
+	s, err := NewServer(config, cmd.Version)
 	if err != nil {
 		return fmt.Errorf("create server: %s", err)
 	}
 
@@ -116,6 +114,10 @@ func (cmd *Command) Run(args ...string) error {
 	}
 	cmd.Server = s
 
+	// Mark start-up in log.
+	log.Printf("InfluxDB starting, version %s, branch %s, commit %s", cmd.Version, cmd.Branch, cmd.Commit)
+	log.Println("GOMAXPROCS set to", runtime.GOMAXPROCS(0))
+
 	// Begin monitoring the server's error channel.
 	go cmd.monitorServerErrors()
 
@@ -188,11 +190,11 @@ func (cmd *Command) writePIDFile(path string) error {
 func (cmd *Command) ParseConfig(path string) (*Config, error) {
 	// Use demo configuration if no config path is specified.
 	if path == "" {
-		log.Println("no configuration provided, using default settings")
+		fmt.Fprintln(cmd.Stdout, "no configuration provided, using default settings")
 		return NewDemoConfig()
 	}
 
-	log.Printf("Using configuration at: %s\n", path)
+	fmt.Fprintf(cmd.Stdout, "Using configuration at: %s\n", path)
 
 	config := NewConfig()
 	if _, err := toml.DecodeFile(path, &config); err != nil {
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go (generated, vendored)

@@ -13,13 +13,13 @@ import (
 
 	"github.com/influxdb/influxdb/cluster"
 	"github.com/influxdb/influxdb/meta"
-	"github.com/influxdb/influxdb/monitor"
 	"github.com/influxdb/influxdb/services/admin"
 	"github.com/influxdb/influxdb/services/collectd"
 	"github.com/influxdb/influxdb/services/continuous_querier"
 	"github.com/influxdb/influxdb/services/graphite"
 	"github.com/influxdb/influxdb/services/hh"
 	"github.com/influxdb/influxdb/services/httpd"
+	"github.com/influxdb/influxdb/services/monitor"
 	"github.com/influxdb/influxdb/services/opentsdb"
 	"github.com/influxdb/influxdb/services/precreator"
 	"github.com/influxdb/influxdb/services/retention"
 
@@ -36,7 +36,6 @@ type Config struct {
 	Precreator precreator.Config `toml:"shard-precreation"`
 
 	Admin     admin.Config      `toml:"admin"`
-	Monitor   monitor.Config    `toml:"monitor"`
 	HTTPD     httpd.Config      `toml:"http"`
 	Graphites []graphite.Config `toml:"graphite"`
 	Collectd  collectd.Config   `toml:"collectd"`
 
@@ -44,6 +43,7 @@ type Config struct {
 	UDPs []udp.Config `toml:"udp"`
 
 	// Snapshot SnapshotConfig `toml:"snapshot"`
+	Monitoring      monitor.Config            `toml:"monitoring"`
 	ContinuousQuery continuous_querier.Config `toml:"continuous_queries"`
 
 	HintedHandoff hh.Config `toml:"hinted-handoff"`
 
@@ -61,11 +61,12 @@ func NewConfig() *Config {
 	c.Precreator = precreator.NewConfig()
 
 	c.Admin = admin.NewConfig()
-	c.Monitor = monitor.NewConfig()
 	c.HTTPD = httpd.NewConfig()
 	c.Collectd = collectd.NewConfig()
 	c.OpenTSDB = opentsdb.NewConfig()
 	c.Graphites = append(c.Graphites, graphite.NewConfig())
 
+	c.Monitoring = monitor.NewConfig()
 	c.ContinuousQuery = continuous_querier.NewConfig()
 	c.Retention = retention.NewConfig()
 	c.HintedHandoff = hh.NewConfig()
 
@@ -94,6 +95,7 @@ func NewDemoConfig() (*Config, error) {
 	c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal")
 
 	c.Admin.Enabled = true
+	c.Monitoring.Enabled = false
 
 	return c, nil
 }
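The config hunks above move the monitor settings back to a [monitoring] TOML table (toml:"monitoring" on the restored Monitoring field). A minimal sketch of what that key change means when decoding, using the same BurntSushi toml package the command already imports; the struct is a hypothetical, trimmed-down stand-in for run.Config:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// config is an illustrative stand-in: after the revert, monitor settings
// decode from a [monitoring] table rather than [monitor].
type config struct {
	Monitoring struct {
		Enabled bool `toml:"enabled"`
	} `toml:"monitoring"`
}

func main() {
	var c config
	if _, err := toml.Decode("[monitoring]\nenabled = true\n", &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Monitoring.Enabled) // true
}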
10  Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go (generated, vendored)
@@ -42,21 +42,11 @@ func (cmd *PrintConfigCommand) Run(args ...string) error {
 		return fmt.Errorf("parse config: %s", err)
 	}
 
-	// Apply any environment variables on top of the parsed config
-	if err := config.ApplyEnvOverrides(); err != nil {
-		return fmt.Errorf("apply env config: %v", err)
-	}
-
 	// Override config properties.
 	if *hostname != "" {
 		config.Meta.Hostname = *hostname
 	}
 
-	// Validate the configuration.
-	if err := config.Validate(); err != nil {
-		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`.", err)
-	}
-
 	toml.NewEncoder(cmd.Stdout).Encode(config)
 	fmt.Fprint(cmd.Stdout, "\n")
2  Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go (generated, vendored)
@@ -72,6 +72,8 @@ enabled = true
 		t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress)
 	} else if c.UDPs[0].BindAddress != ":4444" {
 		t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress)
+	} else if c.Monitoring.Enabled != true {
+		t.Fatalf("unexpected monitoring enabled: %v", c.Monitoring.Enabled)
 	} else if c.ContinuousQuery.Enabled != true {
 		t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled)
 	}
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go (generated, vendored)

@@ -14,11 +14,9 @@ import (
 
 	"github.com/influxdb/influxdb/cluster"
 	"github.com/influxdb/influxdb/meta"
-	"github.com/influxdb/influxdb/monitor"
 	"github.com/influxdb/influxdb/services/admin"
 	"github.com/influxdb/influxdb/services/collectd"
 	"github.com/influxdb/influxdb/services/continuous_querier"
-	"github.com/influxdb/influxdb/services/copier"
 	"github.com/influxdb/influxdb/services/graphite"
 	"github.com/influxdb/influxdb/services/hh"
 	"github.com/influxdb/influxdb/services/httpd"
 
@@ -32,18 +30,11 @@ import (
 	_ "github.com/influxdb/influxdb/tsdb/engine"
 )
 
-// BuildInfo represents the build details for the server code.
-type BuildInfo struct {
-	Version string
-	Commit  string
-	Branch  string
-}
-
 // Server represents a container for the metadata and storage data and services.
 // It is built using a Config and it manages the startup and shutdown of all
 // services in the proper order.
 type Server struct {
-	buildInfo BuildInfo
+	version string // Build version
 
 	err     chan error
 	closing chan struct{}
 
@@ -65,9 +56,6 @@ type Server struct {
 	// These references are required for the tcp muxer.
 	ClusterService     *cluster.Service
 	SnapshotterService *snapshotter.Service
-	CopierService      *copier.Service
-
-	Monitor *monitor.Monitor
 
 	// Server reporting
 	reportingDisabled bool
 
@@ -78,13 +66,13 @@ type Server struct {
 }
 
 // NewServer returns a new instance of Server built from a config.
-func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
+func NewServer(c *Config, version string) (*Server, error) {
 	// Construct base meta store and data store.
 	tsdbStore := tsdb.NewStore(c.Data.Dir)
 	tsdbStore.EngineOptions.Config = c.Data
 
 	s := &Server{
-		buildInfo: *buildInfo,
+		version: version,
 		err:     make(chan error),
 		closing: make(chan struct{}),
 
@@ -94,8 +82,6 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
 		MetaStore: meta.NewStore(c.Meta),
 		TSDBStore: tsdbStore,
 
-		Monitor: monitor.New(c.Monitor),
-
 		reportingDisabled: c.ReportingDisabled,
 	}
 
@@ -114,7 +100,6 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
 	s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
 	s.QueryExecutor.MetaStore = s.MetaStore
 	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}
-	s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
 	s.QueryExecutor.ShardMapper = s.ShardMapper
 
 	// Set the shard writer
 
@@ -132,18 +117,10 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
 	s.PointsWriter.ShardWriter = s.ShardWriter
 	s.PointsWriter.HintedHandoff = s.HintedHandoff
 
-	// Initialize the monitor
-	s.Monitor.Version = s.buildInfo.Version
-	s.Monitor.Commit = s.buildInfo.Commit
-	s.Monitor.Branch = s.buildInfo.Branch
-	s.Monitor.MetaStore = s.MetaStore
-	s.Monitor.PointsWriter = s.PointsWriter
-
 	// Append services.
 	s.appendClusterService(c.Cluster)
 	s.appendPrecreatorService(c.Precreator)
 	s.appendSnapshotterService()
-	s.appendCopierService()
 	s.appendAdminService(c.Admin)
 	s.appendContinuousQueryService(c.ContinuousQuery)
 	s.appendHTTPDService(c.HTTPD)
 
@@ -180,13 +157,6 @@ func (s *Server) appendSnapshotterService() {
 	s.SnapshotterService = srv
 }
 
-func (s *Server) appendCopierService() {
-	srv := copier.NewService()
-	srv.TSDBStore = s.TSDBStore
-	s.Services = append(s.Services, srv)
-	s.CopierService = srv
-}
-
 func (s *Server) appendRetentionPolicyService(c retention.Config) {
 	if !c.Enabled {
 		return
 
@@ -213,7 +183,7 @@ func (s *Server) appendHTTPDService(c httpd.Config) {
 	srv.Handler.MetaStore = s.MetaStore
 	srv.Handler.QueryExecutor = s.QueryExecutor
 	srv.Handler.PointsWriter = s.PointsWriter
-	srv.Handler.Version = s.buildInfo.Version
+	srv.Handler.Version = s.version
 
 	// If a ContinuousQuerier service has been started, attach it.
 	for _, srvc := range s.Services {
 
@@ -260,7 +230,6 @@ func (s *Server) appendGraphiteService(c graphite.Config) error {
 
 	srv.PointsWriter = s.PointsWriter
 	srv.MetaStore = s.MetaStore
-	srv.Monitor = s.Monitor
 	s.Services = append(s.Services, srv)
 	return nil
 }
 
@@ -341,7 +310,6 @@ func (s *Server) Open() error {
 
 		s.ClusterService.Listener = mux.Listen(cluster.MuxHeader)
 		s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader)
-		s.CopierService.Listener = mux.Listen(copier.MuxHeader)
 		go mux.Serve(ln)
 
 		// Open meta store.
 
@@ -353,10 +321,6 @@ func (s *Server) Open() error {
 		// Wait for the store to initialize.
 		<-s.MetaStore.Ready()
 
-		if err := s.Monitor.Open(); err != nil {
-			return fmt.Errorf("open monitor: %v", err)
-		}
-
 		// Open TSDB store.
 		if err := s.TSDBStore.Open(); err != nil {
 			return fmt.Errorf("open tsdb store: %s", err)
 
@@ -392,33 +356,20 @@
 func (s *Server) Close() error {
 	stopProfile()
 
-	// Close the listener first to stop any new connections
 	if s.Listener != nil {
 		s.Listener.Close()
 	}
-
-	// Close services to allow any inflight requests to complete
-	// and prevent new requests from being accepted.
-	for _, service := range s.Services {
-		service.Close()
+	if s.MetaStore != nil {
+		s.MetaStore.Close()
 	}
-
-	if s.Monitor != nil {
-		s.Monitor.Close()
-	}
-
-	if s.HintedHandoff != nil {
-		s.HintedHandoff.Close()
-	}
-
-	// Close the TSDBStore, no more reads or writes at this point
 	if s.TSDBStore != nil {
 		s.TSDBStore.Close()
 	}
-
-	// Finally close the meta-store since everything else depends on it
-	if s.MetaStore != nil {
-		s.MetaStore.Close()
+	if s.HintedHandoff != nil {
+		s.HintedHandoff.Close()
+	}
+	for _, service := range s.Services {
+		service.Close()
 	}
 
 	close(s.closing)
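The Close hunk above is mostly a reordering: the removed code drained services before the stores ("finally close the meta-store since everything else depends on it"), while the restored v0.9.3 body closes the meta store early. The removed version followed the general last-in-first-out teardown idiom; a small illustrative sketch, with made-up resource names:

package main

import "fmt"

type resource struct{ name string }

func (r resource) Close() { fmt.Println("closed", r.name) }

func main() {
	// Open resources in dependency order...
	opened := []resource{{"meta store"}, {"tsdb store"}, {"services"}, {"listener"}}

	// ...then close them in reverse, so nothing is torn down while a
	// dependent is still using it.
	for i := len(opened) - 1; i >= 0; i-- {
		opened[i].Close()
	}
}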
@@ -475,7 +426,7 @@ func (s *Server) reportServer() {
 		"name":"reports",
 		"columns":["os", "arch", "version", "server_id", "cluster_id", "num_series", "num_measurements", "num_databases"],
 		"points":[["%s", "%s", "%s", "%x", "%x", "%d", "%d", "%d"]]
-	}]`, runtime.GOOS, runtime.GOARCH, s.buildInfo.Version, s.MetaStore.NodeID(), clusterID, numSeries, numMeasurements, numDatabases)
+	}]`, runtime.GOOS, runtime.GOARCH, s.version, s.MetaStore.NodeID(), clusterID, numSeries, numMeasurements, numDatabases)
 
 	data := bytes.NewBufferString(json)
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go (generated, vendored)

@@ -2,7 +2,6 @@
 package run_test
 
 import (
 	"bytes"
 	"encoding/json"
 	"fmt"
 	"io"
 
@@ -31,12 +30,7 @@ type Server struct {
 
 // NewServer returns a new instance of Server.
 func NewServer(c *run.Config) *Server {
-	buildInfo := &run.BuildInfo{
-		Version: "testServer",
-		Commit:  "testCommit",
-		Branch:  "testBranch",
-	}
-	srv, _ := run.NewServer(c, buildInfo)
+	srv, _ := run.NewServer(c, "testServer")
 	s := Server{
 		Server: srv,
 		Config: c,
 
@@ -59,12 +53,7 @@ func OpenServer(c *run.Config, joinURLs string) *Server {
 
 // OpenServerWithVersion opens a test server with a specific version.
 func OpenServerWithVersion(c *run.Config, version string) *Server {
-	buildInfo := &run.BuildInfo{
-		Version: version,
-		Commit:  "",
-		Branch:  "",
-	}
-	srv, _ := run.NewServer(c, buildInfo)
+	srv, _ := run.NewServer(c, version)
 	s := Server{
 		Server: srv,
 		Config: c,
 
@@ -116,14 +105,10 @@ func (s *Server) QueryWithParams(query string, values url.Values) (results string, err error) {
 		values = url.Values{}
 	}
 	values.Set("q", query)
-	return s.HTTPGet(s.URL() + "/query?" + values.Encode())
-}
-
-// HTTPGet makes an HTTP GET request to the server and returns the response.
-func (s *Server) HTTPGet(url string) (results string, err error) {
-	resp, err := http.Get(url)
+	resp, err := http.Get(s.URL() + "/query?" + values.Encode())
 	if err != nil {
 		return "", err
+		//} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusBadRequest {
 	}
 	body := string(MustReadAll(resp.Body))
 	switch resp.StatusCode {
 
@@ -139,27 +124,6 @@ func (s *Server) HTTPGet(url string) (results string, err error) {
 	}
 }
 
-// HTTPPost makes an HTTP POST request to the server and returns the response.
-func (s *Server) HTTPPost(url string, content []byte) (results string, err error) {
-	buf := bytes.NewBuffer(content)
-	resp, err := http.Post(url, "application/json", buf)
-	if err != nil {
-		return "", err
-	}
-	body := string(MustReadAll(resp.Body))
-	switch resp.StatusCode {
-	case http.StatusBadRequest:
-		if !expectPattern(".*error parsing query*.", body) {
-			return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
-		}
-		return body, nil
-	case http.StatusOK, http.StatusNoContent:
-		return body, nil
-	default:
-		return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
-	}
-}
-
 // Write executes a write against the server and returns the results.
 func (s *Server) Write(db, rp, body string, params url.Values) (results string, err error) {
 	if params == nil {
 
@@ -184,8 +148,6 @@ func (s *Server) Write(db, rp, body string, params url.Values) (results string, err error) {
 func NewConfig() *run.Config {
 	c := run.NewConfig()
 	c.ReportingDisabled = true
-	c.Cluster.ShardWriterTimeout = toml.Duration(30 * time.Second)
-	c.Cluster.WriteTimeout = toml.Duration(30 * time.Second)
 	c.Meta.Dir = MustTempFile()
 	c.Meta.BindAddress = "127.0.0.1:0"
 	c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond)
 
@@ -195,7 +157,6 @@ func NewConfig() *run.Config {
 
 	c.Data.Dir = MustTempFile()
 	c.Data.WALDir = MustTempFile()
-	c.Data.WALLoggingEnabled = false
 
 	c.HintedHandoff.Dir = MustTempFile()
 
@@ -203,8 +164,6 @@ func NewConfig() *run.Config {
 	c.HTTPD.BindAddress = "127.0.0.1:0"
 	c.HTTPD.LogEnabled = testing.Verbose()
 
-	c.Monitor.StoreEnabled = false
-
 	return c
 }
 
@@ -273,7 +232,6 @@ type Query struct {
 	exp, act string
 	pattern  bool
 	skip     bool
-	repeat   int
 }
 
 // Execute runs the command and returns an err if it fails
 
@@ -345,8 +303,6 @@ func configureLogging(s *Server) {
 		s.MetaStore.Logger = nullLogger
 		s.TSDBStore.Logger = nullLogger
 		s.HintedHandoff.SetLogger(nullLogger)
-		s.Monitor.SetLogger(nullLogger)
-		s.QueryExecutor.SetLogger(nullLogger)
 		for _, service := range s.Services {
 			if service, ok := service.(logSetter); ok {
 				service.SetLogger(nullLogger)
697  Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go (generated, vendored)
@@ -52,30 +52,10 @@ func TestServer_DatabaseCommands(t *testing.T) {
 			exp: `{"results":[{"error":"database already exists"}]}`,
 		},
 		&Query{
-			name:    "create database should not error with existing database with IF NOT EXISTS",
-			command: `CREATE DATABASE IF NOT EXISTS db0`,
-			exp:     `{"results":[{}]}`,
-		},
-		&Query{
-			name:    "create database should create non-existing database with IF NOT EXISTS",
-			command: `CREATE DATABASE IF NOT EXISTS db1`,
-			exp:     `{"results":[{}]}`,
-		},
-		&Query{
-			name:    "show database should succeed",
-			command: `SHOW DATABASES`,
-			exp:     `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db1"]]}]}]}`,
-		},
-		&Query{
-			name:    "drop database db0 should succeed",
+			name:    "drop database should succeed",
 			command: `DROP DATABASE db0`,
 			exp:     `{"results":[{}]}`,
 		},
 		&Query{
-			name:    "drop database db1 should succeed",
-			command: `DROP DATABASE db1`,
-			exp:     `{"results":[{}]}`,
-		},
-		&Query{
 			name:    "show database should have no results",
 			command: `SHOW DATABASES`,
@@ -370,34 +350,14 @@ func TestServer_RetentionPolicyCommands(t *testing.T) {
 			exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`,
 		},
 		&Query{
-			name:    "dropping default retention policy should not succeed",
+			name:    "drop retention policy should succeed",
 			command: `DROP RETENTION POLICY rp0 ON db0`,
-			exp:     `{"results":[{"error":"retention policy is default"}]}`,
-		},
-		&Query{
-			name:    "show retention policy should still show policy",
-			command: `SHOW RETENTION POLICIES ON db0`,
-			exp:     `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`,
-		},
-		&Query{
-			name:    "create a second non-default retention policy",
-			command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`,
 			exp:     `{"results":[{}]}`,
 		},
 		&Query{
-			name:    "show retention policy should show both",
+			name:    "show retention policy should be empty after dropping them",
 			command: `SHOW RETENTION POLICIES ON db0`,
-			exp:     `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true],["rp2","1h0m0s",1,false]]}]}]}`,
-		},
-		&Query{
-			name:    "dropping non-default retention policy succeed",
-			command: `DROP RETENTION POLICY rp2 ON db0`,
-			exp:     `{"results":[{}]}`,
-		},
-		&Query{
-			name:    "show retention policy should show just default",
-			command: `SHOW RETENTION POLICIES ON db0`,
-			exp:     `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`,
+			exp:     `{"results":[{"series":[{"columns":["name","duration","replicaN","default"]}]}]}`,
 		},
 		&Query{
 			name: "Ensure retention policy with unacceptable retention cannot be created",
|
|||
&Query{
|
||||
name: "selecting count(*) should error",
|
||||
command: `SELECT count(*) FROM db0.rp0.cpu`,
|
||||
exp: `{"error":"error parsing query: expected field argument in count()"}`,
|
||||
exp: `{"results":[{"error":"expected field argument in count()"}]}`,
|
||||
},
|
||||
}...)
|
||||
|
||||
|
@@ -2221,348 +2181,6 @@ func TestServer_Query_Aggregates(t *testing.T) {
 			command: `SELECT sum(value)/2 FROM load`,
 			exp:     `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`,
 		},
-
-		// order by time desc
-		&Query{
-			name:    "aggregate order by time desc",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`,
-			exp:     `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`,
-		},
-	}...)
-
-	for i, query := range test.queries {
-		if i == 0 {
-			if err := test.init(s); err != nil {
-				t.Fatalf("test init failed: %s", err)
-			}
-		}
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-func TestServer_Query_AggregatesTopInt(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
-		t.Fatal(err)
-	}
-	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
-		t.Fatal(err)
-	}
-
-	writes := []string{
-		// cpu data with overlapping duplicate values
-		// hour 0
-		fmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
-		fmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
-		// hour 1
-		fmt.Sprintf(`cpu,host=server04 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
-		fmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()),
-		fmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:20Z").UnixNano()),
-		// hour 2
-		fmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()),
-		fmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()),
-
-		// memory data
-		// hour 0
-		fmt.Sprintf(`memory,host=a,service=redis value=1000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`memory,host=b,service=mysql value=2000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`memory,host=b,service=redis value=1500i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		// hour 1
-		fmt.Sprintf(`memory,host=a,service=redis value=1001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
-		fmt.Sprintf(`memory,host=b,service=mysql value=2001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
-		fmt.Sprintf(`memory,host=b,service=redis value=1501i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
-		// hour 2
-		fmt.Sprintf(`memory,host=a,service=redis value=1002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()),
-		fmt.Sprintf(`memory,host=b,service=mysql value=2002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()),
-		fmt.Sprintf(`memory,host=b,service=redis value=1502i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()),
-	}
-
-	test := NewTest("db0", "rp0")
-	test.write = strings.Join(writes, "\n")
-
-	test.addQueries([]*Query{
-		&Query{
-			name:    "top - cpu",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 1) FROM cpu`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T02:00:10Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - 2 values",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 2) FROM cpu`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - 3 values - sorts on tie properly",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 3) FROM cpu`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - with tag",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, host, 2) FROM cpu`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top","host"],"values":[["2000-01-01T01:00:10Z",7,"server05"],["2000-01-01T02:00:10Z",9,"server08"]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - 3 values with limit 2",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 3) FROM cpu limit 2`,
-			exp:     `{"error":"error parsing query: limit (3) in top function can not be larger than the LIMIT (2) in the select statement"}`,
-		},
-		&Query{
-			name:    "top - cpu - hourly",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - time specified - hourly",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - time specified (not first) - hourly",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 1), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - 2 values hourly",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - time specified - 2 values hourly",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 2), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`,
-		},
-		&Query{
-			name:    "top - cpu - time specified - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 3), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",5],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
-		},
-		&Query{
-			name:    "top - memory - 2 values, two tags",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, 2), host, service FROM memory`,
-			exp:     `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T01:00:00Z",2001,"b","mysql"],["2000-01-01T02:00:00Z",2002,"b","mysql"]]}]}]}`,
-		},
-		&Query{
-			name:    "top - memory - host tag with limit 2",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, host, 2) FROM memory`,
-			exp:     `{"results":[{"series":[{"name":"memory","columns":["time","top","host"],"values":[["2000-01-01T02:00:00Z",2002,"b"],["2000-01-01T02:00:00Z",1002,"a"]]}]}]}`,
-		},
-		&Query{
-			name:    "top - memory - host tag with limit 2, service tag in select",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, host, 2), service FROM memory`,
-			exp:     `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`,
-		},
-		&Query{
-			name:    "top - memory - service tag with limit 2, host tag in select",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, service, 2), host FROM memory`,
-			exp:     `{"results":[{"series":[{"name":"memory","columns":["time","top","service","host"],"values":[["2000-01-01T02:00:00Z",2002,"mysql","b"],["2000-01-01T02:00:00Z",1502,"redis","b"]]}]}]}`,
-		},
-		&Query{
-			name:    "top - memory - host and service tag with limit 2",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, host, service, 2) FROM memory`,
-			exp:     `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"]]}]}]}`,
-		},
-		&Query{
-			name:    "top - memory - host tag with limit 2 with service tag in select",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, host, 2), service FROM memory`,
-			exp:     `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`,
-		},
-		&Query{
-			name:    "top - memory - host and service tag with limit 3",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT TOP(value, host, service, 3) FROM memory`,
-			exp:     `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`,
-		},
-
-		// TODO
-		// - Test that specifiying fields or tags in the function will rewrite the query to expand them to the fields
-		// - Test that a field can be used in the top function
-		// - Test that asking for a field will come back before a tag if they have the same name for a tag and a field
-		// - Test that `select top(value, host, 2)` when there is only one value for `host` it will only bring back one value
-		// - Test that `select top(value, host, 4) from foo where time > now() - 1d and time < now() group by time(1h)` and host is unique in some time buckets that it returns only the unique ones, and not always 4 values
-
-	}...)
-
-	for i, query := range test.queries {
-		if i == 0 {
-			if err := test.init(s); err != nil {
-				t.Fatalf("test init failed: %s", err)
-			}
-		}
-		if query.skip {
-			t.Logf("SKIP: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Test various aggregates when different series only have data for the same timestamp.
-func TestServer_Query_AggregatesIdenticalTime(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
-		t.Fatal(err)
-	}
-	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
-		t.Fatal(err)
-	}
-
-	writes := []string{
-		fmt.Sprintf(`series,host=a value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=b value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=c value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=d value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=e value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=f value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=g value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=h value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`series,host=i value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-	}
-
-	test := NewTest("db0", "rp0")
-	test.write = strings.Join(writes, "\n")
-
-	test.addQueries([]*Query{
-		&Query{
-			name:    "last from multiple series with identical timestamp",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT last(value) FROM "series"`,
-			exp:     `{"results":[{"series":[{"name":"series","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`,
-			repeat:  100,
-		},
-		&Query{
-			name:    "first from multiple series with identical timestamp",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT first(value) FROM "series"`,
-			exp:     `{"results":[{"series":[{"name":"series","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`,
-			repeat:  100,
-		},
-	}...)
-
-	for i, query := range test.queries {
-		if i == 0 {
-			if err := test.init(s); err != nil {
-				t.Fatalf("test init failed: %s", err)
-			}
-		}
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		for n := 0; n <= query.repeat; n++ {
-			if err := query.Execute(s); err != nil {
-				t.Error(query.Error(err))
-			} else if !query.success() {
-				t.Error(query.failureMessage())
-			}
-		}
-	}
-}
-
-// This will test that when using a group by, that it observes the time you asked for
-// but will only put the values in the bucket that match the time range
-func TestServer_Query_GroupByTimeCutoffs(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
-		t.Fatal(err)
-	}
-	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
-		t.Fatal(err)
-	}
-
-	writes := []string{
-		fmt.Sprintf(`cpu value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`cpu value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()),
-		fmt.Sprintf(`cpu value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:05Z").UnixNano()),
-		fmt.Sprintf(`cpu value=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:08Z").UnixNano()),
-		fmt.Sprintf(`cpu value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:09Z").UnixNano()),
-		fmt.Sprintf(`cpu value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
-	}
-	test := NewTest("db0", "rp0")
-	test.write = strings.Join(writes, "\n")
-
-	test.addQueries([]*Query{
-		&Query{
-			name:    "sum all time",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT SUM(value) FROM cpu`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",21]]}]}]}`,
-		},
-		&Query{
-			name:    "sum all time grouped by time 5s",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`,
-		},
-		&Query{
-			name:    "sum all time grouped by time 5s missing first point",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`,
-		},
-		&Query{
-			name:    "sum all time grouped by time 5s missing first points (null for bucket)",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:02Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`,
-		},
-		&Query{
-			name:    "sum all time grouped by time 5s missing last point - 2 time intervals",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:09Z' group by time(5s)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12]]}]}]}`,
-		},
-		&Query{
-			name:    "sum all time grouped by time 5s missing last 2 points - 2 time intervals",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:08Z' group by time(5s)`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",7]]}]}]}`,
-		},
 	}...)
 
 	for i, query := range test.queries {
@@ -2718,9 +2336,6 @@ func TestServer_Query_Wildcards(t *testing.T) {
 		fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
 		fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
 		fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
-
-		fmt.Sprintf(`m1,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf(`m2,host=server01 field=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()),
 	}
 
 	test := NewTest("db0", "rp0")
@@ -2751,48 +2366,6 @@ func TestServer_Query_Wildcards(t *testing.T) {
 			command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`,
 			exp:     `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`,
 		},
-		&Query{
-			name:    "wildcard and field in select",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT value, * FROM wildcard`,
-			exp:     `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`,
-		},
-		&Query{
-			name:    "field and wildcard in select",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT value, * FROM wildcard`,
-			exp:     `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`,
-		},
-		&Query{
-			name:    "field and wildcard in group by",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM wildcard GROUP BY region, *`,
-			exp:     `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`,
-		},
-		&Query{
-			name:    "wildcard and field in group by",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM wildcard GROUP BY *, region`,
-			exp:     `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`,
-		},
-		&Query{
-			name:    "wildcard with multiple measurements",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM m1, m2`,
-			exp:     `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`,
-		},
-		&Query{
-			name:    "wildcard with multiple measurements via regex",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM /^m.*/`,
-			exp:     `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`,
-		},
-		&Query{
-			name:    "wildcard with multiple measurements via regex and limit",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM db0../^m.*/ LIMIT 2`,
-			exp:     `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`,
-		},
 	}...)
 
 	for i, query := range test.queries {
@@ -3973,8 +3546,7 @@ func TestServer_Query_ShowFieldKeys(t *testing.T) {
}
}

func TestServer_ContinuousQuery(t *testing.T) {
t.Skip()
func TestServer_Query_CreateContinuousQuery(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
@@ -3986,7 +3558,21 @@ func TestServer_ContinuousQuery(t *testing.T) {
t.Fatal(err)
}

runTest := func(test *Test, t *testing.T) {
test := NewTest("db0", "rp0")

test.addQueries([]*Query{
&Query{
name: "create continuous query",
command: `CREATE CONTINUOUS QUERY "my.query" ON db0 BEGIN SELECT count(value) INTO measure1 FROM myseries GROUP BY time(10m) END`,
exp: `{"results":[{}]}`,
},
&Query{
name: `show continuous queries`,
command: `SHOW CONTINUOUS QUERIES`,
exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["my.query","CREATE CONTINUOUS QUERY \"my.query\" ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp0\".measure1 FROM \"db0\".\"rp0\".myseries GROUP BY time(10m) END"]]}]}]}`,
},
}...)

for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
@@ -4005,95 +3591,6 @@ func TestServer_ContinuousQuery(t *testing.T) {
}
}

// Start times of CQ intervals.
interval0 := time.Now().Add(-time.Second).Round(time.Second * 5)
interval1 := interval0.Add(-time.Second * 5)
interval2 := interval0.Add(-time.Second * 10)
interval3 := interval0.Add(-time.Second * 15)

writes := []string{
// Point too far in the past for CQ to pick up.
fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval3.Add(time.Second).UnixNano()),

// Points two intervals ago.
fmt.Sprintf(`cpu,host=server01 value=100 %d`, interval2.Add(time.Second).UnixNano()),
fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval2.Add(time.Second*2).UnixNano()),
fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, interval2.Add(time.Second*3).UnixNano()),

// Points one interval ago.
fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, interval1.Add(time.Second).UnixNano()),
fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval1.Add(time.Second*2).UnixNano()),

// Points in the current interval.
fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second).UnixNano()),
fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second*2).UnixNano()),
}

test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")
test.addQueries([]*Query{
&Query{
name: `create another retention policy for CQ to write into`,
command: `CREATE RETENTION POLICY rp1 ON db0 DURATION 1h REPLICATION 1`,
exp: `{"results":[{}]}`,
},
&Query{
name: "create continuous query with backreference",
command: `CREATE CONTINUOUS QUERY "cq1" ON db0 BEGIN SELECT count(value) INTO "rp1".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s) END`,
exp: `{"results":[{}]}`,
},
&Query{
name: `create another retention policy for CQ to write into`,
command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`,
exp: `{"results":[{}]}`,
},
&Query{
name: "create continuous query with backreference and group by time",
command: `CREATE CONTINUOUS QUERY "cq2" ON db0 BEGIN SELECT count(value) INTO "rp2".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s), * END`,
exp: `{"results":[{}]}`,
},
&Query{
name: `show continuous queries`,
command: `SHOW CONTINUOUS QUERIES`,
exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["cq1","CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp1\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s) END"],["cq2","CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp2\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s), * END"]]}]}]}`,
},
}...)

// Run first test to create CQs.
runTest(&test, t)

// Trigger CQs to run.
u := fmt.Sprintf("%s/data/process_continuous_queries?time=%d", s.URL(), interval0.UnixNano())
if _, err := s.HTTPPost(u, nil); err != nil {
t.Fatal(err)
}

// Wait for CQs to run. TODO: fix this ugly hack
time.Sleep(time.Second * 5)

// Setup tests to check the CQ results.
test2 := NewTest("db0", "rp1")
test2.addQueries([]*Query{
&Query{
name: "check results of cq1",
command: `SELECT * FROM "rp1"./[cg]pu/`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",3,null,null,null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",2,null,null,null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,null,null,null]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
// TODO: restore this test once this is fixed: https://github.com/influxdb/influxdb/issues/3968
&Query{
skip: true,
name: "check results of cq2",
command: `SELECT * FROM "rp2"./[cg]pu/`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","uswest",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","useast",null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server02","useast",null],["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
}...)

// Run second test to check CQ results.
runTest(&test2, t)
}

// Tests that a known CQ query with concurrent writes does not deadlock the server
func TestServer_ContinuousQuery_Deadlock(t *testing.T) {
@@ -4220,155 +3717,3 @@ func TestServer_Query_EvilIdentifiers(t *testing.T) {
}
}
}

func TestServer_Query_OrderByTime(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()

if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}

writes := []string{
fmt.Sprintf(`cpu,host=server1 value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()),
fmt.Sprintf(`cpu,host=server1 value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()),
fmt.Sprintf(`cpu,host=server1 value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
}

test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")

test.addQueries([]*Query{
&Query{
name: "order on points",
params: url.Values{"db": []string{"db0"}},
command: `select value from "cpu" ORDER BY time DESC`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`,
},
}...)

for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}

func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()

if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}

writes := []string{
fmt.Sprintf(`cpu foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
}

test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")

test.addQueries([]*Query{
&Query{
name: "baseline",
params: url.Values{"db": []string{"db0"}},
command: `select * from cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`,
},
&Query{
name: "select field with periods",
params: url.Values{"db": []string{"db0"}},
command: `select "foo.bar.baz" from cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`,
},
}...)

for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}

func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()

if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}

writes := []string{
fmt.Sprintf(`foo foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
}

test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")

test.addQueries([]*Query{
&Query{
name: "baseline",
params: url.Values{"db": []string{"db0"}},
command: `select * from foo`,
exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`,
},
&Query{
name: "select field with periods",
params: url.Values{"db": []string{"db0"}},
command: `select "foo.bar.baz" from foo`,
exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`,
},
}...)

for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
@@ -1,142 +0,0 @@
package main

import (
"encoding/binary"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"text/tabwriter"

"github.com/influxdb/influxdb/tsdb"
_ "github.com/influxdb/influxdb/tsdb/engine"
)

func main() {

var path string
flag.StringVar(&path, "p", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]")
flag.Parse()

tstore := tsdb.NewStore(filepath.Join(path, "data"))
tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
tstore.EngineOptions.Config.Dir = filepath.Join(path, "data")
tstore.EngineOptions.Config.WALLoggingEnabled = false
tstore.EngineOptions.Config.WALDir = filepath.Join(path, "wal")
if err := tstore.Open(); err != nil {
fmt.Printf("Failed to open dir: %v\n", err)
os.Exit(1)
}

size, err := tstore.DiskSize()
if err != nil {
fmt.Printf("Failed to determine disk usage: %v\n", err)
}

// Summary stats
fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n",
tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore))
fmt.Println()

tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0)

fmt.Fprintln(tw, strings.Join([]string{"Shard", "DB", "Measurement", "Tags [#K/#V]", "Fields [Name:Type]", "Series"}, "\t"))

shardIDs := tstore.ShardIDs()

databases := tstore.Databases()
sort.Strings(databases)

for _, db := range databases {
index := tstore.DatabaseIndex(db)
measurements := index.Measurements()
sort.Sort(measurements)
for _, m := range measurements {
tags := m.TagKeys()
tagValues := 0
for _, tag := range tags {
tagValues += len(m.TagValues(tag))
}
fields := m.FieldNames()
sort.Strings(fields)
series := m.SeriesKeys()
sort.Strings(series)
sort.Sort(ShardIDs(shardIDs))

// Sample a point from each measurement to determine the field types
for _, shardID := range shardIDs {
shard := tstore.Shard(shardID)
tx, err := shard.ReadOnlyTx()
if err != nil {
fmt.Printf("Failed to get transaction: %v", err)
}

for _, key := range series {
fieldSummary := []string{}

cursor := tx.Cursor(key, tsdb.Forward)

// Series doesn't exist in this shard
if cursor == nil {
continue
}

// Seek to the beginning
_, value := cursor.Seek([]byte{})
codec := shard.FieldCodec(m.Name)
if codec != nil {
fields, err := codec.DecodeFieldsWithNames(value)
if err != nil {
fmt.Printf("Failed to decode values: %v", err)
}

for field, value := range fields {
fieldSummary = append(fieldSummary, fmt.Sprintf("%s:%T", field, value))
}
sort.Strings(fieldSummary)
}
fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues,
len(fields), strings.Join(fieldSummary, ","), len(series))
break
}
tx.Rollback()
}
}
}
tw.Flush()
}

func countSeries(tstore *tsdb.Store) int {
var count int
for _, shardID := range tstore.ShardIDs() {
shard := tstore.Shard(shardID)
cnt, err := shard.SeriesCount()
if err != nil {
fmt.Printf("series count failed: %v\n", err)
continue
}
count += cnt
}
return count
}

func btou64(b []byte) uint64 {
return binary.BigEndian.Uint64(b)
}

// u64tob converts a uint64 into an 8-byte slice.
func u64tob(v uint64) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, v)
return b
}

type ShardIDs []uint64

func (a ShardIDs) Len() int { return len(a) }
func (a ShardIDs) Less(i, j int) bool { return a[i] < a[j] }
func (a ShardIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
143 Godeps/_workspace/src/github.com/influxdb/influxdb/diagnostics.go generated vendored Normal file
@@ -0,0 +1,143 @@
package influxdb

import (
"os"
"runtime"
"time"

"github.com/influxdb/influxdb/influxql"
)

// GoDiagnostics captures basic information about the runtime.
type GoDiagnostics struct {
GoMaxProcs int
NumGoroutine int
Version string
}

// NewGoDiagnostics returns a GoDiagnostics object.
func NewGoDiagnostics() *GoDiagnostics {
return &GoDiagnostics{
GoMaxProcs: runtime.GOMAXPROCS(0),
NumGoroutine: runtime.NumGoroutine(),
Version: runtime.Version(),
}
}

// AsRow returns the GoDiagnostics object as an InfluxQL row.
func (g *GoDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
return &influxql.Row{
Name: measurement,
Columns: []string{"time", "goMaxProcs", "numGoRoutine", "version"},
Tags: tags,
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
g.GoMaxProcs, g.NumGoroutine, g.Version}},
}
}

// SystemDiagnostics captures basic machine data.
type SystemDiagnostics struct {
Hostname string
PID int
OS string
Arch string
NumCPU int
}

// NewSystemDiagnostics returns a SystemDiagnostics object.
func NewSystemDiagnostics() *SystemDiagnostics {
hostname, err := os.Hostname()
if err != nil {
hostname = "unknown"
}

return &SystemDiagnostics{
Hostname: hostname,
PID: os.Getpid(),
OS: runtime.GOOS,
Arch: runtime.GOARCH,
NumCPU: runtime.NumCPU(),
}
}

// AsRow returns the SystemDiagnostics object as an InfluxQL row.
func (s *SystemDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
return &influxql.Row{
Name: measurement,
Columns: []string{"time", "hostname", "pid", "os", "arch", "numCPU"},
Tags: tags,
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
s.Hostname, s.PID, s.OS, s.Arch, s.NumCPU}},
}
}

// MemoryDiagnostics captures Go memory stats.
type MemoryDiagnostics struct {
Alloc int64
TotalAlloc int64
Sys int64
Lookups int64
Mallocs int64
Frees int64
HeapAlloc int64
HeapSys int64
HeapIdle int64
HeapInUse int64
HeapReleased int64
HeapObjects int64
PauseTotalNs int64
NumGC int64
}

// NewMemoryDiagnostics returns a MemoryDiagnostics object.
func NewMemoryDiagnostics() *MemoryDiagnostics {
var m runtime.MemStats
runtime.ReadMemStats(&m)

return &MemoryDiagnostics{
Alloc: int64(m.Alloc),
TotalAlloc: int64(m.TotalAlloc),
Sys: int64(m.Sys),
Lookups: int64(m.Lookups),
Mallocs: int64(m.Mallocs),
Frees: int64(m.Frees),
HeapAlloc: int64(m.HeapAlloc),
HeapSys: int64(m.HeapSys),
HeapIdle: int64(m.HeapIdle),
HeapInUse: int64(m.HeapInuse),
HeapReleased: int64(m.HeapReleased),
HeapObjects: int64(m.HeapObjects),
PauseTotalNs: int64(m.PauseTotalNs),
NumGC: int64(m.NumGC),
}
}

// AsRow returns the MemoryDiagnostics object as an InfluxQL row.
func (m *MemoryDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
return &influxql.Row{
Name: measurement,
Columns: []string{"time", "alloc", "totalAlloc", "sys", "lookups", "mallocs", "frees", "heapAlloc",
"heapSys", "heapIdle", "heapInUse", "heapReleased", "heapObjects", "pauseTotalNs", "numGC"},
Tags: tags,
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
m.Alloc, m.TotalAlloc, m.Sys, m.Lookups, m.Mallocs, m.Frees, m.HeapAlloc,
m.HeapSys, m.HeapIdle, m.HeapInUse, m.HeapReleased, m.HeapObjects, m.PauseTotalNs, m.NumGC}},
}
}

// BuildDiagnostics captures basic build version information.
type BuildDiagnostics struct {
Version string
CommitHash string
}

// AsRow returns the BuildDiagnostics object as an InfluxQL row.
func (b *BuildDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
return &influxql.Row{
Name: measurement,
Columns: []string{"time", "version", "commitHash"},
Tags: tags,
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
b.Version, b.CommitHash}},
}
}
@@ -86,20 +86,7 @@ reporting-disabled = false

[retention]
enabled = true
check-interval = "30m"

###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The retention policy for this data is the default retention policy within
### the internal database. The internal database is created automatically
### if it does not already exist, as is the default retention policy. If you
### want to use a non-default retention policy, it must be explicitly created.

[monitor]
store-enabled = true # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics
store-interval = "10s" # The interval at which to record statistics
check-interval = "10m"

###
### [admin]

@@ -149,7 +136,6 @@ reporting-disabled = false
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

## "name-schema" configures tag names for parsing the metric name from graphite protocol;

@@ -189,7 +175,6 @@ reporting-disabled = false
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###

@@ -204,14 +189,6 @@ reporting-disabled = false
# database = ""
# retention-policy = ""

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only points
# received over the telnet protocol undergo batching.

# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [[udp]]
###

@@ -228,9 +205,16 @@ reporting-disabled = false
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [monitoring]
###

[monitoring]
enabled = true
write-interval = "24h"

###
### [continuous_queries]
###
@@ -4,13 +4,6 @@

Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later.

Note that `0.8.9` can be found here:

```
http://get.influxdb.org.s3.amazonaws.com/influxdb_0.8.9_amd64.deb
http://get.influxdb.org.s3.amazonaws.com/influxdb-0.8.9-1.x86_64.rpm
```

### Design

`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below).
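For orientation, a minimal sketch of what such an export file can look like; the database name, point, and section header comments here are illustrative and not taken from this commit:

```
# DDL
CREATE DATABASE mydb
# DML
# CONTEXT-DATABASE: mydb
cpu,host=server01 value=23.4 1443700000000000000
```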
@@ -87,11 +87,11 @@ CREATE CONTINUOUS DATABASE DATABASES DEFAULT DELETE
DESC DROP DURATION END EXISTS EXPLAIN
FIELD FROM GRANT GROUP IF IN
INNER INSERT INTO KEY KEYS LIMIT
SHOW MEASUREMENT MEASUREMENTS NOT OFFSET ON
ORDER PASSWORD POLICY POLICIES PRIVILEGES QUERIES
QUERY READ REPLICATION RETENTION REVOKE SELECT
SERIES SLIMIT SOFFSET TAG TO USER
USERS VALUES WHERE WITH WRITE
SHOW MEASUREMENT MEASUREMENTS OFFSET ON ORDER
PASSWORD POLICY POLICIES PRIVILEGES QUERIES QUERY
READ REPLICATION RETENTION REVOKE SELECT SERIES
SLIMIT SOFFSET TAG TO USER USERS
VALUES WHERE WITH WRITE
```

## Literals
@@ -124,7 +124,9 @@ string_lit = `'` { unicode_char } `'`' .

Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal.

### Duration units
```
Duration unit definitions
-------------------------
| Units | Meaning |
|--------|-----------------------------------------|
| u or µ | microseconds (1 millionth of a second) |

@@ -134,6 +136,7 @@ Duration literals specify a length of time. An integer literal followed immedia
| h | hour |
| d | day |
| w | week |
```
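For example, `10s`, `5m`, `1h`, and `7d` are all duration literals; a typical use (illustrative, not part of this diff) is as a `GROUP BY time()` interval:

```sql
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)
```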

```
duration_lit = int_lit duration_unit .
@@ -188,7 +191,6 @@ statement = alter_retention_policy_stmt |
show_measurements_stmt |
show_retention_policies |
show_series_stmt |
show_shards_stmt |
show_tag_keys_stmt |
show_tag_values_stmt |
show_users_stmt |

@@ -453,7 +455,7 @@ SHOW FIELD KEYS FROM cpu;

### SHOW MEASUREMENTS

show_measurements_stmt = "SHOW MEASUREMENTS" [ where_clause ] [ group_by_clause ] [ limit_clause ]
show_measurements_stmt = [ where_clause ] [ group_by_clause ] [ limit_clause ]
[ offset_clause ] .

```sql
@@ -480,7 +482,7 @@ SHOW RETENTION POLICIES ON mydb;
### SHOW SERIES

```
show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ group_by_clause ]
show_series_stmt = [ from_clause ] [ where_clause ] [ group_by_clause ]
[ limit_clause ] [ offset_clause ] .
```

@@ -490,22 +492,10 @@ show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ group_by_cla

```

### SHOW SHARDS

```
show_shards_stmt = "SHOW SHARDS" .
```

#### Example:

```sql
SHOW SHARDS;
```

### SHOW TAG KEYS

```
show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ]
show_tag_keys_stmt = [ from_clause ] [ where_clause ] [ group_by_clause ]
[ limit_clause ] [ offset_clause ] .
```

@@ -528,7 +518,7 @@ SHOW TAG KEYS WHERE host = 'serverA';
### SHOW TAG VALUES

```
show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ]
show_tag_values_stmt = [ from_clause ] with_tag_clause [ where_clause ]
[ group_by_clause ] [ limit_clause ] [ offset_clause ] .
```

@@ -561,7 +551,7 @@ SHOW USERS;
### REVOKE

```
revoke_stmt = "REVOKE" privilege [ "ON" db_name ] "FROM" user_name
revoke_stmt = privilege [ "ON" db_name ] "FROM" user_name
```

#### Examples:

@@ -577,7 +567,7 @@ REVOKE READ ON mydb FROM jdoe;
### SELECT

```
select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ]
select_stmt = fields from_clause [ into_clause ] [ where_clause ]
[ group_by_clause ] [ order_by_clause ] [ limit_clause ]
[ offset_clause ] [ slimit_clause ] [ soffset_clause ].
```
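An illustrative query against this grammar, exercising the field, from, where, group by, and limit clauses (not part of this diff):

```sql
SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) LIMIT 5
```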
@@ -9,8 +9,6 @@ import (
"strconv"
"strings"
"time"

"github.com/influxdb/influxdb/pkg/slices"
)

// DataType represents the primitive data types available in InfluxQL.

@@ -107,7 +105,6 @@ func (*ShowFieldKeysStatement) node() {}
func (*ShowRetentionPoliciesStatement) node() {}
func (*ShowMeasurementsStatement) node() {}
func (*ShowSeriesStatement) node() {}
func (*ShowShardsStatement) node() {}
func (*ShowStatsStatement) node() {}
func (*ShowDiagnosticsStatement) node() {}
func (*ShowTagKeysStatement) node() {}

@@ -209,7 +206,6 @@ func (*ShowFieldKeysStatement) stmt() {}
func (*ShowMeasurementsStatement) stmt() {}
func (*ShowRetentionPoliciesStatement) stmt() {}
func (*ShowSeriesStatement) stmt() {}
func (*ShowShardsStatement) stmt() {}
func (*ShowStatsStatement) stmt() {}
func (*ShowDiagnosticsStatement) stmt() {}
func (*ShowTagKeysStatement) stmt() {}

@@ -278,7 +274,7 @@ type SortField struct {
// String returns a string representation of a sort field
func (field *SortField) String() string {
var buf bytes.Buffer
if field.Name != "" {
if field.Name == "" {
_, _ = buf.WriteString(field.Name)
_, _ = buf.WriteString(" ")
}

@@ -306,19 +302,12 @@ func (a SortFields) String() string {
type CreateDatabaseStatement struct {
// Name of the database to be created.
Name string

// IfNotExists indicates whether to return without error if the database
// already exists.
IfNotExists bool
}

// String returns a string representation of the create database statement.
func (s *CreateDatabaseStatement) String() string {
var buf bytes.Buffer
_, _ = buf.WriteString("CREATE DATABASE ")
if s.IfNotExists {
_, _ = buf.WriteString("IF NOT EXISTS ")
}
_, _ = buf.WriteString(s.Name)
return buf.String()
}
@@ -859,48 +848,6 @@ func (s *SelectStatement) RewriteDistinct() {
}
}

// ColumnNames will walk all fields and functions and return the appropriate field names for the select statement
// while maintaining order of the field names
func (s *SelectStatement) ColumnNames() []string {
// Always set the first column to be time, even if they didn't specify it
columnNames := []string{"time"}

// First walk each field
for _, field := range s.Fields {
switch f := field.Expr.(type) {
case *Call:
if f.Name == "top" || f.Name == "bottom" {
if len(f.Args) == 2 {
columnNames = append(columnNames, f.Name)
continue
}
// We have a special case now where we have to add the column names for the fields TOP or BOTTOM asked for as well
columnNames = slices.Union(columnNames, f.Fields(), true)
continue
}
columnNames = append(columnNames, field.Name())
default:
// time is always first, and we already added it, so ignore it if they asked for it anywhere else.
if field.Name() != "time" {
columnNames = append(columnNames, field.Name())
}
}
}

return columnNames
}

// HasTimeFieldSpecified will walk all fields and determine if the user explicitly asked for time
// This is needed to determine re-write behaviors for functions like TOP and BOTTOM
func (s *SelectStatement) HasTimeFieldSpecified() bool {
for _, f := range s.Fields {
if f.Name() == "time" {
return true
}
}
return false
}

// String returns a string representation of the select statement.
func (s *SelectStatement) String() string {
var buf bytes.Buffer

@@ -1042,10 +989,6 @@ func (s *SelectStatement) validate(tr targetRequirement) error {
return err
}

if err := s.validateDimensions(); err != nil {
return err
}

if err := s.validateDistinct(); err != nil {
return err
}

@@ -1062,6 +1005,10 @@ func (s *SelectStatement) validate(tr targetRequirement) error {
return err
}

if err := s.validateWildcard(); err != nil {
return err
}

return nil
}

@@ -1073,133 +1020,40 @@ func (s *SelectStatement) validateFields() error {
return nil
}

func (s *SelectStatement) validateDimensions() error {
var dur time.Duration
for _, dim := range s.Dimensions {
switch expr := dim.Expr.(type) {
case *Call:
// Ensure the call is time() and it only has one duration argument.
// If we already have a duration
if expr.Name != "time" {
return errors.New("only time() calls allowed in dimensions")
} else if len(expr.Args) != 1 {
return errors.New("time dimension expected one argument")
} else if lit, ok := expr.Args[0].(*DurationLiteral); !ok {
return errors.New("time dimension must have one duration argument")
} else if dur != 0 {
return errors.New("multiple time dimensions not allowed")
} else {
dur = lit.Val
}
case *VarRef:
if strings.ToLower(expr.Val) == "time" {
return errors.New("time() is a function and expects at least one argument")
}
case *Wildcard:
default:
return errors.New("only time and tag dimensions allowed")
}
}
return nil
}

// validSelectWithAggregate determines if a SELECT statement has the correct
// combination of aggregate functions combined with selected fields and tags
// Currently we don't have support for all aggregates, but aggregates that
// can be combined with fields/tags are:
// TOP, BOTTOM, MAX, MIN, FIRST, LAST
func (s *SelectStatement) validSelectWithAggregate(numAggregates int) error {
if numAggregates != 0 && numAggregates != len(s.Fields) {
return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported")
}
return nil
}

func (s *SelectStatement) validateAggregates(tr targetRequirement) error {
// Currently most aggregates can be the ONLY thing in a select statement
// Others, like TOP/BOTTOM can mix aggregates and tags/fields
// First, if 1 field is an aggregate, then all fields must be an aggregate. This is
// an explicit limitation of the current system.
numAggregates := 0
for _, f := range s.Fields {
if _, ok := f.Expr.(*Call); ok {
numAggregates++
}
}
if numAggregates != 0 && numAggregates != len(s.Fields) {
return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported")
}

// Secondly, determine if specific calls have at least one and only one argument
for _, f := range s.Fields {
switch expr := f.Expr.(type) {
case *Call:
switch expr.Name {
if c, ok := f.Expr.(*Call); ok {
switch c.Name {
case "derivative", "non_negative_derivative":
if err := s.validSelectWithAggregate(numAggregates); err != nil {
return err
if min, max, got := 1, 2, len(c.Args); got > max || got < min {
return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", c.Name, min, max, got)
}
if min, max, got := 1, 2, len(expr.Args); got > max || got < min {
return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got)
}
// Validate that if they have a time dimension, they need a sub-call like min/max, etc.
if s.hasTimeDimensions(s.Condition) {
if _, ok := expr.Args[0].(*Call); !ok {
return fmt.Errorf("aggregate function required inside the call to %s", expr.Name)
}
}

case "percentile":
if err := s.validSelectWithAggregate(numAggregates); err != nil {
return err
}
if exp, got := 2, len(expr.Args); got != exp {
return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got)
}
_, ok := expr.Args[1].(*NumberLiteral)
if !ok {
return fmt.Errorf("expected float argument in percentile()")
}
case "top", "bottom":
if exp, got := 2, len(expr.Args); got < exp {
return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", expr.Name, exp, got)
}
if len(expr.Args) > 1 {
callLimit, ok := expr.Args[len(expr.Args)-1].(*NumberLiteral)
if !ok {
return fmt.Errorf("expected integer as last argument in %s(), found %s", expr.Name, expr.Args[len(expr.Args)-1])
}
// Check if they asked for a limit smaller than what they passed into the call
if int64(callLimit.Val) > int64(s.Limit) && s.Limit != 0 {
return fmt.Errorf("limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement", int64(callLimit.Val), expr.Name, int64(s.Limit))
}

for _, v := range expr.Args[:len(expr.Args)-1] {
if _, ok := v.(*VarRef); !ok {
return fmt.Errorf("only fields or tags are allowed in %s(), found %s", expr.Name, v)
}
}
if exp, got := 2, len(c.Args); got != exp {
return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got)
}
default:
if err := s.validSelectWithAggregate(numAggregates); err != nil {
return err
}
if exp, got := 1, len(expr.Args); got != exp {
return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got)
}
switch fc := expr.Args[0].(type) {
case *VarRef:
// do nothing
case *Call:
if fc.Name != "distinct" {
return fmt.Errorf("expected field argument in %s()", expr.Name)
}
case *Distinct:
if expr.Name != "count" {
return fmt.Errorf("expected field argument in %s()", expr.Name)
}
default:
return fmt.Errorf("expected field argument in %s()", expr.Name)
if exp, got := 1, len(c.Args); got != exp {
return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got)
}
}
}
}

// Check that we have valid duration and where clauses for aggregates
// Now, check that we have valid duration and where clauses for aggregates

// fetch the group by duration
groupByDuration, _ := s.GroupByInterval()
@@ -1218,6 +1072,13 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error {
return nil
}

func (s *SelectStatement) validateWildcard() error {
if s.HasWildcard() && len(s.Fields) > 1 {
return fmt.Errorf("wildcards can not be combined with other fields")
}
return nil
}

func (s *SelectStatement) HasDistinct() bool {
// determine if we have a call named distinct
for _, f := range s.Fields {

@@ -1640,9 +1501,6 @@ func (t *Target) String() string {
var buf bytes.Buffer
_, _ = buf.WriteString("INTO ")
_, _ = buf.WriteString(t.Measurement.String())
if t.Measurement.Name == "" {
_, _ = buf.WriteString(":MEASUREMENT")
}

return buf.String()
}

@@ -1973,17 +1831,6 @@ func (s *ShowStatsStatement) RequiredPrivileges() ExecutionPrivileges {
return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}

// ShowShardsStatement represents a command for displaying shards in the cluster.
type ShowShardsStatement struct{}

// String returns a string representation.
func (s *ShowShardsStatement) String() string { return "SHOW SHARDS" }

// RequiredPrivileges returns the privileges required to execute the statement.
func (s *ShowShardsStatement) RequiredPrivileges() ExecutionPrivileges {
return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}

// ShowDiagnosticsStatement represents a command for show node diagnostics.
type ShowDiagnosticsStatement struct{}

@@ -2253,21 +2100,37 @@ func (a Dimensions) String() string {

// Normalize returns the interval and tag dimensions separately.
// Returns 0 if no time interval is specified.
func (a Dimensions) Normalize() (time.Duration, []string) {
// Returns an error if multiple time dimensions exist or if non-VarRef dimensions are specified.
func (a Dimensions) Normalize() (time.Duration, []string, error) {
var dur time.Duration
var tags []string

for _, dim := range a {
switch expr := dim.Expr.(type) {
case *Call:
lit, _ := expr.Args[0].(*DurationLiteral)
// Ensure the call is time() and it only has one duration argument.
// If we already have a duration
if expr.Name != "time" {
return 0, nil, errors.New("only time() calls allowed in dimensions")
} else if len(expr.Args) != 1 {
return 0, nil, errors.New("time dimension expected one argument")
} else if lit, ok := expr.Args[0].(*DurationLiteral); !ok {
return 0, nil, errors.New("time dimension must have one duration argument")
} else if dur != 0 {
return 0, nil, errors.New("multiple time dimensions not allowed")
} else {
dur = lit.Val
}

case *VarRef:
tags = append(tags, expr.Val)

default:
return 0, nil, errors.New("only time and tag dimensions allowed")
}
}

return dur, tags
return dur, tags, nil
}

// Dimension represents an expression that a select statement is grouped by.
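A hypothetical caller-side sketch of the new three-value signature (the parser usage and variable names here are illustrative, not part of this commit; assumes imports of fmt, log, strings, and the influxql package):

```go
// Parse a SELECT, then split its GROUP BY dimensions with Normalize.
stmt, err := influxql.NewParser(strings.NewReader(
	`SELECT mean(value) FROM cpu GROUP BY time(10s), host`)).ParseStatement()
if err != nil {
	log.Fatal(err)
}
sel := stmt.(*influxql.SelectStatement)
interval, tags, err := sel.Dimensions.Normalize()
if err != nil {
	// e.g. "multiple time dimensions not allowed"
	log.Fatal(err)
}
fmt.Println(interval, tags) // 10s [host]
```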
@@ -2296,7 +2159,6 @@ type Measurement struct {
RetentionPolicy string
Name string
Regex *RegexLiteral
IsTarget bool
}

// String returns a string representation of the measurement.

@@ -2355,33 +2217,6 @@ func (c *Call) String() string {
return fmt.Sprintf("%s(%s)", c.Name, strings.Join(str, ", "))
}

// Fields will extract any field names from the call. Only specific calls support this.
func (c *Call) Fields() []string {
switch c.Name {
case "top", "bottom":
// maintain the order the user specified in the query
keyMap := make(map[string]struct{})
keys := []string{}
for i, a := range c.Args {
if i == 0 {
// special case, first argument is always the name of the function regardless of the field name
keys = append(keys, c.Name)
continue
}
switch v := a.(type) {
case *VarRef:
if _, ok := keyMap[v.Val]; !ok {
keyMap[v.Val] = struct{}{}
keys = append(keys, v.Val)
}
}
}
return keys
default:
return []string{}
}
}

// Distinct represents a DISTINCT expression.
type Distinct struct {
// Identifier following DISTINCT
@@ -451,7 +451,7 @@ func TestSelectStatement_IsRawQuerySet(t *testing.T) {
isRaw: false,
},
{
stmt: "select mean(value) from foo group by *",
stmt: "select mean(*) from foo group by *",
isRaw: false,
},
}
@@ -1,10 +1,10 @@
package tsdb
package influxql

// All aggregate and query functions are defined in this file along with any intermediate data objects they need to process.
// Query functions are represented as two discrete functions: Map and Reduce. These roughly follow the MapReduce
// paradigm popularized by Google and Hadoop.
//
// When adding an aggregate function, define a mapper, a reducer, and add them in the switch statement in the MapreduceFuncs function
// When adding an aggregate function, define a mapper, a reducer, and add them in the switch statement in the MapReduceFuncs function

import (
"encoding/json"
@@ -13,43 +13,72 @@ import (
"math/rand"
"sort"
"strings"

"github.com/influxdb/influxdb/influxql"
)

// iterator represents a forward-only iterator over a set of points.
// These are used by the mapFunctions in this file
type iterator interface {
// Iterator represents a forward-only iterator over a set of points.
// These are used by the MapFunctions in this file
type Iterator interface {
Next() (time int64, value interface{})
Tags() map[string]string
TMin() int64
}

// mapFunc represents a function used for mapping over a sequential series of data.
// MapFunc represents a function used for mapping over a sequential series of data.
// The iterator represents a single group by interval
type mapFunc func(iterator) interface{}
type MapFunc func(Iterator) interface{}

// reduceFunc represents a function used for reducing mapper output.
type reduceFunc func([]interface{}) interface{}
// ReduceFunc represents a function used for reducing mapper output.
type ReduceFunc func([]interface{}) interface{}

// UnmarshalFunc represents a function that can take bytes from a mapper from remote
// server and marshal it into an interface the reducer can use
type unmarshalFunc func([]byte) (interface{}, error)
type UnmarshalFunc func([]byte) (interface{}, error)

// initializemapFunc takes an aggregate call from the query and returns the mapFunc
func initializeMapFunc(c *influxql.Call) (mapFunc, error) {
// InitializeMapFunc takes an aggregate call from the query and returns the MapFunc
func InitializeMapFunc(c *Call) (MapFunc, error) {
// see if it's a query for raw data
if c == nil {
return MapRawQuery, nil
}

// Ensure that there is either a single argument or if for percentile, two
if c.Name == "percentile" {
if len(c.Args) != 2 {
return nil, fmt.Errorf("expected two arguments for %s()", c.Name)
}
} else if strings.HasSuffix(c.Name, "derivative") {
// derivatives require a field name and optional duration
if len(c.Args) == 0 {
return nil, fmt.Errorf("expected field name argument for %s()", c.Name)
}
} else if len(c.Args) != 1 {
return nil, fmt.Errorf("expected one argument for %s()", c.Name)
}

// derivative can take a nested aggregate function, everything else expects
// a variable reference as the first arg
if !strings.HasSuffix(c.Name, "derivative") {
// Ensure the argument is appropriate for the aggregate function.
switch fc := c.Args[0].(type) {
case *VarRef:
case *Distinct:
if c.Name != "count" {
return nil, fmt.Errorf("expected field argument in %s()", c.Name)
}
case *Call:
if fc.Name != "distinct" {
return nil, fmt.Errorf("expected field argument in %s()", c.Name)
}
default:
return nil, fmt.Errorf("expected field argument in %s()", c.Name)
}
}

// Retrieve map function by name.
switch c.Name {
case "count":
if _, ok := c.Args[0].(*influxql.Distinct); ok {
if _, ok := c.Args[0].(*Distinct); ok {
return MapCountDistinct, nil
}
if c, ok := c.Args[0].(*influxql.Call); ok {
if c, ok := c.Args[0].(*Call); ok {
if c.Name == "distinct" {
return MapCountDistinct, nil
}
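To make the Map/Reduce flow described at the top of this file concrete, a hypothetical coordinator-side sketch (the parsed call, the per-shard iterators, and the surrounding plumbing are assumptions, not part of this commit; error handling elided):

```go
// One map pass per shard, then a single reduce over the partial results.
mapFn, _ := influxql.InitializeMapFunc(call)       // call is a *influxql.Call, e.g. sum(value)
reduceFn, _ := influxql.InitializeReduceFunc(call)

partials := make([]interface{}, 0, len(shardIterators))
for _, itr := range shardIterators { // each itr implements influxql.Iterator
	partials = append(partials, mapFn(itr)) // per-shard partial aggregate
}
result := reduceFn(partials) // final aggregate value
_ = result
```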
@@ -75,17 +104,17 @@ func initializeMapFunc(c *influxql.Call) (mapFunc, error) {
return MapFirst, nil
case "last":
return MapLast, nil
case "top":
return func(itr iterator) interface{} {
return MapTop(itr, c)
}, nil
case "percentile":
_, ok := c.Args[1].(*NumberLiteral)
if !ok {
return nil, fmt.Errorf("expected float argument in percentile()")
}
return MapEcho, nil
case "derivative", "non_negative_derivative":
// If the arg is another aggregate e.g. derivative(mean(value)), then
// use the map func for that nested aggregate
if fn, ok := c.Args[0].(*influxql.Call); ok {
return initializeMapFunc(fn)
if fn, ok := c.Args[0].(*Call); ok {
return InitializeMapFunc(fn)
}
return MapRawQuery, nil
default:

@@ -93,15 +122,15 @@ func initializeMapFunc(c *influxql.Call) (mapFunc, error) {
}
}

// InitializereduceFunc takes an aggregate call from the query and returns the reduceFunc
func initializeReduceFunc(c *influxql.Call) (reduceFunc, error) {
// InitializeReduceFunc takes an aggregate call from the query and returns the ReduceFunc
func InitializeReduceFunc(c *Call) (ReduceFunc, error) {
// Retrieve reduce function by name.
switch c.Name {
case "count":
if _, ok := c.Args[0].(*influxql.Distinct); ok {
if _, ok := c.Args[0].(*Distinct); ok {
return ReduceCountDistinct, nil
}
if c, ok := c.Args[0].(*influxql.Call); ok {
if c, ok := c.Args[0].(*Call); ok {
if c.Name == "distinct" {
return ReduceCountDistinct, nil
}

@@ -127,19 +156,21 @@ func initializeReduceFunc(c *influxql.Call) (reduceFunc, error) {
return ReduceFirst, nil
case "last":
return ReduceLast, nil
case "top":
return func(values []interface{}) interface{} {
return ReduceTop(values, c)
}, nil
case "percentile":
return func(values []interface{}) interface{} {
return ReducePercentile(values, c)
}, nil
if len(c.Args) != 2 {
return nil, fmt.Errorf("expected float argument in percentile()")
}

lit, ok := c.Args[1].(*NumberLiteral)
if !ok {
return nil, fmt.Errorf("expected float argument in percentile()")
}
return ReducePercentile(lit.Val), nil
case "derivative", "non_negative_derivative":
// If the arg is another aggregate e.g. derivative(mean(value)), then
// use the map func for that nested aggregate
if fn, ok := c.Args[0].(*influxql.Call); ok {
return initializeReduceFunc(fn)
if fn, ok := c.Args[0].(*Call); ok {
return InitializeReduceFunc(fn)
}
return nil, fmt.Errorf("expected function argument to %s", c.Name)
default:

@@ -147,7 +178,7 @@ func initializeReduceFunc(c *influxql.Call) (reduceFunc, error) {
}
}

func initializeUnmarshaller(c *influxql.Call) (unmarshalFunc, error) {
func InitializeUnmarshaller(c *Call) (UnmarshalFunc, error) {
// if c is nil it's a raw data query
if c == nil {
return func(b []byte) (interface{}, error) {

@@ -173,7 +204,7 @@ func initializeUnmarshaller(c *influxql.Call) (unmarshalFunc, error) {
}, nil
case "distinct":
return func(b []byte) (interface{}, error) {
var val interfaceValues
var val distinctValues
err := json.Unmarshal(b, &val)
return val, err
}, nil

@@ -211,7 +242,7 @@ func initializeUnmarshaller(c *influxql.Call) (unmarshalFunc, error) {
}

// MapCount computes the number of values in an iterator.
func MapCount(itr iterator) interface{} {
func MapCount(itr Iterator) interface{} {
n := float64(0)
for k, _ := itr.Next(); k != -1; k, _ = itr.Next() {
n++
@@ -222,16 +253,81 @@ func MapCount(itr iterator) interface{} {
return nil
}

type interfaceValues []interface{}
type distinctValues []interface{}

func (d interfaceValues) Len() int { return len(d) }
func (d interfaceValues) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d interfaceValues) Less(i, j int) bool {
return interfaceCompare(d[i], d[j]) < 0
func (d distinctValues) Len() int { return len(d) }
func (d distinctValues) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d distinctValues) Less(i, j int) bool {
// Sort by type if types match
{
d1, ok1 := d[i].(float64)
d2, ok2 := d[j].(float64)
if ok1 && ok2 {
return d1 < d2
}
}

{
d1, ok1 := d[i].(uint64)
d2, ok2 := d[j].(uint64)
if ok1 && ok2 {
return d1 < d2
}
}

{
d1, ok1 := d[i].(bool)
d2, ok2 := d[j].(bool)
if ok1 && ok2 {
return d1 == false && d2 == true
}
}

{
d1, ok1 := d[i].(string)
d2, ok2 := d[j].(string)
if ok1 && ok2 {
return d1 < d2
}
}

// Types did not match, need to sort based on arbitrary weighting of type
const (
intWeight = iota
floatWeight
boolWeight
stringWeight
)

infer := func(val interface{}) (int, float64) {
switch v := val.(type) {
case uint64:
return intWeight, float64(v)
case int64:
return intWeight, float64(v)
case float64:
return floatWeight, v
case bool:
return boolWeight, 0
case string:
return stringWeight, 0
}
panic("unreachable code")
}

w1, n1 := infer(d[i])
w2, n2 := infer(d[j])

// If we had "numeric" data, use that for comparison
if n1 != n2 && (w1 == intWeight && w2 == floatWeight) || (w1 == floatWeight && w2 == intWeight) {
return n1 < n2
}

return w1 < w2
}

// MapDistinct computes the unique values in an iterator.
func MapDistinct(itr iterator) interface{} {
func MapDistinct(itr Iterator) interface{} {
var index = make(map[interface{}]struct{})

for time, value := itr.Next(); time != -1; time, value = itr.Next() {

@@ -242,7 +338,7 @@ func MapDistinct(itr iterator) interface{} {
return nil
}

results := make(interfaceValues, len(index))
results := make(distinctValues, len(index))
var i int
for value, _ := range index {
results[i] = value

@@ -260,7 +356,7 @@ func ReduceDistinct(values []interface{}) interface{} {
if v == nil {
continue
}
d, ok := v.(interfaceValues)
d, ok := v.(distinctValues)
if !ok {
msg := fmt.Sprintf("expected distinctValues, got: %T", v)
panic(msg)

@@ -271,7 +367,7 @@ func ReduceDistinct(values []interface{}) interface{} {
}

// convert map keys to an array
results := make(interfaceValues, len(index))
results := make(distinctValues, len(index))
var i int
for k, _ := range index {
results[i] = k

@@ -285,7 +381,7 @@ func ReduceDistinct(values []interface{}) interface{} {
}

// MapCountDistinct computes the unique count of values in an iterator.
func MapCountDistinct(itr iterator) interface{} {
func MapCountDistinct(itr Iterator) interface{} {
var index = make(map[interface{}]struct{})

for time, value := itr.Next(); time != -1; time, value = itr.Next() {

@@ -329,7 +425,7 @@ const (
)

// MapSum computes the summation of values in an iterator.
func MapSum(itr iterator) interface{} {
func MapSum(itr Iterator) interface{} {
n := float64(0)
count := 0
var resultType NumberType

@@ -384,7 +480,7 @@ func ReduceSum(values []interface{}) interface{} {
}

// MapMean computes the count and sum of values in an iterator to be combined by the reducer.
func MapMean(itr iterator) interface{} {
func MapMean(itr Iterator) interface{} {
out := &meanMapOutput{}

for k, v := itr.Next(); k != -1; k, v = itr.Next() {

@@ -590,7 +686,7 @@ type minMaxMapOut struct {
}

// MapMin collects the values to pass to the reducer
func MapMin(itr iterator) interface{} {
func MapMin(itr Iterator) interface{} {
min := &minMaxMapOut{}

pointsYielded := false

@@ -653,7 +749,7 @@ func ReduceMin(values []interface{}) interface{} {
}

// MapMax collects the values to pass to the reducer
func MapMax(itr iterator) interface{} {
func MapMax(itr Iterator) interface{} {
max := &minMaxMapOut{}

pointsYielded := false

@@ -721,7 +817,7 @@ type spreadMapOutput struct {
}

// MapSpread collects the values to pass to the reducer
|
||||
func MapSpread(itr iterator) interface{} {
|
||||
func MapSpread(itr Iterator) interface{} {
|
||||
out := &spreadMapOutput{}
|
||||
pointsYielded := false
|
||||
var val float64
|
||||
|
@ -782,7 +878,7 @@ func ReduceSpread(values []interface{}) interface{} {
|
|||
}
|
||||
|
||||
// MapStddev collects the values to pass to the reducer
|
||||
func MapStddev(itr iterator) interface{} {
|
||||
func MapStddev(itr Iterator) interface{} {
|
||||
var values []float64
|
||||
|
||||
for k, v := itr.Next(); k != -1; k, v = itr.Next() {
|
||||
|
@ -839,21 +935,27 @@ type firstLastMapOutput struct {
|
|||
}
|
||||
|
||||
// MapFirst collects the values to pass to the reducer
|
||||
// This function assumes time ordered input
|
||||
func MapFirst(itr iterator) interface{} {
|
||||
k, v := itr.Next()
|
||||
if k == -1 {
|
||||
func MapFirst(itr Iterator) interface{} {
|
||||
out := &firstLastMapOutput{}
|
||||
pointsYielded := false
|
||||
|
||||
for k, v := itr.Next(); k != -1; k, v = itr.Next() {
|
||||
// Initialize first
|
||||
if !pointsYielded {
|
||||
out.Time = k
|
||||
out.Val = v
|
||||
pointsYielded = true
|
||||
}
|
||||
if k < out.Time {
|
||||
out.Time = k
|
||||
out.Val = v
|
||||
}
|
||||
}
|
||||
if pointsYielded {
|
||||
return out
|
||||
}
|
||||
return nil
|
||||
}
|
||||
nextk, nextv := itr.Next()
|
||||
for nextk == k {
|
||||
if greaterThan(nextv, v) {
|
||||
v = nextv
|
||||
}
|
||||
nextk, nextv = itr.Next()
|
||||
}
|
||||
return &firstLastMapOutput{k, v}
|
||||
}
|
||||
|
||||
// ReduceFirst computes the first of value.
|
||||
func ReduceFirst(values []interface{}) interface{} {
|
||||
|
@ -874,8 +976,6 @@ func ReduceFirst(values []interface{}) interface{} {
|
|||
if val.Time < out.Time {
|
||||
out.Time = val.Time
|
||||
out.Val = val.Val
|
||||
} else if val.Time == out.Time && greaterThan(val.Val, out.Val) {
|
||||
out.Val = val.Val
|
||||
}
|
||||
}
|
||||
if pointsYielded {
|
||||
|
@ -885,7 +985,7 @@ func ReduceFirst(values []interface{}) interface{} {
|
|||
}
|
||||
|
||||
// MapLast collects the values to pass to the reducer
|
||||
func MapLast(itr iterator) interface{} {
|
||||
func MapLast(itr Iterator) interface{} {
|
||||
out := &firstLastMapOutput{}
|
||||
pointsYielded := false
|
||||
|
||||
|
@ -899,8 +999,6 @@ func MapLast(itr iterator) interface{} {
|
|||
if k > out.Time {
|
||||
out.Time = k
|
||||
out.Val = v
|
||||
} else if k == out.Time && greaterThan(v, out.Val) {
|
||||
out.Val = v
|
||||
}
|
||||
}
|
||||
if pointsYielded {
|
||||
|
@ -929,8 +1027,6 @@ func ReduceLast(values []interface{}) interface{} {
|
|||
if val.Time > out.Time {
|
||||
out.Time = val.Time
|
||||
out.Val = val.Val
|
||||
} else if val.Time == out.Time && greaterThan(val.Val, out.Val) {
|
||||
out.Val = val.Val
|
||||
}
|
||||
}
|
||||
if pointsYielded {
|
||||
|
@ -939,418 +1035,8 @@ func ReduceLast(values []interface{}) interface{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
type positionOut struct {
|
||||
points PositionPoints
|
||||
callArgs []string // ordered args in the call
|
||||
}
|
||||
|
||||
func (p *positionOut) lessKey(i, j int) bool {
|
||||
t1, t2 := p.points[i].Tags, p.points[j].Tags
|
||||
for _, k := range p.callArgs {
|
||||
if t1[k] != t2[k] {
|
||||
return t1[k] < t2[k]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpFloat(a, b float64) int {
|
||||
if a == b {
|
||||
return 0
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func cmpInt(a, b int64) int {
|
||||
if a == b {
|
||||
return 0
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func cmpUint(a, b uint64) int {
|
||||
if a == b {
|
||||
return 0
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func interfaceCompare(a, b interface{}) int {
|
||||
// compare by float64/int64 first as that is the most likely match
|
||||
{
|
||||
d1, ok1 := a.(float64)
|
||||
d2, ok2 := b.(float64)
|
||||
if ok1 && ok2 {
|
||||
return cmpFloat(d1, d2)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(int64)
|
||||
d2, ok2 := b.(int64)
|
||||
if ok1 && ok2 {
|
||||
return cmpInt(d1, d2)
|
||||
}
|
||||
}
|
||||
|
||||
// compare by every numeric type left
|
||||
{
|
||||
d1, ok1 := a.(float32)
|
||||
d2, ok2 := b.(float32)
|
||||
if ok1 && ok2 {
|
||||
return cmpFloat(float64(d1), float64(d2))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(uint64)
|
||||
d2, ok2 := b.(uint64)
|
||||
if ok1 && ok2 {
|
||||
return cmpUint(d1, d2)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(uint32)
|
||||
d2, ok2 := b.(uint32)
|
||||
if ok1 && ok2 {
|
||||
return cmpUint(uint64(d1), uint64(d2))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(uint16)
|
||||
d2, ok2 := b.(uint16)
|
||||
if ok1 && ok2 {
|
||||
return cmpUint(uint64(d1), uint64(d2))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(uint8)
|
||||
d2, ok2 := b.(uint8)
|
||||
if ok1 && ok2 {
|
||||
return cmpUint(uint64(d1), uint64(d2))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(int32)
|
||||
d2, ok2 := b.(int32)
|
||||
if ok1 && ok2 {
|
||||
return cmpInt(int64(d1), int64(d2))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(int16)
|
||||
d2, ok2 := b.(int16)
|
||||
if ok1 && ok2 {
|
||||
return cmpInt(int64(d1), int64(d2))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(int8)
|
||||
d2, ok2 := b.(int8)
|
||||
if ok1 && ok2 {
|
||||
return cmpInt(int64(d1), int64(d2))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(bool)
|
||||
d2, ok2 := b.(bool)
|
||||
if ok1 && ok2 {
|
||||
if d1 == d2 {
|
||||
return 0
|
||||
} else if d1 == true && d2 == false {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
d1, ok1 := a.(string)
|
||||
d2, ok2 := b.(string)
|
||||
if ok1 && ok2 {
|
||||
return strings.Compare(d1, d2)
|
||||
}
|
||||
}
|
||||
|
||||
// Types did not match, need to sort based on arbitrary weighting of type
|
||||
const (
|
||||
stringWeight = iota
|
||||
boolWeight
|
||||
intWeight
|
||||
floatWeight
|
||||
)
|
||||
|
||||
infer := func(val interface{}) (int, float64) {
|
||||
switch v := val.(type) {
|
||||
case uint64:
|
||||
return intWeight, float64(v)
|
||||
case uint32:
|
||||
return intWeight, float64(v)
|
||||
case uint16:
|
||||
return intWeight, float64(v)
|
||||
case uint8:
|
||||
return intWeight, float64(v)
|
||||
case int64:
|
||||
return intWeight, float64(v)
|
||||
case int32:
|
||||
return intWeight, float64(v)
|
||||
case int16:
|
||||
return intWeight, float64(v)
|
||||
case int8:
|
||||
return intWeight, float64(v)
|
||||
case float64:
|
||||
return floatWeight, float64(v)
|
||||
case float32:
|
||||
return floatWeight, float64(v)
|
||||
case bool:
|
||||
return boolWeight, 0
|
||||
case string:
|
||||
return stringWeight, 0
|
||||
}
|
||||
panic("interfaceValues.Less - unreachable code")
|
||||
}
|
||||
|
||||
w1, n1 := infer(a)
|
||||
w2, n2 := infer(b)
|
||||
|
||||
// If we had "numeric" data, use that for comparison
|
||||
if (w1 == floatWeight || w1 == intWeight) && (w2 == floatWeight || w2 == intWeight) {
|
||||
cmp := cmpFloat(n1, n2)
|
||||
// break ties
|
||||
if cmp == 0 {
|
||||
if w1 < w2 {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
return cmp
|
||||
}
|
||||
|
||||
if w1 == w2 {
|
||||
// this should never happen, since equal weight means
|
||||
// it should have been handled at the start of this function.
|
||||
panic("unreachable")
|
||||
} else if w1 < w2 {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
type PositionPoints []PositionPoint
|
||||
type PositionPoint struct {
|
||||
Time int64
|
||||
Value interface{}
|
||||
Tags map[string]string
|
||||
}
|
||||
|
||||
type topMapOut struct {
|
||||
positionOut
|
||||
}
|
||||
|
||||
func (t topMapOut) Len() int { return len(t.points) }
|
||||
func (t topMapOut) Swap(i, j int) { t.points[i], t.points[j] = t.points[j], t.points[i] }
|
||||
func (t topMapOut) Less(i, j int) bool {
|
||||
// old C trick makes this code easier to read. Imagine
|
||||
// that the OP in "cmp(i, j) OP 0" is the comparison you want
|
||||
// between i and j
|
||||
cmp := interfaceCompare(t.points[i].Value, t.points[j].Value)
|
||||
if cmp != 0 {
|
||||
return cmp > 0
|
||||
}
|
||||
k1, k2 := t.points[i].Time, t.points[j].Time
|
||||
if k1 != k2 {
|
||||
return k1 < k2
|
||||
}
|
||||
return t.lessKey(i, j)
|
||||
}
|
||||
|
||||
type topReduceOut struct {
|
||||
positionOut
|
||||
}
|
||||
|
||||
func (t topReduceOut) Len() int { return len(t.points) }
|
||||
func (t topReduceOut) Swap(i, j int) { t.points[i], t.points[j] = t.points[j], t.points[i] }
|
||||
func (t topReduceOut) Less(i, j int) bool {
|
||||
// Now sort by time first, not value
|
||||
|
||||
k1, k2 := t.points[i].Time, t.points[j].Time
|
||||
if k1 != k2 {
|
||||
return k1 < k2
|
||||
}
|
||||
cmp := interfaceCompare(t.points[i].Value, t.points[j].Value)
|
||||
if cmp != 0 {
|
||||
return cmp > 0
|
||||
}
|
||||
return t.lessKey(i, j)
|
||||
}
|
||||
|
||||
// callArgs will get any additional field/tag names that may be needed to sort with
|
||||
// it is important to maintain the order of these that they were asked for in the call
|
||||
// for sorting purposes
|
||||
func topCallArgs(c *influxql.Call) []string {
|
||||
var names []string
|
||||
for _, v := range c.Args[1 : len(c.Args)-1] {
|
||||
if f, ok := v.(*influxql.VarRef); ok {
|
||||
names = append(names, f.Val)
|
||||
}
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// MapTop emits the top data points for each group by interval
|
||||
func MapTop(itr iterator, c *influxql.Call) interface{} {
|
||||
// Capture the limit if it was specified in the call
|
||||
lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral)
|
||||
limit := int64(lit.Val)
|
||||
|
||||
// Simple case where only value and limit are specified.
|
||||
if len(c.Args) == 2 {
|
||||
out := positionOut{callArgs: topCallArgs(c)}
|
||||
|
||||
for k, v := itr.Next(); k != -1; k, v = itr.Next() {
|
||||
t := k
|
||||
if bt := itr.TMin(); bt > -1 {
|
||||
t = bt
|
||||
}
|
||||
out.points = append(out.points, PositionPoint{t, v, itr.Tags()})
|
||||
}
|
||||
|
||||
// If we have more than we asked for, only send back the top values
|
||||
if int64(len(out.points)) > limit {
|
||||
sort.Sort(topMapOut{out})
|
||||
out.points = out.points[:limit]
|
||||
}
|
||||
if len(out.points) > 0 {
|
||||
return out.points
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// They specified tags in the call to get unique sets, so we need to map them as we accumulate them
|
||||
outMap := make(map[string]positionOut)
|
||||
|
||||
mapKey := func(args []string, fields map[string]interface{}, keys map[string]string) string {
|
||||
key := ""
|
||||
for _, a := range args {
|
||||
if v, ok := fields[a]; ok {
|
||||
key += a + ":" + fmt.Sprintf("%v", v) + ","
|
||||
continue
|
||||
}
|
||||
if v, ok := keys[a]; ok {
|
||||
key += a + ":" + v + ","
|
||||
continue
|
||||
}
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
for k, v := itr.Next(); k != -1; k, v = itr.Next() {
|
||||
t := k
|
||||
if bt := itr.TMin(); bt > -1 {
|
||||
t = bt
|
||||
}
|
||||
callArgs := c.Fields()
|
||||
tags := itr.Tags()
|
||||
// TODO in the future we need to send in fields as well
|
||||
// this will allow a user to query on both fields and tags
|
||||
// fields will take the priority over tags if there is a name collision
|
||||
key := mapKey(callArgs, nil, tags)
|
||||
if out, ok := outMap[key]; ok {
|
||||
out.points = append(out.points, PositionPoint{t, v, itr.Tags()})
|
||||
outMap[key] = out
|
||||
} else {
|
||||
out = positionOut{callArgs: topCallArgs(c)}
|
||||
out.points = append(out.points, PositionPoint{t, v, itr.Tags()})
|
||||
outMap[key] = out
|
||||
}
|
||||
}
|
||||
// Sort all the maps
|
||||
for k, v := range outMap {
|
||||
sort.Sort(topMapOut{v})
|
||||
outMap[k] = v
|
||||
}
|
||||
|
||||
slice := func(needed int64, m map[string]positionOut) PositionPoints {
|
||||
points := PositionPoints{}
|
||||
var collected int64
|
||||
for k, v := range m {
|
||||
if len(v.points) > 0 {
|
||||
points = append(points, v.points[0])
|
||||
v.points = v.points[1:]
|
||||
m[k] = v
|
||||
collected++
|
||||
}
|
||||
}
|
||||
o := positionOut{callArgs: topCallArgs(c), points: points}
|
||||
sort.Sort(topMapOut{o})
|
||||
points = o.points
|
||||
// If we got more than we needed, sort them and return the top
|
||||
if collected > needed {
|
||||
points = o.points[:needed]
|
||||
}
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
points := PositionPoints{}
|
||||
var collected int64
|
||||
for collected < limit {
|
||||
p := slice(limit-collected, outMap)
|
||||
if len(p) == 0 {
|
||||
break
|
||||
}
|
||||
points = append(points, p...)
|
||||
collected += int64(len(p))
|
||||
}
|
||||
if len(points) > 0 {
|
||||
return points
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReduceTop computes the top values for each key.
|
||||
func ReduceTop(values []interface{}, c *influxql.Call) interface{} {
|
||||
lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral)
|
||||
limit := int64(lit.Val)
|
||||
|
||||
out := positionOut{callArgs: topCallArgs(c)}
|
||||
for _, v := range values {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
o, _ := v.(PositionPoints)
|
||||
out.points = append(out.points, o...)
|
||||
}
|
||||
|
||||
// Get the top of the top values
|
||||
sort.Sort(topMapOut{out})
|
||||
// If we have more than we asked for, only send back the top values
|
||||
if int64(len(out.points)) > limit {
|
||||
out.points = out.points[:limit]
|
||||
}
|
||||
|
||||
// now we need to resort the tops by time
|
||||
sort.Sort(topReduceOut{out})
|
||||
if len(out.points) > 0 {
|
||||
return out.points
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MapEcho emits the data points for each group by interval
|
||||
func MapEcho(itr iterator) interface{} {
|
||||
func MapEcho(itr Iterator) interface{} {
|
||||
var values []interface{}
|
||||
|
||||
for k, v := itr.Next(); k != -1; k, v = itr.Next() {
|
||||
|
@ -1360,12 +1046,8 @@ func MapEcho(itr iterator) interface{} {
|
|||
}
|
||||
|
||||
// ReducePercentile computes the percentile of values for each key.
|
||||
func ReducePercentile(values []interface{}, c *influxql.Call) interface{} {
|
||||
// Checks that this arg exists and is a valid type are done in the parsing validation
|
||||
// and have test coverage there
|
||||
lit, _ := c.Args[1].(*influxql.NumberLiteral)
|
||||
percentile := lit.Val
|
||||
|
||||
func ReducePercentile(percentile float64) ReduceFunc {
|
||||
return func(values []interface{}) interface{} {
|
||||
var allValues []float64
|
||||
|
||||
for _, v := range values {
|
||||
|
@ -1394,9 +1076,10 @@ func ReducePercentile(values []interface{}, c *influxql.Call) interface{} {
|
|||
|
||||
return allValues[index]
|
||||
}
|
||||
}
|
||||
|
||||
// IsNumeric returns whether a given aggregate can only be run on numeric fields.
|
||||
func IsNumeric(c *influxql.Call) bool {
|
||||
func IsNumeric(c *Call) bool {
|
||||
switch c.Name {
|
||||
case "count", "first", "last", "distinct":
|
||||
return false
|
||||
|
@ -1406,7 +1089,7 @@ func IsNumeric(c *influxql.Call) bool {
|
|||
}
|
||||
|
||||
// MapRawQuery is for queries without aggregates
|
||||
func MapRawQuery(itr iterator) interface{} {
|
||||
func MapRawQuery(itr Iterator) interface{} {
|
||||
var values []*rawQueryMapOutput
|
||||
for k, v := itr.Next(); k != -1; k, v = itr.Next() {
|
||||
val := &rawQueryMapOutput{k, v}
|
||||
|
@ -1429,17 +1112,3 @@ type rawOutputs []*rawQueryMapOutput
|
|||
func (a rawOutputs) Len() int { return len(a) }
|
||||
func (a rawOutputs) Less(i, j int) bool { return a[i].Time < a[j].Time }
|
||||
func (a rawOutputs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
func greaterThan(a, b interface{}) bool {
|
||||
switch t := a.(type) {
|
||||
case int64:
|
||||
return t > b.(int64)
|
||||
case float64:
|
||||
return t > b.(float64)
|
||||
case string:
|
||||
return t > b.(string)
|
||||
case bool:
|
||||
return t == true
|
||||
}
|
||||
return false
|
||||
}
534 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/functions_test.go generated vendored Normal file

@@ -0,0 +1,534 @@
package influxql

import (
    "reflect"
    "testing"
    "time"

    "github.com/davecgh/go-spew/spew"
)

import "sort"

type point struct {
    seriesKey string
    time int64
    value interface{}
}

type testIterator struct {
    values []point
}

func (t *testIterator) Next() (timestamp int64, value interface{}) {
    if len(t.values) > 0 {
        v := t.values[0]
        t.values = t.values[1:]
        return v.time, v.value
    }

    return -1, nil
}

func TestMapMeanNoValues(t *testing.T) {
    iter := &testIterator{}
    if got := MapMean(iter); got != nil {
        t.Errorf("output mismatch: exp nil got %v", got)
    }
}

func TestMapMean(t *testing.T) {

    tests := []struct {
        input []point
        output *meanMapOutput
    }{
        { // Single point
            input: []point{point{"0", 1, 1.0}},
            output: &meanMapOutput{1, 1, Float64Type},
        },
        { // Two points
            input: []point{
                point{"0", 1, 2.0},
                point{"0", 2, 8.0},
            },
            output: &meanMapOutput{2, 5.0, Float64Type},
        },
    }

    for _, test := range tests {
        iter := &testIterator{
            values: test.input,
        }

        got := MapMean(iter)
        if got == nil {
            t.Fatalf("MapMean(%v): output mismatch: exp %v got %v", test.input, test.output, got)
        }

        if got.(*meanMapOutput).Count != test.output.Count || got.(*meanMapOutput).Mean != test.output.Mean {
            t.Errorf("output mismatch: exp %v got %v", test.output, got)
        }
    }
}
func TestInitializeMapFuncPercentile(t *testing.T) {
    // No args
    c := &Call{
        Name: "percentile",
        Args: []Expr{},
    }
    _, err := InitializeMapFunc(c)
    if err == nil {
        t.Errorf("InitializeMapFunc(%v) expected error. got nil", c)
    }

    if exp := "expected two arguments for percentile()"; err.Error() != exp {
        t.Errorf("InitializeMapFunc(%v) mismatch. exp %v got %v", c, exp, err.Error())
    }

    // No percentile arg
    c = &Call{
        Name: "percentile",
        Args: []Expr{
            &VarRef{Val: "field1"},
        },
    }

    _, err = InitializeMapFunc(c)
    if err == nil {
        t.Errorf("InitializeMapFunc(%v) expected error. got nil", c)
    }

    if exp := "expected two arguments for percentile()"; err.Error() != exp {
        t.Errorf("InitializeMapFunc(%v) mismatch. exp %v got %v", c, exp, err.Error())
    }
}

func TestInitializeMapFuncDerivative(t *testing.T) {

    for _, fn := range []string{"derivative", "non_negative_derivative"} {
        // No args should fail
        c := &Call{
            Name: fn,
            Args: []Expr{},
        }

        _, err := InitializeMapFunc(c)
        if err == nil {
            t.Errorf("InitializeMapFunc(%v) expected error. got nil", c)
        }

        // Single field arg should return MapEcho
        c = &Call{
            Name: fn,
            Args: []Expr{
                &VarRef{Val: " field1"},
                &DurationLiteral{Val: time.Hour},
            },
        }

        _, err = InitializeMapFunc(c)
        if err != nil {
            t.Errorf("InitializeMapFunc(%v) unexpected error. got %v", c, err)
        }

        // Nested Aggregate func should return the map func for the nested aggregate
        c = &Call{
            Name: fn,
            Args: []Expr{
                &Call{Name: "mean", Args: []Expr{&VarRef{Val: "field1"}}},
                &DurationLiteral{Val: time.Hour},
            },
        }

        _, err = InitializeMapFunc(c)
        if err != nil {
            t.Errorf("InitializeMapFunc(%v) unexpected error. got %v", c, err)
        }
    }
}

func TestInitializeReduceFuncPercentile(t *testing.T) {
    // No args
    c := &Call{
        Name: "percentile",
        Args: []Expr{},
    }
    _, err := InitializeReduceFunc(c)
    if err == nil {
        t.Errorf("InitializedReduceFunc(%v) expected error. got nil", c)
    }

    if exp := "expected float argument in percentile()"; err.Error() != exp {
        t.Errorf("InitializedReduceFunc(%v) mismatch. exp %v got %v", c, exp, err.Error())
    }

    // No percentile arg
    c = &Call{
        Name: "percentile",
        Args: []Expr{
            &VarRef{Val: "field1"},
        },
    }

    _, err = InitializeReduceFunc(c)
    if err == nil {
        t.Errorf("InitializedReduceFunc(%v) expected error. got nil", c)
    }

    if exp := "expected float argument in percentile()"; err.Error() != exp {
        t.Errorf("InitializedReduceFunc(%v) mismatch. exp %v got %v", c, exp, err.Error())
    }
}

func TestReducePercentileNil(t *testing.T) {

    // ReducePercentile should ignore nil values when calculating the percentile
    fn := ReducePercentile(100)
    input := []interface{}{
        nil,
    }

    got := fn(input)
    if got != nil {
        t.Fatalf("ReducePercentile(100) returned wrong type. exp nil got %v", got)
    }
}

func TestMapDistinct(t *testing.T) {
    const ( // prove that we're ignoring seriesKey
        seriesKey1 = "1"
        seriesKey2 = "2"
    )

    const ( // prove that we're ignoring time
        timeId1 = iota + 1
        timeId2
        timeId3
        timeId4
        timeId5
        timeId6
    )

    iter := &testIterator{
        values: []point{
            {seriesKey1, timeId1, uint64(1)},
            {seriesKey1, timeId2, uint64(1)},
            {seriesKey1, timeId3, "1"},
            {seriesKey2, timeId4, uint64(1)},
            {seriesKey2, timeId5, float64(1.0)},
            {seriesKey2, timeId6, "1"},
        },
    }

    values := MapDistinct(iter).(distinctValues)

    if exp, got := 3, len(values); exp != got {
        t.Errorf("Wrong number of values. exp %v got %v", exp, got)
    }

    sort.Sort(values)

    exp := distinctValues{
        uint64(1),
        float64(1),
        "1",
    }

    if !reflect.DeepEqual(values, exp) {
        t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(values))
    }
}

func TestMapDistinctNil(t *testing.T) {
    iter := &testIterator{
        values: []point{},
    }

    values := MapDistinct(iter)

    if values != nil {
        t.Errorf("Wrong values. exp nil got %v", spew.Sdump(values))
    }
}

func TestReduceDistinct(t *testing.T) {
    v1 := distinctValues{
        "2",
        "1",
        float64(2.0),
        float64(1),
        uint64(2),
        uint64(1),
        true,
        false,
    }

    expect := distinctValues{
        uint64(1),
        float64(1),
        uint64(2),
        float64(2),
        false,
        true,
        "1",
        "2",
    }

    got := ReduceDistinct([]interface{}{v1, v1, expect})

    if !reflect.DeepEqual(got, expect) {
        t.Errorf("Wrong values. exp %v got %v", spew.Sdump(expect), spew.Sdump(got))
    }
}

func TestReduceDistinctNil(t *testing.T) {
    tests := []struct {
        name string
        values []interface{}
    }{
        {
            name: "nil values",
            values: nil,
        },
        {
            name: "nil mapper",
            values: []interface{}{nil},
        },
        {
            name: "no mappers",
            values: []interface{}{},
        },
        {
            name: "empty mappper (len 1)",
            values: []interface{}{distinctValues{}},
        },
        {
            name: "empty mappper (len 2)",
            values: []interface{}{distinctValues{}, distinctValues{}},
        },
    }

    for _, test := range tests {
        t.Log(test.name)
        got := ReduceDistinct(test.values)
        if got != nil {
            t.Errorf("Wrong values. exp nil got %v", spew.Sdump(got))
        }
    }
}

func Test_distinctValues_Sort(t *testing.T) {
    values := distinctValues{
        "2",
        "1",
        float64(2.0),
        float64(1),
        uint64(2),
        uint64(1),
        true,
        false,
    }

    expect := distinctValues{
        uint64(1),
        float64(1),
        uint64(2),
        float64(2),
        false,
        true,
        "1",
        "2",
    }

    sort.Sort(values)

    if !reflect.DeepEqual(values, expect) {
        t.Errorf("Wrong values. exp %v got %v", spew.Sdump(expect), spew.Sdump(values))
    }
}

func TestMapCountDistinct(t *testing.T) {
    const ( // prove that we're ignoring seriesKey
        seriesKey1 = "1"
        seriesKey2 = "2"
    )

    const ( // prove that we're ignoring time
        timeId1 = iota + 1
        timeId2
        timeId3
        timeId4
        timeId5
        timeId6
        timeId7
    )

    iter := &testIterator{
        values: []point{
            {seriesKey1, timeId1, uint64(1)},
            {seriesKey1, timeId2, uint64(1)},
            {seriesKey1, timeId3, "1"},
            {seriesKey2, timeId4, uint64(1)},
            {seriesKey2, timeId5, float64(1.0)},
            {seriesKey2, timeId6, "1"},
            {seriesKey2, timeId7, true},
        },
    }

    values := MapCountDistinct(iter).(map[interface{}]struct{})

    if exp, got := 4, len(values); exp != got {
        t.Errorf("Wrong number of values. exp %v got %v", exp, got)
    }

    exp := map[interface{}]struct{}{
        uint64(1): struct{}{},
        float64(1): struct{}{},
        "1": struct{}{},
        true: struct{}{},
    }

    if !reflect.DeepEqual(values, exp) {
        t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(values))
    }
}

func TestMapCountDistinctNil(t *testing.T) {
    iter := &testIterator{
        values: []point{},
    }

    values := MapCountDistinct(iter)

    if values != nil {
        t.Errorf("Wrong values. exp nil got %v", spew.Sdump(values))
    }
}

func TestReduceCountDistinct(t *testing.T) {
    v1 := map[interface{}]struct{}{
        "2": struct{}{},
        "1": struct{}{},
        float64(2.0): struct{}{},
        float64(1): struct{}{},
        uint64(2): struct{}{},
        uint64(1): struct{}{},
        true: struct{}{},
        false: struct{}{},
    }

    v2 := map[interface{}]struct{}{
        uint64(1): struct{}{},
        float64(1): struct{}{},
        uint64(2): struct{}{},
        float64(2): struct{}{},
        false: struct{}{},
        true: struct{}{},
        "1": struct{}{},
        "2": struct{}{},
    }

    exp := 8
    got := ReduceCountDistinct([]interface{}{v1, v1, v2})

    if !reflect.DeepEqual(got, exp) {
        t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(got))
    }
}

func TestReduceCountDistinctNil(t *testing.T) {
    emptyResults := make(map[interface{}]struct{})
    tests := []struct {
        name string
        values []interface{}
    }{
        {
            name: "nil values",
            values: nil,
        },
        {
            name: "nil mapper",
            values: []interface{}{nil},
        },
        {
            name: "no mappers",
            values: []interface{}{},
        },
        {
            name: "empty mappper (len 1)",
            values: []interface{}{emptyResults},
        },
        {
            name: "empty mappper (len 2)",
            values: []interface{}{emptyResults, emptyResults},
        },
    }

    for _, test := range tests {
        t.Log(test.name)
        got := ReduceCountDistinct(test.values)
        if got != 0 {
            t.Errorf("Wrong values. exp nil got %v", spew.Sdump(got))
        }
    }
}

var getSortedRangeData = []float64{
    60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
    20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
    40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
    10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
    50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
    30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
}

var getSortedRangeTests = []struct {
    name string
    data []float64
    start int
    count int
    expected []float64
}{
    {"first 5", getSortedRangeData, 0, 5, []float64{0, 1, 2, 3, 4}},
    {"0 length", getSortedRangeData, 8, 0, []float64{}},
    {"past end of data", getSortedRangeData, len(getSortedRangeData) - 3, 5, []float64{67, 68, 69}},
}

func TestGetSortedRange(t *testing.T) {
    for _, tt := range getSortedRangeTests {
        results := getSortedRange(tt.data, tt.start, tt.count)
        if len(results) != len(tt.expected) {
            t.Errorf("Test %s error. Expected getSortedRange to return %v but got %v", tt.name, tt.expected, results)
        }
        for i, point := range tt.expected {
            if point != results[i] {
                t.Errorf("Test %s error. getSortedRange returned wrong result for index %v. Expected %v but got %v", tt.name, i, point, results[i])
            }
        }
    }
}

var benchGetSortedRangeResults []float64

func BenchmarkGetSortedRangeByPivot(b *testing.B) {
    data := make([]float64, len(getSortedRangeData))
    var results []float64
    for i := 0; i < b.N; i++ {
        copy(data, getSortedRangeData)
        results = getSortedRange(data, 8, 15)
    }
    benchGetSortedRangeResults = results
}

func BenchmarkGetSortedRangeBySort(b *testing.B) {
    data := make([]float64, len(getSortedRangeData))
    var results []float64
    for i := 0; i < b.N; i++ {
        copy(data, getSortedRangeData)
        sort.Float64s(data)
        results = data[8:23]
    }
    benchGetSortedRangeResults = results
}
@@ -129,8 +129,6 @@ func (p *Parser) parseShowStatement() (Statement, error) {
        return nil, newParseError(tokstr(tok, lit), []string{"POLICIES"}, pos)
    case SERIES:
        return p.parseShowSeriesStatement()
    case SHARDS:
        return p.parseShowShardsStatement()
    case STATS:
        return p.parseShowStatsStatement()
    case DIAGNOSTICS:
@@ -490,9 +488,6 @@ func (p *Parser) parseSegmentedIdents() ([]string, error) {
        if ch := p.peekRune(); ch == '/' {
            // Next segment is a regex so we're done.
            break
        } else if ch == ':' {
            // Next segment is context-specific so let caller handle it.
            break
        } else if ch == '.' {
            // Add an empty identifier.
            idents = append(idents, "")
@@ -804,18 +799,7 @@ func (p *Parser) parseTarget(tr targetRequirement) (*Target, error) {
        return nil, err
    }

    if len(idents) < 3 {
        // Check for source measurement reference.
        if ch := p.peekRune(); ch == ':' {
            if err := p.parseTokens([]Token{COLON, MEASUREMENT}); err != nil {
                return nil, err
            }
            // Append empty measurement name.
            idents = append(idents, "")
        }
    }

    t := &Target{Measurement: &Measurement{IsTarget: true}}
    t := &Target{Measurement: &Measurement{}}

    switch len(idents) {
    case 1:
@@ -1266,16 +1250,6 @@ func (p *Parser) parseCreateContinuousQueryStatement() (*CreateContinuousQuerySt
func (p *Parser) parseCreateDatabaseStatement() (*CreateDatabaseStatement, error) {
    stmt := &CreateDatabaseStatement{}

    // Look for "IF NOT EXISTS"
    if tok, _, _ := p.scanIgnoreWhitespace(); tok == IF {
        if err := p.parseTokens([]Token{NOT, EXISTS}); err != nil {
            return nil, err
        }
        stmt.IfNotExists = true
    } else {
        p.unscan()
    }

    // Parse the name of the database to be created.
    lit, err := p.parseIdent()
    if err != nil {
@@ -1411,12 +1385,6 @@ func (p *Parser) parseRetentionPolicy() (name string, dfault bool, err error) {
    return
}

// parseShowShardsStatement parses a string for "SHOW SHARDS" statement.
// This function assumes the "SHOW SHARDS" tokens have already been consumed.
func (p *Parser) parseShowShardsStatement() (*ShowShardsStatement, error) {
    return &ShowShardsStatement{}, nil
}

// parseShowStatsStatement parses a string and returns a ShowStatsStatement.
// This function assumes the "SHOW STATS" tokens have already been consumed.
func (p *Parser) parseShowStatsStatement() (*ShowStatsStatement, error) {
@@ -1473,6 +1441,13 @@ func (p *Parser) parseDropContinuousQueryStatement() (*DropContinuousQueryStatem
func (p *Parser) parseFields() (Fields, error) {
    var fields Fields

    // Check for "*" (i.e., "all fields")
    if tok, _, _ := p.scanIgnoreWhitespace(); tok == MUL {
        fields = append(fields, &Field{&Wildcard{}, ""})
        return fields, nil
    }
    p.unscan()

    for {
        // Parse the field.
        f, err := p.parseField()
@@ -1802,29 +1777,24 @@ func (p *Parser) parseOrderBy() (SortFields, error) {
func (p *Parser) parseSortFields() (SortFields, error) {
    var fields SortFields

    tok, pos, lit := p.scanIgnoreWhitespace()

    switch tok {
    // The first field after an order by may not have a field name (e.g. ORDER BY ASC)
    case ASC, DESC:
        fields = append(fields, &SortField{Ascending: (tok == ASC)})
    // If it's a token, parse it as a sort field. At least one is required.
    case IDENT:
    // If first token is ASC or DESC, all fields are sorted.
    if tok, pos, lit := p.scanIgnoreWhitespace(); tok == ASC || tok == DESC {
        if tok == DESC {
            // Token must be ASC, until other sort orders are supported.
            return nil, errors.New("only ORDER BY time ASC supported at this time")
        }
        return append(fields, &SortField{Ascending: (tok == ASC)}), nil
    } else if tok != IDENT {
        return nil, newParseError(tokstr(tok, lit), []string{"identifier", "ASC", "DESC"}, pos)
    }
    p.unscan()

        // At least one field is required.
        field, err := p.parseSortField()
        if err != nil {
            return nil, err
        }

        if lit != "time" {
            return nil, errors.New("only ORDER BY time supported at this time")
        }

        fields = append(fields, field)
    // Parse error...
    default:
        return nil, newParseError(tokstr(tok, lit), []string{"identifier", "ASC", "DESC"}, pos)
    }

    // Parse additional fields.
    for {
@@ -1843,8 +1813,9 @@ func (p *Parser) parseSortFields() (SortFields, error) {
        fields = append(fields, field)
    }

    if len(fields) > 1 {
        return nil, errors.New("only ORDER BY time supported at this time")
    // First SortField must be time ASC, until other sort orders are supported.
    if len(fields) > 1 || fields[0].Name != "time" || !fields[0].Ascending {
        return nil, errors.New("only ORDER BY time ASC supported at this time")
    }

    return fields, nil
@ -73,45 +73,11 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
s: `SELECT * FROM myseries GROUP BY *`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: true,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.Wildcard{}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
s: `SELECT field1, * FROM myseries GROUP BY *`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: true,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.VarRef{Val: "field1"}},
|
||||
{Expr: &influxql.Wildcard{}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
s: `SELECT *, field1 FROM myseries GROUP BY *`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: true,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.Wildcard{}},
|
||||
{Expr: &influxql.VarRef{Val: "field1"}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},
|
||||
},
|
||||
},
|
||||
|
||||
// SELECT statement
|
||||
{
|
||||
s: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY DESC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)),
|
||||
skip: true,
|
||||
s: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY ASC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)),
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: false,
|
||||
Fields: []*influxql.Field{
|
||||
|
@ -135,32 +101,12 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
},
|
||||
Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 10 * time.Hour}}}}},
|
||||
SortFields: []*influxql.SortField{
|
||||
{Ascending: false},
|
||||
{Ascending: true},
|
||||
},
|
||||
Limit: 20,
|
||||
Offset: 10,
|
||||
},
|
||||
},
|
||||
{
|
||||
s: `SELECT "foo.bar.baz" AS foo FROM myseries`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: true,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
s: `SELECT "foo.bar.baz" AS foo FROM foo`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: true,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "foo"}},
|
||||
},
|
||||
},
|
||||
|
||||
// derivative
|
||||
{
|
||||
|
@ -268,65 +214,6 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
},
|
||||
},
|
||||
|
||||
// select percentile statements
|
||||
{
|
||||
s: `select percentile("field1", 2.0) from cpu`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: false,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.Call{Name: "percentile", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2.0}}}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
|
||||
},
|
||||
},
|
||||
|
||||
// select top statements
|
||||
{
|
||||
s: `select top("field1", 2) from cpu`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: false,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
s: `select top(field1, 2) from cpu`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: false,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
s: `select top(field1, 2), tag1 from cpu`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: false,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}},
|
||||
{Expr: &influxql.VarRef{Val: "tag1"}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
s: `select top(field1, tag1, 2), tag1 from cpu`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
IsRawQuery: false,
|
||||
Fields: []*influxql.Field{
|
||||
{Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "tag1"}, &influxql.NumberLiteral{Val: 2}}}},
|
||||
{Expr: &influxql.VarRef{Val: "tag1"}},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
|
||||
},
|
||||
},
|
||||
|
||||
// select distinct statements
|
||||
{
|
||||
s: `select distinct(field1) from cpu`,
|
||||
|
@ -700,17 +587,17 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
// SHOW SERIES WHERE with ORDER BY and LIMIT
|
||||
{
|
||||
skip: true,
|
||||
s: `SHOW SERIES WHERE region = 'order by desc' ORDER BY DESC, field1, field2 DESC LIMIT 10`,
|
||||
s: `SHOW SERIES WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
|
||||
stmt: &influxql.ShowSeriesStatement{
|
||||
Condition: &influxql.BinaryExpr{
|
||||
Op: influxql.EQ,
|
||||
LHS: &influxql.VarRef{Val: "region"},
|
||||
RHS: &influxql.StringLiteral{Val: "order by desc"},
|
||||
RHS: &influxql.StringLiteral{Val: "uswest"},
|
||||
},
|
||||
SortFields: []*influxql.SortField{
|
||||
&influxql.SortField{Ascending: false},
|
||||
&influxql.SortField{Name: "field1", Ascending: true},
|
||||
&influxql.SortField{Name: "field2"},
|
||||
{Ascending: true},
|
||||
{Name: "field1"},
|
||||
{Name: "field2"},
|
||||
},
|
||||
Limit: 10,
|
||||
},
|
||||
|
@ -943,7 +830,7 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
Database: "testdb",
|
||||
Source: &influxql.SelectStatement{
|
||||
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}},
|
||||
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}},
|
||||
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1"}},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
Dimensions: []*influxql.Dimension{
|
||||
{
|
||||
|
@ -967,7 +854,7 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
Source: &influxql.SelectStatement{
|
||||
IsRawQuery: true,
|
||||
Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
|
||||
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}},
|
||||
Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1"}},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu_load_short"}},
|
||||
},
|
||||
},
|
||||
|
@ -982,7 +869,7 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
Source: &influxql.SelectStatement{
|
||||
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}},
|
||||
Target: &influxql.Target{
|
||||
Measurement: &influxql.Measurement{RetentionPolicy: "1h.policy1", Name: "cpu.load", IsTarget: true},
|
||||
Measurement: &influxql.Measurement{RetentionPolicy: "1h.policy1", Name: "cpu.load"},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
Dimensions: []*influxql.Dimension{
|
||||
|
@ -1009,7 +896,7 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
IsRawQuery: true,
|
||||
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "value"}}},
|
||||
Target: &influxql.Target{
|
||||
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "value", IsTarget: true},
|
||||
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "value"},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
},
|
||||
|
@ -1027,52 +914,18 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "transmit_rx"}},
|
||||
{Expr: &influxql.VarRef{Val: "transmit_tx"}}},
|
||||
Target: &influxql.Target{
|
||||
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "network", IsTarget: true},
|
||||
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "network"},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// CREATE CONTINUOUS QUERY with backreference measurement name
|
||||
{
|
||||
s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT mean(value) INTO "policy1".:measurement FROM /^[a-z]+.*/ GROUP BY time(1m) END`,
|
||||
stmt: &influxql.CreateContinuousQueryStatement{
|
||||
Name: "myquery",
|
||||
Database: "testdb",
|
||||
Source: &influxql.SelectStatement{
|
||||
Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
|
||||
Target: &influxql.Target{
|
||||
Measurement: &influxql.Measurement{RetentionPolicy: "policy1", IsTarget: true},
|
||||
},
|
||||
Sources: []influxql.Source{&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`^[a-z]+.*`)}}},
|
||||
Dimensions: []*influxql.Dimension{
|
||||
{
|
||||
Expr: &influxql.Call{
|
||||
Name: "time",
|
||||
Args: []influxql.Expr{
|
||||
&influxql.DurationLiteral{Val: 1 * time.Minute},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// CREATE DATABASE statement
|
||||
{
|
||||
s: `CREATE DATABASE testdb`,
|
||||
stmt: &influxql.CreateDatabaseStatement{
|
||||
Name: "testdb",
|
||||
IfNotExists: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
s: `CREATE DATABASE IF NOT EXISTS testdb`,
|
||||
stmt: &influxql.CreateDatabaseStatement{
|
||||
Name: "testdb",
|
||||
IfNotExists: true,
|
||||
},
|
||||
},
|
||||
|
||||
|
@ -1344,12 +1197,6 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
},
|
||||
},
|
||||
|
||||
// SHOW SHARDS
|
||||
{
|
||||
s: `SHOW SHARDS`,
|
||||
stmt: &influxql.ShowShardsStatement{},
|
||||
},
|
||||
|
||||
// SHOW DIAGNOSTICS
|
||||
{
|
||||
s: `SHOW DIAGNOSTICS`,
|
||||
|
@ -1366,21 +1213,6 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
{s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`},
|
||||
{s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected number at line 1, char 35`},
|
||||
{s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `fractional parts not allowed in LIMIT at line 1, char 35`},
|
||||
{s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`},
|
||||
{s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`},
|
||||
{s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`},
|
||||
{s: `SELECT top(field1,host,server,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`},
|
||||
{s: `SELECT top(field1,5,server,2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5.000`},
|
||||
{s: `SELECT top(field1,max(foo),server,2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`},
|
||||
{s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 0`},
|
||||
{s: `SELECT bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
|
||||
{s: `SELECT bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`},
|
||||
{s: `SELECT bottom(field1,host,server,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`},
|
||||
{s: `SELECT bottom(field1,5,server,2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5.000`},
|
||||
{s: `SELECT bottom(field1,max(foo),server,2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`},
|
||||
{s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`},
|
||||
{s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`},
|
||||
{s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`},
|
||||
{s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected number at line 1, char 36`},
|
||||
{s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `fractional parts not allowed in OFFSET at line 1, char 36`},
|
||||
{s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`},
|
||||
|
@ -1388,20 +1220,19 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
{s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, DESC at line 1, char 38`},
|
||||
{s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, DESC at line 1, char 38`},
|
||||
{s: `SELECT field1 FROM myseries ORDER BY time ASC,`, err: `found EOF, expected identifier at line 1, char 47`},
|
||||
{s: `SELECT field1 FROM myseries ORDER BY time, field1`, err: `only ORDER BY time supported at this time`},
|
||||
{s: `SELECT field1 FROM myseries ORDER BY DESC`, err: `only ORDER BY time ASC supported at this time`},
|
||||
{s: `SELECT field1 FROM myseries ORDER BY field1`, err: `only ORDER BY time ASC supported at this time`},
|
||||
{s: `SELECT field1 FROM myseries ORDER BY time DESC`, err: `only ORDER BY time ASC supported at this time`},
|
||||
{s: `SELECT field1 FROM myseries ORDER BY time, field1`, err: `only ORDER BY time ASC supported at this time`},
|
||||
{s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`},
|
||||
{s: `SELECT field1 FROM foo group by time(1s)`, err: `GROUP BY requires at least one aggregate function`},
|
||||
{s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`},
|
||||
{s: `SELECT count(value) FROM foo group by time(1s)`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},
|
||||
{s: `SELECT count(value) FROM foo group by time(1s) where host = 'hosta.influxdb.org'`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},
|
||||
{s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`},
|
||||
{s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`},
|
||||
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected one argument`},
|
||||
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have one duration argument`},
|
||||
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`},
|
||||
{s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},
|
||||
{s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse number at line 1, char 8`},
|
||||
{s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},
|
||||
{s: `SELECT derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
|
||||
{s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`},
|
||||
{s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`},
|
||||
{s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`},
|
||||
|
@@ -1412,18 +1243,15 @@ func TestParser_ParseStatement(t *testing.T) {
{s: `SELECT count(distinct field1, field2) FROM myseries`, err: `count(distinct <field>) can only have one argument`},
{s: `select count(distinct(too, many, arguments)) from myseries`, err: `count(distinct <field>) can only have one argument`},
{s: `select count() from myseries`, err: `invalid number of arguments for count, expected 1, got 0`},
{s: `SELECT derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `select derivative() from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 0`},
{s: `select derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 3`},
{s: `SELECT derivative(value) FROM myseries where time < now() and time > now() - 1d`, err: `aggregate function required inside the call to derivative`},
{s: `SELECT non_negative_derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `select non_negative_derivative() from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`},
{s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`},
{s: `SELECT non_negative_derivative(value) FROM myseries where time < now() and time > now() - 1d`, err: `aggregate function required inside the call to non_negative_derivative`},
{s: `SELECT field1 from myseries WHERE host =~ 'asd' LIMIT 1`, err: `found asd, expected regex at line 1, char 42`},
{s: `SELECT value > 2 FROM cpu`, err: `invalid operator > in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},
{s: `SELECT value = 2 FROM cpu`, err: `invalid operator = in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},
{s: `SELECT s =~ /foo/ FROM cpu`, err: `invalid operator =~ in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},
{s: `SELECT foo, * from cpu`, err: `wildcards can not be combined with other fields`},
{s: `SELECT *, * from cpu`, err: `found ,, expected FROM at line 1, char 9`},
{s: `SELECT *, foo from cpu`, err: `found ,, expected FROM at line 1, char 9`},
{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`},
{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`},
{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
@@ -1448,10 +1276,6 @@ func TestParser_ParseStatement(t *testing.T) {
{s: `CREATE CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 19`},
{s: `CREATE CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `DROP FOO`, err: `found FOO, expected SERIES, CONTINUOUS, MEASUREMENT at line 1, char 6`},
{s: `CREATE DATABASE`, err: `found EOF, expected identifier at line 1, char 17`},
{s: `CREATE DATABASE IF`, err: `found EOF, expected NOT at line 1, char 20`},
{s: `CREATE DATABASE IF NOT`, err: `found EOF, expected EXISTS at line 1, char 24`},
{s: `CREATE DATABASE IF NOT EXISTS`, err: `found EOF, expected identifier at line 1, char 31`},
{s: `DROP DATABASE`, err: `found EOF, expected identifier at line 1, char 15`},
{s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`},
{s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`},
@@ -1572,8 +1396,7 @@ func TestParser_ParseStatement(t *testing.T) {
if !reflect.DeepEqual(tt.err, errstring(err)) {
t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
} else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) {
t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt))
t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String())
t.Logf("\nexp=%s\ngot=%s\n", mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt))
t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt)
}
}
@@ -95,8 +95,6 @@ func (s *Scanner) Scan() (tok Token, pos Pos, lit string) {
return COMMA, pos, ""
case ';':
return SEMICOLON, pos, ""
case ':':
return COLON, pos, ""
}

return ILLEGAL, pos, string(ch0)

@@ -136,10 +136,8 @@ func TestScanner_Scan(t *testing.T) {
{s: `KEYS`, tok: influxql.KEYS},
{s: `LIMIT`, tok: influxql.LIMIT},
{s: `SHOW`, tok: influxql.SHOW},
{s: `SHARDS`, tok: influxql.SHARDS},
{s: `MEASUREMENT`, tok: influxql.MEASUREMENT},
{s: `MEASUREMENTS`, tok: influxql.MEASUREMENTS},
{s: `NOT`, tok: influxql.NOT},
{s: `OFFSET`, tok: influxql.OFFSET},
{s: `ON`, tok: influxql.ON},
{s: `ORDER`, tok: influxql.ORDER},

@@ -50,7 +50,6 @@ const (
LPAREN // (
RPAREN // )
COMMA // ,
COLON // :
SEMICOLON // ;
DOT // .

@@ -92,7 +91,6 @@ const (
LIMIT
MEASUREMENT
MEASUREMENTS
NOT
OFFSET
ON
ORDER
@@ -111,7 +109,6 @@ const (
SERVERS
SET
SHOW
SHARDS
SLIMIT
STATS
DIAGNOSTICS
@@ -162,7 +159,6 @@ var tokens = [...]string{
LPAREN: "(",
RPAREN: ")",
COMMA: ",",
COLON: ":",
SEMICOLON: ";",
DOT: ".",

@@ -202,7 +198,6 @@ var tokens = [...]string{
LIMIT: "LIMIT",
MEASUREMENT: "MEASUREMENT",
MEASUREMENTS: "MEASUREMENTS",
NOT: "NOT",
OFFSET: "OFFSET",
ON: "ON",
ORDER: "ORDER",
@@ -221,7 +216,6 @@ var tokens = [...]string{
SERVERS: "SERVERS",
SET: "SET",
SHOW: "SHOW",
SHARDS: "SHARDS",
SLIMIT: "SLIMIT",
SOFFSET: "SOFFSET",
STATS: "STATS",
@@ -1,45 +0,0 @@
package influxdb

import (
"expvar"
"sync"
)

var expvarMu sync.Mutex

// NewStatistics returns an expvar-based map with the given key. Within that map
// is another map. Within there "name" is the Measurement name, "tags" are the tags,
// and values are placed at the key "values".
func NewStatistics(key, name string, tags map[string]string) *expvar.Map {
expvarMu.Lock()
defer expvarMu.Unlock()

// Add expvar for this service.
var v expvar.Var
if v = expvar.Get(key); v == nil {
v = expvar.NewMap(key)
}
m := v.(*expvar.Map)

// Set the name
nameVar := &expvar.String{}
nameVar.Set(name)
m.Set("name", nameVar)

// Set the tags
tagsVar := &expvar.Map{}
tagsVar.Init()
for k, v := range tags {
value := &expvar.String{}
value.Set(v)
tagsVar.Set(k, value)
}
m.Set("tags", tagsVar)

// Create and set the values entry used for actual stats.
statMap := &expvar.Map{}
statMap.Init()
m.Set("values", statMap)

return statMap
}
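// A minimal usage sketch of the helper above (cf. Test_RegisterStats later in
// this diff); the "udp" key/name, the tag, and the stat names are illustrative
// assumptions only, not part of the original file:
//
//	statMap := influxdb.NewStatistics("udp", "udp", map[string]string{"bind": ":8089"})
//	statMap.Add("pointsReceived", 1)       // Add/AddFloat are standard expvar.Map methods
//	statMap.AddFloat("batchSizeMean", 5.2)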
@@ -132,7 +132,7 @@ func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo,
return &di.RetentionPolicies[i], nil
}
}
return nil, nil
return nil, ErrRetentionPolicyNotFound
}

// CreateRetentionPolicy creates a new retention policy on a database.
@@ -172,11 +172,6 @@ func (data *Data) DropRetentionPolicy(database, name string) error {
return ErrDatabaseNotFound
}

// Prohibit dropping the default retention policy.
if di.DefaultRetentionPolicy == name {
return ErrRetentionPolicyDefault
}

// Remove from list.
for i := range di.RetentionPolicies {
if di.RetentionPolicies[i].Name == name {
@@ -278,6 +273,7 @@ func (data *Data) ShardGroupsByTimeRange(database, policy string, tmin, tmax tim
}
groups = append(groups, g)
}
sort.Sort(ShardGroupInfos(groups))
return groups, nil
}

@@ -348,16 +344,13 @@ func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time)
si := &sgi.Shards[i]
for j := 0; j < replicaN; j++ {
nodeID := data.Nodes[nodeIndex%len(data.Nodes)].ID
si.Owners = append(si.Owners, ShardOwner{NodeID: nodeID})
si.OwnerIDs = append(si.OwnerIDs, nodeID)
nodeIndex++
}
}

// Retention policy has a new shard group, so update the policy. Shard
// Groups must be stored in sorted order, as other parts of the system
// assume this to be the case.
// Retention policy has a new shard group, so update the policy.
rpi.ShardGroups = append(rpi.ShardGroups, sgi)
sort.Sort(ShardGroupInfos(rpi.ShardGroups))

return nil
}
@@ -669,31 +662,6 @@ func (di DatabaseInfo) RetentionPolicy(name string) *RetentionPolicyInfo {
return nil
}

// ShardInfos returns a list of all shards' info for the database.
func (di DatabaseInfo) ShardInfos() []ShardInfo {
shards := map[uint64]*ShardInfo{}
for i := range di.RetentionPolicies {
for j := range di.RetentionPolicies[i].ShardGroups {
sg := di.RetentionPolicies[i].ShardGroups[j]
// Skip deleted shard groups
if sg.Deleted() {
continue
}
for k := range sg.Shards {
si := &di.RetentionPolicies[i].ShardGroups[j].Shards[k]
shards[si.ID] = si
}
}
}

infos := make([]ShardInfo, 0, len(shards))
for _, info := range shards {
infos = append(infos, *info)
}

return infos
}

// clone returns a deep copy of di.
func (di DatabaseInfo) clone() DatabaseInfo {
other := di
@@ -950,13 +918,13 @@ func (sgi *ShardGroupInfo) unmarshal(pb *internal.ShardGroupInfo) {
// ShardInfo represents metadata about a shard.
type ShardInfo struct {
ID uint64
Owners []ShardOwner
OwnerIDs []uint64
}

// OwnedBy returns whether the shard's owner IDs includes nodeID.
func (si ShardInfo) OwnedBy(nodeID uint64) bool {
for _, so := range si.Owners {
if so.NodeID == nodeID {
for _, id := range si.OwnerIDs {
if id == nodeID {
return true
}
}
@@ -967,11 +935,9 @@ func (si ShardInfo) OwnedBy(nodeID uint64) bool {
func (si ShardInfo) clone() ShardInfo {
other := si

if si.Owners != nil {
other.Owners = make([]ShardOwner, len(si.Owners))
for i := range si.Owners {
other.Owners[i] = si.Owners[i].clone()
}
if si.OwnerIDs != nil {
other.OwnerIDs = make([]uint64, len(si.OwnerIDs))
copy(other.OwnerIDs, si.OwnerIDs)
}

return other
@@ -983,64 +949,17 @@ func (si ShardInfo) marshal() *internal.ShardInfo {
ID: proto.Uint64(si.ID),
}

pb.Owners = make([]*internal.ShardOwner, len(si.Owners))
for i := range si.Owners {
pb.Owners[i] = si.Owners[i].marshal()
}
pb.OwnerIDs = make([]uint64, len(si.OwnerIDs))
copy(pb.OwnerIDs, si.OwnerIDs)

return pb
}

// UnmarshalBinary decodes the object from a binary format.
func (si *ShardInfo) UnmarshalBinary(buf []byte) error {
var pb internal.ShardInfo
if err := proto.Unmarshal(buf, &pb); err != nil {
return err
}
si.unmarshal(&pb)
return nil
}

// unmarshal deserializes from a protobuf representation.
func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) {
si.ID = pb.GetID()

// If deprecated "OwnerIDs" exists then convert it to "Owners" format.
if len(pb.GetOwnerIDs()) > 0 {
si.Owners = make([]ShardOwner, len(pb.GetOwnerIDs()))
for i, x := range pb.GetOwnerIDs() {
si.Owners[i].unmarshal(&internal.ShardOwner{
NodeID: proto.Uint64(x),
})
}
} else if len(pb.GetOwners()) > 0 {
si.Owners = make([]ShardOwner, len(pb.GetOwners()))
for i, x := range pb.GetOwners() {
si.Owners[i].unmarshal(x)
}
}
}

// ShardOwner represents a node that owns a shard.
type ShardOwner struct {
NodeID uint64
}

// clone returns a deep copy of so.
func (so ShardOwner) clone() ShardOwner {
return so
}

// marshal serializes to a protobuf representation.
func (so ShardOwner) marshal() *internal.ShardOwner {
return &internal.ShardOwner{
NodeID: proto.Uint64(so.NodeID),
}
}

// unmarshal deserializes from a protobuf representation.
func (so *ShardOwner) unmarshal(pb *internal.ShardOwner) {
so.NodeID = pb.GetNodeID()
si.OwnerIDs = make([]uint64, len(pb.GetOwnerIDs()))
copy(si.OwnerIDs, pb.GetOwnerIDs())
}

// ContinuousQueryInfo represents metadata about a continuous query.
@@ -9,10 +9,8 @@ import (
"time"

"github.com/davecgh/go-spew/spew"
"github.com/gogo/protobuf/proto"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/meta/internal"
)

// Ensure a node can be created.
@@ -301,13 +299,7 @@ func TestData_CreateShardGroup(t *testing.T) {
StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
EndTime: time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC),
Shards: []meta.ShardInfo{
{
ID: 1,
Owners: []meta.ShardOwner{
{NodeID: 1},
{NodeID: 2},
},
},
{ID: 1, OwnerIDs: []uint64{1, 2}},
},
}) {
t.Fatalf("unexpected shard group: %#v", sgi)
@@ -579,11 +571,7 @@ func TestData_Clone(t *testing.T) {
Shards: []meta.ShardInfo{
{
ID: 200,
Owners: []meta.ShardOwner{
{NodeID: 1},
{NodeID: 3},
{NodeID: 4},
},
OwnerIDs: []uint64{1, 3, 4},
},
},
},
@@ -617,8 +605,8 @@ func TestData_Clone(t *testing.T) {
}

// Ensure that changing data in the clone does not affect the original.
other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID = 9
if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID; v != 3 {
other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1] = 9
if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1]; v != 3 {
t.Fatalf("editing clone changed original: %v", v)
}
}
@@ -650,11 +638,7 @@ func TestData_MarshalBinary(t *testing.T) {
Shards: []meta.ShardInfo{
{
ID: 200,
Owners: []meta.ShardOwner{
{NodeID: 1},
{NodeID: 3},
{NodeID: 4},
},
OwnerIDs: []uint64{1, 3, 4},
},
},
},
@@ -698,33 +682,3 @@ func TestData_MarshalBinary(t *testing.T) {
t.Fatalf("unexpected users: %#v", other.Users)
}
}

// Ensure shards with deprecated "OwnerIDs" can be decoded.
func TestShardInfo_UnmarshalBinary_OwnerIDs(t *testing.T) {
// Encode deprecated form to bytes.
buf, err := proto.Marshal(&internal.ShardInfo{
ID: proto.Uint64(1),
OwnerIDs: []uint64{10, 20, 30},
})
if err != nil {
t.Fatal(err)
}

// Decode deprecated form.
var si meta.ShardInfo
if err := si.UnmarshalBinary(buf); err != nil {
t.Fatal(err)
}

// Verify data is migrated correctly.
if !reflect.DeepEqual(si, meta.ShardInfo{
ID: 1,
Owners: []meta.ShardOwner{
{NodeID: 10},
{NodeID: 20},
{NodeID: 30},
},
}) {
t.Fatalf("unexpected shard info: %s", spew.Sdump(si))
}
}
@@ -43,10 +43,6 @@ var (
// ErrRetentionPolicyExists is returned when creating an already existing policy.
ErrRetentionPolicyExists = errors.New("retention policy already exists")

// ErrRetentionPolicyDefault is returned when attempting a prohibited operation
// on a default retention policy.
ErrRetentionPolicyDefault = errors.New("retention policy is default")

// ErrRetentionPolicyNotFound is returned when mutating a policy that doesn't exist.
ErrRetentionPolicyNotFound = errors.New("retention policy not found")

@@ -15,7 +15,6 @@ It has these top-level messages:
RetentionPolicyInfo
ShardGroupInfo
ShardInfo
ShardOwner
ContinuousQueryInfo
UserInfo
UserPrivilege
@@ -419,7 +418,6 @@ func (m *ShardGroupInfo) GetShards() []*ShardInfo {
type ShardInfo struct {
ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
OwnerIDs []uint64 `protobuf:"varint,2,rep" json:"OwnerIDs,omitempty"`
Owners []*ShardOwner `protobuf:"bytes,3,rep" json:"Owners,omitempty"`
XXX_unrecognized []byte `json:"-"`
}

@@ -441,29 +439,6 @@ func (m *ShardInfo) GetOwnerIDs() []uint64 {
return nil
}

func (m *ShardInfo) GetOwners() []*ShardOwner {
if m != nil {
return m.Owners
}
return nil
}

type ShardOwner struct {
NodeID *uint64 `protobuf:"varint,1,req" json:"NodeID,omitempty"`
XXX_unrecognized []byte `json:"-"`
}

func (m *ShardOwner) Reset() { *m = ShardOwner{} }
func (m *ShardOwner) String() string { return proto.CompactTextString(m) }
func (*ShardOwner) ProtoMessage() {}

func (m *ShardOwner) GetNodeID() uint64 {
if m != nil && m.NodeID != nil {
return *m.NodeID
}
return 0
}

type ContinuousQueryInfo struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"`
@@ -50,12 +50,7 @@ message ShardGroupInfo {

message ShardInfo {
required uint64 ID = 1;
repeated uint64 OwnerIDs = 2 [deprecated=true];
repeated ShardOwner Owners = 3;
}

message ShardOwner {
required uint64 NodeID = 1;
repeated uint64 OwnerIDs = 2;
}

message ContinuousQueryInfo {
@@ -122,7 +122,7 @@ func TestRPCFetchDataMatchesBlocking(t *testing.T) {

// Simulate the remote index changing and unblocking
fs.mu.Lock()
fs.md = &Data{Index: 100}
fs.md.Index = 100
fs.mu.Unlock()
close(fs.blockChan)
wg.Wait()
@@ -1,10 +1,7 @@
package meta

import (
"bytes"
"fmt"
"strconv"
"time"

"github.com/influxdb/influxdb/influxql"
)
@@ -83,8 +80,6 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.
return e.executeDropContinuousQueryStatement(stmt)
case *influxql.ShowContinuousQueriesStatement:
return e.executeShowContinuousQueriesStatement(stmt)
case *influxql.ShowShardsStatement:
return e.executeShowShardsStatement(stmt)
case *influxql.ShowStatsStatement:
return e.executeShowStatsStatement(stmt)
default:
@@ -94,9 +89,6 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.

func (e *StatementExecutor) executeCreateDatabaseStatement(q *influxql.CreateDatabaseStatement) *influxql.Result {
_, err := e.Store.CreateDatabase(q.Name)
if err == ErrDatabaseExists && q.IfNotExists {
err = nil
}
return &influxql.Result{Err: err}
}

@@ -289,50 +281,6 @@ func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql
return &influxql.Result{Series: rows}
}

func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) *influxql.Result {
dis, err := e.Store.Databases()
if err != nil {
return &influxql.Result{Err: err}
}

rows := []*influxql.Row{}
for _, di := range dis {
row := &influxql.Row{Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name}
for _, rpi := range di.RetentionPolicies {
for _, sgi := range rpi.ShardGroups {
for _, si := range sgi.Shards {
ownerIDs := make([]uint64, len(si.Owners))
for i, owner := range si.Owners {
ownerIDs[i] = owner.NodeID
}

row.Values = append(row.Values, []interface{}{
si.ID,
sgi.StartTime.UTC().Format(time.RFC3339),
sgi.EndTime.UTC().Format(time.RFC3339),
sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),
joinUint64(ownerIDs),
})
}
}
}
rows = append(rows, row)
}
return &influxql.Result{Series: rows}
}

func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) *influxql.Result {
return &influxql.Result{Err: fmt.Errorf("SHOW STATS is not implemented yet")}
}

// joinUint64 returns a comma-delimited string of uint64 numbers.
func joinUint64(a []uint64) string {
var buf bytes.Buffer
for i, x := range a {
buf.WriteString(strconv.FormatUint(x, 10))
if i < len(a)-1 {
buf.WriteRune(',')
}
}
return buf.String()
}
Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor_test.go (generated, vendored)
@@ -625,13 +625,13 @@ func TestStatementExecutor_ExecuteStatement_CreateContinuousQuery(t *testing.T)
t.Fatalf("unexpected database: %s", database)
} else if name != "cq0" {
t.Fatalf("unexpected name: %s", name)
} else if query != `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END` {
} else if query != `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END` {
t.Fatalf("unexpected query: %s", query)
}
return nil
}

stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END`)
stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END`)
if res := e.ExecuteStatement(stmt); res.Err != nil {
t.Fatal(res.Err)
} else if res.Series != nil {
@@ -646,7 +646,7 @@ func TestStatementExecutor_ExecuteStatement_CreateContinuousQuery_Err(t *testing
return errors.New("marker")
}

stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END`)
stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END`)
if res := e.ExecuteStatement(stmt); res.Err == nil || res.Err.Error() != "marker" {
t.Fatalf("unexpected error: %s", res.Err)
}
@@ -693,14 +693,14 @@ func TestStatementExecutor_ExecuteStatement_ShowContinuousQueries(t *testing.T)
{
Name: "db0",
ContinuousQueries: []meta.ContinuousQueryInfo{
{Name: "cq0", Query: "SELECT count(field1) INTO db1 FROM db0"},
{Name: "cq1", Query: "SELECT count(field1) INTO db2 FROM db0"},
{Name: "cq0", Query: "SELECT count(*) INTO db1 FROM db0"},
{Name: "cq1", Query: "SELECT count(*) INTO db2 FROM db0"},
},
},
{
Name: "db1",
ContinuousQueries: []meta.ContinuousQueryInfo{
{Name: "cq2", Query: "SELECT count(field1) INTO db3 FROM db1"},
{Name: "cq2", Query: "SELECT count(*) INTO db3 FROM db1"},
},
},
}, nil
@@ -714,15 +714,15 @@ func TestStatementExecutor_ExecuteStatement_ShowContinuousQueries(t *testing.T)
Name: "db0",
Columns: []string{"name", "query"},
Values: [][]interface{}{
{"cq0", "SELECT count(field1) INTO db1 FROM db0"},
{"cq1", "SELECT count(field1) INTO db2 FROM db0"},
{"cq0", "SELECT count(*) INTO db1 FROM db0"},
{"cq1", "SELECT count(*) INTO db2 FROM db0"},
},
},
{
Name: "db1",
Columns: []string{"name", "query"},
Values: [][]interface{}{
{"cq2", "SELECT count(field1) INTO db3 FROM db1"},
{"cq2", "SELECT count(*) INTO db3 FROM db1"},
},
},
}) {
@@ -755,7 +755,7 @@ func TestStatementExecutor_ExecuteStatement_Unsupported(t *testing.T) {

// Execute a SELECT statement.
NewStatementExecutor().ExecuteStatement(
influxql.MustParseStatement(`SELECT count(field1) FROM db0`),
influxql.MustParseStatement(`SELECT count(*) FROM db0`),
)
}()

@@ -765,57 +765,6 @@ func TestStatementExecutor_ExecuteStatement_Unsupported(t *testing.T) {
}
}

// Ensure a SHOW SHARDS statement can be executed.
func TestStatementExecutor_ExecuteStatement_ShowShards(t *testing.T) {
e := NewStatementExecutor()
e.Store.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
return []meta.DatabaseInfo{
{
Name: "foo",
RetentionPolicies: []meta.RetentionPolicyInfo{
{
Duration: time.Second,
ShardGroups: []meta.ShardGroupInfo{
{
StartTime: time.Unix(0, 0),
EndTime: time.Unix(1, 0),
Shards: []meta.ShardInfo{
{
ID: 1,
Owners: []meta.ShardOwner{
{NodeID: 1},
{NodeID: 2},
{NodeID: 3},
},
},
{
ID: 2,
},
},
},
},
},
},
},
}, nil
}

if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW SHARDS`)); res.Err != nil {
t.Fatal(res.Err)
} else if !reflect.DeepEqual(res.Series, influxql.Rows{
{
Name: "foo",
Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"},
Values: [][]interface{}{
{uint64(1), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", "1,2,3"},
{uint64(2), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", ""},
},
},
}) {
t.Fatalf("unexpected rows: %s", spew.Sdump(res.Series))
}
}

// StatementExecutor represents a test wrapper for meta.StatementExecutor.
type StatementExecutor struct {
*meta.StatementExecutor
@@ -254,10 +254,7 @@ func (s *Store) Open() error {
close(s.ready)
}

// Wait for a leader to be elected so we know the raft log is loaded
// and up to date
<-s.ready
return s.WaitForLeader(0)
return nil
}

// syncNodeInfo continuously tries to update the current node's hostname
@@ -861,7 +858,6 @@ func (s *Store) CreateDatabase(name string) (*DatabaseInfo, error) {
); err != nil {
return nil, err
}
s.Logger.Printf("database '%s' created", name)

if s.retentionAutoCreate {
// Read node count.
@@ -981,7 +977,6 @@ func (s *Store) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo)
return nil, err
}

s.Logger.Printf("retention policy '%s' for database '%s' created", rpi.Name, database)
return s.RetentionPolicy(database, rpi.Name)
}

@@ -1394,26 +1389,28 @@ func (s *Store) UserCount() (count int, err error) {
return
}

// PrecreateShardGroups creates shard groups whose endtime is before the 'to' time passed in, but
// is yet to expire before 'from'. This is to avoid the need for these shards to be created when data
// for the corresponding time range arrives. Shard creation involves Raft consensus, and precreation
// avoids taking the hit at write-time.
func (s *Store) PrecreateShardGroups(from, to time.Time) error {
// PrecreateShardGroups creates shard groups whose endtime is before the cutoff time passed in. This
// avoids the need for these shards to be created when data for the corresponding time range arrives.
// Shard creation involves Raft consensus, and precreation avoids taking the hit at write-time.
func (s *Store) PrecreateShardGroups(cutoff time.Time) error {
s.read(func(data *Data) error {
for _, di := range data.Databases {
for _, rp := range di.RetentionPolicies {
if len(rp.ShardGroups) == 0 {
// No data was ever written to this group, or all groups have been deleted.
for _, g := range rp.ShardGroups {
// Check to see if it is not deleted and going to end before our interval
if !g.Deleted() && g.EndTime.Before(cutoff) {
nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond)

// Check if successive shard group exists.
if sgi, err := s.ShardGroupByTimestamp(di.Name, rp.Name, nextShardGroupTime); err != nil {
s.Logger.Printf("failed to check if successive shard group for group exists %d: %s",
g.ID, err.Error())
continue
} else if sgi != nil && !sgi.Deleted() {
continue
}
g := rp.ShardGroups[len(rp.ShardGroups)-1] // Get the last group in time.
if !g.Deleted() && g.EndTime.Before(to) && g.EndTime.After(from) {
// Group is not deleted, will end before the future time, but is still yet to expire.
// This last check is important, so the system doesn't create shard groups wholly
// in the past.

// Create successive shard group.
nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond)
// It doesn't. Create it.
if newGroup, err := s.CreateShardGroupIfNotExists(di.Name, rp.Name, nextShardGroupTime); err != nil {
s.Logger.Printf("failed to create successive shard group for group %d: %s",
g.ID, err.Error())
@@ -1423,6 +1420,8 @@ func (s *Store) PrecreateShardGroups(from, to time.Time) error {
}
}
}

}
}
return nil
})
@@ -489,57 +489,30 @@ func TestStore_PrecreateShardGroup(t *testing.T) {
s := MustOpenStore()
defer s.Close()

// Create node, database, policy, & groups.
// Create node, database, policy, & group.
if _, err := s.CreateNode("host0"); err != nil {
t.Fatal(err)
} else if _, err := s.CreateDatabase("db0"); err != nil {
t.Fatal(err)
} else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil {
t.Fatal(err)
} else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp1", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil {
} else if _, err := s.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
t.Fatal(err)
} else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp2", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil {
t.Fatal(err)
} else if _, err := s.CreateShardGroup("db0", "rp0", time.Date(2001, time.January, 1, 1, 0, 0, 0, time.UTC)); err != nil {
t.Fatal(err)
} else if _, err := s.CreateShardGroup("db0", "rp1", time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC)); err != nil {
} else if err := s.PrecreateShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
t.Fatal(err)
}

if err := s.PrecreateShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC), time.Date(2001, time.January, 1, 3, 0, 0, 0, time.UTC)); err != nil {
t.Fatal(err)
}

// rp0 should undergo precreation.
groups, err := s.ShardGroups("db0", "rp0")
if err != nil {
t.Fatal(err)
}
if len(groups) != 2 {
t.Fatalf("shard group precreation failed to create new shard group for rp0")
t.Fatalf("shard group precreation failed to create new shard group")
}
if groups[1].StartTime != time.Date(2001, time.January, 1, 2, 0, 0, 0, time.UTC) {
if groups[1].StartTime != time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC) {
t.Fatalf("precreated shard group has wrong start time, exp %s, got %s",
time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC), groups[1].StartTime)
}

// rp1 should not undergo precreation since it is completely in the past.
groups, err = s.ShardGroups("db0", "rp1")
if err != nil {
t.Fatal(err)
}
if len(groups) != 1 {
t.Fatalf("shard group precreation created new shard group for rp1")
}

// rp2 should not undergo precreation since it has no shards.
groups, err = s.ShardGroups("db0", "rp2")
if err != nil {
t.Fatal(err)
}
if len(groups) != 0 {
t.Fatalf("shard group precreation created new shard group for rp2")
}
}

// Ensure the store can create a new continuous query.
@@ -855,14 +828,14 @@ func TestCluster_Restart(t *testing.T) {
t.Fatal("no leader found")
}

// Add 5 more nodes, 2 should become raft peers, 3 remote raft clients
// Add 5 more ndes, 2 should become raft peers, 3 remote raft clients
for i := 0; i < 5; i++ {
if err := c.Join(); err != nil {
t.Fatalf("failed to join cluster: %v", err)
}
}

// The tests use a host assigned listener port. We need to re-use
// The tests use a host host assigned listener port. We need to re-use
// the original ports when the new cluster is restarted so that the existing
// peer store addresses can be reached.
addrs := []string{}
@@ -885,25 +858,10 @@ func TestCluster_Restart(t *testing.T) {

// Re-create the cluster nodes from existing disk paths and addresses
stores := []*Store{}
storeChan := make(chan *Store)
for i, s := range c.Stores {

// Need to start each instance asynchronously because they have existing raft peers
// store. Starting one will block indefinitely because it will not be able to become
// leader until another peer is available to hold an election.
go func(addr, path string) {
store := MustOpenStoreWithPath(addr, path)
storeChan <- store
}(addrs[i], s.Path())

}

// Collect up our restart meta-stores
for range c.Stores {
store := <-storeChan
store := MustOpenStoreWithPath(addrs[i], s.Path())
stores = append(stores, store)
}

c.Stores = stores

// Wait for the cluster to stabilize
@@ -1,47 +0,0 @@
# System Monitoring
_This functionality should be considered experimental and is subject to change._

_System Monitoring_ means all statistical and diagnostic information made available to the user of an InfluxDB system, about the system itself. Its purpose is to assist with troubleshooting and performance analysis of the database itself.

## Statistics vs. Diagnostics
A distinction is made between _statistics_ and _diagnostics_ for the purposes of monitoring. Generally a statistical quantity is something that is counted, and which it makes sense to store persistently for historical analysis. Diagnostic information is not necessarily numerical, and may not make sense to store.

An example of statistical information would be the number of points received over UDP, or the number of queries executed. Examples of diagnostic information would be a list of current Graphite TCP connections, the version of InfluxDB, or the uptime of the process.

## System Statistics
`SHOW STATS` displays statistics about subsystems within the running `influxd` process. Statistics include points received, points indexed, bytes written to disk, TCP connections handled, etc. These statistics are all zero when the InfluxDB process starts.

All statistics are written, by default, by each node to a "monitor" database within the InfluxDB system, allowing analysis of aggregated statistical data using the standard InfluxQL language. This allows users to track the performance of their system. Importantly, this allows cluster-level statistics to be viewed, since by querying the monitor database, statistics from all nodes may be queried. This can be a very powerful approach for troubleshooting your InfluxDB system and understanding its behaviour.

## System Diagnostics
`SHOW DIAGNOSTICS` displays various diagnostic information about the `influxd` process. This information is not stored persistently within the InfluxDB system.

## Standard expvar support
All statistical information is available at the HTTP API endpoint `/debug/vars`, in [expvar](https://golang.org/pkg/expvar/) format, allowing external systems to monitor an InfluxDB node. By default, the full path to this endpoint is `http://localhost:8086/debug/vars`.
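As a sketch, an external collector might poll this endpoint directly; the address below assumes the default local `influxd` port mentioned in the paragraph above:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	resp, err := http.Get("http://localhost:8086/debug/vars")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The response is a single JSON object holding every registered expvar map.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```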

## Configuration
The `monitor` module allows the following configuration; a minimal decoding sketch follows the list:

* Whether to write statistical and diagnostic information to an InfluxDB system. This is enabled by default.
* The name of the database to which this information should be written. Defaults to `_internal`. The information is written to the default retention policy for the given database.
* The name of the retention policy, along with full configuration control of the retention policy, if the default retention policy is not suitable.
* The rate at which this information should be written. The default rate is once every 10 seconds.
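A minimal sketch of decoding this configuration, assuming the keys shown in `config.go` and `config_test.go` later in this diff; the values are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/BurntSushi/toml"
	"github.com/influxdb/influxdb/monitor"
)

func main() {
	var c monitor.Config
	if _, err := toml.Decode(`
store-enabled=true
store-database="_internal"
store-interval="10s"
`, &c); err != nil {
		log.Fatal(err)
	}
	log.Printf("storing statistics every %s", time.Duration(c.StoreInterval))
}
```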

# Design and Implementation

A new module named `monitor` supports all basic statistics and diagnostic functionality. This includes:

* Allowing other modules to register statistics and diagnostics information, allowing it to be accessed on demand by the `monitor` module.
* Serving the statistics and diagnostic information to the user, in response to commands such as `SHOW DIAGNOSTICS`.
* Exposing standard Go runtime information such as garbage collection statistics.
* Making all collected expvar data available via HTTP, for collection by 3rd-party tools.
* Writing the statistical information to the "monitor" database, for query purposes.

## Registering statistics and diagnostics

To export statistical information with the `monitor` system, code simply calls `influxdb.NewStatistics()` and receives an `expvar.Map` instance in response. This object can then be used to store statistics. To register diagnostic information, `monitor.RegisterDiagnosticsClient` is called, passing an `influxdb.monitor.DiagsClient` object to `monitor`; a sketch of the diagnostics path follows.
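A hedged sketch of registering a diagnostics client via the `DiagsClientFunc` adapter defined in `monitor.go` later in this diff; the "queryd" name and the field are hypothetical:

```go
package main

import "github.com/influxdb/influxdb/monitor"

func main() {
	m := monitor.New(monitor.NewConfig())

	// Any func() (*Diagnostic, error) can act as a DiagsClient via the adapter.
	m.RegisterDiagnosticsClient("queryd", monitor.DiagsClientFunc(func() (*monitor.Diagnostic, error) {
		return monitor.DiagnosticFromMap(map[string]interface{}{
			"activeQueries": 4,
		}), nil
	}))
}
```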

## expvar
Statistical information is gathered by each package using [expvar](https://golang.org/pkg/expvar). Each package registers a map using its package name.

Due to the nature of `expvar`, statistical information is reset to its initial state when a server is restarted.
@@ -1,18 +0,0 @@
package monitor

// build captures build diagnostics
type build struct {
Version string
Commit string
Branch string
}

func (b *build) Diagnostics() (*Diagnostic, error) {
diagnostics := map[string]interface{}{
"Version": b.Version,
"Commit": b.Commit,
"Branch": b.Branch,
}

return DiagnosticFromMap(diagnostics), nil
}
@@ -1,35 +0,0 @@
package monitor

import (
"time"

"github.com/influxdb/influxdb/toml"
)

const (
// DefaultStoreEnabled is whether the system writes gathered information in
// an InfluxDB system for historical analysis.
DefaultStoreEnabled = true

// DefaultStoreDatabase is the name of the database where gathered information is written
DefaultStoreDatabase = "_internal"

// DefaultStoreInterval is the period between storing gathered information.
DefaultStoreInterval = 10 * time.Second
)

// Config represents the configuration for the monitor service.
type Config struct {
StoreEnabled bool `toml:"store-enabled"`
StoreDatabase string `toml:"store-database"`
StoreInterval toml.Duration `toml:"store-interval"`
}

// NewConfig returns an instance of Config with defaults.
func NewConfig() Config {
return Config{
StoreEnabled: true,
StoreDatabase: DefaultStoreDatabase,
StoreInterval: toml.Duration(DefaultStoreInterval),
}
}
@@ -1,30 +0,0 @@
package monitor_test

import (
"testing"
"time"

"github.com/BurntSushi/toml"
"github.com/influxdb/influxdb/monitor"
)

func TestConfig_Parse(t *testing.T) {
// Parse configuration.
var c monitor.Config
if _, err := toml.Decode(`
store-enabled=true
store-database="the_db"
store-interval="10m"
`, &c); err != nil {
t.Fatal(err)
}

// Validate configuration.
if !c.StoreEnabled {
t.Fatalf("unexpected store-enabled: %v", c.StoreEnabled)
} else if c.StoreDatabase != "the_db" {
t.Fatalf("unexpected store-database: %s", c.StoreDatabase)
} else if time.Duration(c.StoreInterval) != 10*time.Minute {
t.Fatalf("unexpected store-interval: %s", c.StoreInterval)
}
}
@@ -1,19 +0,0 @@
package monitor

import (
"runtime"
)

// goRuntime captures Go runtime diagnostics
type goRuntime struct{}

func (g *goRuntime) Diagnostics() (*Diagnostic, error) {
diagnostics := map[string]interface{}{
"GOARCH": runtime.GOARCH,
"GOOS": runtime.GOOS,
"GOMAXPROCS": runtime.GOMAXPROCS(-1),
"version": runtime.Version(),
}

return DiagnosticFromMap(diagnostics), nil
}
@@ -1,21 +0,0 @@
package monitor

import (
"os"
)

// network captures network diagnostics
type network struct{}

func (n *network) Diagnostics() (*Diagnostic, error) {
h, err := os.Hostname()
if err != nil {
return nil, err
}

diagnostics := map[string]interface{}{
"hostname": h,
}

return DiagnosticFromMap(diagnostics), nil
}
@@ -1,406 +0,0 @@
package monitor

import (
"expvar"
"fmt"
"log"
"os"
"runtime"
"sort"
"strconv"
"sync"
"time"

"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/tsdb"
)

const leaderWaitTimeout = 30 * time.Second

const (
MonitorRetentionPolicy = "monitor"
MonitorRetentionPolicyDuration = 7 * 24 * time.Hour
)

// DiagsClient is the interface modules implement if they register diags with monitor.
type DiagsClient interface {
Diagnostics() (*Diagnostic, error)
}

// The DiagsClientFunc type is an adapter to allow the use of
// ordinary functions as Diagnostics clients.
type DiagsClientFunc func() (*Diagnostic, error)

// Diagnostics calls f().
func (f DiagsClientFunc) Diagnostics() (*Diagnostic, error) {
return f()
}

// Diagnostic represents a table of diagnostic information. The first value
// is the name of the columns, the second is a slice of interface slices containing
// the values for each column, by row. This information is never written to an InfluxDB
// system and is display-only. An example showing, say, connections follows:
//
// source_ip source_port dest_ip dest_port
// 182.1.0.2 2890 127.0.0.1 38901
// 174.33.1.2 2924 127.0.0.1 38902
type Diagnostic struct {
Columns []string
Rows [][]interface{}
}

func NewDiagnostic(columns []string) *Diagnostic {
return &Diagnostic{
Columns: columns,
Rows: make([][]interface{}, 0),
}
}

func (d *Diagnostic) AddRow(r []interface{}) {
d.Rows = append(d.Rows, r)
}

// Monitor represents an instance of the monitor system.
type Monitor struct {
// Build information for diagnostics.
Version string
Commit string
Branch string

wg sync.WaitGroup
done chan struct{}
mu sync.Mutex

diagRegistrations map[string]DiagsClient

storeEnabled bool
storeDatabase string
storeRetentionPolicy string
storeRetentionDuration time.Duration
storeReplicationFactor int
storeAddress string
storeInterval time.Duration

MetaStore interface {
ClusterID() (uint64, error)
NodeID() uint64
WaitForLeader(d time.Duration) error
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
CreateRetentionPolicyIfNotExists(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
SetDefaultRetentionPolicy(database, name string) error
DropRetentionPolicy(database, name string) error
}

PointsWriter interface {
WritePoints(p *cluster.WritePointsRequest) error
}

Logger *log.Logger
}

// New returns a new instance of the monitor system.
func New(c Config) *Monitor {
return &Monitor{
done: make(chan struct{}),
diagRegistrations: make(map[string]DiagsClient),
storeEnabled: c.StoreEnabled,
storeDatabase: c.StoreDatabase,
storeInterval: time.Duration(c.StoreInterval),
Logger: log.New(os.Stderr, "[monitor] ", log.LstdFlags),
}
}

// Open opens the monitoring system, using the given clusterID, node ID, and hostname
// for identification purposes.
func (m *Monitor) Open() error {
m.Logger.Printf("Starting monitor system")

// Self-register various stats and diagnostics.
m.RegisterDiagnosticsClient("build", &build{
Version: m.Version,
Commit: m.Commit,
Branch: m.Branch,
})
m.RegisterDiagnosticsClient("runtime", &goRuntime{})
m.RegisterDiagnosticsClient("network", &network{})
m.RegisterDiagnosticsClient("system", &system{})

// If enabled, record stats in an InfluxDB system.
if m.storeEnabled {

// Start periodic writes to system.
m.wg.Add(1)
go m.storeStatistics()
}

return nil
}

// Close closes the monitor system.
func (m *Monitor) Close() {
m.Logger.Println("shutting down monitor system")
close(m.done)
m.wg.Wait()
m.done = nil
}

// SetLogger sets the internal logger to the logger passed in.
func (m *Monitor) SetLogger(l *log.Logger) {
m.Logger = l
}

// RegisterDiagnosticsClient registers a diagnostics client with the given name and tags.
func (m *Monitor) RegisterDiagnosticsClient(name string, client DiagsClient) error {
m.mu.Lock()
defer m.mu.Unlock()
m.diagRegistrations[name] = client
m.Logger.Printf(`'%s' registered for diagnostics monitoring`, name)
return nil
}

// Statistics returns the combined statistics for all expvar data. The given
// tags are added to each of the returned statistics.
func (m *Monitor) Statistics(tags map[string]string) ([]*statistic, error) {
statistics := make([]*statistic, 0)

expvar.Do(func(kv expvar.KeyValue) {
// Skip built-in expvar stats.
if kv.Key == "memstats" || kv.Key == "cmdline" {
return
}

statistic := &statistic{
Tags: make(map[string]string),
Values: make(map[string]interface{}),
}

// Add any supplied tags.
for k, v := range tags {
statistic.Tags[k] = v
}

// Every other top-level expvar value is a map.
m := kv.Value.(*expvar.Map)

m.Do(func(subKV expvar.KeyValue) {
switch subKV.Key {
case "name":
// straight to string name.
u, err := strconv.Unquote(subKV.Value.String())
if err != nil {
return
}
statistic.Name = u
case "tags":
// string-string tags map.
n := subKV.Value.(*expvar.Map)
n.Do(func(t expvar.KeyValue) {
u, err := strconv.Unquote(t.Value.String())
if err != nil {
return
}
statistic.Tags[t.Key] = u
})
case "values":
// string-interface map.
n := subKV.Value.(*expvar.Map)
n.Do(func(kv expvar.KeyValue) {
var f interface{}
var err error
switch v := kv.Value.(type) {
case *expvar.Float:
f, err = strconv.ParseFloat(v.String(), 64)
if err != nil {
return
}
case *expvar.Int:
f, err = strconv.ParseInt(v.String(), 10, 64)
if err != nil {
return
}
default:
return
}
statistic.Values[kv.Key] = f
})
}
})

// If a registered client has no field data, don't include it in the results
if len(statistic.Values) == 0 {
return
}

statistics = append(statistics, statistic)
})

// Add Go memstats.
statistic := &statistic{
Name: "runtime",
Tags: make(map[string]string),
Values: make(map[string]interface{}),
}
var rt runtime.MemStats
runtime.ReadMemStats(&rt)
statistic.Values = map[string]interface{}{
"Alloc": int64(rt.Alloc),
"TotalAlloc": int64(rt.TotalAlloc),
"Sys": int64(rt.Sys),
"Lookups": int64(rt.Lookups),
"Mallocs": int64(rt.Mallocs),
"Frees": int64(rt.Frees),
"HeapAlloc": int64(rt.HeapAlloc),
"HeapSys": int64(rt.HeapSys),
"HeapIdle": int64(rt.HeapIdle),
"HeapInUse": int64(rt.HeapInuse),
"HeapReleased": int64(rt.HeapReleased),
"HeapObjects": int64(rt.HeapObjects),
"PauseTotalNs": int64(rt.PauseTotalNs),
"NumGC": int64(rt.NumGC),
"NumGoroutine": int64(runtime.NumGoroutine()),
}
statistics = append(statistics, statistic)

return statistics, nil
}

func (m *Monitor) Diagnostics() (map[string]*Diagnostic, error) {
m.mu.Lock()
defer m.mu.Unlock()

diags := make(map[string]*Diagnostic, len(m.diagRegistrations))
for k, v := range m.diagRegistrations {
d, err := v.Diagnostics()
if err != nil {
continue
}
diags[k] = d
}
return diags, nil
}

// storeStatistics writes the statistics to an InfluxDB system.
func (m *Monitor) storeStatistics() {
defer m.wg.Done()
m.Logger.Printf("Storing statistics in database '%s' retention policy '%s', at interval %s",
m.storeDatabase, m.storeRetentionPolicy, m.storeInterval)

if err := m.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
m.Logger.Printf("failed to detect a cluster leader, terminating storage: %s", err.Error())
return
}

// Get cluster-level metadata. Nothing different is going to happen if errors occur.
clusterID, _ := m.MetaStore.ClusterID()
nodeID := m.MetaStore.NodeID()
hostname, _ := os.Hostname()
clusterTags := map[string]string{
"clusterID": fmt.Sprintf("%d", clusterID),
"nodeID": fmt.Sprintf("%d", nodeID),
"hostname": hostname,
}

if _, err := m.MetaStore.CreateDatabaseIfNotExists(m.storeDatabase); err != nil {
m.Logger.Printf("failed to create database '%s', terminating storage: %s",
m.storeDatabase, err.Error())
return
}

rpi := meta.NewRetentionPolicyInfo(MonitorRetentionPolicy)
rpi.Duration = MonitorRetentionPolicyDuration
rpi.ReplicaN = 1
if _, err := m.MetaStore.CreateRetentionPolicyIfNotExists(m.storeDatabase, rpi); err != nil {
m.Logger.Printf("failed to create retention policy '%s', terminating storage: %s",
rpi.Name, err.Error())
return
}

if err := m.MetaStore.SetDefaultRetentionPolicy(m.storeDatabase, rpi.Name); err != nil {
m.Logger.Printf("failed to set default retention policy on '%s', terminating storage: %s",
m.storeDatabase, err.Error())
return
}

if err := m.MetaStore.DropRetentionPolicy(m.storeDatabase, "default"); err != nil && err != meta.ErrRetentionPolicyNotFound {
m.Logger.Printf("failed to delete retention policy 'default', terminating storage: %s", err.Error())
return
}

tick := time.NewTicker(m.storeInterval)
defer tick.Stop()
for {
select {
case <-tick.C:
stats, err := m.Statistics(clusterTags)
if err != nil {
m.Logger.Printf("failed to retrieve registered statistics: %s", err)
continue
}

points := make(tsdb.Points, 0, len(stats))
for _, s := range stats {
points = append(points, tsdb.NewPoint(s.Name, s.Tags, s.Values, time.Now()))
}

err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{
Database: m.storeDatabase,
RetentionPolicy: m.storeRetentionPolicy,
ConsistencyLevel: cluster.ConsistencyLevelOne,
Points: points,
})
if err != nil {
m.Logger.Printf("failed to store statistics: %s", err)
}
case <-m.done:
m.Logger.Printf("terminating storage of statistics")
return
}

}
}

// statistic represents the information returned by a single monitor client.
type statistic struct {
Name string
Tags map[string]string
Values map[string]interface{}
}

// newStatistic returns a new statistic object.
func newStatistic(name string, tags map[string]string, values map[string]interface{}) *statistic {
return &statistic{
Name: name,
Tags: tags,
Values: values,
}
}

// valueNames returns a sorted list of the value names, if any.
func (s *statistic) valueNames() []string {
a := make([]string, 0, len(s.Values))
for k, _ := range s.Values {
a = append(a, k)
}
sort.Strings(a)
return a
}

// DiagnosticFromMap returns a Diagnostic from a map.
func DiagnosticFromMap(m map[string]interface{}) *Diagnostic {
// Display columns in deterministic order.
sortedKeys := make([]string, 0, len(m))
for k, _ := range m {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)

d := NewDiagnostic(sortedKeys)
row := make([]interface{}, len(sortedKeys))
for i, k := range sortedKeys {
row[i] = m[k]
}
d.AddRow(row)

return d
}
@ -1,71 +0,0 @@
|
|||
package monitor
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
)
|
||||
|
||||
// Test that a registered stats client results in the correct SHOW STATS output.
|
||||
func Test_RegisterStats(t *testing.T) {
|
||||
monitor := openMonitor(t)
|
||||
executor := &StatementExecutor{Monitor: monitor}
|
||||
|
||||
// Register stats without tags.
|
||||
statMap := influxdb.NewStatistics("foo", "foo", nil)
|
||||
statMap.Add("bar", 1)
|
||||
statMap.AddFloat("qux", 2.4)
|
||||
json := executeShowStatsJSON(t, executor)
|
||||
if !strings.Contains(json, `"columns":["bar","qux"],"values":[[1,2.4]]`) || !strings.Contains(json, `"name":"foo"`) {
|
||||
t.Fatalf("SHOW STATS response incorrect, got: %s\n", json)
|
||||
}
|
||||
|
||||
// Register a client with tags.
|
||||
statMap = influxdb.NewStatistics("bar", "baz", map[string]string{"proto": "tcp"})
|
||||
statMap.Add("bar", 1)
|
||||
statMap.AddFloat("qux", 2.4)
|
||||
json = executeShowStatsJSON(t, executor)
|
||||
if !strings.Contains(json, `"columns":["bar","qux"],"values":[[1,2.4]]`) ||
|
||||
!strings.Contains(json, `"name":"baz"`) ||
|
||||
!strings.Contains(json, `"proto":"tcp"`) {
|
||||
t.Fatalf("SHOW STATS response incorrect, got: %s\n", json)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
type mockMetastore struct{}
|
||||
|
||||
func (m *mockMetastore) ClusterID() (uint64, error) { return 1, nil }
|
||||
func (m *mockMetastore) NodeID() uint64 { return 2 }
|
||||
func (m *mockMetastore) WaitForLeader(d time.Duration) error { return nil }
|
||||
func (m *mockMetastore) SetDefaultRetentionPolicy(database, name string) error { return nil }
|
||||
func (m *mockMetastore) DropRetentionPolicy(database, name string) error { return nil }
|
||||
func (m *mockMetastore) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockMetastore) CreateRetentionPolicyIfNotExists(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func openMonitor(t *testing.T) *Monitor {
|
||||
monitor := New(NewConfig())
|
||||
monitor.MetaStore = &mockMetastore{}
|
||||
err := monitor.Open()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open monitor: %s", err.Error())
|
||||
}
|
||||
return monitor
|
||||
}
|
||||
|
||||
func executeShowStatsJSON(t *testing.T, s *StatementExecutor) string {
|
||||
r := s.ExecuteStatement(&influxql.ShowStatsStatement{})
|
||||
b, err := r.MarshalJSON()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to decode SHOW STATS response: %s", err.Error())
|
||||
}
|
||||
return string(b)
|
||||
}
|
65 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/statement_executor.go generated vendored

@@ -1,65 +0,0 @@
package monitor

import (
	"fmt"

	"github.com/influxdb/influxdb/influxql"
)

// StatementExecutor translates InfluxQL queries to Monitor methods.
type StatementExecutor struct {
	Monitor interface {
		Statistics(map[string]string) ([]*statistic, error)
		Diagnostics() (map[string]*Diagnostic, error)
	}
}

// ExecuteStatement executes monitor-related query statements.
func (s *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.Result {
	switch stmt := stmt.(type) {
	case *influxql.ShowStatsStatement:
		return s.executeShowStatistics()
	case *influxql.ShowDiagnosticsStatement:
		return s.executeShowDiagnostics()
	default:
		panic(fmt.Sprintf("unsupported statement type: %T", stmt))
	}
}

func (s *StatementExecutor) executeShowStatistics() *influxql.Result {
	stats, err := s.Monitor.Statistics(nil)
	if err != nil {
		return &influxql.Result{Err: err}
	}
	rows := make([]*influxql.Row, len(stats))

	for n, stat := range stats {
		row := &influxql.Row{Name: stat.Name, Tags: stat.Tags}

		values := make([]interface{}, 0, len(stat.Values))
		for _, k := range stat.valueNames() {
			row.Columns = append(row.Columns, k)
			values = append(values, stat.Values[k])
		}
		row.Values = [][]interface{}{values}
		rows[n] = row
	}
	return &influxql.Result{Series: rows}
}

func (s *StatementExecutor) executeShowDiagnostics() *influxql.Result {
	diags, err := s.Monitor.Diagnostics()
	if err != nil {
		return &influxql.Result{Err: err}
	}
	rows := make([]*influxql.Row, 0, len(diags))

	for k, v := range diags {
		row := &influxql.Row{Name: k}

		row.Columns = v.Columns
		row.Values = v.Rows
		rows = append(rows, row)
	}
	return &influxql.Result{Series: rows}
}
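Aside: valueNames and executeShowStatistics above sort map keys so SHOW STATS columns come out in a deterministic order — Go randomizes map iteration. A self-contained sketch of that idiom, standard library only:

package main

import (
	"fmt"
	"sort"
)

// sortedColumns returns map keys in sorted order plus the matching
// values row, mirroring how the executor builds each result row.
func sortedColumns(values map[string]interface{}) ([]string, []interface{}) {
	cols := make([]string, 0, len(values))
	for k := range values {
		cols = append(cols, k)
	}
	sort.Strings(cols)
	row := make([]interface{}, len(cols))
	for i, k := range cols {
		row[i] = values[k]
	}
	return cols, row
}

func main() {
	cols, row := sortedColumns(map[string]interface{}{"qux": 2.4, "bar": 1})
	fmt.Println(cols, row) // [bar qux] [1 2.4]
}

Without the sort, the column order would differ between runs, which is exactly what the Test_RegisterStats string comparisons above could not tolerate.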
@@ -1,26 +0,0 @@
package monitor

import (
	"os"
	"time"
)

var startTime time.Time

func init() {
	startTime = time.Now().UTC()
}

// system captures system-level diagnostics
type system struct{}

func (s *system) Diagnostics() (*Diagnostic, error) {
	diagnostics := map[string]interface{}{
		"PID":         os.Getpid(),
		"currentTime": time.Now().UTC(),
		"started":     startTime,
		"uptime":      time.Since(startTime).String(),
	}

	return DiagnosticFromMap(diagnostics), nil
}
@@ -10,5 +10,5 @@ cd $GOPATH/src/github.com/influxdb
git clone https://github.com/influxdb/influxdb.git

cd $GOPATH/src/github.com/influxdb/influxdb
NIGHTLY_BUILD=true ./package.sh 0.9.5-nightly-`git log --pretty=format:'%h' -n 1`
NIGHTLY_BUILD=true ./package.sh 0.9.3-nightly-`git log --pretty=format:'%h' -n 1`
rm -rf $REPO_DIR
@@ -18,8 +18,6 @@
# package is successful, the script will offer to tag the repo using the
# supplied version string.
#
# See package.sh -h for options
#
# AWS upload: the script will also offer to upload the packages to S3. If
# this option is selected, the credentials should be present in the file
# ~/aws.conf. The contents should be of the form:

@@ -40,12 +38,9 @@ INSTALL_ROOT_DIR=/opt/influxdb
INFLUXDB_LOG_DIR=/var/log/influxdb
INFLUXDB_DATA_DIR=/var/opt/influxdb
CONFIG_ROOT_DIR=/etc/opt/influxdb
LOGROTATE_DIR=/etc/logrotate.d

SAMPLE_CONFIGURATION=etc/config.sample.toml
INITD_SCRIPT=scripts/init.sh
SYSTEMD_SCRIPT=scripts/influxdb.service
LOGROTATE=scripts/logrotate

TMP_WORK_DIR=`mktemp -d`
POST_INSTALL_PATH=`mktemp`

@@ -62,7 +57,7 @@ if [ -z "$FPM" ]; then
    FPM=`which fpm`
fi

GO_VERSION="go1.5"
GO_VERSION="go1.4.2"
GOPATH_INSTALL=
BINS=(
    influxd

@@ -74,16 +69,7 @@ BINS=(

# usage prints simple usage information.
usage() {
    cat << EOF >&2
$0 [-h] [-p|-w] [-t <dist>] <version>
    -p just build packages
    -w build packages for current working directory
       imply -p
    -t <dist>
       build package for <dist>
       <dist> can be rpm, tar or deb
       can have multiple -t
EOF
    echo -e "$0 [<version>] [-h]\n"
    cleanup_exit $1
}

@@ -182,41 +168,20 @@ make_dir_tree() {
        echo "Failed to create configuration directory -- aborting."
        cleanup_exit 1
    fi
    mkdir -p $work_dir/$LOGROTATE_DIR
    if [ $? -ne 0 ]; then
        echo "Failed to create logrotate directory -- aborting."
        cleanup_exit 1
    fi
}


# do_build builds the code. The version and commit must be passed in.
do_build() {
    for b in ${BINS[*]}; do
        rm -f $GOPATH_INSTALL/bin/$b
    done

    if [ -n "$WORKING_DIR" ]; then
        STASH=`git stash create -a`
        if [ $? -ne 0 ]; then
            echo "WARNING: failed to stash uncommitted local changes"
        fi
        git reset --hard
    fi

    go get -u -f -d ./...
    if [ $? -ne 0 ]; then
        echo "WARNING: failed to 'go get' packages."
    fi

    git checkout $TARGET_BRANCH # go get switches to master, so ensure we're back.

    if [ -n "$WORKING_DIR" ]; then
        git stash apply $STASH
        if [ $? -ne 0 ]; then #and apply previous uncommitted local changes
            echo "WARNING: failed to restore uncommitted local changes"
        fi
    fi

    version=$1
    commit=`git rev-parse HEAD`
    branch=`current_branch`

@@ -225,7 +190,7 @@ do_build() {
        cleanup_exit 1
    fi

    go install -a -ldflags="-X main.version=$version -X main.branch=$branch -X main.commit=$commit" ./...
    go install -a -ldflags="-X main.version $version -X main.branch $branch -X main.commit $commit" ./...
    if [ $? -ne 0 ]; then
        echo "Build failed, unable to create package -- aborting"
        cleanup_exit 1

@@ -245,18 +210,6 @@ ln -s $INSTALL_ROOT_DIR/versions/$version/influxd $INSTALL_ROOT_DIR/influxd
ln -s $INSTALL_ROOT_DIR/versions/$version/influx $INSTALL_ROOT_DIR/influx
ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh

if ! id influxdb >/dev/null 2>&1; then
    useradd --system -U -M influxdb
fi

# Systemd
if which systemctl > /dev/null 2>&1 ; then
    cp $INSTALL_ROOT_DIR/versions/$version/scripts/influxdb.service \
        /lib/systemd/system/influxdb.service
    systemctl enable influxdb

# Sysv
else
    rm -f /etc/init.d/influxdb
    ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/influxdb
    chmod +x /etc/init.d/influxdb

@@ -266,8 +219,10 @@ else
    else
        chkconfig --add influxdb
    fi
fi

if ! id influxdb >/dev/null 2>&1; then
    useradd --system -U -M influxdb
fi
chown -R -L influxdb:influxdb $INSTALL_ROOT_DIR
chmod -R a+rX $INSTALL_ROOT_DIR

@@ -280,80 +235,22 @@ EOF
}

###########################################################################
# Process options
while :
do
  case $1 in
    -h | --help)
# Start the packaging process.

if [ $# -ne 1 ]; then
    usage 1
elif [ $1 == "-h" ]; then
    usage 0
        ;;

    -p | --packages-only)
        PACKAGES_ONLY="PACKAGES_ONLY"
        shift
        ;;

    -t | --target)
        case "$2" in
            'tar') TAR_WANTED="gz"
                ;;
            'deb') DEB_WANTED="deb"
                ;;
            'rpm') RPM_WANTED="rpm"
                ;;
            *)
                echo "Unknown target distribution $2"
                usage 1
                ;;
        esac
        shift 2
        ;;

    -w | --working-directory)
        PACKAGES_ONLY="PACKAGES_ONLY"
        WORKING_DIR="WORKING_DIR"
        shift
        ;;

    -*)
        echo "Unknown option $1"
        usage 1
        ;;

    ?*)
        if [ -z $VERSION ]; then
        else
            VERSION=$1
            VERSION_UNDERSCORED=`echo "$VERSION" | tr - _`
            shift
        else
            echo "$1 : aborting version already set to $VERSION"
            usage 1
        fi
        ;;

    *) break
  esac
done

if [ -z "$DEB_WANTED$RPM_WANTED$TAR_WANTED" ]; then
    TAR_WANTED="gz"
    DEB_WANTED="deb"
    RPM_WANTED="rpm"
fi

if [ -z "$VERSION" ]; then
    echo -e "Missing version"
    usage 1
fi

###########################################################################
# Start the packaging process.

echo -e "\nStarting package process...\n"

# Ensure the current branch is correct.
TARGET_BRANCH=`current_branch`
if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then
if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Current branch is $TARGET_BRANCH. Start packaging this branch? [Y/n] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`
@@ -365,7 +262,7 @@ fi

check_gvm
check_gopath
if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then
if [ -z "$NIGHTLY_BUILD" ]; then
    check_clean_tree
    update_tree
    check_tag_exists $VERSION

@@ -393,31 +290,18 @@ if [ $? -ne 0 ]; then
fi
echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"

cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts
if [ $? -ne 0 ]; then
    echo "Failed to copy systemd script to packaging directory -- aborting."
    cleanup_exit 1
fi
echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"

cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/influxdb.conf
if [ $? -ne 0 ]; then
    echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting."
    cleanup_exit 1
fi

cp $LOGROTATE $TMP_WORK_DIR/$LOGROTATE_DIR/influxd
if [ $? -ne 0 ]; then
    echo "Failed to copy logrotate configuration to packaging directory -- aborting."
    cleanup_exit 1
fi

generate_postinstall_script $VERSION

###########################################################################
# Create the actual packages.

if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then
if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`

@@ -440,39 +324,32 @@ else
    debian_package=influxdb_${VERSION}_amd64.deb
fi

COMMON_FPM_ARGS="--log error -C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name influxdb --version $VERSION --config-files $CONFIG_ROOT_DIR --config-files $LOGROTATE_DIR ."

if [ -n "$DEB_WANTED" ]; then
    $FPM -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS
    if [ $? -ne 0 ]; then
        echo "Failed to create Debian package -- aborting."
        cleanup_exit 1
    fi
    echo "Debian package created successfully."
fi

if [ -n "$TAR_WANTED" ]; then
    $FPM -s dir -t tar --prefix influxdb_${VERSION}_${ARCH} -p influxdb_${VERSION}_${ARCH}.tar.gz --description "$DESCRIPTION" $COMMON_FPM_ARGS
    if [ $? -ne 0 ]; then
        echo "Failed to create Tar package -- aborting."
        cleanup_exit 1
    fi
    echo "Tar package created successfully."
fi

if [ -n "$RPM_WANTED" ]; then
    COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name influxdb --version $VERSION --config-files $CONFIG_ROOT_DIR ."
    $rpm_args $FPM -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS
    if [ $? -ne 0 ]; then
        echo "Failed to create RPM package -- aborting."
        cleanup_exit 1
    fi
    echo "RPM package created successfully."

$FPM -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
    echo "Failed to create Debian package -- aborting."
    cleanup_exit 1
fi
echo "Debian package created successfully."

$FPM -s dir -t tar --prefix influxdb_${VERSION}_${ARCH} -p influxdb_${VERSION}_${ARCH}.tar.gz --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
    echo "Failed to create Tar package -- aborting."
    cleanup_exit 1
fi
echo "Tar package created successfully."

###########################################################################
# Offer to tag the repo.

if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then
if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Tag source tree with v$VERSION and push to repo? [y/N] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`

@@ -483,13 +360,11 @@ if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then
        echo "Failed to create tag v$VERSION -- aborting"
        cleanup_exit 1
    fi
    echo "Tag v$VERSION created"
    git push origin v$VERSION
    if [ $? -ne 0 ]; then
        echo "Failed to push tag v$VERSION to repo -- aborting"
        cleanup_exit 1
    fi
    echo "Tag v$VERSION pushed to repo"
else
    echo "Not creating tag v$VERSION."
fi

@@ -498,7 +373,7 @@ fi
###########################################################################
# Offer to publish the packages.

if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then
if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Publish packages to S3? [y/N] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`

@@ -511,7 +386,7 @@ if [ "x$response" == "xy" -o -n "$NIGHTLY_BUILD" ]; then
    cleanup_exit 1
fi

for filepath in `ls *.{$DEB_WANTED,$RPM_WANTED,$TAR_WANTED} 2> /dev/null`; do
for filepath in `ls *.{deb,rpm,gz}`; do
    filename=`basename $filepath`
    if [ -n "$NIGHTLY_BUILD" ]; then
        filename=`echo $filename | sed s/$VERSION/nightly/`

@@ -519,10 +394,9 @@ if [ "x$response" == "xy" -o -n "$NIGHTLY_BUILD" ]; then
    fi
    AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://influxdb/$filename --acl public-read --region us-east-1
    if [ $? -ne 0 ]; then
        echo "Upload failed ($filename) -- aborting".
        echo "Upload failed -- aborting".
        cleanup_exit 1
    fi
    echo "$filename uploaded"
done
else
    echo "Not publishing packages to S3."

@@ -2,7 +2,6 @@

[Unit]
Description=InfluxDB is an open-source, distributed, time series database
Documentation=https://influxdb.com/docs/
After=network.target

[Service]

@@ -11,9 +10,7 @@ Group=influxdb
LimitNOFILE=65536
EnvironmentFile=-/etc/default/influxdb
ExecStart=/opt/influxdb/influxd -config /etc/opt/influxdb/influxdb.conf $INFLUXD_OPTS
KillMode=process
Restart=on-failure

[Install]
WantedBy=multi-user.target
Alias=influxd.service

@@ -45,7 +45,7 @@ PIDFILE=/var/run/influxdb/influxd.pid
PIDDIR=`dirname $PIDFILE`
if [ ! -d "$PIDDIR" ]; then
    mkdir -p $PIDDIR
    chown $USER:$GROUP $PIDDIR
    chown $GROUP:$USER $PIDDIR
fi

# Max open files

@@ -103,20 +103,7 @@ function killproc() {

    PID=`cat $2`

    /bin/kill -s $3 $PID
    while true; do
        pidof `basename $DAEMON` >/dev/null
        if [ $? -ne 0 ]; then
            return 0
        fi

        sleep 1
        n=$(expr $n + 1)
        if [ $n -eq 30 ]; then
            /bin/kill -s SIGKILL $PID
            return 0
        fi
    done
    kill -s $3 $PID
}

function log_failure_msg() {
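Aside: the removed killproc loop above (signal first, SIGKILL after 30 seconds if the daemon is still alive) is a common graceful-stop pattern. A rough Go analogue, assuming a Unix-like system — illustrative only, not code from this repo:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stop sends SIGTERM, then escalates to SIGKILL if the process has not
// exited within the grace period — the shape of the removed shell loop.
func stop(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period
	case <-time.After(grace):
		return cmd.Process.Kill() // escalate
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(stop(cmd, 2*time.Second))
}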
@@ -164,7 +151,7 @@ case $1 in
        if which start-stop-daemon > /dev/null 2>&1; then
            start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $PIDFILE --exec $DAEMON -- -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &
        else
            su -s /bin/sh -c "nohup $DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" $USER
            nohup $DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &
        fi
        log_success_msg "$NAME process was started"
        ;;

@@ -1,8 +0,0 @@
/var/log/influxdb/influxd.log {
    daily
    rotate 7
    missingok
    dateext
    copytruncate
    compress
}

@@ -13,9 +13,7 @@ const (

	DefaultRetentionPolicy = ""

	DefaultBatchSize = 1000

	DefaultBatchPending = 5
	DefaultBatchSize = 5000

	DefaultBatchDuration = toml.Duration(10 * time.Second)

@@ -29,7 +27,6 @@ type Config struct {
	Database        string        `toml:"database"`
	RetentionPolicy string        `toml:"retention-policy"`
	BatchSize       int           `toml:"batch-size"`
	BatchPending    int           `toml:"batch-pending"`
	BatchDuration   toml.Duration `toml:"batch-timeout"`
	TypesDB         string        `toml:"typesdb"`
}

@@ -41,7 +38,6 @@ func NewConfig() Config {
		Database:        DefaultDatabase,
		RetentionPolicy: DefaultRetentionPolicy,
		BatchSize:       DefaultBatchSize,
		BatchPending:    DefaultBatchPending,
		BatchDuration:   DefaultBatchDuration,
		TypesDB:         DefaultTypesDB,
	}
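Aside: with BatchPending reverted away, tsdb.NewPointBatcher is back to taking only a size and a flush duration (see the service.go hunk below). The sketch that follows is an illustrative stand-in for that size-or-timeout batching contract, not the tsdb implementation:

package main

import (
	"fmt"
	"time"
)

// batcher emits a batch when size items accumulate or when timeout
// elapses with a partial batch pending — the two triggers the reverted
// NewPointBatcher(size, duration) signature exposes.
func batcher(in <-chan int, out chan<- []int, size int, timeout time.Duration) {
	var batch []int
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case p, ok := <-in:
			if !ok { // input closed: flush the remainder and stop
				if len(batch) > 0 {
					out <- batch
				}
				close(out)
				return
			}
			batch = append(batch, p)
			if len(batch) >= size {
				out <- batch
				batch = nil
				timer.Reset(timeout)
			}
		case <-timer.C:
			if len(batch) > 0 {
				out <- batch
				batch = nil
			}
			timer.Reset(timeout)
		}
	}
}

func main() {
	in := make(chan int)
	out := make(chan []int)
	go batcher(in, out, 3, 50*time.Millisecond)
	go func() {
		for i := 0; i < 7; i++ {
			in <- i
		}
		close(in)
	}()
	for b := range out {
		fmt.Println(b) // [0 1 2] [3 4 5] [6]
	}
}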
41 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go generated vendored
@@ -1,16 +1,13 @@
package collectd

import (
	"expvar"
	"fmt"
	"log"
	"net"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/influxdb/influxdb"
	"github.com/influxdb/influxdb/cluster"
	"github.com/influxdb/influxdb/meta"
	"github.com/influxdb/influxdb/tsdb"

@@ -19,17 +16,6 @@ import (

const leaderWaitTimeout = 30 * time.Second

// statistics gathered by the collectd service.
const (
	statPointsReceived      = "points_rx"
	statBytesReceived       = "bytes_rx"
	statPointsParseFail     = "points_parse_fail"
	statReadFail            = "read_fail"
	statBatchesTrasmitted   = "batches_tx"
	statPointsTransmitted   = "points_tx"
	statBatchesTransmitFail = "batches_tx_fail"
)

// pointsWriter is an internal interface to make testing easier.
type pointsWriter interface {
	WritePoints(p *cluster.WritePointsRequest) error

@@ -56,9 +42,6 @@ type Service struct {
	batcher *tsdb.PointBatcher
	typesdb gollectd.Types
	addr    net.Addr

	// expvar-based stats.
	statMap *expvar.Map
}

// NewService returns a new instance of the collectd service.

@@ -76,12 +59,6 @@ func NewService(c Config) *Service {
func (s *Service) Open() error {
	s.Logger.Printf("Starting collectd service")

	// Configure expvar monitoring. It's OK to do this even if the service fails to open and
	// should be done before any data could arrive for the service.
	key := strings.Join([]string{"collectd", s.Config.BindAddress}, ":")
	tags := map[string]string{"bind": s.Config.BindAddress}
	s.statMap = influxdb.NewStatistics(key, "collectd", tags)

	if s.Config.BindAddress == "" {
		return fmt.Errorf("bind address is blank")
	} else if s.Config.Database == "" {

@@ -126,7 +103,7 @@ func (s *Service) Open() error {
	s.Logger.Println("Listening on UDP: ", ln.LocalAddr().String())

	// Start the points batcher.
	s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, s.Config.BatchPending, time.Duration(s.Config.BatchDuration))
	s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, time.Duration(s.Config.BatchDuration))
	s.batcher.Start()

	// Create channel and wait group for signalling goroutines to stop.

@@ -205,12 +182,10 @@ func (s *Service) serve() {

		n, _, err := s.ln.ReadFromUDP(buffer)
		if err != nil {
			s.statMap.Add(statReadFail, 1)
			s.Logger.Printf("collectd ReadFromUDP error: %s", err)
			continue
		}
		if n > 0 {
			s.statMap.Add(statBytesReceived, int64(n))
			s.handleMessage(buffer[:n])
		}
	}

@@ -219,7 +194,6 @@ func (s *Service) serve() {
func (s *Service) handleMessage(buffer []byte) {
	packets, err := gollectd.Packets(buffer, s.typesdb)
	if err != nil {
		s.statMap.Add(statPointsParseFail, 1)
		s.Logger.Printf("Collectd parse error: %s", err)
		return
	}

@@ -228,7 +202,6 @@ func (s *Service) handleMessage(buffer []byte) {
		for _, p := range points {
			s.batcher.In() <- p
		}
		s.statMap.Add(statPointsReceived, int64(len(points)))
	}
}

@@ -240,17 +213,15 @@ func (s *Service) writePoints() {
		case <-s.stop:
			return
		case batch := <-s.batcher.Out():
			if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
			req := &cluster.WritePointsRequest{
				Database:         s.Config.Database,
				RetentionPolicy:  s.Config.RetentionPolicy,
				ConsistencyLevel: cluster.ConsistencyLevelAny,
				Points:           batch,
			}); err == nil {
				s.statMap.Add(statBatchesTrasmitted, 1)
				s.statMap.Add(statPointsTransmitted, int64(len(batch)))
			} else {
				s.Logger.Printf("failed to write point batch to database %q: %s", s.Config.Database, err)
				s.statMap.Add(statBatchesTransmitFail, 1)
			}
			if err := s.PointsWriter.WritePoints(req); err != nil {
				s.Logger.Printf("failed to write batch: %s", err)
				continue
			}
		}
	}
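Aside: the statMap removed above is an *expvar.Map handed back by influxdb.NewStatistics, keyed like collectd:<bind address>. A minimal standard-library demonstration of that counter pattern (the map name below is made up):

package main

import (
	"expvar"
	"fmt"
)

func main() {
	// Services bump counters with Add; the monitor reads them back
	// when SHOW STATS runs.
	statMap := expvar.NewMap("collectd:127.0.0.1:25826")
	statMap.Add("points_rx", 12)
	statMap.Add("batches_tx", 1)
	statMap.Do(func(kv expvar.KeyValue) {
		fmt.Println(kv.Key, "=", kv.Value)
	})
}

Because expvar.Map is safe for concurrent use, the receive loop and the write loop can update it without extra locking.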
52 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go generated vendored
@@ -261,32 +261,32 @@ var testData = func() []byte {
}()

var expPoints = []string{
	"entropy_value,host=pf1-62-210-94-173,type=entropy value=288 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=378576896 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=50287988736 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=live value=0 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=live value=50666565632 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776 1414080767000000000",
	"interface_rx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=0 1414080767000000000",
	"interface_tx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=1050 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=tmp value=73728 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=tmp value=50666491904 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875 1414080767000000000",
	"interface_rx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=0 1414080767000000000",
	"interface_tx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=15 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=0 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=5242880 1414080767000000000",
	"interface_rx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000",
	"interface_tx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306 1414080767000000000",
	"entropy_value,host=pf1-62-210-94-173,type=entropy value=288.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0.0 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=378576896.0 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=50287988736.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0.0 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=live value=0.0 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=live value=50666565632.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776.0 1414080767000000000",
	"interface_rx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=0.0 1414080767000000000",
	"interface_tx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=1050.0 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=tmp value=73728.0 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=tmp value=50666491904.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875.0 1414080767000000000",
	"interface_rx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=0.0 1414080767000000000",
	"interface_tx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=15.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704.0 1414080767000000000",
	"df_used,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=0.0 1414080767000000000",
	"df_free,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=5242880.0 1414080767000000000",
	"interface_rx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0.0 1414080767000000000",
	"interface_tx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0.0 1414080767000000000",
	"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306.0 1414080767000000000",
}

// Taken from /usr/share/collectd/types.db on a Ubuntu system
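Aside: the expected strings change from value=288 to value=288.0 — collectd values arrive as float64, and the 0.9.3-era encoder this revert restores evidently serializes integral floats with an explicit .0. The helper below is hypothetical (formatFloat is not a real InfluxDB function), showing one way to get that behavior:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatFloat keeps integral floats unambiguous: strconv drops the
// fraction for whole numbers, so a ".0" is restored when no decimal
// point or exponent survived.
func formatFloat(v float64) string {
	s := strconv.FormatFloat(v, 'f', -1, 64)
	if !strings.ContainsAny(s, ".eE") {
		s += ".0"
	}
	return s
}

func main() {
	fmt.Println("entropy_value value=" + formatFloat(288)) // value=288.0
	fmt.Println("cpu_value value=" + formatFloat(2.4))     // value=2.4
}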
@@ -2,7 +2,6 @@ package continuous_querier

import (
	"errors"
	"expvar"
	"fmt"
	"log"
	"os"

@@ -10,7 +9,6 @@ import (
	"sync"
	"time"

	"github.com/influxdb/influxdb"
	"github.com/influxdb/influxdb/cluster"
	"github.com/influxdb/influxdb/influxql"
	"github.com/influxdb/influxdb/meta"

@@ -22,17 +20,10 @@ const (
	NoChunkingSize = 0
)

// Statistics for the CQ service.
const (
	statQueryOK       = "query_ok"
	statQueryFail     = "query_fail"
	statPointsWritten = "points_written"
)

// ContinuousQuerier represents a service that executes continuous queries.
type ContinuousQuerier interface {
	// Run executes the named query in the named database. Blank database or name matches all.
	Run(database, name string, t time.Time) error
	Run(database, name string) error
}

// queryExecutor is an internal interface to make testing easier.

@@ -52,28 +43,6 @@ type pointsWriter interface {
	WritePoints(p *cluster.WritePointsRequest) error
}

// RunRequest is a request to run one or more CQs.
type RunRequest struct {
	// Now tells the CQ service what the current time is.
	Now time.Time
	// CQs tells the CQ service which queries to run.
	// If nil, all queries will be run.
	CQs []string
}

// matches returns true if the CQ matches one of the requested CQs.
func (rr *RunRequest) matches(cq *meta.ContinuousQueryInfo) bool {
	if rr.CQs == nil {
		return true
	}
	for _, q := range rr.CQs {
		if q == cq.Name {
			return true
		}
	}
	return false
}

// Service manages continuous query execution.
type Service struct {
	MetaStore metaStore

@@ -82,12 +51,10 @@ type Service struct {
	Config      *Config
	RunInterval time.Duration
	// RunCh can be used by clients to signal service to run CQs.
	RunCh          chan *RunRequest
	RunCh          chan struct{}
	Logger         *log.Logger
	loggingEnabled bool
	statMap        *expvar.Map
	// lastRuns maps CQ name to last time it was run.
	mu       sync.RWMutex
	lastRuns map[string]time.Time
	stop     chan struct{}
	wg       *sync.WaitGroup

@@ -98,9 +65,8 @@ func NewService(c Config) *Service {
	s := &Service{
		Config:         &c,
		RunInterval:    time.Second,
		RunCh:          make(chan *RunRequest),
		RunCh:          make(chan struct{}),
		loggingEnabled: c.LogEnabled,
		statMap:        influxdb.NewStatistics("cq", "cq", nil),
		Logger:         log.New(os.Stderr, "[continuous_querier] ", log.LstdFlags),
		lastRuns:       map[string]time.Time{},
	}

@@ -110,6 +76,7 @@ func NewService(c Config) *Service {

// Open starts the service.
func (s *Service) Open() error {

	s.Logger.Println("Starting continuous query service")

	if s.stop != nil {

@@ -145,7 +112,7 @@ func (s *Service) SetLogger(l *log.Logger) {
}

// Run runs the specified continuous query, or all CQs if none is specified.
func (s *Service) Run(database, name string, t time.Time) error {
func (s *Service) Run(database, name string) error {
	var dbs []meta.DatabaseInfo

	if database != "" {

@@ -167,8 +134,6 @@ func (s *Service) Run(database, name string, t time.Time) error {
	}

	// Loop through databases.
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, db := range dbs {
		// Loop through CQs in each DB executing the ones that match name.
		for _, cq := range db.ContinuousQueries {

@@ -180,7 +145,7 @@ func (s *Service) Run(database, name string, t time.Time) error {
	}

	// Signal the background routine to run CQs.
	s.RunCh <- &RunRequest{Now: t}
	s.RunCh <- struct{}{}

	return nil
}

@@ -193,21 +158,21 @@ func (s *Service) backgroundLoop() {
		case <-s.stop:
			s.Logger.Println("continuous query service terminating")
			return
		case req := <-s.RunCh:
		case <-s.RunCh:
			if s.MetaStore.IsLeader() {
				s.Logger.Printf("running continuous queries by request for time: %v", req.Now.UnixNano())
				s.runContinuousQueries(req)
				s.Logger.Print("running continuous queries by request")
				s.runContinuousQueries()
			}
		case <-time.After(s.RunInterval):
			if s.MetaStore.IsLeader() {
				s.runContinuousQueries(&RunRequest{Now: time.Now()})
				s.runContinuousQueries()
			}
		}
	}
}

// runContinuousQueries gets CQs from the meta store and runs them.
func (s *Service) runContinuousQueries(req *RunRequest) {
func (s *Service) runContinuousQueries() {
	// Get list of all databases.
	dbs, err := s.MetaStore.Databases()
	if err != nil {

@@ -218,21 +183,15 @@ func (s *Service) runContinuousQueries(req *RunRequest) {
	for _, db := range dbs {
		// TODO: distribute across nodes
		for _, cq := range db.ContinuousQueries {
			if !req.matches(&cq) {
				continue
			}
			if err := s.ExecuteContinuousQuery(&db, &cq, req.Now); err != nil {
			if err := s.ExecuteContinuousQuery(&db, &cq); err != nil {
				s.Logger.Printf("error executing query: %s: err = %s", cq.Query, err)
				s.statMap.Add(statQueryFail, 1)
			} else {
				s.statMap.Add(statQueryOK, 1)
			}
		}
	}
}

// ExecuteContinuousQuery executes a single CQ.
func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.ContinuousQueryInfo, now time.Time) error {
func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.ContinuousQueryInfo) error {
	// TODO: re-enable stats
	//s.stats.Inc("continuousQueryExecuted")

@@ -243,8 +202,6 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.Conti
	}

	// Get the last time this CQ was run from the service's cache.
	s.mu.Lock()
	defer s.mu.Unlock()
	cq.LastRun = s.lastRuns[cqi.Name]

	// Set the retention policy to default if it wasn't specified in the query.

@@ -262,9 +219,9 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.Conti
	}

	// We're about to run the query so store the time.
	lastRun := time.Now()
	cq.LastRun = lastRun
	s.lastRuns[cqi.Name] = lastRun
	now := time.Now()
	cq.LastRun = now
	s.lastRuns[cqi.Name] = now

	// Get the group by interval.
	interval, err := cq.q.GroupByInterval()

@@ -331,6 +288,12 @@ func (s *Service) runContinuousQueryAndWriteResult(cq *ContinuousQuery) error {
		return err
	}

	// Drain results
	defer func() {
		for _ = range ch {
		}
	}()

	// Read all rows from the result channel.
	points := make([]tsdb.Point, 0, 100)
	for result := range ch {

@@ -339,13 +302,8 @@ func (s *Service) runContinuousQueryAndWriteResult(cq *ContinuousQuery) error {
		}

		for _, row := range result.Series {
			// Get the measurement name for the result.
			measurement := cq.intoMeasurement()
			if measurement == "" {
				measurement = row.Name
			}
			// Convert the result row to points.
			part, err := s.convertRowToPoints(measurement, row)
			part, err := s.convertRowToPoints(cq.intoMeasurement(), row)
			if err != nil {
				log.Println(err)
				continue

@@ -387,9 +345,8 @@ func (s *Service) runContinuousQueryAndWriteResult(cq *ContinuousQuery) error {
		return err
	}

	s.statMap.Add(statPointsWritten, int64(len(points)))
	if s.loggingEnabled {
		s.Logger.Printf("wrote %d point(s) to %s.%s", len(points), cq.intoDB(), cq.intoRP())
		s.Logger.Printf("wrote %d point(s) to %s.%s.%s", len(points), cq.intoDB(), cq.intoRP(), cq.Info.Name)
	}

	return nil

@@ -436,13 +393,7 @@ type ContinuousQuery struct {
	q *influxql.SelectStatement
}

func (cq *ContinuousQuery) intoDB() string {
	if cq.q.Target.Measurement.Database != "" {
		return cq.q.Target.Measurement.Database
	}
	return cq.Database
}

func (cq *ContinuousQuery) intoDB() string          { return cq.q.Target.Measurement.Database }
func (cq *ContinuousQuery) intoRP() string          { return cq.q.Target.Measurement.RetentionPolicy }
func (cq *ContinuousQuery) setIntoRP(rp string)     { cq.q.Target.Measurement.RetentionPolicy = rp }
func (cq *ContinuousQuery) intoMeasurement() string { return cq.q.Target.Measurement.Name }
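Aside: the revert narrows RunCh from chan *RunRequest back to chan struct{} — a payload-free signal. A tiny standard-library sketch of that signalling shape:

package main

import (
	"fmt"
	"time"
)

func main() {
	runCh := make(chan struct{})
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-runCh:
				fmt.Println("running continuous queries by request")
			case <-done:
				return
			}
		}
	}()
	runCh <- struct{}{} // trigger a run; the event itself is the message
	close(done)
	time.Sleep(10 * time.Millisecond)
}

An empty struct occupies zero bytes, so chan struct{} is the idiomatic choice when, as here, the receiver needs no data — only the fact that something happened.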
@@ -5,7 +5,6 @@ import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"
	"sync"
	"testing"
	"time"

@@ -37,8 +36,8 @@ func TestOpenAndClose(t *testing.T) {
	}
}

// Test ExecuteContinuousQuery.
func TestExecuteContinuousQuery(t *testing.T) {
// Test ExecuteContinuousQuery happy path.
func TestExecuteContinuousQuery_HappyPath(t *testing.T) {
	s := NewTestService(t)
	dbis, _ := s.MetaStore.Databases()
	dbi := dbis[0]

@@ -56,53 +55,14 @@ func TestExecuteContinuousQuery(t *testing.T) {
		return nil
	}

	err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
	if err != nil {
		t.Error(err)
	}
}

// Test ExecuteContinuousQuery when INTO measurements are taken from the FROM clause.
func TestExecuteContinuousQuery_ReferenceSource(t *testing.T) {
	s := NewTestService(t)
	dbis, _ := s.MetaStore.Databases()
	dbi := dbis[2]
	cqi := dbi.ContinuousQueries[0]

	rowCnt := 2
	pointCnt := 1
	qe := s.QueryExecutor.(*QueryExecutor)
	qe.Results = []*influxql.Result{genResult(rowCnt, pointCnt)}

	pw := s.PointsWriter.(*PointsWriter)
	pw.WritePointsFn = func(p *cluster.WritePointsRequest) error {
		if len(p.Points) != pointCnt*rowCnt {
			return fmt.Errorf("exp = %d, got = %d", pointCnt, len(p.Points))
		}

		exp := "cpu,host=server01 value=0"
		got := p.Points[0].String()
		if !strings.Contains(got, exp) {
			return fmt.Errorf("\n\tExpected ':MEASUREMENT' to be expanded to the measurement name(s) in the FROM regexp.\n\tqry = %s\n\texp = %s\n\tgot = %s\n", cqi.Query, got, exp)
		}

		exp = "cpu2,host=server01 value=0"
		got = p.Points[1].String()
		if !strings.Contains(got, exp) {
			return fmt.Errorf("\n\tExpected ':MEASUREMENT' to be expanded to the measurement name(s) in the FROM regexp.\n\tqry = %s\n\texp = %s\n\tgot = %s\n", cqi.Query, got, exp)
		}

		return nil
	}

	err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
	err := s.ExecuteContinuousQuery(&dbi, &cqi)
	if err != nil {
		t.Error(err)
	}
}

// Test the service happy path.
func TestContinuousQueryService(t *testing.T) {
func TestService_HappyPath(t *testing.T) {
	s := NewTestService(t)

	pointCnt := 100

@@ -110,7 +70,7 @@ func TestContinuousQueryService(t *testing.T) {
	qe.Results = []*influxql.Result{genResult(1, pointCnt)}

	pw := s.PointsWriter.(*PointsWriter)
	ch := make(chan int, 10)
	ch := make(chan int, 5)
	defer close(ch)
	pw.WritePointsFn = func(p *cluster.WritePointsRequest) error {
		ch <- len(p.Points)

@@ -127,7 +87,7 @@ func TestContinuousQueryService(t *testing.T) {
}

// Test Run method.
func TestContinuousQueryService_Run(t *testing.T) {
func TestService_Run(t *testing.T) {
	s := NewTestService(t)

	// Set RunInterval high so we can trigger using Run method.

@@ -137,7 +97,7 @@ func TestContinuousQueryService_Run(t *testing.T) {
	s.Config.RecomputePreviousN = 0

	done := make(chan struct{})
	expectCallCnt := 3
	expectCallCnt := 2
	callCnt := 0

	// Set a callback for ExecuteQuery.

@@ -152,7 +112,7 @@ func TestContinuousQueryService_Run(t *testing.T) {

	s.Open()
	// Trigger service to run all CQs.
	s.Run("", "", time.Now())
	s.Run("", "")
	// Shouldn't time out.
	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Error(err)

@@ -167,7 +127,7 @@ func TestContinuousQueryService_Run(t *testing.T) {
	expectCallCnt = 1
	callCnt = 0
	s.Open()
	s.Run("db", "cq", time.Now())
	s.Run("db", "cq")
	// Shouldn't time out.
	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Error(err)

@@ -180,7 +140,7 @@ func TestContinuousQueryService_Run(t *testing.T) {
}

// Test service when not the cluster leader (CQs shouldn't run).
func TestContinuousQueryService_NotLeader(t *testing.T) {
func TestService_NotLeader(t *testing.T) {
	s := NewTestService(t)
	// Set RunInterval high so we can test triggering with the RunCh below.
	s.RunInterval = 10 * time.Second

@@ -196,7 +156,7 @@ func TestContinuousQueryService_NotLeader(t *testing.T) {

	s.Open()
	// Trigger service to run CQs.
	s.RunCh <- &RunRequest{Now: time.Now()}
	s.RunCh <- struct{}{}
	// Expect timeout error because ExecuteQuery callback wasn't called.
	if err := wait(done, 100*time.Millisecond); err == nil {
		t.Error(err)

@@ -205,7 +165,7 @@ func TestContinuousQueryService_NotLeader(t *testing.T) {
}

// Test service behavior when meta store fails to get databases.
func TestContinuousQueryService_MetaStoreFailsToGetDatabases(t *testing.T) {
func TestService_MetaStoreFailsToGetDatabases(t *testing.T) {
	s := NewTestService(t)
	// Set RunInterval high so we can test triggering with the RunCh below.
	s.RunInterval = 10 * time.Second

@@ -221,7 +181,7 @@ func TestContinuousQueryService_MetaStoreFailsToGetDatabases(t *testing.T) {

	s.Open()
	// Trigger service to run CQs.
	s.RunCh <- &RunRequest{Now: time.Now()}
	s.RunCh <- struct{}{}
	// Expect timeout error because ExecuteQuery callback wasn't called.
	if err := wait(done, 100*time.Millisecond); err == nil {
		t.Error(err)

@@ -237,21 +197,21 @@ func TestExecuteContinuousQuery_InvalidQueries(t *testing.T) {
	cqi := dbi.ContinuousQueries[0]

	cqi.Query = `this is not a query`
	err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
	err := s.ExecuteContinuousQuery(&dbi, &cqi)
	if err == nil {
		t.Error("expected error but got nil")
	}

	// Valid query but invalid continuous query.
	cqi.Query = `SELECT * FROM cpu`
	err = s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
	err = s.ExecuteContinuousQuery(&dbi, &cqi)
	if err == nil {
		t.Error("expected error but got nil")
	}

	// Group by requires aggregate.
	cqi.Query = `SELECT value INTO other_value FROM cpu WHERE time > now() - 1h GROUP BY time(1s)`
	err = s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
	err = s.ExecuteContinuousQuery(&dbi, &cqi)
	if err == nil {
		t.Error("expected error but got nil")
	}

@@ -267,7 +227,7 @@ func TestExecuteContinuousQuery_QueryExecutor_Error(t *testing.T) {
	dbi := dbis[0]
	cqi := dbi.ContinuousQueries[0]

	err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
	err := s.ExecuteContinuousQuery(&dbi, &cqi)
	if err != expectedErr {
		t.Errorf("exp = %s, got = %v", expectedErr, err)
	}

@@ -292,8 +252,6 @@ func NewTestService(t *testing.T) *Service {
	ms.CreateContinuousQuery("db", "cq", `CREATE CONTINUOUS QUERY cq ON db BEGIN SELECT count(cpu) INTO cpu_count FROM cpu WHERE time > now() - 1h GROUP BY time(1s) END`)
	ms.CreateDatabase("db2", "default")
	ms.CreateContinuousQuery("db2", "cq2", `CREATE CONTINUOUS QUERY cq2 ON db2 BEGIN SELECT mean(value) INTO cpu_mean FROM cpu WHERE time > now() - 10m GROUP BY time(1m) END`)
	ms.CreateDatabase("db3", "default")
	ms.CreateContinuousQuery("db3", "cq3", `CREATE CONTINUOUS QUERY cq3 ON db3 BEGIN SELECT mean(value) INTO "1hAverages".:MEASUREMENT FROM /cpu[0-9]?/ GROUP BY time(10s) END`)

	return s
}

@@ -513,9 +471,6 @@ func genResult(rowCnt, valCnt int) *influxql.Result {
			Columns: []string{"time", "value"},
			Values:  vals,
		}
		if len(rows) > 0 {
			row.Name = fmt.Sprintf("cpu%d", len(rows)+1)
		}
		rows = append(rows, row)
	}
	return &influxql.Result{
@@ -1,57 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: internal/internal.proto
// DO NOT EDIT!

/*
Package internal is a generated protocol buffer package.

It is generated from these files:
	internal/internal.proto

It has these top-level messages:
	Request
	Response
*/
package internal

import proto "github.com/gogo/protobuf/proto"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = math.Inf

type Request struct {
	ShardID          *uint64 `protobuf:"varint,1,req" json:"ShardID,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Request) Reset()         { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage()    {}

func (m *Request) GetShardID() uint64 {
	if m != nil && m.ShardID != nil {
		return *m.ShardID
	}
	return 0
}

type Response struct {
	Error            *string `protobuf:"bytes,1,opt" json:"Error,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Response) Reset()         { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage()    {}

func (m *Response) GetError() string {
	if m != nil && m.Error != nil {
		return *m.Error
	}
	return ""
}

func init() {
}

@@ -1,9 +0,0 @@
package internal;

message Request {
	required uint64 ShardID = 1;
}

message Response {
	optional string Error = 1;
}
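Aside: the copier service below frames every protobuf message as a big-endian uint32 length followed by the body (see its readRequest/writeResponse helpers). A standalone, runnable sketch of that framing, with plain bytes standing in for marshalled messages:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame emits a 4-byte big-endian length prefix, then the body.
func writeFrame(w io.Writer, body []byte) error {
	if err := binary.Write(w, binary.BigEndian, uint32(len(body))); err != nil {
		return err
	}
	_, err := w.Write(body)
	return err
}

// readFrame reads the length prefix, then exactly that many body bytes.
func readFrame(r io.Reader) ([]byte, error) {
	var n uint32
	if err := binary.Read(r, binary.BigEndian, &n); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	var conn bytes.Buffer // stands in for a net.Conn
	if err := writeFrame(&conn, []byte("shard 123")); err != nil {
		panic(err)
	}
	body, err := readFrame(&conn)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", body)
}

The length prefix is what lets the copier hand the remaining raw stream (the shard data) straight to the caller once the framed handshake is done.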
261 Godeps/_workspace/src/github.com/influxdb/influxdb/services/copier/service.go generated vendored
@ -1,261 +0,0 @@
|
|||
package copier
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/influxdb/influxdb/services/copier/internal"
|
||||
"github.com/influxdb/influxdb/tcp"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
)
|
||||
|
||||
//go:generate protoc --gogo_out=. internal/internal.proto
|
||||
|
||||
// MuxHeader is the header byte used for the TCP muxer.
|
||||
const MuxHeader = 6
|
||||
|
||||
// Service manages the listener for the endpoint.
|
||||
type Service struct {
|
||||
wg sync.WaitGroup
|
||||
err chan error
|
||||
|
||||
TSDBStore interface {
|
||||
Shard(id uint64) *tsdb.Shard
|
||||
}
|
||||
|
||||
Listener net.Listener
|
||||
Logger *log.Logger
|
||||
}
|
||||
|
||||
// NewService returns a new instance of Service.
|
||||
func NewService() *Service {
|
||||
return &Service{
|
||||
err: make(chan error),
|
||||
Logger: log.New(os.Stderr, "[copier] ", log.LstdFlags),
|
||||
}
|
||||
}
|
||||
|
||||
// Open starts the service.
|
||||
func (s *Service) Open() error {
|
||||
s.Logger.Println("Starting copier service")
|
||||
|
||||
s.wg.Add(1)
|
||||
go s.serve()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements the Service interface.
|
||||
func (s *Service) Close() error {
|
||||
if s.Listener != nil {
|
||||
s.Listener.Close()
|
||||
}
|
||||
s.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetLogger sets the internal logger to the logger passed in.
|
||||
func (s *Service) SetLogger(l *log.Logger) {
|
||||
s.Logger = l
|
||||
}
|
||||
|
||||
// Err returns a channel for fatal out-of-band errors.
|
||||
func (s *Service) Err() <-chan error { return s.err }
|
||||
|
||||
// serve serves shard copy requests from the listener.
|
||||
func (s *Service) serve() {
|
||||
defer s.wg.Done()
|
||||
|
||||
for {
|
||||
// Wait for next connection.
|
||||
conn, err := s.Listener.Accept()
|
||||
if err != nil && strings.Contains(err.Error(), "connection closed") {
|
||||
s.Logger.Println("copier listener closed")
|
||||
return
|
||||
} else if err != nil {
|
||||
s.Logger.Println("error accepting copier request: ", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle connection in separate goroutine.
|
||||
s.wg.Add(1)
|
||||
go func(conn net.Conn) {
|
||||
defer s.wg.Done()
|
||||
defer conn.Close()
|
||||
			if err := s.handleConn(conn); err != nil {
				s.Logger.Println(err)
			}
		}(conn)
	}
}

// handleConn processes conn. This is run in a separate goroutine.
func (s *Service) handleConn(conn net.Conn) error {
	// Read request from connection.
	req, err := s.readRequest(conn)
	if err != nil {
		return fmt.Errorf("read request: %s", err)
	}

	// Retrieve shard.
	sh := s.TSDBStore.Shard(req.GetShardID())

	// Return error response if the shard doesn't exist.
	if sh == nil {
		if err := s.writeResponse(conn, &internal.Response{
			Error: proto.String(fmt.Sprintf("shard not found: id=%d", req.GetShardID())),
		}); err != nil {
			return fmt.Errorf("write error response: %s", err)
		}
		return nil
	}

	// Write successful response.
	if err := s.writeResponse(conn, &internal.Response{}); err != nil {
		return fmt.Errorf("write response: %s", err)
	}

	// Write shard to response.
	if _, err := sh.WriteTo(conn); err != nil {
		return fmt.Errorf("write shard: %s", err)
	}

	return nil
}

// readRequest reads and unmarshals a Request from r.
func (s *Service) readRequest(r io.Reader) (*internal.Request, error) {
	// Read request length.
	var n uint32
	if err := binary.Read(r, binary.BigEndian, &n); err != nil {
		return nil, fmt.Errorf("read request length: %s", err)
	}

	// Read body.
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, fmt.Errorf("read request: %s", err)
	}

	// Unmarshal request.
	req := &internal.Request{}
	if err := proto.Unmarshal(buf, req); err != nil {
		return nil, fmt.Errorf("unmarshal request: %s", err)
	}

	return req, nil
}

// writeResponse marshals and writes a Response to w.
func (s *Service) writeResponse(w io.Writer, resp *internal.Response) error {
	// Marshal the response to a byte slice.
	buf, err := proto.Marshal(resp)
	if err != nil {
		return fmt.Errorf("marshal error: %s", err)
	}

	// Write response length to writer.
	if err := binary.Write(w, binary.BigEndian, uint32(len(buf))); err != nil {
		return fmt.Errorf("write response length error: %s", err)
	}

	// Write body to writer.
	if _, err := w.Write(buf); err != nil {
		return fmt.Errorf("write body error: %s", err)
	}

	return nil
}

// Client represents a client for connecting remotely to a copier service.
type Client struct {
	host string
}

// NewClient returns a new instance of Client.
func NewClient(host string) *Client {
	return &Client{
		host: host,
	}
}

// ShardReader returns a reader for streaming shard data.
// The returned ReadCloser must be closed by the caller.
func (c *Client) ShardReader(id uint64) (io.ReadCloser, error) {
	// Connect to remote server.
	conn, err := tcp.Dial("tcp", c.host, MuxHeader)
	if err != nil {
		return nil, err
	}

	// Send request to server.
	if err := c.writeRequest(conn, &internal.Request{ShardID: proto.Uint64(id)}); err != nil {
		return nil, fmt.Errorf("write request: %s", err)
	}

	// Read response from the server.
	resp, err := c.readResponse(conn)
	if err != nil {
		return nil, fmt.Errorf("read response: %s", err)
	}

	// If there was an error then return it and close connection.
	if resp.GetError() != "" {
		conn.Close()
		return nil, errors.New(resp.GetError())
	}

	// Return the remaining stream for the caller to consume.
	return conn, nil
}

// writeRequest marshals and writes req to w.
func (c *Client) writeRequest(w io.Writer, req *internal.Request) error {
	// Marshal request.
	buf, err := proto.Marshal(req)
	if err != nil {
		return fmt.Errorf("marshal request: %s", err)
	}

	// Write request length.
	if err := binary.Write(w, binary.BigEndian, uint32(len(buf))); err != nil {
		return fmt.Errorf("write request length: %s", err)
	}

	// Send request to server.
	if _, err := w.Write(buf); err != nil {
		return fmt.Errorf("write request body: %s", err)
	}

	return nil
}

// readResponse reads and unmarshals a Response from r.
func (c *Client) readResponse(r io.Reader) (*internal.Response, error) {
	// Read response length.
	var n uint32
	if err := binary.Read(r, binary.BigEndian, &n); err != nil {
		return nil, fmt.Errorf("read response length: %s", err)
	}

	// Read response.
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, fmt.Errorf("read response: %s", err)
	}

	// Unmarshal response.
	resp := &internal.Response{}
	if err := proto.Unmarshal(buf, resp); err != nil {
		return nil, fmt.Errorf("unmarshal response: %s", err)
	}

	return resp, nil
}
184 Godeps/_workspace/src/github.com/influxdb/influxdb/services/copier/service_test.go (generated, vendored)
@@ -1,184 +0,0 @@
package copier_test

import (
	"bytes"
	"encoding/binary"
	"io"
	"io/ioutil"
	"log"
	"net"
	"os"
	"path/filepath"
	"testing"

	"github.com/influxdb/influxdb/services/copier"
	"github.com/influxdb/influxdb/tcp"
	"github.com/influxdb/influxdb/tsdb"
	_ "github.com/influxdb/influxdb/tsdb/engine"
)

// Ensure the service can return shard data.
func TestService_handleConn(t *testing.T) {
	s := MustOpenService()
	defer s.Close()

	// Mock shard.
	sh := MustOpenShard(123)
	defer sh.Close()
	s.TSDBStore.ShardFn = func(id uint64) *tsdb.Shard {
		if id != 123 {
			t.Fatalf("unexpected id: %d", id)
		}
		return sh.Shard
	}

	// Create client and request shard from service.
	c := copier.NewClient(s.Addr().String())
	r, err := c.ShardReader(123)
	if err != nil {
		t.Fatal(err)
	} else if r == nil {
		t.Fatal("expected reader")
	}
	defer r.Close()

	// Slurp from reader.
	var n uint64
	if err := binary.Read(r, binary.BigEndian, &n); err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		t.Fatal(err)
	}

	// Read database from disk.
	exp, err := ioutil.ReadFile(sh.Path())
	if err != nil {
		t.Fatal(err)
	}

	// Trim expected bytes since bolt won't read beyond the HWM.
	exp = exp[0:len(buf)]

	// Compare disk and reader contents.
	if !bytes.Equal(exp, buf) {
		t.Fatalf("data mismatch: exp=len(%d), got=len(%d)", len(exp), len(buf))
	}
}

// Ensure the service can return an error to the client.
func TestService_handleConn_Error(t *testing.T) {
	s := MustOpenService()
	defer s.Close()

	// Mock missing shard.
	s.TSDBStore.ShardFn = func(id uint64) *tsdb.Shard { return nil }

	// Create client and request shard from service.
	c := copier.NewClient(s.Addr().String())
	r, err := c.ShardReader(123)
	if err == nil || err.Error() != `shard not found: id=123` {
		t.Fatalf("unexpected error: %s", err)
	} else if r != nil {
		t.Fatal("expected nil reader")
	}
}

// Service represents a test wrapper for copier.Service.
type Service struct {
	*copier.Service

	ln        net.Listener
	TSDBStore ServiceTSDBStore
}

// NewService returns a new instance of Service.
func NewService() *Service {
	s := &Service{
		Service: copier.NewService(),
	}
	s.Service.TSDBStore = &s.TSDBStore

	if !testing.Verbose() {
		s.SetLogger(log.New(ioutil.Discard, "", 0))
	}
	return s
}

// MustOpenService returns a new, opened service. Panic on error.
func MustOpenService() *Service {
	// Open randomly assigned port.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	// Start muxer.
	mux := tcp.NewMux()

	// Create new service and attach mux'd listener.
	s := NewService()
	s.ln = ln
	s.Listener = mux.Listen(copier.MuxHeader)
	go mux.Serve(ln)

	if err := s.Open(); err != nil {
		panic(err)
	}

	return s
}

// Close shuts down the service and the attached listener.
func (s *Service) Close() error {
	s.ln.Close()
	err := s.Service.Close()
	return err
}

// Addr returns the address of the service.
func (s *Service) Addr() net.Addr { return s.ln.Addr() }

// ServiceTSDBStore is a mock that implements copier.Service.TSDBStore.
type ServiceTSDBStore struct {
	ShardFn func(id uint64) *tsdb.Shard
}

func (ss *ServiceTSDBStore) Shard(id uint64) *tsdb.Shard { return ss.ShardFn(id) }

// Shard is a test wrapper for tsdb.Shard.
type Shard struct {
	*tsdb.Shard
	path string
}

// MustOpenShard returns a temporary, opened shard.
func MustOpenShard(id uint64) *Shard {
	path, err := ioutil.TempDir("", "copier-")
	if err != nil {
		panic(err)
	}

	sh := &Shard{
		Shard: tsdb.NewShard(id,
			tsdb.NewDatabaseIndex(),
			filepath.Join(path, "data"),
			filepath.Join(path, "wal"),
			tsdb.NewEngineOptions(),
		),
		path: path,
	}
	if err := sh.Open(); err != nil {
		sh.Close()
		panic(err)
	}

	return sh
}

func (sh *Shard) Close() error {
	err := sh.Shard.Close()
	os.RemoveAll(sh.Path())
	return err
}
10 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/README.md (generated, vendored)
@@ -1,10 +1,4 @@
# Configuration

Each Graphite input allows the binding address, target database, and protocol to be set. If the database does not exist, it will be created automatically when the input is initialized. The write-consistency-level can also be set. If any write operations do not meet the configured consistency guarantees, an error will occur and the data will not be indexed. The default consistency-level is `ONE`.

Each Graphite input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, the _pending batch_ factor is 5, and the _batch timeout_ is 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch while still building other batches.
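As a purely illustrative sketch (not part of this diff), the batching behavior above maps onto TOML keys taken from the `toml` tags in `config.go` later in this commit; the `[[graphite]]` section name and the values are assumptions, not recommended settings:

```toml
[[graphite]]
  enabled = true
  protocol = "tcp"
  # Emit a batch once it reaches 1000 points, or 1 second after the
  # first point was added to it, whichever comes first.
  batch-size = 1000
  batch-timeout = "1s"
  # Up to 5 batches may sit in memory while another is transmitting.
  batch-pending = 5
```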
# Parsing Metrics
## Introduction

The graphite plugin allows measurements to be saved using the graphite line protocol. By default, enabling the graphite plugin will allow you to collect metrics and store them using the metric name as the measurement. If you send a metric named `servers.localhost.cpu.loadavg.10`, it will store the full metric name as the measurement with no extracted tags.

@@ -46,7 +40,7 @@ Additional tags can be added to a metric that don't exist on the received metric

`servers.localhost.cpu.loadavg.10`
* Template: `.host.resource.measurement* region=us-west,zone=1a`
* Output: _measurement_ = `loadavg.10` _tags_ = `host=localhost resource=cpu region=us-west zone=1a`
* Output: _measurement_ = `loading.10` _tags_ = `host=localhost resource=cpu region=us-west zone=1a`

## Multiple Templates
26 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config.go (generated, vendored)
@@ -29,9 +29,6 @@ const (
	// DefaultBatchSize is the default Graphite batch size.
	DefaultBatchSize = 1000

	// DefaultBatchPending is the default number of pending Graphite batches.
	DefaultBatchPending = 5

	// DefaultBatchTimeout is the default Graphite batch timeout.
	DefaultBatchTimeout = time.Second
)

@@ -43,7 +40,6 @@ type Config struct {
	Enabled          bool          `toml:"enabled"`
	Protocol         string        `toml:"protocol"`
	BatchSize        int           `toml:"batch-size"`
	BatchPending     int           `toml:"batch-pending"`
	BatchTimeout     toml.Duration `toml:"batch-timeout"`
	ConsistencyLevel string        `toml:"consistency-level"`
	Templates        []string      `toml:"templates"`

@@ -51,6 +47,19 @@ type Config struct {
	Separator string `toml:"separator"`
}

// NewConfig returns a new Config with defaults.
func NewConfig() Config {
	return Config{
		BindAddress:      DefaultBindAddress,
		Database:         DefaultDatabase,
		Protocol:         DefaultProtocol,
		BatchSize:        DefaultBatchSize,
		BatchTimeout:     toml.Duration(DefaultBatchTimeout),
		ConsistencyLevel: DefaultConsistencyLevel,
		Separator:        DefaultSeparator,
	}
}

// WithDefaults takes the given config and returns a new config with any required
// default values set.
func (c *Config) WithDefaults() *Config {

@@ -64,15 +73,6 @@ func (c *Config) WithDefaults() *Config {
	if d.Protocol == "" {
		d.Protocol = DefaultProtocol
	}
	if d.BatchSize == 0 {
		d.BatchSize = DefaultBatchSize
	}
	if d.BatchPending == 0 {
		d.BatchPending = DefaultBatchPending
	}
	if d.BatchTimeout == 0 {
		d.BatchTimeout = toml.Duration(DefaultBatchTimeout)
	}
	if d.ConsistencyLevel == "" {
		d.ConsistencyLevel = DefaultConsistencyLevel
	}
17 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config_test.go (generated, vendored)
@@ -17,7 +17,6 @@ database = "mydb"
enabled = true
protocol = "tcp"
batch-size=100
batch-pending=77
batch-timeout="1s"
consistency-level="one"
templates=["servers.* .host.measurement*"]

@@ -37,8 +36,6 @@ tags=["region=us-east"]
		t.Fatalf("unexpected graphite protocol: %s", c.Protocol)
	} else if c.BatchSize != 100 {
		t.Fatalf("unexpected graphite batch size: %d", c.BatchSize)
	} else if c.BatchPending != 77 {
		t.Fatalf("unexpected graphite batch pending: %d", c.BatchPending)
	} else if time.Duration(c.BatchTimeout) != time.Second {
		t.Fatalf("unexpected graphite batch timeout: %v", c.BatchTimeout)
	} else if c.ConsistencyLevel != "one" {

@@ -54,7 +51,7 @@ tags=["region=us-east"]
}

func TestConfigValidateEmptyTemplate(t *testing.T) {
	c := &graphite.Config{}
	c := graphite.NewConfig()
	c.Templates = []string{""}
	if err := c.Validate(); err == nil {
		t.Errorf("config validate expected error. got nil")

@@ -67,7 +64,7 @@ func TestConfigValidateEmptyTemplate(t *testing.T) {
}

func TestConfigValidateTooManyField(t *testing.T) {
	c := &graphite.Config{}
	c := graphite.NewConfig()
	c.Templates = []string{"a measurement b c"}
	if err := c.Validate(); err == nil {
		t.Errorf("config validate expected error. got nil")

@@ -75,7 +72,7 @@ func TestConfigValidateTooManyField(t *testing.T) {
}

func TestConfigValidateTemplatePatterns(t *testing.T) {
	c := &graphite.Config{}
	c := graphite.NewConfig()
	c.Templates = []string{"*measurement"}
	if err := c.Validate(); err == nil {
		t.Errorf("config validate expected error. got nil")

@@ -88,7 +85,7 @@ func TestConfigValidateTemplatePatterns(t *testing.T) {
}

func TestConfigValidateFilter(t *testing.T) {
	c := &graphite.Config{}
	c := graphite.NewConfig()
	c.Templates = []string{".server measurement*"}
	if err := c.Validate(); err == nil {
		t.Errorf("config validate expected error. got nil")

@@ -106,7 +103,7 @@ func TestConfigValidateFilter(t *testing.T) {
}

func TestConfigValidateTemplateTags(t *testing.T) {
	c := &graphite.Config{}
	c := graphite.NewConfig()
	c.Templates = []string{"*.server measurement* foo"}
	if err := c.Validate(); err == nil {
		t.Errorf("config validate expected error. got nil")

@@ -129,7 +126,7 @@ func TestConfigValidateTemplateTags(t *testing.T) {
}

func TestConfigValidateDefaultTags(t *testing.T) {
	c := &graphite.Config{}
	c := graphite.NewConfig()
	c.Tags = []string{"foo"}
	if err := c.Validate(); err == nil {
		t.Errorf("config validate expected error. got nil")

@@ -152,7 +149,7 @@ func TestConfigValidateDefaultTags(t *testing.T) {
}

func TestConfigValidateFilterDuplicates(t *testing.T) {
	c := &graphite.Config{}
	c := graphite.NewConfig()
	c.Templates = []string{"foo measurement*", "foo .host.measurement"}
	if err := c.Validate(); err == nil {
		t.Errorf("config validate expected error. got nil")
@@ -11,11 +11,7 @@ import (
	"github.com/influxdb/influxdb/tsdb"
)

var (
	defaultTemplate *template
	MinDate         = time.Date(1901, 12, 13, 0, 0, 0, 0, time.UTC)
	MaxDate         = time.Date(2038, 1, 19, 0, 0, 0, 0, time.UTC)
)
var defaultTemplate *template

func init() {
	var err error

@@ -128,9 +124,6 @@ func (p *Parser) Parse(line string) (tsdb.Point, error) {
		if unixTime != float64(-1) {
			// Check if we have fractional seconds
			timestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
			if timestamp.Before(MinDate) || timestamp.After(MaxDate) {
				return nil, fmt.Errorf("timestamp out of range")
			}
		}
	}
121 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service.go (generated, vendored)
@@ -2,7 +2,6 @@ package graphite
import (
	"bufio"
	"expvar"
	"fmt"
	"log"
	"math"

@@ -12,10 +11,8 @@ import (
	"sync"
	"time"

	"github.com/influxdb/influxdb"
	"github.com/influxdb/influxdb/cluster"
	"github.com/influxdb/influxdb/meta"
	"github.com/influxdb/influxdb/monitor"
	"github.com/influxdb/influxdb/tsdb"
)

@@ -24,70 +21,11 @@ const (
	leaderWaitTimeout = 30 * time.Second
)

// Initialize the graphite stats and diags
func init() {
	tcpConnections = make(map[string]*tcpConnectionDiag)
}

// Package-level tracking of connections for diagnostics.
var monitorOnce sync.Once

type tcpConnectionDiag struct {
	conn        net.Conn
	connectTime time.Time
}

var tcpConnectionsMu sync.Mutex
var tcpConnections map[string]*tcpConnectionDiag

func addConnection(c net.Conn) {
	tcpConnectionsMu.Lock()
	defer tcpConnectionsMu.Unlock()
	tcpConnections[c.RemoteAddr().String()] = &tcpConnectionDiag{
		conn:        c,
		connectTime: time.Now().UTC(),
	}
}
func removeConnection(c net.Conn) {
	tcpConnectionsMu.Lock()
	defer tcpConnectionsMu.Unlock()
	delete(tcpConnections, c.RemoteAddr().String())
}

func handleDiagnostics() (*monitor.Diagnostic, error) {
	tcpConnectionsMu.Lock()
	defer tcpConnectionsMu.Unlock()

	d := &monitor.Diagnostic{
		Columns: []string{"local", "remote", "connect time"},
		Rows:    make([][]interface{}, 0, len(tcpConnections)),
	}
	for _, v := range tcpConnections {
		_ = v
		d.Rows = append(d.Rows, []interface{}{v.conn.LocalAddr().String(), v.conn.RemoteAddr().String(), v.connectTime})
	}
	return d, nil
}

// statistics gathered by the graphite package.
const (
	statPointsReceived      = "points_rx"
	statBytesReceived       = "bytes_rx"
	statPointsParseFail     = "points_parse_fail"
	statPointsUnsupported   = "points_unsupported_fail"
	statBatchesTrasmitted   = "batches_tx"
	statPointsTransmitted   = "points_tx"
	statBatchesTransmitFail = "batches_tx_fail"
	statConnectionsActive   = "connections_active"
	statConnectionsHandled  = "connections_handled"
)

type Service struct {
	bindAddress      string
	database         string
	protocol         string
	batchSize        int
	batchPending     int
	batchTimeout     time.Duration
	consistencyLevel cluster.ConsistencyLevel

@@ -95,18 +33,13 @@ type Service struct {
	parser *Parser

	logger  *log.Logger
	statMap *expvar.Map

	ln      net.Listener
	addr    net.Addr
	udpConn *net.UDPConn

	wg   sync.WaitGroup
	done chan struct{}

	Monitor interface {
		RegisterDiagnosticsClient(name string, client monitor.DiagsClient) error
	}
	PointsWriter interface {
		WritePoints(p *cluster.WritePointsRequest) error
	}

@@ -126,7 +59,6 @@ func NewService(c Config) (*Service, error) {
		database:     d.Database,
		protocol:     d.Protocol,
		batchSize:    d.BatchSize,
		batchPending: d.BatchPending,
		batchTimeout: time.Duration(d.BatchTimeout),
		logger:       log.New(os.Stderr, "[graphite] ", log.LstdFlags),
		done:         make(chan struct{}),

@@ -155,21 +87,6 @@ func NewService(c Config) (*Service, error) {
func (s *Service) Open() error {
	s.logger.Printf("Starting graphite service, batch size %d, batch timeout %s", s.batchSize, s.batchTimeout)

	// Configure expvar monitoring. It's OK to do this even if the service fails to open and
	// should be done before any data could arrive for the service.
	key := strings.Join([]string{"graphite", s.protocol, s.bindAddress}, ":")
	tags := map[string]string{"proto": s.protocol, "bind": s.bindAddress}
	s.statMap = influxdb.NewStatistics(key, "graphite", tags)

	// One Graphite service hooks up diagnostics for all Graphite functionality.
	monitorOnce.Do(func() {
		if s.Monitor == nil {
			s.logger.Println("no monitor service available, no monitoring will be performed")
			return
		}
		s.Monitor.RegisterDiagnosticsClient("graphite", monitor.DiagsClientFunc(handleDiagnostics))
	})

	if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
		s.logger.Printf("Failed to detect a cluster leader: %s", err.Error())
		return err

@@ -180,7 +97,7 @@ func (s *Service) Open() error {
		return err
	}

	s.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchPending, s.batchTimeout)
	s.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchTimeout)
	s.batcher.Start()

	// Start processing batches.

@@ -208,9 +125,6 @@ func (s *Service) Close() error {
	if s.ln != nil {
		s.ln.Close()
	}
	if s.udpConn != nil {
		s.udpConn.Close()
	}

	s.batcher.Stop()
	close(s.done)

@@ -260,13 +174,8 @@ func (s *Service) openTCPServer() (net.Addr, error) {

// handleTCPConnection services an individual TCP connection for the Graphite input.
func (s *Service) handleTCPConnection(conn net.Conn) {
	defer s.wg.Done()
	defer conn.Close()
	defer removeConnection(conn)
	defer s.statMap.Add(statConnectionsActive, -1)
	addConnection(conn)
	s.statMap.Add(statConnectionsActive, 1)
	s.statMap.Add(statConnectionsHandled, 1)
	defer s.wg.Done()

	reader := bufio.NewReader(conn)

@@ -280,8 +189,6 @@ func (s *Service) handleTCPConnection(conn net.Conn) {
		// Trim the buffer, even though there should be no padding
		line := strings.TrimSpace(string(buf))

		s.statMap.Add(statPointsReceived, 1)
		s.statMap.Add(statBytesReceived, int64(len(buf)))
		s.handleLine(line)
	}
}

@@ -293,7 +200,7 @@ func (s *Service) openUDPServer() (net.Addr, error) {
		return nil, err
	}

	s.udpConn, err = net.ListenUDP("udp", addr)
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		return nil, err
	}

@@ -303,33 +210,27 @@ func (s *Service) openUDPServer() (net.Addr, error) {
	go func() {
		defer s.wg.Done()
		for {
			n, _, err := s.udpConn.ReadFromUDP(buf)
			n, _, err := conn.ReadFromUDP(buf)
			if err != nil {
				s.udpConn.Close()
				conn.Close()
				return
			}

			lines := strings.Split(string(buf[:n]), "\n")
			for _, line := range lines {
			for _, line := range strings.Split(string(buf[:n]), "\n") {
				s.handleLine(line)
			}
			s.statMap.Add(statPointsReceived, int64(len(lines)))
			s.statMap.Add(statBytesReceived, int64(n))
		}
	}()
	return s.udpConn.LocalAddr(), nil
	return conn.LocalAddr(), nil
}

func (s *Service) handleLine(line string) {
	if line == "" {
		return
	}

	// Parse it.
	point, err := s.parser.Parse(line)
	if err != nil {
		s.logger.Printf("unable to parse line: %s", err)
		s.statMap.Add(statPointsParseFail, 1)
		return
	}

@@ -338,7 +239,6 @@ func (s *Service) handleLine(line string) {
		// Drop NaN and +/-Inf data points since they are not supported values
		if math.IsNaN(f) || math.IsInf(f, 0) {
			s.logger.Printf("dropping unsupported value: '%v'", line)
			s.statMap.Add(statPointsUnsupported, 1)
			return
		}
	}

@@ -357,14 +257,9 @@ func (s *Service) processBatches(batcher *tsdb.PointBatcher) {
				RetentionPolicy:  "",
				ConsistencyLevel: s.consistencyLevel,
				Points:           batch,
			}); err == nil {
				s.statMap.Add(statBatchesTrasmitted, 1)
				s.statMap.Add(statPointsTransmitted, int64(len(batch)))
			} else {
			}); err != nil {
				s.logger.Printf("failed to write point batch to database %q: %s", s.database, err)
				s.statMap.Add(statBatchesTransmitFail, 1)
			}

		case <-s.done:
			return
		}
4 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service_test.go (generated, vendored)
@@ -19,7 +19,7 @@ func Test_ServerGraphiteTCP(t *testing.T) {
	now := time.Now().UTC().Round(time.Second)

	config := graphite.Config{}
	config := graphite.NewConfig()
	config.Database = "graphitedb"
	config.BatchSize = 0 // No batching.
	config.BatchTimeout = toml.Duration(time.Second)

@@ -87,7 +87,7 @@ func Test_ServerGraphiteUDP(t *testing.T) {
	now := time.Now().UTC().Round(time.Second)

	config := graphite.Config{}
	config := graphite.NewConfig()
	config.Database = "graphitedb"
	config.BatchSize = 0 // No batching.
	config.BatchTimeout = toml.Duration(time.Second)
@@ -5,7 +5,6 @@ import (
	"compress/gzip"
	"encoding/json"
	"errors"
	"expvar"
	"fmt"
	"io"
	"io/ioutil"

@@ -74,18 +73,16 @@ type Handler struct {
	Logger         *log.Logger
	loggingEnabled bool // Log every HTTP access.
	WriteTrace     bool // Detailed logging of write path
	statMap        *expvar.Map
}

// NewHandler returns a new instance of handler with routes.
func NewHandler(requireAuthentication, loggingEnabled, writeTrace bool, statMap *expvar.Map) *Handler {
func NewHandler(requireAuthentication, loggingEnabled, writeTrace bool) *Handler {
	h := &Handler{
		mux: pat.New(),
		requireAuthentication: requireAuthentication,
		Logger:         log.New(os.Stderr, "[http] ", log.LstdFlags),
		loggingEnabled: loggingEnabled,
		WriteTrace:     writeTrace,
		statMap:        statMap,
	}

	h.SetRoutes([]route{

@@ -152,8 +149,6 @@ func (h *Handler) SetRoutes(routes []route) {

// ServeHTTP responds to HTTP request to the handler.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.statMap.Add(statRequest, 1)

	// FIXME(benbjohnson): Add pprof enabled flag.
	if strings.HasPrefix(r.URL.Path, "/debug/pprof") {
		switch r.URL.Path {

@@ -166,16 +161,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		default:
			pprof.Index(w, r)
		}
	} else if strings.HasPrefix(r.URL.Path, "/debug/vars") {
		serveExpvar(w, r)
	} else {
		h.mux.ServeHTTP(w, r)
		return
	}

	h.mux.ServeHTTP(w, r)
}

func (h *Handler) serveProcessContinuousQueries(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
	h.statMap.Add(statCQRequest, 1)

	// If the continuous query service isn't configured, return 404.
	if h.ContinuousQuerier == nil {
		w.WriteHeader(http.StatusNotImplemented)

@@ -188,25 +180,9 @@ func (h *Handler) serveProcessContinuousQueries(w http.ResponseWriter, r *http.R
	db := q.Get("db")
	// Get the name of the CQ to run (blank means run all).
	name := q.Get("name")
	// Get the time for which the CQ should be evaluated.
	var t time.Time
	var err error
	s := q.Get("time")
	if s != "" {
		t, err = time.Parse(time.RFC3339Nano, s)
		if err != nil {
			// Try parsing as an int64 nanosecond timestamp.
			i, err := strconv.ParseInt(s, 10, 64)
			if err != nil {
				w.WriteHeader(http.StatusBadRequest)
				return
			}
			t = time.Unix(0, i)
		}
	}

	// Pass the request to the CQ service.
	if err := h.ContinuousQuerier.Run(db, name, t); err != nil {
	if err := h.ContinuousQuerier.Run(db, name); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

@@ -216,8 +192,6 @@ func (h *Handler) serveProcessContinuousQueries(w http.ResponseWriter, r *http.R

// serveQuery parses an incoming query and, if valid, executes the query.
func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
	h.statMap.Add(statQueryRequest, 1)

	q := r.URL.Query()
	pretty := q.Get("pretty") == "true"

@@ -296,10 +270,9 @@ func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.

		// Write out result immediately if chunked.
		if chunked {
			n, _ := w.Write(MarshalJSON(Response{
			w.Write(MarshalJSON(Response{
				Results: []*influxql.Result{r},
			}, pretty))
			h.statMap.Add(statQueryRequestBytesTransmitted, int64(n))
			w.(http.Flusher).Flush()
			continue
		}

@@ -336,13 +309,11 @@ func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.

	// If it's not chunked we buffered everything in memory, so write it out
	if !chunked {
		n, _ := w.Write(MarshalJSON(resp, pretty))
		h.statMap.Add(statQueryRequestBytesTransmitted, int64(n))
		w.Write(MarshalJSON(resp, pretty))
	}
}

func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
	h.statMap.Add(statWriteRequest, 1)

	// Handle gzip decoding of the body
	body := r.Body

@@ -364,7 +335,6 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
		h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
		return
	}
	h.statMap.Add(statWriteRequestBytesReceived, int64(len(b)))
	if h.WriteTrace {
		h.Logger.Printf("write body received by handler: %s", string(b))
	}

@@ -427,16 +397,13 @@ func (h *Handler) serveWriteJSON(w http.ResponseWriter, r *http.Request, body []
		RetentionPolicy:  bp.RetentionPolicy,
		ConsistencyLevel: cluster.ConsistencyLevelOne,
		Points:           points,
	}); err != nil {
		h.statMap.Add(statPointsWrittenFail, int64(len(points)))
		if influxdb.IsClientError(err) {
			h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
		} else {
			h.writeError(w, influxql.Result{Err: err}, http.StatusInternalServerError)
		}
	}); influxdb.IsClientError(err) {
		resultError(w, influxql.Result{Err: err}, http.StatusBadRequest)
		return
	} else if err != nil {
		resultError(w, influxql.Result{Err: err}, http.StatusInternalServerError)
		return
	}
	h.statMap.Add(statPointsWrittenOK, int64(len(points)))

	w.WriteHeader(http.StatusNoContent)
}

@@ -527,16 +494,13 @@ func (h *Handler) serveWriteLine(w http.ResponseWriter, r *http.Request, body []
		ConsistencyLevel: consistency,
		Points:           points,
	}); influxdb.IsClientError(err) {
		h.statMap.Add(statPointsWrittenFail, int64(len(points)))
		h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
		return
	} else if err != nil {
		h.statMap.Add(statPointsWrittenFail, int64(len(points)))
		h.writeError(w, influxql.Result{Err: err}, http.StatusInternalServerError)
		return
	}

	h.statMap.Add(statPointsWrittenOK, int64(len(points)))
	w.WriteHeader(http.StatusNoContent)
}

@@ -547,7 +511,6 @@ func (h *Handler) serveOptions(w http.ResponseWriter, r *http.Request) {

// servePing returns a simple response to let the client know the server is running.
func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {
	h.statMap.Add(statPingRequest, 1)
	w.WriteHeader(http.StatusNoContent)
}

@@ -606,21 +569,6 @@ type Batch struct {
	Points []Point `json:"points"`
}

// serveExpvar serves registered expvar information over HTTP.
func serveExpvar(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprintf(w, "{\n")
	first := true
	expvar.Do(func(kv expvar.KeyValue) {
		if !first {
			fmt.Fprintf(w, ",\n")
		}
		first = false
		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
	})
	fmt.Fprintf(w, "\n}\n")
}

// httpError writes an error to the client in a standard format.
func httpError(w http.ResponseWriter, error string, pretty bool, code int) {
	w.Header().Add("content-type", "application/json")

@@ -686,19 +634,16 @@ func authenticate(inner func(http.ResponseWriter, *http.Request, *meta.UserInfo)
		if requireAuthentication && len(uis) > 0 {
			username, password, err := parseCredentials(r)
			if err != nil {
				h.statMap.Add(statAuthFail, 1)
				httpError(w, err.Error(), false, http.StatusUnauthorized)
				return
			}
			if username == "" {
				h.statMap.Add(statAuthFail, 1)
				httpError(w, "username required", false, http.StatusUnauthorized)
				return
			}

			user, err = h.MetaStore.Authenticate(username, password)
			if err != nil {
				h.statMap.Add(statAuthFail, 1)
				httpError(w, err.Error(), false, http.StatusUnauthorized)
				return
			}
4 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler_test.go (generated, vendored)
@@ -12,7 +12,6 @@ import (
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/client"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
|
@ -366,9 +365,8 @@ type Handler struct {
|
|||
|
||||
// NewHandler returns a new instance of Handler.
|
||||
func NewHandler(requireAuthentication bool) *Handler {
|
||||
statMap := influxdb.NewStatistics("httpd", "httpd", nil)
|
||||
h := &Handler{
|
||||
Handler: httpd.NewHandler(requireAuthentication, true, false, statMap),
|
||||
Handler: httpd.NewHandler(requireAuthentication, true, false),
|
||||
}
|
||||
h.Handler.MetaStore = &h.MetaStore
|
||||
h.Handler.QueryExecutor = &h.QueryExecutor
|
||||
|
|
|
@@ -2,29 +2,12 @@ package httpd

import (
	"crypto/tls"
	"expvar"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"strings"

	"github.com/influxdb/influxdb"
)

// statistics gathered by the httpd package.
const (
	statRequest                      = "req"                 // Number of HTTP requests served
	statCQRequest                    = "cq_req"              // Number of CQ-execute requests served
	statQueryRequest                 = "query_req"           // Number of query requests served
	statWriteRequest                 = "write_req"           // Number of write requests served
	statPingRequest                  = "ping_req"            // Number of ping requests served
	statWriteRequestBytesReceived    = "write_req_bytes"     // Sum of all bytes in write requests
	statQueryRequestBytesTransmitted = "query_resp_bytes"    // Sum of all bytes returned in query responses
	statPointsWrittenOK              = "points_written_ok"   // Number of points written OK
	statPointsWrittenFail            = "points_written_fail" // Number of points that failed to be written
	statAuthFail                     = "auth_fail"           // Number of authentication failures
)

// Service manages the listener and handler for an HTTP endpoint.

@@ -38,17 +21,10 @@ type Service struct {
	Handler *Handler

	Logger  *log.Logger
	statMap *expvar.Map
}

// NewService returns a new instance of Service.
func NewService(c Config) *Service {
	// Configure expvar monitoring. It's OK to do this even if the service fails to open and
	// should be done before any data could arrive for the service.
	key := strings.Join([]string{"httpd", c.BindAddress}, ":")
	tags := map[string]string{"bind": c.BindAddress}
	statMap := influxdb.NewStatistics(key, "httpd", tags)

	s := &Service{
		addr:  c.BindAddress,
		https: c.HttpsEnabled,

@@ -58,7 +34,6 @@ func NewService(c Config) *Service {
			c.AuthEnabled,
			c.LogEnabled,
			c.WriteTracing,
			statMap,
		),
		Logger: log.New(os.Stderr, "[httpd] ", log.LstdFlags),
	}
25 Godeps/_workspace/src/github.com/influxdb/influxdb/services/monitor/config.go (generated, vendored, Normal file)
@@ -0,0 +1,25 @@
package monitor

import (
	"time"

	"github.com/influxdb/influxdb/toml"
)

const (
	// DefaultStatisticsWriteInterval is the interval of time at which internal stats are written
	DefaultStatisticsWriteInterval = 1 * time.Minute
)

// Config represents a configuration for the monitor.
type Config struct {
	Enabled       bool          `toml:"enabled"`
	WriteInterval toml.Duration `toml:"write-interval"`
}

func NewConfig() Config {
	return Config{
		Enabled:       false,
		WriteInterval: toml.Duration(DefaultStatisticsWriteInterval),
	}
}
83 Godeps/_workspace/src/github.com/influxdb/influxdb/services/monitor/monitor.go (generated, vendored, Normal file)
@@ -0,0 +1,83 @@
package monitor

// Monitor represents a TSDB monitoring service.
type Monitor struct {
	Store interface{}
}

func (m *Monitor) Open() error  { return nil }
func (m *Monitor) Close() error { return nil }

// StartSelfMonitoring starts a goroutine which monitors the InfluxDB server
// itself and stores the results in the specified database at a given interval.
/*
func (s *Server) StartSelfMonitoring(database, retention string, interval time.Duration) error {
	if interval == 0 {
		return fmt.Errorf("statistics check interval must be non-zero")
	}

	go func() {
		tick := time.NewTicker(interval)
		for {
			<-tick.C

			// Create the batch and tags
			tags := map[string]string{"serverID": strconv.FormatUint(s.ID(), 10)}
			if h, err := os.Hostname(); err == nil {
				tags["host"] = h
			}
			batch := pointsFromStats(s.stats, tags)

			// Shard-level stats.
			tags["shardID"] = strconv.FormatUint(s.id, 10)
			s.mu.RLock()
			for _, sh := range s.shards {
				if !sh.HasDataNodeID(s.id) {
					// No stats for non-local shards.
					continue
				}
				batch = append(batch, pointsFromStats(sh.stats, tags)...)
			}
			s.mu.RUnlock()

			// Server diagnostics.
			for _, row := range s.DiagnosticsAsRows() {
				points, err := s.convertRowToPoints(row.Name, row)
				if err != nil {
					s.Logger.Printf("failed to write diagnostic row for %s: %s", row.Name, err.Error())
					continue
				}
				for _, p := range points {
					p.AddTag("serverID", strconv.FormatUint(s.ID(), 10))
				}
				batch = append(batch, points...)
			}

			s.WriteSeries(database, retention, batch)
		}
	}()
	return nil
}

// Function for local use turns stats into a slice of points
func pointsFromStats(st *Stats, tags map[string]string) []tsdb.Point {
	var points []tsdb.Point
	now := time.Now()
	st.Walk(func(k string, v int64) {
		point := tsdb.NewPoint(
			st.name+"_"+k,
			make(map[string]string),
			map[string]interface{}{"value": int(v)},
			now,
		)
		// Specifically create a new map.
		for k, v := range tags {
			tags[k] = v
			point.AddTag(k, v)
		}
		points = append(points, point)
	})

	return points
}
*/
@@ -6,5 +6,3 @@ InfluxDB supports both the telnet and HTTP openTSDB protocol. This means that In
The openTSDB input allows the binding address, target database, and target retention policy within that database, to be set. If the database does not exist, it will be created automatically when the input is initialized. If you also decide to configure the retention policy (without configuration, the input will use the auto-created default retention policy), both the database and retention policy must already exist.

The write-consistency-level can also be set. If any write operations do not meet the configured consistency guarantees, an error will occur and the data will not be indexed. The default consistency-level is `ONE`.

The openTSDB input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, the _pending batch_ factor is 5, and the _batch timeout_ is 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch while still building other batches.
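As with the Graphite input, these settings correspond to `toml` keys on the openTSDB `Config` struct shown below. A hedged, illustrative sketch only; the `[opentsdb]` section name, the `enabled` and `bind-address` keys, and all values are assumptions rather than settings taken from this diff:

```toml
[opentsdb]
  enabled = true
  bind-address = ":4242"    # DefaultBindAddress in config.go
  consistency-level = "one" # write-consistency-level
  # Write a 1000-point batch, or whatever has accumulated after 1 second.
  batch-size = 1000
  batch-timeout = "1s"
  batch-pending = 5         # pending batch factor
```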
21 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/config.go (generated, vendored)
@@ -1,11 +1,5 @@
package opentsdb

import (
	"time"

	"github.com/influxdb/influxdb/toml"
)

const (
	// DefaultBindAddress is the default address that the service binds to.
	DefaultBindAddress = ":4242"

@@ -18,15 +12,6 @@ const (

	// DefaultConsistencyLevel is the default write consistency level.
	DefaultConsistencyLevel = "one"

	// DefaultBatchSize is the default openTSDB batch size.
	DefaultBatchSize = 1000

	// DefaultBatchTimeout is the default openTSDB batch timeout.
	DefaultBatchTimeout = time.Second

	// DefaultBatchPending is the default number of batches that can be in the queue.
	DefaultBatchPending = 5
)

type Config struct {

@@ -37,9 +22,6 @@ type Config struct {
	ConsistencyLevel string        `toml:"consistency-level"`
	TLSEnabled       bool          `toml:"tls-enabled"`
	Certificate      string        `toml:"certificate"`
	BatchSize        int           `toml:"batch-size"`
	BatchPending     int           `toml:"batch-pending"`
	BatchTimeout     toml.Duration `toml:"batch-timeout"`
}

func NewConfig() Config {

@@ -50,8 +32,5 @@ func NewConfig() Config {
		ConsistencyLevel: DefaultConsistencyLevel,
		TLSEnabled:       false,
		Certificate:      "/etc/ssl/influxdb.pem",
		BatchSize:        DefaultBatchSize,
		BatchPending:     DefaultBatchPending,
		BatchTimeout:     toml.Duration(DefaultBatchTimeout),
	}
}
115 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/service.go (generated, vendored)
@@ -4,7 +4,6 @@ import (
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"expvar"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
|
@ -16,7 +15,6 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
|
@ -24,32 +22,12 @@ import (
|
|||
|
||||
const leaderWaitTimeout = 30 * time.Second
|
||||
|
||||
// statistics gathered by the openTSDB package.
|
||||
const (
|
||||
statHTTPConnectionsHandled = "http_connections_handled"
|
||||
statTelnetConnectionsActive = "tl_connections_active"
|
||||
statTelnetConnectionsHandled = "tl_connections_handled"
|
||||
statTelnetPointsReceived = "tl_points_rx"
|
||||
statTelnetBytesReceived = "tl_bytes_rx"
|
||||
statTelnetReadError = "tl_read_err"
|
||||
statTelnetBadLine = "tl_bad_line"
|
||||
statTelnetBadTime = "tl_bad_time"
|
||||
statTelnetBadTag = "tl_bad_tag"
|
||||
statTelnetBadFloat = "tl_bad_float"
|
||||
statBatchesTrasmitted = "batches_tx"
|
||||
statPointsTransmitted = "points_tx"
|
||||
statBatchesTransmitFail = "batches_tx_fail"
|
||||
statConnectionsActive = "connections_active"
|
||||
statConnectionsHandled = "connections_handled"
|
||||
)
|
||||
|
||||
// Service manages the listener and handler for an HTTP endpoint.
|
||||
type Service struct {
|
||||
ln net.Listener // main listener
|
||||
httpln *chanListener // http channel-based listener
|
||||
|
||||
wg sync.WaitGroup
|
||||
done chan struct{}
|
||||
err chan error
|
||||
tls bool
|
||||
cert string
|
||||
|
@ -67,14 +45,7 @@ type Service struct {
|
|||
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
|
||||
}
|
||||
|
||||
// Points received over the telnet protocol are batched.
|
||||
batchSize int
|
||||
batchPending int
|
||||
batchTimeout time.Duration
|
||||
batcher *tsdb.PointBatcher
|
||||
|
||||
Logger *log.Logger
|
||||
statMap *expvar.Map
|
||||
}
|
||||
|
||||
// NewService returns a new instance of Service.
|
||||
|
@ -85,7 +56,6 @@ func NewService(c Config) (*Service, error) {
|
|||
}
|
||||
|
||||
s := &Service{
|
||||
done: make(chan struct{}),
|
||||
tls: c.TLSEnabled,
|
||||
cert: c.Certificate,
|
||||
err: make(chan error),
|
||||
|
@ -93,9 +63,6 @@ func NewService(c Config) (*Service, error) {
|
|||
Database: c.Database,
|
||||
RetentionPolicy: c.RetentionPolicy,
|
||||
ConsistencyLevel: consistencyLevel,
|
||||
batchSize: c.BatchSize,
|
||||
batchPending: c.BatchPending,
|
||||
batchTimeout: time.Duration(c.BatchTimeout),
|
||||
Logger: log.New(os.Stderr, "[opentsdb] ", log.LstdFlags),
|
||||
}
|
||||
return s, nil
|
||||
|
@ -105,12 +72,6 @@ func NewService(c Config) (*Service, error) {
|
|||
func (s *Service) Open() error {
|
||||
s.Logger.Println("Starting OpenTSDB service")
|
||||
|
||||
// Configure expvar monitoring. It's OK to do this even if the service fails to open and
|
||||
// should be done before any data could arrive for the service.
|
||||
key := strings.Join([]string{"opentsdb", s.BindAddress}, ":")
|
||||
tags := map[string]string{"bind": s.BindAddress}
|
||||
s.statMap = influxdb.NewStatistics(key, "opentsdb", tags)
|
||||
|
||||
if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
|
||||
s.Logger.Printf("Failed to detect a cluster leader: %s", err.Error())
|
||||
return err
|
||||
|
@ -121,13 +82,6 @@ func (s *Service) Open() error {
|
|||
return err
|
||||
}
|
||||
|
||||
s.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchPending, s.batchTimeout)
|
||||
s.batcher.Start()
|
||||
|
||||
// Start processing batches.
|
||||
s.wg.Add(1)
|
||||
go s.processBatches(s.batcher)
|
||||
|
||||
// Open listener.
|
||||
if s.tls {
|
||||
cert, err := tls.LoadX509KeyPair(s.cert, s.cert)
|
||||
|
@ -169,8 +123,6 @@ func (s *Service) Close() error {
|
|||
return s.ln.Close()
|
||||
}
|
||||
|
||||
s.batcher.Stop()
|
||||
close(s.done)
|
||||
s.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
@ -211,10 +163,6 @@ func (s *Service) serve() {
|
|||
|
||||
// handleConn processes conn. This is run in a separate goroutine.
|
||||
func (s *Service) handleConn(conn net.Conn) {
|
||||
defer s.statMap.Add(statConnectionsActive, -1)
|
||||
s.statMap.Add(statConnectionsActive, 1)
|
||||
s.statMap.Add(statConnectionsHandled, 1)
|
||||
|
||||
// Read header into buffer to check if it's HTTP.
|
||||
var buf bytes.Buffer
|
||||
r := bufio.NewReader(io.TeeReader(conn, &buf))
|
||||
|
@ -228,7 +176,6 @@ func (s *Service) handleConn(conn net.Conn) {
|
|||
|
||||
// If no HTTP parsing error occurred then process as HTTP.
|
||||
if err == nil {
|
||||
s.statMap.Add(statHTTPConnectionsHandled, 1)
|
||||
s.httpln.ch <- conn
|
||||
return
|
||||
}
|
||||
|
@ -244,26 +191,15 @@ func (s *Service) handleConn(conn net.Conn) {
|
|||
func (s *Service) handleTelnetConn(conn net.Conn) {
|
||||
defer conn.Close()
|
||||
defer s.wg.Done()
|
||||
defer s.statMap.Add(statTelnetConnectionsActive, -1)
|
||||
s.statMap.Add(statTelnetConnectionsActive, 1)
|
||||
s.statMap.Add(statTelnetConnectionsHandled, 1)
|
||||
|
||||
// Get connection details.
|
||||
remoteAddr := conn.RemoteAddr().String()
|
||||
|
||||
// Wrap connection in a text protocol reader.
|
||||
r := textproto.NewReader(bufio.NewReader(conn))
|
||||
for {
|
||||
line, err := r.ReadLine()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
s.statMap.Add(statTelnetReadError, 1)
|
||||
s.Logger.Println("error reading from openTSDB connection", err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
s.statMap.Add(statTelnetPointsReceived, 1)
|
||||
s.statMap.Add(statTelnetBytesReceived, int64(len(line)))
|
||||
|
||||
inputStrs := strings.Fields(line)
|
||||
|
||||
|
@ -273,8 +209,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) {
|
|||
}
|
||||
|
||||
if len(inputStrs) < 4 || inputStrs[0] != "put" {
|
||||
s.statMap.Add(statTelnetBadLine, 1)
|
||||
s.Logger.Printf("malformed line '%s' from %s", line, remoteAddr)
|
||||
s.Logger.Println("TSDBServer: malformed line, skipping: ", line)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -286,8 +221,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) {
|
|||
var t time.Time
|
||||
ts, err := strconv.ParseInt(tsStr, 10, 64)
|
||||
if err != nil {
|
||||
s.statMap.Add(statTelnetBadTime, 1)
|
||||
s.Logger.Printf("malformed time '%s' from %s", tsStr, remoteAddr)
|
||||
s.Logger.Println("TSDBServer: malformed time, skipping: ", tsStr)
|
||||
}
|
||||
|
||||
switch len(tsStr) {
|
||||
|
@ -298,8 +232,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) {
|
|||
t = time.Unix(ts/1000, (ts%1000)*1000)
|
||||
break
|
||||
default:
|
||||
s.statMap.Add(statTelnetBadTime, 1)
|
||||
s.Logger.Printf("bad time '%s' must be 10 or 13 chars, from %s ", tsStr, remoteAddr)
|
||||
s.Logger.Println("TSDBServer: time must be 10 or 13 chars, skipping: ", tsStr)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -307,8 +240,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) {
|
|||
for t := range tagStrs {
|
||||
parts := strings.SplitN(tagStrs[t], "=", 2)
|
||||
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
|
||||
s.statMap.Add(statTelnetBadTag, 1)
|
||||
s.Logger.Printf("malformed tag data '%v' from %s", tagStrs[t], remoteAddr)
|
||||
s.Logger.Println("TSDBServer: malformed tag data", tagStrs[t])
|
||||
continue
|
||||
}
|
||||
k := parts[0]
|
||||
|
@ -319,12 +251,20 @@ func (s *Service) handleTelnetConn(conn net.Conn) {
|
|||
fields := make(map[string]interface{})
|
||||
fields["value"], err = strconv.ParseFloat(valueStr, 64)
|
||||
if err != nil {
|
||||
s.statMap.Add(statTelnetBadFloat, 1)
|
||||
s.Logger.Printf("bad float '%s' from %s", valueStr, remoteAddr)
|
||||
s.Logger.Println("TSDBServer: could not parse value as float: ", valueStr)
|
||||
continue
|
||||
}
|
||||
|
||||
s.batcher.In() <- tsdb.NewPoint(measurement, tags, fields, t)
|
||||
p := tsdb.NewPoint(measurement, tags, fields, t)
|
||||
if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
|
||||
Database: s.Database,
|
||||
RetentionPolicy: s.RetentionPolicy,
|
||||
ConsistencyLevel: s.ConsistencyLevel,
|
||||
Points: []tsdb.Point{p},
|
||||
}); err != nil {
|
||||
s.Logger.Println("TSDB cannot write data: ", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -339,28 +279,3 @@ func (s *Service) serveHTTP() {
|
|||
}}
|
||||
srv.Serve(s.httpln)
|
||||
}
|
||||
|
||||
// processBatches continually drains the given batcher and writes the batches to the database.
|
||||
func (s *Service) processBatches(batcher *tsdb.PointBatcher) {
|
||||
defer s.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case batch := <-batcher.Out():
|
||||
if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
|
||||
Database: s.Database,
|
||||
RetentionPolicy: s.RetentionPolicy,
|
||||
ConsistencyLevel: s.ConsistencyLevel,
|
||||
Points: batch,
|
||||
}); err == nil {
|
||||
s.statMap.Add(statBatchesTrasmitted, 1)
|
||||
s.statMap.Add(statPointsTransmitted, int64(len(batch)))
|
||||
} else {
|
||||
s.Logger.Printf("failed to write point batch to database %q: %s", s.Database, err)
|
||||
s.statMap.Add(statBatchesTransmitFail, 1)
|
||||
}
|
||||
|
||||
case <-s.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
8 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/service.go (generated, vendored)
@@ -18,7 +18,7 @@ type Service struct {
	MetaStore interface {
		IsLeader() bool
		PrecreateShardGroups(now, cutoff time.Time) error
		PrecreateShardGroups(cutoff time.Time) error
	}
}

@@ -91,9 +91,9 @@ func (s *Service) runPrecreation() {
	}

// precreate performs actual resource precreation.
func (s *Service) precreate(now time.Time) error {
	cutoff := now.Add(s.advancePeriod).UTC()
	if err := s.MetaStore.PrecreateShardGroups(now, cutoff); err != nil {
func (s *Service) precreate(t time.Time) error {
	cutoff := t.Add(s.advancePeriod).UTC()
	if err := s.MetaStore.PrecreateShardGroups(cutoff); err != nil {
		return err
	}
	return nil
@@ -18,7 +18,7 @@ func Test_ShardPrecreation(t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(1)
	ms := metaStore{
		PrecreateShardGroupsFn: func(v, u time.Time) error {
		PrecreateShardGroupsFn: func(u time.Time) error {
			wg.Done()
			if u != now.Add(advancePeriod) {
				t.Fatalf("precreation called with wrong time, got %s, exp %s", u, now)

@@ -47,13 +47,13 @@ func Test_ShardPrecreation(t *testing.T) {
}

// metaStore represents a mock impl of the service's MetaStore.
type metaStore struct {
	PrecreateShardGroupsFn func(now, cutoff time.Time) error
	PrecreateShardGroupsFn func(cutoff time.Time) error
}

func (m metaStore) IsLeader() bool {
	return true
}

func (m metaStore) PrecreateShardGroups(now, cutoff time.Time) error {
	return m.PrecreateShardGroupsFn(now, cutoff)
func (m metaStore) PrecreateShardGroups(timestamp time.Time) error {
	return m.PrecreateShardGroupsFn(timestamp)
}
2 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/config.go (generated, vendored)
@@ -12,5 +12,5 @@ type Config struct {
}

func NewConfig() Config {
	return Config{Enabled: true, CheckInterval: toml.Duration(30 * time.Minute)}
	return Config{Enabled: true, CheckInterval: toml.Duration(10 * time.Minute)}
}
2 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/service.go (generated, vendored)
@@ -40,7 +40,7 @@ func NewService(c Config) *Service {
// Open starts retention policy enforcement.
func (s *Service) Open() error {
	s.logger.Println("Starting retention policy enforcement service with check interval of", s.checkInterval)
	s.logger.Println("Starting retention policy enforcement service")
	s.wg.Add(2)
	go s.deleteShardGroups()
	go s.deleteShards()
Some files were not shown because too many files have changed in this diff.