Godep update: influxdb

Cameron Sparr 2015-11-10 14:21:02 -07:00
parent dcd1c6766c
commit a8bcc51071
118 changed files with 6999 additions and 4287 deletions

Godeps/Godeps.json (generated)

@ -111,8 +111,8 @@
},
{
"ImportPath": "github.com/influxdb/influxdb",
"Comment": "v0.9.4-rc1-703-g956efae",
"Rev": "956efaeb94ee57ecd8dc23e2f654b5231204e28f"
"Comment": "v0.9.4-rc1-884-g9625953",
"Rev": "9625953d3e06bd41b18c9d05aa1feccf353e20c8"
},
{
"ImportPath": "github.com/lib/pq",


@ -1,6 +1,10 @@
## v0.9.5 [unreleased]
### Release Notes
- Field names for the internal stats have been changed to be more in line with Go style.
### Features
- [#4702](https://github.com/influxdb/influxdb/pull/4702): Support 'history' command at CLI
- [#4098](https://github.com/influxdb/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage
- [#4141](https://github.com/influxdb/influxdb/pull/4141): Control whether each query should be logged
- [#4065](https://github.com/influxdb/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex
@ -20,12 +24,26 @@
- [#4379](https://github.com/influxdb/influxdb/pull/4379): Auto-create database for UDP input.
- [#4375](https://github.com/influxdb/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party.
- [#4506](https://github.com/influxdb/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available.
- [#4516](https://github.com/influxdb/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics
- [#4501](https://github.com/influxdb/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex.
- [#4547](https://github.com/influxdb/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader).
- [#4600](https://github.com/influxdb/influxdb/pull/4600): ping endpoint can wait for leader
- [#4648](https://github.com/influxdb/influxdb/pull/4648): UDP Client (v2 client)
- [#4690](https://github.com/influxdb/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires
- [#4676](https://github.com/influxdb/influxdb/pull/4676): UDP service listener performance enhancements
- [#4659](https://github.com/influxdb/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau
- [#4721](https://github.com/influxdb/influxdb/pull/4721): Export tsdb.InterfaceValues
- [#4681](https://github.com/influxdb/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners
### Bugfixes
- [#4715](https://github.com/influxdb/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdb/influxdb/issues/4707). Thanks @oiooj
- [#4643](https://github.com/influxdb/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj
- [#4632](https://github.com/influxdb/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn
- [#4389](https://github.com/influxdb/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle.
- [#4166](https://github.com/influxdb/influxdb/pull/4166): Fix parser error on invalid SHOW
- [#3457](https://github.com/influxdb/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name
- [#4704](https://github.com/influxdb/influxdb/pull/4704): Tighten up command parsing within CLI. Thanks @pires
- [#4225](https://github.com/influxdb/influxdb/pull/4225): Always display diags in name-sorted order
- [#4111](https://github.com/influxdb/influxdb/pull/4111): Update pre-commit hook for go vet composites
- [#4136](https://github.com/influxdb/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier
@ -33,6 +51,7 @@
- [#4124](https://github.com/influxdb/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service
- [#4238](https://github.com/influxdb/influxdb/pull/4238): Fully disable hinted-handoff service if so requested.
- [#4165](https://github.com/influxdb/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database.
- [#4586](https://github.com/influxdb/influxdb/pull/4586): Exit when invalid engine is selected
- [#4118](https://github.com/influxdb/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions
- [#4191](https://github.com/influxdb/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdb/influxdb/issues/4170)
- [#4222](https://github.com/influxdb/influxdb/pull/4222): Graphite TCP connections should not block shutdown
@ -41,6 +60,8 @@
- [#4264](https://github.com/influxdb/influxdb/issues/4264): Refactor map functions to use list of values
- [#4278](https://github.com/influxdb/influxdb/pull/4278): Fix error marshalling across the cluster
- [#4149](https://github.com/influxdb/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri!
- [#4674](https://github.com/influxdb/influxdb/pull/4674): Fix panic during restore. Thanks @simcap.
- [#4725](https://github.com/influxdb/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS.
- [#4237](https://github.com/influxdb/influxdb/issues/4237): DERIVATIVE() edge conditions
- [#4263](https://github.com/influxdb/influxdb/issues/4263): derivative does not work when data is missing
- [#4293](https://github.com/influxdb/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson
@ -56,6 +77,7 @@
- [#4344](https://github.com/influxdb/influxdb/issues/4344): Make client.Write default to client.precision if none is given.
- [#3429](https://github.com/influxdb/influxdb/issues/3429): Incorrect parsing of regex containing '/'
- [#4374](https://github.com/influxdb/influxdb/issues/4374): Add tsm1 quickcheck tests
- [#4644](https://github.com/influxdb/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdb/influxdb/issues/4641)
- [#4377](https://github.com/influxdb/influxdb/pull/4377): Hinted handoff should not process dropped nodes
- [#4365](https://github.com/influxdb/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock
- [#4280](https://github.com/influxdb/influxdb/issues/4280): Only drop points matching WHERE clause
@ -75,6 +97,20 @@
- [#4486](https://github.com/influxdb/influxdb/pull/4486): Fix missing comments for runner package
- [#4497](https://github.com/influxdb/influxdb/pull/4497): Fix sequence in meta proto
- [#3367](https://github.com/influxdb/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.
- [#4536](https://github.com/influxdb/influxdb/pull/4536): Fix broken subscriptions updates.
- [#4538](https://github.com/influxdb/influxdb/issues/4538): Dropping database under a write load causes panics
- [#4582](https://github.com/influxdb/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj
- [#4513](https://github.com/influxdb/influxdb/issues/4513): TSM1: panic: runtime error: index out of range
- [#4521](https://github.com/influxdb/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9
- [#4587](https://github.com/influxdb/influxdb/pull/4587): Prevent NaN float values from being stored
- [#4596](https://github.com/influxdb/influxdb/pull/4596): Skip empty string for start position when parsing line protocol. Thanks @ch33hau
- [#4610](https://github.com/influxdb/influxdb/pull/4610): Make internal stats names consistent with Go style.
- [#4625](https://github.com/influxdb/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj.
- [#4650](https://github.com/influxdb/influxdb/issues/4650): Importer should skip empty lines
- [#4651](https://github.com/influxdb/influxdb/issues/4651): Importer doesn't flush out last batch
- [#4602](https://github.com/influxdb/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services.
- [#4691](https://github.com/influxdb/influxdb/issues/4691): Enable toml test `TestConfig_Encode`.
- [#4684](https://github.com/influxdb/influxdb/pull/4684): Add Graphite and UDP section to default config. Thanks @nkatsaros
## v0.9.4 [2015-09-14]


@ -64,12 +64,6 @@ To assist in review for the PR, please add the following to your pull request comment:
- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed)
```
Use of third-party packages
------------
A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.
For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
Signing the CLA
---------------
@ -87,8 +81,8 @@ on how to install it see [the gvm page on github](https://github.com/moovweb/gvm)
After installing gvm you can install and set the default go version by
running the following:
gvm install go1.5
gvm use go1.5 --default
gvm install go1.5.1
gvm use go1.5.1 --default
Revision Control Systems
-------------
@ -234,6 +228,12 @@ go tool pprof ./influxd influxd.prof
```
Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*.
Use of third-party packages
------------
A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.
For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
Continuous Integration testing
-----
InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdb/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file.


@ -15,5 +15,5 @@
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- golang.org/x/crypto/bcrypt [BSD LICENSE](https://go.googlesource.com/crypto/+/master/LICENSE)
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)


@ -160,7 +160,7 @@ And the show series output looks like this:
# Continuous Queries
Continous queries are going to be inspired by MySQL `TRIGGER` syntax:
Continuous queries are going to be inspired by MySQL `TRIGGER` syntax:
http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html


@ -75,6 +75,10 @@ case $CIRCLE_NODE_INDEX in
rc=${PIPESTATUS[0]}
;;
1)
INFLUXDB_DATA_ENGINE="tsm1" go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt
rc=${PIPESTATUS[0]}
;;
2)
# 32bit tests.
if [[ -e ~/docker/image.tar ]]; then docker load -i ~/docker/image.tar; fi
docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test .
@ -86,7 +90,7 @@ case $CIRCLE_NODE_INDEX in
-c "cd /root/go/src/github.com/influxdb/influxdb && go get -t -d -v ./... && go build -v ./... && go test ${PARALLELISM} ${TIMEOUT} -v ./... 2>&1 | tee /tmp/artifacts/test_logs_i386.txt && exit \${PIPESTATUS[0]}"
rc=$?
;;
2)
3)
GORACE="halt_on_error=1" go test $PARALLELISM $TIMEOUT -v -race ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs_race.txt
rc=${PIPESTATUS[0]}
;;


@ -212,10 +212,42 @@ for i, row := range res[0].Series[0].Values {
}
```
### Using the UDP Client
The **InfluxDB** client also supports writing over UDP.
```go
func WriteUDP() {
// Make client
c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
if err != nil {
panic(err.Error())
}
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
panic(err.Error())
}
bp.AddPoint(pt)
// Write the batch
c.Write(bp)
}
```
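One caveat: UDP writes are fire-and-forget, so the error returned by `Write` reflects only local socket failures; the server never acknowledges UDP points. A small sketch (the wrapper name is illustrative, not part of the client API, and it assumes the standard `log` package) that at least surfaces those local errors:
```go
func WriteUDPChecked(c client.Client, bp client.BatchPoints) {
	// Only local/network-stack failures are visible here; an InfluxDB
	// server never acknowledges points written over UDP.
	if err := c.Write(bp); err != nil {
		log.Printf("UDP write failed: %v", err)
	}
}
```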
## Go Docs
Please refer to
[http://godoc.org/github.com/influxdb/influxdb/client](http://godoc.org/github.com/influxdb/influxdb/client)
[http://godoc.org/github.com/influxdb/influxdb/client/v2](http://godoc.org/github.com/influxdb/influxdb/client/v2)
for documentation.
## See Also


@ -38,22 +38,21 @@ func ParseConnectionString(path string, ssl bool) (url.URL, error) {
var host string
var port int
if strings.Contains(path, ":") {
h := strings.Split(path, ":")
i, e := strconv.Atoi(h[1])
if e != nil {
return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, e)
}
port = i
if h[0] == "" {
h, p, err := net.SplitHostPort(path)
if err != nil {
if path == "" {
host = DefaultHost
} else {
host = h[0]
host = path
}
} else {
host = path
// If they didn't specify a port, always use the default port
port = DefaultPort
} else {
host = h
port, err = strconv.Atoi(p)
if err != nil {
return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
}
}
u := url.URL{
@ -62,6 +61,7 @@ func ParseConnectionString(path string, ssl bool) (url.URL, error) {
if ssl {
u.Scheme = "https"
}
u.Host = net.JoinHostPort(host, strconv.Itoa(port))
return u, nil
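Aside: the rewrite works because the standard library's host/port helpers handle bracketed IPv6 literals, while a bare host with no port surfaces as an error that the function above maps to `DefaultPort`. A standalone sketch of that behavior (not part of the diff):
```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Bracketed IPv6 literals split cleanly into host and port.
	h, p, err := net.SplitHostPort("[::1]:8086")
	fmt.Println(h, p, err) // ::1 8086 <nil>

	// A plain host with no port is an error ("missing port in address"),
	// which ParseConnectionString treats as "use the default port".
	_, _, err = net.SplitHostPort("localhost")
	fmt.Println(err)

	// JoinHostPort re-brackets IPv6 hosts on the way back out.
	fmt.Println(net.JoinHostPort("::1", "8086")) // [::1]:8086
}
```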
@ -69,7 +69,7 @@ func ParseConnectionString(path string, ssl bool) (url.URL, error) {
// Config is used to specify what server to connect to.
// URL: The URL of the server to connect to.
// Username/Password are optional. They will be passed via basic auth if provided.
// Username/Password are optional. They will be passed via basic auth if provided.
// UserAgent: If not provided, will default to "InfluxDBClient",
// Timeout: If not provided, will default to 0 (no timeout)
type Config struct {
@ -180,7 +180,7 @@ func (c *Client) Query(q Query) (*Response, error) {
if decErr != nil {
return nil, decErr
}
// If we don't have an error in our json response, and didn't get statusOK, then send back an error
// If we don't have an error in our json response, and didn't get StatusOK, then send back an error
if resp.StatusCode != http.StatusOK && response.Error() == nil {
return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
}
@ -474,7 +474,10 @@ func (p *Point) MarshalJSON() ([]byte, error) {
// MarshalString renders string representation of a Point with specified
// precision. The default precision is nanoseconds.
func (p *Point) MarshalString() string {
pt := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
if err != nil {
return "# ERROR: " + err.Error() + " " + p.Measurement
}
if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
return pt.String()
}
@ -561,7 +564,7 @@ func normalizeFields(fields map[string]interface{}) map[string]interface{} {
// BatchPoints is used to send batched data in a single write.
// Database and Points are required
// If no retention policy is specified, it will use the database's default retention policy.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it is ignored.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
// If time is specified, it will be applied to any point with an empty time.
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h


@ -547,3 +547,14 @@ func TestClient_NoTimeout(t *testing.T) {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_ParseConnectionString_IPv6(t *testing.T) {
path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086"
u, err := client.ParseConnectionString(path, false)
if err != nil {
t.Fatalf("unexpected error, expected %v, actual %v", nil, err)
}
if u.Host != path {
t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host)
}
}


@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"time"
@ -13,6 +14,12 @@ import (
"github.com/influxdb/influxdb/models"
)
// UDPPayloadSize is a reasonable default payload size for UDP packets that
// could be travelling over the internet.
const (
UDPPayloadSize = 512
)
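// (Aside: 512 bytes of payload plus IP/UDP headers stays under the 576-byte
// datagram size that every IPv4 host must accept per RFC 791, which is what
// makes it a safe default for packets crossing the open internet.)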
type Config struct {
// URL of the InfluxDB database
URL *url.URL
@ -34,6 +41,15 @@ type Config struct {
InsecureSkipVerify bool
}
type UDPConfig struct {
// Addr should be of the form "host:port" or "[ipv6-host%zone]:port".
Addr string
// PayloadSize is the maximum size of a UDP client message, optional.
// Tune this based on your network. Defaults to UDPPayloadSize.
PayloadSize int
}
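// Usage sketch (values are illustrative, not defaults): on a network with a
// known larger MTU the payload budget can be raised, e.g.
//
//	c, err := NewUDPClient(UDPConfig{Addr: "localhost:8089", PayloadSize: 1450})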
type BatchPointsConfig struct {
// Precision is the write precision of the points, defaults to "ns"
Precision string
@ -48,12 +64,17 @@ type BatchPointsConfig struct {
WriteConsistency string
}
// Client is a client interface for writing & querying the database
type Client interface {
// Write takes a BatchPoints object and writes all Points to InfluxDB.
Write(bp BatchPoints) error
// Query makes an InfluxDB Query on the database
// Query makes an InfluxDB Query on the database. This will fail if using
// the UDP client.
Query(q Query) (*Response, error)
// Close releases any resources a Client may be using.
Close() error
}
// NewClient creates a client interface from the given config.
@ -78,6 +99,41 @@ func NewClient(conf Config) Client {
}
}
// Close releases the client's resources.
func (c *client) Close() error {
return nil
}
// NewUDPClient returns a client interface for writing to an InfluxDB UDP
// service from the given config.
func NewUDPClient(conf UDPConfig) (Client, error) {
var udpAddr *net.UDPAddr
udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
if err != nil {
return nil, err
}
conn, err := net.DialUDP("udp", nil, udpAddr)
if err != nil {
return nil, err
}
payloadSize := conf.PayloadSize
if payloadSize == 0 {
payloadSize = UDPPayloadSize
}
return &udpclient{
conn: conn,
payloadSize: payloadSize,
}, nil
}
// Close releases the udpclient's resources.
func (uc *udpclient) Close() error {
return uc.conn.Close()
}
type client struct {
url *url.URL
username string
@ -86,6 +142,11 @@ type client struct {
httpClient *http.Client
}
type udpclient struct {
conn *net.UDPConn
payloadSize int
}
// BatchPoints is an interface into a batched grouping of points to write into
// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
// batch for each goroutine.
@ -198,14 +259,19 @@ func NewPoint(
tags map[string]string,
fields map[string]interface{},
t ...time.Time,
) *Point {
) (*Point, error) {
var T time.Time
if len(t) > 0 {
T = t[0]
}
return &Point{
pt: models.NewPoint(name, tags, fields, T),
pt, err := models.NewPoint(name, tags, fields, T)
if err != nil {
return nil, err
}
return &Point{
pt: pt,
}, nil
}
// String returns a line-protocol string of the Point
@ -243,11 +309,34 @@ func (p *Point) Fields() map[string]interface{} {
return p.pt.Fields()
}
func (c *client) Write(bp BatchPoints) error {
u := c.url
u.Path = "write"
func (uc *udpclient) Write(bp BatchPoints) error {
var b bytes.Buffer
var d time.Duration
d, _ = time.ParseDuration("1" + bp.Precision())
for _, p := range bp.Points() {
pointstring := p.pt.RoundedString(d) + "\n"
// Write and reset the buffer if we reach the max size
if b.Len()+len(pointstring) >= uc.payloadSize {
if _, err := uc.conn.Write(b.Bytes()); err != nil {
return err
}
b.Reset()
}
if _, err := b.WriteString(pointstring); err != nil {
return err
}
}
_, err := uc.conn.Write(b.Bytes())
return err
}
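// Note the flush discipline above: the buffer is flushed *before* appending a
// point that would cross payloadSize, so points never straddle datagrams; a
// single point longer than payloadSize is still sent as one oversized
// datagram rather than being split.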
func (c *client) Write(bp BatchPoints) error {
var b bytes.Buffer
for _, p := range bp.Points() {
if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
return err
@ -258,6 +347,8 @@ func (c *client) Write(bp BatchPoints) error {
}
}
u := c.url
u.Path = "write"
req, err := http.NewRequest("POST", u.String(), &b)
if err != nil {
return err
@ -327,28 +418,33 @@ type Result struct {
Err error
}
func (uc *udpclient) Query(q Query) (*Response, error) {
return nil, fmt.Errorf("Querying via UDP is not supported")
}
// Query sends a command to the server and returns the Response
func (c *client) Query(q Query) (*Response, error) {
u := c.url
u.Path = "query"
values := u.Query()
values.Set("q", q.Command)
values.Set("db", q.Database)
if q.Precision != "" {
values.Set("epoch", q.Precision)
}
u.RawQuery = values.Encode()
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.useragent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
params := req.URL.Query()
params.Set("q", q.Command)
params.Set("db", q.Database)
if q.Precision != "" {
params.Set("epoch", q.Precision)
}
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err


@ -11,6 +11,53 @@ import (
"time"
)
func TestUDPClient_Query(t *testing.T) {
config := UDPConfig{Addr: "localhost:8089"}
c, err := NewUDPClient(config)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
defer c.Close()
query := Query{}
_, err = c.Query(query)
if err == nil {
t.Error("Querying UDP client should fail")
}
}
func TestUDPClient_Write(t *testing.T) {
config := UDPConfig{Addr: "localhost:8089"}
c, err := NewUDPClient(config)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
defer c.Close()
bp, err := NewBatchPoints(BatchPointsConfig{})
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
fields := make(map[string]interface{})
fields["value"] = 1.0
pt, _ := NewPoint("cpu", make(map[string]string), fields)
bp.AddPoint(pt)
err = c.Write(bp)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestUDPClient_BadAddr(t *testing.T) {
config := UDPConfig{Addr: "foobar@wahoo"}
c, err := NewUDPClient(config)
if err == nil {
defer c.Close()
t.Error("Expected resolve error")
}
}
func TestClient_Query(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
@ -22,11 +69,12 @@ func TestClient_Query(t *testing.T) {
u, _ := url.Parse(ts.URL)
config := Config{URL: u}
c := NewClient(config)
defer c.Close()
query := Query{}
_, err := c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
@ -53,11 +101,12 @@ func TestClient_BasicAuth(t *testing.T) {
u.User = url.UserPassword("username", "password")
config := Config{URL: u, Username: "username", Password: "password"}
c := NewClient(config)
defer c.Close()
query := Query{}
_, err := c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
@ -72,14 +121,15 @@ func TestClient_Write(t *testing.T) {
u, _ := url.Parse(ts.URL)
config := Config{URL: u}
c := NewClient(config)
defer c.Close()
bp, err := NewBatchPoints(BatchPointsConfig{})
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
err = c.Write(bp)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
@ -96,7 +146,7 @@ func TestClient_UserAgent(t *testing.T) {
_, err := http.Get(ts.URL)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
tests := []struct {
@ -120,34 +170,35 @@ func TestClient_UserAgent(t *testing.T) {
u, _ := url.Parse(ts.URL)
config := Config{URL: u, UserAgent: test.userAgent}
c := NewClient(config)
defer c.Close()
receivedUserAgent = ""
query := Query{}
_, err = c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
bp, _ := NewBatchPoints(BatchPointsConfig{})
err = c.Write(bp)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
_, err := c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if receivedUserAgent != test.expected {
t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
}
}
@ -157,7 +208,7 @@ func TestClient_PointString(t *testing.T) {
time1, _ := time.Parse(shortForm, "2013-Feb-03")
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields, time1)
p, _ := NewPoint("cpu_usage", tags, fields, time1)
s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000"
if p.String() != s {
@ -174,7 +225,7 @@ func TestClient_PointString(t *testing.T) {
func TestClient_PointWithoutTimeString(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields)
p, _ := NewPoint("cpu_usage", tags, fields)
s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39"
if p.String() != s {
@ -190,7 +241,7 @@ func TestClient_PointWithoutTimeString(t *testing.T) {
func TestClient_PointName(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields)
p, _ := NewPoint("cpu_usage", tags, fields)
exp := "cpu_usage"
if p.Name() != exp {
@ -202,7 +253,7 @@ func TestClient_PointName(t *testing.T) {
func TestClient_PointTags(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields)
p, _ := NewPoint("cpu_usage", tags, fields)
if !reflect.DeepEqual(tags, p.Tags()) {
t.Errorf("Error, got %v, expected %v",
@ -215,7 +266,7 @@ func TestClient_PointUnixNano(t *testing.T) {
time1, _ := time.Parse(shortForm, "2013-Feb-03")
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields, time1)
p, _ := NewPoint("cpu_usage", tags, fields, time1)
exp := int64(1359849600000000000)
if p.UnixNano() != exp {
@ -227,7 +278,7 @@ func TestClient_PointUnixNano(t *testing.T) {
func TestClient_PointFields(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields)
p, _ := NewPoint("cpu_usage", tags, fields)
if !reflect.DeepEqual(fields, p.Fields()) {
t.Errorf("Error, got %v, expected %v",


@ -1,129 +0,0 @@
package client_example
import (
"fmt"
"log"
"math/rand"
"net/url"
"os"
"time"
"github.com/influxdb/influxdb/client/v2"
)
func ExampleNewClient() client.Client {
u, _ := url.Parse("http://localhost:8086")
// NOTE: this assumes you've set up a user and have set up shell env variables,
// namely INFLUX_USER/INFLUX_PWD. If not, just omit Username/Password below.
client := client.NewClient(client.Config{
URL: u,
Username: os.Getenv("INFLUX_USER"),
Password: os.Getenv("INFLUX_PWD"),
})
return client
}
func ExampleWrite() {
// Make client
u, _ := url.Parse("http://localhost:8086")
c := client.NewClient(client.Config{
URL: u,
})
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "BumbleBeeTuna",
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt := client.NewPoint("cpu_usage", tags, fields, time.Now())
bp.AddPoint(pt)
// Write the batch
c.Write(bp)
}
// Write 1000 points
func ExampleWrite1000() {
sampleSize := 1000
// Make client
u, _ := url.Parse("http://localhost:8086")
clnt := client.NewClient(client.Config{
URL: u,
})
rand.Seed(42)
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "systemstats",
Precision: "us",
})
for i := 0; i < sampleSize; i++ {
regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
tags := map[string]string{
"cpu": "cpu-total",
"host": fmt.Sprintf("host%d", rand.Intn(1000)),
"region": regions[rand.Intn(len(regions))],
}
idle := rand.Float64() * 100.0
fields := map[string]interface{}{
"idle": idle,
"busy": 100.0 - idle,
}
bp.AddPoint(client.NewPoint(
"cpu_usage",
tags,
fields,
time.Now(),
))
}
err := clnt.Write(bp)
if err != nil {
log.Fatal(err)
}
}
func ExampleQuery() {
// Make client
u, _ := url.Parse("http://localhost:8086")
c := client.NewClient(client.Config{
URL: u,
})
q := client.Query{
Command: "SELECT count(value) FROM shapes",
Database: "square_holes",
Precision: "ns",
}
if response, err := c.Query(q); err == nil && response.Error() == nil {
log.Println(response.Results)
}
}
func ExampleCreateDatabase() {
// Make client
u, _ := url.Parse("http://localhost:8086")
c := client.NewClient(client.Config{
URL: u,
})
q := client.Query{
Command: "CREATE DATABASE telegraf",
}
if response, err := c.Query(q); err == nil && response.Error() == nil {
log.Println(response.Results)
}
}


@ -0,0 +1,248 @@
package client_test
import (
"fmt"
"log"
"math/rand"
"net/url"
"os"
"time"
"github.com/influxdb/influxdb/client/v2"
)
// Create a new client
func ExampleClient() client.Client {
u, _ := url.Parse("http://localhost:8086")
// NOTE: this assumes you've set up a user and have set up shell env variables,
// namely INFLUX_USER/INFLUX_PWD. If not, just omit Username/Password below.
client := client.NewClient(client.Config{
URL: u,
Username: os.Getenv("INFLUX_USER"),
Password: os.Getenv("INFLUX_PWD"),
})
return client
}
// Write a point using the UDP client
func ExampleClient_uDP() {
// Make client
config := client.UDPConfig{Addr: "localhost:8089"}
c, err := client.NewUDPClient(config)
if err != nil {
panic(err.Error())
}
defer c.Close()
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
panic(err.Error())
}
bp.AddPoint(pt)
// Write the batch
c.Write(bp)
}
// Write a point using the HTTP client
func ExampleClient_write() {
// Make client
u, _ := url.Parse("http://localhost:8086")
c := client.NewClient(client.Config{
URL: u,
})
defer c.Close()
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "BumbleBeeTuna",
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
panic(err.Error())
}
bp.AddPoint(pt)
// Write the batch
c.Write(bp)
}
// Create a batch and add a point
func ExampleBatchPoints() {
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "BumbleBeeTuna",
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
panic(err.Error())
}
bp.AddPoint(pt)
}
// Using the BatchPoints setter functions
func ExampleBatchPoints_setters() {
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{})
bp.SetDatabase("BumbleBeeTuna")
bp.SetPrecision("ms")
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
panic(err.Error())
}
bp.AddPoint(pt)
}
// Create a new point with a timestamp
func ExamplePoint() {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err == nil {
fmt.Println("We created a point: ", pt.String())
}
}
// Create a new point without a timestamp
func ExamplePoint_withoutTime() {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields)
if err == nil {
fmt.Println("We created a point w/o time: ", pt.String())
}
}
// Write 1000 points
func ExampleClient_write1000() {
sampleSize := 1000
// Make client
u, _ := url.Parse("http://localhost:8086")
clnt := client.NewClient(client.Config{
URL: u,
})
defer clnt.Close()
rand.Seed(42)
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "systemstats",
Precision: "us",
})
for i := 0; i < sampleSize; i++ {
regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
tags := map[string]string{
"cpu": "cpu-total",
"host": fmt.Sprintf("host%d", rand.Intn(1000)),
"region": regions[rand.Intn(len(regions))],
}
idle := rand.Float64() * 100.0
fields := map[string]interface{}{
"idle": idle,
"busy": 100.0 - idle,
}
pt, err := client.NewPoint(
"cpu_usage",
tags,
fields,
time.Now(),
)
if err != nil {
println("Error:", err.Error())
continue
}
bp.AddPoint(pt)
}
err := clnt.Write(bp)
if err != nil {
log.Fatal(err)
}
}
// Make a Query
func ExampleClient_query() {
// Make client
u, _ := url.Parse("http://localhost:8086")
c := client.NewClient(client.Config{
URL: u,
})
defer c.Close()
q := client.Query{
Command: "SELECT count(value) FROM shapes",
Database: "square_holes",
Precision: "ns",
}
if response, err := c.Query(q); err == nil && response.Error() == nil {
log.Println(response.Results)
}
}
// Create a Database with a query
func ExampleClient_createDatabase() {
// Make client
u, _ := url.Parse("http://localhost:8086")
c := client.NewClient(client.Config{
URL: u,
})
defer c.Close()
q := client.Query{
Command: "CREATE DATABASE telegraf",
}
if response, err := c.Query(q); err == nil && response.Error() == nil {
log.Println(response.Results)
}
}


@ -1,4 +1,4 @@
package influxdb
package cluster
import (
"math/rand"


@ -1,10 +1,10 @@
package influxdb_test
package cluster_test
import (
"fmt"
"testing"
"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
)
@ -20,7 +20,7 @@ func NewNodes() []meta.NodeInfo {
}
func TestBalancerEmptyNodes(t *testing.T) {
b := influxdb.NewNodeBalancer([]meta.NodeInfo{})
b := cluster.NewNodeBalancer([]meta.NodeInfo{})
got := b.Next()
if got != nil {
t.Errorf("expected nil, got %v", got)
@ -29,7 +29,7 @@ func TestBalancerEmptyNodes(t *testing.T) {
func TestBalancerUp(t *testing.T) {
nodes := NewNodes()
b := influxdb.NewNodeBalancer(nodes)
b := cluster.NewNodeBalancer(nodes)
// First node in randomized round-robin order
first := b.Next()
@ -52,7 +52,7 @@ func TestBalancerUp(t *testing.T) {
/*
func TestBalancerDown(t *testing.T) {
nodes := NewNodes()
b := influxdb.NewNodeBalancer(nodes)
b := cluster.NewNodeBalancer(nodes)
nodes[0].Down()
@ -78,7 +78,7 @@ func TestBalancerDown(t *testing.T) {
/*
func TestBalancerBackUp(t *testing.T) {
nodes := newDataNodes()
b := influxdb.NewNodeBalancer(nodes)
b := cluster.NewNodeBalancer(nodes)
nodes[0].Down()


@ -23,16 +23,16 @@ type ConsistencyLevel int
// The statistics generated by the "write" module
const (
statWriteReq = "req"
statPointWriteReq = "point_req"
statPointWriteReqLocal = "point_req_local"
statPointWriteReqRemote = "point_req_remote"
statWriteOK = "write_ok"
statWritePartial = "write_partial"
statWriteTimeout = "write_timeout"
statWriteErr = "write_error"
statWritePointReqHH = "point_req_hh"
statSubWriteOK = "sub_write_ok"
statSubWriteDrop = "sub_write_drop"
statPointWriteReq = "pointReq"
statPointWriteReqLocal = "pointReqLocal"
statPointWriteReqRemote = "pointReqRemote"
statWriteOK = "writeOk"
statWritePartial = "writePartial"
statWriteTimeout = "writeTimeout"
statWriteErr = "writeError"
statWritePointReqHH = "pointReqHH"
statSubWriteOK = "subWriteOk"
statSubWriteDrop = "subWriteDrop"
)
const (
@ -112,6 +112,7 @@ type PointsWriter struct {
Subscriber interface {
Points() chan<- *WritePointsRequest
}
subPoints chan<- *WritePointsRequest
statMap *expvar.Map
}
@ -155,8 +156,9 @@ func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) {
func (w *PointsWriter) Open() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.closing == nil {
w.closing = make(chan struct{})
w.closing = make(chan struct{})
if w.Subscriber != nil {
w.subPoints = w.Subscriber.Points()
}
return nil
}
@ -167,7 +169,12 @@ func (w *PointsWriter) Close() error {
defer w.mu.Unlock()
if w.closing != nil {
close(w.closing)
w.closing = nil
}
if w.subPoints != nil {
// 'nil' channels always block so this makes the
// select statement in WritePoints hit its default case
// dropping any in-flight writes.
w.subPoints = nil
}
return nil
}
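Nil'ing `subPoints` in `Close` leans on a Go language guarantee: a send on a nil channel blocks forever, so the `select` in `WritePoints` can then only take its `default` case and the write is counted as dropped. A self-contained illustration:
```go
package main

import "fmt"

func main() {
	var ch chan int // nil channel: sends (and receives) block forever

	select {
	case ch <- 1:
		fmt.Println("sent") // unreachable while ch is nil
	default:
		fmt.Println("dropped") // always taken: a nil send can never proceed
	}
}
```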
@ -252,13 +259,19 @@ func (w *PointsWriter) WritePoints(p *WritePointsRequest) error {
}
// Send points to subscriptions if possible.
if w.Subscriber != nil {
select {
case w.Subscriber.Points() <- p:
w.statMap.Add(statSubWriteOK, 1)
default:
w.statMap.Add(statSubWriteDrop, 1)
}
ok := false
// We need to lock just in case the channel is about to be nil'ed
w.mu.RLock()
select {
case w.subPoints <- p:
ok = true
default:
}
w.mu.RUnlock()
if ok {
w.statMap.Add(statSubWriteOK, 1)
} else {
w.statMap.Add(statSubWriteDrop, 1)
}
for range shardMappings.Points {


@ -322,6 +322,9 @@ func TestPointsWriter_WritePoints(t *testing.T) {
c.HintedHandoff = hh
c.Subscriber = sub
c.Open()
defer c.Close()
err := c.WritePoints(pr)
if err == nil && test.expErr != nil {
t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)


@ -113,9 +113,13 @@ type WritePointsRequest struct {
// AddPoint adds a point to the WritePointsRequest with field key 'value'
func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
w.Points = append(w.Points, models.NewPoint(
pt, err := models.NewPoint(
name, tags, map[string]interface{}{"value": value}, timestamp,
))
)
if err != nil {
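// Points that fail validation (e.g. NaN float fields, which models.NewPoint
// now rejects) are silently dropped here.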
return
}
w.Points = append(w.Points, pt)
}
// WriteShardRequest represents a request to write a slice of points to a shard
@ -139,9 +143,13 @@ func (w *WriteShardRequest) Points() []models.Point { return w.unmarshalPoints()
// AddPoint adds a new time series point
func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
w.AddPoints([]models.Point{models.NewPoint(
pt, err := models.NewPoint(
name, tags, map[string]interface{}{"value": value}, timestamp,
)})
)
if err != nil {
return
}
w.AddPoints([]models.Point{pt})
}
// AddPoints adds a new time series point


@ -27,11 +27,11 @@ const MuxHeader = 2
// Statistics maintained by the cluster package
const (
writeShardReq = "write_shard_req"
writeShardPointsReq = "write_shard_points_req"
writeShardFail = "write_shard_fail"
mapShardReq = "map_shard_req"
mapShardResp = "map_shard_resp"
writeShardReq = "writeShardReq"
writeShardPointsReq = "writeShardPointsReq"
writeShardFail = "writeShardFail"
mapShardReq = "mapShardReq"
mapShardResp = "mapShardResp"
)
// Service processes data received over raw TCP connections.
@ -61,7 +61,7 @@ type Service struct {
func NewService(c Config) *Service {
return &Service{
closing: make(chan struct{}),
Logger: log.New(os.Stderr, "[tcp] ", log.LstdFlags),
Logger: log.New(os.Stderr, "[cluster] ", log.LstdFlags),
statMap: influxdb.NewStatistics("cluster", "cluster", nil),
}
}


@ -28,7 +28,7 @@ func TestShardWriter_WriteShard_Success(t *testing.T) {
// Build a single point.
now := time.Now()
var points []models.Point
points = append(points, models.NewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))
points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))
// Write to shard and close.
if err := w.WriteShard(1, 2, points); err != nil {
@ -75,7 +75,7 @@ func TestShardWriter_WriteShard_Multiple(t *testing.T) {
// Build a single point.
now := time.Now()
var points []models.Point
points = append(points, models.NewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))
points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))
// Write to shard twice and close.
if err := w.WriteShard(1, 2, points); err != nil {
@ -125,7 +125,7 @@ func TestShardWriter_WriteShard_Error(t *testing.T) {
shardID := uint64(1)
ownerID := uint64(2)
var points []models.Point
points = append(points, models.NewPoint(
points = append(points, models.MustNewPoint(
"cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
))
@ -153,7 +153,7 @@ func TestShardWriter_Write_ErrDialTimeout(t *testing.T) {
shardID := uint64(1)
ownerID := uint64(2)
var points []models.Point
points = append(points, models.NewPoint(
points = append(points, models.MustNewPoint(
"cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
))
@ -176,7 +176,7 @@ func TestShardWriter_Write_ErrReadTimeout(t *testing.T) {
shardID := uint64(1)
ownerID := uint64(2)
var points []models.Point
points = append(points, models.NewPoint(
points = append(points, models.MustNewPoint(
"cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
))


@ -20,6 +20,7 @@ import (
"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/importer/v8"
"github.com/peterh/liner"
"io/ioutil"
)
// These variables are populated via the Go linker.
@ -39,6 +40,10 @@ const (
defaultPPS = 0
)
const (
noTokenMsg = "Visit https://enterprise.influxdata.com to register for updates, InfluxDB server management, and monitoring.\n"
)
type CommandLine struct {
Client *client.Client
Line *liner.State
@ -163,7 +168,16 @@ Examples:
c.Client.Addr())
return
}
if c.Execute == "" && !c.Import {
token, err := c.DatabaseToken()
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to check token: %s\n", err.Error())
return
}
if token == "" {
fmt.Printf(noTokenMsg)
}
fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.Version)
}
@ -248,41 +262,54 @@ func showVersion() {
func (c *CommandLine) ParseCommand(cmd string) bool {
lcmd := strings.TrimSpace(strings.ToLower(cmd))
switch {
case strings.HasPrefix(lcmd, "exit"):
// signal the program to exit
return false
case strings.HasPrefix(lcmd, "gopher"):
c.gopher()
case strings.HasPrefix(lcmd, "connect"):
c.connect(cmd)
case strings.HasPrefix(lcmd, "auth"):
c.SetAuth(cmd)
case strings.HasPrefix(lcmd, "help"):
c.help()
case strings.HasPrefix(lcmd, "format"):
c.SetFormat(cmd)
case strings.HasPrefix(lcmd, "precision"):
c.SetPrecision(cmd)
case strings.HasPrefix(lcmd, "consistency"):
c.SetWriteConsistency(cmd)
case strings.HasPrefix(lcmd, "settings"):
c.Settings()
case strings.HasPrefix(lcmd, "pretty"):
c.Pretty = !c.Pretty
if c.Pretty {
fmt.Println("Pretty print enabled")
} else {
fmt.Println("Pretty print disabled")
split := strings.Split(lcmd, " ")
var tokens []string
for _, token := range split {
if token != "" {
tokens = append(tokens, token)
}
}
if len(tokens) > 0 {
switch tokens[0] {
case "":
break
case "exit":
// signal the program to exit
return false
case "gopher":
c.gopher()
case "connect":
c.connect(cmd)
case "auth":
c.SetAuth(cmd)
case "help":
c.help()
case "history":
c.history()
case "format":
c.SetFormat(cmd)
case "precision":
c.SetPrecision(cmd)
case "consistency":
c.SetWriteConsistency(cmd)
case "settings":
c.Settings()
case "pretty":
c.Pretty = !c.Pretty
if c.Pretty {
fmt.Println("Pretty print enabled")
} else {
fmt.Println("Pretty print disabled")
}
case "use":
c.use(cmd)
case "insert":
c.Insert(cmd)
default:
c.ExecuteQuery(cmd)
}
case strings.HasPrefix(lcmd, "use"):
c.use(cmd)
case strings.HasPrefix(lcmd, "insert"):
c.Insert(cmd)
case lcmd == "":
break
default:
c.ExecuteQuery(cmd)
}
return true
}
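Dispatching on the whole first whitespace-separated token, rather than via `strings.HasPrefix` as before, is what the new `TestParseCommand_CommandsSamePrefix` test further down exercises: under prefix matching, an input like `user nodb` was misrouted to the `use` handler. A minimal sketch of the difference (`strings.Fields` stands in for the split-and-filter loop above):
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	lcmd := "user nodb"

	// Old dispatch: prefix matching misfires on commands sharing a prefix.
	fmt.Println(strings.HasPrefix(lcmd, "use")) // true -> ran the "use" handler

	// New dispatch: compare the whole first token instead.
	fmt.Println(strings.Fields(lcmd)[0] == "use") // false -> falls through to ExecuteQuery
}
```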
@ -531,6 +558,24 @@ func (c *CommandLine) ExecuteQuery(query string) error {
return nil
}
func (c *CommandLine) DatabaseToken() (string, error) {
response, err := c.Client.Query(client.Query{Command: "SHOW DIAGNOSTICS for 'registration'"})
if err != nil {
return "", err
}
if response.Error() != nil || len((*response).Results[0].Series) == 0 {
return "", nil
}
// Look for position of "token" column.
for i, s := range (*response).Results[0].Series[0].Columns {
if s == "token" {
return (*response).Results[0].Series[0].Values[0][i].(string), nil
}
}
return "", nil
}
func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) {
switch c.Format {
case "json":
@ -724,6 +769,17 @@ func (c *CommandLine) help() {
`)
}
func (c *CommandLine) history() {
usr, err := user.Current()
// Only load history if we can get the user
if err == nil {
historyFile := filepath.Join(usr.HomeDir, ".influx_history")
if history, err := ioutil.ReadFile(historyFile); err == nil {
fmt.Print(string(history))
}
}
}
func (c *CommandLine) gopher() {
fmt.Println(`
.-::-::://:-::- .:/++/'


@ -20,6 +20,7 @@ func TestParseCommand_CommandsExist(t *testing.T) {
{cmd: "gopher"},
{cmd: "connect"},
{cmd: "help"},
{cmd: "history"},
{cmd: "pretty"},
{cmd: "use"},
{cmd: ""}, // test that a blank command just returns
@ -31,6 +32,42 @@ func TestParseCommand_CommandsExist(t *testing.T) {
}
}
func TestParseCommand_CommandsSamePrefix(t *testing.T) {
t.Parallel()
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data client.Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
m := main.CommandLine{Client: c}
tests := []struct {
cmd string
}{
{cmd: "use db"},
{cmd: "user nodb"},
{cmd: "puse nodb"},
{cmd: ""}, // test that a blank command just returns
}
for _, test := range tests {
if !m.ParseCommand(test.cmd) {
t.Fatalf(`Command failed for %q.`, test.cmd)
}
}
if m.Database != "db" {
t.Fatalf(`Command "use" changed database to %q. Expected db`, m.Database)
}
}
func TestParseCommand_TogglePretty(t *testing.T) {
t.Parallel()
c := main.CommandLine{}
@ -217,3 +254,22 @@ func TestParseCommand_InsertInto(t *testing.T) {
}
}
}
func TestParseCommand_History(t *testing.T) {
t.Parallel()
c := main.CommandLine{}
tests := []struct {
cmd string
}{
{cmd: "history"},
{cmd: " history"},
{cmd: "history "},
{cmd: "History "},
}
for _, test := range tests {
if !c.ParseCommand(test.cmd) {
t.Fatalf(`Command "history" failed for %q.`, test.cmd)
}
}
}


@ -353,7 +353,8 @@ func cmdDumpTsm1(opts *tsdmDumpOpts) {
encoded := buf[9:]
v, err := tsm1.DecodeBlock(buf)
var v []tsm1.Value
err := tsm1.DecodeBlock(buf, &v)
if err != nil {
fmt.Printf("error: %v\n", err.Error())
os.Exit(1)


@ -38,8 +38,6 @@ func main() {
return
}
fmt.Printf("%#v\n", cfg.Write)
if *batchSize != 0 {
cfg.Write.BatchSize = *batchSize
}
@ -64,8 +62,6 @@ func main() {
cfg.Write.Precision = *precision
}
fmt.Printf("%#v\n", cfg.Write)
d := make(chan struct{})
seriesQueryResults := make(chan runner.QueryResults)


@ -158,6 +158,7 @@ func (cmd *Command) unpackMeta(mr *snapshot.MultiReader, sf snapshot.File, confi
store := meta.NewStore(config.Meta)
store.RaftListener = newNopListener()
store.ExecListener = newNopListener()
store.RPCListener = newNopListener()
// Determine advertised address.
_, port, err := net.SplitHostPort(config.Meta.BindAddress)
@ -172,6 +173,7 @@ func (cmd *Command) unpackMeta(mr *snapshot.MultiReader, sf snapshot.File, confi
return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err)
}
store.Addr = addr
store.RemoteAddr = addr
// Open the meta store.
if err := store.Open(); err != nil {
@ -246,5 +248,12 @@ func (ln *nopListener) Accept() (net.Conn, error) {
return nil, errors.New("listener closing")
}
func (ln *nopListener) Close() error { close(ln.closing); return nil }
func (ln *nopListener) Close() error {
if ln.closing != nil {
close(ln.closing)
ln.closing = nil
}
return nil
}
func (ln *nopListener) Addr() net.Addr { return nil }


@ -69,8 +69,10 @@ func NewConfig() *Config {
c.Monitor = monitor.NewConfig()
c.Subscriber = subscriber.NewConfig()
c.HTTPD = httpd.NewConfig()
c.Graphites = []graphite.Config{graphite.NewConfig()}
c.Collectd = collectd.NewConfig()
c.OpenTSDB = opentsdb.NewConfig()
c.UDPs = []udp.Config{udp.NewConfig()}
c.ContinuousQuery = continuous_querier.NewConfig()
c.Retention = retention.NewConfig()
@ -108,12 +110,12 @@ func NewDemoConfig() (*Config, error) {
func (c *Config) Validate() error {
if c.Meta.Dir == "" {
return errors.New("Meta.Dir must be specified")
} else if c.Data.Dir == "" {
return errors.New("Data.Dir must be specified")
} else if c.HintedHandoff.Dir == "" {
return errors.New("HintedHandoff.Dir must be specified")
} else if c.Data.WALDir == "" {
return errors.New("Data.WALDir must be specified")
}
if err := c.Data.Validate(); err != nil {
return err
}
for _, g := range c.Graphites {


@ -1,17 +1,16 @@
package run
import (
"bytes"
"fmt"
"log"
"net"
"net/http"
"os"
"runtime"
"runtime/pprof"
"strings"
"time"
"github.com/influxdb/enterprise-client/v1"
"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/monitor"
@ -129,6 +128,7 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
// Create the hinted handoff service
s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaStore)
s.HintedHandoff.Monitor = s.Monitor
// Create the Subscriber service
s.Subscriber = subscriber.NewService(c.Subscriber)
@ -384,10 +384,6 @@ func (s *Server) Open() error {
// Wait for the store to initialize.
<-s.MetaStore.Ready()
if err := s.Monitor.Open(); err != nil {
return fmt.Errorf("open monitor: %v", err)
}
// Open TSDB store.
if err := s.TSDBStore.Open(); err != nil {
return fmt.Errorf("open tsdb store: %s", err)
@ -403,6 +399,16 @@ func (s *Server) Open() error {
return fmt.Errorf("open subscriber: %s", err)
}
// Open the points writer service
if err := s.PointsWriter.Open(); err != nil {
return fmt.Errorf("open points writer: %s", err)
}
// Open the monitor service
if err := s.Monitor.Open(); err != nil {
return fmt.Errorf("open monitor: %v", err)
}
for _, service := range s.Services {
if err := service.Open(); err != nil {
return fmt.Errorf("open service: %s", err)
@ -443,6 +449,10 @@ func (s *Server) Close() error {
s.Monitor.Close()
}
if s.PointsWriter != nil {
s.PointsWriter.Close()
}
if s.HintedHandoff != nil {
s.HintedHandoff.Close()
}
@ -511,18 +521,28 @@ func (s *Server) reportServer() {
return
}
json := fmt.Sprintf(`[{
"name":"reports",
"columns":["os", "arch", "version", "server_id", "cluster_id", "num_series", "num_measurements", "num_databases"],
"points":[["%s", "%s", "%s", "%x", "%x", "%d", "%d", "%d"]]
}]`, runtime.GOOS, runtime.GOARCH, s.buildInfo.Version, s.MetaStore.NodeID(), clusterID, numSeries, numMeasurements, numDatabases)
data := bytes.NewBufferString(json)
cl := client.New("")
usage := client.Usage{
Product: "influxdb",
Data: []client.UsageData{
{
Values: client.Values{
"os": runtime.GOOS,
"arch": runtime.GOARCH,
"version": s.buildInfo.Version,
"server_id": s.MetaStore.NodeID(),
"cluster_id": clusterID,
"num_series": numSeries,
"num_measurements": numMeasurements,
"num_databases": numDatabases,
},
},
},
}
log.Printf("Sending anonymous usage statistics to m.influxdb.com")
client := http.Client{Timeout: time.Duration(5 * time.Second)}
go client.Post("http://m.influxdb.com:8086/db/reporting/series?u=reporter&p=influxdb", "application/json", data)
go cl.Save(usage)
}
// monitorErrorChan reads an error channel and resends it through the server.


@ -53,7 +53,6 @@ func OpenServer(c *run.Config, joinURLs string) *Server {
if err := s.Open(); err != nil {
panic(err.Error())
}
return s
}
@ -77,12 +76,24 @@ func OpenServerWithVersion(c *run.Config, version string) *Server {
return &s
}
// OpenDefaultServer opens a test server with a default database & retention policy.
func OpenDefaultServer(c *run.Config, joinURLs string) *Server {
s := OpenServer(c, joinURLs)
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
panic(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
panic(err)
}
return s
}
// Close shuts down the server and removes all temporary paths.
func (s *Server) Close() {
s.Server.Close()
os.RemoveAll(s.Config.Meta.Dir)
os.RemoveAll(s.Config.Data.Dir)
os.RemoveAll(s.Config.HintedHandoff.Dir)
s.Server.Close()
}
// URL returns the base URL for the httpd endpoint.
@ -180,6 +191,15 @@ func (s *Server) Write(db, rp, body string, params url.Values) (results string,
return string(MustReadAll(resp.Body)), nil
}
// MustWrite executes a write to the server. Panic on error.
func (s *Server) MustWrite(db, rp, body string, params url.Values) string {
results, err := s.Write(db, rp, body, params)
if err != nil {
panic(err)
}
return results
}
// NewConfig returns the default config with temporary paths.
func NewConfig() *run.Config {
c := run.NewConfig()
@ -347,6 +367,7 @@ func configureLogging(s *Server) {
s.HintedHandoff.SetLogger(nullLogger)
s.Monitor.SetLogger(nullLogger)
s.QueryExecutor.SetLogger(nullLogger)
s.Subscriber.SetLogger(nullLogger)
for _, service := range s.Services {
if service, ok := service.(logSetter); ok {
service.SetLogger(nullLogger)


@ -8,6 +8,8 @@ import (
"strings"
"testing"
"time"
"github.com/influxdb/influxdb/cluster"
)
// Ensure that HTTP responses include the InfluxDB version.
@ -76,6 +78,16 @@ func TestServer_DatabaseCommands(t *testing.T) {
command: `DROP DATABASE db1`,
exp: `{"results":[{}]}`,
},
&Query{
name: "drop database should error if it does not exists",
command: `DROP DATABASE db1`,
exp: `{"results":[{"error":"database not found: db1"}]}`,
},
&Query{
name: "drop database should not error with non-existing database db1 WITH IF EXISTS",
command: `DROP DATABASE IF EXISTS db1`,
exp: `{"results":[{}]}`,
},
&Query{
name: "show database should have no results",
command: `SHOW DATABASES`,
@ -769,6 +781,39 @@ func TestServer_Write_LineProtocol_Integer(t *testing.T) {
}
}
// Ensure the server returns a partial write response when some points fail to parse. Also validate that
// the successfully parsed points can be queried.
func TestServer_Write_LineProtocol_Partial(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
points := []string{
"cpu,host=server01 value=100 " + strconv.FormatInt(now.UnixNano(), 10),
"cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 20),
"cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 30),
}
if res, err := s.Write("db0", "rp0", strings.Join(points, "\n"), nil); err == nil {
t.Fatal("expected error. got nil", err)
} else if exp := ``; exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
} else if exp := "partial write"; !strings.Contains(err.Error(), exp) {
t.Fatalf("unexpected error: exp\nexp: %v\ngot: %v", exp, err)
}
// Verify the data was written.
if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {
t.Fatal(err)
} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
}
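Client-side, the error text is currently the only signal that a write was partial; the test above relies on it. A sketch of how a caller might branch on it, assuming `body` holds line-protocol points:

```go
// Sketch: distinguishing a partial write from a total failure.
if _, err := s.Write("db0", "rp0", body, nil); err != nil {
	if strings.Contains(err.Error(), "partial write") {
		// Parsed points are already stored and queryable; log and move on.
		log.Printf("partial write, some points dropped: %s", err)
	} else {
		log.Fatalf("write failed entirely: %s", err)
	}
}
```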
// Ensure the server can query with default databases (via param) and default retention policy
func TestServer_Query_DefaultDBAndRP(t *testing.T) {
t.Parallel()
@ -1937,70 +1982,15 @@ func TestServer_Query_Regex(t *testing.T) {
}
}
func TestServer_Query_AggregatesCommon(t *testing.T) {
func TestServer_Query_Aggregates_Int(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
writes := []string{
fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()),
fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()),
fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()),
}
test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")
test.write = strings.Join([]string{
fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
// int64
@ -2010,12 +2000,82 @@ func TestServer_Query_AggregatesCommon(t *testing.T) {
command: `SELECT STDDEV(value) FROM int`,
exp: `{"results":[{"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_IntMax(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "large mean and stddev - int",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEAN(value), STDDEV(value) FROM intmax`,
exp: `{"results":[{"series":[{"name":"intmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxInt64() + `,0]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_IntMany(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()),
fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "mean and stddev - int",
params: url.Values{"db": []string{"db0"}},
@ -2106,6 +2166,176 @@ func TestServer_Query_AggregatesCommon(t *testing.T) {
command: `SELECT COUNT(DISTINCT host) FROM intmany`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_IntMany_GroupBy(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()),
fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "max order by time with time specified group by 10s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(10s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`,
},
&Query{
name: "max order by time without time specified group by 30s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`,
},
&Query{
name: "max order by time with time specified group by 30s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`,
},
&Query{
name: "min order by time without time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "min order by time with time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "first order by time without time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "first order by time with time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "last order by time without time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`,
},
&Query{
name: "last order by time with time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_IntMany_OrderByDesc(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()),
fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "aggregate order by time desc",
params: url.Values{"db": []string{"db0"}},
command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_IntOverlap(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "aggregation with no interval - int",
params: url.Values{"db": []string{"db0"}},
@ -2137,20 +2367,81 @@ func TestServer_Query_AggregatesCommon(t *testing.T) {
command: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`,
exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean","div"],"values":[["1970-01-01T00:00:00Z",50,25,2]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",100,100,1]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_FloatSingle(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
// floatmax is written here too, since the "large mean and stddev" query below reads it.
fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "stddev with just one point - float",
params: url.Values{"db": []string{"db0"}},
command: `SELECT STDDEV(value) FROM floatsingle`,
exp: `{"results":[{"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`,
},
&Query{
name: "large mean and stddev - float",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEAN(value), STDDEV(value) FROM floatmax`,
exp: `{"results":[{"series":[{"name":"floatmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxFloat64() + `,0]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_FloatMany(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "mean and stddev - float",
params: url.Values{"db": []string{"db0"}},
@ -2235,6 +2526,40 @@ func TestServer_Query_AggregatesCommon(t *testing.T) {
command: `SELECT COUNT(DISTINCT host) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_FloatOverlap(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "aggregation with no interval - float",
params: url.Values{"db": []string{"db0"}},
@ -2265,7 +2590,127 @@ func TestServer_Query_AggregatesCommon(t *testing.T) {
command: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`,
exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_Load(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "group by multiple dimensions",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value) FROM load GROUP BY region, host`,
exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`,
},
&Query{
name: "group by multiple dimensions",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value)*2 FROM load`,
exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`,
},
&Query{
name: "group by multiple dimensions",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value)/2 FROM load`,
exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_CPU(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
&Query{
name: "aggregation with WHERE and AND",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_Aggregates_String(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
test := NewTest("db0", "rp0")
test.write = strings.Join([]string{
fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()),
}, "\n")
test.addQueries([]*Query{
// strings
&Query{
name: "STDDEV on string data - string",
@ -2303,98 +2748,6 @@ func TestServer_Query_AggregatesCommon(t *testing.T) {
command: `SELECT LAST(value) FROM stringdata`,
exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["2000-01-01T00:00:04Z","last"]]}]}]}`,
},
// general queries
&Query{
name: "group by multiple dimensions",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value) FROM load GROUP BY region, host`,
exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`,
},
&Query{
name: "aggregation with WHERE and AND",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`,
},
// Mathematics
&Query{
name: "group by multiple dimensions",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value)*2 FROM load`,
exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`,
},
&Query{
name: "group by multiple dimensions",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value)/2 FROM load`,
exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`,
},
// group by
&Query{
name: "max order by time with time specified group by 10s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(10s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`,
},
&Query{
name: "max order by time without time specified group by 30s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`,
},
&Query{
name: "max order by time with time specified group by 30s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`,
},
&Query{
name: "min order by time without time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "min order by time with time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "first order by time without time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "first order by time with time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`,
},
&Query{
name: "last order by time without time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`,
},
&Query{
name: "last order by time with time specified group by 15s",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`,
},
// order by time desc
&Query{
name: "aggregate order by time desc",
params: url.Values{"db": []string{"db0"}},
command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`,
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`,
},
}...)
for i, query := range test.queries {
@ -2899,6 +3252,11 @@ func TestServer_Query_TopInt(t *testing.T) {
t.Logf("SKIP: %s", query.name)
continue
}
println(">>>>", query.name)
if query.name != `top - memory - host tag with limit 2` { // FIXME: temporary
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
@ -4948,3 +5306,33 @@ func TestServer_Query_IntoTarget(t *testing.T) {
}
}
}
// This test reproduces a data race that occurred when closing the
// Subscriber points channel while writes were in-flight in the PointsWriter.
func TestServer_ConcurrentPointsWriter_Subscriber(t *testing.T) {
t.Parallel()
s := OpenDefaultServer(NewConfig(), "")
defer s.Close()
// goroutine to write points
done := make(chan struct{})
go func() {
for {
select {
case <-done:
return
default:
wpr := &cluster.WritePointsRequest{
Database: "db0",
RetentionPolicy: "rp0",
}
s.PointsWriter.WritePoints(wpr)
}
}
}()
time.Sleep(10 * time.Millisecond)
close(done)
// Race occurs on s.Close()
}

View File

@ -48,7 +48,9 @@ reporting-disabled = false
# Controls the engine type for new shards. Options are b1, bz1, or tsm1.
# b1 is the 0.9.2 storage engine, bz1 is the 0.9.3 and 0.9.4 engine.
# tsm1 is the 0.9.5 engine
# tsm1 is the 0.9.5 engine and is currently EXPERIMENTAL. Until 0.9.5 is
# actually released, data written into a tsm1 engine may need to be wiped
# between upgrades.
# engine ="bz1"
# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
@ -85,6 +87,34 @@ reporting-disabled = false
# log any sensitive data contained within a query.
# query-log-enabled = true
###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###
[hinted-handoff]
enabled = true
dir = "/var/opt/influxdb/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0
# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
# If any error occurs, it will backoff in an exponential manner, until the interval
# reaches retry-max-interval. Once writes to all nodes are successfully completed the
# interval will reset to retry-interval.
retry-interval = "1s"
retry-max-interval = "1m"
# Interval between checks for data that should be purged. Data is purged from
# hinted-handoff queues for two reasons: 1) the data is older than the max age, or
# 2) the target node has been dropped from the cluster. Note, however, that data is
# never purged until it reaches max-age, whether the target node was dropped or not.
purge-interval = "1h"
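The retry schedule in the comments above is a doubling backoff clamped at retry-max-interval, reset on success. A sketch with hypothetical names:

```go
// Sketch of the hinted-handoff retry schedule described above.
// retryInterval ("1s"), retryMaxInterval ("1m") and retryWrites are
// hypothetical stand-ins for the configured values and the retry work.
interval := retryInterval
for {
	if err := retryWrites(); err != nil {
		interval *= 2 // back off exponentially on any error...
		if interval > retryMaxInterval {
			interval = retryMaxInterval // ...but never beyond the cap
		}
	} else {
		interval = retryInterval // all nodes caught up: reset to base rate
	}
	time.Sleep(interval)
}
```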
###
### [cluster]
###
@ -106,6 +136,17 @@ reporting-disabled = false
enabled = true
check-interval = "30m"
###
### [shard-precreation]
###
### Controls the precreation of shards, so they are created before data arrives.
### Only shards that will exist in the future, at time of creation, are precreated.
[shard-precreation]
enabled = true
check-interval = "10m"
advance-period = "30m"
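Per the comment, only shard groups that will exist in the future are precreated, and advance-period bounds how far ahead. Roughly, with hypothetical names:

```go
// Rough sketch of the shard-precreation check described above.
func shouldPrecreate(nextStart, now time.Time, advancePeriod time.Duration) bool {
	// Only future shard groups qualify, and only those starting
	// within advance-period from now.
	return nextStart.After(now) && nextStart.Before(now.Add(advancePeriod))
}
```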
###
### Controls the system self-monitoring, statistics and diagnostics.
###
@ -171,6 +212,7 @@ reporting-disabled = false
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
## "name-schema" configures tag names for parsing the metric name from graphite protocol;
## separated by `name-separator`.
@ -211,6 +253,7 @@ reporting-disabled = false
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
###
### [opentsdb]
@ -254,6 +297,7 @@ reporting-disabled = false
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
###
### [continuous_queries]
@ -268,25 +312,3 @@ reporting-disabled = false
recompute-no-older-than = "10m"
compute-runs-per-interval = 10
compute-no-more-than = "2m"
###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###
[hinted-handoff]
enabled = true
dir = "/var/opt/influxdb/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0
# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
# If any error occurs, it will backoff in an exponential manner, until the interval
# reaches retry-max-interval. Once writes to all nodes are successfully completed the
# interval will reset to retry-interval.
retry-interval = "1s"
retry-max-interval = "1m"

View File

@ -145,6 +145,10 @@ func (i *Importer) processDDL(scanner *bufio.Scanner) {
if strings.HasPrefix(line, "#") {
continue
}
// Skip blank lines
if strings.TrimSpace(line) == "" {
continue
}
i.queryExecutor(line)
}
}
@ -162,8 +166,14 @@ func (i *Importer) processDML(scanner *bufio.Scanner) {
if strings.HasPrefix(line, "#") {
continue
}
// Skip blank lines
if strings.TrimSpace(line) == "" {
continue
}
i.batchAccumulator(line, start)
}
// Call batchWrite one last time to flush anything out in the batch
i.batchWrite()
}
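The trailing batchWrite matters because batchAccumulator only flushes full batches; without it, up to batchSize-1 tail lines would never be written. The shape, sketched with a hypothetical flush helper:

```go
// Sketch of the importer's accumulate/flush shape (flush is hypothetical).
batch := make([]string, 0, batchSize)
for scanner.Scan() {
	line := scanner.Text()
	if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" {
		continue // skip comments and blank lines, as above
	}
	batch = append(batch, line)
	if len(batch) == batchSize {
		flush(batch)
		batch = batch[:0]
	}
}
flush(batch) // the tail rarely lands exactly on a batch boundary
```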
func (i *Importer) execute(command string) {
@ -185,14 +195,7 @@ func (i *Importer) queryExecutor(command string) {
func (i *Importer) batchAccumulator(line string, start time.Time) {
i.batch = append(i.batch, line)
if len(i.batch) == batchSize {
if e := i.batchWrite(); e != nil {
log.Println("error writing batch: ", e)
// Output failed lines to STDOUT so users can capture lines that failed to import
fmt.Println(strings.Join(i.batch, "\n"))
i.failedInserts += len(i.batch)
} else {
i.totalInserts += len(i.batch)
}
i.batchWrite()
i.batch = i.batch[:0]
// Give some status feedback every 100000 lines processed
processed := i.totalInserts + i.failedInserts
@ -204,7 +207,7 @@ func (i *Importer) batchAccumulator(line string, start time.Time) {
}
}
func (i *Importer) batchWrite() error {
func (i *Importer) batchWrite() {
// Accumulate the batch size to see how many points we have written this second
i.throttlePointsWritten += len(i.batch)
@ -226,11 +229,20 @@ func (i *Importer) batchWrite() error {
// Decrement the batch size back out as it is going to get called again
i.throttlePointsWritten -= len(i.batch)
return i.batchWrite()
i.batchWrite()
return
}
_, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency)
if e != nil {
log.Println("error writing batch: ", e)
// Output failed lines to STDOUT so users can capture lines that failed to import
fmt.Println(strings.Join(i.batch, "\n"))
i.failedInserts += len(i.batch)
} else {
i.totalInserts += len(i.batch)
}
i.throttlePointsWritten = 0
i.lastWrite = time.Now()
return e
return
}

View File

@ -84,15 +84,17 @@ _cpu_stats
```
ALL ALTER ANY AS ASC BEGIN
BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT
DELETE DESC DESTINATIONS DROP DURATION END
EXISTS EXPLAIN FIELD FROM GRANT GROUP
IF IN INNER INSERT INTO KEY
DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP
DURATION END EXISTS EXPLAIN FIELD FOR
FORCE FROM GRANT GRANTS GROUP IF
IN INF INNER INSERT INTO KEY
KEYS LIMIT SHOW MEASUREMENT MEASUREMENTS NOT
OFFSET ON ORDER PASSWORD POLICY POLICIES
PRIVILEGES QUERIES QUERY READ REPLICATION RETENTION
REVOKE SELECT SERIES SLIMIT SOFFSET SUBSCRIPTION
SUBSCRIPTIONS TAG TO USER USERS VALUES
WHERE WITH WRITE
REVOKE SELECT SERIES SERVER SERVERS SET
SHARDS SLIMIT SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS
TAG TO USER USERS VALUES WHERE
WITH WRITE
```
## Literals

View File

@ -340,12 +340,19 @@ func (s *CreateDatabaseStatement) RequiredPrivileges() ExecutionPrivileges {
type DropDatabaseStatement struct {
// Name of the database to be dropped.
Name string
// IfExists indicates whether to return without error if the database
// does not exist.
IfExists bool
}
// String returns a string representation of the drop database statement.
func (s *DropDatabaseStatement) String() string {
var buf bytes.Buffer
_, _ = buf.WriteString("DROP DATABASE ")
if s.IfExists {
_, _ = buf.WriteString("IF EXISTS ")
}
_, _ = buf.WriteString(s.Name)
return buf.String()
}
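Round-tripping through the parser shows the flag at work; from outside the package:

```go
// IF EXISTS round-trip through the influxql parser.
stmt := influxql.MustParseStatement(`DROP DATABASE IF EXISTS testdb`)
dd := stmt.(*influxql.DropDatabaseStatement)
fmt.Println(dd.IfExists) // true
fmt.Println(dd.String()) // DROP DATABASE IF EXISTS testdb
```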

View File

@ -1458,6 +1458,16 @@ func (p *Parser) parseCreateDatabaseStatement() (*CreateDatabaseStatement, error
func (p *Parser) parseDropDatabaseStatement() (*DropDatabaseStatement, error) {
stmt := &DropDatabaseStatement{}
// Look for "IF EXISTS"
if tok, _, _ := p.scanIgnoreWhitespace(); tok == IF {
if err := p.parseTokens([]Token{EXISTS}); err != nil {
return nil, err
}
stmt.IfExists = true
} else {
p.unscan()
}
// Parse the name of the database to be dropped.
lit, err := p.parseIdent()
if err != nil {

View File

@ -1224,8 +1224,18 @@ func TestParser_ParseStatement(t *testing.T) {
// DROP DATABASE statement
{
s: `DROP DATABASE testdb`,
stmt: &influxql.DropDatabaseStatement{Name: "testdb"},
s: `DROP DATABASE testdb`,
stmt: &influxql.DropDatabaseStatement{
Name: "testdb",
IfExists: false,
},
},
{
s: `DROP DATABASE IF EXISTS testdb`,
stmt: &influxql.DropDatabaseStatement{
Name: "testdb",
IfExists: true,
},
},
// DROP MEASUREMENT statement
@ -1599,6 +1609,8 @@ func TestParser_ParseStatement(t *testing.T) {
{s: `CREATE DATABASE IF NOT`, err: `found EOF, expected EXISTS at line 1, char 24`},
{s: `CREATE DATABASE IF NOT EXISTS`, err: `found EOF, expected identifier at line 1, char 31`},
{s: `DROP DATABASE`, err: `found EOF, expected identifier at line 1, char 15`},
{s: `DROP DATABASE IF`, err: `found EOF, expected EXISTS at line 1, char 18`},
{s: `DROP DATABASE IF EXISTS`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`},
{s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`},
{s: `DROP RETENTION POLICY "1h.cpu"`, err: `found EOF, expected ON at line 1, char 31`},

View File

@ -33,9 +33,6 @@ var (
// ErrNodeUnableToDropSingleNode is returned if the node being dropped is the last
// node in the cluster
ErrNodeUnableToDropFinalNode = newError("unable to drop the final node in a cluster")
// ErrNodeRaft is returned when attempting an operation prohibited for a Raft-node.
ErrNodeRaft = newError("node is a Raft node")
)
var (
@ -70,7 +67,7 @@ var (
// ErrRetentionPolicyDurationTooLow is returned when updating a retention
// policy that has a duration lower than the allowed minimum.
ErrRetentionPolicyDurationTooLow = newError(fmt.Sprintf("retention policy duration must be at least %s",
RetentionPolicyMinDuration))
MinRetentionPolicyDuration))
// ErrReplicationFactorTooLow is returned when the replication factor is not in an
// acceptable range.

View File

@ -42,6 +42,7 @@ It has these top-level messages:
UpdateNodeCommand
CreateSubscriptionCommand
DropSubscriptionCommand
RemovePeerCommand
Response
ResponseHeader
ErrorResponse
@ -119,6 +120,7 @@ const (
Command_UpdateNodeCommand Command_Type = 19
Command_CreateSubscriptionCommand Command_Type = 21
Command_DropSubscriptionCommand Command_Type = 22
Command_RemovePeerCommand Command_Type = 23
)
var Command_Type_name = map[int32]string{
@ -143,6 +145,7 @@ var Command_Type_name = map[int32]string{
19: "UpdateNodeCommand",
21: "CreateSubscriptionCommand",
22: "DropSubscriptionCommand",
23: "RemovePeerCommand",
}
var Command_Type_value = map[string]int32{
"CreateNodeCommand": 1,
@ -166,6 +169,7 @@ var Command_Type_value = map[string]int32{
"UpdateNodeCommand": 19,
"CreateSubscriptionCommand": 21,
"DropSubscriptionCommand": 22,
"RemovePeerCommand": 23,
}
func (x Command_Type) Enum() *Command_Type {
@ -1368,6 +1372,38 @@ var E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{
Tag: "bytes,122,opt,name=command",
}
type RemovePeerCommand struct {
ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
Addr *string `protobuf:"bytes,2,req,name=Addr" json:"Addr,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *RemovePeerCommand) Reset() { *m = RemovePeerCommand{} }
func (m *RemovePeerCommand) String() string { return proto.CompactTextString(m) }
func (*RemovePeerCommand) ProtoMessage() {}
func (m *RemovePeerCommand) GetID() uint64 {
if m != nil && m.ID != nil {
return *m.ID
}
return 0
}
func (m *RemovePeerCommand) GetAddr() string {
if m != nil && m.Addr != nil {
return *m.Addr
}
return ""
}
var E_RemovePeerCommand_Command = &proto.ExtensionDesc{
ExtendedType: (*Command)(nil),
ExtensionType: (*RemovePeerCommand)(nil),
Field: 123,
Name: "internal.RemovePeerCommand.command",
Tag: "bytes,123,opt,name=command",
}
type Response struct {
OK *bool `protobuf:"varint,1,req" json:"OK,omitempty"`
Error *string `protobuf:"bytes,2,opt" json:"Error,omitempty"`
@ -1598,4 +1634,5 @@ func init() {
proto.RegisterExtension(E_UpdateNodeCommand_Command)
proto.RegisterExtension(E_CreateSubscriptionCommand_Command)
proto.RegisterExtension(E_DropSubscriptionCommand_Command)
proto.RegisterExtension(E_RemovePeerCommand_Command)
}

View File

@ -114,6 +114,7 @@ message Command {
UpdateNodeCommand = 19;
CreateSubscriptionCommand = 21;
DropSubscriptionCommand = 22;
RemovePeerCommand = 23;
}
required Type type = 1;
@ -296,6 +297,14 @@ message DropSubscriptionCommand {
required string RetentionPolicy = 3;
}
message RemovePeerCommand {
extend Command {
optional RemovePeerCommand command = 123;
}
required uint64 ID = 1;
required string Addr = 2;
}
message Response {
required bool OK = 1;
optional string Error = 2;

View File

@ -51,7 +51,7 @@ type Reply interface {
// proxyLeader proxies the connection to the current raft leader
func (r *rpc) proxyLeader(conn *net.TCPConn) {
if r.store.Leader() == "" {
r.sendError(conn, "no leader")
r.sendError(conn, "no leader detected during proxyLeader")
return
}
@ -289,7 +289,7 @@ func (r *rpc) fetchMetaData(blocking bool) (*Data, error) {
// Retrieve the current known leader.
leader := r.store.Leader()
if leader == "" {
return nil, errors.New("no leader")
return nil, errors.New("no leader detected during fetchMetaData")
}
var index, term uint64

View File

@ -28,6 +28,7 @@ type raftState interface {
sync(index uint64, timeout time.Duration) error
setPeers(addrs []string) error
addPeer(addr string) error
removePeer(addr string) error
peers() ([]string, error)
invalidate() error
close() error
@ -91,7 +92,7 @@ func (r *localRaft) invalidate() error {
ms, err := r.store.rpc.fetchMetaData(false)
if err != nil {
return err
return fmt.Errorf("error fetching meta data: %s", err)
}
r.updateMetaData(ms)
@ -208,11 +209,6 @@ func (r *localRaft) close() error {
r.transport = nil
}
if r.raftLayer != nil {
r.raftLayer.Close()
r.raftLayer = nil
}
// Shutdown raft.
if r.raft != nil {
if err := r.raft.Shutdown().Error(); err != nil {
@ -318,6 +314,18 @@ func (r *localRaft) addPeer(addr string) error {
return nil
}
// removePeer removes addr from the list of peers in the cluster.
func (r *localRaft) removePeer(addr string) error {
// Only do this on the leader
if !r.isLeader() {
return errors.New("not the leader")
}
if fut := r.raft.RemovePeer(addr); fut.Error() != nil {
return fut.Error()
}
return nil
}
// setPeers sets a list of peers in the cluster.
func (r *localRaft) setPeers(addrs []string) error {
return r.raft.SetPeers(addrs).Error()
@ -377,7 +385,7 @@ func (r *remoteRaft) updateMetaData(ms *Data) {
func (r *remoteRaft) invalidate() error {
ms, err := r.store.rpc.fetchMetaData(false)
if err != nil {
return err
return fmt.Errorf("error fetching meta data: %s", err)
}
r.updateMetaData(ms)
@ -401,6 +409,11 @@ func (r *remoteRaft) addPeer(addr string) error {
return fmt.Errorf("cannot add peer using remote raft")
}
// removePeer does nothing for remoteRaft.
func (r *remoteRaft) removePeer(addr string) error {
return nil
}
func (r *remoteRaft) peers() ([]string, error) {
return readPeersJSON(filepath.Join(r.store.path, "peers.json"))
}

View File

@ -174,15 +174,6 @@ func (e *StatementExecutor) executeDropServerStatement(q *influxql.DropServerSta
return &influxql.Result{Err: ErrNodeNotFound}
}
// Dropping only non-Raft nodes supported.
peers, err := e.Store.Peers()
if err != nil {
return &influxql.Result{Err: err}
}
if contains(peers, ni.Host) {
return &influxql.Result{Err: ErrNodeRaft}
}
err = e.Store.DeleteNode(q.NodeID, q.Force)
return &influxql.Result{Err: err}
}
@ -369,9 +360,15 @@ func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShards
rows := []*models.Row{}
for _, di := range dis {
row := &models.Row{Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name}
row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name}
for _, rpi := range di.RetentionPolicies {
for _, sgi := range rpi.ShardGroups {
// Shards associated with deleted shard groups are effectively deleted.
// Don't list them.
if sgi.Deleted() {
continue
}
for _, si := range sgi.Shards {
ownerIDs := make([]uint64, len(si.Owners))
for i, owner := range si.Owners {
@ -380,6 +377,9 @@ func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShards
row.Values = append(row.Values, []interface{}{
si.ID,
di.Name,
rpi.Name,
sgi.ID,
sgi.StartTime.UTC().Format(time.RFC3339),
sgi.EndTime.UTC().Format(time.RFC3339),
sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),

View File

@ -166,8 +166,12 @@ func TestStatementExecutor_ExecuteStatement_DropServer(t *testing.T) {
}, nil
}
// Ensure Raft nodes cannot be dropped.
if res := e.ExecuteStatement(influxql.MustParseStatement(`DROP SERVER 1`)); res.Err != meta.ErrNodeRaft {
e.Store.DeleteNodeFn = func(id uint64, force bool) error {
return nil
}
// Ensure Raft nodes can be dropped.
if res := e.ExecuteStatement(influxql.MustParseStatement(`DROP SERVER 1`)); res.Err != nil {
t.Fatalf("unexpected error: %s", res.Err)
}
@ -970,9 +974,11 @@ func TestStatementExecutor_ExecuteStatement_ShowShards(t *testing.T) {
Name: "foo",
RetentionPolicies: []meta.RetentionPolicyInfo{
{
Name: "rpi_foo",
Duration: time.Second,
ShardGroups: []meta.ShardGroupInfo{
{
ID: 66,
StartTime: time.Unix(0, 0),
EndTime: time.Unix(1, 0),
Shards: []meta.ShardInfo{
@ -1001,10 +1007,10 @@ func TestStatementExecutor_ExecuteStatement_ShowShards(t *testing.T) {
} else if !reflect.DeepEqual(res.Series, models.Rows{
{
Name: "foo",
Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"},
Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"},
Values: [][]interface{}{
{uint64(1), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", "1,2,3"},
{uint64(2), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", ""},
{uint64(1), "foo", "rpi_foo", uint64(66), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", "1,2,3"},
{uint64(2), "foo", "rpi_foo", uint64(66), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", ""},
},
},
}) {

View File

@ -46,7 +46,6 @@ const ExecMagic = "EXEC"
const (
AutoCreateRetentionPolicyName = "default"
AutoCreateRetentionPolicyPeriod = 0
RetentionPolicyMinDuration = time.Hour
// MaxAutoCreatedRetentionPolicyReplicaN is the maximum replication factor that will
// be set for auto-created retention policies.
@ -230,7 +229,6 @@ func (s *Store) Open() error {
return nil
}(); err != nil {
s.close()
return err
}
@ -375,6 +373,9 @@ func (s *Store) joinCluster() error {
}
func (s *Store) enableLocalRaft() error {
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.raftState.(*localRaft); ok {
return nil
}
@ -395,15 +396,16 @@ func (s *Store) enableRemoteRaft() error {
}
func (s *Store) changeState(state raftState) error {
if err := s.raftState.close(); err != nil {
return err
}
if s.raftState != nil {
if err := s.raftState.close(); err != nil {
return err
}
// Clear out any persistent state
if err := s.raftState.remove(); err != nil {
return err
// Clear out any persistent state
if err := s.raftState.remove(); err != nil {
return err
}
}
s.raftState = state
if err := s.raftState.open(); err != nil {
@ -454,15 +456,34 @@ func (s *Store) close() error {
}
s.opened = false
// Notify goroutines of close.
close(s.closing)
// FIXME(benbjohnson): s.wg.Wait()
// Close our exec listener
if err := s.ExecListener.Close(); err != nil {
s.Logger.Printf("error closing ExecListener %s", err)
}
// Close our RPC listener
if err := s.RPCListener.Close(); err != nil {
s.Logger.Printf("error closing ExecListener %s", err)
}
if s.raftState != nil {
s.raftState.close()
s.raftState = nil
}
// Because a goroutine could have already fired in the time it took to acquire the lock,
// it could then try to acquire another lock and deadlock.
// For that reason, we release our lock and signal the close so that
// all goroutines can exit cleanly and fulfill their contract to the wait group.
s.mu.Unlock()
// Notify goroutines of close.
close(s.closing)
s.wg.Wait()
// Now that all goroutines are cleaned up, re-acquire the lock to do the final cleanup and exit.
s.mu.Lock()
s.raftState = nil
return nil
}
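The unlock/signal/wait/re-lock sequence above is the load-bearing part of this change; distilled, under the same assumptions (goroutines that may themselves take the mutex before checking the closing channel):

```go
// Distilled sketch of the shutdown pattern used by close() above.
func shutdown(mu *sync.Mutex, closing chan struct{}, wg *sync.WaitGroup) {
	mu.Lock()
	// ... stop listeners, mark closed ...

	// Release the lock before signalling so in-flight goroutines that
	// still need it can observe the close and exit instead of deadlocking.
	mu.Unlock()
	close(closing)
	wg.Wait()

	// All goroutines are done; re-acquire for the final teardown.
	mu.Lock()
	defer mu.Unlock()
	// ... nil out raft state ...
}
```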
@ -519,7 +540,9 @@ func (s *Store) createLocalNode() error {
}
// Set ID locally.
s.mu.Lock()
s.id = ni.ID
s.mu.Unlock()
s.Logger.Printf("Created local node: id=%d, host=%s", s.id, s.RemoteAddr)
@ -578,9 +601,6 @@ func (s *Store) Err() <-chan error { return s.err }
func (s *Store) IsLeader() bool {
s.mu.RLock()
defer s.mu.RUnlock()
if s.raftState == nil {
return false
}
return s.raftState.isLeader()
}
@ -619,6 +639,7 @@ func (s *Store) serveExecListener() {
for {
// Accept next TCP connection.
var err error
conn, err := s.ExecListener.Accept()
if err != nil {
if strings.Contains(err.Error(), "connection closed") {
@ -631,6 +652,12 @@ func (s *Store) serveExecListener() {
// Handle connection in a separate goroutine.
s.wg.Add(1)
go s.handleExecConn(conn)
select {
case <-s.closing:
return
default:
}
}
}
@ -739,6 +766,12 @@ func (s *Store) serveRPCListener() {
defer s.wg.Done()
s.rpc.handleRPCConn(conn)
}()
select {
case <-s.closing:
return
default:
}
}
}
@ -829,12 +862,23 @@ func (s *Store) DeleteNode(id uint64, force bool) error {
return ErrNodeNotFound
}
return s.exec(internal.Command_DeleteNodeCommand, internal.E_DeleteNodeCommand_Command,
err := s.exec(internal.Command_DeleteNodeCommand, internal.E_DeleteNodeCommand_Command,
&internal.DeleteNodeCommand{
ID: proto.Uint64(id),
Force: proto.Bool(force),
},
)
if err != nil {
return err
}
// Need to send a second message to remove the peer
return s.exec(internal.Command_RemovePeerCommand, internal.E_RemovePeerCommand_Command,
&internal.RemovePeerCommand{
ID: proto.Uint64(id),
Addr: proto.String(ni.Host),
},
)
}
// Database returns a database by name.
@ -975,7 +1019,7 @@ func (s *Store) RetentionPolicies(database string) (a []RetentionPolicyInfo, err
// CreateRetentionPolicy creates a new retention policy for a database.
func (s *Store) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) (*RetentionPolicyInfo, error) {
if rpi.Duration < RetentionPolicyMinDuration && rpi.Duration != 0 {
if rpi.Duration < MinRetentionPolicyDuration && rpi.Duration != 0 {
return nil, ErrRetentionPolicyDurationTooLow
}
if err := s.exec(internal.Command_CreateRetentionPolicyCommand, internal.E_CreateRetentionPolicyCommand_Command,
@ -1443,10 +1487,10 @@ func (s *Store) PrecreateShardGroups(from, to time.Time) error {
// Create successive shard group.
nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond)
if newGroup, err := s.CreateShardGroupIfNotExists(di.Name, rp.Name, nextShardGroupTime); err != nil {
s.Logger.Printf("failed to create successive shard group for group %d: %s",
s.Logger.Printf("failed to precreate successive shard group for group %d: %s",
g.ID, err.Error())
} else {
s.Logger.Printf("new shard group %d successfully created for database %s, retention policy %s",
s.Logger.Printf("new shard group %d successfully precreated for database %s, retention policy %s",
newGroup.ID, di.Name, rp.Name)
}
}
@ -1539,7 +1583,7 @@ func (s *Store) remoteExec(b []byte) error {
// Retrieve the current known leader.
leader := s.raftState.leader()
if leader == "" {
return errors.New("no leader")
return errors.New("no leader detected during remoteExec")
}
// Create a connection to the leader.
@ -1650,6 +1694,8 @@ func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
err := func() interface{} {
switch cmd.GetType() {
case internal.Command_RemovePeerCommand:
return fsm.applyRemovePeerCommand(&cmd)
case internal.Command_CreateNodeCommand:
return fsm.applyCreateNodeCommand(&cmd)
case internal.Command_DeleteNodeCommand:
@ -1705,6 +1751,33 @@ func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
return err
}
func (fsm *storeFSM) applyRemovePeerCommand(cmd *internal.Command) interface{} {
ext, _ := proto.GetExtension(cmd, internal.E_RemovePeerCommand_Command)
v := ext.(*internal.RemovePeerCommand)
id := v.GetID()
addr := v.GetAddr()
// Only do this if you are the leader
if fsm.raftState.isLeader() {
// Remove that node from the peer list
fsm.Logger.Printf("removing peer for node id %d, %s", id, addr)
if err := fsm.raftState.removePeer(addr); err != nil {
fsm.Logger.Printf("error removing peer: %s", err)
}
}
// If this is the node being shut down, close raft
if fsm.id == id {
fsm.Logger.Printf("shutting down raft for %s", addr)
if err := fsm.raftState.close(); err != nil {
fsm.Logger.Printf("failed to shut down raft: %s", err)
}
}
return nil
}
func (fsm *storeFSM) applyCreateNodeCommand(cmd *internal.Command) interface{} {
ext, _ := proto.GetExtension(cmd, internal.E_CreateNodeCommand_Command)
v := ext.(*internal.CreateNodeCommand)

View File

@ -11,6 +11,7 @@ import (
"reflect"
"sort"
"strconv"
"sync"
"testing"
"time"
@ -971,6 +972,7 @@ func TestCluster_OpenRaft(t *testing.T) {
// Ensure a multi-node cluster can restart
func TestCluster_Restart(t *testing.T) {
t.Skip("ISSUE https://github.com/influxdb/influxdb/issues/4723")
// Start a single node.
c := MustOpenCluster(1)
defer c.Close()
@ -1041,6 +1043,17 @@ func TestCluster_Restart(t *testing.T) {
// ensure all the nodes see the same metastore data
assertDatabaseReplicated(t, c)
var wg sync.WaitGroup
wg.Add(len(c.Stores))
for _, s := range c.Stores {
go func(s *Store) {
defer wg.Done()
if err := s.Close(); err != nil {
t.Fatalf("error closing store %s", err)
}
}(s)
}
wg.Wait()
}
// Store is a test wrapper for meta.Store.
@ -1057,7 +1070,9 @@ func NewStore(c *meta.Config) *Store {
s := &Store{
Store: meta.NewStore(c),
}
s.Logger = log.New(&s.Stderr, "", log.LstdFlags)
if !testing.Verbose() {
s.Logger = log.New(&s.Stderr, "", log.LstdFlags)
}
s.SetHashPasswordFn(mockHashPassword)
return s
}
@ -1219,9 +1234,16 @@ func (c *Cluster) Open() error {
// Close shuts down all stores.
func (c *Cluster) Close() error {
var wg sync.WaitGroup
wg.Add(len(c.Stores))
for _, s := range c.Stores {
s.Close()
go func(s *Store) {
defer wg.Done()
s.Close()
}(s)
}
wg.Wait()
return nil
}

View File

@ -4,8 +4,10 @@ import (
"bytes"
"fmt"
"hash/fnv"
"math"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdb/influxdb/pkg/escape"
@ -55,6 +57,11 @@ type Point interface {
// is a timestamp associated with the point then it will be specified in the
// given unit
PrecisionString(precision string) string
// RoundedString returns a string representation of the point object. If there
// is a timestamp associated with the point, it will be rounded to the
// given duration.
RoundedString(d time.Duration) string
}
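// Illustrative sketch (not part of the diff): RoundedString rounds the
// timestamp before formatting. For a point built via MustNewPoint (introduced
// below in this change) with nanosecond timestamp 946730096789012345:
//
//	pt := MustNewPoint("cpu", nil, Fields{"value": 1.0},
//		time.Unix(0, 946730096789012345))
//	pt.RoundedString(time.Second) // "cpu value=1 946730097000000000"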
// Points represents a sortable list of points by timestamp.
@ -112,7 +119,8 @@ func ParsePointsString(buf string) ([]Point, error) {
}
// ParsePoints returns a slice of Points from a text representation of a point
// with each point separated by newlines.
// with each point separated by newlines. If any points fail to parse, a non-nil error
// will be returned in addition to the points that parsed successfully.
func ParsePoints(buf []byte) ([]Point, error) {
return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
}
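// Illustrative sketch (not part of the diff): under the relaxed contract above,
// a hypothetical caller can keep whatever parsed and still surface the failures:
//
//	pts, err := ParsePoints(buf)
//	if err != nil {
//		log.Printf("some points failed to parse: %v", err)
//	}
//	// pts holds only the successfully parsed points.
//	for _, pt := range pts {
//		_ = pt
//	}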
@ -120,8 +128,9 @@ func ParsePoints(buf []byte) ([]Point, error) {
func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
points := []Point{}
var (
pos int
block []byte
pos int
block []byte
failed []string
)
for {
pos, block = scanLine(buf, pos)
@ -150,15 +159,19 @@ func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision strin
pt, err := parsePoint(block[start:len(block)], defaultTime, precision)
if err != nil {
return nil, fmt.Errorf("unable to parse '%s': %v", string(block[start:len(block)]), err)
failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err))
} else {
points = append(points, pt)
}
points = append(points, pt)
if pos >= len(buf) {
break
}
}
if len(failed) > 0 {
return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
}
return points, nil
}
@ -614,14 +627,11 @@ func scanNumber(buf []byte, i int) (int, error) {
continue
}
// NaN is a valid float
// NaN is an unsupported value
if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
if (buf[i+1] == 'a' || buf[i+1] == 'A') && (buf[i+2] == 'N' || buf[i+2] == 'n') {
i += 3
continue
}
return i, fmt.Errorf("invalid number")
}
if !isNumeric(buf[i]) {
return i, fmt.Errorf("invalid number")
}
@ -721,16 +731,11 @@ func scanBoolean(buf []byte, i int) (int, []byte, error) {
// skipWhitespace returns the end position within buf, starting at i after
// scanning over spaces in tags
func skipWhitespace(buf []byte, i int) int {
for {
if i >= len(buf) {
return i
for i < len(buf) {
if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
break
}
if buf[i] == ' ' || buf[i] == '\t' {
i += 1
continue
}
break
i++
}
return i
}
@ -954,13 +959,33 @@ func unescapeStringField(in string) string {
return string(out)
}
// NewPoint returns a new point with the given measurement name, tags, fields and timestamp
func NewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
// an unsupported field value (NaN) is passed, this function returns an error.
func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) {
for key, value := range fields {
if fv, ok := value.(float64); ok {
// Ensure the caller validates and handles invalid field values
if math.IsNaN(fv) {
return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
}
}
}
return &point{
key: MakeKey([]byte(name), tags),
time: time,
fields: fields.MarshalBinary(),
}, nil
}
// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
// an unsupported field value (NaN) is passed, this function panics.
func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
pt, err := NewPoint(name, tags, fields, time)
if err != nil {
panic(err.Error())
}
return pt
}
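// Illustrative sketch (not part of the diff): NewPoint now reports NaN float
// fields as errors, so callers can validate instead of panicking; MustNewPoint
// is the panic-on-error convenience used mainly by the tests:
//
//	pt, err := NewPoint("cpu", Tags{"host": "a"},
//		Fields{"value": math.NaN()}, time.Now())
//	if err != nil {
//		// drop or repair the point; NaN is unsupported
//	}
//	_ = pt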
func (p *point) Data() []byte {
@ -1123,6 +1148,14 @@ func (p *point) PrecisionString(precision string) string {
p.UnixNano()/p.GetPrecisionMultiplier(precision))
}
func (p *point) RoundedString(d time.Duration) string {
if p.Time().IsZero() {
return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
}
return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
p.time.Round(d).UnixNano())
}
func (p *point) unmarshalBinary() Fields {
return newFieldsFromBinary(p.fields)
}

View File

@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"math"
"math/rand"
"reflect"
"strconv"
"strings"
@ -201,7 +202,7 @@ func TestParsePointNoFields(t *testing.T) {
}
func TestParsePointNoTimestamp(t *testing.T) {
test(t, "cpu value=1", models.NewPoint("cpu", nil, nil, time.Unix(0, 0)))
test(t, "cpu value=1", models.MustNewPoint("cpu", nil, nil, time.Unix(0, 0)))
}
func TestParsePointMissingQuote(t *testing.T) {
@ -524,7 +525,7 @@ func TestParsePointScientificIntInvalid(t *testing.T) {
func TestParsePointUnescape(t *testing.T) {
test(t, `foo\,bar value=1i`,
models.NewPoint(
models.MustNewPoint(
"foo,bar", // comma in the name
models.Tags{},
models.Fields{
@ -534,7 +535,7 @@ func TestParsePointUnescape(t *testing.T) {
// commas in measurement name
test(t, `cpu\,main,regions=east\,west value=1.0`,
models.NewPoint(
models.MustNewPoint(
"cpu,main", // comma in the name
models.Tags{
"regions": "east,west",
@ -546,7 +547,7 @@ func TestParsePointUnescape(t *testing.T) {
// spaces in measurement name
test(t, `cpu\ load,region=east value=1.0`,
models.NewPoint(
models.MustNewPoint(
"cpu load", // space in the name
models.Tags{
"region": "east",
@ -558,7 +559,7 @@ func TestParsePointUnescape(t *testing.T) {
// commas in tag names
test(t, `cpu,region\,zone=east value=1.0`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"region,zone": "east", // comma in the tag key
},
@ -569,7 +570,7 @@ func TestParsePointUnescape(t *testing.T) {
// spaces in tag names
test(t, `cpu,region\ zone=east value=1.0`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"region zone": "east", // comma in the tag key
},
@ -580,7 +581,7 @@ func TestParsePointUnescape(t *testing.T) {
// commas in tag values
test(t, `cpu,regions=east\,west value=1.0`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"regions": "east,west", // comma in the tag value
},
@ -591,7 +592,7 @@ func TestParsePointUnescape(t *testing.T) {
// spaces in tag values
test(t, `cpu,regions=east\ west value=1.0`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"regions": "east west", // comma in the tag value
},
@ -602,7 +603,7 @@ func TestParsePointUnescape(t *testing.T) {
// commas in field keys
test(t, `cpu,regions=east value\,ms=1.0`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"regions": "east",
},
@ -613,7 +614,7 @@ func TestParsePointUnescape(t *testing.T) {
// spaces in field keys
test(t, `cpu,regions=east value\ ms=1.0`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"regions": "east",
},
@ -624,7 +625,7 @@ func TestParsePointUnescape(t *testing.T) {
// tag with no value
test(t, `cpu,regions=east value="1"`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"regions": "east",
"foobar": "",
@ -636,7 +637,7 @@ func TestParsePointUnescape(t *testing.T) {
// commas in field values
test(t, `cpu,regions=east value="1,0"`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"regions": "east",
},
@ -647,7 +648,7 @@ func TestParsePointUnescape(t *testing.T) {
// random character escaped
test(t, `cpu,regions=eas\t value=1.0`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"regions": "eas\\t",
@ -659,7 +660,7 @@ func TestParsePointUnescape(t *testing.T) {
// field keys using escape char.
test(t, `cpu \a=1i`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -669,7 +670,7 @@ func TestParsePointUnescape(t *testing.T) {
// measurement, tag and tag value with equals
test(t, `cpu=load,equals\=foo=tag\=value value=1i`,
models.NewPoint(
models.MustNewPoint(
"cpu=load", // Not escaped
models.Tags{
"equals=foo": "tag=value", // Tag and value unescaped
@ -684,7 +685,7 @@ func TestParsePointUnescape(t *testing.T) {
func TestParsePointWithTags(t *testing.T) {
test(t,
"cpu,host=serverA,region=us-east value=1.0 1000000000",
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{"host": "serverA", "region": "us-east"},
models.Fields{"value": 1.0}, time.Unix(1, 0)))
}
@ -698,7 +699,7 @@ func TestParsPointWithDuplicateTags(t *testing.T) {
func TestParsePointWithStringField(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"host": "serverA",
"region": "us-east",
@ -712,7 +713,7 @@ func TestParsePointWithStringField(t *testing.T) {
)
test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`,
models.NewPoint("cpu",
models.MustNewPoint("cpu",
models.Tags{
"host": "serverA",
"region": "us-east",
@ -727,7 +728,7 @@ func TestParsePointWithStringField(t *testing.T) {
func TestParsePointWithStringWithSpaces(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -743,7 +744,7 @@ func TestParsePointWithStringWithSpaces(t *testing.T) {
func TestParsePointWithStringWithNewline(t *testing.T) {
test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000",
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -760,7 +761,7 @@ func TestParsePointWithStringWithNewline(t *testing.T) {
func TestParsePointWithStringWithCommas(t *testing.T) {
// escaped comma
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -775,7 +776,7 @@ func TestParsePointWithStringWithCommas(t *testing.T) {
// non-escaped comma
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -792,7 +793,7 @@ func TestParsePointWithStringWithCommas(t *testing.T) {
func TestParsePointQuotedMeasurement(t *testing.T) {
// non-escaped comma
test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`,
models.NewPoint(
models.MustNewPoint(
`"cpu"`,
models.Tags{
"host": "serverA",
@ -807,7 +808,7 @@ func TestParsePointQuotedMeasurement(t *testing.T) {
func TestParsePointQuotedTags(t *testing.T) {
test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
`"host"`: `"serverA"`,
@ -831,7 +832,7 @@ func TestParsePointsUnbalancedQuotedTags(t *testing.T) {
}
// Expected " in the tag value
exp := models.NewPoint("baz", models.Tags{"mytag": `"a`},
exp := models.MustNewPoint("baz", models.Tags{"mytag": `"a`},
models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125))
if pts[0].String() != exp.String() {
@ -839,7 +840,7 @@ func TestParsePointsUnbalancedQuotedTags(t *testing.T) {
}
// Expected two points to ensure we did not overscan the line
exp = models.NewPoint("baz", models.Tags{"mytag": `a`},
exp = models.MustNewPoint("baz", models.Tags{"mytag": `a`},
models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126))
if pts[1].String() != exp.String() {
@ -851,7 +852,7 @@ func TestParsePointsUnbalancedQuotedTags(t *testing.T) {
func TestParsePointEscapedStringsAndCommas(t *testing.T) {
// non-escaped comma and quotes
test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -865,7 +866,7 @@ func TestParsePointEscapedStringsAndCommas(t *testing.T) {
// escaped comma and quotes
test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -880,7 +881,7 @@ func TestParsePointEscapedStringsAndCommas(t *testing.T) {
func TestParsePointWithStringWithEquals(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -896,7 +897,7 @@ func TestParsePointWithStringWithEquals(t *testing.T) {
func TestParsePointWithStringWithBackslash(t *testing.T) {
test(t, `cpu value="test\\\"" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -906,7 +907,7 @@ func TestParsePointWithStringWithBackslash(t *testing.T) {
)
test(t, `cpu value="test\\" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -916,7 +917,7 @@ func TestParsePointWithStringWithBackslash(t *testing.T) {
)
test(t, `cpu value="test\\\"" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -926,7 +927,7 @@ func TestParsePointWithStringWithBackslash(t *testing.T) {
)
test(t, `cpu value="test\"" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -938,7 +939,7 @@ func TestParsePointWithStringWithBackslash(t *testing.T) {
func TestParsePointWithBoolField(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -962,7 +963,7 @@ func TestParsePointWithBoolField(t *testing.T) {
func TestParsePointUnicodeString(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{
"host": "serverA",
@ -977,7 +978,7 @@ func TestParsePointUnicodeString(t *testing.T) {
func TestParsePointNegativeTimestamp(t *testing.T) {
test(t, `cpu value=1 -1`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -989,7 +990,7 @@ func TestParsePointNegativeTimestamp(t *testing.T) {
func TestParsePointMaxTimestamp(t *testing.T) {
test(t, `cpu value=1 9223372036854775807`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -1001,7 +1002,7 @@ func TestParsePointMaxTimestamp(t *testing.T) {
func TestParsePointMinTimestamp(t *testing.T) {
test(t, `cpu value=1 -9223372036854775807`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -1040,7 +1041,7 @@ func TestParsePointInvalidTimestamp(t *testing.T) {
func TestNewPointFloatWithoutDecimal(t *testing.T) {
test(t, `cpu value=1 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -1051,7 +1052,7 @@ func TestNewPointFloatWithoutDecimal(t *testing.T) {
}
func TestNewPointNegativeFloat(t *testing.T) {
test(t, `cpu value=-0.64 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -1063,7 +1064,7 @@ func TestNewPointNegativeFloat(t *testing.T) {
func TestNewPointFloatNoDecimal(t *testing.T) {
test(t, `cpu value=1. 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -1075,7 +1076,7 @@ func TestNewPointFloatNoDecimal(t *testing.T) {
func TestNewPointFloatScientific(t *testing.T) {
test(t, `cpu value=6.632243e+06 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -1087,7 +1088,7 @@ func TestNewPointFloatScientific(t *testing.T) {
func TestNewPointLargeInteger(t *testing.T) {
test(t, `cpu value=6632243i 1000000000`,
models.NewPoint(
models.MustNewPoint(
"cpu",
models.Tags{},
models.Fields{
@ -1097,36 +1098,21 @@ func TestNewPointLargeInteger(t *testing.T) {
)
}
func TestNewPointNaN(t *testing.T) {
test(t, `cpu value=NaN 1000000000`,
models.NewPoint(
"cpu",
models.Tags{},
models.Fields{
"value": math.NaN(),
},
time.Unix(1, 0)),
)
func TestParsePointNaN(t *testing.T) {
_, err := models.ParsePointsString("cpu value=NaN 1000000000")
if err == nil {
t.Fatalf("ParsePoints expected error, got nil")
}
test(t, `cpu value=nAn 1000000000`,
models.NewPoint(
"cpu",
models.Tags{},
models.Fields{
"value": math.NaN(),
},
time.Unix(1, 0)),
)
_, err = models.ParsePointsString("cpu value=nAn 1000000000")
if err == nil {
t.Fatalf("ParsePoints expected error, got nil")
}
test(t, `nan value=NaN`,
models.NewPoint(
"nan",
models.Tags{},
models.Fields{
"value": math.NaN(),
},
time.Unix(0, 0)),
)
_, err = models.ParsePointsString("cpu value=NaN")
if err == nil {
t.Fatalf("ParsePoints expected error, got nil")
}
}
func TestNewPointLargeNumberOfTags(t *testing.T) {
@ -1201,7 +1187,7 @@ func TestParsePointToString(t *testing.T) {
t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
}
pt = models.NewPoint("cpu", models.Tags{"host": "serverA", "region": "us-east"},
pt = models.MustNewPoint("cpu", models.Tags{"host": "serverA", "region": "us-east"},
models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
time.Unix(1, 0))
@ -1398,19 +1384,19 @@ cpu,host=serverA,region=us-east value=1.0 946730096789012345`,
func TestNewPointEscaped(t *testing.T) {
// commas
pt := models.NewPoint("cpu,main", models.Tags{"tag,bar": "value"}, models.Fields{"name,bar": 1.0}, time.Unix(0, 0))
pt := models.MustNewPoint("cpu,main", models.Tags{"tag,bar": "value"}, models.Fields{"name,bar": 1.0}, time.Unix(0, 0))
if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
// spaces
pt = models.NewPoint("cpu main", models.Tags{"tag bar": "value"}, models.Fields{"name bar": 1.0}, time.Unix(0, 0))
pt = models.MustNewPoint("cpu main", models.Tags{"tag bar": "value"}, models.Fields{"name bar": 1.0}, time.Unix(0, 0))
if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
// equals
pt = models.NewPoint("cpu=main", models.Tags{"tag=bar": "value=foo"}, models.Fields{"name=bar": 1.0}, time.Unix(0, 0))
pt = models.MustNewPoint("cpu=main", models.Tags{"tag=bar": "value=foo"}, models.Fields{"name=bar": 1.0}, time.Unix(0, 0))
if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
@ -1418,14 +1404,14 @@ func TestNewPointEscaped(t *testing.T) {
func TestNewPointUnhandledType(t *testing.T) {
// nil value
pt := models.NewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0))
pt := models.MustNewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0))
if exp := `cpu value= 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
// unsupported type gets stored as string
now := time.Unix(0, 0).UTC()
pt = models.NewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0))
pt = models.MustNewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0))
if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
@ -1500,7 +1486,7 @@ func TestPrecisionString(t *testing.T) {
}
for _, test := range tests {
pt := models.NewPoint("cpu", nil, tags, tm)
pt := models.MustNewPoint("cpu", nil, tags, tm)
act := pt.PrecisionString(test.precision)
if act != test.exp {
@ -1509,3 +1495,81 @@ func TestPrecisionString(t *testing.T) {
}
}
}
func TestRoundedString(t *testing.T) {
fields := map[string]interface{}{"value": float64(1)}
tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z")
tests := []struct {
name string
precision time.Duration
exp string
}{
{
name: "no precision",
precision: time.Duration(0),
exp: "cpu value=1 946730096789012345",
},
{
name: "nanosecond precision",
precision: time.Nanosecond,
exp: "cpu value=1 946730096789012345",
},
{
name: "microsecond precision",
precision: time.Microsecond,
exp: "cpu value=1 946730096789012000",
},
{
name: "millisecond precision",
precision: time.Millisecond,
exp: "cpu value=1 946730096789000000",
},
{
name: "second precision",
precision: time.Second,
exp: "cpu value=1 946730097000000000",
},
{
name: "minute precision",
precision: time.Minute,
exp: "cpu value=1 946730100000000000",
},
{
name: "hour precision",
precision: time.Hour,
exp: "cpu value=1 946731600000000000",
},
}
for _, test := range tests {
pt := models.MustNewPoint("cpu", nil, tags, tm)
act := pt.RoundedString(test.precision)
if act != test.exp {
t.Errorf("%s: RoundedString() mismatch:\n actual: %v\n exp: %v",
test.name, act, test.exp)
}
}
}
func TestParsePointsStringWithExtraBuffer(t *testing.T) {
b := make([]byte, 70*5000)
buf := bytes.NewBuffer(b)
key := "cpu,host=A,region=uswest"
buf.WriteString(fmt.Sprintf("%s value=%.3f 1\n", key, rand.Float64()))
points, err := models.ParsePointsString(buf.String())
if err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
pointKey := string(points[0].Key())
if len(key) != len(pointKey) {
t.Fatalf("expected length of both keys are same but got %d and %d", len(key), len(pointKey))
}
if key != pointKey {
t.Fatalf("expected both keys are same but got %s and %s", key, pointKey)
}
}

View File

@ -368,7 +368,12 @@ func (m *Monitor) storeStatistics() {
points := make(models.Points, 0, len(stats))
for _, s := range stats {
points = append(points, models.NewPoint(s.Name, s.Tags, s.Values, time.Now().Truncate(time.Second)))
pt, err := models.NewPoint(s.Name, s.Tags, s.Values, time.Now().Truncate(time.Second))
if err != nil {
m.Logger.Printf("Dropping point %v: %v", s.Name, err)
continue
}
points = append(points, pt)
}
err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{

View File

@ -267,7 +267,7 @@ do_build() {
fi
date=`date -u --iso-8601=seconds`
go install $RACE -a -ldflags="-X main.version=$version -X main.branch=$branch -X main.commit=$commit -X main.buildTime='$date'" ./...
go install $RACE -a -ldflags="-X main.version=$version -X main.branch=$branch -X main.commit=$commit -X main.buildTime=$date" ./...
if [ $? -ne 0 ]; then
echo "Build failed, unable to create package -- aborting"
cleanup_exit 1

View File

@ -2,6 +2,7 @@ package slices
import "strings"
// Union combines two string sets
func Union(setA, setB []string, ignoreCase bool) []string {
for _, b := range setB {
if ignoreCase {
@ -17,6 +18,7 @@ func Union(setA, setB []string, ignoreCase bool) []string {
return setA
}
// Exists checks if a string is in a set
func Exists(set []string, find string) bool {
for _, s := range set {
if s == find {
@ -26,6 +28,7 @@ func Exists(set []string, find string) bool {
return false
}
// ExistsIgnoreCase checks if a string is in a set but ignores its case
func ExistsIgnoreCase(set []string, find string) bool {
find = strings.ToLower(find)
for _, s := range set {

View File

@ -8,14 +8,14 @@ const (
type Config struct {
Enabled bool `toml:"enabled"`
BindAddress string `toml:"bind-address"`
HttpsEnabled bool `toml:"https-enabled"`
HttpsCertificate string `toml:"https-certificate"`
HTTPSEnabled bool `toml:"https-enabled"`
HTTPSCertificate string `toml:"https-certificate"`
}
func NewConfig() Config {
return Config{
BindAddress: DefaultBindAddress,
HttpsEnabled: false,
HttpsCertificate: "/etc/ssl/influxdb.pem",
HTTPSEnabled: false,
HTTPSCertificate: "/etc/ssl/influxdb.pem",
}
}

View File

@ -24,9 +24,9 @@ https-certificate = "/dev/null"
t.Fatalf("unexpected enabled: %v", c.Enabled)
} else if c.BindAddress != ":8083" {
t.Fatalf("unexpected bind address: %s", c.BindAddress)
} else if c.HttpsEnabled != true {
t.Fatalf("unexpected https enabled: %v", c.HttpsEnabled)
} else if c.HttpsCertificate != "/dev/null" {
t.Fatalf("unexpected https certificate: %v", c.HttpsCertificate)
} else if c.HTTPSEnabled != true {
t.Fatalf("unexpected https enabled: %v", c.HTTPSEnabled)
} else if c.HTTPSCertificate != "/dev/null" {
t.Fatalf("unexpected https certificate: %v", c.HTTPSCertificate)
}
}

View File

@ -29,8 +29,8 @@ type Service struct {
func NewService(c Config) *Service {
return &Service{
addr: c.BindAddress,
https: c.HttpsEnabled,
cert: c.HttpsCertificate,
https: c.HTTPSEnabled,
cert: c.HTTPSCertificate,
err: make(chan error),
logger: log.New(os.Stderr, "[admin] ", log.LstdFlags),
}

View File

@ -2,6 +2,11 @@
The _collectd_ input allows InfluxDB to accept data transmitted in collectd native format. This data is transmitted over UDP.
## A note on UDP/IP OS Buffer sizes
If you're running Linux or FreeBSD, please adjust your OS UDP buffer
size limit, [see here for more details.](../udp/README.md#a-note-on-udpip-os-buffer-sizes)
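
For reference, when a non-zero `read-buffer` is configured the input applies it to the UDP socket at startup, roughly as in this sketch (simplified from the service code in this change):

```
conn, err := net.ListenUDP("udp", addr)
if err != nil {
	return fmt.Errorf("unable to listen on UDP: %s", err)
}
if config.ReadBuffer != 0 {
	// The OS must allow a buffer this large (see the linked note on OS
	// limits); otherwise SetReadBuffer fails and the listener exits.
	if err := conn.SetReadBuffer(config.ReadBuffer); err != nil {
		return fmt.Errorf("unable to set UDP read buffer to %d: %s",
			config.ReadBuffer, err)
	}
}
```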
## Configuration
Each collectd input allows the binding address, target database, and target retention policy to be set. If the database does not exist, it will be created automatically when the input is initialized. If the retention policy is not configured, the default retention policy for the database is used. However, if the retention policy is set, it must be explicitly created; the input will not create it automatically.
@ -13,3 +18,18 @@ The path to the collectd types database file may also be set
## Large UDP packets
Please note that UDP packets larger than the standard size of 1452 are dropped at the time of ingestion, so be sure to set `MaxPacketSize` to 1452 in the collectd configuration.
## Config Example
```
[collectd]
enabled = false
bind-address = ":25826" # the bind address
database = "collectd" # Name of the database that will be written to
retention-policy = ""
batch-size = 5000 # will flush if this many points get buffered
batch-pending = 10 # number of batches that may be pending in memory
batch-timeout = "10s"
read-buffer = 0 # UDP read buffer size, 0 means to use OS default
typesdb = "/usr/share/collectd/types.db"
```

View File

@ -7,19 +7,38 @@ import (
)
const (
// DefaultBindAddress is the default port to bind to
DefaultBindAddress = ":25826"
// DefaultDatabase is the default DB to write to
DefaultDatabase = "collectd"
// DefaultRetentionPolicy is the default retention policy of the writes
DefaultRetentionPolicy = ""
DefaultBatchSize = 1000
// DefaultBatchSize is the default write batch size.
DefaultBatchSize = 5000
DefaultBatchPending = 5
// DefaultBatchPending is the default number of pending write batches.
DefaultBatchPending = 10
// DefaultBatchDuration is the default batch timeout.
DefaultBatchDuration = toml.Duration(10 * time.Second)
DefaultTypesDB = "/usr/share/collectd/types.db"
// DefaultReadBuffer is the default buffer size for the UDP listener.
// Sets the size of the operating system's receive buffer associated with
// the UDP traffic. Keep in mind that the OS must be able
// to handle the number set here or the UDP listener will error and exit.
//
// DefaultReadBuffer = 0 means to use the OS default, which is usually too
// small for high UDP performance.
//
// Increasing OS buffer limits:
// Linux: sudo sysctl -w net.core.rmem_max=<read-buffer>
// BSD/Darwin: sudo sysctl -w kern.ipc.maxsockbuf=<read-buffer>
DefaultReadBuffer = 0
)
// Config represents a configuration for the collectd service.
@ -31,6 +50,7 @@ type Config struct {
BatchSize int `toml:"batch-size"`
BatchPending int `toml:"batch-pending"`
BatchDuration toml.Duration `toml:"batch-timeout"`
ReadBuffer int `toml:"read-buffer"`
TypesDB string `toml:"typesdb"`
}
@ -40,6 +60,7 @@ func NewConfig() Config {
BindAddress: DefaultBindAddress,
Database: DefaultDatabase,
RetentionPolicy: DefaultRetentionPolicy,
ReadBuffer: DefaultReadBuffer,
BatchSize: DefaultBatchSize,
BatchPending: DefaultBatchPending,
BatchDuration: DefaultBatchDuration,

View File

@ -22,13 +22,13 @@ const leaderWaitTimeout = 30 * time.Second
// statistics gathered by the collectd service.
const (
statPointsReceived = "points_rx"
statBytesReceived = "bytes_rx"
statPointsParseFail = "points_parse_fail"
statReadFail = "read_fail"
statBatchesTrasmitted = "batches_tx"
statPointsTransmitted = "points_tx"
statBatchesTransmitFail = "batches_tx_fail"
statPointsReceived = "pointsRx"
statBytesReceived = "bytesRx"
statPointsParseFail = "pointsParseFail"
statReadFail = "readFail"
statBatchesTrasmitted = "batchesTx"
statPointsTransmitted = "pointsTx"
statBatchesTransmitFail = "batchesTxFail"
)
// pointsWriter is an internal interface to make testing easier.
@ -53,7 +53,7 @@ type Service struct {
wg sync.WaitGroup
err chan error
stop chan struct{}
ln *net.UDPConn
conn *net.UDPConn
batcher *tsdb.PointBatcher
typesdb gollectd.Types
addr net.Addr
@ -118,13 +118,21 @@ func (s *Service) Open() error {
s.addr = addr
// Start listening
ln, err := net.ListenUDP("udp", addr)
conn, err := net.ListenUDP("udp", addr)
if err != nil {
return fmt.Errorf("unable to listen on UDP: %s", err)
}
s.ln = ln
s.Logger.Println("Listening on UDP: ", ln.LocalAddr().String())
if s.Config.ReadBuffer != 0 {
err = conn.SetReadBuffer(s.Config.ReadBuffer)
if err != nil {
return fmt.Errorf("unable to set UDP read buffer to %d: %s",
s.Config.ReadBuffer, err)
}
}
s.conn = conn
s.Logger.Println("Listening on UDP: ", conn.LocalAddr().String())
// Start the points batcher.
s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, s.Config.BatchPending, time.Duration(s.Config.BatchDuration))
@ -147,8 +155,8 @@ func (s *Service) Close() error {
if s.stop != nil {
close(s.stop)
}
if s.ln != nil {
s.ln.Close()
if s.conn != nil {
s.conn.Close()
}
if s.batcher != nil {
s.batcher.Stop()
@ -157,7 +165,7 @@ func (s *Service) Close() error {
// Release all remaining resources.
s.stop = nil
s.ln = nil
s.conn = nil
s.batcher = nil
s.Logger.Println("collectd UDP closed")
return nil
@ -179,7 +187,7 @@ func (s *Service) Err() chan error { return s.err }
// Addr returns the listener's address. Returns nil if listener is closed.
func (s *Service) Addr() net.Addr {
return s.ln.LocalAddr()
return s.conn.LocalAddr()
}
func (s *Service) serve() {
@ -204,7 +212,7 @@ func (s *Service) serve() {
// Keep processing.
}
n, _, err := s.ln.ReadFromUDP(buffer)
n, _, err := s.conn.ReadFromUDP(buffer)
if err != nil {
s.statMap.Add(statReadFail, 1)
s.Logger.Printf("collectd ReadFromUDP error: %s", err)
@ -293,7 +301,11 @@ func Unmarshal(packet *gollectd.Packet) []models.Point {
if packet.TypeInstance != "" {
tags["type_instance"] = packet.TypeInstance
}
p := models.NewPoint(name, tags, fields, timestamp)
p, err := models.NewPoint(name, tags, fields, timestamp)
// Drop point values of NaN since they are not supported
if err != nil {
continue
}
points = append(points, p)
}

View File

@ -23,9 +23,9 @@ const (
// Statistics for the CQ service.
const (
statQueryOK = "query_ok"
statQueryFail = "query_fail"
statPointsWritten = "points_written"
statQueryOK = "queryOk"
statQueryFail = "queryFail"
statPointsWritten = "pointsWritten"
)
// ContinuousQuerier represents a service that executes continuous queries.

View File

@ -1,10 +1,17 @@
# Configuration
# The graphite Input
## A note on UDP/IP OS Buffer sizes
If you're using the UDP input and running Linux or FreeBSD, please adjust your OS UDP buffer
size limit, [see here for more details.](../udp/README.md#a-note-on-udpip-os-buffer-sizes)
## Configuration
Each Graphite input allows the binding address, target database, and protocol to be set. If the database does not exist, it will be created automatically when the input is initialized. The write-consistency-level can also be set. If any write operations do not meet the configured consistency guarantees, an error will occur and the data will not be indexed. The default consistency-level is `ONE`.
Each Graphite input also performs internal batching of the points it receives, as batched writes to the database are more efficient. With this change the default _batch size_ is 5000, the _pending batch_ factor is 10, and the _batch timeout_ is 1 second. This means the input will write batches of maximum size 5000, but if a batch has not reached 5000 points within 1 second of the first point being added, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch while still building others, as sketched below.
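
As a rough sketch of that flow (assuming the `tsdb.PointBatcher` API used by the service code in this change; `point` and `writePoints` are stand-ins):

```
batcher := tsdb.NewPointBatcher(5000, 10, time.Second)
batcher.Start()

// Parsed points are fed into the batcher as they arrive...
batcher.In() <- point

// ...and full (or timed-out) batches are emitted together for writing.
go func() {
	for batch := range batcher.Out() {
		writePoints(batch) // e.g. via the cluster points writer
	}
}()
```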
# Parsing Metrics
## Parsing Metrics
The graphite plugin allows measurements to be saved using the graphite line protocol. By default, enabling the graphite plugin will allow you to collect metrics and store them using the metric name as the measurement. If you send a metric named `servers.localhost.cpu.loadavg.10`, it will store the full metric name as the measurement with no extracted tags.
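
As a hypothetical sketch of that default behavior (the two-argument `NewParser(templates, defaultTags)` constructor is an assumption based on the parser tests in this change):

```
p, err := graphite.NewParser(nil, nil)
if err != nil {
	// handle parser construction error
}
pt, err := p.Parse("servers.localhost.cpu.loadavg.10 11 1435077219")
if err != nil {
	// malformed lines and NaN/Inf values are rejected here
}
// pt has measurement "servers.localhost.cpu.loadavg.10",
// a "value" field of 11, and no tags.
```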
@ -95,10 +102,12 @@ For example,
servers.localhost.cpu.loadavg.10
servers.host123.elasticsearch.cache_hits 100
servers.host456.mysql.tx_count 10
servers.host789.prod.mysql.tx_count 10
```
* `servers.*` would match all values
* `servers.*.mysql` would match `servers.host456.mysql.tx_count 10`
* `servers.localhost.*` would match `servers.localhost.cpu.loadavg`
* `servers.*.*.mysql` would match `servers.host789.prod.mysql.tx_count 10`
## Default Templates
@ -145,7 +154,7 @@ If you need to add the same set of tags to all metrics, you can define them glob
#]
```
## Customized Config
## Customized Config
```
[[graphite]]
enabled = true
@ -165,3 +174,21 @@ If you need to add the same set of tags to all metrics, you can define them glob
".measurement*",
]
```
## Two Graphite listeners, UDP & TCP, Config
```
[[graphite]]
enabled = true
bind-address = ":2003"
protocol = "tcp"
# consistency-level = "one"
[[graphite]]
enabled = true
bind-address = ":2004" # the bind address
protocol = "udp" # protocol to read via
udp-read-buffer = 8388608 # (8*1024*1024) UDP read buffer size
```

View File

@ -26,21 +26,34 @@ const (
// measurement parts in a template.
DefaultSeparator = "."
// DefaultBatchSize is the default Graphite batch size.
DefaultBatchSize = 1000
// DefaultBatchSize is the default write batch size.
DefaultBatchSize = 5000
// DefaultBatchPending is the default number of pending Graphite batches.
DefaultBatchPending = 5
// DefaultBatchPending is the default number of pending write batches.
DefaultBatchPending = 10
// DefaultBatchTimeout is the default Graphite batch timeout.
DefaultBatchTimeout = time.Second
// DefaultUDPReadBuffer is the default buffer size for the UDP listener.
// Sets the size of the operating system's receive buffer associated with
// the UDP traffic. Keep in mind that the OS must be able
// to handle the number set here or the UDP listener will error and exit.
//
// DefaultUDPReadBuffer = 0 means to use the OS default, which is usually too
// small for high UDP performance.
//
// Increasing OS buffer limits:
// Linux: sudo sysctl -w net.core.rmem_max=<read-buffer>
// BSD/Darwin: sudo sysctl -w kern.ipc.maxsockbuf=<read-buffer>
DefaultUDPReadBuffer = 0
)
// Config represents the configuration for Graphite endpoints.
type Config struct {
Enabled bool `toml:"enabled"`
BindAddress string `toml:"bind-address"`
Database string `toml:"database"`
Enabled bool `toml:"enabled"`
Protocol string `toml:"protocol"`
BatchSize int `toml:"batch-size"`
BatchPending int `toml:"batch-pending"`
@ -49,6 +62,20 @@ type Config struct {
Templates []string `toml:"templates"`
Tags []string `toml:"tags"`
Separator string `toml:"separator"`
UDPReadBuffer int `toml:"udp-read-buffer"`
}
func NewConfig() Config {
return Config{
BindAddress: DefaultBindAddress,
Database: DefaultDatabase,
Protocol: DefaultProtocol,
BatchSize: DefaultBatchSize,
BatchPending: DefaultBatchPending,
BatchTimeout: toml.Duration(DefaultBatchTimeout),
ConsistencyLevel: DefaultConsistencyLevel,
Separator: DefaultSeparator,
}
}
// WithDefaults takes the given config and returns a new config with any required
@ -79,6 +106,9 @@ func (c *Config) WithDefaults() *Config {
if d.Separator == "" {
d.Separator = DefaultSeparator
}
if d.UDPReadBuffer == 0 {
d.UDPReadBuffer = DefaultUDPReadBuffer
}
return &d
}

View File

@ -116,6 +116,10 @@ func (p *Parser) Parse(line string) (models.Point, error) {
return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
}
if math.IsNaN(v) || math.IsInf(v, 0) {
return nil, fmt.Errorf(`field "%s" value: '%v' is unsupported`, fields[0], v)
}
fieldValues := map[string]interface{}{}
if field != "" {
fieldValues[field] = v
@ -150,9 +154,7 @@ func (p *Parser) Parse(line string) (models.Point, error) {
tags[k] = v
}
}
point := models.NewPoint(measurement, tags, fieldValues, timestamp)
return point, nil
return models.NewPoint(measurement, tags, fieldValues, timestamp)
}
// Apply extracts the template fields form the given line and returns the

View File

@ -1,7 +1,6 @@
package graphite_test
import (
"math"
"strconv"
"testing"
"time"
@ -224,22 +223,9 @@ func TestParseNaN(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
pt, err := p.Parse("servers.localhost.cpu_load NaN 1435077219")
if err != nil {
t.Fatalf("parse error: %v", err)
}
exp := models.NewPoint("servers.localhost.cpu_load",
models.Tags{},
models.Fields{"value": math.NaN()},
time.Unix(1435077219, 0))
if exp.String() != pt.String() {
t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
}
if !math.IsNaN(pt.Fields()["value"].(float64)) {
t.Errorf("parse value mismatch: expected NaN")
_, err = p.Parse("servers.localhost.cpu_load NaN 1435077219")
if err == nil {
t.Fatalf("expected error. got nil")
}
}
@ -249,7 +235,7 @@ func TestFilterMatchDefault(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("miss.servers.localhost.cpu_load",
exp := models.MustNewPoint("miss.servers.localhost.cpu_load",
models.Tags{},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -270,7 +256,7 @@ func TestFilterMatchMultipleMeasurement(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu.cpu_load.10",
exp := models.MustNewPoint("cpu.cpu_load.10",
models.Tags{"host": "localhost"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -294,7 +280,7 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_cpu_load_10",
exp := models.MustNewPoint("cpu_cpu_load_10",
models.Tags{"host": "localhost"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -315,7 +301,7 @@ func TestFilterMatchSingle(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -336,7 +322,7 @@ func TestParseNoMatch(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("servers.localhost.memory.VmallocChunk",
exp := models.MustNewPoint("servers.localhost.memory.VmallocChunk",
models.Tags{},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -357,7 +343,7 @@ func TestFilterMatchWildcard(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -380,7 +366,7 @@ func TestFilterMatchExactBeforeWildcard(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -408,7 +394,7 @@ func TestFilterMatchMostLongestFilter(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost", "resource": "cpu"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -435,7 +421,7 @@ func TestFilterMatchMultipleWildcards(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "server01"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -460,7 +446,7 @@ func TestParseDefaultTags(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -484,7 +470,7 @@ func TestParseDefaultTemplateTags(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -508,7 +494,7 @@ func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))
@ -532,7 +518,7 @@ func TestParseTemplateWhitespace(t *testing.T) {
t.Fatalf("unexpected error creating parser, got %v", err)
}
exp := models.NewPoint("cpu_load",
exp := models.MustNewPoint("cpu_load",
models.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
models.Fields{"value": float64(11)},
time.Unix(1435077219, 0))

View File

@ -5,7 +5,6 @@ import (
"expvar"
"fmt"
"log"
"math"
"net"
"os"
"strings"
@ -26,15 +25,15 @@ const (
// statistics gathered by the graphite package.
const (
statPointsReceived = "points_rx"
statBytesReceived = "bytes_rx"
statPointsParseFail = "points_parse_fail"
statPointsUnsupported = "points_unsupported_fail"
statBatchesTrasmitted = "batches_tx"
statPointsTransmitted = "points_tx"
statBatchesTransmitFail = "batches_tx_fail"
statConnectionsActive = "connections_active"
statConnectionsHandled = "connections_handled"
statPointsReceived = "pointsRx"
statBytesReceived = "bytesRx"
statPointsParseFail = "pointsParseFail"
statPointsUnsupported = "pointsUnsupportedFail"
statBatchesTrasmitted = "batchesTx"
statPointsTransmitted = "pointsTx"
statBatchesTransmitFail = "batchesTxFail"
statConnectionsActive = "connsActive"
statConnectionsHandled = "connsHandled"
)
type tcpConnection struct {
@ -56,6 +55,7 @@ type Service struct {
batchPending int
batchTimeout time.Duration
consistencyLevel cluster.ConsistencyLevel
udpReadBuffer int
batcher *tsdb.PointBatcher
parser *Parser
@ -96,6 +96,7 @@ func NewService(c Config) (*Service, error) {
protocol: d.Protocol,
batchSize: d.BatchSize,
batchPending: d.BatchPending,
udpReadBuffer: d.UDPReadBuffer,
batchTimeout: time.Duration(d.BatchTimeout),
logger: log.New(os.Stderr, "[graphite] ", log.LstdFlags),
tcpConnections: make(map[string]*tcpConnection),
@ -295,6 +296,14 @@ func (s *Service) openUDPServer() (net.Addr, error) {
return nil, err
}
if s.udpReadBuffer != 0 {
err = s.udpConn.SetReadBuffer(s.udpReadBuffer)
if err != nil {
return nil, fmt.Errorf("unable to set UDP read buffer to %d: %s",
s.udpReadBuffer, err)
}
}
buf := make([]byte, udpBufferSize)
s.wg.Add(1)
go func() {
@ -325,21 +334,11 @@ func (s *Service) handleLine(line string) {
// Parse it.
point, err := s.parser.Parse(line)
if err != nil {
s.logger.Printf("unable to parse line: %s", err)
s.logger.Printf("unable to parse line: %s: %s", line, err)
s.statMap.Add(statPointsParseFail, 1)
return
}
f, ok := point.Fields()["value"].(float64)
if ok {
// Drop NaN and +/-Inf data points since they are not supported values
if math.IsNaN(f) || math.IsInf(f, 0) {
s.logger.Printf("dropping unsupported value: '%v'", line)
s.statMap.Add(statPointsUnsupported, 1)
return
}
}
s.batcher.In() <- point
}

View File

@ -38,16 +38,17 @@ func Test_ServerGraphiteTCP(t *testing.T) {
WritePointsFn: func(req *cluster.WritePointsRequest) error {
defer wg.Done()
pt, _ := models.NewPoint(
"cpu",
map[string]string{},
map[string]interface{}{"value": 23.456},
time.Unix(now.Unix(), 0))
if req.Database != "graphitedb" {
t.Fatalf("unexpected database: %s", req.Database)
} else if req.RetentionPolicy != "" {
t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
} else if req.Points[0].String() !=
models.NewPoint(
"cpu",
map[string]string{},
map[string]interface{}{"value": 23.456},
time.Unix(now.Unix(), 0)).String() {
} else if req.Points[0].String() != pt.String() {
t.Fatalf("unexpected points: %#v", req.Points[0].String())
}
return nil
},
@ -107,16 +108,16 @@ func Test_ServerGraphiteUDP(t *testing.T) {
WritePointsFn: func(req *cluster.WritePointsRequest) error {
defer wg.Done()
pt, _ := models.NewPoint(
"cpu",
map[string]string{},
map[string]interface{}{"value": 23.456},
time.Unix(now.Unix(), 0))
if req.Database != "graphitedb" {
t.Fatalf("unexpected database: %s", req.Database)
} else if req.RetentionPolicy != "" {
t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
} else if req.Points[0].String() !=
models.NewPoint(
"cpu",
map[string]string{},
map[string]interface{}{"value": 23.456},
time.Unix(now.Unix(), 0)).String() {
} else if req.Points[0].String() != pt.String() {
t.Fatalf("unexpected points: %#v", req.Points[0].String())
}
return nil

View File

@ -28,6 +28,10 @@ const (
// DefaultRetryMaxInterval is the maximum the hinted handoff retry interval
// will ever be.
DefaultRetryMaxInterval = time.Minute
// DefaultPurgeInterval is the amount of time the system waits before attempting
// to purge hinted handoff data due to age or inactive nodes.
DefaultPurgeInterval = time.Hour
)
type Config struct {
@ -38,6 +42,7 @@ type Config struct {
RetryRateLimit int64 `toml:"retry-rate-limit"`
RetryInterval toml.Duration `toml:"retry-interval"`
RetryMaxInterval toml.Duration `toml:"retry-max-interval"`
PurgeInterval toml.Duration `toml:"purge-interval"`
}
func NewConfig() Config {
@ -48,5 +53,6 @@ func NewConfig() Config {
RetryRateLimit: DefaultRetryRateLimit,
RetryInterval: toml.Duration(DefaultRetryInterval),
RetryMaxInterval: toml.Duration(DefaultRetryMaxInterval),
PurgeInterval: toml.Duration(DefaultPurgeInterval),
}
}

View File

@ -18,6 +18,7 @@ retry-max-interval = "100m"
max-size=2048
max-age="20m"
retry-rate-limit=1000
purge-interval = "1h"
`, &c); err != nil {
t.Fatal(err)
}
@ -47,4 +48,8 @@ retry-rate-limit=1000
t.Fatalf("unexpected retry rate limit: got %v, exp %v", c.RetryRateLimit, exp)
}
if exp := time.Hour; c.PurgeInterval.String() != exp.String() {
t.Fatalf("unexpected purge interval: got %v, exp %v", c.PurgeInterval, exp)
}
}

View File

@ -0,0 +1,293 @@
package hh
import (
"encoding/binary"
"expvar"
"fmt"
"io"
"log"
"os"
"strings"
"sync"
"time"
"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/models"
)
// NodeProcessor encapsulates a queue of hinted-handoff data for a node, and the
// transmission of the data to the node.
type NodeProcessor struct {
PurgeInterval time.Duration // Interval between periodic purge checks
RetryInterval time.Duration // Interval between periodic write-to-node attempts.
RetryMaxInterval time.Duration // Max interval between periodic write-to-node attempts.
MaxSize int64 // Maximum size an underlying queue can get.
MaxAge time.Duration // Maximum age queue data can get before purging.
RetryRateLimit int64 // Limits the rate data is sent to node.
nodeID uint64
dir string
mu sync.RWMutex
wg sync.WaitGroup
done chan struct{}
queue *queue
meta metaStore
writer shardWriter
statMap *expvar.Map
Logger *log.Logger
}
// NewNodeProcessor returns a new NodeProcessor for the given node, using dir for
// the hinted-handoff data.
func NewNodeProcessor(nodeID uint64, dir string, w shardWriter, m metaStore) *NodeProcessor {
key := strings.Join([]string{"hh_processor", dir}, ":")
tags := map[string]string{"node": fmt.Sprintf("%d", nodeID), "path": dir}
return &NodeProcessor{
PurgeInterval: DefaultPurgeInterval,
RetryInterval: DefaultRetryInterval,
RetryMaxInterval: DefaultRetryMaxInterval,
MaxSize: DefaultMaxSize,
MaxAge: DefaultMaxAge,
nodeID: nodeID,
dir: dir,
writer: w,
meta: m,
statMap: influxdb.NewStatistics(key, "hh_processor", tags),
Logger: log.New(os.Stderr, "[handoff] ", log.LstdFlags),
}
}
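// Illustrative sketch (not part of the diff): a processor's lifecycle, as the
// tests in this change exercise it, looks roughly like:
//
//	n := NewNodeProcessor(nodeID, dir, shardWriter, metaStore)
//	if err := n.Open(); err != nil {
//		// handle error
//	}
//	// Hint writes destined for the node while it is unreachable.
//	_ = n.WriteShard(shardID, points)
//	// The run loop drains the queue to the node; close when done.
//	_ = n.Close()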
// Open opens the NodeProcessor. It will read and write data present in dir, and
// start transmitting data to the node. A NodeProcessor must be opened before it
// can accept hinted data.
func (n *NodeProcessor) Open() error {
n.mu.Lock()
defer n.mu.Unlock()
if n.done != nil {
// Already open.
return nil
}
n.done = make(chan struct{})
// Create the queue directory if it doesn't already exist.
if err := os.MkdirAll(n.dir, 0700); err != nil {
return fmt.Errorf("mkdir all: %s", err)
}
// Create the queue of hinted-handoff data.
queue, err := newQueue(n.dir, n.MaxSize)
if err != nil {
return err
}
if err := queue.Open(); err != nil {
return err
}
n.queue = queue
n.wg.Add(1)
go n.run()
return nil
}
// Close closes the NodeProcessor, terminating all data transmission to the node.
// When closed it will not accept hinted-handoff data.
func (n *NodeProcessor) Close() error {
n.mu.Lock()
defer n.mu.Unlock()
if n.done == nil {
// Already closed.
return nil
}
close(n.done)
n.wg.Wait()
n.done = nil
return n.queue.Close()
}
// Purge deletes all hinted-handoff data under management by a NodeProcessor.
// The NodeProcessor should be in the closed state before calling this function.
func (n *NodeProcessor) Purge() error {
n.mu.Lock()
defer n.mu.Unlock()
if n.done != nil {
return fmt.Errorf("node processor is open")
}
return os.RemoveAll(n.dir)
}
// WriteShard writes hinted-handoff data for the given shard and node. Since it may manipulate
// hinted-handoff queues, and may be called concurrently, it takes a lock during queue access.
func (n *NodeProcessor) WriteShard(shardID uint64, points []models.Point) error {
n.mu.RLock()
defer n.mu.RUnlock()
if n.done == nil {
return fmt.Errorf("node processor is closed")
}
n.statMap.Add(writeShardReq, 1)
n.statMap.Add(writeShardReqPoints, int64(len(points)))
b := marshalWrite(shardID, points)
return n.queue.Append(b)
}
// LastModified returns the time the NodeProcessor last received hinted-handoff data.
func (n *NodeProcessor) LastModified() (time.Time, error) {
t, err := n.queue.LastModified()
if err != nil {
return time.Time{}, err
}
return t.UTC(), nil
}
// run attempts to send any existing hinted handoff data to the target node. It also purges
// any hinted handoff data older than the configured time.
func (n *NodeProcessor) run() {
defer n.wg.Done()
currInterval := time.Duration(n.RetryInterval)
if currInterval > time.Duration(n.RetryMaxInterval) {
currInterval = time.Duration(n.RetryMaxInterval)
}
for {
select {
case <-n.done:
return
case <-time.After(n.PurgeInterval):
if err := n.queue.PurgeOlderThan(time.Now().Add(-n.MaxAge)); err != nil {
n.Logger.Printf("failed to purge for node %d: %s", n.nodeID, err.Error())
}
case <-time.After(currInterval):
limiter := NewRateLimiter(n.RetryRateLimit)
for {
c, err := n.SendWrite()
if err != nil {
if err == io.EOF {
// No more data, return to configured interval
currInterval = time.Duration(n.RetryInterval)
} else {
currInterval = currInterval * 2
if currInterval > time.Duration(n.RetryMaxInterval) {
currInterval = time.Duration(n.RetryMaxInterval)
}
}
break
}
// Success! Ensure backoff is cancelled.
currInterval = time.Duration(n.RetryInterval)
// Update how many bytes we've sent
limiter.Update(c)
// Block to maintain the throughput rate
time.Sleep(limiter.Delay())
}
}
}
}
// SendWrite attempts to send the current block of hinted data to the target node. If successful,
// it returns the number of bytes it sent and advances to the next block. Otherwise it returns EOF
// when there is no more data or the node is inactive.
func (n *NodeProcessor) SendWrite() (int, error) {
n.mu.RLock()
defer n.mu.RUnlock()
active, err := n.Active()
if err != nil {
return 0, err
}
if !active {
return 0, io.EOF
}
// Get the current block from the queue
buf, err := n.queue.Current()
if err != nil {
return 0, err
}
// unmarshal the byte slice back to shard ID and points
shardID, points, err := unmarshalWrite(buf)
if err != nil {
n.Logger.Printf("unmarshal write failed: %v", err)
// Try to skip it.
if err := n.queue.Advance(); err != nil {
n.Logger.Printf("failed to advance queue for node %d: %s", n.nodeID, err.Error())
}
return 0, err
}
if err := n.writer.WriteShard(shardID, n.nodeID, points); err != nil {
n.statMap.Add(writeNodeReqFail, 1)
return 0, err
}
n.statMap.Add(writeNodeReq, 1)
n.statMap.Add(writeNodeReqPoints, int64(len(points)))
if err := n.queue.Advance(); err != nil {
n.Logger.Printf("failed to advance queue for node %d: %s", n.nodeID, err.Error())
}
return len(buf), nil
}
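// Head returns the position of the head of the node's queue, in the form path:offset.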
func (n *NodeProcessor) Head() string {
qp, err := n.queue.Position()
if err != nil {
return ""
}
return qp.head
}
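// Tail returns the position of the tail of the node's queue, in the form path:offset.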
func (n *NodeProcessor) Tail() string {
qp, err := n.queue.Position()
if err != nil {
return ""
}
return qp.tail
}
// Active returns whether this node processor is for a currently active node.
func (n *NodeProcessor) Active() (bool, error) {
nio, err := n.meta.Node(n.nodeID)
if err != nil {
n.Logger.Printf("failed to determine if node %d is active: %s", n.nodeID, err.Error())
return false, err
}
return nio != nil, nil
}
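// marshalWrite and unmarshalWrite convert between a hinted-handoff block and
// its on-disk form: an 8-byte big-endian shard ID followed by newline-terminated
// line-protocol points. An illustrative round-trip:
//
//	pt := models.MustNewPoint("cpu", models.Tags{"host": "a"}, models.Fields{"value": 1.0}, time.Unix(0, 0))
//	b := marshalWrite(42, []models.Point{pt})
//	shardID, pts, _ := unmarshalWrite(b) // shardID == 42, len(pts) == 1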
func marshalWrite(shardID uint64, points []models.Point) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, shardID)
for _, p := range points {
b = append(b, []byte(p.String())...)
b = append(b, '\n')
}
return b
}
func unmarshalWrite(b []byte) (uint64, []models.Point, error) {
if len(b) < 8 {
return 0, nil, fmt.Errorf("too short: len = %d", len(b))
}
ownerID := binary.BigEndian.Uint64(b[:8])
points, err := models.ParsePoints(b[8:])
return ownerID, points, err
}

View File

@ -0,0 +1,155 @@
package hh
import (
"io"
"io/ioutil"
"os"
"testing"
"time"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
)
type fakeShardWriter struct {
ShardWriteFn func(shardID, nodeID uint64, points []models.Point) error
}
func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Point) error {
return f.ShardWriteFn(shardID, nodeID, points)
}
type fakeMetaStore struct {
NodeFn func(nodeID uint64) (*meta.NodeInfo, error)
}
func (f *fakeMetaStore) Node(nodeID uint64) (*meta.NodeInfo, error) {
return f.NodeFn(nodeID)
}
func TestNodeProcessorSendBlock(t *testing.T) {
dir, err := ioutil.TempDir("", "node_processor_test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
// expected data to be queued and sent to the shardWriter
var expShardID, expNodeID, count = uint64(100), uint64(200), 0
pt := models.MustNewPoint("cpu", models.Tags{"foo": "bar"}, models.Fields{"value": 1.0}, time.Unix(0, 0))
sh := &fakeShardWriter{
ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error {
count += 1
if shardID != expShardID {
t.Errorf("SendWrite() shardID mismatch: got %v, exp %v", shardID, expShardID)
}
if nodeID != expNodeID {
t.Errorf("SendWrite() nodeID mismatch: got %v, exp %v", nodeID, expNodeID)
}
if exp := 1; len(points) != exp {
t.Fatalf("SendWrite() points mismatch: got %v, exp %v", len(points), exp)
}
if points[0].String() != pt.String() {
t.Fatalf("SendWrite() points mismatch:\n got %v\n exp %v", points[0].String(), pt.String())
}
return nil
},
}
metastore := &fakeMetaStore{
NodeFn: func(nodeID uint64) (*meta.NodeInfo, error) {
if nodeID == expNodeID {
return &meta.NodeInfo{}, nil
}
return nil, nil
},
}
n := NewNodeProcessor(expNodeID, dir, sh, metastore)
if n == nil {
t.Fatalf("Failed to create node processor: %v", err)
}
if err := n.Open(); err != nil {
t.Fatalf("Failed to open node processor: %v", err)
}
// Check the active state.
active, err := n.Active()
if err != nil {
t.Fatalf("Failed to check node processor state: %v", err)
}
if !active {
t.Fatalf("Node processor state is unexpected value of: %v", active)
}
// This should queue a write for the active node.
if err := n.WriteShard(expShardID, []models.Point{pt}); err != nil {
t.Fatalf("SendWrite() failed to write points: %v", err)
}
// This should send the write to the shard writer
if _, err := n.SendWrite(); err != nil {
t.Fatalf("SendWrite() failed to write points: %v", err)
}
if exp := 1; count != exp {
t.Fatalf("SendWrite() write count mismatch: got %v, exp %v", count, exp)
}
// All data should have been handled so no writes should be sent again
if _, err := n.SendWrite(); err != nil && err != io.EOF {
t.Fatalf("SendWrite() failed to write points: %v", err)
}
// Count should stay the same
if exp := 1; count != exp {
t.Fatalf("SendWrite() write count mismatch: got %v, exp %v", count, exp)
}
// Make the node inactive.
sh.ShardWriteFn = func(shardID, nodeID uint64, points []models.Point) error {
t.Fatalf("write sent to inactive node")
return nil
}
metastore.NodeFn = func(nodeID uint64) (*meta.NodeInfo, error) {
return nil, nil
}
// Check the active state.
active, err = n.Active()
if err != nil {
t.Fatalf("Failed to check node processor state: %v", err)
}
if active {
t.Fatalf("Node processor state is unexpected value of: %v", active)
}
// This should queue a write for the node.
if err := n.WriteShard(expShardID, []models.Point{pt}); err != nil {
t.Fatalf("SendWrite() failed to write points: %v", err)
}
// This should not send the write to the shard writer since the node is inactive.
if _, err := n.SendWrite(); err != nil && err != io.EOF {
t.Fatalf("SendWrite() failed to write points: %v", err)
}
if exp := 1; count != exp {
t.Fatalf("SendWrite() write count mismatch: got %v, exp %v", count, exp)
}
if err := n.Close(); err != nil {
t.Fatalf("Failed to close node processor: %v", err)
}
// Confirm that purging works ok.
if err := n.Purge(); err != nil {
t.Fatalf("Failed to purge node processor: %v", err)
}
if _, err := os.Stat(dir); !os.IsNotExist(err) {
t.Fatalf("Node processor directory still present after purge")
}
}

View File

@ -1,341 +0,0 @@
package hh
import (
"encoding/binary"
"expvar"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/tsdb"
)
const (
pointsHint = "points_hint"
pointsWrite = "points_write"
bytesWrite = "bytes_write"
writeErr = "write_err"
unmarshalErr = "unmarshal_err"
advanceErr = "advance_err"
currentErr = "current_err"
)
type Processor struct {
mu sync.RWMutex
dir string
maxSize int64
maxAge time.Duration
retryRateLimit int64
queues map[uint64]*queue
meta metaStore
writer shardWriter
metastore metaStore
Logger *log.Logger
// Shard-level and node-level HH stats.
shardStatMaps map[uint64]*expvar.Map
nodeStatMaps map[uint64]*expvar.Map
}
type ProcessorOptions struct {
MaxSize int64
RetryRateLimit int64
}
func NewProcessor(dir string, writer shardWriter, metastore metaStore, options ProcessorOptions) (*Processor, error) {
p := &Processor{
dir: dir,
queues: map[uint64]*queue{},
writer: writer,
metastore: metastore,
Logger: log.New(os.Stderr, "[handoff] ", log.LstdFlags),
shardStatMaps: make(map[uint64]*expvar.Map),
nodeStatMaps: make(map[uint64]*expvar.Map),
}
p.setOptions(options)
// Create the root directory if it doesn't already exist.
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, fmt.Errorf("mkdir all: %s", err)
}
if err := p.loadQueues(); err != nil {
return p, err
}
return p, nil
}
func (p *Processor) setOptions(options ProcessorOptions) {
p.maxSize = DefaultMaxSize
if options.MaxSize != 0 {
p.maxSize = options.MaxSize
}
p.retryRateLimit = DefaultRetryRateLimit
if options.RetryRateLimit != 0 {
p.retryRateLimit = options.RetryRateLimit
}
}
func (p *Processor) loadQueues() error {
files, err := ioutil.ReadDir(p.dir)
if err != nil {
return err
}
for _, file := range files {
nodeID, err := strconv.ParseUint(file.Name(), 10, 64)
if err != nil {
return err
}
if _, err := p.addQueue(nodeID); err != nil {
return err
}
}
return nil
}
// addQueue adds a hinted-handoff queue for the given node. This function is not thread-safe
// and the caller must ensure this function is not called concurrently.
func (p *Processor) addQueue(nodeID uint64) (*queue, error) {
path := filepath.Join(p.dir, strconv.FormatUint(nodeID, 10))
if err := os.MkdirAll(path, 0700); err != nil {
return nil, err
}
queue, err := newQueue(path, p.maxSize)
if err != nil {
return nil, err
}
if err := queue.Open(); err != nil {
return nil, err
}
p.queues[nodeID] = queue
// Create node stats for this queue.
key := fmt.Sprintf("hh_processor:node:%d", nodeID)
tags := map[string]string{"nodeID": strconv.FormatUint(nodeID, 10)}
p.nodeStatMaps[nodeID] = influxdb.NewStatistics(key, "hh_processor", tags)
return queue, nil
}
// WriteShard writes hinted-handoff data for the given shard and node. Since it may manipulate
// hinted-handoff queues, and be called concurrently, it takes a lock during queue access.
func (p *Processor) WriteShard(shardID, ownerID uint64, points []models.Point) error {
p.mu.RLock()
queue, ok := p.queues[ownerID]
p.mu.RUnlock()
if !ok {
if err := func() error {
// Check again under write-lock.
p.mu.Lock()
defer p.mu.Unlock()
queue, ok = p.queues[ownerID]
if !ok {
var err error
if queue, err = p.addQueue(ownerID); err != nil {
return err
}
}
return nil
}(); err != nil {
return err
}
}
// Update stats
p.updateShardStats(shardID, pointsHint, int64(len(points)))
p.nodeStatMaps[ownerID].Add(pointsHint, int64(len(points)))
b := p.marshalWrite(shardID, points)
return queue.Append(b)
}
func (p *Processor) Process() error {
p.mu.RLock()
defer p.mu.RUnlock()
activeQueues, err := p.activeQueues()
if err != nil {
return err
}
res := make(chan error, len(activeQueues))
for nodeID, q := range activeQueues {
go func(nodeID uint64, q *queue) {
// Log how many writes we successfully sent at the end
var sent int
start := time.Now()
defer func(start time.Time) {
if sent > 0 {
p.Logger.Printf("%d queued writes sent to node %d in %s", sent, nodeID, time.Since(start))
}
}(start)
limiter := NewRateLimiter(p.retryRateLimit)
for {
// Get the current block from the queue
buf, err := q.Current()
if err != nil {
if err != io.EOF {
p.nodeStatMaps[nodeID].Add(currentErr, 1)
}
res <- nil
break
}
// unmarshal the byte slice back to shard ID and points
shardID, points, err := p.unmarshalWrite(buf)
if err != nil {
p.nodeStatMaps[nodeID].Add(unmarshalErr, 1)
p.Logger.Printf("unmarshal write failed: %v", err)
if err := q.Advance(); err != nil {
p.nodeStatMaps[nodeID].Add(advanceErr, 1)
res <- err
}
// Skip and try the next block.
continue
}
// Try to send the write to the node
if err := p.writer.WriteShard(shardID, nodeID, points); err != nil && tsdb.IsRetryable(err) {
p.nodeStatMaps[nodeID].Add(writeErr, 1)
p.Logger.Printf("remote write failed: %v", err)
res <- nil
break
}
p.updateShardStats(shardID, pointsWrite, int64(len(points)))
p.nodeStatMaps[nodeID].Add(pointsWrite, int64(len(points)))
// If we get here, the write succeeded so advance the queue to the next item
if err := q.Advance(); err != nil {
p.nodeStatMaps[nodeID].Add(advanceErr, 1)
res <- err
return
}
sent += 1
// Update how many bytes we've sent
limiter.Update(len(buf))
p.updateShardStats(shardID, bytesWrite, int64(len(buf)))
p.nodeStatMaps[nodeID].Add(bytesWrite, int64(len(buf)))
// Block to maintain the throughput rate
time.Sleep(limiter.Delay())
}
}(nodeID, q)
}
for range activeQueues {
err := <-res
if err != nil {
return err
}
}
return nil
}
func (p *Processor) marshalWrite(shardID uint64, points []models.Point) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, shardID)
for _, p := range points {
b = append(b, []byte(p.String())...)
b = append(b, '\n')
}
return b
}
func (p *Processor) unmarshalWrite(b []byte) (uint64, []models.Point, error) {
if len(b) < 8 {
return 0, nil, fmt.Errorf("too short: len = %d", len(b))
}
ownerID := binary.BigEndian.Uint64(b[:8])
points, err := models.ParsePoints(b[8:])
return ownerID, points, err
}
func (p *Processor) updateShardStats(shardID uint64, stat string, inc int64) {
m, ok := p.shardStatMaps[shardID]
if !ok {
key := fmt.Sprintf("hh_processor:shard:%d", shardID)
tags := map[string]string{"shardID": strconv.FormatUint(shardID, 10)}
p.shardStatMaps[shardID] = influxdb.NewStatistics(key, "hh_processor", tags)
m = p.shardStatMaps[shardID]
}
m.Add(stat, inc)
}
func (p *Processor) activeQueues() (map[uint64]*queue, error) {
queues := make(map[uint64]*queue)
for id, q := range p.queues {
ni, err := p.metastore.Node(id)
if err != nil {
return nil, err
}
if ni != nil {
queues[id] = q
}
}
return queues, nil
}
func (p *Processor) PurgeOlderThan(when time.Duration) error {
p.mu.Lock()
defer p.mu.Unlock()
for _, queue := range p.queues {
if err := queue.PurgeOlderThan(time.Now().Add(-when)); err != nil {
return err
}
}
return nil
}
func (p *Processor) PurgeInactiveOlderThan(when time.Duration) error {
p.mu.Lock()
defer p.mu.Unlock()
for nodeID, queue := range p.queues {
// Only delete queues for inactive nodes.
ni, err := p.metastore.Node(nodeID)
if err != nil {
return err
}
if ni != nil {
continue
}
last, err := queue.LastModified()
if err != nil {
return err
}
if last.Before(time.Now().Add(-when)) {
// Close and remove the queue.
if err := queue.Close(); err != nil {
return err
}
if err := queue.Remove(); err != nil {
return err
}
delete(p.queues, nodeID)
}
}
return nil
}

View File

@ -1,143 +0,0 @@
package hh
import (
"io/ioutil"
"testing"
"time"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
)
type fakeShardWriter struct {
ShardWriteFn func(shardID, nodeID uint64, points []models.Point) error
}
func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Point) error {
return f.ShardWriteFn(shardID, nodeID, points)
}
type fakeMetaStore struct {
NodeFn func(nodeID uint64) (*meta.NodeInfo, error)
}
func (f *fakeMetaStore) Node(nodeID uint64) (*meta.NodeInfo, error) {
return f.NodeFn(nodeID)
}
func TestProcessorProcess(t *testing.T) {
dir, err := ioutil.TempDir("", "processor_test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
// expected data to be queued and sent to the shardWriter
var expShardID, activeNodeID, inactiveNodeID, count = uint64(100), uint64(200), uint64(300), 0
pt := models.NewPoint("cpu", models.Tags{"foo": "bar"}, models.Fields{"value": 1.0}, time.Unix(0, 0))
sh := &fakeShardWriter{
ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error {
count += 1
if shardID != expShardID {
t.Errorf("Process() shardID mismatch: got %v, exp %v", shardID, expShardID)
}
if nodeID != activeNodeID {
t.Errorf("Process() nodeID mismatch: got %v, exp %v", nodeID, activeNodeID)
}
if exp := 1; len(points) != exp {
t.Fatalf("Process() points mismatch: got %v, exp %v", len(points), exp)
}
if points[0].String() != pt.String() {
t.Fatalf("Process() points mismatch:\n got %v\n exp %v", points[0].String(), pt.String())
}
return nil
},
}
metastore := &fakeMetaStore{
NodeFn: func(nodeID uint64) (*meta.NodeInfo, error) {
if nodeID == activeNodeID {
return &meta.NodeInfo{}, nil
}
return nil, nil
},
}
p, err := NewProcessor(dir, sh, metastore, ProcessorOptions{MaxSize: 1024})
if err != nil {
t.Fatalf("Process() failed to create processor: %v", err)
}
// This should queue a write for the active node.
if err := p.WriteShard(expShardID, activeNodeID, []models.Point{pt}); err != nil {
t.Fatalf("Process() failed to write points: %v", err)
}
// This should queue a write for the inactive node.
if err := p.WriteShard(expShardID, inactiveNodeID, []models.Point{pt}); err != nil {
t.Fatalf("Process() failed to write points: %v", err)
}
// This should send the write to the shard writer
if err := p.Process(); err != nil {
t.Fatalf("Process() failed to write points: %v", err)
}
if exp := 1; count != exp {
t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
}
// All active nodes should have been handled so no writes should be sent again
if err := p.Process(); err != nil {
t.Fatalf("Process() failed to write points: %v", err)
}
// Count should stay the same
if exp := 1; count != exp {
t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
}
// Make the inactive node active.
sh.ShardWriteFn = func(shardID, nodeID uint64, points []models.Point) error {
count += 1
if shardID != expShardID {
t.Errorf("Process() shardID mismatch: got %v, exp %v", shardID, expShardID)
}
if nodeID != inactiveNodeID {
t.Errorf("Process() nodeID mismatch: got %v, exp %v", nodeID, activeNodeID)
}
if exp := 1; len(points) != exp {
t.Fatalf("Process() points mismatch: got %v, exp %v", len(points), exp)
}
if points[0].String() != pt.String() {
t.Fatalf("Process() points mismatch:\n got %v\n exp %v", points[0].String(), pt.String())
}
return nil
}
metastore.NodeFn = func(nodeID uint64) (*meta.NodeInfo, error) {
return &meta.NodeInfo{}, nil
}
// This should send the final write to the shard writer
if err := p.Process(); err != nil {
t.Fatalf("Process() failed to write points: %v", err)
}
if exp := 2; count != exp {
t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
}
// All queues should have been handled, so no more writes should result.
if err := p.Process(); err != nil {
t.Fatalf("Process() failed to write points: %v", err)
}
if exp := 2; count != exp {
t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
}
}

View File

@ -72,6 +72,10 @@ type queue struct {
// The segments that exist on disk
segments segments
}
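// queuePos describes the on-disk positions of the head and tail of a queue.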
type queuePos struct {
head string
tail string
}
type segments []*segment
@ -211,7 +215,21 @@ func (l *queue) LastModified() (time.Time, error) {
if l.tail != nil {
return l.tail.lastModified()
}
return time.Time{}, nil
return time.Time{}.UTC(), nil
}
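// Position returns the current head and tail of the queue, each in the form path:offset.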
func (l *queue) Position() (*queuePos, error) {
l.mu.RLock()
defer l.mu.RUnlock()
qp := &queuePos{}
if l.head != nil {
qp.head = fmt.Sprintf("%s:%d", l.head.path, l.head.pos)
}
if l.tail != nil {
qp.tail = fmt.Sprintf("%s:%d", l.tail.path, l.tail.filePos())
}
return qp, nil
}
// diskUsage returns the total size on disk used by the queue
@ -606,7 +624,7 @@ func (l *segment) lastModified() (time.Time, error) {
if err != nil {
return time.Time{}, err
}
return stats.ModTime(), nil
return stats.ModTime().UTC(), nil
}
func (l *segment) diskUsage() int64 {

View File

@ -3,9 +3,11 @@ package hh
import (
"expvar"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
@ -13,15 +15,17 @@ import (
"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/monitor"
)
var ErrHintedHandoffDisabled = fmt.Errorf("hinted handoff disabled")
const (
writeShardReq = "write_shard_req"
writeShardReqPoints = "write_shard_req_points"
processReq = "process_req"
processReqFail = "process_req_fail"
writeShardReq = "writeShardReq"
writeShardReqPoints = "writeShardReqPoints"
writeNodeReq = "writeNodeReq"
writeNodeReqFail = "writeNodeReqFail"
writeNodeReqPoints = "writeNodeReqPoints"
)
type Service struct {
@ -29,17 +33,18 @@ type Service struct {
wg sync.WaitGroup
closing chan struct{}
processors map[uint64]*NodeProcessor
statMap *expvar.Map
Logger *log.Logger
cfg Config
ShardWriter shardWriter
shardWriter shardWriter
metastore metaStore
HintedHandoff interface {
WriteShard(shardID, ownerID uint64, points []models.Point) error
Process() error
PurgeOlderThan(when time.Duration) error
PurgeInactiveOlderThan(when time.Duration) error
Monitor interface {
RegisterDiagnosticsClient(name string, client monitor.DiagsClient)
DeregisterDiagnosticsClient(name string)
}
}
@ -56,55 +61,81 @@ func NewService(c Config, w shardWriter, m metaStore) *Service {
key := strings.Join([]string{"hh", c.Dir}, ":")
tags := map[string]string{"path": c.Dir}
s := &Service{
cfg: c,
statMap: influxdb.NewStatistics(key, "hh", tags),
Logger: log.New(os.Stderr, "[handoff] ", log.LstdFlags),
return &Service{
cfg: c,
closing: make(chan struct{}),
processors: make(map[uint64]*NodeProcessor),
statMap: influxdb.NewStatistics(key, "hh", tags),
Logger: log.New(os.Stderr, "[handoff] ", log.LstdFlags),
shardWriter: w,
metastore: m,
}
processor, err := NewProcessor(c.Dir, w, m, ProcessorOptions{
MaxSize: c.MaxSize,
RetryRateLimit: c.RetryRateLimit,
})
if err != nil {
s.Logger.Fatalf("Failed to start hinted handoff processor: %v", err)
}
processor.Logger = s.Logger
s.HintedHandoff = processor
return s
}
func (s *Service) Open() error {
if !s.cfg.Enabled {
// Allow Open to proceed, but don't do anything.
return nil
}
s.Logger.Printf("Starting hinted handoff service")
s.mu.Lock()
defer s.mu.Unlock()
if !s.cfg.Enabled {
// Allow Open to proceed, but don't do anything.
return nil
}
s.Logger.Printf("Starting hinted handoff service")
s.closing = make(chan struct{})
s.Logger.Printf("Using data dir: %v", s.cfg.Dir)
// Register diagnostics if a Monitor service is available.
if s.Monitor != nil {
s.Monitor.RegisterDiagnosticsClient("hh", s)
}
s.wg.Add(3)
go s.retryWrites()
go s.expireWrites()
go s.deleteInactiveQueues()
// Create the root directory if it doesn't already exist.
s.Logger.Printf("Using data dir: %v", s.cfg.Dir)
if err := os.MkdirAll(s.cfg.Dir, 0700); err != nil {
return fmt.Errorf("mkdir all: %s", err)
}
// Create a node processor for each node directory.
files, err := ioutil.ReadDir(s.cfg.Dir)
if err != nil {
return err
}
for _, file := range files {
nodeID, err := strconv.ParseUint(file.Name(), 10, 64)
if err != nil {
// Not a number? Skip it.
continue
}
n := NewNodeProcessor(nodeID, s.pathforNode(nodeID), s.shardWriter, s.metastore)
if err := n.Open(); err != nil {
return err
}
s.processors[nodeID] = n
}
s.wg.Add(1)
go s.purgeInactiveProcessors()
return nil
}
func (s *Service) Close() error {
s.Logger.Println("shutting down hh service")
s.mu.Lock()
defer s.mu.Unlock()
for _, p := range s.processors {
if err := p.Close(); err != nil {
return err
}
}
if s.closing != nil {
close(s.closing)
}
s.wg.Wait()
s.closing = nil
return nil
}
@ -115,76 +146,125 @@ func (s *Service) SetLogger(l *log.Logger) {
// WriteShard queues the points write for shardID to node ownerID to handoff queue
func (s *Service) WriteShard(shardID, ownerID uint64, points []models.Point) error {
s.statMap.Add(writeShardReq, 1)
s.statMap.Add(writeShardReqPoints, int64(len(points)))
if !s.cfg.Enabled {
return ErrHintedHandoffDisabled
}
s.statMap.Add(writeShardReq, 1)
s.statMap.Add(writeShardReqPoints, int64(len(points)))
return s.HintedHandoff.WriteShard(shardID, ownerID, points)
}
s.mu.RLock()
processor, ok := s.processors[ownerID]
s.mu.RUnlock()
if !ok {
if err := func() error {
// Check again under write-lock.
s.mu.Lock()
defer s.mu.Unlock()
func (s *Service) retryWrites() {
defer s.wg.Done()
currInterval := time.Duration(s.cfg.RetryInterval)
if currInterval > time.Duration(s.cfg.RetryMaxInterval) {
currInterval = time.Duration(s.cfg.RetryMaxInterval)
}
for {
select {
case <-s.closing:
return
case <-time.After(currInterval):
s.statMap.Add(processReq, 1)
if err := s.HintedHandoff.Process(); err != nil && err != io.EOF {
s.statMap.Add(processReqFail, 1)
s.Logger.Printf("retried write failed: %v", err)
currInterval = currInterval * 2
if currInterval > time.Duration(s.cfg.RetryMaxInterval) {
currInterval = time.Duration(s.cfg.RetryMaxInterval)
processor, ok = s.processors[ownerID]
if !ok {
processor = NewNodeProcessor(ownerID, s.pathforNode(ownerID), s.shardWriter, s.metastore)
if err := processor.Open(); err != nil {
return err
}
} else {
// Success! Return to configured interval.
currInterval = time.Duration(s.cfg.RetryInterval)
s.processors[ownerID] = processor
}
return nil
}(); err != nil {
return err
}
}
if err := processor.WriteShard(shardID, points); err != nil {
return err
}
return nil
}
// expireWrites will cause the handoff queues to remove writes that are older
// than the configured threshold
func (s *Service) expireWrites() {
// Diagnostics returns diagnostic information.
func (s *Service) Diagnostics() (*monitor.Diagnostic, error) {
s.mu.RLock()
defer s.mu.RUnlock()
d := &monitor.Diagnostic{
Columns: []string{"node", "active", "last modified", "head", "tail"},
Rows: make([][]interface{}, 0, len(s.processors)),
}
for k, v := range s.processors {
lm, err := v.LastModified()
if err != nil {
return nil, err
}
active := "no"
b, err := v.Active()
if err != nil {
return nil, err
}
if b {
active = "yes"
}
d.Rows = append(d.Rows, []interface{}{k, active, lm, v.Head(), v.Tail()})
}
return d, nil
}
// purgeInactiveProcessors will cause the service to remove processors for inactive nodes.
func (s *Service) purgeInactiveProcessors() {
defer s.wg.Done()
ticker := time.NewTicker(time.Hour)
ticker := time.NewTicker(time.Duration(s.cfg.PurgeInterval))
defer ticker.Stop()
for {
select {
case <-s.closing:
return
case <-ticker.C:
if err := s.HintedHandoff.PurgeOlderThan(time.Duration(s.cfg.MaxAge)); err != nil {
s.Logger.Printf("purge write failed: %v", err)
}
func() {
s.mu.Lock()
defer s.mu.Unlock()
for k, v := range s.processors {
lm, err := v.LastModified()
if err != nil {
s.Logger.Printf("failed to determine LastModified for processor %d: %s", k, err.Error())
continue
}
active, err := v.Active()
if err != nil {
s.Logger.Printf("failed to determine if node %d is active: %s", k, err.Error())
continue
}
if active {
// Node is active.
continue
}
if !lm.Before(time.Now().Add(-time.Duration(s.cfg.MaxAge))) {
// Node processor contains too-young data.
continue
}
if err := v.Close(); err != nil {
s.Logger.Printf("failed to close node processor %d: %s", k, err.Error())
continue
}
if err := v.Purge(); err != nil {
s.Logger.Printf("failed to purge node processor %d: %s", k, err.Error())
continue
}
delete(s.processors, k)
}
}()
}
}
}
// deleteInactiveQueues will cause the service to remove queues for inactive nodes.
func (s *Service) deleteInactiveQueues() {
defer s.wg.Done()
ticker := time.NewTicker(time.Hour)
defer ticker.Stop()
for {
select {
case <-s.closing:
return
case <-ticker.C:
if err := s.HintedHandoff.PurgeInactiveOlderThan(time.Duration(s.cfg.MaxAge)); err != nil {
s.Logger.Printf("delete queues failed: %v", err)
}
}
}
// pathforNode returns the directory for HH data, for the given node.
func (s *Service) pathforNode(nodeID uint64) string {
return filepath.Join(s.cfg.Dir, fmt.Sprintf("%d", nodeID))
}

View File

@ -7,8 +7,8 @@ type Config struct {
LogEnabled bool `toml:"log-enabled"`
WriteTracing bool `toml:"write-tracing"`
PprofEnabled bool `toml:"pprof-enabled"`
HttpsEnabled bool `toml:"https-enabled"`
HttpsCertificate string `toml:"https-certificate"`
HTTPSEnabled bool `toml:"https-enabled"`
HTTPSCertificate string `toml:"https-certificate"`
}
func NewConfig() Config {
@ -16,7 +16,7 @@ func NewConfig() Config {
Enabled: true,
BindAddress: ":8086",
LogEnabled: true,
HttpsEnabled: false,
HttpsCertificate: "/etc/ssl/influxdb.pem",
HTTPSEnabled: false,
HTTPSCertificate: "/etc/ssl/influxdb.pem",
}
}

View File

@ -36,10 +36,10 @@ https-certificate = "/dev/null"
t.Fatalf("unexpected write tracing: %v", c.WriteTracing)
} else if c.PprofEnabled != true {
t.Fatalf("unexpected pprof enabled: %v", c.PprofEnabled)
} else if c.HttpsEnabled != true {
t.Fatalf("unexpected https enabled: %v", c.HttpsEnabled)
} else if c.HttpsCertificate != "/dev/null" {
t.Fatalf("unexpected https certificate: %v", c.HttpsCertificate)
} else if c.HTTPSEnabled != true {
t.Fatalf("unexpected https enabled: %v", c.HTTPSEnabled)
} else if c.HTTPSCertificate != "/dev/null" {
t.Fatalf("unexpected https certificate: %v", c.HTTPSCertificate)
}
}

View File

@ -55,6 +55,7 @@ type Handler struct {
Version string
MetaStore interface {
WaitForLeader(timeout time.Duration) error
Database(name string) (*meta.DatabaseInfo, error)
Authenticate(username, password string) (ui *meta.UserInfo, err error)
Users() ([]meta.UserInfo, error)
@ -461,7 +462,7 @@ func (h *Handler) serveWriteLine(w http.ResponseWriter, r *http.Request, body []
}
// check that the byte is in the standard ascii code range
if body[i] > 32 {
if body[i] > 32 || i >= len(body)-1 {
break
}
i += 1
@ -473,13 +474,14 @@ func (h *Handler) serveWriteLine(w http.ResponseWriter, r *http.Request, body []
precision = "n"
}
points, err := models.ParsePointsWithPrecision(body, time.Now().UTC(), precision)
if err != nil {
if err.Error() == "EOF" {
points, parseError := models.ParsePointsWithPrecision(body, time.Now().UTC(), precision)
// No points parsed correctly, so return the error now
if parseError != nil && len(points) == 0 {
if parseError.Error() == "EOF" {
w.WriteHeader(http.StatusOK)
return
}
h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
h.writeError(w, influxql.Result{Err: parseError}, http.StatusBadRequest)
return
}
@ -534,6 +536,13 @@ func (h *Handler) serveWriteLine(w http.ResponseWriter, r *http.Request, body []
h.statMap.Add(statPointsWrittenFail, int64(len(points)))
h.writeError(w, influxql.Result{Err: err}, http.StatusInternalServerError)
return
} else if parseError != nil {
// We wrote some of the points
h.statMap.Add(statPointsWrittenOK, int64(len(points)))
// The other points failed to parse, which means the client sent invalid line protocol. We return a 400
// response code as well as the lines that failed to parse.
h.writeError(w, influxql.Result{Err: fmt.Errorf("partial write:\n%v", parseError)}, http.StatusBadRequest)
return
}
h.statMap.Add(statPointsWrittenOK, int64(len(points)))
@ -547,6 +556,21 @@ func (h *Handler) serveOptions(w http.ResponseWriter, r *http.Request) {
// servePing returns a simple response to let the client know the server is running.
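// If the wait_for_leader query parameter is set to a duration (e.g. 1s), the
// handler first blocks until the cluster has a leader, returning 503 if the
// timeout expires and 400 if the duration cannot be parsed.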
func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
wfl := q.Get("wait_for_leader")
if wfl != "" {
d, err := time.ParseDuration(wfl)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
if err := h.MetaStore.WaitForLeader(d); err != nil {
w.WriteHeader(http.StatusServiceUnavailable)
return
}
}
h.statMap.Add(statPingRequest, 1)
w.WriteHeader(http.StatusNoContent)
}
@ -905,7 +929,11 @@ func NormalizeBatchPoints(bp client.BatchPoints) ([]models.Point, error) {
return points, fmt.Errorf("missing fields")
}
// Need to convert from a client.Point to a influxdb.Point
points = append(points, models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time))
pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
if err != nil {
return points, err
}
points = append(points, pt)
}
return points, nil

View File

@ -1,6 +1,7 @@
package httpd_test
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@ -284,6 +285,76 @@ func TestHandler_Query_ErrResult(t *testing.T) {
}
}
// Ensure the handler handles ping requests correctly.
func TestHandler_Ping(t *testing.T) {
h := NewHandler(false)
w := httptest.NewRecorder()
h.ServeHTTP(w, MustNewRequest("GET", "/ping", nil))
if w.Code != http.StatusNoContent {
t.Fatalf("unexpected status: %d", w.Code)
}
h.ServeHTTP(w, MustNewRequest("HEAD", "/ping", nil))
if w.Code != http.StatusNoContent {
t.Fatalf("unexpected status: %d", w.Code)
}
}
// Ensure the handler handles ping requests correctly, when waiting for leader.
func TestHandler_PingWaitForLeader(t *testing.T) {
h := NewHandler(false)
w := httptest.NewRecorder()
h.ServeHTTP(w, MustNewRequest("GET", "/ping?wait_for_leader=1s", nil))
if w.Code != http.StatusNoContent {
t.Fatalf("unexpected status: %d", w.Code)
}
h.ServeHTTP(w, MustNewRequest("HEAD", "/ping?wait_for_leader=1s", nil))
if w.Code != http.StatusNoContent {
t.Fatalf("unexpected status: %d", w.Code)
}
}
// Ensure the handler handles ping requests correctly, when timeout expires waiting for leader.
func TestHandler_PingWaitForLeaderTimeout(t *testing.T) {
h := NewHandler(false)
h.MetaStore.WaitForLeaderFn = func(d time.Duration) error {
return fmt.Errorf("timeout")
}
w := httptest.NewRecorder()
h.ServeHTTP(w, MustNewRequest("GET", "/ping?wait_for_leader=1s", nil))
if w.Code != http.StatusServiceUnavailable {
t.Fatalf("unexpected status: %d", w.Code)
}
h.ServeHTTP(w, MustNewRequest("HEAD", "/ping?wait_for_leader=1s", nil))
if w.Code != http.StatusServiceUnavailable {
t.Fatalf("unexpected status: %d", w.Code)
}
}
// Ensure the handler handles bad ping requests
func TestHandler_PingWaitForLeaderBadRequest(t *testing.T) {
h := NewHandler(false)
w := httptest.NewRecorder()
h.ServeHTTP(w, MustNewRequest("GET", "/ping?wait_for_leader=1xxx", nil))
if w.Code != http.StatusBadRequest {
t.Fatalf("unexpected status: %d", w.Code)
}
h.ServeHTTP(w, MustNewRequest("HEAD", "/ping?wait_for_leader=abc", nil))
if w.Code != http.StatusBadRequest {
t.Fatalf("unexpected status: %d", w.Code)
}
}
// Ensure write endpoint can handle bad requests
func TestHandler_HandleBadRequestBody(t *testing.T) {
b := bytes.NewReader(make([]byte, 10))
h := NewHandler(false)
w := httptest.NewRecorder()
h.ServeHTTP(w, MustNewRequest("POST", "/write", b))
if w.Code != http.StatusBadRequest {
t.Fatalf("unexpected status: %d", w.Code)
}
}
func TestMarshalJSON_NoPretty(t *testing.T) {
if b := httpd.MarshalJSON(struct {
Name string `json:"name"`
@ -326,7 +397,7 @@ func TestNormalizeBatchPoints(t *testing.T) {
},
},
p: []models.Point{
models.NewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
models.MustNewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
},
},
{
@ -338,7 +409,7 @@ func TestNormalizeBatchPoints(t *testing.T) {
},
},
p: []models.Point{
models.NewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
models.MustNewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
},
},
{
@ -351,8 +422,8 @@ func TestNormalizeBatchPoints(t *testing.T) {
},
},
p: []models.Point{
models.NewPoint("cpu", map[string]string{"day": "monday", "region": "useast"}, map[string]interface{}{"value": 1.0}, now),
models.NewPoint("memory", map[string]string{"day": "monday"}, map[string]interface{}{"value": 2.0}, now),
models.MustNewPoint("cpu", map[string]string{"day": "monday", "region": "useast"}, map[string]interface{}{"value": 1.0}, now),
models.MustNewPoint("memory", map[string]string{"day": "monday"}, map[string]interface{}{"value": 2.0}, now),
},
},
}
@ -397,9 +468,18 @@ func NewHandler(requireAuthentication bool) *Handler {
// HandlerMetaStore is a mock implementation of Handler.MetaStore.
type HandlerMetaStore struct {
DatabaseFn func(name string) (*meta.DatabaseInfo, error)
AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
UsersFn func() ([]meta.UserInfo, error)
WaitForLeaderFn func(d time.Duration) error
DatabaseFn func(name string) (*meta.DatabaseInfo, error)
AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
UsersFn func() ([]meta.UserInfo, error)
}
func (s *HandlerMetaStore) WaitForLeader(d time.Duration) error {
if s.WaitForLeaderFn == nil {
// Default behaviour is to assume there is a leader.
return nil
}
return s.WaitForLeaderFn(d)
}
func (s *HandlerMetaStore) Database(name string) (*meta.DatabaseInfo, error) {

View File

@ -15,16 +15,16 @@ import (
// statistics gathered by the httpd package.
const (
statRequest = "req" // Number of HTTP requests served
statCQRequest = "cq_req" // Number of CQ-execute requests served
statQueryRequest = "query_req" // Number of query requests served
statWriteRequest = "write_req" // Number of write requests served
statPingRequest = "ping_req" // Number of ping requests served
statWriteRequestBytesReceived = "write_req_bytes" // Sum of all bytes in write requests
statQueryRequestBytesTransmitted = "query_resp_bytes" // Sum of all bytes returned in query responses
statPointsWrittenOK = "points_written_ok" // Number of points written OK
statPointsWrittenFail = "points_written_fail" // Number of points that failed to be written
statAuthFail = "auth_fail" // Number of authentication failures
statRequest = "req" // Number of HTTP requests served
statCQRequest = "cqReq" // Number of CQ-execute requests served
statQueryRequest = "queryReq" // Number of query requests served
statWriteRequest = "writeReq" // Number of write requests served
statPingRequest = "pingReq" // Number of ping requests served
statWriteRequestBytesReceived = "writeReqBytes" // Sum of all bytes in write requests
statQueryRequestBytesTransmitted = "queryRespBytes" // Sum of all bytes returned in query responses
statPointsWrittenOK = "pointsWrittenOk" // Number of points written OK
statPointsWrittenFail = "pointsWrittenFail" // Number of points that failed to be written
statAuthFail = "authFail" // Number of authentication failures
)
// Service manages the listener and handler for an HTTP endpoint.
@ -51,8 +51,8 @@ func NewService(c Config) *Service {
s := &Service{
addr: c.BindAddress,
https: c.HttpsEnabled,
cert: c.HttpsCertificate,
https: c.HTTPSEnabled,
cert: c.HTTPSCertificate,
err: make(chan error),
Handler: NewHandler(
c.AuthEnabled,

View File

@ -109,7 +109,12 @@ func (h *Handler) servePut(w http.ResponseWriter, r *http.Request) {
ts = time.Unix(p.Time/1000, (p.Time%1000)*int64(time.Millisecond))
}
points = append(points, models.NewPoint(p.Metric, p.Tags, map[string]interface{}{"value": p.Value}, ts))
pt, err := models.NewPoint(p.Metric, p.Tags, map[string]interface{}{"value": p.Value}, ts)
if err != nil {
h.Logger.Printf("Dropping point %v: %v", p.Metric, err)
continue
}
points = append(points, pt)
}
// Write points.

View File

@ -27,21 +27,21 @@ const leaderWaitTimeout = 30 * time.Second
// statistics gathered by the openTSDB package.
const (
statHTTPConnectionsHandled = "http_connections_handled"
statTelnetConnectionsActive = "tl_connections_active"
statTelnetConnectionsHandled = "tl_connections_handled"
statTelnetPointsReceived = "tl_points_rx"
statTelnetBytesReceived = "tl_bytes_rx"
statTelnetReadError = "tl_read_err"
statTelnetBadLine = "tl_bad_line"
statTelnetBadTime = "tl_bad_time"
statTelnetBadTag = "tl_bad_tag"
statTelnetBadFloat = "tl_bad_float"
statBatchesTrasmitted = "batches_tx"
statPointsTransmitted = "points_tx"
statBatchesTransmitFail = "batches_tx_fail"
statConnectionsActive = "connections_active"
statConnectionsHandled = "connections_handled"
statHTTPConnectionsHandled = "httpConnsHandled"
statTelnetConnectionsActive = "tlConnsActive"
statTelnetConnectionsHandled = "tlConnsHandled"
statTelnetPointsReceived = "tlPointsRx"
statTelnetBytesReceived = "tlBytesRx"
statTelnetReadError = "tlReadErr"
statTelnetBadLine = "tlBadLine"
statTelnetBadTime = "tlBadTime"
statTelnetBadTag = "tlBadTag"
statTelnetBadFloat = "tlBadFloat"
statBatchesTrasmitted = "batchesTx"
statPointsTransmitted = "pointsTx"
statBatchesTransmitFail = "batchesTxFail"
statConnectionsActive = "connsActive"
statConnectionsHandled = "connsHandled"
)
// Service manages the listener and handler for an HTTP endpoint.
@ -327,14 +327,21 @@ func (s *Service) handleTelnetConn(conn net.Conn) {
}
fields := make(map[string]interface{})
fields["value"], err = strconv.ParseFloat(valueStr, 64)
fv, err := strconv.ParseFloat(valueStr, 64)
if err != nil {
s.statMap.Add(statTelnetBadFloat, 1)
s.Logger.Printf("bad float '%s' from %s", valueStr, remoteAddr)
continue
}
fields["value"] = fv
s.batcher.In() <- models.NewPoint(measurement, tags, fields, t)
pt, err := models.NewPoint(measurement, tags, fields, t)
if err != nil {
s.statMap.Add(statTelnetBadFloat, 1)
s.Logger.Printf("bad float '%s' from %s", valueStr, remoteAddr)
continue
}
s.batcher.In() <- pt
}
}

View File

@ -38,7 +38,7 @@ func TestService_Telnet(t *testing.T) {
} else if req.RetentionPolicy != "" {
t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
} else if !reflect.DeepEqual(req.Points, []models.Point{
models.NewPoint(
models.MustNewPoint(
"sys.cpu.user",
map[string]string{"host": "webserver01", "cpu": "0"},
map[string]interface{}{"value": 42.5},
@ -92,7 +92,7 @@ func TestService_HTTP(t *testing.T) {
} else if req.RetentionPolicy != "" {
t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
} else if !reflect.DeepEqual(req.Points, []models.Point{
models.NewPoint(
models.MustNewPoint(
"sys.cpu.nice",
map[string]string{"dc": "lga", "host": "web01"},
map[string]interface{}{"value": 18.0},

View File

@ -1,17 +1,14 @@
package registration
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"sync"
"time"
"github.com/influxdb/enterprise-client/v1"
"github.com/influxdb/influxdb/monitor"
)
@ -103,6 +100,10 @@ func (s *Service) registerServer() error {
if !s.enabled || s.token == "" {
return nil
}
cl := client.New(s.token)
cl.URL = s.url.String()
clusterID, err := s.MetaStore.ClusterID()
if err != nil {
s.logger.Printf("failed to retrieve cluster ID for registration: %s", err.Error())
@ -112,41 +113,26 @@ func (s *Service) registerServer() error {
if err != nil {
return err
}
j := map[string]interface{}{
"cluster_id": fmt.Sprintf("%d", clusterID),
"server_id": fmt.Sprintf("%d", s.MetaStore.NodeID()),
"host": hostname,
"product": "influxdb",
"version": s.version,
server := client.Server{
ClusterID: fmt.Sprintf("%d", clusterID),
ServerID: fmt.Sprintf("%d", s.MetaStore.NodeID()),
Host: hostname,
Product: "influxdb",
Version: s.version,
}
b, err := json.Marshal(j)
if err != nil {
return err
}
url := fmt.Sprintf("%s/api/v1/servers?token=%s", s.url.String(), s.token)
s.wg.Add(1)
go func() {
defer s.wg.Done()
client := http.Client{Timeout: time.Duration(5 * time.Second)}
resp, err := client.Post(url, "application/json", bytes.NewBuffer(b))
resp, err := cl.Save(server)
if err != nil {
s.logger.Printf("failed to register server with %s: %s", s.url.String(), err.Error())
s.logger.Printf("failed to register server with %s: received code %s, error: %s", s.url.String(), resp.Status, err)
return
}
s.updateLastContact(time.Now().UTC())
defer resp.Body.Close()
if resp.StatusCode == http.StatusCreated {
return
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
s.logger.Printf("failed to read response from registration server: %s", err.Error())
return
}
s.logger.Printf("failed to register server with %s: received code %s, body: %s", s.url.String(), resp.Status, string(body))
}()
return nil
}
@ -157,7 +143,9 @@ func (s *Service) reportStats() {
// No reporting, for now, without token.
return
}
statsURL := fmt.Sprintf("%s/api/v1/stats/influxdb?token=%s", s.url.String(), s.token)
cl := client.New(s.token)
cl.URL = s.url.String()
clusterID, err := s.MetaStore.ClusterID()
if err != nil {
@ -175,30 +163,28 @@ func (s *Service) reportStats() {
continue
}
o := map[string]interface{}{
"cluster_id": fmt.Sprintf("%d", clusterID),
"server_id": fmt.Sprintf("%d", s.MetaStore.NodeID()),
"stats": stats,
st := client.Stats{
Product: "influxdb",
ClusterID: fmt.Sprintf("%d", clusterID),
ServerID: fmt.Sprintf("%d", s.MetaStore.NodeID()),
}
b, err := json.Marshal(o)
if err != nil {
s.logger.Printf("failed to JSON-encode stats: %s", err.Error())
continue
data := make([]client.StatsData, len(stats))
for i, x := range stats {
data[i] = client.StatsData{
Name: x.Name,
Tags: x.Tags,
Values: x.Values,
}
}
st.Data = data
client := http.Client{Timeout: time.Duration(5 * time.Second)}
resp, err := client.Post(statsURL, "application/json", bytes.NewBuffer(b))
resp, err := cl.Save(st)
if err != nil {
s.logger.Printf("failed to post statistics to %s: %s", statsURL, err.Error())
s.logger.Printf("failed to post statistics to Enterprise: repsonse code: %d: error: %s", resp.StatusCode, err)
continue
}
s.updateLastContact(time.Now().UTC())
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
s.logger.Printf("failed to post statistics to %s: repsonse code: %d", statsURL, resp.StatusCode)
continue
}
case <-s.done:
return
}

View File

@ -106,22 +106,28 @@ func (s *Service) deleteShards() {
case <-ticker.C:
s.logger.Println("retention policy shard deletion check commencing")
deletedShardIDs := make(map[uint64]struct{}, 0)
type deletionInfo struct {
db string
rp string
}
deletedShardIDs := make(map[uint64]deletionInfo, 0)
s.MetaStore.VisitRetentionPolicies(func(d meta.DatabaseInfo, r meta.RetentionPolicyInfo) {
for _, g := range r.DeletedShardGroups() {
for _, sh := range g.Shards {
deletedShardIDs[sh.ID] = struct{}{}
deletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name}
}
}
})
for _, id := range s.TSDBStore.ShardIDs() {
if _, ok := deletedShardIDs[id]; ok {
if di, ok := deletedShardIDs[id]; ok {
if err := s.TSDBStore.DeleteShard(id); err != nil {
s.logger.Printf("failed to delete shard ID %d: %s", id, err.Error())
s.logger.Printf("failed to delete shard ID %d from database %s, retention policy %s: %s",
id, di.db, di.rp, err.Error())
continue
}
s.logger.Printf("shard ID %d deleted", id)
s.logger.Printf("shard ID %d from database %s, retention policy %s, deleted",
id, di.db, di.rp)
}
}
}

View File

@ -16,8 +16,8 @@ import (
// Statistics for the Subscriber service.
const (
statPointsWritten = "points_written"
statWriteFailures = "write_failures"
statPointsWritten = "pointsWritten"
statWriteFailures = "writeFailures"
)
type PointsWriter interface {
@ -56,6 +56,7 @@ func NewService(c Config) *Service {
Logger: log.New(os.Stderr, "[subscriber] ", log.LstdFlags),
statMap: influxdb.NewStatistics("subscriber", "subscriber", nil),
points: make(chan *cluster.WritePointsRequest),
closed: true,
}
}
@ -91,6 +92,11 @@ func (s *Service) Close() error {
return nil
}
// SetLogger sets the internal logger to the logger passed in.
func (s *Service) SetLogger(l *log.Logger) {
s.Logger = l
}
func (s *Service) waitForMetaUpdates() {
for {
err := s.MetaStore.WaitForDataChanged()
@ -100,9 +106,10 @@ func (s *Service) waitForMetaUpdates() {
} else {
//Check that we haven't been closed before performing update.
s.mu.Lock()
if !s.closed {
if s.closed {
s.mu.Unlock()
break
s.Logger.Println("service closed not updating")
return
}
s.mu.Unlock()
s.Update()
@ -113,7 +120,6 @@ func (s *Service) waitForMetaUpdates() {
// start new and stop deleted subscriptions.
func (s *Service) Update() error {
s.Logger.Println("updating subscriptions")
dbis, err := s.MetaStore.Databases()
if err != nil {
return err
@ -145,6 +151,7 @@ func (s *Service) Update() error {
for se := range s.subs {
if !allEntries[se] {
delete(s.subs, se)
s.Logger.Println("deleted old subscription for", se.db, se.rp)
}
}
@ -183,6 +190,7 @@ func (s *Service) createSubscription(se subEntry, mode string, destinations []st
key := strings.Join([]string{"subscriber", se.db, se.rp, se.name, dest}, ":")
statMaps[i] = influxdb.NewStatistics(key, "subscriber", tags)
}
s.Logger.Println("created new subscription for", se.db, se.rp)
return &balancewriter{
bm: bm,
writers: writers,

View File

@ -387,3 +387,62 @@ func TestService_Multiple(t *testing.T) {
}
close(dataChanged)
}
func TestService_WaitForDataChanged(t *testing.T) {
dataChanged := make(chan bool)
ms := MetaStore{}
ms.WaitForDataChangedFn = func() error {
<-dataChanged
return nil
}
calls := make(chan bool, 2)
ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
calls <- true
return nil, nil
}
s := subscriber.NewService(subscriber.NewConfig())
s.MetaStore = ms
// Explicitly closed below for testing
s.Open()
// Should be called once during open
select {
case <-calls:
case <-time.After(10 * time.Millisecond):
t.Fatal("expected call")
}
select {
case <-calls:
t.Fatal("unexpected call")
case <-time.After(time.Millisecond):
}
// Signal that data has changed
dataChanged <- true
// Should be called once more after data changed
select {
case <-calls:
case <-time.After(10 * time.Millisecond):
t.Fatal("expected call")
}
select {
case <-calls:
t.Fatal("unexpected call")
case <-time.After(time.Millisecond):
}
// Close the service and ensure the update function is not called again.
s.Close()
dataChanged <- true
select {
case <-calls:
t.Fatal("unexpected call")
case <-time.After(time.Millisecond):
}
close(dataChanged)
}

View File

@ -1,13 +1,125 @@
# Configuration
# The UDP Input
## A note on UDP/IP OS Buffer sizes
Some OSes (most notably, Linux) place very restrictive limits on UDP buffer sizes,
which can severely limit UDP throughput. It is _highly_ recommended that you increase
these OS limits to at least 8MB before sending large amounts of UDP traffic to your instance.
8MB is just a recommendation; it should be adjusted to be in line with your
`read-buffer` plugin setting.
### Linux
Check the current UDP/IP receive buffer limit by typing the following command:
```
sysctl net.core.rmem_max
```
If the value is less than 8388608 bytes, add the following line to the /etc/sysctl.conf file:
```
net.core.rmem_max=8388608
```
Changes to /etc/sysctl.conf do not take effect until reboot. To update the value immediately, run the following command as root:
```
sysctl -w net.core.rmem_max=8388608
```
### BSD/Darwin
On BSD/Darwin systems you need to add about 15% padding to the kernel limit
socket buffer. This means that if you want an 8MB buffer (8388608 bytes) you need to set
the kernel limit to `8388608*1.15 = 9646900`. This is not documented anywhere, but is applied
[in the kernel here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64)
Check the current UDP/IP buffer limit by typing the following command:
```
sysctl kern.ipc.maxsockbuf
```
If the value is less than 9646900 bytes, add the following line to the /etc/sysctl.conf file (create it if necessary):
```
kern.ipc.maxsockbuf=9646900
```
Changes to /etc/sysctl.conf do not take effect until reboot. To update the value immediately, run the following command as root:
```
sysctl -w kern.ipc.maxsockbuf=9646900
```
### Using the read-buffer option for the UDP listener
The `read-buffer` option allows users to set the buffer size for the UDP listener.
It sets the size of the operating system's receive buffer associated with
the UDP traffic. Keep in mind that the OS must be able to support the size set
here, or the UDP listener will error and exit.
`read-buffer = 0` means to use the OS default, which is usually too
small for high-volume UDP traffic.
## Configuration
Each UDP input allows the binding address, target database, and target retention policy to be set. If the database does not exist, it will be created automatically when the input is initialized. If the retention policy is not configured, the default retention policy for the database is used. If a retention policy is explicitly set, however, it must already exist; the input will not create it automatically.
Each UDP input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 5000, the default _pending batch_ factor is 10, and the default _batch timeout_ is 1 second. This means the input will write batches of at most 5000 points, but if a batch has not reached 5000 points within 1 second of the first point being added, it will emit that batch regardless of size. For example, a burst of 300 points is written as a single 300-point batch once the 1-second timeout fires. The pending batch factor controls how many batches can be held in memory at once, allowing the input to transmit one batch while still building others; see the config examples below.
# Processing
## Processing
The UDP input can receive up to 64KB per read, and splits the received data by newline. Each line is then interpreted as a line-protocol encoded point, and parsed accordingly.
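For example, a single datagram may carry several points, one per line (the measurements and values here are illustrative):
```
cpu,host=server01 value=0.64
mem,host=server01 free=2048i
```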
# UDP is connectionless
## UDP is connectionless
Since UDP is a connectionless protocol, there is no way to signal to the data source that an error occurred, or even that the data was successfully indexed. This should be kept in mind when deciding if and when to use the UDP input. The built-in UDP statistics are useful for monitoring the UDP inputs. A minimal sketch of a sender follows.
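As a minimal sketch (assuming an input bound to `localhost:8089`, matching the examples below; the measurement and values are illustrative), a client can write points with nothing more than a UDP socket:
```
package main

import (
	"fmt"
	"net"
)

func main() {
	// "Connect" the UDP socket; no handshake actually occurs.
	conn, err := net.Dial("udp", "localhost:8089")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One line-protocol point per line; any error after this point is
	// invisible to the sender, as described above.
	fmt.Fprint(conn, "cpu,host=server01 value=0.64\n")
}
```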
## Config Examples
One UDP listener
```
# influxd.conf
...
[[udp]]
enabled = true
bind-address = ":8089" # the bind address
database = "telegraf" # Name of the database that will be written to
batch-size = 5000 # will flush if this many points get buffered
batch-timeout = "1s" # will flush at least this often even if the batch-size is not reached
batch-pending = 10 # number of batches that may be pending in memory
read-buffer = 0 # UDP read buffer, 0 means to use OS default
...
```
Multiple UDP listeners
```
# influxd.conf
...
[[udp]]
# Default UDP for Telegraf
enabled = true
bind-address = ":8089" # the bind address
database = "telegraf" # Name of the database that will be written to
batch-size = 5000 # will flush if this many points get buffered
batch-timeout = "1s" # will flush at least this often even if the batch-size is not reached
batch-pending = 10 # number of batches that may be pending in memory
read-buffer = 0 # UDP read buffer size, 0 means to use OS default
[[udp]]
# High-traffic UDP
enabled = true
bind-address = ":80891" # the bind address
database = "mymetrics" # Name of the database that will be written to
batch-size = 5000 # will flush if this many points get buffered
batch-timeout = "1s" # will flush at least this often even if the batch-size is not reached
batch-pending = 100 # number of batches that may be pending in memory
read-buffer = 8388608 # (8*1024*1024) UDP read buffer size
...
```

View File

@ -7,17 +7,36 @@ import (
)
const (
// DefaultBindAddress is the default binding interface if none is specified.
DefaultBindAddress = ":8089"
// DefaultDatabase is the default database for UDP traffic.
DefaultDatabase = "udp"
// DefaultRetentionPolicy is the default retention policy used for writes.
DefaultRetentionPolicy = ""
// DefaultBatchSize is the default UDP batch size.
DefaultBatchSize = 1000
DefaultBatchSize = 5000
// DefaultBatchPending is the default number of pending UDP batches.
DefaultBatchPending = 5
DefaultBatchPending = 10
// DefaultBatchTimeout is the default UDP batch timeout.
DefaultBatchTimeout = time.Second
// DefaultReadBuffer is the default buffer size for the UDP listener.
// Sets the size of the operating system's receive buffer associated with
// the UDP traffic. Keep in mind that the OS must be able
// to handle the number set here or the UDP listener will error and exit.
//
// DefaultReadBuffer = 0 means to use the OS default, which is usually too
// small for high UDP performance.
//
// Increasing OS buffer limits:
// Linux: sudo sysctl -w net.core.rmem_max=<read-buffer>
// BSD/Darwin: sudo sysctl -w kern.ipc.maxsockbuf=<read-buffer>
DefaultReadBuffer = 0
)
type Config struct {
@ -28,9 +47,21 @@ type Config struct {
RetentionPolicy string `toml:"retention-policy"`
BatchSize int `toml:"batch-size"`
BatchPending int `toml:"batch-pending"`
ReadBuffer int `toml:"read-buffer"`
BatchTimeout toml.Duration `toml:"batch-timeout"`
}
func NewConfig() Config {
return Config{
BindAddress: DefaultBindAddress,
Database: DefaultDatabase,
RetentionPolicy: DefaultRetentionPolicy,
BatchSize: DefaultBatchSize,
BatchPending: DefaultBatchPending,
BatchTimeout: toml.Duration(DefaultBatchTimeout),
}
}
// WithDefaults takes the given config and returns a new config with any required
// default values set.
func (c *Config) WithDefaults() *Config {
@ -47,5 +78,8 @@ func (c *Config) WithDefaults() *Config {
if d.BatchTimeout == 0 {
d.BatchTimeout = toml.Duration(DefaultBatchTimeout)
}
if d.ReadBuffer == 0 {
d.ReadBuffer = DefaultReadBuffer
}
return &d
}

View File

@ -18,18 +18,23 @@ import (
)
const (
// Maximum UDP packet size
// see https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
UDPBufferSize = 65536
// Arbitrary, testing indicated that this doesn't typically get over 10
parserChanLen = 1000
)
// statistics gathered by the UDP package.
const (
statPointsReceived = "points_rx"
statBytesReceived = "bytes_rx"
statPointsParseFail = "points_parse_fail"
statReadFail = "read_fail"
statBatchesTrasmitted = "batches_tx"
statPointsTransmitted = "points_tx"
statBatchesTransmitFail = "batches_tx_fail"
statPointsReceived = "pointsRx"
statBytesReceived = "bytesRx"
statPointsParseFail = "pointsParseFail"
statReadFail = "readFail"
statBatchesTrasmitted = "batchesTx"
statPointsTransmitted = "pointsTx"
statBatchesTransmitFail = "batchesTxFail"
)
//
@ -43,8 +48,9 @@ type Service struct {
wg sync.WaitGroup
done chan struct{}
batcher *tsdb.PointBatcher
config Config
parserChan chan []byte
batcher *tsdb.PointBatcher
config Config
PointsWriter interface {
WritePoints(p *cluster.WritePointsRequest) error
@ -61,10 +67,11 @@ type Service struct {
func NewService(c Config) *Service {
d := *c.WithDefaults()
return &Service{
config: d,
done: make(chan struct{}),
batcher: tsdb.NewPointBatcher(d.BatchSize, d.BatchPending, time.Duration(d.BatchTimeout)),
Logger: log.New(os.Stderr, "[udp] ", log.LstdFlags),
config: d,
done: make(chan struct{}),
parserChan: make(chan []byte, parserChanLen),
batcher: tsdb.NewPointBatcher(d.BatchSize, d.BatchPending, time.Duration(d.BatchTimeout)),
Logger: log.New(os.Stderr, "[udp] ", log.LstdFlags),
}
}
@ -98,16 +105,26 @@ func (s *Service) Open() (err error) {
return err
}
if s.config.ReadBuffer != 0 {
err = s.conn.SetReadBuffer(s.config.ReadBuffer)
if err != nil {
s.Logger.Printf("Failed to set UDP read buffer to %d: %s",
s.config.ReadBuffer, err)
return err
}
}
s.Logger.Printf("Started listening on UDP: %s", s.config.BindAddress)
s.wg.Add(2)
s.wg.Add(3)
go s.serve()
go s.writePoints()
go s.parser()
go s.writer()
return nil
}
func (s *Service) writePoints() {
func (s *Service) writer() {
defer s.wg.Done()
for {
@ -137,7 +154,6 @@ func (s *Service) serve() {
s.batcher.Start()
for {
buf := make([]byte, UDPBufferSize)
select {
case <-s.done:
@ -145,27 +161,39 @@ func (s *Service) serve() {
return
default:
// Keep processing.
buf := make([]byte, UDPBufferSize)
n, _, err := s.conn.ReadFromUDP(buf)
if err != nil {
s.statMap.Add(statReadFail, 1)
s.Logger.Printf("Failed to read UDP message: %s", err)
continue
}
s.statMap.Add(statBytesReceived, int64(n))
s.parserChan <- buf[:n]
}
}
}
n, _, err := s.conn.ReadFromUDP(buf)
if err != nil {
s.statMap.Add(statReadFail, 1)
s.Logger.Printf("Failed to read UDP message: %s", err)
continue
}
s.statMap.Add(statBytesReceived, int64(n))
func (s *Service) parser() {
defer s.wg.Done()
points, err := models.ParsePoints(buf[:n])
if err != nil {
s.statMap.Add(statPointsParseFail, 1)
s.Logger.Printf("Failed to parse points: %s", err)
continue
}
for {
select {
case <-s.done:
return
case buf := <-s.parserChan:
points, err := models.ParsePoints(buf)
if err != nil {
s.statMap.Add(statPointsParseFail, 1)
s.Logger.Printf("Failed to parse points: %s", err)
continue
}
for _, point := range points {
s.batcher.In() <- point
for _, point := range points {
s.batcher.In() <- point
}
s.statMap.Add(statPointsReceived, int64(len(points)))
}
s.statMap.Add(statPointsReceived, int64(len(points)))
}
}
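The performance change splits the old two-goroutine service into three: serve only reads datagrams and pushes the raw bytes onto parserChan, parser turns bytes into points and feeds the batcher, and writer flushes batches. A stripped-down sketch of the same hand-off pattern, with channels standing in for the service's internals (names and sizes are illustrative):

package main

import "fmt"

func main() {
	raw := make(chan []byte, 1000) // mirrors parserChanLen
	parsed := make(chan string, 10)

	// "serve": reads packets and hands raw bytes off without parsing.
	go func() {
		for i := 0; i < 3; i++ {
			raw <- []byte(fmt.Sprintf("cpu value=%d", i))
		}
		close(raw)
	}()

	// "parser": decouples parsing cost from the read loop.
	go func() {
		for b := range raw {
			parsed <- string(b) // stand-in for models.ParsePoints
		}
		close(parsed)
	}()

	// "writer": drains parsed points, as the batcher/writer pair does.
	for p := range parsed {
		fmt.Println("batched:", p)
	}
}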

View File

@ -275,7 +275,6 @@ func Run(cfg *Config, done chan struct{}, ts chan time.Time) (totalPoints int, f
fmt.Println("ERROR: ", err.Error())
}
failedRequests += 1
//totalPoints -= len(b.Points)
totalPoints -= cfg.Write.BatchSize
lastSuccess = false
mu.Unlock()

View File

@ -35,7 +35,7 @@ func NewMux() *Mux {
return &Mux{
m: make(map[byte]*listener),
Timeout: DefaultTimeout,
Logger: log.New(os.Stderr, "", log.LstdFlags),
Logger: log.New(os.Stderr, "[tcp] ", log.LstdFlags),
}
}

View File

@ -1,14 +1,19 @@
package toml_test
import (
"bytes"
"strings"
"testing"
"time"
"github.com/influxdb/influxdb/toml"
"github.com/BurntSushi/toml"
"github.com/influxdb/influxdb/cmd/influxd/run"
itoml "github.com/influxdb/influxdb/toml"
)
// Ensure that megabyte sizes can be parsed.
func TestSize_UnmarshalText_MB(t *testing.T) {
var s toml.Size
var s itoml.Size
if err := s.UnmarshalText([]byte("200m")); err != nil {
t.Fatalf("unexpected error: %s", err)
} else if s != 200*(1<<20) {
@ -18,7 +23,7 @@ func TestSize_UnmarshalText_MB(t *testing.T) {
// Ensure that gigabyte sizes can be parsed.
func TestSize_UnmarshalText_GB(t *testing.T) {
var s toml.Size
var s itoml.Size
if err := s.UnmarshalText([]byte("1g")); err != nil {
t.Fatalf("unexpected error: %s", err)
} else if s != 1073741824 {
@ -26,17 +31,15 @@ func TestSize_UnmarshalText_GB(t *testing.T) {
}
}
/*
func TestConfig_Encode(t *testing.T) {
var c influxdb.Config
c.Monitoring.WriteInterval = influxdb.Duration(time.Minute)
var c run.Config
c.Cluster.WriteTimeout = itoml.Duration(time.Minute)
buf := new(bytes.Buffer)
if err := toml.NewEncoder(buf).Encode(&c); err != nil {
t.Fatal("Failed to encode: ", err)
}
got, search := buf.String(), `write-interval = "1m0s"`
got, search := buf.String(), `write-timeout = "1m0s"`
if !strings.Contains(got, search) {
t.Fatalf("Encoding config failed.\nfailed to find %s in:\n%s\n", search, got)
}
}
*/

View File

@ -0,0 +1,892 @@
package tsdb
import (
"errors"
"fmt"
"sort"
"strings"
"time"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/pkg/slices"
)
// AggregateExecutor executes aggregate SELECT statements.
type AggregateExecutor struct {
stmt *influxql.SelectStatement
mappers []*StatefulMapper
}
// NewAggregateExecutor returns a new AggregateExecutor.
func NewAggregateExecutor(stmt *influxql.SelectStatement, mappers []Mapper) *AggregateExecutor {
e := &AggregateExecutor{
stmt: stmt,
mappers: make([]*StatefulMapper, 0, len(mappers)),
}
for _, m := range mappers {
e.mappers = append(e.mappers, &StatefulMapper{m, nil, false})
}
return e
}
// close closes the executor such that all resources are released.
// Once closed, an executor may not be re-used.
func (e *AggregateExecutor) close() {
if e != nil {
for _, m := range e.mappers {
m.Close()
}
}
}
// Execute begins execution of the query and returns a channel to receive rows.
func (e *AggregateExecutor) Execute() <-chan *models.Row {
out := make(chan *models.Row, 0)
go e.execute(out)
return out
}
func (e *AggregateExecutor) execute(out chan *models.Row) {
// It's important to close all resources when execution completes.
defer e.close()
// Create the functions which will reduce values from mappers for
// a given interval. The function offsets within this slice match
// the offsets within the value slices that are returned by the
// mapper.
reduceFuncs, err := e.initReduceFuncs()
if err != nil {
out <- &models.Row{Err: err}
return
}
// Put together the rows to return, starting with columns.
columnNames := e.stmt.ColumnNames()
// Open the mappers.
if err := e.openMappers(); err != nil {
out <- &models.Row{Err: err}
return
}
// Filter out empty sets if there are multiple tag sets.
hasMultipleTagSets := e.hasMultipleTagSets()
ascending := e.ascending()
// Prime each mapper's chunk buffer.
if err := e.initMappers(); err != nil {
out <- &models.Row{Err: err}
return
}
// Keep looping until all mappers drained.
for !e.mappersDrained() {
chunks, err := e.readNextTagset()
if err != nil {
out <- &models.Row{Err: err}
return
}
// Prep a row, ready for kicking out.
row := &models.Row{
Name: chunks[0].Name,
Tags: chunks[0].Tags,
Columns: columnNames,
}
// Prep for bucketing data by start time of the interval.
buckets := map[int64][][]interface{}{}
var chunkValues []*MapperValue
for _, chunk := range chunks {
for _, chunkValue := range chunk.Values {
chunkValues = append(chunkValues, chunkValue)
}
}
sort.Sort(MapperValues(chunkValues))
for _, chunkValue := range chunkValues {
startTime := chunkValue.Time
values := chunkValue.Value.([]interface{})
if _, ok := buckets[startTime]; !ok {
buckets[startTime] = make([][]interface{}, len(values))
}
for i, v := range values {
buckets[startTime][i] = append(buckets[startTime][i], v)
}
}
// After the loop above, each time bucket holds one slice per reduce function; each of
// those slices accumulates interface{} values, ready for passing to the reducer functions.
// Work each bucket of time, in the statement's sort order.
tMins := make(int64Slice, 0, len(buckets))
for k := range buckets {
tMins = append(tMins, k)
}
if ascending {
sort.Sort(tMins)
} else {
sort.Sort(sort.Reverse(tMins))
}
values := make([][]interface{}, len(tMins))
for i, t := range tMins {
values[i] = make([]interface{}, 0, len(columnNames))
values[i] = append(values[i], time.Unix(0, t).UTC()) // Time value is always first.
for j, f := range reduceFuncs {
reducedVal := f(buckets[t][j])
values[i] = append(values[i], reducedVal)
}
}
// Perform aggregate unwraps
values, err = e.processFunctions(values, columnNames)
if err != nil {
out <- &models.Row{Err: err}
}
// Perform any mathematics.
values = processForMath(e.stmt.Fields, values)
// Handle any fill options
values = e.processFill(values)
// process derivatives
values = e.processDerivative(values)
// If we have multiple tag sets we'll want to filter out the empty ones
if hasMultipleTagSets && resultsEmpty(values) {
continue
}
row.Values = values
out <- row
}
close(out)
}
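As a concrete illustration of the bucketing step above (not part of the package), three chunk values with two aggregate calls each collapse into per-interval, per-call value slices like so:

package main

import "fmt"

type chunkValue struct {
	Time  int64
	Value []interface{} // one entry per aggregate call, as in MapperValue
}

func main() {
	input := []chunkValue{
		{Time: 0, Value: []interface{}{1.0, 10.0}},
		{Time: 0, Value: []interface{}{2.0, 20.0}},
		{Time: 10, Value: []interface{}{3.0, 30.0}},
	}

	buckets := map[int64][][]interface{}{}
	for _, cv := range input {
		if _, ok := buckets[cv.Time]; !ok {
			buckets[cv.Time] = make([][]interface{}, len(cv.Value))
		}
		for i, v := range cv.Value {
			buckets[cv.Time][i] = append(buckets[cv.Time][i], v)
		}
	}
	// Each reduce function j then receives buckets[t][j],
	// e.g. buckets[0] == [[1 2] [10 20]] and buckets[10] == [[3] [30]].
	fmt.Println(buckets)
}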
// initReduceFuncs returns a list of reduce functions for the aggregates in the query.
func (e *AggregateExecutor) initReduceFuncs() ([]reduceFunc, error) {
calls := e.stmt.FunctionCalls()
fns := make([]reduceFunc, len(calls))
for i, c := range calls {
fn, err := initializeReduceFunc(c)
if err != nil {
return nil, err
}
fns[i] = fn
}
return fns, nil
}
// openMappers opens all the mappers.
func (e *AggregateExecutor) openMappers() error {
for _, m := range e.mappers {
if err := m.Open(); err != nil {
return err
}
}
return nil
}
// initMappers buffers the first chunk of each mapper.
func (e *AggregateExecutor) initMappers() error {
for _, m := range e.mappers {
chunk, err := m.NextChunk()
if err != nil {
return err
}
m.bufferedChunk = chunk
if m.bufferedChunk == nil {
m.drained = true
}
}
return nil
}
// hasMultipleTagSets returns true if there is more than one tagset in the mappers.
func (e *AggregateExecutor) hasMultipleTagSets() bool {
set := make(map[string]struct{})
for _, m := range e.mappers {
for _, t := range m.TagSets() {
set[t] = struct{}{}
if len(set) > 1 {
return true
}
}
}
return false
}
// ascending returns true if statement is sorted in ascending order.
func (e *AggregateExecutor) ascending() bool {
if len(e.stmt.SortFields) == 0 {
return true
}
return e.stmt.SortFields[0].Ascending
}
// mappersDrained returns whether all the executor's Mappers have been drained of data.
func (e *AggregateExecutor) mappersDrained() bool {
for _, m := range e.mappers {
if !m.drained {
return false
}
}
return true
}
// nextMapperTagSet returns the alphabetically lowest tagset across all Mappers.
func (e *AggregateExecutor) nextMapperTagSet() string {
tagset := ""
for _, m := range e.mappers {
if m.bufferedChunk != nil {
if tagset == "" {
tagset = m.bufferedChunk.key()
} else if m.bufferedChunk.key() < tagset {
tagset = m.bufferedChunk.key()
}
}
}
return tagset
}
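nextMapperTagSet is the selection step of a k-way merge: every mapper emits chunks in tagset order, so repeatedly draining the lexicographically smallest buffered key visits each tagset exactly once. A toy version of the selection:

package main

import "fmt"

// lowestKey mimics nextMapperTagSet over the buffered chunk keys;
// "" stands for a mapper with no buffered chunk.
func lowestKey(buffered []string) string {
	key := ""
	for _, k := range buffered {
		if k == "" {
			continue
		}
		if key == "" || k < key {
			key = k
		}
	}
	return key
}

func main() {
	fmt.Println(lowestKey([]string{"cpu|host=b", "cpu|host=a", ""})) // cpu|host=a
}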
// readNextTagset returns all chunks for the next tagset.
func (e *AggregateExecutor) readNextTagset() ([]*MapperOutput, error) {
// Send out data for the next alphabetically-lowest tagset.
// All Mappers send out in this order so collect data for this tagset, ignoring all others.
tagset := e.nextMapperTagSet()
chunks := []*MapperOutput{}
// Pull as much as possible from each mapper. Stop when a mapper offers
// data for a new tagset, or empties completely.
for _, m := range e.mappers {
if m.drained {
continue
}
for {
if m.bufferedChunk == nil {
chunk, err := m.NextChunk()
if err != nil {
return nil, err
}
m.bufferedChunk = chunk
if m.bufferedChunk == nil {
m.drained = true
break
}
}
// Got a chunk. Can we use it?
if m.bufferedChunk.key() != tagset {
break // No, so just leave it in the buffer.
}
// We can, take it.
chunks = append(chunks, m.bufferedChunk)
m.bufferedChunk = nil
}
}
return chunks, nil
}
// processFill will take the results and return new results (or the same if no fill modifications are needed)
// with whatever fill options the query has.
func (e *AggregateExecutor) processFill(results [][]interface{}) [][]interface{} {
// don't do anything if we're supposed to leave the nulls
if e.stmt.Fill == influxql.NullFill {
return results
}
if e.stmt.Fill == influxql.NoFill {
// remove any rows that have even one nil value. This is tricky because a row could
// have multiple aggregates, but this option means that any row with a nil gets purged.
newResults := make([][]interface{}, 0, len(results))
for _, vals := range results {
hasNil := false
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
hasNil = true
break
}
}
if !hasNil {
newResults = append(newResults, vals)
}
}
return newResults
}
// They're either filling with previous values or a specific number
for i, vals := range results {
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
switch e.stmt.Fill {
case influxql.PreviousFill:
if i != 0 {
vals[j] = results[i-1][j]
}
case influxql.NumberFill:
vals[j] = e.stmt.FillValue
}
}
}
}
return results
}
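For intuition, a self-contained sketch of the PreviousFill branch (the same loop shape as above, outside the package): a nil in any non-time column is replaced by the value from the previous row.

package main

import "fmt"

func fillPrevious(rows [][]interface{}) [][]interface{} {
	for i, vals := range rows {
		for j := 1; j < len(vals); j++ { // column 0 is always time
			if vals[j] == nil && i != 0 {
				vals[j] = rows[i-1][j]
			}
		}
	}
	return rows
}

func main() {
	rows := [][]interface{}{
		{1, 10.0},
		{2, nil},
		{3, 30.0},
	}
	fmt.Println(fillPrevious(rows)) // [[1 10] [2 10] [3 30]]
}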
// processDerivative returns the derivatives of the results
func (e *AggregateExecutor) processDerivative(results [][]interface{}) [][]interface{} {
// Return early if we're not supposed to process the derivatives
if e.stmt.HasDerivative() {
interval, err := derivativeInterval(e.stmt)
if err != nil {
return results // XXX need to handle this better.
}
// Determines whether to drop negative differences
isNonNegative := e.stmt.FunctionCalls()[0].Name == "non_negative_derivative"
return ProcessAggregateDerivative(results, isNonNegative, interval)
}
return results
}
func (e *AggregateExecutor) processFunctions(results [][]interface{}, columnNames []string) ([][]interface{}, error) {
callInPosition := e.stmt.FunctionCallsByPosition()
hasTimeField := e.stmt.HasTimeFieldSpecified()
var err error
for i, calls := range callInPosition {
// We can only support expanding fields if a single selector call was specified
// i.e. select tx, max(rx) from foo
// If you have multiple selectors or aggregates, there is no way of knowing who gets to insert the values, so we don't
// i.e. select tx, max(rx), min(rx) from foo
if len(calls) == 1 {
var c *influxql.Call
c = calls[0]
switch c.Name {
case "top", "bottom":
results, err = e.processAggregates(results, columnNames, c)
if err != nil {
return results, err
}
case "first", "last", "min", "max":
results, err = e.processSelectors(results, i, hasTimeField, columnNames)
if err != nil {
return results, err
}
}
}
}
return results, nil
}
func (e *AggregateExecutor) processSelectors(results [][]interface{}, callPosition int, hasTimeField bool, columnNames []string) ([][]interface{}, error) {
// if the row doesn't have enough columns, expand it
for i, columns := range results {
if len(columns) != len(columnNames) {
columns = append(columns, make([]interface{}, len(columnNames)-len(columns))...)
}
for j := 1; j < len(columns); j++ {
switch v := columns[j].(type) {
case PositionPoint:
tMin := columns[0].(time.Time)
results[i] = e.selectorPointToQueryResult(columns, hasTimeField, callPosition, v, tMin, columnNames)
}
}
}
return results, nil
}
func (e *AggregateExecutor) selectorPointToQueryResult(columns []interface{}, hasTimeField bool, columnIndex int, p PositionPoint, tMin time.Time, columnNames []string) []interface{} {
callCount := len(e.stmt.FunctionCalls())
if callCount == 1 {
tm := time.Unix(0, p.Time).UTC()
// If we didn't explicitly ask for time, and we have a group by, then use TMIN for the time returned
if len(e.stmt.Dimensions) > 0 && !hasTimeField {
tm = tMin.UTC()
}
columns[0] = tm
}
for i, c := range columnNames {
// skip over time, we already handled that above
if i == 0 {
continue
}
if (i == columnIndex && hasTimeField) || (i == columnIndex+1 && !hasTimeField) {
// Check to see if we previously processed this column, if so, continue
if _, ok := columns[i].(PositionPoint); !ok && columns[i] != nil {
continue
}
columns[i] = p.Value
continue
}
if callCount == 1 {
// Always favor fields over tags if there is a name collision
if t, ok := p.Fields[c]; ok {
columns[i] = t
} else if t, ok := p.Tags[c]; ok {
// look in the tags for a value
columns[i] = t
}
}
}
return columns
}
func (e *AggregateExecutor) processAggregates(results [][]interface{}, columnNames []string, call *influxql.Call) ([][]interface{}, error) {
var values [][]interface{}
// Check if we have a group by, if not, rewrite the entire result by flattening it out
for _, vals := range results {
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
switch v := vals[j].(type) {
case PositionPoints:
tMin := vals[0].(time.Time)
for _, p := range v {
result := e.aggregatePointToQueryResult(p, tMin, call, columnNames)
values = append(values, result)
}
case nil:
continue
default:
return nil, fmt.Errorf("unrechable code - processAggregates for type %T %v", v, v)
}
}
}
return values, nil
}
func (e *AggregateExecutor) aggregatePointToQueryResult(p PositionPoint, tMin time.Time, call *influxql.Call, columnNames []string) []interface{} {
tm := time.Unix(0, p.Time).UTC()
// If we didn't explicitly ask for time, and we have a group by, then use TMIN for the time returned
if len(e.stmt.Dimensions) > 0 && !e.stmt.HasTimeFieldSpecified() {
tm = tMin.UTC()
}
vals := []interface{}{tm}
for _, c := range columnNames {
if c == call.Name {
vals = append(vals, p.Value)
continue
}
// TODO in the future fields will also be available to us.
// we should always favor fields over tags if there is a name collision
// look in the tags for a value
if t, ok := p.Tags[c]; ok {
vals = append(vals, t)
}
}
return vals
}
// AggregateMapper runs the map phase for aggregate SELECT queries.
type AggregateMapper struct {
shard *Shard
stmt *influxql.SelectStatement
qmin, qmax int64 // query time range
tx Tx
cursors []CursorSet
cursorIndex int
interval int // Current interval for which data is being fetched.
intervalN int // Maximum number of intervals to return.
intervalSize int64 // Size of each interval.
qminWindow int64 // Minimum time of the query floored to start of interval.
mapFuncs []mapFunc // The mapping functions.
fieldNames []string // the field name being read for mapping.
selectFields []string
selectTags []string
whereFields []string
}
// NewAggregateMapper returns a new instance of AggregateMapper.
func NewAggregateMapper(sh *Shard, stmt *influxql.SelectStatement) *AggregateMapper {
return &AggregateMapper{
shard: sh,
stmt: stmt,
}
}
// Open opens and initializes the mapper.
func (m *AggregateMapper) Open() error {
// Ignore if node has the shard but hasn't written to it yet.
if m.shard == nil {
return nil
}
// Rewrite statement.
stmt, err := m.shard.index.RewriteSelectStatement(m.stmt)
if err != nil {
return err
}
m.stmt = stmt
// Set all time-related parameters on the mapper.
m.qmin, m.qmax = influxql.TimeRangeAsEpochNano(m.stmt.Condition)
if err := m.initializeMapFunctions(); err != nil {
return err
}
// For GROUP BY time queries, limit the number of data points returned by the limit and offset
d, err := m.stmt.GroupByInterval()
if err != nil {
return err
}
m.intervalSize = d.Nanoseconds()
if m.qmin == 0 || m.intervalSize == 0 {
m.intervalN = 1
m.intervalSize = m.qmax - m.qmin
} else {
intervalTop := m.qmax/m.intervalSize*m.intervalSize + m.intervalSize
intervalBottom := m.qmin / m.intervalSize * m.intervalSize
m.intervalN = int((intervalTop - intervalBottom) / m.intervalSize)
}
if m.stmt.Limit > 0 || m.stmt.Offset > 0 {
// ensure that the offset isn't higher than the number of points we'd get
if m.stmt.Offset > m.intervalN {
return nil
}
// Take the lesser of the pre-computed number of GROUP BY buckets that
// will be in the result or the limit passed in by the user
if m.stmt.Limit < m.intervalN {
m.intervalN = m.stmt.Limit
}
}
// If we are exceeding our MaxGroupByPoints, error out
if m.intervalN > MaxGroupByPoints {
return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?")
}
// Ensure that the start time for the results is on the start of the window.
m.qminWindow = m.qmin
if m.intervalSize > 0 && m.intervalN > 1 {
m.qminWindow = m.qminWindow / m.intervalSize * m.intervalSize
}
// Get a read-only transaction.
tx, err := m.shard.engine.Begin(false)
if err != nil {
return err
}
m.tx = tx
// Collect measurements.
mms := Measurements(m.shard.index.MeasurementsByName(m.stmt.SourceNames()))
m.selectFields = mms.SelectFields(m.stmt)
m.selectTags = mms.SelectTags(m.stmt)
m.whereFields = mms.WhereFields(m.stmt)
// Open cursors for each measurement.
for _, mm := range mms {
if err := m.openMeasurement(mm); err != nil {
return err
}
}
return nil
}
func (m *AggregateMapper) openMeasurement(mm *Measurement) error {
// Validate that ANY GROUP BY is not a field for the measurement.
if err := mm.ValidateGroupBy(m.stmt); err != nil {
return err
}
// Validate the fields and tags asked for exist and keep track of which are in the select vs the where
selectFields := mm.SelectFields(m.stmt)
selectTags := mm.SelectTags(m.stmt)
// If we only have tags in our select clause we return an error
if len(selectFields) == 0 && len(selectTags) > 0 {
return fmt.Errorf("statement must have at least one field in select clause")
}
// Calculate tag sets and apply SLIMIT/SOFFSET.
tagSets, err := mm.DimensionTagSets(m.stmt)
if err != nil {
return err
}
tagSets = m.stmt.LimitTagSets(tagSets)
// Create all cursors for reading the data from this shard.
for _, t := range tagSets {
cursorSet := CursorSet{
Measurement: mm.Name,
Tags: t.Tags,
}
if len(t.Tags) == 0 {
cursorSet.Key = mm.Name
} else {
cursorSet.Key = strings.Join([]string{mm.Name, string(MarshalTags(t.Tags))}, "|")
}
for i, key := range t.SeriesKeys {
fields := slices.Union(selectFields, m.fieldNames, false)
c := m.tx.Cursor(key, fields, m.shard.FieldCodec(mm.Name), true)
if c == nil {
continue
}
seriesTags := m.shard.index.TagsForSeries(key)
cursorSet.Cursors = append(cursorSet.Cursors, NewTagsCursor(c, t.Filters[i], seriesTags))
}
// tsc.Init(m.qmin)
m.cursors = append(m.cursors, cursorSet)
}
sort.Sort(CursorSets(m.cursors))
return nil
}
// initializeMapFunctions initializes the mapping functions for the mapper.
func (m *AggregateMapper) initializeMapFunctions() error {
// Set up each mapping function for this statement.
aggregates := m.stmt.FunctionCalls()
m.mapFuncs = make([]mapFunc, len(aggregates))
m.fieldNames = make([]string, len(m.mapFuncs))
for i, c := range aggregates {
mfn, err := initializeMapFunc(c)
if err != nil {
return err
}
m.mapFuncs[i] = mfn
// Check for calls like `derivative(mean(value), 1d)`
var nested *influxql.Call = c
if fn, ok := c.Args[0].(*influxql.Call); ok {
nested = fn
}
switch lit := nested.Args[0].(type) {
case *influxql.VarRef:
m.fieldNames[i] = lit.Val
case *influxql.Distinct:
if c.Name != "count" {
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
m.fieldNames[i] = lit.Val
default:
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
}
return nil
}
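The single level of unwrapping means derivative(mean(value), 1d) resolves to the field value. A toy version with stand-in types (the real code walks influxql.Call and influxql.VarRef):

package main

import "fmt"

type call struct {
	Name string
	Args []interface{} // *call or *varRef; stand-ins for influxql.Call / influxql.VarRef
}
type varRef struct{ Val string }

func fieldName(c *call) (string, error) {
	nested := c
	if fn, ok := c.Args[0].(*call); ok { // e.g. derivative(mean(value), 1d)
		nested = fn
	}
	if v, ok := nested.Args[0].(*varRef); ok {
		return v.Val, nil
	}
	return "", fmt.Errorf("aggregate call didn't contain a field %s", c.Name)
}

func main() {
	c := &call{Name: "derivative", Args: []interface{}{
		&call{Name: "mean", Args: []interface{}{&varRef{Val: "value"}}},
	}}
	name, _ := fieldName(c)
	fmt.Println(name) // value
}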
// Close closes the mapper.
func (m *AggregateMapper) Close() {
if m != nil && m.tx != nil {
m.tx.Rollback()
}
return
}
// TagSets returns the list of tag sets for which this mapper has data.
func (m *AggregateMapper) TagSets() []string { return CursorSets(m.cursors).Keys() }
// Fields returns all SELECT fields.
func (m *AggregateMapper) Fields() []string { return append(m.selectFields, m.selectTags...) }
// NextChunk returns the next interval of data.
// Tagsets are always processed in the same order as AvailTagsSets().
// When there is no more data for any tagset nil is returned.
func (m *AggregateMapper) NextChunk() (interface{}, error) {
var tmin, tmax int64
for {
// All tagset cursors processed. NextChunk'ing complete.
if m.cursorIndex == len(m.cursors) {
return nil, nil
}
// All intervals complete for this tagset. Move to the next tagset.
tmin, tmax = m.nextInterval()
if tmin < 0 {
m.interval = 0
m.cursorIndex++
continue
}
break
}
// Prep the return data for this tagset.
// This will hold data for a single interval for a single tagset.
cursorSet := m.cursors[m.cursorIndex]
output := &MapperOutput{
Name: cursorSet.Measurement,
Tags: cursorSet.Tags,
Fields: m.selectFields,
cursorKey: cursorSet.Key,
}
// Always clamp tmin and tmax, since bucket times are rounded to the nearest interval. This is
// necessary to grab the "partial" buckets at the beginning and end of the time range
qmin, qmax := tmin, tmax
if qmin < m.qmin {
qmin = m.qmin
}
if qmax > m.qmax {
qmax = m.qmax + 1
}
for _, c := range cursorSet.Cursors {
mapperValue := &MapperValue{
Time: tmin,
Value: make([]interface{}, len(m.mapFuncs)),
}
for i := range m.mapFuncs {
// Build a map input from the cursor.
input := &MapInput{
TMin: -1,
Items: readMapItems(c, m.fieldNames[i], qmin, qmin, qmax),
}
if len(m.stmt.Dimensions) > 0 && !m.stmt.HasTimeFieldSpecified() {
input.TMin = tmin
}
// Execute the map function which walks the entire interval, and aggregates the result.
value := m.mapFuncs[i](input)
if value == nil {
continue
}
mapperValue.Value.([]interface{})[i] = value
}
output.Values = append(output.Values, mapperValue)
}
return output, nil
}
func readMapItems(c *TagsCursor, field string, seek, tmin, tmax int64) []MapItem {
var items []MapItem
var seeked bool
for {
var timestamp int64
var value interface{}
if !seeked {
timestamp, value = c.SeekTo(seek)
seeked = true
} else {
timestamp, value = c.Next()
}
// We're done if the point is outside the query's time range [tmin:tmax).
if timestamp != tmin && (timestamp < tmin || timestamp >= tmax) {
return items
}
// Convert values to fields map.
fields, ok := value.(map[string]interface{})
if !ok {
fields = map[string]interface{}{"": value}
}
// Skip nil values and look for the next one.
if value == nil {
continue
}
// Filter value.
if c.filter != nil {
// Convert value to a map for filter evaluation.
m, ok := value.(map[string]interface{})
if !ok {
m = map[string]interface{}{field: value}
}
// If filter fails then skip to the next value.
if !influxql.EvalBool(c.filter, m) {
continue
}
}
// Filter out single field, if specified.
if m, ok := value.(map[string]interface{}); ok {
value = m[field]
}
if value == nil {
continue
}
items = append(items, MapItem{
Timestamp: timestamp,
Value: value,
Fields: fields,
Tags: c.tags,
})
}
}
// nextInterval returns the next interval for which to return data.
// If start is less than 0 there are no more intervals.
func (m *AggregateMapper) nextInterval() (start, end int64) {
t := m.qminWindow + int64(m.interval+m.stmt.Offset)*m.intervalSize
// On to next interval.
m.interval++
if t > m.qmax || m.interval > m.intervalN {
start, end = -1, 1
} else {
start, end = t, t+m.intervalSize
}
return
}
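A worked example of the interval walk: with qminWindow=0, intervalSize=10, intervalN=3 and no offset, successive calls yield [0,10), [10,20), [20,30), then a negative start to signal exhaustion. A standalone copy of the arithmetic:

package main

import "fmt"

type mapper struct {
	qminWindow, intervalSize, qmax int64
	interval, intervalN, offset    int
}

func (m *mapper) nextInterval() (start, end int64) {
	t := m.qminWindow + int64(m.interval+m.offset)*m.intervalSize
	m.interval++
	if t > m.qmax || m.interval > m.intervalN {
		start, end = -1, 1
	} else {
		start, end = t, t+m.intervalSize
	}
	return
}

func main() {
	m := &mapper{intervalSize: 10, qmax: 30, intervalN: 3}
	for {
		s, e := m.nextInterval()
		if s < 0 {
			break
		}
		fmt.Printf("[%d,%d) ", s, e) // [0,10) [10,20) [20,30)
	}
}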
type CursorSet struct {
Measurement string
Tags map[string]string
Key string
Cursors []*TagsCursor
}
// CursorSets represents a sortable slice of CursorSet.
type CursorSets []CursorSet
func (a CursorSets) Len() int { return len(a) }
func (a CursorSets) Less(i, j int) bool { return a[i].Key < a[j].Key }
func (a CursorSets) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a CursorSets) Keys() []string {
keys := make([]string, len(a))
for i := range a {
keys[i] = a[i].Key
}
sort.Strings(keys)
return keys
}
type int64Slice []int64
func (a int64Slice) Len() int { return len(a) }
func (a int64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a int64Slice) Less(i, j int) bool { return a[i] < a[j] }

View File

@ -1,6 +1,10 @@
package tsdb
import (
"errors"
"fmt"
"log"
"os"
"time"
"github.com/influxdb/influxdb/toml"
@ -98,8 +102,14 @@ type Config struct {
}
func NewConfig() Config {
defaultEngine := DefaultEngine
if engine := os.Getenv("INFLUXDB_DATA_ENGINE"); engine != "" {
log.Println("TSDB engine selected via environment variable:", engine)
defaultEngine = engine
}
return Config{
Engine: DefaultEngine,
Engine: defaultEngine,
MaxWALSize: DefaultMaxWALSize,
WALFlushInterval: toml.Duration(DefaultWALFlushInterval),
WALPartitionFlushDelay: toml.Duration(DefaultWALPartitionFlushDelay),
@ -120,3 +130,24 @@ func NewConfig() Config {
QueryLogEnabled: true,
}
}
func (c *Config) Validate() error {
if c.Dir == "" {
return errors.New("Data.Dir must be specified")
} else if c.WALDir == "" {
return errors.New("Data.WALDir must be specified")
}
valid := false
for _, e := range RegisteredEngines() {
if e == c.Engine {
valid = true
break
}
}
if !valid {
return fmt.Errorf("unrecognized engine %s", c.Engine)
}
return nil
}
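A minimal sketch of the two additions together, assuming the standard tsdb import path; engine registration normally happens in engine subpackage init functions, so the set Validate accepts depends on what is linked in:

package main

import (
	"fmt"
	"os"

	"github.com/influxdb/influxdb/tsdb"
)

func main() {
	// The environment variable overrides DefaultEngine at config-construction time.
	os.Setenv("INFLUXDB_DATA_ENGINE", "tsm1")
	c := tsdb.NewConfig()
	fmt.Println(c.Engine) // "tsm1"

	// Validate rejects any engine name that was never registered.
	c.Dir, c.WALDir = "/tmp/data", "/tmp/wal" // illustrative paths
	c.Engine = "no-such-engine"
	fmt.Println(c.Validate()) // unrecognized engine no-such-engine
}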

View File

@ -154,8 +154,7 @@ type TagSetCursor struct {
cursors []*TagsCursor // Underlying tags cursors.
currentTags map[string]string // the current tags for the underlying series cursor in play
SelectFields []string // fields to be selected
SelectWhereFields []string // fields in both the select and where clause to be returned or filtered on
SelectFields []string // fields to be selected
// Min-heap of cursors ordered by timestamp.
heap *pointHeap

View File

@ -63,6 +63,16 @@ func RegisterEngine(name string, fn NewEngineFunc) {
newEngineFuncs[name] = fn
}
// RegisteredEngines returns the slice of currently registered engines.
func RegisteredEngines() []string {
a := make([]string, 0, len(newEngineFuncs))
for k := range newEngineFuncs {
a = append(a, k)
}
sort.Strings(a)
return a
}
// NewEngine returns an instance of an engine based on its format.
// If the path does not exist then the DefaultFormat is used.
func NewEngine(path string, walPath string, options EngineOptions) (Engine, error) {

View File

@ -33,12 +33,12 @@ const (
)
const (
statSlowInsert = "slow_insert"
statPointsWrite = "points_write"
statPointsWriteDedupe = "points_write_dedupe"
statBlocksWrite = "blks_write"
statBlocksWriteBytes = "blks_write_bytes"
statBlocksWriteBytesCompress = "blks_write_bytes_c"
statSlowInsert = "slowInsert"
statPointsWrite = "pointsWrite"
statPointsWriteDedupe = "pointsWriteDedupe"
statBlocksWrite = "blksWrite"
statBlocksWriteBytes = "blksWriteBytes"
statBlocksWriteBytesCompress = "blksWriteBytesC"
)
func init() {

View File

@ -99,11 +99,11 @@ func TestEngine_WritePoints_PointsWriter(t *testing.T) {
// Points to be inserted.
points := []models.Point{
models.NewPoint("cpu", models.Tags{}, models.Fields{}, time.Unix(0, 1)),
models.NewPoint("cpu", models.Tags{}, models.Fields{}, time.Unix(0, 0)),
models.NewPoint("cpu", models.Tags{}, models.Fields{}, time.Unix(1, 0)),
models.MustNewPoint("cpu", models.Tags{}, models.Fields{}, time.Unix(0, 1)),
models.MustNewPoint("cpu", models.Tags{}, models.Fields{}, time.Unix(0, 0)),
models.MustNewPoint("cpu", models.Tags{}, models.Fields{}, time.Unix(1, 0)),
models.NewPoint("cpu", models.Tags{"host": "serverA"}, models.Fields{}, time.Unix(0, 0)),
models.MustNewPoint("cpu", models.Tags{"host": "serverA"}, models.Fields{}, time.Unix(0, 0)),
}
// Mock points writer to ensure points are passed through.

View File

@ -189,7 +189,7 @@ type cursor struct {
pos uint32
// vals is the current decoded block of Values we're iterating from
vals Values
vals []Value
ascending bool
@ -207,6 +207,7 @@ func newCursor(id uint64, files []*dataFile, ascending bool) *cursor {
id: id,
ascending: ascending,
files: files,
vals: make([]Value, 0),
}
}
@ -472,7 +473,8 @@ func (c *cursor) blockLength(pos uint32) uint32 {
func (c *cursor) decodeBlock(position uint32) {
length := c.blockLength(position)
block := c.f.mmap[position+blockHeaderSize : position+blockHeaderSize+length]
c.vals, _ = DecodeBlock(block)
c.vals = c.vals[:0]
_ = DecodeBlock(block, &c.vals)
// only advance the position if we're ascending.
// Descending queries use the blockPositions

View File

@ -92,7 +92,7 @@ func (a Values) Encode(buf []byte) ([]byte, error) {
// DecodeBlock takes a byte slice and decodes it into values of the appropriate type,
// based on the block header, appending them to vals
func DecodeBlock(block []byte) (Values, error) {
func DecodeBlock(block []byte, vals *[]Value) error {
if len(block) <= encodedBlockHeaderSize {
panic(fmt.Sprintf("decode of short block: got %v, exp %v", len(block), encodedBlockHeaderSize))
}
@ -100,13 +100,13 @@ func DecodeBlock(block []byte) (Values, error) {
blockType := block[8]
switch blockType {
case BlockFloat64:
return decodeFloatBlock(block)
return decodeFloatBlock(block, vals)
case BlockInt64:
return decodeInt64Block(block)
return decodeInt64Block(block, vals)
case BlockBool:
return decodeBoolBlock(block)
return decodeBoolBlock(block, vals)
case BlockString:
return decodeStringBlock(block)
return decodeStringBlock(block, vals)
default:
panic(fmt.Sprintf("unknown block type: %d", blockType))
}
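The signature change lets a hot loop reuse one decode buffer across blocks instead of allocating a fresh Values per call, which is exactly what the cursor change above does with c.vals = c.vals[:0]. A usage sketch built from the calls visible in this diff:

package main

import (
	"fmt"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	block, err := tsm1.Values([]tsm1.Value{
		tsm1.NewValue(time.Unix(0, 1), 1.5),
		tsm1.NewValue(time.Unix(0, 2), 2.5),
	}).Encode(nil)
	if err != nil {
		panic(err)
	}

	var vals []tsm1.Value
	for i := 0; i < 2; i++ {
		vals = vals[:0] // keep capacity, drop contents, as the cursor does
		if err := tsm1.DecodeBlock(block, &vals); err != nil {
			panic(err)
		}
		fmt.Println(len(vals)) // 2 on each pass, with no per-pass allocation once grown
	}
}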
@ -183,7 +183,10 @@ func encodeFloatBlock(buf []byte, values []Value) ([]byte, error) {
return nil, err
}
// Encoded float values
vb := venc.Bytes()
vb, err := venc.Bytes()
if err != nil {
return nil, err
}
// Prepend the first timestamp of the block in the first 8 bytes and the block
// in the next byte, followed by the block
@ -192,14 +195,14 @@ func encodeFloatBlock(buf []byte, values []Value) ([]byte, error) {
return block, nil
}
func decodeFloatBlock(block []byte) ([]Value, error) {
func decodeFloatBlock(block []byte, a *[]Value) error {
// The first 8 bytes are the minimum timestamp of the block
block = block[8:]
// Block type is the next byte, make sure we actually have a float block
blockType := block[0]
if blockType != BlockFloat64 {
return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockFloat64, blockType)
return fmt.Errorf("invalid block type: exp %d, got %d", BlockFloat64, blockType)
}
block = block[1:]
@ -209,27 +212,26 @@ func decodeFloatBlock(block []byte) ([]Value, error) {
dec := NewTimeDecoder(tb)
iter, err := NewFloatDecoder(vb)
if err != nil {
return nil, err
return err
}
// Decode both a timestamp and value
var a []Value
for dec.Next() && iter.Next() {
ts := dec.Read()
v := iter.Values()
a = append(a, &FloatValue{ts, v})
*a = append(*a, &FloatValue{ts, v})
}
// Did timestamp decoding have an error?
if dec.Error() != nil {
return nil, dec.Error()
return dec.Error()
}
// Did float decoding have an error?
if iter.Error() != nil {
return nil, iter.Error()
return iter.Error()
}
return a, nil
return nil
}
type BoolValue struct {
@ -290,14 +292,14 @@ func encodeBoolBlock(buf []byte, values []Value) ([]byte, error) {
return block, nil
}
func decodeBoolBlock(block []byte) ([]Value, error) {
func decodeBoolBlock(block []byte, a *[]Value) error {
// The first 8 bytes are the minimum timestamp of the block
block = block[8:]
// Block type is the next byte, make sure we actually have a bool block
blockType := block[0]
if blockType != BlockBool {
return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockBool, blockType)
return fmt.Errorf("invalid block type: exp %d, got %d", BlockBool, blockType)
}
block = block[1:]
@ -308,23 +310,22 @@ func decodeBoolBlock(block []byte) ([]Value, error) {
vdec := NewBoolDecoder(vb)
// Decode both a timestamp and value
var a []Value
for dec.Next() && vdec.Next() {
ts := dec.Read()
v := vdec.Read()
a = append(a, &BoolValue{ts, v})
*a = append(*a, &BoolValue{ts, v})
}
// Did timestamp decoding have an error?
if dec.Error() != nil {
return nil, dec.Error()
return dec.Error()
}
// Did bool decoding have an error?
if vdec.Error() != nil {
return nil, vdec.Error()
return vdec.Error()
}
return a, nil
return nil
}
type Int64Value struct {
@ -374,13 +375,13 @@ func encodeInt64Block(buf []byte, values []Value) ([]byte, error) {
return append(block, packBlock(tb, vb)...), nil
}
func decodeInt64Block(block []byte) ([]Value, error) {
func decodeInt64Block(block []byte, a *[]Value) error {
// slice off the first 8 bytes (min timestamp for the block)
block = block[8:]
blockType := block[0]
if blockType != BlockInt64 {
return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockInt64, blockType)
return fmt.Errorf("invalid block type: exp %d, got %d", BlockInt64, blockType)
}
block = block[1:]
@ -393,23 +394,22 @@ func decodeInt64Block(block []byte) ([]Value, error) {
vDec := NewInt64Decoder(vb)
// Decode both a timestamp and value
var a []Value
for tsDec.Next() && vDec.Next() {
ts := tsDec.Read()
v := vDec.Read()
a = append(a, &Int64Value{ts, v})
*a = append(*a, &Int64Value{ts, v})
}
// Did timestamp decoding have an error?
if tsDec.Error() != nil {
return nil, tsDec.Error()
return tsDec.Error()
}
// Did int64 decoding have an error?
if vDec.Error() != nil {
return nil, vDec.Error()
return vDec.Error()
}
return a, nil
return nil
}
type StringValue struct {
@ -459,13 +459,13 @@ func encodeStringBlock(buf []byte, values []Value) ([]byte, error) {
return append(block, packBlock(tb, vb)...), nil
}
func decodeStringBlock(block []byte) ([]Value, error) {
func decodeStringBlock(block []byte, a *[]Value) error {
// slice off the first 8 bytes (min timestamp for the block)
block = block[8:]
blockType := block[0]
if blockType != BlockString {
return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockString, blockType)
return fmt.Errorf("invalid block type: exp %d, got %d", BlockString, blockType)
}
block = block[1:]
@ -477,27 +477,26 @@ func decodeStringBlock(block []byte) ([]Value, error) {
tsDec := NewTimeDecoder(tb)
vDec, err := NewStringDecoder(vb)
if err != nil {
return nil, err
return err
}
// Decode both a timestamp and value
var a []Value
for tsDec.Next() && vDec.Next() {
ts := tsDec.Read()
v := vDec.Read()
a = append(a, &StringValue{ts, v})
*a = append(*a, &StringValue{ts, v})
}
// Did timestamp decoding have an error?
if tsDec.Error() != nil {
return nil, tsDec.Error()
return tsDec.Error()
}
// Did string decoding have an error?
if vDec.Error() != nil {
return nil, vDec.Error()
return vDec.Error()
}
return a, nil
return nil
}
func packBlockHeader(firstTime time.Time, blockType byte) []byte {

View File

@ -1,52 +1,51 @@
package tsm1_test
import (
// "math/rand"
"fmt"
"reflect"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)
func TestEncoding_FloatBlock(t *testing.T) {
valueCount := 1000
times := getTimes(valueCount, 60, time.Second)
values := make(tsm1.Values, len(times))
values := make([]tsm1.Value, len(times))
for i, t := range times {
values[i] = tsm1.NewValue(t, float64(i))
}
b, err := values.Encode(nil)
b, err := tsm1.Values(values).Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
decodedValues, err := tsm1.DecodeBlock(b)
if err != nil {
var decodedValues []tsm1.Value
if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
t.Fatalf("unexpected error decoding block: %v", err)
}
if !reflect.DeepEqual(decodedValues, values) {
t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
t.Fatalf("unexpected results:\n\tgot: %s\n\texp: %s\n", spew.Sdump(decodedValues), spew.Sdump(values))
}
}
func TestEncoding_FloatBlock_ZeroTime(t *testing.T) {
values := make(tsm1.Values, 3)
values := make([]tsm1.Value, 3)
for i := 0; i < 3; i++ {
values[i] = tsm1.NewValue(time.Unix(0, 0), float64(i))
}
b, err := values.Encode(nil)
b, err := tsm1.Values(values).Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
decodedValues, err := tsm1.DecodeBlock(b)
if err != nil {
var decodedValues []tsm1.Value
if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
t.Fatalf("unexpected error decoding block: %v", err)
}
@ -56,20 +55,20 @@ func TestEncoding_FloatBlock_ZeroTime(t *testing.T) {
}
func TestEncoding_FloatBlock_SimilarFloats(t *testing.T) {
values := make(tsm1.Values, 5)
values := make([]tsm1.Value, 5)
values[0] = tsm1.NewValue(time.Unix(0, 1444238178437870000), 6.00065e+06)
values[1] = tsm1.NewValue(time.Unix(0, 1444238185286830000), 6.000656e+06)
values[2] = tsm1.NewValue(time.Unix(0, 1444238188441501000), 6.000657e+06)
values[3] = tsm1.NewValue(time.Unix(0, 1444238195286811000), 6.000659e+06)
values[4] = tsm1.NewValue(time.Unix(0, 1444238198439917000), 6.000661e+06)
b, err := values.Encode(nil)
b, err := tsm1.Values(values).Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
decodedValues, err := tsm1.DecodeBlock(b)
if err != nil {
var decodedValues []tsm1.Value
if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
t.Fatalf("unexpected error decoding block: %v", err)
}
@ -81,18 +80,18 @@ func TestEncoding_FloatBlock_SimilarFloats(t *testing.T) {
func TestEncoding_IntBlock_Basic(t *testing.T) {
valueCount := 1000
times := getTimes(valueCount, 60, time.Second)
values := make(tsm1.Values, len(times))
values := make([]tsm1.Value, len(times))
for i, t := range times {
values[i] = tsm1.NewValue(t, int64(i))
}
b, err := values.Encode(nil)
b, err := tsm1.Values(values).Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
decodedValues, err := tsm1.DecodeBlock(b)
if err != nil {
var decodedValues []tsm1.Value
if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
t.Fatalf("unexpected error decoding block: %v", err)
}
@ -115,7 +114,7 @@ func TestEncoding_IntBlock_Basic(t *testing.T) {
func TestEncoding_IntBlock_Negatives(t *testing.T) {
valueCount := 1000
times := getTimes(valueCount, 60, time.Second)
values := make(tsm1.Values, len(times))
values := make([]tsm1.Value, len(times))
for i, t := range times {
v := int64(i)
if i%2 == 0 {
@ -124,13 +123,13 @@ func TestEncoding_IntBlock_Negatives(t *testing.T) {
values[i] = tsm1.NewValue(t, int64(v))
}
b, err := values.Encode(nil)
b, err := tsm1.Values(values).Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
decodedValues, err := tsm1.DecodeBlock(b)
if err != nil {
var decodedValues []tsm1.Value
if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
t.Fatalf("unexpected error decoding block: %v", err)
}
@ -142,7 +141,7 @@ func TestEncoding_IntBlock_Negatives(t *testing.T) {
func TestEncoding_BoolBlock_Basic(t *testing.T) {
valueCount := 1000
times := getTimes(valueCount, 60, time.Second)
values := make(tsm1.Values, len(times))
values := make([]tsm1.Value, len(times))
for i, t := range times {
v := true
if i%2 == 0 {
@ -151,13 +150,13 @@ func TestEncoding_BoolBlock_Basic(t *testing.T) {
values[i] = tsm1.NewValue(t, v)
}
b, err := values.Encode(nil)
b, err := tsm1.Values(values).Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
decodedValues, err := tsm1.DecodeBlock(b)
if err != nil {
var decodedValues []tsm1.Value
if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
t.Fatalf("unexpected error decoding block: %v", err)
}
@ -169,18 +168,18 @@ func TestEncoding_BoolBlock_Basic(t *testing.T) {
func TestEncoding_StringBlock_Basic(t *testing.T) {
valueCount := 1000
times := getTimes(valueCount, 60, time.Second)
values := make(tsm1.Values, len(times))
values := make([]tsm1.Value, len(times))
for i, t := range times {
values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i))
}
b, err := values.Encode(nil)
b, err := tsm1.Values(values).Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
decodedValues, err := tsm1.DecodeBlock(b)
if err != nil {
var decodedValues []tsm1.Value
if err := tsm1.DecodeBlock(b, &decodedValues); err != nil {
t.Fatalf("unexpected error decoding block: %v", err)
}

View File

@ -11,6 +11,7 @@ this version.
import (
"bytes"
"fmt"
"math"
"github.com/dgryski/go-bits"
@ -29,6 +30,7 @@ const (
// FloatEncoder encodes multiple float64s into a byte slice
type FloatEncoder struct {
val float64
err error
leading uint64
trailing uint64
@ -52,20 +54,25 @@ func NewFloatEncoder() *FloatEncoder {
}
func (s *FloatEncoder) Bytes() []byte {
return append([]byte{floatCompressedGorilla << 4}, s.buf.Bytes()...)
func (s *FloatEncoder) Bytes() ([]byte, error) {
return append([]byte{floatCompressedGorilla << 4}, s.buf.Bytes()...), s.err
}
func (s *FloatEncoder) Finish() {
if !s.finished {
// write an end-of-stream record
s.finished = true
s.Push(math.NaN())
s.bw.Flush(bitstream.Zero)
s.finished = true
}
}
func (s *FloatEncoder) Push(v float64) {
// NaN is reserved as the end-of-stream sentinel, so reject it as a user value
if math.IsNaN(v) && !s.finished {
s.err = fmt.Errorf("unsupported value: NaN")
return
}
if s.first {
// first point
s.val = v

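Since Finish writes NaN as the end-of-stream marker, a caller-supplied NaN would be indistinguishable from the terminator; Push therefore records it as an error that only surfaces from Bytes. A caller-facing sketch:

package main

import (
	"fmt"
	"math"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func main() {
	enc := tsm1.NewFloatEncoder()
	enc.Push(1.0)
	enc.Push(math.NaN()) // rejected: NaN doubles as the terminator
	enc.Finish()

	if _, err := enc.Bytes(); err != nil {
		fmt.Println(err) // unsupported value: NaN
	}
}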
View File

@ -1,6 +1,7 @@
package tsm1_test
import (
"math"
"reflect"
"testing"
"testing/quick"
@ -29,7 +30,10 @@ func TestFloatEncoder_Simple(t *testing.T) {
s.Finish()
b := s.Bytes()
b, err := s.Bytes()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
it, err := tsm1.NewFloatDecoder(b)
if err != nil {
@ -85,7 +89,10 @@ func TestFloatEncoder_SimilarFloats(t *testing.T) {
s.Finish()
b := s.Bytes()
b, err := s.Bytes()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
it, err := tsm1.NewFloatDecoder(b)
if err != nil {
@ -142,14 +149,16 @@ var TwoHoursData = []struct {
}
func TestFloatEncoder_Roundtrip(t *testing.T) {
s := tsm1.NewFloatEncoder()
for _, p := range TwoHoursData {
s.Push(p.v)
}
s.Finish()
b := s.Bytes()
b, err := s.Bytes()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
it, err := tsm1.NewFloatDecoder(b)
if err != nil {
@ -176,6 +185,21 @@ func TestFloatEncoder_Roundtrip(t *testing.T) {
}
}
func TestFloatEncoder_Roundtrip_NaN(t *testing.T) {
s := tsm1.NewFloatEncoder()
s.Push(1.0)
s.Push(math.NaN())
s.Push(2.0)
s.Finish()
_, err := s.Bytes()
if err == nil {
t.Fatalf("expected error. got nil")
}
}
func Test_FloatEncoder_Quick(t *testing.T) {
quick.Check(func(values []float64) bool {
// Write values to encoder.
@ -187,7 +211,12 @@ func Test_FloatEncoder_Quick(t *testing.T) {
// Read values out of decoder.
got := make([]float64, 0, len(values))
dec, err := tsm1.NewFloatDecoder(enc.Bytes())
b, err := enc.Bytes()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
dec, err := tsm1.NewFloatDecoder(b)
if err != nil {
t.Fatal(err)
}
@ -220,7 +249,10 @@ func BenchmarkFloatDecoder(b *testing.B) {
s.Push(tt.v)
}
s.Finish()
bytes := s.Bytes()
bytes, err := s.Bytes()
if err != nil {
b.Fatalf("unexpected error: %v", err)
}
b.ResetTimer()

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff