Refactor InfluxDB listener (#6974)
Use streaming parser in InfluxDB listener
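The gist of the refactor: the old http_listener read each request body into fixed-size buffers from a byte pool and re-parsed whole chunks, while the new influxdb_listener feeds the body straight into the influx line-protocol stream parser and emits metrics one at a time, so a single malformed line no longer rejects the entire batch. A condensed sketch of that loop, distilled from handleWrite further down (the consumeLineProtocol helper name is illustrative and not part of the commit; it assumes the io, telegraf, and plugins/parsers/influx imports used below):

// consumeLineProtocol drains line protocol from r, adding each metric to acc.
// This mirrors the core of handleWrite; the real handler also tracks byte
// counts, time precision, the db query parameter, and partial-write errors.
func consumeLineProtocol(r io.Reader, acc telegraf.Accumulator) error {
	parser := influx.NewStreamParser(r)
	for {
		m, err := parser.Next()
		if err == influx.EOF {
			return nil // clean end of input
		}
		if _, ok := err.(*influx.ParseError); ok {
			continue // skip a malformed line, keep parsing the rest
		}
		if err != nil {
			return err // unexpected read error
		}
		acc.AddMetric(m)
	}
}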
@@ -30,13 +30,13 @@ submits data to InfluxDB determines the destination database.
   ## maximum duration before timing out write of the response
   write_timeout = "10s"
 
-  ## Maximum allowed http request body size in bytes.
-  ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
+  ## Maximum allowed HTTP request body size in bytes.
+  ## 0 means to use the default of 32MiB.
   max_body_size = 0
 
-  ## Maximum line size allowed to be sent in bytes.
-  ## 0 means to use the default of 65536 bytes (64 kibibytes)
-  max_line_size = 0
+  ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
+  # max_line_size = 0
 
   ## Set one or more allowed client CA certificate file names to
   ## enable mutually authenticated TLS connections
plugins/inputs/http_listener/pool.go (deleted, 43 lines)
@@ -1,43 +0,0 @@
package http_listener

import (
	"sync/atomic"
)

type pool struct {
	buffers chan []byte
	size    int

	created int64
}

// NewPool returns a new pool object.
// n is the number of buffers
// bufSize is the size (in bytes) of each buffer
func NewPool(n, bufSize int) *pool {
	return &pool{
		buffers: make(chan []byte, n),
		size:    bufSize,
	}
}

func (p *pool) get() []byte {
	select {
	case b := <-p.buffers:
		return b
	default:
		atomic.AddInt64(&p.created, 1)
		return make([]byte, p.size)
	}
}

func (p *pool) put(b []byte) {
	select {
	case p.buffers <- b:
	default:
	}
}

func (p *pool) ncreated() int64 {
	return atomic.LoadInt64(&p.created)
}
plugins/inputs/http_listener/http_listener.go (deleted, 464 lines)
@@ -1,464 +0,0 @@
package http_listener

import (
	"bytes"
	"compress/gzip"
	"crypto/subtle"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	tlsint "github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers/influx"
	"github.com/influxdata/telegraf/selfstat"
)

const (
	// DEFAULT_MAX_BODY_SIZE is the default maximum request body size, in bytes.
	// if the request body is over this size, we will return an HTTP 413 error.
	// 500 MB
	DEFAULT_MAX_BODY_SIZE = 500 * 1024 * 1024

	// MAX_LINE_SIZE is the maximum size, in bytes, that can be allocated for
	// a single InfluxDB point.
	// 64 KB
	DEFAULT_MAX_LINE_SIZE = 64 * 1024
)

type TimeFunc func() time.Time

type HTTPListener struct {
	ServiceAddress string `toml:"service_address"`
	// Port gets pulled out of ServiceAddress
	Port int
	tlsint.ServerConfig

	ReadTimeout   internal.Duration `toml:"read_timeout"`
	WriteTimeout  internal.Duration `toml:"write_timeout"`
	MaxBodySize   internal.Size     `toml:"max_body_size"`
	MaxLineSize   internal.Size     `toml:"max_line_size"`
	BasicUsername string            `toml:"basic_username"`
	BasicPassword string            `toml:"basic_password"`
	DatabaseTag   string            `toml:"database_tag"`

	TimeFunc

	mu sync.Mutex
	wg sync.WaitGroup

	listener net.Listener

	handler *influx.MetricHandler
	parser  *influx.Parser
	acc     telegraf.Accumulator
	pool    *pool

	BytesRecv       selfstat.Stat
	RequestsServed  selfstat.Stat
	WritesServed    selfstat.Stat
	QueriesServed   selfstat.Stat
	PingsServed     selfstat.Stat
	RequestsRecv    selfstat.Stat
	WritesRecv      selfstat.Stat
	QueriesRecv     selfstat.Stat
	PingsRecv       selfstat.Stat
	NotFoundsServed selfstat.Stat
	BuffersCreated  selfstat.Stat
	AuthFailures    selfstat.Stat

	Log telegraf.Logger

	longLines selfstat.Stat
}

const sampleConfig = `
  ## Address and port to host HTTP listener on
  service_address = ":8186"

  ## maximum duration before timing out read of the request
  read_timeout = "10s"
  ## maximum duration before timing out write of the response
  write_timeout = "10s"

  ## Maximum allowed http request body size in bytes.
  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  max_body_size = "500MiB"

  ## Maximum line size allowed to be sent in bytes.
  ## 0 means to use the default of 65536 bytes (64 kibibytes)
  max_line_size = "64KiB"


  ## Optional tag name used to store the database.
  ## If the write has a database in the query string then it will be kept in this tag name.
  ## This tag can be used in downstream outputs.
  ## The default value of nothing means it will be off and the database will not be recorded.
  # database_tag = ""

  ## Set one or more allowed client CA certificate file names to
  ## enable mutually authenticated TLS connections
  tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

  ## Add service certificate and key
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"

  ## Optional username and password to accept for HTTP basic authentication.
  ## You probably want to make sure you have TLS configured above for this.
  # basic_username = "foobar"
  # basic_password = "barfoo"
`

func (h *HTTPListener) SampleConfig() string {
	return sampleConfig
}

func (h *HTTPListener) Description() string {
	return "Influx HTTP write listener"
}

func (h *HTTPListener) Gather(_ telegraf.Accumulator) error {
	h.BuffersCreated.Set(h.pool.ncreated())
	return nil
}

// Start starts the http listener service.
func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	tags := map[string]string{
		"address": h.ServiceAddress,
	}
	h.BytesRecv = selfstat.Register("http_listener", "bytes_received", tags)
	h.RequestsServed = selfstat.Register("http_listener", "requests_served", tags)
	h.WritesServed = selfstat.Register("http_listener", "writes_served", tags)
	h.QueriesServed = selfstat.Register("http_listener", "queries_served", tags)
	h.PingsServed = selfstat.Register("http_listener", "pings_served", tags)
	h.RequestsRecv = selfstat.Register("http_listener", "requests_received", tags)
	h.WritesRecv = selfstat.Register("http_listener", "writes_received", tags)
	h.QueriesRecv = selfstat.Register("http_listener", "queries_received", tags)
	h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags)
	h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags)
	h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags)
	h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags)
	h.longLines = selfstat.Register("http_listener", "long_lines", tags)

	if h.MaxBodySize.Size == 0 {
		h.MaxBodySize.Size = DEFAULT_MAX_BODY_SIZE
	}
	if h.MaxLineSize.Size == 0 {
		h.MaxLineSize.Size = DEFAULT_MAX_LINE_SIZE
	}

	if h.ReadTimeout.Duration < time.Second {
		h.ReadTimeout.Duration = time.Second * 10
	}
	if h.WriteTimeout.Duration < time.Second {
		h.WriteTimeout.Duration = time.Second * 10
	}

	h.acc = acc
	h.pool = NewPool(200, int(h.MaxLineSize.Size))

	tlsConf, err := h.ServerConfig.TLSConfig()
	if err != nil {
		return err
	}

	server := &http.Server{
		Addr:         h.ServiceAddress,
		Handler:      h,
		ReadTimeout:  h.ReadTimeout.Duration,
		WriteTimeout: h.WriteTimeout.Duration,
		TLSConfig:    tlsConf,
	}

	var listener net.Listener
	if tlsConf != nil {
		listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
	} else {
		listener, err = net.Listen("tcp", h.ServiceAddress)
	}
	if err != nil {
		return err
	}
	h.listener = listener
	h.Port = listener.Addr().(*net.TCPAddr).Port

	h.handler = influx.NewMetricHandler()
	h.parser = influx.NewParser(h.handler)

	h.wg.Add(1)
	go func() {
		defer h.wg.Done()
		server.Serve(h.listener)
	}()

	h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress)

	return nil
}

// Stop cleans up all resources
func (h *HTTPListener) Stop() {
	h.mu.Lock()
	defer h.mu.Unlock()

	h.listener.Close()
	h.wg.Wait()

	h.Log.Infof("Stopped HTTP listener service on %s", h.ServiceAddress)
}

func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	h.RequestsRecv.Incr(1)
	defer h.RequestsServed.Incr(1)
	switch req.URL.Path {
	case "/write":
		h.WritesRecv.Incr(1)
		defer h.WritesServed.Incr(1)
		h.AuthenticateIfSet(h.serveWrite, res, req)
	case "/query":
		h.QueriesRecv.Incr(1)
		defer h.QueriesServed.Incr(1)
		// Deliver a dummy response to the query endpoint, as some InfluxDB
		// clients test endpoint availability with a query
		h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) {
			res.Header().Set("Content-Type", "application/json")
			res.Header().Set("X-Influxdb-Version", "1.0")
			res.WriteHeader(http.StatusOK)
			res.Write([]byte("{\"results\":[]}"))
		}, res, req)
	case "/ping":
		h.PingsRecv.Incr(1)
		defer h.PingsServed.Incr(1)
		verbose := req.URL.Query().Get("verbose")

		// respond to ping requests
		if verbose != "" && verbose != "0" && verbose != "false" {
			res.WriteHeader(http.StatusOK)
			b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above
			res.Write(b)
		} else {
			res.WriteHeader(http.StatusNoContent)
		}
	default:
		defer h.NotFoundsServed.Incr(1)
		// Don't know how to respond to calls to other endpoints
		h.AuthenticateIfSet(http.NotFound, res, req)
	}
}

func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
	// Check that the content length is not too large for us to handle.
	if req.ContentLength > h.MaxBodySize.Size {
		tooLarge(res)
		return
	}
	now := h.TimeFunc()

	precision := req.URL.Query().Get("precision")
	db := req.URL.Query().Get("db")

	// Handle gzip request bodies
	body := req.Body
	if req.Header.Get("Content-Encoding") == "gzip" {
		var err error
		body, err = gzip.NewReader(req.Body)
		if err != nil {
			h.Log.Debug(err.Error())
			badRequest(res, err.Error())
			return
		}
		defer body.Close()
	}
	body = http.MaxBytesReader(res, body, h.MaxBodySize.Size)

	var return400 bool
	var hangingBytes bool
	buf := h.pool.get()
	defer h.pool.put(buf)
	bufStart := 0
	for {
		n, err := io.ReadFull(body, buf[bufStart:])
		if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
			h.Log.Debug(err.Error())
			// problem reading the request body
			badRequest(res, err.Error())
			return
		}
		h.BytesRecv.Incr(int64(n))

		if err == io.EOF {
			if return400 {
				badRequest(res, "")
			} else {
				res.WriteHeader(http.StatusNoContent)
			}
			return
		}

		if hangingBytes {
			i := bytes.IndexByte(buf, '\n')
			if i == -1 {
				// still didn't find a newline, keep scanning
				continue
			}
			// rotate the bit remaining after the first newline to the front of the buffer
			i++ // start copying after the newline
			bufStart = len(buf) - i
			if bufStart > 0 {
				copy(buf, buf[i:])
			}
			hangingBytes = false
			continue
		}

		if err == io.ErrUnexpectedEOF {
			// finished reading the request body
			err = h.parse(buf[:n+bufStart], now, precision, db)
			if err != nil {
				h.Log.Debugf("%s: %s", err.Error(), bufStart+n)
				return400 = true
			}
			if return400 {
				if err != nil {
					badRequest(res, err.Error())
				} else {
					badRequest(res, "")
				}
			} else {
				res.WriteHeader(http.StatusNoContent)
			}
			return
		}

		// if we got down here it means that we filled our buffer, and there
		// are still bytes remaining to be read. So we will parse up until the
		// final newline, then push the rest of the bytes into the next buffer.
		i := bytes.LastIndexByte(buf, '\n')
		if i == -1 {
			h.longLines.Incr(1)
			// drop any line longer than the max buffer size
			h.Log.Debugf("Http_listener received a single line longer than the maximum of %d bytes",
				len(buf))
			hangingBytes = true
			return400 = true
			bufStart = 0
			continue
		}
		if err := h.parse(buf[:i+1], now, precision, db); err != nil {
			h.Log.Debug(err.Error())
			return400 = true
		}
		// rotate the bit remaining after the last newline to the front of the buffer
		i++ // start copying after the newline
		bufStart = len(buf) - i
		if bufStart > 0 {
			copy(buf, buf[i:])
		}
	}
}

func (h *HTTPListener) parse(b []byte, t time.Time, precision, db string) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
	h.handler.SetTimeFunc(func() time.Time { return t })
	metrics, err := h.parser.Parse(b)
	if err != nil {
		return fmt.Errorf("unable to parse: %s", err.Error())
	}

	for _, m := range metrics {
		// Do we need to keep the database name in the query string.
		// If a tag has been supplied to put the db in and we actually got a db query,
		// then we write it in. This overwrites the database tag if one was sent.
		// This makes it behave like the influx endpoint.
		if h.DatabaseTag != "" && db != "" {
			m.AddTag(h.DatabaseTag, db)
		}
		h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}

	return nil
}

func tooLarge(res http.ResponseWriter) {
	res.Header().Set("Content-Type", "application/json")
	res.Header().Set("X-Influxdb-Version", "1.0")
	res.Header().Set("X-Influxdb-Error", "http: request body too large")
	res.WriteHeader(http.StatusRequestEntityTooLarge)
	res.Write([]byte(`{"error":"http: request body too large"}`))
}

func badRequest(res http.ResponseWriter, errString string) {
	res.Header().Set("Content-Type", "application/json")
	res.Header().Set("X-Influxdb-Version", "1.0")
	if errString == "" {
		errString = "http: bad request"
	}
	res.Header().Set("X-Influxdb-Error", errString)
	res.WriteHeader(http.StatusBadRequest)
	res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString)))
}

func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
	if h.BasicUsername != "" && h.BasicPassword != "" {
		reqUsername, reqPassword, ok := req.BasicAuth()
		if !ok ||
			subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 ||
			subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 {

			h.AuthFailures.Incr(1)
			http.Error(res, "Unauthorized.", http.StatusUnauthorized)
			return
		}
		handler(res, req)
	} else {
		handler(res, req)
	}
}

func getPrecisionMultiplier(precision string) time.Duration {
	d := time.Nanosecond
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	case "m":
		d = time.Minute
	case "h":
		d = time.Hour
	}
	return d
}

func init() {
	// http_listener deprecated in 1.9
	inputs.Add("http_listener", func() telegraf.Input {
		return &HTTPListener{
			ServiceAddress: ":8186",
			TimeFunc:       time.Now,
		}
	})
	inputs.Add("influxdb_listener", func() telegraf.Input {
		return &HTTPListener{
			ServiceAddress: ":8186",
			TimeFunc:       time.Now,
		}
	})
}
File diff suppressed because one or more lines are too long
plugins/inputs/influxdb_listener/influxdb_listener.go (new file, 406 lines)
@@ -0,0 +1,406 @@
package influxdb_listener

import (
	"compress/gzip"
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	tlsint "github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers/influx"
	"github.com/influxdata/telegraf/selfstat"
)

const (
	// defaultMaxBodySize is the default maximum request body size, in bytes.
	// if the request body is over this size, we will return an HTTP 413 error.
	defaultMaxBodySize = 32 * 1024 * 1024
)

type InfluxDBListener struct {
	ServiceAddress string `toml:"service_address"`
	port           int
	tlsint.ServerConfig

	ReadTimeout   internal.Duration `toml:"read_timeout"`
	WriteTimeout  internal.Duration `toml:"write_timeout"`
	MaxBodySize   internal.Size     `toml:"max_body_size"`
	MaxLineSize   internal.Size     `toml:"max_line_size"` // deprecated in 1.14; ignored
	BasicUsername string            `toml:"basic_username"`
	BasicPassword string            `toml:"basic_password"`
	DatabaseTag   string            `toml:"database_tag"`

	timeFunc influx.TimeFunc

	listener net.Listener
	server   http.Server

	acc telegraf.Accumulator

	bytesRecv       selfstat.Stat
	requestsServed  selfstat.Stat
	writesServed    selfstat.Stat
	queriesServed   selfstat.Stat
	pingsServed     selfstat.Stat
	requestsRecv    selfstat.Stat
	notFoundsServed selfstat.Stat
	buffersCreated  selfstat.Stat
	authFailures    selfstat.Stat

	Log telegraf.Logger `toml:"-"`

	mux http.ServeMux
}

const sampleConfig = `
  ## Address and port to host InfluxDB listener on
  service_address = ":8186"

  ## maximum duration before timing out read of the request
  read_timeout = "10s"
  ## maximum duration before timing out write of the response
  write_timeout = "10s"

  ## Maximum allowed HTTP request body size in bytes.
  ## 0 means to use the default of 32MiB.
  max_body_size = "32MiB"

  ## Optional tag name used to store the database.
  ## If the write has a database in the query string then it will be kept in this tag name.
  ## This tag can be used in downstream outputs.
  ## The default value of nothing means it will be off and the database will not be recorded.
  # database_tag = ""

  ## Set one or more allowed client CA certificate file names to
  ## enable mutually authenticated TLS connections
  tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

  ## Add service certificate and key
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"

  ## Optional username and password to accept for HTTP basic authentication.
  ## You probably want to make sure you have TLS configured above for this.
  # basic_username = "foobar"
  # basic_password = "barfoo"
`

func (h *InfluxDBListener) SampleConfig() string {
	return sampleConfig
}

func (h *InfluxDBListener) Description() string {
	return "Accept metrics over InfluxDB 1.x HTTP API"
}

func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error {
	return nil
}

func (h *InfluxDBListener) routes() {
	authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, "influxdb",
		func(_ http.ResponseWriter) {
			h.authFailures.Incr(1)
		},
	)

	h.mux.Handle("/write", authHandler(h.handleWrite()))
	h.mux.Handle("/query", authHandler(h.handleQuery()))
	h.mux.Handle("/ping", h.handlePing())
	h.mux.Handle("/", authHandler(h.handleDefault()))
}

func (h *InfluxDBListener) Init() error {
	tags := map[string]string{
		"address": h.ServiceAddress,
	}
	h.bytesRecv = selfstat.Register("influxdb_listener", "bytes_received", tags)
	h.requestsServed = selfstat.Register("influxdb_listener", "requests_served", tags)
	h.writesServed = selfstat.Register("influxdb_listener", "writes_served", tags)
	h.queriesServed = selfstat.Register("influxdb_listener", "queries_served", tags)
	h.pingsServed = selfstat.Register("influxdb_listener", "pings_served", tags)
	h.requestsRecv = selfstat.Register("influxdb_listener", "requests_received", tags)
	h.notFoundsServed = selfstat.Register("influxdb_listener", "not_founds_served", tags)
	h.buffersCreated = selfstat.Register("influxdb_listener", "buffers_created", tags)
	h.authFailures = selfstat.Register("influxdb_listener", "auth_failures", tags)
	h.routes()

	if h.MaxBodySize.Size == 0 {
		h.MaxBodySize.Size = defaultMaxBodySize
	}

	if h.MaxLineSize.Size != 0 {
		h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored")
	}

	if h.ReadTimeout.Duration < time.Second {
		h.ReadTimeout.Duration = time.Second * 10
	}
	if h.WriteTimeout.Duration < time.Second {
		h.WriteTimeout.Duration = time.Second * 10
	}

	return nil
}

// Start starts the InfluxDB listener service.
func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error {
	h.acc = acc

	tlsConf, err := h.ServerConfig.TLSConfig()
	if err != nil {
		return err
	}

	h.server = http.Server{
		Addr:         h.ServiceAddress,
		Handler:      h,
		ReadTimeout:  h.ReadTimeout.Duration,
		WriteTimeout: h.WriteTimeout.Duration,
		TLSConfig:    tlsConf,
	}

	var listener net.Listener
	if tlsConf != nil {
		listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
		if err != nil {
			return err
		}
	} else {
		listener, err = net.Listen("tcp", h.ServiceAddress)
		if err != nil {
			return err
		}
	}
	h.listener = listener
	h.port = listener.Addr().(*net.TCPAddr).Port

	go func() {
		err = h.server.Serve(h.listener)
		if err != http.ErrServerClosed {
			h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress)
		}
	}()

	h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress)

	return nil
}

// Stop cleans up all resources
func (h *InfluxDBListener) Stop() {
	err := h.server.Shutdown(context.Background())
	if err != nil {
		h.Log.Infof("Error shutting down HTTP server: %v", err.Error())
	}
}

func (h *InfluxDBListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	h.requestsRecv.Incr(1)
	h.mux.ServeHTTP(res, req)
	h.requestsServed.Incr(1)
}

func (h *InfluxDBListener) handleQuery() http.HandlerFunc {
	return func(res http.ResponseWriter, req *http.Request) {
		defer h.queriesServed.Incr(1)
		// Deliver a dummy response to the query endpoint, as some InfluxDB
		// clients test endpoint availability with a query
		res.Header().Set("Content-Type", "application/json")
		res.Header().Set("X-Influxdb-Version", "1.0")
		res.WriteHeader(http.StatusOK)
		res.Write([]byte("{\"results\":[]}"))
	}
}

func (h *InfluxDBListener) handlePing() http.HandlerFunc {
	return func(res http.ResponseWriter, req *http.Request) {
		defer h.pingsServed.Incr(1)
		verbose := req.URL.Query().Get("verbose")

		// respond to ping requests
		if verbose != "" && verbose != "0" && verbose != "false" {
			res.WriteHeader(http.StatusOK)
			b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above
			res.Write(b)
		} else {
			res.WriteHeader(http.StatusNoContent)
		}
	}
}

func (h *InfluxDBListener) handleDefault() http.HandlerFunc {
	return func(res http.ResponseWriter, req *http.Request) {
		defer h.notFoundsServed.Incr(1)
		http.NotFound(res, req)
	}
}

func (h *InfluxDBListener) handleWrite() http.HandlerFunc {
	return func(res http.ResponseWriter, req *http.Request) {
		defer h.writesServed.Incr(1)
		// Check that the content length is not too large for us to handle.
		if req.ContentLength > h.MaxBodySize.Size {
			tooLarge(res)
			return
		}

		db := req.URL.Query().Get("db")

		body := req.Body
		body = http.MaxBytesReader(res, body, h.MaxBodySize.Size)
		// Handle gzip request bodies
		if req.Header.Get("Content-Encoding") == "gzip" {
			var err error
			body, err = gzip.NewReader(body)
			if err != nil {
				h.Log.Debugf("Error decompressing request body: %v", err.Error())
				badRequest(res, err.Error())
				return
			}
			defer body.Close()
		}

		parser := influx.NewStreamParser(body)
		parser.SetTimeFunc(h.timeFunc)

		precisionStr := req.URL.Query().Get("precision")
		if precisionStr != "" {
			precision := getPrecisionMultiplier(precisionStr)
			parser.SetTimePrecision(precision)
		}

		var m telegraf.Metric
		var err error
		var parseErrorCount int
		var lastPos int = 0
		var firstParseErrorStr string
		for {
			select {
			case <-req.Context().Done():
				// Shutting down before parsing is finished.
				res.WriteHeader(http.StatusServiceUnavailable)
				return
			default:
			}

			m, err = parser.Next()
			pos := parser.Position()
			h.bytesRecv.Incr(int64(pos - lastPos))
			lastPos = pos

			// Continue parsing metrics even if some are malformed
			if parseErr, ok := err.(*influx.ParseError); ok {
				parseErrorCount += 1
				errStr := parseErr.Error()
				if firstParseErrorStr == "" {
					firstParseErrorStr = errStr
				}
				continue
			} else if err != nil {
				// Either we're exiting cleanly (err ==
				// influx.EOF) or there's an unexpected error
				break
			}

			if h.DatabaseTag != "" && db != "" {
				m.AddTag(h.DatabaseTag, db)
			}

			h.acc.AddMetric(m)
		}
		if err != influx.EOF {
			h.Log.Debugf("Error parsing the request body: %v", err.Error())
			badRequest(res, err.Error())
			return
		}
		if parseErrorCount > 0 {
			var partialErrorString string
			switch parseErrorCount {
			case 1:
				partialErrorString = fmt.Sprintf("%s", firstParseErrorStr)
			case 2:
				partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr)
			default:
				partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1)
			}
			partialWrite(res, partialErrorString)
			return
		}

		// http request success
		res.WriteHeader(http.StatusNoContent)
	}
}

func tooLarge(res http.ResponseWriter) {
	res.Header().Set("Content-Type", "application/json")
	res.Header().Set("X-Influxdb-Version", "1.0")
	res.Header().Set("X-Influxdb-Error", "http: request body too large")
	res.WriteHeader(http.StatusRequestEntityTooLarge)
	res.Write([]byte(`{"error":"http: request body too large"}`))
}

func badRequest(res http.ResponseWriter, errString string) {
	res.Header().Set("Content-Type", "application/json")
	res.Header().Set("X-Influxdb-Version", "1.0")
	if errString == "" {
		errString = "http: bad request"
	}
	res.Header().Set("X-Influxdb-Error", errString)
	res.WriteHeader(http.StatusBadRequest)
	res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString)))
}

func partialWrite(res http.ResponseWriter, errString string) {
	res.Header().Set("Content-Type", "application/json")
	res.Header().Set("X-Influxdb-Version", "1.0")
	res.Header().Set("X-Influxdb-Error", errString)
	res.WriteHeader(http.StatusBadRequest)
	res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString)))
}

func getPrecisionMultiplier(precision string) time.Duration {
	// Influxdb defaults silently to nanoseconds if precision isn't
	// one of the following:
	var d time.Duration
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	case "m":
		d = time.Minute
	case "h":
		d = time.Hour
	default:
		d = time.Nanosecond
	}
	return d
}

func init() {
	// http_listener deprecated in 1.9
	inputs.Add("http_listener", func() telegraf.Input {
		return &InfluxDBListener{
			ServiceAddress: ":8186",
			timeFunc:       time.Now,
		}
	})
	inputs.Add("influxdb_listener", func() telegraf.Input {
		return &InfluxDBListener{
			ServiceAddress: ":8186",
			timeFunc:       time.Now,
		}
	})
}
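To see the surface this plugin exposes, here is a minimal client-side sketch, assuming a listener running locally with the default service_address of ":8186" and no TLS or basic auth configured; the measurement and database name are made up for illustration:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// One metric in InfluxDB line protocol; db=mydb is copied into the
	// configured database_tag, if any.
	body := bytes.NewBufferString("cpu,host=server01 usage_idle=87.5\n")
	resp, err := http.Post("http://localhost:8186/write?db=mydb", "text/plain", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // "204 No Content" when every line parses
}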
@@ -0,0 +1,108 @@
package influxdb_listener

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/selfstat"
	"github.com/influxdata/telegraf/testutil"
)

// newListener is the minimal InfluxDBListener construction to serve writes.
func newListener() *InfluxDBListener {
	listener := &InfluxDBListener{
		timeFunc:     time.Now,
		acc:          &testutil.NopAccumulator{},
		bytesRecv:    selfstat.Register("influxdb_listener", "bytes_received", map[string]string{}),
		writesServed: selfstat.Register("influxdb_listener", "writes_served", map[string]string{}),
		MaxBodySize: internal.Size{
			Size: defaultMaxBodySize,
		},
	}
	return listener
}

func BenchmarkInfluxDBListener_serveWrite(b *testing.B) {
	res := httptest.NewRecorder()
	addr := "http://localhost/write?db=mydb"

	benchmarks := []struct {
		name  string
		lines string
	}{
		{
			name:  "single line, tag, and field",
			lines: lines(1, 1, 1),
		},
		{
			name:  "single line, 10 tags and fields",
			lines: lines(1, 10, 10),
		},
		{
			name:  "single line, 100 tags and fields",
			lines: lines(1, 100, 100),
		},
		{
			name:  "1k lines, single tag and field",
			lines: lines(1000, 1, 1),
		},
		{
			name:  "1k lines, 10 tags and fields",
			lines: lines(1000, 10, 10),
		},
		{
			name:  "10k lines, 10 tags and fields",
			lines: lines(10000, 10, 10),
		},
		{
			name:  "100k lines, 10 tags and fields",
			lines: lines(100000, 10, 10),
		},
	}

	for _, bm := range benchmarks {
		b.Run(bm.name, func(b *testing.B) {
			listener := newListener()

			b.ResetTimer()
			for n := 0; n < b.N; n++ {
				req, err := http.NewRequest("POST", addr, strings.NewReader(bm.lines))
				if err != nil {
					b.Error(err)
				}
				listener.handleWrite()(res, req)
				if res.Code != http.StatusNoContent {
					b.Errorf("unexpected status %d", res.Code)
				}
			}
		})
	}
}

func lines(lines, numTags, numFields int) string {
	lp := make([]string, lines)
	for i := 0; i < lines; i++ {
		tags := make([]string, numTags)
		for j := 0; j < numTags; j++ {
			tags[j] = fmt.Sprintf("t%d=v%d", j, j)
		}

		fields := make([]string, numFields)
		for k := 0; k < numFields; k++ {
			fields[k] = fmt.Sprintf("f%d=%d", k, k)
		}

		lp[i] = fmt.Sprintf("m%d,%s %s",
			i,
			strings.Join(tags, ","),
			strings.Join(fields, ","),
		)
	}

	return strings.Join(lp, "\n")
}
File diff suppressed because one or more lines are too long