Use a bufio.Scanner in the HTTP listener
This will prevent potentially very large allocations caused by a very large chunk size sent from a client. Fixes #1823.
This commit is contained in:
parent
ca8e512e5b
commit
78ced6bc30
|
@ -41,6 +41,7 @@
|
||||||
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows.
|
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows.
|
||||||
- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
|
- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
|
||||||
- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated.
|
- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated.
|
||||||
|
- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with huge payloads.
|
||||||
|
|
||||||
## v1.0.1 [unreleased]
|
## v1.0.1 [unreleased]
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,9 @@
|
||||||
package http_listener
|
package http_listener
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -111,25 +113,34 @@ func (t *HttpListener) httpListen() error {
|
||||||
func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
|
func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
|
||||||
t.wg.Add(1)
|
t.wg.Add(1)
|
||||||
defer t.wg.Done()
|
defer t.wg.Done()
|
||||||
body, err := ioutil.ReadAll(req.Body)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Problem reading request: [%s], Error: %s\n", string(body), err)
|
|
||||||
http.Error(res, "ERROR reading request", http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch req.URL.Path {
|
switch req.URL.Path {
|
||||||
case "/write":
|
case "/write":
|
||||||
var metrics []telegraf.Metric
|
var http400msg bytes.Buffer
|
||||||
metrics, err = t.parser.Parse(body)
|
var partial string
|
||||||
|
scanner := bufio.NewScanner(req.Body)
|
||||||
|
scanner.Buffer([]byte(""), 128*1024)
|
||||||
|
for scanner.Scan() {
|
||||||
|
metrics, err := t.parser.Parse(scanner.Bytes())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
for _, m := range metrics {
|
for _, m := range metrics {
|
||||||
t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
|
t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
|
||||||
}
|
}
|
||||||
res.WriteHeader(http.StatusNoContent)
|
partial = "partial write: "
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Problem parsing body: [%s], Error: %s\n", string(body), err)
|
http400msg.WriteString(err.Error() + " ")
|
||||||
http.Error(res, "ERROR parsing metrics", http.StatusInternalServerError)
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
http.Error(res, "Internal server error: "+err.Error(), http.StatusInternalServerError)
|
||||||
|
} else if http400msg.Len() > 0 {
|
||||||
|
res.Header().Set("Content-Type", "application/json")
|
||||||
|
res.Header().Set("X-Influxdb-Version", "1.0")
|
||||||
|
res.WriteHeader(http.StatusBadRequest)
|
||||||
|
res.Write([]byte(fmt.Sprintf(`{"error":"%s%s"}`, partial, http400msg.String())))
|
||||||
|
} else {
|
||||||
|
res.WriteHeader(http.StatusNoContent)
|
||||||
}
|
}
|
||||||
case "/query":
|
case "/query":
|
||||||
// Deliver a dummy response to the query endpoint, as some InfluxDB
|
// Deliver a dummy response to the query endpoint, as some InfluxDB
|
||||||
|
|
File diff suppressed because one or more lines are too long
Loading…
Reference in New Issue