add missing fields to haproxy input (#2323)

This commit is contained in:
Patrick Hemmer 2017-02-02 08:46:53 -05:00 committed by Cameron Sparr
parent c8de4833e3
commit 036d1beb87
4 changed files with 226 additions and 317 deletions

View File

@ -45,6 +45,7 @@ It is highly recommended that all users migrate to the new riemann output plugin
- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
- [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
- [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
- [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
- [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.

View File

@ -10,6 +10,7 @@
servers = ["http://1.2.3.4/haproxy?stats", "/var/run/haproxy*.sock"]
```
#### `servers`
Server addresses must explicitly start with 'http' if you wish to use the HAproxy status page. Otherwise, the address will be assumed to be a UNIX socket and any protocol prefix (if present) will be discarded.
For basic authentication you need to add username and password in the URL: `http://user:password@1.2.3.4/haproxy?stats`.
@ -26,9 +27,12 @@ When using socket names, wildcard expansion is supported so plugin can gather st
If no servers are specified, then the default address of `http://127.0.0.1:1936/haproxy?stats` will be used.
#### `keep_field_names`
By default, some of the fields are renamed from what haproxy calls them. Setting the `keep_field_names` parameter to `true` will result in the plugin keeping the original field names.
### Measurements & Fields:
Plugin will gather measurements outlined in [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1).
Plugin will gather measurements outlined in [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.7/management.html#9.1).
### Tags:

View File

@ -14,80 +14,17 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
)
// CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
//
// Column indexes into a haproxy stats CSV row. The bracketed mask in each
// comment indicates which entry types report the field:
// L=listener, F=frontend, B=backend, S=server ('.' means not reported).
const (
	HF_PXNAME = 0 // 0. pxname [LFBS]: proxy name
	HF_SVNAME = 1 // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
	HF_QCUR = 2 // 2. qcur [..BS]: current queued requests. For the backend this reports the number queued without a server assigned.
	HF_QMAX = 3 // 3. qmax [..BS]: max value of qcur
	HF_SCUR = 4 // 4. scur [LFBS]: current sessions
	HF_SMAX = 5 // 5. smax [LFBS]: max sessions
	HF_SLIM = 6 // 6. slim [LFBS]: configured session limit
	HF_STOT = 7 // 7. stot [LFBS]: cumulative number of connections
	HF_BIN = 8 // 8. bin [LFBS]: bytes in
	HF_BOUT = 9 // 9. bout [LFBS]: bytes out
	HF_DREQ = 10 // 10. dreq [LFB.]: requests denied because of security concerns.
	HF_DRESP = 11 // 11. dresp [LFBS]: responses denied because of security concerns.
	HF_EREQ = 12 // 12. ereq [LF..]: request errors (see the haproxy docs for the list of possible causes).
	HF_ECON = 13 // 13. econ [..BS]: number of requests that encountered an error trying to connect to a server.
	HF_ERESP = 14 // 14. eresp [..BS]: response errors. srv_abrt will be counted here also. Some other errors are: - write error on the client socket (won't be counted for the server stat) - failure applying filters to the response.
	HF_WRETR = 15 // 15. wretr [..BS]: number of times a connection to a server was retried.
	HF_WREDIS = 16 // 16. wredis [..BS]: number of times a request was redispatched to another server. The server value counts the number of times that server was switched away from.
	HF_STATUS = 17 // 17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...)
	HF_WEIGHT = 18 // 18. weight [..BS]: total weight (backend), server weight (server)
	HF_ACT = 19 // 19. act [..BS]: number of active servers (backend), server is active (server)
	HF_BCK = 20 // 20. bck [..BS]: number of backup servers (backend), server is backup (server)
	HF_CHKFAIL = 21 // 21. chkfail [...S]: number of failed checks. (Only counts checks failed when the server is up.)
	HF_CHKDOWN = 22 // 22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts transitions to the whole backend being down, rather than the sum of the counters for each server.
	HF_LASTCHG = 23 // 23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition
	HF_DOWNTIME = 24 // 24. downtime [..BS]: total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime.
	HF_QLIMIT = 25 // 25. qlimit [...S]: configured maxqueue for the server, or nothing if the value is 0 (default, meaning no limit)
	HF_PID = 26 // 26. pid [LFBS]: process id (0 for first instance, 1 for second, ...)
	HF_IID = 27 // 27. iid [LFBS]: unique proxy id
	HF_SID = 28 // 28. sid [L..S]: server id (unique inside a proxy)
	HF_THROTTLE = 29 // 29. throttle [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
	HF_LBTOT = 30 // 30. lbtot [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected.
	HF_TRACKED = 31 // 31. tracked [...S]: id of proxy/server if tracking is enabled.
	HF_TYPE = 32 // 32. type [LFBS]: (0 = frontend, 1 = backend, 2 = server, 3 = socket/listener)
	HF_RATE = 33 // 33. rate [.FBS]: number of sessions per second over last elapsed second
	HF_RATE_LIM = 34 // 34. rate_lim [.F..]: configured limit on new sessions per second
	HF_RATE_MAX = 35 // 35. rate_max [.FBS]: max number of new sessions per second
	HF_CHECK_STATUS = 36 // 36. check_status [...S]: status of last health check (see the haproxy docs for the possible values)
	HF_CHECK_CODE = 37 // 37. check_code [...S]: layer5-7 code, if available
	HF_CHECK_DURATION = 38 // 38. check_duration [...S]: time in ms took to finish last health check
	HF_HRSP_1xx = 39 // 39. hrsp_1xx [.FBS]: http responses with 1xx code
	HF_HRSP_2xx = 40 // 40. hrsp_2xx [.FBS]: http responses with 2xx code
	HF_HRSP_3xx = 41 // 41. hrsp_3xx [.FBS]: http responses with 3xx code
	HF_HRSP_4xx = 42 // 42. hrsp_4xx [.FBS]: http responses with 4xx code
	HF_HRSP_5xx = 43 // 43. hrsp_5xx [.FBS]: http responses with 5xx code
	HF_HRSP_OTHER = 44 // 44. hrsp_other [.FBS]: http responses with other codes (protocol error)
	HF_HANAFAIL = 45 // 45. hanafail [...S]: failed health checks details
	HF_REQ_RATE = 46 // 46. req_rate [.F..]: HTTP requests per second over last elapsed second
	HF_REQ_RATE_MAX = 47 // 47. req_rate_max [.F..]: max number of HTTP requests per second observed
	HF_REQ_TOT = 48 // 48. req_tot [.F..]: total number of HTTP requests received
	HF_CLI_ABRT = 49 // 49. cli_abrt [..BS]: number of data transfers aborted by the client
	HF_SRV_ABRT = 50 // 50. srv_abrt [..BS]: number of data transfers aborted by the server (inc. in eresp)
	HF_COMP_IN = 51 // 51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor
	HF_COMP_OUT = 52 // 52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor
	HF_COMP_BYP = 53 // 53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor (CPU/BW limit)
	HF_COMP_RSP = 54 // 54. comp_rsp [.FB.]: number of HTTP responses that were compressed
	HF_LASTSESS = 55 // 55. lastsess [..BS]: number of seconds since last session assigned to server/backend
	HF_LAST_CHK = 56 // 56. last_chk [...S]: last health check contents or textual error
	HF_LAST_AGT = 57 // 57. last_agt [...S]: last agent check contents or textual error
	HF_QTIME = 58 // 58. qtime [..BS]: average queue time in ms — TODO confirm units against haproxy docs
	HF_CTIME = 59 // 59. ctime [..BS]: average connect time in ms — TODO confirm units against haproxy docs
	HF_RTIME = 60 // 60. rtime [..BS]: average response time in ms (0 for TCP)
	HF_TTIME = 61 // 61. ttime [..BS]: the average total session time in ms over the 1024 last requests
)
// haproxy is the telegraf input plugin configuration/state for gathering
// HAproxy statistics.
type haproxy struct {
	// Servers lists the endpoints to poll: addresses starting with "http"
	// are treated as stats pages, anything else as a UNIX socket path
	// (wildcard expansion supported, per the plugin README).
	Servers []string
	// client is the shared HTTP client used for stats-page requests.
	// NOTE(review): its initialization site is not visible in this chunk —
	// presumably created lazily on first HTTP gather; confirm.
	client *http.Client
	// KeepFieldNames, when true, keeps haproxy's original CSV column names
	// instead of the plugin's renamed versions (see fieldRenames).
	KeepFieldNames bool
}
var sampleConfig = `
@ -103,6 +40,11 @@ var sampleConfig = `
## Server address not starting with 'http' will be treated as a possible
## socket, so both examples below are valid.
## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
#
## By default, some of the fields are renamed from what haproxy calls them.
## Setting this option to true results in the plugin keeping the original
## field names.
## keep_field_names = true
`
func (r *haproxy) SampleConfig() string {
@ -147,17 +89,18 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
}
var wg sync.WaitGroup
errChan := errchan.New(len(endpoints))
wg.Add(len(endpoints))
for _, server := range endpoints {
go func(serv string) {
defer wg.Done()
errChan.C <- g.gatherServer(serv, acc)
if err := g.gatherServer(serv, acc); err != nil {
acc.AddError(err)
}
}(server)
}
wg.Wait()
return errChan.Error()
return nil
}
func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
@ -175,7 +118,7 @@ func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro
return fmt.Errorf("Could not write to socket '%s': %s", addr, errw)
}
return importCsvResult(c, acc, socketPath)
return g.importCsvResult(c, acc, socketPath)
}
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
@ -216,7 +159,11 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
return fmt.Errorf("Unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode)
}
return importCsvResult(res.Body, acc, u.Host)
if err := g.importCsvResult(res.Body, acc, u.Host); err != nil {
return fmt.Errorf("Unable to parse stat result from '%s': %s", addr, err)
}
return nil
}
func getSocketAddr(sock string) string {
@ -229,205 +176,96 @@ func getSocketAddr(sock string) string {
}
}
func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
csv := csv.NewReader(r)
result, err := csv.ReadAll()
// typeNames translates the numeric "type" CSV column (0-3) into the
// human-readable value emitted as the "type" tag.
var typeNames = []string{"frontend", "backend", "server", "listener"}

// fieldRenames maps haproxy's original CSV column names to the field/tag
// names this plugin reports by default; the renames are skipped when
// KeepFieldNames is true.
var fieldRenames = map[string]string{
	"pxname": "proxy",
	"svname": "sv",
	"act": "active_servers",
	"bck": "backup_servers",
	"cli_abrt": "cli_abort",
	"srv_abrt": "srv_abort",
	"hrsp_1xx": "http_response.1xx",
	"hrsp_2xx": "http_response.2xx",
	"hrsp_3xx": "http_response.3xx",
	"hrsp_4xx": "http_response.4xx",
	"hrsp_5xx": "http_response.5xx",
	"hrsp_other": "http_response.other",
}
func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
csvr := csv.NewReader(r)
now := time.Now()
for _, row := range result {
headers, err := csvr.Read()
if err != nil {
return err
}
if len(headers[0]) <= 2 || headers[0][:2] != "# " {
return fmt.Errorf("did not receive standard haproxy headers")
}
headers[0] = headers[0][2:]
for {
row, err := csvr.Read()
if err == io.EOF {
break
}
if err != nil {
return err
}
fields := make(map[string]interface{})
tags := map[string]string{
"server": host,
"proxy": row[HF_PXNAME],
"sv": row[HF_SVNAME],
}
for field, v := range row {
switch field {
case HF_QCUR:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["qcur"] = ival
if len(row) != len(headers) {
return fmt.Errorf("number of columns does not match number of headers. headers=%d columns=%d", len(headers), len(row))
}
for i, v := range row {
if v == "" {
continue
}
colName := headers[i]
fieldName := colName
if !g.KeepFieldNames {
if fieldRename, ok := fieldRenames[colName]; ok {
fieldName = fieldRename
}
case HF_QMAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["qmax"] = ival
}
switch colName {
case "pxname", "svname":
tags[fieldName] = v
case "type":
vi, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return fmt.Errorf("unable to parse type value '%s'", v)
}
case HF_SCUR:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["scur"] = ival
if int(vi) >= len(typeNames) {
return fmt.Errorf("received unknown type value: %d", vi)
}
case HF_SMAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["smax"] = ival
tags[fieldName] = typeNames[vi]
case "check_desc", "agent_desc":
// do nothing. These fields are just a more verbose description of the check_status & agent_status fields
case "status", "check_status", "last_chk", "mode", "tracked", "agent_status", "last_agt", "addr", "cookie":
// these are string fields
fields[fieldName] = v
case "lastsess":
vi, err := strconv.ParseInt(v, 10, 64)
if err != nil {
//TODO log the error. And just once (per column) so we don't spam the log
continue
}
case HF_SLIM:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["slim"] = ival
}
case HF_STOT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["stot"] = ival
}
case HF_BIN:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["bin"] = ival
}
case HF_BOUT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["bout"] = ival
}
case HF_DREQ:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["dreq"] = ival
}
case HF_DRESP:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["dresp"] = ival
}
case HF_EREQ:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["ereq"] = ival
}
case HF_ECON:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["econ"] = ival
}
case HF_ERESP:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["eresp"] = ival
}
case HF_WRETR:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["wretr"] = ival
}
case HF_WREDIS:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["wredis"] = ival
}
case HF_ACT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["active_servers"] = ival
}
case HF_BCK:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["backup_servers"] = ival
}
case HF_DOWNTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["downtime"] = ival
}
case HF_THROTTLE:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["throttle"] = ival
}
case HF_LBTOT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["lbtot"] = ival
}
case HF_RATE:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["rate"] = ival
}
case HF_RATE_MAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["rate_max"] = ival
}
case HF_CHECK_DURATION:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["check_duration"] = ival
}
case HF_HRSP_1xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["http_response.1xx"] = ival
}
case HF_HRSP_2xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["http_response.2xx"] = ival
}
case HF_HRSP_3xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["http_response.3xx"] = ival
}
case HF_HRSP_4xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["http_response.4xx"] = ival
}
case HF_HRSP_5xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["http_response.5xx"] = ival
}
case HF_REQ_RATE:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["req_rate"] = ival
}
case HF_REQ_RATE_MAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["req_rate_max"] = ival
}
case HF_REQ_TOT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["req_tot"] = ival
}
case HF_CLI_ABRT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["cli_abort"] = ival
}
case HF_SRV_ABRT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["srv_abort"] = ival
}
case HF_QTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["qtime"] = ival
}
case HF_CTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["ctime"] = ival
}
case HF_RTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["rtime"] = ival
}
case HF_TTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["ttime"] = ival
fields[fieldName] = vi
default:
vi, err := strconv.ParseUint(v, 10, 64)
if err != nil {
//TODO log the error. And just once (per column) so we don't spam the log
continue
}
fields[fieldName] = vi
}
}
acc.AddFields("haproxy", fields, tags, now)

View File

@ -68,8 +68,9 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
tags := map[string]string{
"server": ts.Listener.Addr().String(),
"proxy": "be_app",
"sv": "host0",
"proxy": "git",
"sv": "www",
"type": "server",
}
fields := HaproxyGetFieldValues()
@ -80,8 +81,8 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
Servers: []string{ts.URL},
}
err = r.Gather(&acc)
require.Error(t, err)
r.Gather(&acc)
require.NotEmpty(t, acc.Errors)
}
func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
@ -100,9 +101,10 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
require.NoError(t, err)
tags := map[string]string{
"proxy": "be_app",
"server": ts.Listener.Addr().String(),
"sv": "host0",
"proxy": "git",
"sv": "www",
"type": "server",
}
fields := HaproxyGetFieldValues()
@ -144,9 +146,10 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
for _, sock := range sockets {
tags := map[string]string{
"proxy": "be_app",
"server": sock.Addr().String(),
"sv": "host0",
"proxy": "git",
"sv": "www",
"type": "server",
}
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
@ -155,8 +158,8 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
// This mask should not match any socket
r.Servers = []string{_badmask}
err = r.Gather(&acc)
require.Error(t, err)
r.Gather(&acc)
require.NotEmpty(t, acc.Errors)
}
//When not passing server config, we default to localhost
@ -171,59 +174,122 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv")
}
// TestHaproxyKeepFieldNames verifies that with KeepFieldNames enabled the
// plugin reports haproxy's original CSV column names (pxname/svname tags,
// act/bck/cli_abrt/... fields) instead of the renamed defaults.
func TestHaproxyKeepFieldNames(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, csvOutputSample)
	}))
	defer server.Close()

	input := &haproxy{
		Servers:        []string{server.URL},
		KeepFieldNames: true,
	}

	var acc testutil.Accumulator
	require.NoError(t, input.Gather(&acc))

	tags := map[string]string{
		"server": server.Listener.Addr().String(),
		"pxname": "git",
		"svname": "www",
		"type":   "server",
	}

	fields := HaproxyGetFieldValues()
	// The shared expectations use the plugin's renamed field names; translate
	// them back to haproxy's originals for the KeepFieldNames case.
	originalNames := map[string]string{
		"active_servers":      "act",
		"backup_servers":      "bck",
		"cli_abort":           "cli_abrt",
		"srv_abort":           "srv_abrt",
		"http_response.1xx":   "hrsp_1xx",
		"http_response.2xx":   "hrsp_2xx",
		"http_response.3xx":   "hrsp_3xx",
		"http_response.4xx":   "hrsp_4xx",
		"http_response.5xx":   "hrsp_5xx",
		"http_response.other": "hrsp_other",
	}
	for renamed, original := range originalNames {
		fields[original] = fields[renamed]
		delete(fields, renamed)
	}

	acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
}
func HaproxyGetFieldValues() map[string]interface{} {
fields := map[string]interface{}{
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(510913516),
"bout": uint64(2193856571),
"check_duration": uint64(10),
"cli_abort": uint64(73),
"ctime": uint64(2),
"downtime": uint64(0),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(1),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(119534),
"http_response.3xx": uint64(48051),
"http_response.4xx": uint64(2345),
"http_response.5xx": uint64(1056),
"lbtot": uint64(171013),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(0),
"rate": uint64(3),
"rate_max": uint64(12),
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"slim": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
"wredis": uint64(0),
"wretr": uint64(1),
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(5228218),
"bout": uint64(303747244),
"check_code": uint64(200),
"check_duration": uint64(3),
"check_fall": uint64(3),
"check_health": uint64(4),
"check_rise": uint64(2),
"check_status": "L7OK",
"chkdown": uint64(84),
"chkfail": uint64(559),
"cli_abort": uint64(690),
"ctime": uint64(1),
"downtime": uint64(3352),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(21),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(5668),
"http_response.3xx": uint64(8710),
"http_response.4xx": uint64(140),
"http_response.5xx": uint64(0),
"http_response.other": uint64(0),
"iid": uint64(4),
"last_chk": "OK",
"lastchg": uint64(1036557),
"lastsess": int64(1342),
"lbtot": uint64(9481),
"mode": "http",
"pid": uint64(1),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(1268),
"rate": uint64(0),
"rate_max": uint64(2),
"rtime": uint64(2908),
"sid": uint64(1),
"scur": uint64(0),
"slim": uint64(2),
"smax": uint64(2),
"srv_abort": uint64(0),
"status": "UP",
"stot": uint64(14539),
"ttime": uint64(4500),
"weight": uint64(1),
"wredis": uint64(0),
"wretr": uint64(0),
}
return fields
}
// Sample CSV stats output used as a test fixture; a live equivalent can be
// obtained from the official haproxy demo: 'http://demo.haproxy.org/;csv'
const csvOutputSample = `
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
fe_app,FRONTEND,,81,288,713,2000,1094063,5557055817,24096715169,1102,80,95740,,,17,19,OPEN,,,,,,,,,2,16,113,13,114,,0,18,0,102,,,,0,1314093,537036,123452,11966,1360,,35,140,1987928,,,0,0,0,0,,,,,,,,
be_static,host0,0,0,0,3,,3209,1141294,17389596,,0,,0,0,0,0,no check,1,1,0,,,,,,2,17,1,,3209,,2,0,,7,,,,0,218,1497,1494,0,0,0,,,,0,0,,,,,2,,,0,2,23,545,
be_static,BACKEND,0,0,0,3,200,3209,1141294,17389596,0,0,,0,0,0,0,UP,1,1,0,,0,70698,0,,2,17,0,,3209,,1,0,,7,,,,0,218,1497,1494,0,0,,,,,0,0,0,0,0,0,2,,,0,2,23,545,
be_static,host0,0,0,0,1,,28,17313,466003,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,1,,28,,2,0,,1,L4OK,,1,0,17,6,5,0,0,0,,,,0,0,,,,,2103,,,0,1,1,36,
be_static,host4,0,0,0,1,,28,15358,1281073,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,2,,28,,2,0,,1,L4OK,,1,0,20,5,3,0,0,0,,,,0,0,,,,,2076,,,0,1,1,54,
be_static,host5,0,0,0,1,,28,17547,1970404,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,3,,28,,2,0,,1,L4OK,,0,0,20,5,3,0,0,0,,,,0,0,,,,,1495,,,0,1,1,53,
be_static,host6,0,0,0,1,,28,14105,1328679,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,4,,28,,2,0,,1,L4OK,,0,0,18,8,2,0,0,0,,,,0,0,,,,,1418,,,0,0,1,49,
be_static,host7,0,0,0,1,,28,15258,1965185,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,5,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,935,,,0,0,1,28,
be_static,host8,0,0,0,1,,28,12934,1034779,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,6,,28,,2,0,,1,L4OK,,0,0,17,9,2,0,0,0,,,,0,0,,,,,582,,,0,1,1,66,
be_static,host9,0,0,0,1,,28,13434,134063,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,7,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,539,,,0,0,1,80,
be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,8,,28,,2,0,,1,L4OK,,0,0,22,6,0,0,0,0,,,,0,0,,,,,487,,,0,0,1,36,
be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38,
be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17,
be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381,
be_app,host0,0,0,1,32,32,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
be_app,host4,0,0,2,29,32,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,agent_status,agent_code,agent_duration,check_desc,agent_desc,check_rise,check_fall,check_health,agent_rise,agent_fall,agent_health,addr,cookie,mode,algo,conn_rate,conn_rate_max,conn_tot,intercepted,dcon,dses,
http-in,FRONTEND,,,3,100,100,2639994,813557487,65937668635,505252,0,47567,,,,,OPEN,,,,,,,,,1,2,0,,,,0,1,0,157,,,,0,1514640,606647,136264,496535,14948,,1,155,2754255,,,36370569635,17435137766,0,642264,,,,,,,,,,,,,,,,,,,,,http,,1,157,2649922,339471,0,0,
http-in,IPv4-direct,,,3,41,100,349801,57445827,1503928881,269899,0,287,,,,,OPEN,,,,,,,,,1,2,1,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
http-in,IPv4-cached,,,0,33,100,1786155,644395819,57905460294,60511,0,1,,,,,OPEN,,,,,,,,,1,2,2,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
http-in,IPv6-direct,,,0,100,100,325619,92414745,6205208728,3399,0,47279,,,,,OPEN,,,,,,,,,1,2,3,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
http-in,local,,,0,0,100,0,0,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,4,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
http-in,local-https,,,0,5,100,188347,19301096,323070732,171443,0,0,,,,,OPEN,,,,,,,,,1,2,5,,,,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,http,,,,,,0,0,
www,www,0,0,0,20,20,1719698,672044109,64806076656,,0,,0,5285,22,0,UP,1,1,0,561,84,1036557,3356,,1,3,1,,1715117,,2,0,,45,L7OK,200,5,671,1144889,481714,87038,4,0,,,,,105016,167,,,,,5,OK,,0,5,16,1167,,,,Layer7 check passed,,2,3,4,,,,,,http,,,,,,,,
www,bck,0,0,0,10,10,1483,537137,7544118,,0,,0,0,0,0,UP,1,0,1,4,0,5218087,0,,1,3,2,,1371,,2,0,,17,L7OK,200,2,0,629,99,755,0,0,,,,,16,0,,,,,1036557,OK,,756,1,13,1184,,,,Layer7 check passed,,2,5,6,,,,,,http,,,,,,,,
www,BACKEND,0,25,0,46,100,1721835,674684790,64813732170,314,0,,130,5285,22,0,UP,1,1,1,,0,5218087,0,,1,3,0,,1716488,,1,0,,45,,,,0,1145518,481813,88664,5719,121,,,,1721835,105172,167,35669268059,17250148556,0,556042,5,,,0,5,16,1167,,,,,,,,,,,,,,http,,,,,,,,
git,www,0,0,0,2,2,14539,5228218,303747244,,0,,0,21,0,0,UP,1,1,0,559,84,1036557,3352,,1,4,1,,9481,,2,0,,2,L7OK,200,3,0,5668,8710,140,0,0,,,,,690,0,,,,,1342,OK,,1268,1,2908,4500,,,,Layer7 check passed,,2,3,4,,,,,,http,,,,,,,,
git,bck,0,0,0,0,2,0,0,0,,0,,0,0,0,0,UP,1,0,1,2,0,5218087,0,,1,4,2,,0,,2,0,,0,L7OK,200,2,0,0,0,0,0,0,,,,,0,0,,,,,-1,OK,,0,0,0,0,,,,Layer7 check passed,,2,3,4,,,,,,http,,,,,,,,
git,BACKEND,0,6,0,8,2,14541,8082393,303747668,0,0,,2,21,0,0,UP,1,1,1,,0,5218087,0,,1,4,0,,9481,,1,0,,7,,,,0,5668,8710,140,23,0,,,,14541,690,0,133458298,38104818,0,4379,1342,,,1268,1,2908,4500,,,,,,,,,,,,,,http,,,,,,,,
demo,BACKEND,0,0,1,5,20,24063,7876647,659864417,48,0,,1,0,0,0,UP,0,0,0,,0,5218087,,,1,17,0,,0,,1,1,,26,,,,0,23983,21,0,1,57,,,,24062,111,0,567843278,146884392,0,1083,0,,,2706,0,0,887,,,,,,,,,,,,,,http,,,,,,,,
`