Changes to make the Elasticsearch input work with basic authentication (Shield) and with older versions of ES (e.g. 0.19.9). Changes to query the NodeStats API endpoint by individual statistic instead of the default of all statistics. (Older versions need all=true specified explicitly.)
parent fc59757a1a
commit 3b3008ebd2
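For orientation: the first hunk below updates the bundled sample configuration, and the remaining hunks change the Elasticsearch input plugin itself. As a rough illustration of what the change enables, here is a minimal standalone sketch, assuming the plugin's node-stats base path is /_nodes/stats; the fetchNodeStats helper, host, and credentials are hypothetical and not part of the commit.

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "os"
    )

    // fetchNodeStats (hypothetical helper) mirrors the flow this commit adds:
    // query a single node-stats sub-endpoint and attach basic-auth credentials
    // (Shield) only when they are configured.
    func fetchNodeStats(server, stat, username, password string) ([]byte, error) {
        req, err := http.NewRequest("GET", server+"/_nodes/stats/"+stat, nil)
        if err != nil {
            return nil, err
        }
        if username != "" && password != "" {
            req.SetBasicAuth(username, password)
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        return io.ReadAll(resp.Body)
    }

    func main() {
        // Fetch only JVM statistics instead of the full _nodes/stats document.
        body, err := fetchNodeStats("http://localhost:9200", "jvm", "admin", "admin123")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Printf("received %d bytes of JVM node stats\n", len(body))
    }

With stats = ["jvm"] in the plugin configuration, each gather cycle now issues one such request per server rather than pulling every statistic.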
@@ -768,6 +768,12 @@
 # # ssl_key = "/etc/telegraf/key.pem"
 # ## Use SSL but skip chain & host verification
 # # insecure_skip_verify = false
+# ## Optional for authentication setup with Shield
+# # username = "admin"
+# # password = "admin123"
+
+# ## Cluster node stats API endpoints to query. Defaults to all of the options below.
+# # stats = ["indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", "breakers"]


 # # Read metrics from one or more commands that can output to stdout
@@ -78,6 +78,14 @@ const sampleConfig = `
 # ssl_key = "/etc/telegraf/key.pem"
 ## Use SSL but skip chain & host verification
 # insecure_skip_verify = false
+
+## Optional for authentication setup with Shield
+# username = "admin"
+# password = "admin123"
+
+## Cluster node stats API endpoints to query. Defaults to all of the options below.
+# stats = ["indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", "breakers"]
+
 `

 // Elasticsearch is a plugin to read stats from one or many Elasticsearch
@@ -85,6 +93,7 @@ const sampleConfig = `
 type Elasticsearch struct {
     Local bool
     Servers []string
+    Stats []string
     HttpTimeout internal.Duration
     ClusterHealth bool
     SSLCA string `toml:"ssl_ca"` // Path to CA file
@@ -92,6 +101,8 @@ type Elasticsearch struct {
     SSLKey string `toml:"ssl_key"` // Path to cert key file
     InsecureSkipVerify bool // Use SSL but skip chain & host verification
     client *http.Client
+    Username string
+    Password string
 }

 // NewElasticsearch return a new instance of Elasticsearch
@@ -122,7 +133,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
         }
         e.client = client
     }
-
     errChan := errchan.New(len(e.Servers))
     var wg sync.WaitGroup
     wg.Add(len(e.Servers))
@@ -131,6 +141,9 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
         go func(s string, acc telegraf.Accumulator) {
             defer wg.Done()
             var url string
+            if e.ClusterHealth {
+                e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
+            }
             if e.Local {
                 url = s + statsPathLocal
             } else {
@@ -140,9 +153,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
                 errChan.C <- err
                 return
             }
-            if e.ClusterHealth {
-                e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
-            }
         }(serv, acc)
     }

@@ -159,6 +169,7 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
         ResponseHeaderTimeout: e.HttpTimeout.Duration,
         TLSClientConfig: tlsCfg,
     }
+
     client := &http.Client{
         Transport: tr,
         Timeout: e.HttpTimeout.Duration,
@@ -168,45 +179,75 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
 }

 func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
-    nodeStats := &struct {
-        ClusterName string `json:"cluster_name"`
-        Nodes map[string]*node `json:"nodes"`
-    }{}
-    if err := e.gatherData(url, nodeStats); err != nil {
-        return err
+    var iurl string
+    if len(e.Stats) == 0 {
+        e.Stats = []string{"indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", "breakers"}
     }
-    for id, n := range nodeStats.Nodes {
-        tags := map[string]string{
-            "node_id": id,
-            "node_host": n.Host,
-            "node_name": n.Name,
-            "cluster_name": nodeStats.ClusterName,
+    for _, s := range e.Stats {
+        iurl = url
+        switch s {
+        case "indices":
+            iurl = iurl + "/indices"
+        case "os":
+            iurl = iurl + "/os"
+        case "process":
+            iurl = iurl + "/process"
+        case "jvm":
+            iurl = iurl + "/jvm"
+        case "thread_pool":
+            iurl = iurl + "/thread_pool"
+        case "fs":
+            iurl = iurl + "/fs"
+        case "transport":
+            iurl = iurl + "/transport"
+        case "http":
+            iurl = iurl + "/http"
+        case "breakers":
+            iurl = iurl + "/breakers"
+        default:
+            return fmt.Errorf("elasticsearch: No matching NodeStats endpoint found in %s ", s)
         }
-        for k, v := range n.Attributes {
-            tags["node_attribute_"+k] = v
+        nodeStats := &struct {
+            ClusterName string `json:"cluster_name"`
+            Nodes map[string]*node `json:"nodes"`
+        }{}
+        if err := e.gatherData(iurl, nodeStats); err != nil {
+            return err
         }
-        stats := map[string]interface{}{
-            "indices": n.Indices,
-            "os": n.OS,
-            "process": n.Process,
-            "jvm": n.JVM,
-            "thread_pool": n.ThreadPool,
-            "fs": n.FS,
-            "transport": n.Transport,
-            "http": n.HTTP,
-            "breakers": n.Breakers,
-        }
-        now := time.Now()
-        for p, s := range stats {
-            f := jsonparser.JSONFlattener{}
-            err := f.FlattenJSON("", s)
-            if err != nil {
-                return err
+        for id, n := range nodeStats.Nodes {
+            tags := map[string]string{
+                "node_id": id,
+                "node_host": n.Host,
+                "node_name": n.Name,
+                "cluster_name": nodeStats.ClusterName,
             }
-            acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
+            for k, v := range n.Attributes {
+                tags["node_attribute_"+k] = v
+            }
+            stats := map[string]interface{}{
+                "indices": n.Indices,
+                "os": n.OS,
+                "process": n.Process,
+                "jvm": n.JVM,
+                "thread_pool": n.ThreadPool,
+                "fs": n.FS,
+                "transport": n.Transport,
+                "http": n.HTTP,
+                "breakers": n.Breakers,
+            }
+            now := time.Now()
+            for p, s := range stats {
+                f := jsonparser.JSONFlattener{}
+                err := f.FlattenJSON("", s)
+                if err != nil {
+                    return err
+                }
+                acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
+            }
         }
     }
     return nil
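The switch above validates each configured statistic name and maps it onto its own _nodes/stats sub-endpoint, falling back to the full default list when stats is left empty, so every configured entry costs one HTTP request per server per gather. A minimal standalone sketch of that selection step, with buildStatsURL as a hypothetical helper name:

    package main

    import "fmt"

    // buildStatsURL validates a statistic name and appends it to the node-stats
    // base URL, the same effect as the switch in gatherNodeStats above.
    func buildStatsURL(base, stat string) (string, error) {
        switch stat {
        case "indices", "os", "process", "jvm", "thread_pool",
            "fs", "transport", "http", "breakers":
            return base + "/" + stat, nil
        default:
            return "", fmt.Errorf("elasticsearch: no matching NodeStats endpoint found for %q", stat)
        }
    }

    func main() {
        u, err := buildStatsURL("http://localhost:9200/_nodes/stats", "jvm")
        fmt.Println(u, err) // http://localhost:9200/_nodes/stats/jvm <nil>
    }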
@@ -258,7 +299,11 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator)
 }

 func (e *Elasticsearch) gatherData(url string, v interface{}) error {
-    r, err := e.client.Get(url)
+    req, err := http.NewRequest("GET", url, nil)
+    if len(e.Username) > 0 && len(e.Password) > 0 {
+        req.SetBasicAuth(e.Username, e.Password)
+    }
+    r, err := e.client.Do(req)
     if err != nil {
         return err
     }
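Because every request the plugin makes is funneled through gatherData, the credentials now apply to node stats and cluster health calls alike. A minimal standalone sketch of the same request pattern with a caller-supplied client; the gatherJSON name, timeout, and status-code check are illustrative additions rather than the plugin's exact code.

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "time"
    )

    // gatherJSON builds the request, sets basic auth only when credentials are
    // present, sends it through the supplied client, and decodes the JSON body.
    func gatherJSON(client *http.Client, url, username, password string, v interface{}) error {
        req, err := http.NewRequest("GET", url, nil)
        if err != nil {
            return err
        }
        if len(username) > 0 && len(password) > 0 {
            req.SetBasicAuth(username, password)
        }
        resp, err := client.Do(req)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return fmt.Errorf("elasticsearch: unexpected status %s from %s", resp.Status, url)
        }
        return json.NewDecoder(resp.Body).Decode(v)
    }

    func main() {
        client := &http.Client{Timeout: 5 * time.Second}
        var health map[string]interface{}
        err := gatherJSON(client, "http://localhost:9200/_cluster/health?level=indices", "admin", "admin123", &health)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println("cluster status:", health["status"])
    }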