package prometheus

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
)

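// acceptHeader asks for the protobuf delimited exposition format first
// (q=0.7) and falls back to the plain-text format version 0.0.4 (q=0.3).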
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`

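// Prometheus is an input plugin that scrapes metrics from Prometheus
// client endpoints, listed statically in urls or discovered through
// Kubernetes services and pod annotations.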
type Prometheus struct {
	// An array of urls to scrape metrics from.
	URLs []string `toml:"urls"`

	// An array of Kubernetes services to scrape metrics from.
	KubernetesServices []string

	// Location of kubernetes config file
	KubeConfig string

	// Bearer Token authorization file path
	BearerToken       string `toml:"bearer_token"`
	BearerTokenString string `toml:"bearer_token_string"`

	// Basic authentication credentials
	Username string `toml:"username"`
	Password string `toml:"password"`

	ResponseTimeout internal.Duration `toml:"response_timeout"`

	MetricVersion int `toml:"metric_version"`

	URLTag string `toml:"url_tag"`

	tls.ClientConfig

	Log telegraf.Logger

	client *http.Client

	// Should we scrape Kubernetes pods for prometheus annotations
	MonitorPods  bool   `toml:"monitor_kubernetes_pods"`
	PodNamespace string `toml:"monitor_kubernetes_pods_namespace"`

	lock           sync.Mutex
	kubernetesPods map[string]URLAndAddress
	cancel         context.CancelFunc
	wg             sync.WaitGroup
}

var sampleConfig = `
  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]

  ## Metric version (optional, default=1, supported values are 1 and 2)
  # metric_version = 2

  ## URL tag name (tag containing scraped url; optional, default is "url")
  # url_tag = "scrapeUrl"

  ## An array of Kubernetes services to scrape metrics from.
  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

  ## Kubernetes config file to create client from.
  # kube_config = "/path/to/kubernetes.config"

  ## Scrape Kubernetes pods for the following prometheus annotations:
  ## - prometheus.io/scrape: Enable scraping for this pod
  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  ##     set this to 'https' & most likely set the tls config.
  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  ## - prometheus.io/port: If port is not 9102 use this annotation
  # monitor_kubernetes_pods = true

  ## Restricts Kubernetes monitoring to a single namespace
  ##   ex: monitor_kubernetes_pods_namespace = "default"
  # monitor_kubernetes_pods_namespace = ""

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## HTTP Basic Authentication username and password. ('bearer_token' and
  ## 'bearer_token_string' take priority)
  # username = ""
  # password = ""

  ## Specify timeout duration for slower prometheus clients (default is 3s)
  # response_timeout = "3s"

  ## Optional TLS Config
  # tls_ca = "/path/to/cafile"
  # tls_cert = "/path/to/certfile"
  # tls_key = "/path/to/keyfile"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`

func (p *Prometheus) SampleConfig() string {
	return sampleConfig
}

func (p *Prometheus) Description() string {
	return "Read metrics from one or many prometheus clients"
}

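// ErrProtocolError is a sentinel error for responses that do not follow
// the Prometheus exposition protocol.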
var ErrProtocolError = errors.New("prometheus protocol error")

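// AddressToURL returns a copy of u with the host replaced by the given
// resolved address, preserving the original port and all other components.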
func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
	host := address
	if u.Port() != "" {
		host = address + ":" + u.Port()
	}
	reconstructedURL := &url.URL{
		Scheme:     u.Scheme,
		Opaque:     u.Opaque,
		User:       u.User,
		Path:       u.Path,
		RawPath:    u.RawPath,
		ForceQuery: u.ForceQuery,
		RawQuery:   u.RawQuery,
		Fragment:   u.Fragment,
		Host:       host,
	}
	return reconstructedURL
}

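// URLAndAddress pairs a scrape target with the URL it was derived from.
// Address holds the resolved IP for Kubernetes service targets, and Tags
// holds extra tags attached by pod discovery.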
type URLAndAddress struct {
	OriginalURL *url.URL
	URL         *url.URL
	Address     string
	Tags        map[string]string
}

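// GetAllURLs merges the statically configured urls, the Kubernetes pods
// discovered by the annotation watcher, and one URL per resolved address
// of each configured Kubernetes service, keyed by URL string to
// de-duplicate targets.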
func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
	allURLs := make(map[string]URLAndAddress)
	for _, u := range p.URLs {
		URL, err := url.Parse(u)
		if err != nil {
			p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error())
			continue
		}
		allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL}
	}

	p.lock.Lock()
	defer p.lock.Unlock()
	// merge in the pods discovered via their prometheus annotations
	for k, v := range p.kubernetesPods {
		allURLs[k] = v
	}

	for _, service := range p.KubernetesServices {
		URL, err := url.Parse(service)
		if err != nil {
			return nil, err
		}

		resolvedAddresses, err := net.LookupHost(URL.Hostname())
		if err != nil {
			p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error())
			continue
		}
		for _, resolved := range resolvedAddresses {
			serviceURL := p.AddressToURL(URL, resolved)
			allURLs[serviceURL.String()] = URLAndAddress{
				URL:         serviceURL,
				Address:     resolved,
				OriginalURL: URL,
			}
		}
	}
	return allURLs, nil
}

// Gather reads stats from all configured servers and accumulates them.
// Errors for individual targets are reported through the accumulator
// rather than returned.
func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
	if p.client == nil {
		client, err := p.createHTTPClient()
		if err != nil {
			return err
		}
		p.client = client
	}

	var wg sync.WaitGroup

	allURLs, err := p.GetAllURLs()
	if err != nil {
		return err
	}
	for _, URL := range allURLs {
		wg.Add(1)
		go func(serviceURL URLAndAddress) {
			defer wg.Done()
			acc.AddError(p.gatherURL(serviceURL, acc))
		}(URL)
	}

	wg.Wait()

	return nil
}

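// createHTTPClient builds the client shared by all http(s) targets.
// Keep-alives are disabled, so every scrape opens a fresh connection.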
func (p *Prometheus) createHTTPClient() (*http.Client, error) {
	tlsCfg, err := p.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:   tlsCfg,
			DisableKeepAlives: true,
		},
		Timeout: p.ResponseTimeout.Duration,
	}

	return client, nil
}

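// gatherURL scrapes a single target and adds the parsed metrics to acc.
// Targets with a "unix" scheme are dialed over a unix domain socket whose
// filesystem path is the URL path; the HTTP request path defaults to
// /metrics and can be overridden with a "path" query parameter, e.g.
// unix:///run/app.sock?path=/custom_metrics (hypothetical example).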
func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error {
	var req *http.Request
	var err error
	var uClient *http.Client
	var metrics []telegraf.Metric
	if u.URL.Scheme == "unix" {
		path := u.URL.Query().Get("path")
		if path == "" {
			path = "/metrics"
		}
		// the host in the request URL is a placeholder; the custom Dial
		// below connects to the unix socket at the URL path instead
		req, err = http.NewRequest("GET", "http://localhost"+path, nil)

		// ignore error because it's been handled before getting here
		tlsCfg, _ := p.ClientConfig.TLSConfig()
		uClient = &http.Client{
			Transport: &http.Transport{
				TLSClientConfig:   tlsCfg,
				DisableKeepAlives: true,
				Dial: func(network, addr string) (net.Conn, error) {
					return net.Dial("unix", u.URL.Path)
				},
			},
			Timeout: p.ResponseTimeout.Duration,
		}
	} else {
		if u.URL.Path == "" {
			u.URL.Path = "/metrics"
		}
		req, err = http.NewRequest("GET", u.URL.String(), nil)
	}
	if err != nil {
		return fmt.Errorf("unable to create request for %s: %s", u.URL, err)
	}

	req.Header.Add("Accept", acceptHeader)

	if p.BearerToken != "" {
		token, err := ioutil.ReadFile(p.BearerToken)
		if err != nil {
			return err
		}
		req.Header.Set("Authorization", "Bearer "+string(token))
	} else if p.BearerTokenString != "" {
		req.Header.Set("Authorization", "Bearer "+p.BearerTokenString)
	} else if p.Username != "" || p.Password != "" {
		req.SetBasicAuth(p.Username, p.Password)
	}

	var resp *http.Response
	if u.URL.Scheme != "unix" {
		resp, err = p.client.Do(req)
	} else {
		resp, err = uClient.Do(req)
	}
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("error reading body: %s", err)
	}

	if p.MetricVersion == 2 {
		metrics, err = ParseV2(body, resp.Header)
	} else {
		metrics, err = Parse(body, resp.Header)
	}

	if err != nil {
		return fmt.Errorf("error reading metrics for %s: %s",
			u.URL, err)
	}

	for _, metric := range metrics {
		tags := metric.Tags()
		// strip user and password from URL
		u.OriginalURL.User = nil
		tags[p.URLTag] = u.OriginalURL.String()
		if u.Address != "" {
			tags["address"] = u.Address
		}
		for k, v := range u.Tags {
			tags[k] = v
		}

		switch metric.Type() {
		case telegraf.Counter:
			acc.AddCounter(metric.Name(), metric.Fields(), tags, metric.Time())
		case telegraf.Gauge:
			acc.AddGauge(metric.Name(), metric.Fields(), tags, metric.Time())
		case telegraf.Summary:
			acc.AddSummary(metric.Name(), metric.Fields(), tags, metric.Time())
		case telegraf.Histogram:
			acc.AddHistogram(metric.Name(), metric.Fields(), tags, metric.Time())
		default:
			acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time())
		}
	}

	return nil
}

// Start will start the Kubernetes scraping if enabled in the configuration
func (p *Prometheus) Start(a telegraf.Accumulator) error {
	if p.MonitorPods {
		var ctx context.Context
		ctx, p.cancel = context.WithCancel(context.Background())
		return p.start(ctx)
	}
	return nil
}

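// Stop cancels the Kubernetes watcher, if one was started, and waits for
// its goroutines to finish.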
func (p *Prometheus) Stop() {
	if p.MonitorPods {
		p.cancel()
	}
	p.wg.Wait()
}

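// init registers the plugin with its defaults: a 3 second response
// timeout and "url" as the tag carrying the scraped endpoint.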
func init() {
	inputs.Add("prometheus", func() telegraf.Input {
		return &Prometheus{
			ResponseTimeout: internal.Duration{Duration: time.Second * 3},
			kubernetesPods:  map[string]URLAndAddress{},
			URLTag:          "url",
		}
	})
}