2016-03-14 09:27:07 +00:00
|
|
|
package postgresql_extensible
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"database/sql"
|
|
|
|
"fmt"
|
2016-04-25 13:27:23 +00:00
|
|
|
"log"
|
2016-03-14 09:27:07 +00:00
|
|
|
"regexp"
|
|
|
|
"strings"
|
|
|
|
|
|
|
|
"github.com/influxdata/telegraf"
|
|
|
|
"github.com/influxdata/telegraf/plugins/inputs"
|
|
|
|
|
|
|
|
"github.com/lib/pq"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Postgresql is the postgresql_extensible input plugin: it runs a
// user-configured list of SQL queries against a PostgreSQL server and turns
// the result rows into measurements.
type Postgresql struct {
	// Address is the connection string (URL or key=value form).
	Address string
	// Outputaddress optionally overrides the "server" tag value.
	Outputaddress string
	// Databases restricts queries with Withdbname set to these databases.
	Databases []string
	// OrderedColumns holds the column names of the current result set in
	// result order; AllColumns accumulates every column name seen.
	OrderedColumns []string
	AllColumns     []string
	// AdditionalTags lists result columns emitted as tags instead of fields.
	AdditionalTags []string
	// sanitizedAddress caches the credential-stripped connection address.
	sanitizedAddress string
	// Query holds the user-configured queries (see sampleConfig).
	Query []struct {
		Sqlquery    string // the SQL text to run
		Version     int    // minimum server version (e.g. 901) required to run it
		Withdbname  bool   // append a database filter clause to the query
		Tagvalue    string // comma-separated result columns to emit as tags
		Measurement string // measurement name override (default "postgresql")
	}
}
|
|
|
|
|
|
|
|
// query mirrors the element type of Postgresql.Query.
// NOTE(review): this type appears unused within this file — confirm it is
// still referenced elsewhere before removing it.
type query []struct {
	Sqlquery    string
	Version     int
	Withdbname  bool
	Tagvalue    string
	Measurement string
}
|
|
|
|
|
|
|
|
// ignoredColumns are result columns that accRow never emits as fields
// ("datname" is instead used as the "db" tag).
var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}
|
|
|
|
|
|
|
|
// sampleConfig is the example TOML returned by (*Postgresql).SampleConfig.
var sampleConfig = `
  ## specify address via a url matching:
  ## postgres://[pqgotest[:password]]@localhost[/dbname]\
  ## ?sslmode=[disable|verify-ca|verify-full]
  ## or a simple string:
  ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
  #
  ## All connection parameters are optional. #
  ## Without the dbname parameter, the driver will default to a database
  ## with the same name as the user. This dbname is just for instantiating a
  ## connection with the server and doesn't restrict the databases we are trying
  ## to grab metrics for.
  #
  address = "host=localhost user=postgres sslmode=disable"
  ## A list of databases to pull metrics about. If not specified, metrics for all
  ## databases are gathered.
  ## databases = ["app_production", "testing"]
  #
  # outputaddress = "db01"
  ## A custom name for the database that will be used as the "server" tag in the
  ## measurement output. If not specified, a default one generated from
  ## the connection address is used.
  #
  ## Define the toml config where the sql queries are stored
  ## New queries can be added, if the withdbname is set to true and there is no
  ## databases defined in the 'databases field', the sql query is ended by a
  ## 'is not null' in order to make the query succeed.
  ## Example :
  ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
  ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
  ## because the databases variable was set to ['postgres', 'pgbench' ] and the
  ## withdbname was true. Be careful that if the withdbname is set to false you
  ## don't have to define the where clause (aka with the dbname) the tagvalue
  ## field is used to define custom tags (separated by commas)
  ## The optional "measurement" value can be used to override the default
  ## output measurement name ("postgresql").
  #
  ## Structure :
  ## [[inputs.postgresql_extensible.query]]
  ##   sqlquery string
  ##   version string
  ##   withdbname boolean
  ##   tagvalue string (comma separated)
  ##   measurement string
  [[inputs.postgresql_extensible.query]]
    sqlquery="SELECT * FROM pg_stat_database"
    version=901
    withdbname=false
    tagvalue=""
    measurement=""
  [[inputs.postgresql_extensible.query]]
    sqlquery="SELECT * FROM pg_stat_bgwriter"
    version=901
    withdbname=false
    tagvalue="postgresql.stats"
`
|
|
|
|
|
|
|
|
// SampleConfig returns the example TOML configuration for this plugin.
func (p *Postgresql) SampleConfig() string {
	return sampleConfig
}
|
|
|
|
|
|
|
|
// Description returns a one-line summary of the plugin.
func (p *Postgresql) Description() string {
	return "Read metrics from one or many postgresql servers"
}
|
|
|
|
|
|
|
|
// IgnoredColumns returns the set of result columns this plugin never emits
// as fields (see ignoredColumns).
func (p *Postgresql) IgnoredColumns() map[string]bool {
	return ignoredColumns
}
|
|
|
|
|
|
|
|
// localhost is the default connection string used when no address (or the
// bare string "localhost") is configured.
var localhost = "host=localhost sslmode=disable"
|
|
|
|
|
|
|
|
func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
|
|
|
|
|
|
|
|
var sql_query string
|
|
|
|
var query_addon string
|
|
|
|
var db_version int
|
|
|
|
var query string
|
|
|
|
var tag_value string
|
2016-04-25 12:10:13 +00:00
|
|
|
var meas_name string
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
if p.Address == "" || p.Address == "localhost" {
|
|
|
|
p.Address = localhost
|
|
|
|
}
|
|
|
|
|
|
|
|
db, err := sql.Open("postgres", p.Address)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
defer db.Close()
|
|
|
|
|
|
|
|
// Retreiving the database version
|
|
|
|
|
|
|
|
query = `select substring(setting from 1 for 3) as version from pg_settings where name='server_version_num'`
|
|
|
|
err = db.QueryRow(query).Scan(&db_version)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// We loop in order to process each query
|
|
|
|
// Query is not run if Database version does not match the query version.
|
|
|
|
|
|
|
|
for i := range p.Query {
|
|
|
|
sql_query = p.Query[i].Sqlquery
|
|
|
|
tag_value = p.Query[i].Tagvalue
|
2016-04-25 12:10:13 +00:00
|
|
|
if p.Query[i].Measurement != "" {
|
|
|
|
meas_name = p.Query[i].Measurement
|
|
|
|
} else {
|
|
|
|
meas_name = "postgresql"
|
|
|
|
}
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
if p.Query[i].Withdbname {
|
|
|
|
if len(p.Databases) != 0 {
|
|
|
|
query_addon = fmt.Sprintf(` IN ('%s')`,
|
|
|
|
strings.Join(p.Databases, "','"))
|
|
|
|
} else {
|
|
|
|
query_addon = " is not null"
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
query_addon = ""
|
|
|
|
}
|
|
|
|
sql_query += query_addon
|
|
|
|
|
|
|
|
if p.Query[i].Version <= db_version {
|
|
|
|
rows, err := db.Query(sql_query)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
defer rows.Close()
|
|
|
|
|
|
|
|
// grab the column information from the result
|
|
|
|
p.OrderedColumns, err = rows.Columns()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
} else {
|
|
|
|
for _, v := range p.OrderedColumns {
|
|
|
|
p.AllColumns = append(p.AllColumns, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
p.AdditionalTags = nil
|
|
|
|
if tag_value != "" {
|
|
|
|
tag_list := strings.Split(tag_value, ",")
|
|
|
|
for t := range tag_list {
|
|
|
|
p.AdditionalTags = append(p.AdditionalTags, tag_list[t])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for rows.Next() {
|
2016-04-25 12:10:13 +00:00
|
|
|
err = p.accRow(meas_name, rows, acc)
|
2016-03-14 09:27:07 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// scanner abstracts the Scan method (implemented by *sql.Rows) so accRow can
// be tested without a live database connection.
type scanner interface {
	Scan(dest ...interface{}) error
}
|
|
|
|
|
2016-04-23 21:40:10 +00:00
|
|
|
// KVMatcher strips sensitive key=value pairs (credentials and SSL settings)
// from a connection string so it can safely be exposed as a metric tag.
// MustCompile is appropriate here: the pattern is a compile-time constant,
// so the previously discarded Compile error could only hide a programmer bug.
var KVMatcher = regexp.MustCompile(`(password|sslcert|sslkey|sslmode|sslrootcert)=\S+ ?`)
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
func (p *Postgresql) SanitizedAddress() (_ string, err error) {
|
2016-04-25 15:09:35 +00:00
|
|
|
if p.Outputaddress != "" {
|
|
|
|
return p.Outputaddress, nil
|
|
|
|
}
|
2016-03-14 09:27:07 +00:00
|
|
|
var canonicalizedAddress string
|
|
|
|
if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") {
|
|
|
|
canonicalizedAddress, err = pq.ParseURL(p.Address)
|
|
|
|
if err != nil {
|
|
|
|
return p.sanitizedAddress, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
canonicalizedAddress = p.Address
|
|
|
|
}
|
2016-04-23 21:40:10 +00:00
|
|
|
p.sanitizedAddress = KVMatcher.ReplaceAllString(canonicalizedAddress, "")
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
return p.sanitizedAddress, err
|
|
|
|
}
|
|
|
|
|
2016-04-25 12:10:13 +00:00
|
|
|
func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumulator) error {
|
2016-03-14 09:27:07 +00:00
|
|
|
var columnVars []interface{}
|
|
|
|
var dbname bytes.Buffer
|
|
|
|
|
|
|
|
// this is where we'll store the column name with its *interface{}
|
|
|
|
columnMap := make(map[string]*interface{})
|
|
|
|
|
|
|
|
for _, column := range p.OrderedColumns {
|
|
|
|
columnMap[column] = new(interface{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// populate the array of interface{} with the pointers in the right order
|
|
|
|
for i := 0; i < len(columnMap); i++ {
|
|
|
|
columnVars = append(columnVars, columnMap[p.OrderedColumns[i]])
|
|
|
|
}
|
|
|
|
|
|
|
|
// deconstruct array of variables and send to Scan
|
|
|
|
err := row.Scan(columnVars...)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if columnMap["datname"] != nil {
|
|
|
|
// extract the database name from the column map
|
|
|
|
dbnameChars := (*columnMap["datname"]).([]uint8)
|
|
|
|
for i := 0; i < len(dbnameChars); i++ {
|
|
|
|
dbname.WriteString(string(dbnameChars[i]))
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
dbname.WriteString("postgres")
|
|
|
|
}
|
|
|
|
|
|
|
|
var tagAddress string
|
|
|
|
tagAddress, err = p.SanitizedAddress()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process the additional tags
|
|
|
|
|
|
|
|
tags := map[string]string{}
|
|
|
|
tags["server"] = tagAddress
|
|
|
|
tags["db"] = dbname.String()
|
|
|
|
fields := make(map[string]interface{})
|
2016-08-09 07:25:59 +00:00
|
|
|
COLUMN:
|
2016-03-14 09:27:07 +00:00
|
|
|
for col, val := range columnMap {
|
2016-04-25 13:27:23 +00:00
|
|
|
if acc.Debug() {
|
|
|
|
log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
|
|
|
|
}
|
2016-03-14 09:27:07 +00:00
|
|
|
_, ignore := ignoredColumns[col]
|
2016-08-09 07:25:59 +00:00
|
|
|
if ignore || *val == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, tag := range p.AdditionalTags {
|
|
|
|
if col != tag {
|
|
|
|
continue
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
2016-08-09 07:25:59 +00:00
|
|
|
switch v := (*val).(type) {
|
|
|
|
case []byte:
|
|
|
|
tags[col] = string(v)
|
|
|
|
case int64:
|
|
|
|
tags[col] = fmt.Sprintf("%d", v)
|
2016-03-17 14:01:08 +00:00
|
|
|
}
|
2016-08-09 07:25:59 +00:00
|
|
|
continue COLUMN
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := (*val).([]byte); ok {
|
|
|
|
fields[col] = string(v)
|
|
|
|
} else {
|
|
|
|
fields[col] = *val
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
|
|
|
}
|
2016-04-25 12:10:13 +00:00
|
|
|
acc.AddFields(meas_name, fields, tags)
|
2016-03-14 09:27:07 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// init registers the plugin with telegraf under the name
// "postgresql_extensible".
func init() {
	inputs.Add("postgresql_extensible", func() telegraf.Input {
		return &Postgresql{}
	})
}
|