2016-03-14 09:27:07 +00:00
|
|
|
package postgresql_extensible
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
2016-04-25 13:27:23 +00:00
|
|
|
"log"
|
2016-03-14 09:27:07 +00:00
|
|
|
"strings"
|
|
|
|
|
2017-04-05 00:37:44 +00:00
|
|
|
// register in driver.
|
|
|
|
_ "github.com/jackc/pgx/stdlib"
|
|
|
|
|
2016-03-14 09:27:07 +00:00
|
|
|
"github.com/influxdata/telegraf"
|
2018-01-06 00:03:09 +00:00
|
|
|
"github.com/influxdata/telegraf/internal"
|
2016-03-14 09:27:07 +00:00
|
|
|
"github.com/influxdata/telegraf/plugins/inputs"
|
2017-01-24 20:36:36 +00:00
|
|
|
"github.com/influxdata/telegraf/plugins/inputs/postgresql"
|
2016-03-14 09:27:07 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Postgresql is the postgresql_extensible input plugin. It embeds the
// shared postgresql.Service (connection handling, sanitized address) and
// adds a list of user-defined SQL queries that are run on every gather.
type Postgresql struct {
	postgresql.Service
	// Databases optionally restricts Withdbname queries to these database
	// names (joined into an IN (...) clause by Gather).
	Databases []string
	// AdditionalTags holds the tag-column names for the query currently
	// being processed; Gather resets and repopulates it per query, and
	// accRow reads it. Not safe for concurrent Gather calls.
	AdditionalTags []string
	// Query is the list of configured queries (see sampleConfig).
	Query []struct {
		Sqlquery    string // SQL text to execute
		Version     int    // minimum server version (e.g. 901); query is skipped on older servers
		Withdbname  bool   // append a datname filter derived from Databases
		Tagvalue    string // comma-separated column names emitted as tags instead of fields
		Measurement string // override for the default "postgresql" measurement name
	}
	Debug bool
}
|
|
|
|
|
|
|
|
// query mirrors the anonymous element type of Postgresql.Query so a list
// of queries can be named as a standalone type.
// NOTE(review): this type is not referenced anywhere in this file —
// confirm it is used elsewhere (e.g. tests) before removing it.
type query []struct {
	Sqlquery    string
	Version     int
	Withdbname  bool
	Tagvalue    string
	Measurement string
}
|
|
|
|
|
2017-01-24 20:36:36 +00:00
|
|
|
// ignoredColumns lists result columns that accRow never emits as fields
// or tags (the "stats_reset" timestamp column is skipped).
var ignoredColumns = map[string]bool{"stats_reset": true}
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
// sampleConfig is the example TOML configuration returned by
// SampleConfig, documenting the connection settings and the structure of
// the [[inputs.postgresql_extensible.query]] entries.
var sampleConfig = `
## specify address via a url matching:
## postgres://[pqgotest[:password]]@localhost[/dbname]\
## ?sslmode=[disable|verify-ca|verify-full]
## or a simple string:
## host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
## All connection parameters are optional. #
## Without the dbname parameter, the driver will default to a database
## with the same name as the user. This dbname is just for instantiating a
## connection with the server and doesn't restrict the databases we are trying
## to grab metrics for.
#
address = "host=localhost user=postgres sslmode=disable"

## connection configuration.
## maxlifetime - specify the maximum lifetime of a connection.
## default is forever (0s)
max_lifetime = "0s"

## A list of databases to pull metrics about. If not specified, metrics for all
## databases are gathered.
## databases = ["app_production", "testing"]
#
## A custom name for the database that will be used as the "server" tag in the
## measurement output. If not specified, a default one generated from
## the connection address is used.
# outputaddress = "db01"
#
## Define the toml config where the sql queries are stored
## New queries can be added, if the withdbname is set to true and there is no
## databases defined in the 'databases field', the sql query is ended by a
## 'is not null' in order to make the query succeed.
## Example :
## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
## because the databases variable was set to ['postgres', 'pgbench' ] and the
## withdbname was true. Be careful that if the withdbname is set to false you
## don't have to define the where clause (aka with the dbname) the tagvalue
## field is used to define custom tags (separated by commas)
## The optional "measurement" value can be used to override the default
## output measurement name ("postgresql").
#
## Structure :
## [[inputs.postgresql_extensible.query]]
## sqlquery string
## version string
## withdbname boolean
## tagvalue string (comma separated)
## measurement string
[[inputs.postgresql_extensible.query]]
sqlquery="SELECT * FROM pg_stat_database"
version=901
withdbname=false
tagvalue=""
measurement=""
[[inputs.postgresql_extensible.query]]
sqlquery="SELECT * FROM pg_stat_bgwriter"
version=901
withdbname=false
tagvalue="postgresql.stats"
`
|
|
|
|
|
|
|
|
// SampleConfig returns the example TOML configuration for this plugin.
func (p *Postgresql) SampleConfig() string {
	return sampleConfig
}
|
|
|
|
|
|
|
|
// Description returns a one-line summary of the plugin for telegraf.
func (p *Postgresql) Description() string {
	return "Read metrics from one or many postgresql servers"
}
|
|
|
|
|
|
|
|
// IgnoredColumns returns the set of result columns that are never
// emitted as fields or tags.
func (p *Postgresql) IgnoredColumns() map[string]bool {
	return ignoredColumns
}
|
|
|
|
|
|
|
|
func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
|
2017-04-05 00:37:44 +00:00
|
|
|
var (
|
|
|
|
err error
|
|
|
|
sql_query string
|
|
|
|
query_addon string
|
|
|
|
db_version int
|
|
|
|
query string
|
|
|
|
tag_value string
|
|
|
|
meas_name string
|
2018-01-06 00:03:09 +00:00
|
|
|
columns []string
|
2017-04-05 00:37:44 +00:00
|
|
|
)
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
// Retreiving the database version
|
|
|
|
|
|
|
|
query = `select substring(setting from 1 for 3) as version from pg_settings where name='server_version_num'`
|
2018-01-06 00:03:09 +00:00
|
|
|
if err = p.DB.QueryRow(query).Scan(&db_version); err != nil {
|
2017-05-18 20:52:56 +00:00
|
|
|
db_version = 0
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
2018-01-06 00:03:09 +00:00
|
|
|
|
2016-03-14 09:27:07 +00:00
|
|
|
// We loop in order to process each query
|
|
|
|
// Query is not run if Database version does not match the query version.
|
|
|
|
|
|
|
|
for i := range p.Query {
|
|
|
|
sql_query = p.Query[i].Sqlquery
|
|
|
|
tag_value = p.Query[i].Tagvalue
|
2016-04-25 12:10:13 +00:00
|
|
|
if p.Query[i].Measurement != "" {
|
|
|
|
meas_name = p.Query[i].Measurement
|
|
|
|
} else {
|
|
|
|
meas_name = "postgresql"
|
|
|
|
}
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
if p.Query[i].Withdbname {
|
|
|
|
if len(p.Databases) != 0 {
|
|
|
|
query_addon = fmt.Sprintf(` IN ('%s')`,
|
|
|
|
strings.Join(p.Databases, "','"))
|
|
|
|
} else {
|
|
|
|
query_addon = " is not null"
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
query_addon = ""
|
|
|
|
}
|
|
|
|
sql_query += query_addon
|
|
|
|
|
|
|
|
if p.Query[i].Version <= db_version {
|
2018-01-06 00:03:09 +00:00
|
|
|
rows, err := p.DB.Query(sql_query)
|
2016-03-14 09:27:07 +00:00
|
|
|
if err != nil {
|
2017-04-24 18:13:26 +00:00
|
|
|
acc.AddError(err)
|
|
|
|
continue
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
defer rows.Close()
|
|
|
|
|
|
|
|
// grab the column information from the result
|
2018-01-06 00:03:09 +00:00
|
|
|
if columns, err = rows.Columns(); err != nil {
|
2017-04-24 18:13:26 +00:00
|
|
|
acc.AddError(err)
|
|
|
|
continue
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
2018-01-06 00:03:09 +00:00
|
|
|
|
2016-03-14 09:27:07 +00:00
|
|
|
p.AdditionalTags = nil
|
|
|
|
if tag_value != "" {
|
|
|
|
tag_list := strings.Split(tag_value, ",")
|
|
|
|
for t := range tag_list {
|
|
|
|
p.AdditionalTags = append(p.AdditionalTags, tag_list[t])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for rows.Next() {
|
2018-01-06 00:03:09 +00:00
|
|
|
err = p.accRow(meas_name, rows, acc, columns)
|
2016-03-14 09:27:07 +00:00
|
|
|
if err != nil {
|
2017-04-24 18:13:26 +00:00
|
|
|
acc.AddError(err)
|
|
|
|
break
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// scanner is the minimal row interface accRow needs (satisfied by
// *sql.Rows and *sql.Row), defined at the consumer for testability.
type scanner interface {
	Scan(dest ...interface{}) error
}
|
|
|
|
|
2018-01-06 00:03:09 +00:00
|
|
|
func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumulator, columns []string) error {
|
2017-09-25 17:58:10 +00:00
|
|
|
var (
|
|
|
|
err error
|
|
|
|
columnVars []interface{}
|
|
|
|
dbname bytes.Buffer
|
|
|
|
tagAddress string
|
|
|
|
)
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
// this is where we'll store the column name with its *interface{}
|
|
|
|
columnMap := make(map[string]*interface{})
|
|
|
|
|
2018-01-06 00:03:09 +00:00
|
|
|
for _, column := range columns {
|
2016-03-14 09:27:07 +00:00
|
|
|
columnMap[column] = new(interface{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// populate the array of interface{} with the pointers in the right order
|
|
|
|
for i := 0; i < len(columnMap); i++ {
|
2018-01-06 00:03:09 +00:00
|
|
|
columnVars = append(columnVars, columnMap[columns[i]])
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// deconstruct array of variables and send to Scan
|
2017-09-25 17:58:10 +00:00
|
|
|
if err = row.Scan(columnVars...); err != nil {
|
2016-03-14 09:27:07 +00:00
|
|
|
return err
|
|
|
|
}
|
2017-09-25 17:58:10 +00:00
|
|
|
|
2016-03-14 09:27:07 +00:00
|
|
|
if columnMap["datname"] != nil {
|
|
|
|
// extract the database name from the column map
|
2017-01-24 20:36:36 +00:00
|
|
|
dbname.WriteString((*columnMap["datname"]).(string))
|
2016-03-14 09:27:07 +00:00
|
|
|
} else {
|
|
|
|
dbname.WriteString("postgres")
|
|
|
|
}
|
|
|
|
|
2017-09-25 17:58:10 +00:00
|
|
|
if tagAddress, err = p.SanitizedAddress(); err != nil {
|
2016-03-14 09:27:07 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process the additional tags
|
2017-09-25 17:58:10 +00:00
|
|
|
tags := map[string]string{
|
|
|
|
"server": tagAddress,
|
|
|
|
"db": dbname.String(),
|
|
|
|
}
|
2016-03-14 09:27:07 +00:00
|
|
|
|
|
|
|
fields := make(map[string]interface{})
|
2016-08-09 07:25:59 +00:00
|
|
|
COLUMN:
|
2016-03-14 09:27:07 +00:00
|
|
|
for col, val := range columnMap {
|
2018-01-06 00:03:09 +00:00
|
|
|
log.Printf("D! postgresql_extensible: column: %s = %T: %v\n", col, *val, *val)
|
2016-03-14 09:27:07 +00:00
|
|
|
_, ignore := ignoredColumns[col]
|
2016-08-09 07:25:59 +00:00
|
|
|
if ignore || *val == nil {
|
|
|
|
continue
|
|
|
|
}
|
2017-01-24 20:36:36 +00:00
|
|
|
|
2016-08-09 07:25:59 +00:00
|
|
|
for _, tag := range p.AdditionalTags {
|
|
|
|
if col != tag {
|
|
|
|
continue
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
2016-08-09 07:25:59 +00:00
|
|
|
switch v := (*val).(type) {
|
2017-01-24 20:36:36 +00:00
|
|
|
case string:
|
|
|
|
tags[col] = v
|
2016-08-09 07:25:59 +00:00
|
|
|
case []byte:
|
|
|
|
tags[col] = string(v)
|
2017-01-24 20:36:36 +00:00
|
|
|
case int64, int32, int:
|
2016-08-09 07:25:59 +00:00
|
|
|
tags[col] = fmt.Sprintf("%d", v)
|
2017-01-24 20:36:36 +00:00
|
|
|
default:
|
|
|
|
log.Println("failed to add additional tag", col)
|
2016-03-17 14:01:08 +00:00
|
|
|
}
|
2016-08-09 07:25:59 +00:00
|
|
|
continue COLUMN
|
|
|
|
}
|
2017-09-25 17:58:10 +00:00
|
|
|
|
2016-08-09 07:25:59 +00:00
|
|
|
if v, ok := (*val).([]byte); ok {
|
|
|
|
fields[col] = string(v)
|
|
|
|
} else {
|
|
|
|
fields[col] = *val
|
2016-03-14 09:27:07 +00:00
|
|
|
}
|
|
|
|
}
|
2016-04-25 12:10:13 +00:00
|
|
|
acc.AddFields(meas_name, fields, tags)
|
2016-03-14 09:27:07 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
inputs.Add("postgresql_extensible", func() telegraf.Input {
|
2018-01-06 00:03:09 +00:00
|
|
|
return &Postgresql{
|
|
|
|
Service: postgresql.Service{
|
|
|
|
MaxIdle: 1,
|
|
|
|
MaxOpen: 1,
|
|
|
|
MaxLifetime: internal.Duration{
|
|
|
|
Duration: 0,
|
|
|
|
},
|
2018-08-01 22:44:10 +00:00
|
|
|
IsPgBouncer: false,
|
2018-01-06 00:03:09 +00:00
|
|
|
},
|
|
|
|
}
|
2016-03-14 09:27:07 +00:00
|
|
|
})
|
|
|
|
}
|