make sql.DB connection details configurable
James Lawrence committed Apr 22, 2017
1 parent 3684ec6 commit ee2b082
Showing 7 changed files with 277 additions and 207 deletions.
7 changes: 6 additions & 1 deletion CHANGELOG.md
@@ -39,6 +39,10 @@ for receiving and sending UDP, TCP, unix, & unix-datagram data. These plugins
will replace udp_listener and tcp_listener, which are still available but will
be deprecated eventually.

- Postgresql plugins will now default to using a persistent connection to the database.
If you wish to retain the previous behavior, set the max_lifetime configuration option to
less than the collection interval.
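
A minimal Go sketch, not taken from this commit, of what the new default and the opt-out mean at the database/sql level: a connection lifetime of zero keeps the pool alive across collection intervals, while a lifetime shorter than the interval recycles connections between gathers, roughly restoring the old open-per-gather behavior. The connection string and interval values here are illustrative only.

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/jackc/pgx/stdlib" // importing pgx/stdlib registers the "pgx" driver with database/sql
)

func main() {
	db, err := sql.Open("pgx", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// New default: a zero lifetime means connections never expire, so the same
	// connection is reused across collection intervals.
	maxLifetime := time.Duration(0)

	// To approximate the old behavior, use a lifetime shorter than the collection
	// interval instead, e.g. 5 * time.Second for a 10s interval:
	// maxLifetime = 5 * time.Second

	db.SetConnMaxLifetime(maxLifetime)

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```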

### Features

- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin.
@@ -73,7 +77,8 @@ be deprecated eventually.
- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
- [#2575](https://github.com/influxdata/telegraf/issues/2575) Add diskio input for Darwin
- [#2575](https://github.com/influxdata/telegraf/issues/2575): Add diskio input for Darwin
- [#1977](https://github.com/influxdata/telegraf/issues/1977): Make postgresql connection pool persist between intervals.

### Bugfixes

77 changes: 0 additions & 77 deletions plugins/inputs/postgresql/connect.go

This file was deleted.

76 changes: 32 additions & 44 deletions plugins/inputs/postgresql/postgresql.go
@@ -2,26 +2,24 @@ package postgresql

import (
"bytes"
"database/sql"
"fmt"
"regexp"
"sort"
"strings"

// register in driver.
_ "github.com/jackc/pgx/stdlib"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)

type Postgresql struct {
Address string
Service
Databases []string
IgnoredDatabases []string
OrderedColumns []string
AllColumns []string
sanitizedAddress string
}

var ignoredColumns = map[string]bool{"stats_reset": true}
@@ -41,6 +39,15 @@ var sampleConfig = `
## to grab metrics for.
##
address = "host=localhost user=postgres sslmode=disable"
## A custom name for the database that will be used as the "server" tag in the
## measurement output. If not specified, a default one generated from
## the connection address is used.
# outputaddress = "db01"
## connection configuration.
## maxlifetime - specify the maximum lifetime of a connection.
## default is forever (0s)
max_lifetime = "0s"
## A list of databases to explicitly ignore. If not specified, metrics for all
## databases are gathered. Do NOT use with the 'databases' option.
@@ -63,23 +70,15 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {
return ignoredColumns
}

var localhost = "host=localhost sslmode=disable"

func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
var (
err error
db *sql.DB
query string
)

if p.Address == "" || p.Address == "localhost" {
p.Address = localhost
}

if db, err = sql.Open("pgx", p.Address); err != nil {
if err = p.DB.Ping(); err != nil {
return err
}
defer db.Close()

if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 {
query = `SELECT * FROM pg_stat_database`
@@ -91,22 +90,21 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
strings.Join(p.Databases, "','"))
}

rows, err := db.Query(query)
rows, err := p.DB.Query(query)
if err != nil {
return err
}

defer rows.Close()

// grab the column information from the result
p.OrderedColumns, err = rows.Columns()
if err != nil {
if p.OrderedColumns, err = rows.Columns(); err != nil {
return err
} else {
p.AllColumns = make([]string, len(p.OrderedColumns))
copy(p.AllColumns, p.OrderedColumns)
}

p.AllColumns = make([]string, len(p.OrderedColumns))
copy(p.AllColumns, p.OrderedColumns)

for rows.Next() {
err = p.accRow(rows, acc)
if err != nil {
@@ -116,21 +114,20 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {

query = `SELECT * FROM pg_stat_bgwriter`

bg_writer_row, err := db.Query(query)
bg_writer_row, err := p.DB.Query(query)
if err != nil {
return err
}

defer bg_writer_row.Close()

// grab the column information from the result
p.OrderedColumns, err = bg_writer_row.Columns()
if err != nil {
if p.OrderedColumns, err = bg_writer_row.Columns(); err != nil {
return err
} else {
for _, v := range p.OrderedColumns {
p.AllColumns = append(p.AllColumns, v)
}
}

for _, v := range p.OrderedColumns {
p.AllColumns = append(p.AllColumns, v)
}

for bg_writer_row.Next() {
@@ -147,23 +144,6 @@ type scanner interface {
Scan(dest ...interface{}) error
}

var passwordKVMatcher, _ = regexp.Compile("password=\\S+ ?")

func (p *Postgresql) SanitizedAddress() (_ string, err error) {
var canonicalizedAddress string
if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") {
canonicalizedAddress, err = ParseURL(p.Address)
if err != nil {
return p.sanitizedAddress, err
}
} else {
canonicalizedAddress = p.Address
}
p.sanitizedAddress = passwordKVMatcher.ReplaceAllString(canonicalizedAddress, "")

return p.sanitizedAddress, err
}

func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error {
var columnVars []interface{}
var dbname bytes.Buffer
@@ -215,6 +195,14 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error {

func init() {
inputs.Add("postgresql", func() telegraf.Input {
return &Postgresql{}
return &Postgresql{
Service: Service{
MaxIdle: 1,
MaxOpen: 1,
MaxLifetime: internal.Duration{
Duration: 0,
},
},
}
})
}
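
The Service type that carries these defaults is defined in service.go, one of the seven changed files that this page does not expand. Purely as an illustration of how MaxIdle, MaxOpen, and MaxLifetime map onto the standard database/sql pool knobs, here is a hypothetical helper; it is not the plugin's actual code.

```go
package example

import (
	"database/sql"
	"time"

	_ "github.com/jackc/pgx/stdlib" // register the pgx driver with database/sql
)

// openPool is an illustrative helper, not part of the Telegraf source; it shows
// how MaxIdle, MaxOpen, and MaxLifetime correspond to the sql.DB pool settings.
func openPool(address string, maxIdle, maxOpen int, maxLifetime time.Duration) (*sql.DB, error) {
	db, err := sql.Open("pgx", address)
	if err != nil {
		return nil, err
	}
	db.SetMaxIdleConns(maxIdle)        // 1: keep a single idle connection between gathers
	db.SetMaxOpenConns(maxOpen)        // 1: the plugin never needs more than one open connection
	db.SetConnMaxLifetime(maxLifetime) // 0s: never expire, i.e. a persistent connection
	return db, nil
}
```

With the defaults shown in init(), the plugin would hold at most one open connection and never recycle it unless max_lifetime is configured.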