This is an automated email from the ASF dual-hosted git repository.

potiuk pushed a commit to branch development
in repository https://gitbox.apache.org/repos/asf/airflow-pgbouncer-exporter.git
commit 3f96629aca73793a05a876d9f24bf29d5d616469
Author: Juraj Bubniak <[email protected]>
AuthorDate: Thu Oct 22 12:34:11 2020 +0200

    Add support for default constant prometheus labels.
---
 docker-compose.yml                  |   3 +-
 internal/collector/exporter.go      | 371 ++++--------------------------------
 internal/collector/exporter_test.go |  89 +++++++--
 internal/collector/metrics.go       | 288 ++++++++++++++++++++++++++++
 internal/config/config.go           |   2 +
 internal/server/http_test.go        |  16 +-
 main.go                             |   5 +
 7 files changed, 410 insertions(+), 364 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index 0cd00e2..9766a1c 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -2,7 +2,7 @@ version: "3.7"
 services:
   postgres:
-    image: "postgres:12.1-alpine"
+    image: "postgres:12.3-alpine"
     restart: always
     environment:
       POSTGRES_USER: "postgres"
@@ -28,5 +28,6 @@ services:
       - "9127:9127"
     environment:
       DATABASE_URL: "postgres://postgres:postgres@pgbouncer:5432/pgbouncer?sslmode=disable&binary_parameters=yes"
+      DEFAULT_LABELS: "instance=pg1 env=dev"
     depends_on:
       - pgbouncer

diff --git a/internal/collector/exporter.go b/internal/collector/exporter.go
index ced8b74..9eb2fa6 100644
--- a/internal/collector/exporter.go
+++ b/internal/collector/exporter.go
@@ -3,6 +3,7 @@ package collector
 import (
 	"context"
 	"fmt"
+	"strings"
 	"sync"

 	"github.com/jbub/pgbouncer_exporter/internal/config"
@@ -31,11 +32,17 @@ var (

 type metric struct {
 	enabled bool
-	desc    *prometheus.Desc
+	name    string
+	help    string
+	labels  []string
 	valType prometheus.ValueType
 	eval    func(res *storeResult) []metricResult
 }

+func (m metric) desc(constLabels prometheus.Labels) *prometheus.Desc {
+	return prometheus.NewDesc(m.name, m.help, m.labels, constLabels)
+}
+
 type metricResult struct {
 	labels []string
 	value  float64
@@ -50,342 +57,20 @@ type storeResult struct {

 // Exporter represents pgbouncer prometheus stats exporter.
 type Exporter struct {
-	cfg     config.Config
-	stor    domain.Store
-	mut     sync.Mutex // guards Collect
-	metrics []metric
+	cfg         config.Config
+	stor        domain.Store
+	mut         sync.Mutex // guards Collect
+	constLabels prometheus.Labels
+	metrics     []metric
 }

 // New returns new Exporter.
 func New(cfg config.Config, stor domain.Store) *Exporter {
 	return &Exporter{
-		stor: stor,
-		cfg:  cfg,
-		metrics: []metric{
-			{
-				enabled: cfg.ExportStats,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemStats, "total_requests"),
-					"Total number of SQL requests pooled by pgbouncer.",
-					[]string{"database"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, stat := range res.stats {
-						results = append(results, metricResult{
-							labels: []string{stat.Database},
-							value:  float64(stat.TotalRequests),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportStats,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemStats, "total_received"),
-					"Total volume in bytes of network traffic received by pgbouncer.",
-					[]string{"database"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, stat := range res.stats {
-						results = append(results, metricResult{
-							labels: []string{stat.Database},
-							value:  float64(stat.TotalReceived),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportStats,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemStats, "total_sent"),
-					"Total volume in bytes of network traffic sent by pgbouncer.",
-					[]string{"database"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, stat := range res.stats {
-						results = append(results, metricResult{
-							labels: []string{stat.Database},
-							value:  float64(stat.TotalSent),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportStats,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemStats, "total_query_time"),
-					"Total number of microseconds spent by pgbouncer when actively connected to PostgreSQL.",
-					[]string{"database"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, stat := range res.stats {
-						results = append(results, metricResult{
-							labels: []string{stat.Database},
-							value:  float64(stat.TotalQueryTime),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportStats,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemStats, "total_xact_time"),
-					"Total number of microseconds spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries.",
-					[]string{"database"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, stat := range res.stats {
-						results = append(results, metricResult{
-							labels: []string{stat.Database},
-							value:  float64(stat.TotalXactTime),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportStats,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemStats, "total_query_count"),
-					"Total number of SQL queries pooled by pgbouncer.",
-					[]string{"database"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, stat := range res.stats {
-						results = append(results, metricResult{
-							labels: []string{stat.Database},
-							value:  float64(stat.TotalQueryCount),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportStats,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemStats, "total_xact_count"),
-					"Total number of SQL transactions pooled by pgbouncer.",
-					[]string{"database"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, stat := range res.stats {
-						results = append(results, metricResult{
-							labels: []string{stat.Database},
-							value:  float64(stat.TotalXactCount),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportPools,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemPools, "active_clients"),
-					"Client connections that are linked to server connection and can process queries.",
-					[]string{"database", "user", "pool_mode"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, pool := range res.pools {
-						results = append(results, metricResult{
-							labels: []string{pool.Database, pool.User, pool.PoolMode},
-							value:  float64(pool.Active),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportPools,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemPools, "waiting_clients"),
-					"Client connections have sent queries but have not yet got a server connection.",
-					[]string{"database", "user", "pool_mode"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, pool := range res.pools {
-						results = append(results, metricResult{
-							labels: []string{pool.Database, pool.User, pool.PoolMode},
-							value:  float64(pool.Waiting),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportPools,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemPools, "active_server"),
-					"Server connections that are linked to a client.",
-					[]string{"database", "user", "pool_mode"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, pool := range res.pools {
-						results = append(results, metricResult{
-							labels: []string{pool.Database, pool.User, pool.PoolMode},
-							value:  float64(pool.ServerActive),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportPools,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemPools, "idle_server"),
-					"Server connections that are unused and immediately usable for client queries.",
-					[]string{"database", "user", "pool_mode"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, pool := range res.pools {
-						results = append(results, metricResult{
-							labels: []string{pool.Database, pool.User, pool.PoolMode},
-							value:  float64(pool.ServerIdle),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportPools,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemPools, "used_server"),
-					"Server connections that have been idle for more than server_check_delay, so they need server_check_query to run on them before they can be used again.",
-					[]string{"database", "user", "pool_mode"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, pool := range res.pools {
-						results = append(results, metricResult{
-							labels: []string{pool.Database, pool.User, pool.PoolMode},
-							value:  float64(pool.ServerUsed),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportPools,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemPools, "tested_server"),
-					"Server connections that are currently running either server_reset_query or server_check_query.",
-					[]string{"database", "user", "pool_mode"},
-					nil,
-				),
-				valType: prometheus.GaugeValue,
-				eval: func(res *storeResult) (results []metricResult) {
-					for _, pool := range res.pools {
-						results = append(results, metricResult{
-							labels: []string{pool.Database, pool.User, pool.PoolMode},
-							value:  float64(pool.ServerTested),
-						})
-					}
-					return results
-				},
-			},
-			{
-				enabled: cfg.ExportPools,
-				desc: prometheus.NewDesc(
-					fqName(SubsystemPools, "login_server"),
-					"Server connections currently in the process of logging in.",
in.", - []string{"database", "user", "pool_mode"}, - nil, - ), - valType: prometheus.GaugeValue, - eval: func(res *storeResult) (results []metricResult) { - for _, pool := range res.pools { - results = append(results, metricResult{ - labels: []string{pool.Database, pool.User, pool.PoolMode}, - value: float64(pool.ServerLogin), - }) - } - return results - }, - }, - { - enabled: cfg.ExportPools, - desc: prometheus.NewDesc( - fqName(SubsystemPools, "max_wait"), - "How long the first (oldest) client in the queue has waited, in seconds. If this starts increasing, then the current pool of servers does not handle requests quickly enough. The reason may be either an overloaded server or just too small of a pool_size setting.", - []string{"database", "user", "pool_mode"}, - nil, - ), - valType: prometheus.GaugeValue, - eval: func(res *storeResult) (results []metricResult) { - for _, pool := range res.pools { - results = append(results, metricResult{ - labels: []string{pool.Database, pool.User, pool.PoolMode}, - value: float64(pool.MaxWait), - }) - } - return results - }, - }, - { - enabled: cfg.ExportDatabases, - desc: prometheus.NewDesc( - fqName(SubsystemDatabases, "current_connections"), - "Current number of connections for this database.", - []string{"name", "pool_mode"}, - nil, - ), - valType: prometheus.GaugeValue, - eval: func(res *storeResult) (results []metricResult) { - for _, database := range res.databases { - results = append(results, metricResult{ - labels: []string{database.Name, database.PoolMode}, - value: float64(database.CurrentConnections), - }) - } - return results - }, - }, - { - enabled: cfg.ExportLists, - desc: prometheus.NewDesc( - fqName(SubsystemLists, "items"), - "List of internal pgbouncer information.", - []string{"list"}, - nil, - ), - valType: prometheus.GaugeValue, - eval: func(res *storeResult) (results []metricResult) { - for _, list := range res.lists { - results = append(results, metricResult{ - labels: []string{list.List}, - value: float64(list.Items), - }) - } - return results - }, - }, - }, + stor: stor, + cfg: cfg, + constLabels: parseLabels(cfg.DefaultLabels), + metrics: buildMetrics(cfg), } } @@ -395,7 +80,7 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { if !met.enabled { continue } - ch <- met.desc + ch <- met.desc(e.constLabels) } } @@ -422,7 +107,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { for _, res := range results { ch <- prometheus.MustNewConstMetric( - met.desc, + met.desc(e.constLabels), met.valType, res.value, res.labels..., @@ -469,6 +154,20 @@ func (e *Exporter) getStoreResult(ctx context.Context) (*storeResult, error) { return res, nil } -func fqName(subsystem string, name string) string { - return prometheus.BuildFQName(Name, subsystem, name) +func parseLabels(s string) prometheus.Labels { + if s == "" { + return nil + } + + items := strings.Split(s, " ") + res := make(prometheus.Labels, len(items)) + for _, item := range items { + if item == "" { + continue + } + if parts := strings.SplitN(item, "=", 2); len(parts) == 2 { + res[parts[0]] = parts[1] + } + } + return res } diff --git a/internal/collector/exporter_test.go b/internal/collector/exporter_test.go index 5db7cff..b825a5a 100644 --- a/internal/collector/exporter_test.go +++ b/internal/collector/exporter_test.go @@ -7,7 +7,8 @@ import ( "github.com/jbub/pgbouncer_exporter/internal/config" "github.com/jbub/pgbouncer_exporter/internal/store" - "github.com/stretchr/testify/assert" + "github.com/prometheus/client_golang/prometheus" + 
"github.com/stretchr/testify/require" ) func TestGetStoreResultExportEnabled(t *testing.T) { @@ -24,17 +25,17 @@ func TestGetStoreResultExportEnabled(t *testing.T) { ctx := context.Background() res, err := exp.getStoreResult(ctx) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, st.StatsCalled) - assert.True(t, st.PoolsCalled) - assert.True(t, st.DatabasesCalled) - assert.True(t, st.ListsCalled) + require.True(t, st.StatsCalled) + require.True(t, st.PoolsCalled) + require.True(t, st.DatabasesCalled) + require.True(t, st.ListsCalled) - assert.Equal(t, res.stats, st.Stats) - assert.Equal(t, res.pools, st.Pools) - assert.Equal(t, res.databases, st.Databases) - assert.Equal(t, res.lists, st.Lists) + require.Equal(t, res.stats, st.Stats) + require.Equal(t, res.pools, st.Pools) + require.Equal(t, res.databases, st.Databases) + require.Equal(t, res.lists, st.Lists) } func TestGetStoreResultExportDisabled(t *testing.T) { @@ -51,15 +52,65 @@ func TestGetStoreResultExportDisabled(t *testing.T) { ctx := context.Background() res, err := exp.getStoreResult(ctx) - assert.NoError(t, err) + require.NoError(t, err) - assert.False(t, st.StatsCalled) - assert.False(t, st.PoolsCalled) - assert.False(t, st.DatabasesCalled) - assert.False(t, st.ListsCalled) + require.False(t, st.StatsCalled) + require.False(t, st.PoolsCalled) + require.False(t, st.DatabasesCalled) + require.False(t, st.ListsCalled) - assert.Equal(t, res.stats, st.Stats) - assert.Equal(t, res.pools, st.Pools) - assert.Equal(t, res.databases, st.Databases) - assert.Equal(t, res.lists, st.Lists) + require.Equal(t, res.stats, st.Stats) + require.Equal(t, res.pools, st.Pools) + require.Equal(t, res.databases, st.Databases) + require.Equal(t, res.lists, st.Lists) +} + +var ( + parseLabelsCases = []struct { + name string + value string + expected prometheus.Labels + }{ + { + name: "empty", + value: "", + expected: nil, + }, + { + name: "invalid item", + value: "key", + expected: prometheus.Labels{}, + }, + { + name: "blank item", + value: "key=", + expected: prometheus.Labels{ + "key": "", + }, + }, + { + name: "single item", + value: "key=value", + expected: prometheus.Labels{ + "key": "value", + }, + }, + { + name: "multiple items", + value: "key=value key2=value2", + expected: prometheus.Labels{ + "key": "value", + "key2": "value2", + }, + }, + } +) + +func TestParseLabels(t *testing.T) { + for _, cs := range parseLabelsCases { + t.Run(cs.name, func(t *testing.T) { + labels := parseLabels(cs.value) + require.Equal(t, cs.expected, labels) + }) + } } diff --git a/internal/collector/metrics.go b/internal/collector/metrics.go new file mode 100644 index 0000000..7184018 --- /dev/null +++ b/internal/collector/metrics.go @@ -0,0 +1,288 @@ +package collector + +import ( + "github.com/jbub/pgbouncer_exporter/internal/config" + + "github.com/prometheus/client_golang/prometheus" +) + +func buildMetrics(cfg config.Config) []metric { + return []metric{ + { + enabled: cfg.ExportStats, + name: fqName(SubsystemStats, "total_requests"), + help: "Total number of SQL requests pooled by pgbouncer.", + labels: []string{"database"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, stat := range res.stats { + results = append(results, metricResult{ + labels: []string{stat.Database}, + value: float64(stat.TotalRequests), + }) + } + return results + }, + }, + { + enabled: cfg.ExportStats, + name: fqName(SubsystemStats, "total_received"), + help: "Total volume in bytes of network traffic received by 
pgbouncer.", + labels: []string{"database"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, stat := range res.stats { + results = append(results, metricResult{ + labels: []string{stat.Database}, + value: float64(stat.TotalReceived), + }) + } + return results + }, + }, + { + enabled: cfg.ExportStats, + name: fqName(SubsystemStats, "total_sent"), + help: "Total volume in bytes of network traffic sent by pgbouncer.", + labels: []string{"database"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, stat := range res.stats { + results = append(results, metricResult{ + labels: []string{stat.Database}, + value: float64(stat.TotalSent), + }) + } + return results + }, + }, + { + enabled: cfg.ExportStats, + name: fqName(SubsystemStats, "total_query_time"), + help: "Total number of microseconds spent by pgbouncer when actively connected to PostgreSQL.", + labels: []string{"database"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, stat := range res.stats { + results = append(results, metricResult{ + labels: []string{stat.Database}, + value: float64(stat.TotalQueryTime), + }) + } + return results + }, + }, + { + enabled: cfg.ExportStats, + name: fqName(SubsystemStats, "total_xact_time"), + help: "Total number of microseconds spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries.", + labels: []string{"database"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, stat := range res.stats { + results = append(results, metricResult{ + labels: []string{stat.Database}, + value: float64(stat.TotalXactTime), + }) + } + return results + }, + }, + { + enabled: cfg.ExportStats, + name: fqName(SubsystemStats, "total_query_count"), + help: "Total number of SQL queries pooled by pgbouncer.", + labels: []string{"database"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, stat := range res.stats { + results = append(results, metricResult{ + labels: []string{stat.Database}, + value: float64(stat.TotalQueryCount), + }) + } + return results + }, + }, + { + enabled: cfg.ExportStats, + name: fqName(SubsystemStats, "total_xact_count"), + help: "Total number of SQL transactions pooled by pgbouncer.", + labels: []string{"database"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, stat := range res.stats { + results = append(results, metricResult{ + labels: []string{stat.Database}, + value: float64(stat.TotalXactCount), + }) + } + return results + }, + }, + { + enabled: cfg.ExportPools, + name: fqName(SubsystemPools, "active_clients"), + help: "Client connections that are linked to server connection and can process queries.", + labels: []string{"database", "user", "pool_mode"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) (results []metricResult) { + for _, pool := range res.pools { + results = append(results, metricResult{ + labels: []string{pool.Database, pool.User, pool.PoolMode}, + value: float64(pool.Active), + }) + } + return results + }, + }, + { + enabled: cfg.ExportPools, + name: fqName(SubsystemPools, "waiting_clients"), + help: "Client connections have sent queries but have not yet got a server connection.", + labels: []string{"database", "user", "pool_mode"}, + valType: prometheus.GaugeValue, + eval: func(res *storeResult) 
+				for _, pool := range res.pools {
+					results = append(results, metricResult{
+						labels: []string{pool.Database, pool.User, pool.PoolMode},
+						value:  float64(pool.Waiting),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportPools,
+			name:    fqName(SubsystemPools, "active_server"),
+			help:    "Server connections that are linked to a client.",
+			labels:  []string{"database", "user", "pool_mode"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, pool := range res.pools {
+					results = append(results, metricResult{
+						labels: []string{pool.Database, pool.User, pool.PoolMode},
+						value:  float64(pool.ServerActive),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportPools,
+			name:    fqName(SubsystemPools, "idle_server"),
+			help:    "Server connections that are unused and immediately usable for client queries.",
+			labels:  []string{"database", "user", "pool_mode"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, pool := range res.pools {
+					results = append(results, metricResult{
+						labels: []string{pool.Database, pool.User, pool.PoolMode},
+						value:  float64(pool.ServerIdle),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportPools,
+			name:    fqName(SubsystemPools, "used_server"),
+			help:    "Server connections that have been idle for more than server_check_delay, so they need server_check_query to run on them before they can be used again.",
+			labels:  []string{"database", "user", "pool_mode"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, pool := range res.pools {
+					results = append(results, metricResult{
+						labels: []string{pool.Database, pool.User, pool.PoolMode},
+						value:  float64(pool.ServerUsed),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportPools,
+			name:    fqName(SubsystemPools, "tested_server"),
+			help:    "Server connections that are currently running either server_reset_query or server_check_query.",
+			labels:  []string{"database", "user", "pool_mode"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, pool := range res.pools {
+					results = append(results, metricResult{
+						labels: []string{pool.Database, pool.User, pool.PoolMode},
+						value:  float64(pool.ServerTested),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportPools,
+			name:    fqName(SubsystemPools, "login_server"),
+			help:    "Server connections currently in the process of logging in.",
+			labels:  []string{"database", "user", "pool_mode"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, pool := range res.pools {
+					results = append(results, metricResult{
+						labels: []string{pool.Database, pool.User, pool.PoolMode},
+						value:  float64(pool.ServerLogin),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportPools,
+			name:    fqName(SubsystemPools, "max_wait"),
+			help:    "How long the first (oldest) client in the queue has waited, in seconds. If this starts increasing, then the current pool of servers does not handle requests quickly enough. The reason may be either an overloaded server or just too small of a pool_size setting.",
+			labels:  []string{"database", "user", "pool_mode"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, pool := range res.pools {
+					results = append(results, metricResult{
+						labels: []string{pool.Database, pool.User, pool.PoolMode},
+						value:  float64(pool.MaxWait),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportDatabases,
+			name:    fqName(SubsystemDatabases, "current_connections"),
+			help:    "Current number of connections for this database.",
+			labels:  []string{"name", "pool_mode"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, database := range res.databases {
+					results = append(results, metricResult{
+						labels: []string{database.Name, database.PoolMode},
+						value:  float64(database.CurrentConnections),
+					})
+				}
+				return results
+			},
+		},
+		{
+			enabled: cfg.ExportLists,
+			name:    fqName(SubsystemLists, "items"),
+			help:    "List of internal pgbouncer information.",
+			labels:  []string{"list"},
+			valType: prometheus.GaugeValue,
+			eval: func(res *storeResult) (results []metricResult) {
+				for _, list := range res.lists {
+					results = append(results, metricResult{
+						labels: []string{list.List},
+						value:  float64(list.Items),
+					})
+				}
+				return results
+			},
+		},
+	}
+}
+
+func fqName(subsystem string, name string) string {
+	return prometheus.BuildFQName(Name, subsystem, name)
+}

diff --git a/internal/config/config.go b/internal/config/config.go
index 0df6e9d..0a5e573 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -16,6 +16,7 @@ func LoadFromCLI(ctx *cli.Context) Config {
 		ExportPools:     ctx.Bool("export-pools"),
 		ExportDatabases: ctx.Bool("export-databases"),
 		ExportLists:     ctx.Bool("export-lists"),
+		DefaultLabels:   ctx.String("default-labels"),
 	}
 }

@@ -30,4 +31,5 @@ type Config struct {
 	ExportPools     bool
 	ExportDatabases bool
 	ExportLists     bool
+	DefaultLabels   string
 }

diff --git a/internal/server/http_test.go b/internal/server/http_test.go
index ac54bdb..01563c3 100644
--- a/internal/server/http_test.go
+++ b/internal/server/http_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/jbub/pgbouncer_exporter/internal/store"

 	"github.com/prometheus/common/expfmt"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 const (
@@ -154,28 +154,28 @@ func TestResponseContainsMetrics(t *testing.T) {
 			client := srv.Client()

 			resp, err := client.Get(srv.URL + cfg.TelemetryPath)
-			assert.NoError(t, err)
+			require.NoError(t, err)
 			defer resp.Body.Close()

 			metrics, err := parser.TextToMetricFamilies(resp.Body)
-			assert.NoError(t, err)
+			require.NoError(t, err)

 			if cfg.ExportPools {
-				assert.True(t, st.PoolsCalled)
+				require.True(t, st.PoolsCalled)
 			}
 			if cfg.ExportStats {
-				assert.True(t, st.StatsCalled)
+				require.True(t, st.StatsCalled)
 			}
 			if cfg.ExportDatabases {
-				assert.True(t, st.DatabasesCalled)
+				require.True(t, st.DatabasesCalled)
 			}
 			if cfg.ExportLists {
-				assert.True(t, st.ListsCalled)
+				require.True(t, st.ListsCalled)
 			}

 			for _, expMetric := range testCase.metrics {
 				if _, ok := metrics[expMetric]; !ok {
-					assert.FailNow(t, "metric not found", expMetric)
+					require.FailNow(t, "metric not found", expMetric)
 				}
 			}
 		})

diff --git a/main.go b/main.go
index b3d8509..9ef8fb5 100644
--- a/main.go
+++ b/main.go
@@ -65,6 +65,11 @@ func main() {
 			EnvVars: []string{"STORE_TIMEOUT"},
 			Value:   time.Second * 2,
 		},
+		&cli.StringFlag{
+			Name:    "default-labels",
+			Usage:   "Default prometheus labels applied to all metrics. Format: label1=value1 label2=value2",
+			EnvVars: []string{"DEFAULT_LABELS"},
+		},
 	},
 	Commands: []*cli.Command{
 		cmd.Server,
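
[Editorial note, not part of the commit] A minimal sketch of how the new DEFAULT_LABELS setting is expected to behave: the exporter splits the space-separated key=value pairs with parseLabels and passes the result as constant labels to every metric descriptor via prometheus.NewDesc. The standalone program, the parseDefaultLabels helper name, and the "pgbouncer" metric namespace below are illustrative assumptions only.

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/prometheus/client_golang/prometheus"
    )

    // parseDefaultLabels mirrors the parseLabels helper added in this commit:
    // space-separated key=value pairs; items without "=" are silently skipped.
    func parseDefaultLabels(s string) prometheus.Labels {
    	if s == "" {
    		return nil
    	}
    	res := make(prometheus.Labels)
    	for _, item := range strings.Split(s, " ") {
    		if parts := strings.SplitN(item, "=", 2); len(parts) == 2 {
    			res[parts[0]] = parts[1]
    		}
    	}
    	return res
    }

    func main() {
    	// Same value as DEFAULT_LABELS in docker-compose.yml.
    	constLabels := parseDefaultLabels("instance=pg1 env=dev")

    	// Every descriptor keeps its own variable labels ("database" here)
    	// and additionally carries the constant labels; the metric name
    	// assumes the exporter namespace is "pgbouncer".
    	desc := prometheus.NewDesc(
    		"pgbouncer_stats_total_requests",
    		"Total number of SQL requests pooled by pgbouncer.",
    		[]string{"database"},
    		constLabels,
    	)
    	fmt.Println(desc) // prints the descriptor, including the constant labels
    }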
