Use of Push Gateway & Golang

I’ve got an ETL loader written in Go that acts like a batch job: it would normally be started via a scheduler every couple of minutes, run for a couple of minutes, and then die…

Idea is to use the push gateway…


Short lived batch jobs are the main purpose of the Push Gateway

1 Like

so ye… then I’m going down the right path, now to just figure out why it’s not working…

I’m going to try and post my code here as formatted text… let’s see if someone can advise why I’m not getting any of my metrics on http://:9091/metrics …

/*
 *	File		: main.go
 *	Created		: 27 March 2023
 *	Description	: Quick & dirty wrapper for Prometheus (Pushgateway) and the Go client library, to figure out how to back-port it into fs_loader
 *	Modified	: 29 March 2023	- Start
 *	By		: George Leonard
 */
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)
type metrics struct {
	completionTime prometheus.Gauge
	successTime    prometheus.Gauge
	duration       prometheus.Gauge
	records        prometheus.Gauge

	info          *prometheus.GaugeVec
	sql_duration  *prometheus.HistogramVec
	rec_duration  *prometheus.HistogramVec
	api_duration  *prometheus.HistogramVec
	req_processed *prometheus.CounterVec

var (

	// We use a registry here to benefit from the consistency checks that
	// happen during registration.
	reg    = prometheus.NewRegistry()
	m      = NewMetrics(reg)
	pusher = push.New("", "pushgateway").Gatherer(reg)

func NewMetrics(reg prometheus.Registerer) *metrics {

	m := &metrics{
		// Example metrics
		completionTime: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "db_backup_last_completion_timestamp_seconds",
			Help: "The timestamp of the last completion of a DB backup, successful or not.",

		successTime: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "db_backup_last_success_timestamp_seconds",
			Help: "The timestamp of the last successful completion of a DB backup.",

		duration: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "db_backup_duration_seconds",
			Help: "The duration of the last DB backup in seconds.",

		records: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "db_backup_records_processed",
			Help: "The number of records processed in the last DB backup.",

		// My wrapper, for my metrics from my app
		info: prometheus.NewGaugeVec(prometheus.GaugeOpts{ // Shows value, can go up and down
			Name: "txn_count",
			Help: "Target amount for completed requests",
		}, []string{"batch"}),

		sql_duration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ // used to store timed values
			Name: "fs_sql_duration_seconds",
			Help: "Duration of the sql requests",
			// 4 times larger apdex status
			// Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
			// Buckets: prometheus.LinearBuckets(0.1, 5, 15),
			Buckets: []float64{0.1, 0.5, 1, 5, 10, 100},
		}, []string{"batch"}),

		api_duration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Name:    "fs_api_duration_seconds",
			Help:    "Duration of the api requests",
			Buckets: []float64{0.00001, 0.000015, 0.00002, 0.000025, 0.00003},
		}, []string{"batch"}),

		rec_duration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Name:    "fs_etl_operations_seconds",
			Help:    "Duration of the entire requests",
			Buckets: []float64{0.001, 0.0015, 0.002, 0.0025, 0.01},
		}, []string{"batch"}),

		req_processed: prometheus.NewCounterVec(prometheus.CounterOpts{ // can only go up/increment, but usefull combined with rate, resets to zero at restart.
			Name: "fs_etl_operations_total",
			Help: "Number of completed requests.",
		}, []string{"batch"}),

	reg.MustRegister(, m.sql_duration, m.api_duration, m.rec_duration, m.req_processed)

	return m

// performBackup simulates the long-running batch job. It sleeps for a
// random duration in [0, 1000) milliseconds and reports a fixed count of
// 42 backed-up records with a nil error.
func performBackup() (int, error) {

	// Perform the backup and return the number of backed up records and any
	// applicable error.
	// ...

	n := rand.Intn(1000) // if vGeneral.sleep = 1000, then n will be a random value between 0 and 1000 ms, i.e. 0 to 1 second
	fmt.Printf("Sleeping %d Millisecond...\n", n)
	time.Sleep(time.Duration(n) * time.Millisecond)

	return 42, nil
}

func mRun() {

	var todo_count = 40

	for count := 0; count < todo_count; count++ {
		// Note that successTime is not registered.

		start := time.Now()
		n, err := performBackup() // execute the long running batch job.
		m.records.Set(float64(n)) // How many files back'd up, return variable

		// Note that time.Since only uses a monotonic clock in Go1.9+.
		m.duration.Set(time.Since(start).Seconds()) // execution time

		if err != nil {
			fmt.Println("DB backup failed:", err)

		} else {
			// Add successTime to pusher only in case of success.
			// We could as well register it with the registry.
			// This example, however, demonstrates that you can
			// mix Gatherers and Collectors when handling a Pusher.

			//pusher.Collector(m.successTime) // as we're inside a loop don't use this otherwise it tries to readd the metric to be collected.


		// Add is used here rather than Push to not delete a previously pushed
		// success timestamp in case of a failure of this backup.
		if err := pusher.Add(); err != nil {
			fmt.Println("Could not push to Pushgateway:", err)

func main() {



P.S. This batch job can run for under a second (mostly) or a couple of seconds, up to a couple of minutes (a horror situation),


hi all…

I placed the more intended “pull” based example in GitHub - georgelza/prom_wrapper_2: Simple Prometheus TestApp for Golang - Pull method 2. and then a “push” based example via a push gateway in GitHub - georgelza/prom_wrapper_pg: Simple Prometheus TestApp for Golang - Push Gateway method
anyone is welcome to use as needed.
This is how I unpacked it for myself, as examples I can share with some of our developers to show them how… I also created a Grafana dashboard of some of these values locally, which I can share with our devs via a JSON file.