I am trying to write a program which exposes Prometheus metrics.
It is a simple program where I want to increment a counter every time the "run" method is called on my struct.
import (
"log"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
type myStruct struct {
errorCount prometheus.Counter
}
func (s myStruct) initialize() {
s.errorCount = prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter",
Help: "sample prometheus counter",
})
}
func (s myStruct) run() {
s.errorCount.Add(1)
}
func main() {
s := new(myStruct)
s.initialize()
http.Handle("/metrics", promhttp.Handler())
go func() {
for {
s.run()
time.Sleep(time.Second)
}
}()
log.Fatal(http.ListenAndServe(":8080", nil))
}
The above code fails with a "Failed to continue - bad access" error every time I try to increment the counter, i.e. at this line:
s.errorCount.Add(1)
I am unable to determine why the counter suddenly disappears from memory (if I'm understanding the error message correctly).
I am trying to determine whether I am missing something fundamental w.r.t. Go, or whether I am using the prometheus client library incorrectly.
In initialize(), s is passed by value, which means that in main() s.errorCount is still nil.
Just change the declaration of initialize (and run) to take a pointer receiver:
func (s *myStruct) initialize() {
...
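For completeness, here is a sketch of both methods with pointer receivers. The MustRegister call is my addition - the question's snippet never registers the counter, so it would not appear at /metrics even once the nil field is fixed:
func (s *myStruct) initialize() {
	s.errorCount = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "my_counter",
		Help: "sample prometheus counter",
	})
	// Not in the original snippet: the counter must also be registered,
	// otherwise promhttp.Handler() will never expose it.
	prometheus.MustRegister(s.errorCount)
}
func (s *myStruct) run() {
	s.errorCount.Add(1)
}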
A few more suggestions you might like to try:
func init() {
go func() {
http.Handle("/metrics", promhttp.Handler())
log.Fatal(http.ListenAndServe(":8080", nil))
}()
}
type myStruct struct {
errorCount prometheus.Counter
}
func NewMyStruct() *myStruct {
return &myStruct {
errorCount: prometheus.NewCounter(prometheus.CounterOpts {
Name: "my_counter",
Help: "sample prometheus counter",
}),
}
}
func (s *myStruct) run() {
s.errorCount.Add(1)
}
func main() {
s := NewMyStruct()
go func() {
for {
s.run()
time.Sleep(time.Second)
}
}()
// ... OR select{}
}
Related
Here is an example of my code. Now I want to add a histogram to it,
but I can't find a way to add a histogram like this.
Could anybody help me?
I am able to write a standalone histogram sample, but I can't add it to my code below.
package main
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net/http"
)
type fooCollector struct {
fooMetric *prometheus.Desc
}
func newFooCollector(label1 string) *fooCollector {
return &fooCollector{
fooMetric: prometheus.NewDesc("fff_metric",
"Shows whether a foo has occurred in our cluster",
nil, prometheus.Labels{"env":label1},
),
}
}
func (collector *fooCollector) Describe(ch chan<- *prometheus.Desc) {
//Update this section with the each metric you create for a given collector
ch <- collector.fooMetric
}
func (collector *fooCollector) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(collector.fooMetric, prometheus.GaugeValue, 111111)
}
func main() {
prometheus.MustRegister(newFooCollector("dev"))
http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(":80", nil)
}
Finally I learned how the histogram works. Here is my code:
package main
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net/http"
)
type fooCollector struct {
fooMetric *prometheus.Desc
}
// First, we define the histogram variable
var (
hbrms_histovec = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "hbrms_histogram",
Help: "hbrms_histogram",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Buckets: prometheus.ExponentialBuckets(50, 1.3, 15), // 15 buckets, starting at 50, each 1.3x the previous
},
[]string{"env"},
)
)
func newFooCollector() *fooCollector {
return &fooCollector{
fooMetric: prometheus.NewDesc("fff_metric",
"Shows whether a foo has occurred in our cluster",
nil, nil,
),
}
}
func (collector *fooCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- collector.fooMetric
}
func (collector *fooCollector) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(collector.fooMetric, prometheus.CounterValue, float64(1))
// Second, we set the metric this way instead of writing it to the channel; Collect runs every time the /metrics URL is visited.
hbrms_histovec.WithLabelValues("val1").Observe(float64(10))
}
func main() {
reg := prometheus.NewPedanticRegistry()
reg.MustRegister(newFooCollector())
// Finally, we register the hbrms_histovec metric on the same registry
reg.MustRegister(hbrms_histovec)
gatherers := prometheus.Gatherers{reg}
h := promhttp.HandlerFor(gatherers,
promhttp.HandlerOpts{
ErrorHandling: promhttp.ContinueOnError,
})
http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
h.ServeHTTP(w, r)
})
http.ListenAndServe(":80", nil)
}
You can simply call the .Collect(ch) method for each metric (that includes both the description and the values). You also don't need to extend the default prometheus route handler - just be careful that you don't have collisions with the default metric names.
package main
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net/http"
)
type fooCollector struct {
fooMetric *prometheus.GaugeVec
hmdrsHistogram *prometheus.HistogramVec
}
func newFooCollector() *fooCollector {
return &fooCollector{
fooMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "fff_metric",
Help: "Shows whether a foo has occurred in our cluster",
}, []string{"country"}),
hmdrsHistogram: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "hbrms_histogram",
Help: "hbrms_histogram",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Buckets: prometheus.ExponentialBuckets(50, 1.3, 15), //50*1.3,15times
},
[]string{"env"},
),
}
}
func (collector *fooCollector) Describe(ch chan<- *prometheus.Desc) {
// don't need to manually call .Describe() here,
// because description was defined with prometheus.MustRegister method
//collector.fooMetric.Describe(ch)
//collector.hmdrsHistogram.Describe(ch)
}
func (collector *fooCollector) Collect(ch chan<- prometheus.Metric) {
v := 14 // get value from DB/External service/etc
collector.fooMetric.WithLabelValues("qwe").Set(float64(v))
collector.fooMetric.Collect(ch)
collector.hmdrsHistogram.WithLabelValues("val1").Observe(float64(10))
collector.hmdrsHistogram.Collect(ch)
}
func RegisterFooCollector() {
fc := newFooCollector()
prometheus.MustRegister(fc)
}
func main() {
RegisterFooCollector()
// also includes default metrics
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(":80", nil)
if err != nil {
return
}
}
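If you would rather not expose the default Go and process metrics at all (and remove any chance of a name collision), a minimal variation of the main above is to serve a dedicated registry instead of the global one; the collector stays exactly as defined in this answer:
func main() {
	reg := prometheus.NewRegistry() // dedicated registry: no default Go/process collectors
	reg.MustRegister(newFooCollector())
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	err := http.ListenAndServe(":80", nil)
	if err != nil {
		return
	}
}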
I would like to dynamically reload a config file without restarting my Go app. I wrote the files below, which run but have a data race.
config.go
package main
import (
"github.com/fsnotify/fsnotify"
"github.com/spf13/viper"
"log"
"sync"
"time"
)
var (
reloadConfig = make(chan string)
reloadConfig2 = make(chan string)
viperLock1 sync.Mutex
viperLock2 sync.Mutex
)
func setUpConfig(file string, merge bool, v *viper.Viper) {
v.AddConfigPath("./")
v.SetConfigName(file)
v.SetConfigType("yml")
if merge {
err1 := v.MergeInConfig()
checkForFatalError("fatal error occurred while reading config file!", err1)
} else {
err := v.ReadInConfig()
checkForFatalError("fatal error occurred while reading config file!", err)
}
log.Println("Initial config value: ", v.GetString("env"))
}
func loadConfigDynamically(configChannel chan string, viperLock *sync.Mutex, vipe *viper.Viper) {
viperLock.Lock()
vipe.OnConfigChange(func(e fsnotify.Event) {
viperLock.Lock()
log.Println("config file changed", e.Name)
environment := vipe.GetString("env")
configChannel <- environment
viperLock.Unlock()
})
viperLock.Unlock()
vipe.WatchConfig()
}
func loadMultipleConfigsDynamically() {
go func() {
time.Sleep(time.Millisecond * 50)
vipe2 := viper.New()
setUpConfig("config_base", false, vipe2)
loadConfigDynamically(reloadConfig2, &viperLock2, vipe2)
time.Sleep(time.Millisecond * 50)
vipe1 := viper.New()
setUpConfig("config", false, vipe1)
loadConfigDynamically(reloadConfig, &viperLock1, vipe1)
}()
}
main.go
package main
import (
log "github.com/sirupsen/logrus"
"os"
"os/signal"
"syscall"
)
var reloadConfigNow = make(chan bool)
var reloadConfigAgain = make(chan bool)
var newConfigValue string
func main() {
loadMultipleConfigsDynamically()
go printUpdatedValueOnly()
go justAnotherGoroutine()
go yetAnotherGoroutine()
shutdownAppGracefully()
}
func printUpdatedValueOnly() {
for {
select {
case updatedValue := <-reloadConfig:
newConfigValue = updatedValue
log.Println("dynamically loaded config value: ", updatedValue)
reloadConfigNow <-true
reloadConfigAgain <-true
case updatedValue1 := <-reloadConfig2:
newConfigValue = updatedValue1
log.Println("dynamically loaded config value: ", updatedValue1)
reloadConfigNow <-true
reloadConfigAgain <-true
default:
}
}
}
func justAnotherGoroutine(){
existingConfigValue := ""
for {
select {
case <-reloadConfigNow:
existingConfigValue = newConfigValue
log.Println("justAnotherGoroutine: ", existingConfigValue)
default:
}
}
}
func yetAnotherGoroutine() {
existingConfigValue := ""
for {
select {
case <-reloadConfigAgain:
existingConfigValue = newConfigValue
log.Println("yetAnotherGoroutine: ", existingConfigValue)
default:
}
}
}
func checkForFatalError(errorMsg string, err error) {
if err != nil {
log.Fatal(errorMsg, err)
}
}
func shutdownAppGracefully() {
killSignal := make(chan os.Signal, 1)
signal.Notify(killSignal, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)
k := <-killSignal
log.Info("OS Interrupt Signal received, application is shutting down!")
logSystemInterruptType(k)
}
func logSystemInterruptType(osInterrupt os.Signal) {
switch osInterrupt {
case syscall.SIGHUP:
log.Info("SIGHUP")
case syscall.SIGINT:
log.Info("SIGINT")
case syscall.SIGTERM:
log.Info("SIGTERM")
case syscall.SIGQUIT:
log.Info("SIGQUIT")
default:
log.Info("Unknown OS Interrupt")
}
}
config.yml
env : "LOCAL"
config_base.yml
env : "dev15"
go.mod
module reload_config
go 1.16
require (
github.com/fsnotify/fsnotify v1.4.9
github.com/spf13/viper v1.8.1
)
I learned recently that viper is not thread safe and hence needs to be wrapped with a mutex, which is what I tried to do. In config.go, in func loadConfigDynamically, the line where I set OnConfigChange is reported as the data-race read, and the same line in the same function is reported as the previous write. I run the above package with
go run -race reload_config
and change the value of env in config.yml to test whether the config file reloads dynamically. The data race only occurs the very first time the config is reloaded dynamically; on subsequent reloads it works just fine.
You lock viperLock, call vipe.WatchConfig(), and set vipe.OnConfigChange with a function that also locks viperLock.
Because vipe.WatchConfig() has already been called, it starts invoking vipe.OnConfigChange from a separate goroutine, which then tries to acquire the same lock. That's why there is a race condition.
Call vipe.WatchConfig() only after setting vipe.OnConfigChange and after releasing the lock.
It should be corrected as below.
func loadConfigDynamically() {
go func() {
time.Sleep(time.Second)
viperLock.Lock()
vipe.OnConfigChange(func(e fsnotify.Event) {
viperLock.Lock()
log.Println("config file changed", e.Name)
environment := vipe.GetString("env")
reloadConfig <- environment
viperLock.Unlock()
})
viperLock.Unlock()
vipe.WatchConfig() //this starting call vipe.OnConfigChange
}()
}
It could be that Go's race detector sees a variable being modified and accessed by two goroutines at the same time, with no lock protecting both the write and the read sites.
Something like the following example:
package main
import (
"time"
)
type Foo struct {
f func(string)
}
func (f *Foo) Watch() {
go func() {
for {
time.Sleep(time.Second * 2)
if f.f != nil {
f.f("hello world")
}
}
}()
}
func (f *Foo) SetF(fun func(string)) {
f.f = fun
}
func main() {
f := Foo{}
f.Watch()
f.SetF(func(s string) {
})
time.Sleep(time.Second * 5)
}
It has a data race. If I put the same lock around both the writing and the reading places, there is no data race:
package main
import (
"sync"
"time"
)
var lock sync.Mutex
type Foo struct {
f func(string)
}
func (f *Foo) Watch() {
go func() {
for {
time.Sleep(time.Second * 2)
lock.Lock() // read places
if f.f != nil {
f.f("hello world")
}
lock.Unlock()
}
}()
}
func (f *Foo) SetF(fun func(string)) {
f.f = fun
}
func main() {
f := Foo{}
f.Watch()
lock.Lock() // write places
f.SetF(func(s string) {
})
lock.Unlock()
time.Sleep(time.Second * 5)
}
Or eliminating the possibility of two goroutines reading and writing at the same time also works fine:
func main() {
f := Foo{}
f.SetF(func(s string) {
})
f.Watch()
time.Sleep(time.Second * 5)
}
1) How does Go handle the visibility issue?
2) Are there any issues with the code below?
package main
type Service struct {
stop bool
}
func (s *Service) Run() {
for !s.stop {
//Some logic
}
}
func (s *Service) Stop() {
s.stop = true
}
func main() {
s := &Service{}
go s.Run()
//Some logic
s.Stop()
}
I recommend using context.WithCancel to stop goroutines in this case.
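A minimal sketch of what that could look like for the Service above (the ctx parameter and the Sleep are my additions, not part of the original code). Unlike the plain bool, the receive from ctx.Done() also provides the memory-visibility guarantee the first question asks about, because channel operations synchronize under the Go memory model:
package main

import (
	"context"
	"time"
)

type Service struct{}

// Run loops until the context is cancelled. ctx.Done() returns a channel
// that is closed when cancel() is called.
func (s *Service) Run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		default:
			// Some logic
		}
	}
}

func main() {
	s := &Service{}
	ctx, cancel := context.WithCancel(context.Background())
	go s.Run(ctx)
	// Some logic
	time.Sleep(100 * time.Millisecond)
	cancel() // signal Run to stop
}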
I just want to run repetitive background tasks in Go using time.AfterFunc, but something seems wrong with the logic.
The output is just:
interval call
interval call
but the function should have been called at least 5 times if everything went normally.
package main
import (
"fmt"
"time"
"os"
"os/signal"
)
type Timer struct {
Queue chan *TimeCall
}
func NewTimer(l int) *Timer {
timer := new(Timer)
timer.Queue = make(chan *TimeCall,l)
return timer
}
type TimeCall struct {
timer *time.Timer
callback func()
}
func (this *TimeCall) CallBack() {
defer func() { recover() }()
if this.callback != nil {
this.callback()
}
}
func (this *Timer) AfterFunc(d time.Duration, callback func()) *TimeCall {
call := new(TimeCall)
call.callback = callback
call.timer = time.AfterFunc(d, func() {
this.Queue <- call
})
return call
}
type PipeService struct {
TimeCall *Timer
}
func (this *PipeService) AfterFunc(delay time.Duration, callback func()) *TimeCall {
return this.TimeCall.AfterFunc(delay, callback)
}
func (this *PipeService) IntervalCall(interval time.Duration, callback func()) {
this.TimeCall.AfterFunc(interval,func(){
if callback != nil {
callback()
}
this.AfterFunc(interval,callback)
})
}
func (this *PipeService) Run(closeSig chan bool) {
for {
select {
case <-closeSig:
return
case call := <-this.TimeCall.Queue:
call.CallBack()
}
}
}
func main() {
var closeChan chan bool
InsPipeService := &PipeService{TimeCall: NewTimer(10)}
InsPipeService.IntervalCall(2*time.Second,func(){
fmt.Println("interval call")
})
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, os.Kill)
go func(){
InsPipeService.Run(closeChan)
}()
time.Sleep(10*time.Second)
}
time.AfterFunc() returns a *time.Timer, quoting form its doc:
The Timer type represents a single event. When the Timer expires, the current time will be sent on C, unless the Timer was created by AfterFunc.
The time.Timer returned by time.AfterFunc() does not repeat, so what you see is perfectly normal: in your PipeService.IntervalCall() you execute the callback immediately, and it gets executed after the timeout.
Also note that you pass 2 as the interval for the PipeService.IntervalCall() method. This interval parameter is of type time.Duration, so when you pass 2, that won't be 2 seconds (but actually 2 nanoseconds). You should pass a value constructed from the constants in the time package, like:
InsPipeService.IntervalCall(2 * time.Second, func(){
fmt.Println("interval call")
})
If you want repetition, use time.Ticker. For example, the following code prints a message every 2 seconds:
t := time.NewTicker(2 * time.Second)
for now := range t.C {
fmt.Println("tick", now)
}
Or simply if you don't need the Ticker and you don't want to shut it down:
c := time.Tick(2 * time.Second)
for now := range c {
fmt.Println("tick", now)
}
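If you do want to be able to shut the ticker down (for example from the closeSig channel the question already has), here is a small sketch of how Run could look with a Ticker instead of the timer queue; the receiver and channel names mirror the question, but the adaptation is mine, not part of the original code:
func (this *PipeService) RunTicker(interval time.Duration, closeSig chan bool, callback func()) {
	t := time.NewTicker(interval)
	defer t.Stop() // release the ticker's resources when we return
	for {
		select {
		case <-closeSig:
			return
		case <-t.C:
			callback()
		}
	}
}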
Set a time interval and then call Start; it will run the user Job on every interval. Set Enabled to false to stop it.
My sample:
package main
import (
"fmt"
"sync"
"time"
)
type IntervalTimer struct {
Interval time.Duration
Enabled bool
Job func()
Wg sync.WaitGroup
}
func (it *IntervalTimer) isr() {
if it.Enabled {
it.Job()
time.AfterFunc(it.Interval, it.isr)
} else {
it.Wg.Done()
}
}
//trigger
func (it *IntervalTimer) Start() {
if it.Enabled {
it.Wg.Add(1)
time.AfterFunc(it.Interval, it.isr)
}
}
// user code:
var n int = 5
var it *IntervalTimer
func userTask() {
fmt.Println(n, time.Now()) // do user job ...
n--
if n == 0 {
it.Enabled = false
}
}
func main() {
it = &IntervalTimer{Interval: 500 * time.Millisecond, Enabled: true, Job: userTask}
it.Start()
//do some job ...
it.Wg.Wait()
fmt.Println("Bye")
}
I'm attempting to write a test for my package and failing at comparing funcs. Here's essentially what I'm doing.
package main
import (
"fmt"
"reflect"
)
type HandlerFunc func(cmd interface{})
type Bus struct {
handlers map[reflect.Type]HandlerFunc
}
func (bus *Bus) RegisterHandler(cmd interface{}, handler HandlerFunc) {
bus.handlers[reflect.TypeOf(cmd)] = handler
}
func (bus *Bus) GetHandler(cmd interface{}) HandlerFunc {
t := reflect.TypeOf(cmd)
for kind, handler := range bus.handlers {
if t == kind {
return handler
}
}
return nil
}
func New() *Bus {
return &Bus{
handlers: make(map[reflect.Type]HandlerFunc),
}
}
type RegisterUserCommand struct {}
func main() {
bus := New()
handler := func (cmd interface{}) {}
bus.RegisterHandler(&RegisterUserCommand{}, handler)
retrieved := bus.GetHandler(&RegisterUserCommand{})
if retrieved != handler {
fmt.Println("Not the same!")
return
}
fmt.Println("Same!")
}
Comparing retrieved with handler causes the following error:
invalid operation: (func(interface {}))(retrieved) != handler (func can only be compared to nil)
How can I properly test that the function I'm retrieving is the same one added previously?
Given that you can't compare functions, you can write your test in a different way. You can make handler set a boolean value in your test and check that you've got the right function by calling it and seeing if the boolean changes.
Here's an example:
func main() {
bus := New()
called := false
handler := func (cmd interface{}) { called = true }
bus.RegisterHandler(&RegisterUserCommand{}, handler)
bus.GetHandler(&RegisterUserCommand{})(nil)
if called {
fmt.Println("We got the right handler!")
return
}
fmt.Println("We didn't get the right handler")
}
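An alternative sometimes used in tests (not part of the answer above) is to compare the underlying code pointers via reflect. The reflect documentation warns that a func's Pointer() value is not guaranteed to uniquely identify a single function, so treat this as a pragmatic, test-only check:
// sameFunc reports whether two handlers share the same underlying code pointer.
func sameFunc(a, b HandlerFunc) bool {
	return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer()
}
With that helper, the original comparison becomes: if sameFunc(retrieved, handler) { fmt.Println("Same!") }.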