Why doesn't my counter work in "uber-go/tally"?

Can you please help me figure out why my event counters don't get reported? I even put a debugger on the reporter and it never triggers for the counters. The timer triggers fine, though.
Here is the implementation.
I have a Kafka consumer in which I have defined the following (consumer.go):
reporter := metrics.NewPrintStatsReporter()
// Report once per second.
rootScope, closer := tally.NewRootScope(tally.ScopeOptions{
    Reporter: reporter,
}, time.Second)
defer func(closer io.Closer) {
    err := closer.Close()
    if err != nil {
        ...
    }
}(closer)
subScope := rootScope.SubScope("Events")
then I do:
supportedEventCount := subScope.Counter(metrics.SupportedEventTypes)
unSupportedEventCount := subScope.Counter(metrics.UnsupportedEventTypes)
totalEventTypes := subScope.Counter(metrics.TotalEventTypes)
kafkaReadLatency := rootScope.Timer(metrics.KafkaReadLatency)
In the same file:
go func() {
    defer close(messages)
    for { ...
        st := time.Now()
        m, err := c.r.ReadMessage(ctx)
        kafkaConsumerReadLatency := time.Since(st)
        if kafkaConsumerReadLatency < 5*time.Minute {
            kafkaReadLatency.Record(kafkaConsumerReadLatency)
        }
        // Reporting any kind of event
        totalEventTypes.Inc(1)
        if helpers.IsSupportedEvent(logger, &kafkaEvent) {
            supportedEventCount.Inc(1)
            messages <- kafkaEvent
        } else {
            unSupportedEventCount.Inc(1)
        }
    }
}()
...
kafkaReadLatency.Record(kafkaConsumerReadLatency) works fine: I get timer kafka_read_latency 34.479058541s.
However, none of the counters are reported, which is very strange.
Here is my tally StatsReporter implementation (very standard, it matches the supplied example):
package metrics

import (
    "fmt"
    tally "github.com/uber-go/tally/v4"
    "time"
)

const (
    SupportedEventTypes   = "supported_event_types"
    UnsupportedEventTypes = "unsupported_event_types"
    TotalEventTypes       = "total_event_types"
    KafkaReadLatency      = "kafka_read_latency"
)

type printStatsReporter struct{}

func NewPrintStatsReporter() tally.StatsReporter {
    return &printStatsReporter{}
}

func (r *printStatsReporter) ReportCounter(name string, _ map[string]string, value int64) {
    fmt.Printf("count %s %d\n", name, value)
}

func (r *printStatsReporter) ReportGauge(name string, _ map[string]string, value float64) {
    fmt.Printf("gauge %s %f\n", name, value)
}

func (r *printStatsReporter) ReportTimer(name string, _ map[string]string, interval time.Duration) {
    fmt.Printf("timer %s %s\n", name, interval.String())
}

...

func (r *printStatsReporter) Capabilities() tally.Capabilities {
    return r
}

func (r *printStatsReporter) Reporting() bool {
    return true
}

func (r *printStatsReporter) Tagging() bool {
    return true
}

func (r *printStatsReporter) Flush() {
    fmt.Printf("flush\n")
}
Here is the example:
https://github.com/uber-go/tally/blob/master/example/main.go
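For completeness, here is a minimal, self-contained sketch that should exercise the same counter path, assuming the tally v4 API shown above and reusing the printStatsReporter from the metrics package (the "myapp/metrics" import path, scope name, and sleep durations are placeholders):
package main

import (
    "time"

    tally "github.com/uber-go/tally/v4"

    "myapp/metrics" // hypothetical module path for the metrics package shown above
)

func main() {
    reporter := metrics.NewPrintStatsReporter()
    scope, closer := tally.NewRootScope(tally.ScopeOptions{
        Reporter: reporter,
    }, time.Second)
    defer closer.Close()

    counter := scope.SubScope("events").Counter(metrics.TotalEventTypes)

    // Increment a few times, then wait past at least one reporting interval
    // so the background report loop has a chance to call ReportCounter.
    for i := 0; i < 5; i++ {
        counter.Inc(1)
        time.Sleep(300 * time.Millisecond)
    }
    time.Sleep(2 * time.Second)
}
If counters never reach ReportCounter even in this isolated loop, the problem would be in the reporting setup rather than in the Kafka code.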

Related

Data race occurs even if using sync.Mutex in map

How can I solve the data race in this piece of code? I get an error in the processPbxQueueByUniqueID func on l.Remove(frontItem). Do I need to create another struct that contains a sync.Mutex and a *list.List, and then change pbxQueueUniqueIDProcessor.processes to map[string]NewStructWithMutexAndList?
var pbxQueueUnique = newPbxQueueUniqueIDProcessor()
var pbxMutex sync.Mutex

type pbxQueueUniqueIDProcessor struct {
    processes map[string]*list.List
}

func newPbxQueueUniqueIDProcessor() *pbxQueueUniqueIDProcessor {
    return &pbxQueueUniqueIDProcessor{processes: make(map[string]*list.List)}
}

func (q *pbxQueueUniqueIDProcessor) Add(uniqueID string, item pbxQueueItem) {
    pbxMutex.Lock()
    defer pbxMutex.Unlock()
    _, ok := q.processes[uniqueID]
    if !ok {
        l := &list.List{}
        l.PushBack(item)
        q.processes[uniqueID] = l
        go processPbxQueueByUniqueID(uniqueID)
        return
    }
    q.processes[uniqueID].PushBack(item)
}

func (q *pbxQueueUniqueIDProcessor) Get(uniqueID string) *list.List {
    pbxMutex.Lock()
    defer pbxMutex.Unlock()
    return q.processes[uniqueID]
}

func (q *pbxQueueUniqueIDProcessor) RemoveFromList(uniqueID string, el *list.Element) {
    pbxMutex.Lock()
    defer pbxMutex.Unlock()
    l := q.processes[uniqueID]
    if l == nil {
        return
    }
    l.Remove(el)
}

func processPbxQueueByUniqueID(uniqueID string) {
    l := pbxQueueUnique.Get(uniqueID)
    if l == nil {
        return
    }
    for {
        frontItem := l.Front()
        if frontItem == nil {
            break
        }
        frontValue := frontItem.Value.(pbxQueueItem)
        execAcc(frontValue)
        l.Remove(frontItem)
    }
}
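One way to express the struct the question proposes is a small wrapper that owns both the mutex and the list, so every access (including Front and Remove in the worker) goes through the same lock. This is only a sketch with made-up names (lockedList, PopFront), not the original code:
package main

import (
    "container/list"
    "fmt"
    "sync"
)

// lockedList pairs a mutex with the list it protects.
type lockedList struct {
    mu sync.Mutex
    l  list.List
}

func (q *lockedList) PushBack(v interface{}) {
    q.mu.Lock()
    defer q.mu.Unlock()
    q.l.PushBack(v)
}

// PopFront removes and returns the front value, or (nil, false) when the
// list is empty, so callers never touch list internals without the lock.
func (q *lockedList) PopFront() (interface{}, bool) {
    q.mu.Lock()
    defer q.mu.Unlock()
    front := q.l.Front()
    if front == nil {
        return nil, false
    }
    q.l.Remove(front)
    return front.Value, true
}

func main() {
    var q lockedList
    q.PushBack("job-1")
    q.PushBack("job-2")
    for {
        v, ok := q.PopFront()
        if !ok {
            break
        }
        fmt.Println(v)
    }
}
With this, pbxQueueUniqueIDProcessor.processes would become map[string]*lockedList, and processPbxQueueByUniqueID would loop on PopFront instead of calling Front and Remove on the bare list.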

viper dynamically loading config file has data race

I would like to reload the config file dynamically without restarting my Go app. I wrote the files below, which run but have a data race.
config.go
package main

import (
    "github.com/fsnotify/fsnotify"
    "github.com/spf13/viper"
    "log"
    "sync"
    "time"
)

var (
    reloadConfig  = make(chan string)
    reloadConfig2 = make(chan string)
    viperLock1    sync.Mutex
    viperLock2    sync.Mutex
)

func setUpConfig(file string, merge bool, v *viper.Viper) {
    v.AddConfigPath("./")
    v.SetConfigName(file)
    v.SetConfigType("yml")
    if merge {
        err1 := v.MergeInConfig()
        checkForFatalError("fatal error occurred while reading config file!", err1)
    } else {
        err := v.ReadInConfig()
        checkForFatalError("fatal error occurred while reading config file!", err)
    }
    log.Println("Initial config value: ", v.GetString("env"))
}

func loadConfigDynamically(configChannel chan string, viperLock *sync.Mutex, vipe *viper.Viper) {
    viperLock.Lock()
    vipe.OnConfigChange(func(e fsnotify.Event) {
        viperLock.Lock()
        log.Println("config file changed", e.Name)
        environment := vipe.GetString("env")
        configChannel <- environment
        viperLock.Unlock()
    })
    viperLock.Unlock()
    vipe.WatchConfig()
}

func loadMultipleConfigsDynamically() {
    go func() {
        time.Sleep(time.Millisecond * 50)
        vipe2 := viper.New()
        setUpConfig("config_base", false, vipe2)
        loadConfigDynamically(reloadConfig2, &viperLock2, vipe2)

        time.Sleep(time.Millisecond * 50)
        vipe1 := viper.New()
        setUpConfig("config", false, vipe1)
        loadConfigDynamically(reloadConfig, &viperLock1, vipe1)
    }()
}
main.go
package main

import (
    log "github.com/sirupsen/logrus"
    "os"
    "os/signal"
    "syscall"
)

var reloadConfigNow = make(chan bool)
var reloadConfigAgain = make(chan bool)
var newConfigValue string

func main() {
    loadMultipleConfigsDynamically()
    go printUpdatedValueOnly()
    go justAnotherGoroutine()
    go yetAnotherGoroutine()
    shutdownAppGracefully()
}

func printUpdatedValueOnly() {
    for {
        select {
        case updatedValue := <-reloadConfig:
            newConfigValue = updatedValue
            log.Println("dynamically loaded config value: ", updatedValue)
            reloadConfigNow <- true
            reloadConfigAgain <- true
        case updatedValue1 := <-reloadConfig2:
            newConfigValue = updatedValue1
            log.Println("dynamically loaded config value: ", updatedValue1)
            reloadConfigNow <- true
            reloadConfigAgain <- true
        default:
        }
    }
}

func justAnotherGoroutine() {
    existingConfigValue := ""
    for {
        select {
        case <-reloadConfigNow:
            existingConfigValue = newConfigValue
            log.Println("justAnotherGoroutine: ", existingConfigValue)
        default:
        }
    }
}

func yetAnotherGoroutine() {
    existingConfigValue := ""
    for {
        select {
        case <-reloadConfigAgain:
            existingConfigValue = newConfigValue
            log.Println("yetAnotherGoroutine: ", existingConfigValue)
        default:
        }
    }
}

func checkForFatalError(errorMsg string, err error) {
    if err != nil {
        log.Fatal(errorMsg, err)
    }
}

func shutdownAppGracefully() {
    killSignal := make(chan os.Signal, 1)
    signal.Notify(killSignal, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)
    k := <-killSignal
    log.Info("OS Interrupt Signal received, application is shutting down!")
    logSystemInterruptType(k)
}

func logSystemInterruptType(osInterrupt os.Signal) {
    switch osInterrupt {
    case syscall.SIGHUP:
        log.Info("SIGHUP")
    case syscall.SIGINT:
        log.Info("SIGINT")
    case syscall.SIGTERM:
        log.Info("SIGTERM")
    case syscall.SIGQUIT:
        log.Info("SIGQUIT")
    default:
        log.Info("Unknown OS Interrupt")
    }
}
func shutdownAppGracefully() {
killSignal := make(chan os.Signal, 1)
signal.Notify(killSignal, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)
k := <-killSignal
log.Info("OS Interrupt Signal received, application is shutting down!")
logSystemInterruptType(k)
}
func logSystemInterruptType(osInterrupt os.Signal) {
switch osInterrupt {
case syscall.SIGHUP:
log.Info("SIGHUP")
case syscall.SIGINT:
log.Info("SIGINT")
case syscall.SIGTERM:
log.Info("SIGTERM")
case syscall.SIGQUIT:
log.Info("SIGQUIT")
default:
log.Info("Unknown OS Interrupt")
}
}
config.yml
env : "LOCAL"
config_base.yml
env : "dev15"
go.mod
module reload_config
go 1.16
require (
    github.com/fsnotify/fsnotify v1.4.9
    github.com/spf13/viper v1.8.1
)
I learned recently that viper is not thread safe and hence I need to wrap it with a mutex, so I tried to do that. In config.go, in func loadConfigDynamically, the line where I set OnConfigChange is reported as the data-race read, and the same line is reported as the previous write. I run the above package with
go run -race reload_config
and change the value of env in config.yml to test whether the config file is reloaded dynamically. The data race only occurs the very first time the config is reloaded dynamically; subsequent reloads work just fine.
You lock viperLock, call vipe.WatchConfig(), and set vipe.OnConfigChange with a callback that also locks viperLock.
Because you have already called vipe.WatchConfig(), it starts invoking vipe.OnConfigChange from a separate goroutine, which also tries to acquire the same lock. That's why there is a race condition.
Call vipe.WatchConfig() only after setting vipe.OnConfigChange and after releasing the lock.
It should be corrected as below.
func loadConfigDynamically() {
    go func() {
        time.Sleep(time.Second)
        viperLock.Lock()
        vipe.OnConfigChange(func(e fsnotify.Event) {
            viperLock.Lock()
            log.Println("config file changed", e.Name)
            environment := vipe.GetString("env")
            reloadConfig <- environment
            viperLock.Unlock()
        })
        viperLock.Unlock()
        vipe.WatchConfig() // this starts the watcher that calls vipe.OnConfigChange
    }()
}
It could be that the race detector sees a variable being modified and accessed by two goroutines at the same time, with no lock at both the write and the read sites.
Something like the following example:
package main

import (
    "time"
)

type Foo struct {
    f func(string)
}

func (f *Foo) Watch() {
    go func() {
        for {
            time.Sleep(time.Second * 2)
            if f.f != nil {
                f.f("hello world")
            }
        }
    }()
}

func (f *Foo) SetF(fun func(string)) {
    f.f = fun
}

func main() {
    f := Foo{}
    f.Watch()
    f.SetF(func(s string) {
    })
    time.Sleep(time.Second * 5)
}
It has a data race. If I put the same lock around both the write and the read, there is no data race:
package main

import (
    "sync"
    "time"
)

var lock sync.Mutex

type Foo struct {
    f func(string)
}

func (f *Foo) Watch() {
    go func() {
        for {
            time.Sleep(time.Second * 2)
            lock.Lock() // read places
            if f.f != nil {
                f.f("hello world")
            }
            lock.Unlock()
        }
    }()
}

func (f *Foo) SetF(fun func(string)) {
    f.f = fun
}

func main() {
    f := Foo{}
    f.Watch()
    lock.Lock() // write places
    f.SetF(func(s string) {
    })
    lock.Unlock()
    time.Sleep(time.Second * 5)
}
Or eliminating the possibility of two goroutines reading and writing at the same time also works fine:
func main() {
    f := Foo{}
    f.SetF(func(s string) {
    })
    f.Watch()
    time.Sleep(time.Second * 5)
}

Finding all functions of certain type

Is there a way to list all functions that use or return a specific type?
For example, I'm interested in using the following function:
func ListenAndServe(addr string, handler Handler) error
How can I find all functions (across all Go packages) that return a Handler?
I'd write an analysis tool using the x/tools/go/analysis framework. Here's a rough sketch that you can run on any module (it uses go/packages underneath so it fully supports modules):
package main

import (
    "bytes"
    "fmt"
    "go/ast"
    "go/format"
    "go/token"

    "golang.org/x/tools/go/analysis"
    "golang.org/x/tools/go/analysis/singlechecker"
)

var RtAnalysis = &analysis.Analyzer{
    Name: "rtanalysis",
    Doc:  "finds functions by return type",
    Run:  run,
}

func main() {
    singlechecker.Main(RtAnalysis)
}

func run(pass *analysis.Pass) (interface{}, error) {
    for _, file := range pass.Files {
        ast.Inspect(file, func(n ast.Node) bool {
            if funcTy, ok := n.(*ast.FuncType); ok {
                if funcTy.Results != nil {
                    for _, fl := range funcTy.Results.List {
                        if tv, ok := pass.TypesInfo.Types[fl.Type]; ok {
                            if tv.Type.String() == "net/http.Handler" {
                                ns := nodeString(funcTy, pass.Fset)
                                fmt.Printf("%s has return of type net/http.Handler\n", ns)
                            }
                        }
                    }
                }
            }
            return true
        })
    }
    return nil, nil
}

// nodeString formats a syntax tree in the style of gofmt.
func nodeString(n ast.Node, fset *token.FileSet) string {
    var buf bytes.Buffer
    format.Node(&buf, fset, n)
    return buf.String()
}
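To illustrate what the checker reports, a function like the following (a made-up target package, not part of the sketch above) would be flagged because its result type is net/http.Handler:
package target

import "net/http"

// NewHandler should be reported by the analyzer above, since its
// result type is net/http.Handler.
func NewHandler() http.Handler {
    return http.NotFoundHandler()
}
Building the sketch as its own main package and running it with a package pattern such as ./... over a module containing this package should then print the "has return of type net/http.Handler" message for it.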

How to use multiple sync.WaitGroup in a single program

In my Go program I start multiple worker groups for every department.
I want to wait for workers from each department to complete before exiting the program.
I cannot use a single WaitGroup because in the actual scenario I may have to end any particular department and then need to wait only on that one.
This is a simplified version of the code, but it panics with the message:
panic: runtime error: invalid memory address or nil pointer dereference
package main

import (
    "fmt"
    "sync"
    "time"
)

var wgMap map[string]*sync.WaitGroup

func deptWorker(dName string, id int) {
    defer wgMap[dName].Done()
    fmt.Printf("Department %s : Worker %d starting\n", dName, id)
    time.Sleep(time.Second)
    fmt.Printf("Department %s : Worker %d done\n", dName, id)
}

func department(dName string) {
    var wg sync.WaitGroup
    for i := 1; i <= 3; i++ {
        wg.Add(1)
        go deptWorker(dName, i)
    }
    wgMap[dName] = &wg
}

func main() {
    go department("medical")
    go department("electronics")
    wgMap["medical"].Wait()
    wgMap["electronics"].Wait()
}
To fix the nil panic you simply need to use
var wgMap = map[string]*sync.WaitGroup{}
This initializes the map. However, in my view, it's better here to create a new abstraction; let's name it 'WaitMap'.
It can be implemented in this way:
package main

import (
    "fmt"
    "sync"
    "time"
)

type WaitMapObject struct {
    wg   map[string]int
    mu   sync.Mutex
    cond sync.Cond
}

func WaitMap() *WaitMapObject {
    m := &WaitMapObject{}
    m.wg = make(map[string]int)
    m.cond.L = &m.mu
    return m
}

func (m *WaitMapObject) Wait(name string) {
    m.mu.Lock()
    for m.wg[name] != 0 {
        m.cond.Wait()
    }
    m.mu.Unlock()
}

func (m *WaitMapObject) Done(name string) {
    m.mu.Lock()
    no := m.wg[name] - 1
    if no < 0 {
        panic("")
    }
    m.wg[name] = no
    m.mu.Unlock()
    m.cond.Broadcast()
}

func (m *WaitMapObject) Add(name string, no int) {
    m.mu.Lock()
    m.wg[name] = m.wg[name] + no
    m.mu.Unlock()
}

func deptWorker(dName string, id int, wm *WaitMapObject) {
    defer wm.Done(dName)
    fmt.Printf("Department %s : Worker %d starting\n", dName, id)
    time.Sleep(time.Second)
    fmt.Printf("Department %s : Worker %d done\n", dName, id)
}

func department(dName string, wm *WaitMapObject) {
    for i := 1; i <= 3; i++ {
        wm.Add(dName, 1)
        go deptWorker(dName, i, wm)
    }
    wm.Done(dName)
}
func main() {
    wm := WaitMap()
    wm.Add("medical", 1)
    go department("medical", wm)
    wm.Add("electronics", 1)
    go department("electronics", wm)
    wm.Wait("medical")
    wm.Wait("electronics")
}

go: var declared but not used error - how to work around it?

In this function I get "s declared and not used", which I don't understand. Do I need to somehow tag it as "really, I used it" or something?
func getString(data map[string]interface{}, name string) (string, error) {
    s := data[name]
    if reflect.TypeOf(s).Kind() != reflect.String {
        return s.(string), nil
    }
    return "", &apiError{1, "it's not a string"}
}
Oddly, I don't get the error from this function:
func getInt(data map[string]interface{}, name string) (int, error) {
    t := data[name]
    if reflect.TypeOf(t).Kind() == reflect.Int {
        return t.(int), nil
    }
    return 0, &apiError{1, "it's not an int"}
}
Also, any thoughts on the right way to factor these into a single function would be welcomed!
Your error comes from declaring and not using the same identifier somewhere else, because this compiles and runs fine on golang.org:
package main

import "reflect"

func main() {
    m := make(map[string]interface{})
    m["foo"] = "25"
    getString(m, "foo")
}

func getString(data map[string]interface{}, name string) (string, error) {
    s := data[name]
    if reflect.TypeOf(s).Kind() != reflect.String {
        return s.(string), nil
    }
    return "", nil
}
Your code looks correct; the error isn't reproducible from what you posted.
Sure, you can refactor these into a single function, but you may not like it, depending on taste.
type VType int

const (
    VInteger VType = iota
    VString
    VUnknown
)

func getValue(data map[string]interface{}, name string) (VType, int, string) {
    switch v := data[name].(type) {
    case int:
        return VInteger, v, ""
    case string:
        return VString, 0, v
    default:
        return VUnknown, 0, ""
    }
}

func main() {
    m := make(map[string]interface{})
    m["foo"] = "25"
    switch t, i, s := getValue(m, "foo"); t {
    case VInteger:
        fmt.Println("int ", i) // do something with int
    case VString:
        fmt.Println("string ", s) // do something with string
    case VUnknown:
        err := &apiError{1, "it's not an int"} // do something with err
    }
}
