I would like to dynamically load a config file without restarting my Go app. I wrote the files below; they run, but there is a data race.
config.go
package main

import (
    "github.com/fsnotify/fsnotify"
    "github.com/spf13/viper"
    "log"
    "sync"
    "time"
)

var (
    reloadConfig  = make(chan string)
    reloadConfig2 = make(chan string)
    viperLock1    sync.Mutex
    viperLock2    sync.Mutex
)

func setUpConfig(file string, merge bool, v *viper.Viper) {
    v.AddConfigPath("./")
    v.SetConfigName(file)
    v.SetConfigType("yml")
    if merge {
        err1 := v.MergeInConfig()
        checkForFatalError("fatal error occurred while reading config file!", err1)
    } else {
        err := v.ReadInConfig()
        checkForFatalError("fatal error occurred while reading config file!", err)
    }
    log.Println("Initial config value: ", v.GetString("env"))
}

func loadConfigDynamically(configChannel chan string, viperLock *sync.Mutex, vipe *viper.Viper) {
    viperLock.Lock()
    vipe.OnConfigChange(func(e fsnotify.Event) {
        viperLock.Lock()
        log.Println("config file changed", e.Name)
        environment := vipe.GetString("env")
        configChannel <- environment
        viperLock.Unlock()
    })
    viperLock.Unlock()
    vipe.WatchConfig()
}

func loadMultipleConfigsDynamically() {
    go func() {
        time.Sleep(time.Millisecond * 50)
        vipe2 := viper.New()
        setUpConfig("config_base", false, vipe2)
        loadConfigDynamically(reloadConfig2, &viperLock2, vipe2)
        time.Sleep(time.Millisecond * 50)
        vipe1 := viper.New()
        setUpConfig("config", false, vipe1)
        loadConfigDynamically(reloadConfig, &viperLock1, vipe1)
    }()
}
main.go
package main

import (
    log "github.com/sirupsen/logrus"
    "os"
    "os/signal"
    "syscall"
)

var reloadConfigNow = make(chan bool)
var reloadConfigAgain = make(chan bool)
var newConfigValue string

func main() {
    loadMultipleConfigsDynamically()
    go printUpdatedValueOnly()
    go justAnotherGoroutine()
    go yetAnotherGoroutine()
    shutdownAppGracefully()
}

func printUpdatedValueOnly() {
    for {
        select {
        case updatedValue := <-reloadConfig:
            newConfigValue = updatedValue
            log.Println("dynamically loaded config value: ", updatedValue)
            reloadConfigNow <- true
            reloadConfigAgain <- true
        case updatedValue1 := <-reloadConfig2:
            newConfigValue = updatedValue1
            log.Println("dynamically loaded config value: ", updatedValue1)
            reloadConfigNow <- true
            reloadConfigAgain <- true
        default:
        }
    }
}

func justAnotherGoroutine() {
    existingConfigValue := ""
    for {
        select {
        case <-reloadConfigNow:
            existingConfigValue = newConfigValue
            log.Println("justAnotherGoroutine: ", existingConfigValue)
        default:
        }
    }
}

func yetAnotherGoroutine() {
    existingConfigValue := ""
    for {
        select {
        case <-reloadConfigAgain:
            existingConfigValue = newConfigValue
            log.Println("yetAnotherGoroutine: ", existingConfigValue)
        default:
        }
    }
}

func checkForFatalError(errorMsg string, err error) {
    if err != nil {
        log.Fatal(errorMsg, err)
    }
}

func shutdownAppGracefully() {
    killSignal := make(chan os.Signal, 1)
    signal.Notify(killSignal, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)
    k := <-killSignal
    log.Info("OS Interrupt Signal received, application is shutting down!")
    logSystemInterruptType(k)
}

func logSystemInterruptType(osInterrupt os.Signal) {
    switch osInterrupt {
    case syscall.SIGHUP:
        log.Info("SIGHUP")
    case syscall.SIGINT:
        log.Info("SIGINT")
    case syscall.SIGTERM:
        log.Info("SIGTERM")
    case syscall.SIGQUIT:
        log.Info("SIGQUIT")
    default:
        log.Info("Unknown OS Interrupt")
    }
}
config.yml
env : "LOCAL"
config_base.yml
env : "dev15"
go.mod
module reload_config

go 1.16

require (
    github.com/fsnotify/fsnotify v1.4.9
    github.com/spf13/viper v1.8.1
)
I learned recently that viper is not thread safe and hence needs to be wrapped with a mutex, so I tried to do that. The data race is reported in config.go, in func loadConfigDynamically, on the line where I set OnConfigChange: a read there races with a previous write at the same line. I run the package with
go run -race reload_config
and change the value of env in config.yml to test whether the config file reloads dynamically. The data race occurs only the very first time the config is reloaded; subsequent reloads work just fine.
You lock viperLock, call vipe.WatchConfig(), and set vipe.OnConfigChange with a function that also locks viperLock.
Because vipe.WatchConfig() has already been called, it invokes the OnConfigChange callback from a separate goroutine, which tries to acquire the same lock. That is why there is a race condition.
Call vipe.WatchConfig() only after setting vipe.OnConfigChange and after releasing the lock.
It should be corrected as below.
func loadConfigDynamically() {
    go func() {
        time.Sleep(time.Second)
        viperLock.Lock()
        vipe.OnConfigChange(func(e fsnotify.Event) {
            viperLock.Lock()
            log.Println("config file changed", e.Name)
            environment := vipe.GetString("env")
            reloadConfig <- environment
            viperLock.Unlock()
        })
        viperLock.Unlock()
        vipe.WatchConfig() // this starts the calls to vipe.OnConfigChange
    }()
}
It could be that the race detector sees a variable being modified and accessed by two goroutines at the same time, with no lock at the places where it is modified and accessed.
Something like the following example:
package main

import (
    "time"
)

type Foo struct {
    f func(string)
}

func (f *Foo) Watch() {
    go func() {
        for {
            time.Sleep(time.Second * 2)
            if f.f != nil {
                f.f("hello world")
            }
        }
    }()
}

func (f *Foo) SetF(fun func(string)) {
    f.f = fun
}

func main() {
    f := Foo{}
    f.Watch()
    f.SetF(func(s string) {
    })
    time.Sleep(time.Second * 5)
}
It has a data race. If I guard both the write and the read with the same lock, there is no data race:
package main

import (
    "sync"
    "time"
)

var lock sync.Mutex

type Foo struct {
    f func(string)
}

func (f *Foo) Watch() {
    go func() {
        for {
            time.Sleep(time.Second * 2)
            lock.Lock() // read places
            if f.f != nil {
                f.f("hello world")
            }
            lock.Unlock()
        }
    }()
}

func (f *Foo) SetF(fun func(string)) {
    f.f = fun
}

func main() {
    f := Foo{}
    f.Watch()
    lock.Lock() // write places
    f.SetF(func(s string) {
    })
    lock.Unlock()
    time.Sleep(time.Second * 5)
}
Or, eliminating the possibility of two goroutines reading and writing at the same time also works fine: set the callback before starting the watcher.
func main() {
    f := Foo{}
    f.SetF(func(s string) {
    })
    f.Watch()
    time.Sleep(time.Second * 5)
}
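Applied back to the viper code from the question, the same idea means registering the OnConfigChange callback before WatchConfig starts its watcher goroutine. A minimal sketch of a drop-in replacement for loadConfigDynamically in config.go:
func loadConfigDynamically(configChannel chan string, viperLock *sync.Mutex, vipe *viper.Viper) {
    // Register the callback before any watcher goroutine exists, so this
    // write cannot race with a read of the callback from the watcher.
    vipe.OnConfigChange(func(e fsnotify.Event) {
        viperLock.Lock()
        defer viperLock.Unlock()
        log.Println("config file changed", e.Name)
        configChannel <- vipe.GetString("env") // viper reads stay guarded
    })
    vipe.WatchConfig() // only now does the watcher goroutine start
}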
Related
Can you please help me find out why my event counters don't get reported? I even put a debugger on the reporter and it is not triggering. The time recorder triggers fine, though.
Here is the implementation.
I have a Kafka consumer in which I have defined the following (consumer.go):
reporter := metrics.NewPrintStatsReporter()
// Report every time.Second (the interval passed to NewRootScope below)
rootScope, closer := tally.NewRootScope(tally.ScopeOptions{
    Reporter: reporter,
}, time.Second)
defer func(closer io.Closer) {
    err := closer.Close()
    if err != nil {
        // ...
    }
}(closer)
subScope := rootScope.SubScope("Events")
Then I do:
supportedEventCount := subScope.Counter(metrics.SupportedEventTypes)
unSupportedEventCount := subScope.Counter(metrics.UnsupportedEventTypes)
totalEventTypes := subScope.Counter(metrics.TotalEventTypes)
kafkaReadLatency := rootScope.Timer(metrics.KafkaReadLatency)
In the same file:
go func() {
    defer close(messages)
    for {
        // ...
        st := time.Now()
        m, err := c.r.ReadMessage(ctx)
        kafkaConsumerReadLatency := time.Since(st)
        if kafkaConsumerReadLatency < 5*time.Minute {
            kafkaReadLatency.Record(kafkaConsumerReadLatency)
        }
        // Reporting any kind of event
        totalEventTypes.Inc(1)
        if helpers.IsSupportedEvent(logger, &kafkaEvent) {
            supportedEventCount.Inc(1)
            messages <- kafkaEvent
        } else {
            unSupportedEventCount.Inc(1)
        }
    }
}()
...
kafkaReadLatency.Record(kafkaConsumerReadLatency) works fine; I get timer kafka_read_latency 34.479058541s.
However, none of the counters are working, which is very strange.
Here is my tally reporter (very standard; it 100% matches the supplied example):
package metrics

import (
    "fmt"
    tally "github.com/uber-go/tally/v4"
    "time"
)

const (
    SupportedEventTypes   = "supported_event_types"
    UnsupportedEventTypes = "unsupported_event_types"
    TotalEventTypes       = "total_event_types"
    KafkaReadLatency      = "kafka_read_latency"
)

type printStatsReporter struct{}

func NewPrintStatsReporter() tally.StatsReporter {
    return &printStatsReporter{}
}

func (r *printStatsReporter) ReportCounter(name string, _ map[string]string, value int64) {
    fmt.Printf("count %s %d\n", name, value)
}

func (r *printStatsReporter) ReportGauge(name string, _ map[string]string, value float64) {
    fmt.Printf("gauge %s %f\n", name, value)
}

func (r *printStatsReporter) ReportTimer(name string, _ map[string]string, interval time.Duration) {
    fmt.Printf("timer %s %s\n", name, interval.String())
}

// ...

func (r *printStatsReporter) Capabilities() tally.Capabilities {
    return r
}

func (r *printStatsReporter) Reporting() bool {
    return true
}

func (r *printStatsReporter) Tagging() bool {
    return true
}

func (r *printStatsReporter) Flush() {
    fmt.Printf("flush\n")
}
Here is the example:
https://github.com/uber-go/tally/blob/master/example/main.go
I am using gocron in my current project and I have encountered a few situations that are not covered in the documentation.
I tested this code:
gocron.Every(3).Seconds().Do(taskWithParams, 2, "world")
gocron.Every(2).Seconds().Do(taskWithParams, 1, "hello")
gocron.Start()

time.Sleep(10 * time.Second)
gocron.Remove(taskWithParams) // <-- remove task

// ...

func taskWithParams(a int, b string) {
    fmt.Println(a, b)
}
When I remove a task with gocron.Remove(taskWithParams), it is always gocron.Every(3).Seconds().Do(taskWithParams, 2, "world") that gets removed, even if I swap them:

gocron.Every(2).Seconds().Do(taskWithParams, 1, "hello")
gocron.Every(3).Seconds().Do(taskWithParams, 2, "world")
Is there a way for me to specify exactly which task I want to remove, since Remove() only allows one argument?
The documentation also has a scheduler:
s := gocron.NewScheduler()
s.Every(3).Seconds().Do(task)
<-s.Start()
What is the best use case for the scheduler?
And when we are done with a scheduler, how do we remove it from memory? Does scheduler.Clear() do the job, or do we have to clear it some other way?
You can handle the removal logic by giving each task its own distinct function value:
package main

import (
    "fmt"
    "time"

    "github.com/jasonlvhit/gocron" // import path assumed; adjust to the gocron version you use
)

func main() {
    fn1 := func() { taskWithParams(2, "world") }
    gocron.Every(3).Seconds().Do(fn1)

    fn2 := func() { taskWithParams(1, "hello") }
    gocron.Every(2).Seconds().Do(fn2)

    gocron.Start()
    time.Sleep(10 * time.Second)

    gocron.Remove(fn2)
}

func taskWithParams(a int, b string) {
    fmt.Println(a, b)
}
Otherwise, the scheduler.Do method returns an instance of *Job that you can pass to scheduler.RemoveByReference.
package main

import (
    "fmt"
    "time"

    "github.com/jasonlvhit/gocron" // import path assumed; adjust to the gocron version you use
)

func main() {
    job, err := gocron.Every(3).Seconds().Do(taskWithParams, 2, "ww")
    if err != nil {
        panic(err)
    }

    gocron.Every(2).Seconds().Do(taskWithParams, 1, "hh")
    gocron.Start()

    time.Sleep(10 * time.Second)
    gocron.RemoveByReference(job)
}

func taskWithParams(a int, b string) {
    fmt.Println(a, b)
}
I am trying to write a program which exposes Prometheus metrics.
It is a simple program where I want to increment a counter every time my "run" method is called on my struct.
import (
    "log"
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

type myStruct struct {
    errorCount prometheus.Counter
}

func (s myStruct) initialize() {
    s.errorCount = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "my_counter",
        Help: "sample prometheus counter",
    })
}

func (s myStruct) run() {
    s.errorCount.Add(1)
}

func main() {
    s := new(myStruct)
    s.initialize()

    http.Handle("/metrics", promhttp.Handler())

    go func() {
        for {
            s.run()
            time.Sleep(time.Second)
        }
    }()

    log.Fatal(http.ListenAndServe(":8080", nil))
}
The above code fails with a "Failed to continue - bad access" error every time I try to increment the counter, i.e. at this line:
s.errorCount.Add(1)
I am unable to determine why the counter suddenly disappears from memory (if I'm understanding the error message correctly).
I am trying to determine whether I am missing something fundamental in Go, or whether I am using the prometheus client library incorrectly.
In initialize(), s is being passed by value, which means the counter created inside initialize() is assigned to a copy, so in main() s.errorCount is still nil.
Just change the declarations of initialize (and run) to take a pointer:
func (s *myStruct) initialize() {
    // ...
}
A few more suggestions you might like to try:
func init() {
    go func() {
        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":8080", nil))
    }()
}

type myStruct struct {
    errorCount prometheus.Counter
}

func NewMyStruct() *myStruct {
    return &myStruct{
        errorCount: prometheus.NewCounter(prometheus.CounterOpts{
            Name: "my_counter",
            Help: "sample prometheus counter",
        }),
    }
}

func (s *myStruct) run() {
    s.errorCount.Add(1)
}

func main() {
    s := NewMyStruct()
    go func() {
        for {
            s.run()
            time.Sleep(time.Second)
        }
    }()
    // ... OR select{}
}
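One more thing to check, which the answer above does not mention: a counter created with prometheus.NewCounter is not registered anywhere, and promhttp.Handler() only serves registered collectors, so the counter will never appear at /metrics without registration. A sketch of the constructor with registration added:
func NewMyStruct() *myStruct {
    s := &myStruct{
        errorCount: prometheus.NewCounter(prometheus.CounterOpts{
            Name: "my_counter",
            Help: "sample prometheus counter",
        }),
    }
    // NewCounter only creates the metric; register it with the default
    // registry (served by promhttp.Handler()) so it shows up at /metrics.
    prometheus.MustRegister(s.errorCount)
    return s
}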
I have the following problem:
I have a function that should allow only one caller to execute at a time.
If someone calls the function while it is already busy, the second caller should immediately return with an error.
I tried the following:
1. Use a mutex
This would be pretty easy, but the problem is that you cannot check whether a mutex is locked; you can only block on it. Therefore it does not work.
2. Wait on a channel
var canExec = make(chan bool, 1)

func init() {
    canExec <- true
}

func onlyOne() error {
    select {
    case <-canExec:
    default:
        return errors.New("already busy")
    }

    defer func() {
        fmt.Println("done")
        canExec <- true
    }()

    // do stuff
    return nil
}
What I don't like here:
- it looks really messy
- it is easy to mistakenly block on the channel or mistakenly write to it
3. Mixture of mutex and shared state
var open = true
var myMutex = &sync.Mutex{} // must be initialized; a nil *sync.Mutex would panic

func canExec() bool {
    myMutex.Lock()
    defer myMutex.Unlock()
    if open {
        open = false
        return true
    }
    return false
}

func endExec() {
    myMutex.Lock()
    defer myMutex.Unlock()
    open = true
}

func onlyOne() error {
    if !canExec() {
        return errors.New("busy")
    }
    defer endExec()

    // do stuff
    return nil
}
I don't like this either; using a shared variable with a mutex is not that nice.
Any other idea?
I'll throw my preference out there: use the atomic package.
var (
    locker    uint32
    errLocked = errors.New("Locked out buddy")
)

func OneAtATime(d time.Duration) error {
    // All the locking logic is in these four lines:
    if !atomic.CompareAndSwapUint32(&locker, 0, 1) {
        return errLocked
    }
    defer atomic.StoreUint32(&locker, 0)

    // logic here, but we will sleep
    time.Sleep(d)
    return nil
}
The idea is pretty simple. Set the initial value to 0 (0 value of uint32). The first thing you do in the function is check if the value of locker is currently 0 and if so it changes it to 1. It does all of this atomically. If it fails simply return an error (or however else you like to handle a locked state). If successful, you immediately defer replacing the value (now 1) with 0. You don't have to use defer obviously, but failing to set the value back to 0 before returning would leave you in a state where the function could no longer be run.
After you do those 4 lines of setup, you do whatever you would normally.
https://play.golang.org/p/riryVJM4Qf
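For illustration, here is a hypothetical caller (not part of the original answer, and assuming the usual fmt, sync, and time imports) that fires several concurrent attempts against OneAtATime above; at most one runs at a time and the rest get errLocked:
func main() {
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            if err := OneAtATime(100 * time.Millisecond); err != nil {
                fmt.Println(n, err) // overlapping callers see errLocked
            } else {
                fmt.Println(n, "ran")
            }
        }(i)
    }
    wg.Wait()
}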
You can make things a little nicer if desired by using named values for your states.
const (
    stateUnlocked uint32 = iota
    stateLocked
)

var (
    locker    = stateUnlocked
    errLocked = errors.New("Locked out buddy")
)

func OneAtATime(d time.Duration) error {
    if !atomic.CompareAndSwapUint32(&locker, stateUnlocked, stateLocked) {
        return errLocked
    }
    defer atomic.StoreUint32(&locker, stateUnlocked)

    // logic here, but we will sleep
    time.Sleep(d)
    return nil
}
You can use a semaphore for this (go get golang.org/x/sync/semaphore)
package main

import (
    "errors"
    "fmt"
    "sync"
    "time"

    "golang.org/x/sync/semaphore"
)

var sem = semaphore.NewWeighted(1)

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if err := onlyOne(); err != nil {
                fmt.Println(err)
            }
        }()
        time.Sleep(time.Second)
    }
    wg.Wait()
}

func onlyOne() error {
    if !sem.TryAcquire(1) {
        return errors.New("busy")
    }
    defer sem.Release(1)

    fmt.Println("working")
    time.Sleep(5 * time.Second)
    return nil
}
You could use the standard channel approach with a select statement.
var (
    ch = make(chan bool)
)

func main() {
    i := 0
    wg := sync.WaitGroup{}
    for i < 100 {
        i++
        wg.Add(1)
        go func() {
            defer wg.Done()
            err := onlyOne()
            if err != nil {
                fmt.Println("Error: ", err)
            } else {
                fmt.Println("Ok")
            }
        }()
        go func() {
            ch <- true
        }()
    }
    wg.Wait()
}

func onlyOne() error {
    select {
    case <-ch:
        // do stuff
        return nil
    default:
        return errors.New("Busy")
    }
}
Do you want the function to be executed exactly once, or once at a time? In the former case, take a look at https://golang.org/pkg/sync/#Once.
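For the exactly-once case, a minimal sync.Once sketch:
package main

import (
    "fmt"
    "sync"
)

func main() {
    var once sync.Once
    for i := 0; i < 3; i++ {
        // The function passed to Do runs only on the first call;
        // later calls are no-ops.
        once.Do(func() { fmt.Println("runs exactly once") })
    }
}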
If you want a once-at-a-time solution:
package main

import (
    "fmt"
    "sync"
    "time"
)

// OnceAtATime protects a function from being executed simultaneously.
// Example:
//     func myFunc() { time.Sleep(10 * time.Second) }
//     func main() {
//         once := OnceAtATime{}
//         once.Do(myFunc)
//         once.Do(myFunc) // not executed
//     }
type OnceAtATime struct {
    m        sync.Mutex
    executed bool
}

func (o *OnceAtATime) Do(f func()) {
    o.m.Lock()
    if o.executed {
        o.m.Unlock()
        return
    }
    o.executed = true
    o.m.Unlock()

    f()

    o.m.Lock()
    o.executed = false
    o.m.Unlock()
}

// Proof of concept
func f(m int, done chan<- struct{}) {
    for i := 0; i < 10; i++ {
        fmt.Printf("%d: %d\n", m, i)
        time.Sleep(250 * time.Millisecond)
    }
    close(done)
}

func main() {
    done := make(chan struct{})
    once := OnceAtATime{}
    go once.Do(func() { f(1, done) })
    go once.Do(func() { f(2, done) })
    <-done

    done = make(chan struct{})
    go once.Do(func() { f(3, done) })
    <-done
}
https://play.golang.org/p/nZcEcWAgKp
But the problem is, you cannot check if a mutex is locked. You can only block on it. Therefore it does not work
With Go 1.18 (expected Q1 2022), you should be able to test whether a mutex is locked without blocking on it.
See (as mentioned by Go 101) issue 45435 from Tye McQueen:
sync: add Mutex.TryLock
This is followed by CL 319769, with the caveat:
Use of these functions is almost (but not) always a bad idea.
Very rarely they are necessary, and third-party implementations (using a mutex and an atomic word, say) cannot integrate as well with the race detector as implementations in package sync itself.
The objections (since retracted) were:
Locks are for protecting invariants.
If the lock is held by someone else, there is nothing you can say about the invariant.
TryLock encourages imprecise thinking about locks; it encourages making assumptions about the invariants that may or may not be true.
That ends up being its own source of races.
Thinking more about this, there is one important benefit to building TryLock into Mutex, compared to a wrapper:
failed TryLock calls wouldn't create spurious happens-before edges to confuse the race detector.
And:
A channel-based implementation is possible, but performs poorly in comparison.
There's a reason we have sync.Mutex rather than just using channel for locking.
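With that method available, the whole question collapses to a few lines. A minimal sketch, assuming Go 1.18+ so that sync.Mutex has TryLock:
package main

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

var mu sync.Mutex

func onlyOne() error {
    if !mu.TryLock() { // requires Go 1.18+
        return errors.New("busy")
    }
    defer mu.Unlock()

    time.Sleep(time.Second) // do stuff
    return nil
}

func main() {
    go onlyOne()
    time.Sleep(10 * time.Millisecond) // let the first caller grab the lock
    fmt.Println(onlyOne())            // busy
}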
I came up with the following generic solution for that.
It works for me; do you see any problem with it?
import (
    "sync"
)

const (
    ONLYONECALLER_LOCK = "onlyonecaller"
    ANOTHER_LOCK       = "anotherlock" // each lock needs its own distinct key
)

var locks = map[string]bool{}
var mutex = &sync.Mutex{}

func Lock(lock string) bool {
    mutex.Lock()
    defer mutex.Unlock()
    locked, ok := locks[lock]
    if !ok {
        locks[lock] = true
        return true
    }
    if locked {
        return false
    }
    locks[lock] = true
    return true
}

func IsLocked(lock string) bool {
    mutex.Lock()
    defer mutex.Unlock()
    locked, ok := locks[lock]
    if !ok {
        return false
    }
    return locked
}

func Unlock(lock string) {
    mutex.Lock()
    defer mutex.Unlock()
    locked, ok := locks[lock]
    if !ok {
        return
    }
    if !locked {
        return
    }
    locks[lock] = false
}
see: https://play.golang.org/p/vUUsHcT3L-
How about this package: https://github.com/viney-shih/go-lock. It uses channels and a semaphore (golang.org/x/sync/semaphore) to solve your problem.
go-lock implements TryLock, TryLockWithTimeout and TryLockWithContext functions in addition to Lock and Unlock. It provides flexibility to control the resources.
Examples:
package main

import (
    "context"
    "fmt"
    "time"

    lock "github.com/viney-shih/go-lock"
)

func main() {
    casMut := lock.NewCASMutex()
    casMut.Lock()
    defer casMut.Unlock()

    // TryLock without blocking
    fmt.Println("Return", casMut.TryLock()) // Return false

    // TryLockWithTimeout without blocking
    fmt.Println("Return", casMut.TryLockWithTimeout(50*time.Millisecond)) // Return false

    // TryLockWithContext without blocking
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    fmt.Println("Return", casMut.TryLockWithContext(ctx)) // Return false

    // Output:
    // Return false
    // Return false
    // Return false
}
Let's keep it simple:
package main

import (
    "errors"
    "fmt"
    "time"

    "golang.org/x/sync/semaphore"
)

var sem = semaphore.NewWeighted(1)

func doSomething() error {
    if !sem.TryAcquire(1) {
        return errors.New("I'm busy")
    }
    defer sem.Release(1)

    fmt.Println("I'm doing my work right now, then I'll take a nap")
    time.Sleep(10 * time.Second)
    return nil
}

func main() {
    go doSomething()
    time.Sleep(time.Second) // give the goroutine a chance to run
}
I just want to run repetitive background tasks in Go using time.AfterFunc, but something seems wrong with the logic.
The output is just:
interval call
interval call
but the function should be called at least 5 times if everything goes normally.
package main

import (
    "fmt"
    "os"
    "os/signal"
    "time"
)

type Timer struct {
    Queue chan *TimeCall
}

func NewTimer(l int) *Timer {
    timer := new(Timer)
    timer.Queue = make(chan *TimeCall, l)
    return timer
}

type TimeCall struct {
    timer    *time.Timer
    callback func()
}

func (this *TimeCall) CallBack() {
    defer func() { recover() }()
    if this.callback != nil {
        this.callback()
    }
}

func (this *Timer) AfterFunc(d time.Duration, callback func()) *TimeCall {
    call := new(TimeCall)
    call.callback = callback
    call.timer = time.AfterFunc(d, func() {
        this.Queue <- call
    })
    return call
}

type PipeService struct {
    TimeCall *Timer
}

func (this *PipeService) AfterFunc(delay time.Duration, callback func()) *TimeCall {
    return this.TimeCall.AfterFunc(delay, callback)
}

func (this *PipeService) IntervalCall(interval time.Duration, callback func()) {
    this.TimeCall.AfterFunc(interval, func() {
        if callback != nil {
            callback()
        }
        this.AfterFunc(interval, callback)
    })
}

func (this *PipeService) Run(closeSig chan bool) {
    for {
        select {
        case <-closeSig:
            return
        case call := <-this.TimeCall.Queue:
            call.CallBack()
        }
    }
}

func main() {
    var closeChan chan bool
    InsPipeService := &PipeService{TimeCall: NewTimer(10)}
    InsPipeService.IntervalCall(2*time.Second, func() {
        fmt.Println("interval call")
    })

    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt, os.Kill)

    go func() {
        InsPipeService.Run(closeChan)
    }()

    time.Sleep(10 * time.Second)
}
time.AfterFunc() returns a *time.Timer; quoting from its doc:
The Timer type represents a single event. When the Timer expires, the current time will be sent on C, unless the Timer was created by AfterFunc.
The time.Timer returned by time.AfterFunc() does not repeat, so what you see is perfectly normal: in your PipeService.IntervalCall() you execute the callback immediately, and it gets executed after the timeout.
Also note that the interval parameter of PipeService.IntervalCall() is of type time.Duration, so if you were to pass a bare 2, that would not be 2 seconds but 2 nanoseconds. You should pass a value constructed from the constants in the time package, like:
InsPipeService.IntervalCall(2*time.Second, func() {
    fmt.Println("interval call")
})
If you want repetition, use time.Ticker. For example, the following code prints a message every 2 seconds:
t := time.NewTicker(2 * time.Second)
for now := range t.C {
    fmt.Println("tick", now)
}
Or simply if you don't need the Ticker and you don't want to shut it down:
c := time.Tick(2 * time.Second)
for now := range c {
    fmt.Println("tick", now)
}
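Applied to the PipeService from the question, the repetition could be driven by a Ticker that feeds the existing queue. This is a hypothetical rework, not from the original answer:
// IntervalCall reworked: a Ticker enqueues the callback on every tick,
// so Run() keeps executing it without any manual rescheduling.
func (this *PipeService) IntervalCall(interval time.Duration, callback func()) {
    ticker := time.NewTicker(interval)
    go func() {
        for range ticker.C {
            this.TimeCall.Queue <- &TimeCall{callback: callback}
        }
    }()
}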
Set the time interval, then call Start; it will run the user Job at each interval. Set Enabled to false to stop it.
My sample:
package main

import (
    "fmt"
    "sync"
    "time"
)

type IntervalTimer struct {
    Interval time.Duration
    Enabled  bool
    Job      func()
    Wg       sync.WaitGroup
}

func (it *IntervalTimer) isr() {
    if it.Enabled {
        it.Job()
        time.AfterFunc(it.Interval, it.isr)
    } else {
        it.Wg.Done()
    }
}

// trigger
func (it *IntervalTimer) Start() {
    if it.Enabled {
        it.Wg.Add(1)
        time.AfterFunc(it.Interval, it.isr)
    }
}

// user code:

var n int = 5
var it *IntervalTimer

func userTask() {
    fmt.Println(n, time.Now()) // do user job ...
    n--
    if n == 0 {
        it.Enabled = false
    }
}

func main() {
    it = &IntervalTimer{Interval: 500 * time.Millisecond, Enabled: true, Job: userTask}
    it.Start()

    // do some job ...

    it.Wg.Wait()
    fmt.Println("Bye")
}