I have the following code snippet.
package main

import (
	"errors"
	"fmt"
	"time"
)

func errName(ch chan error) {
	for i := 0; i < 10000; i++ {
	}
	ch <- errors.New("Error name")
	close(ch)
}

func errEmail(ch chan error) {
	for i := 0; i < 100; i++ {
	}
	ch <- errors.New("Error email")
	close(ch)
}

func main() {
	ch := make(chan error)
	go errName(ch)
	go errEmail(ch)
	fmt.Println(<-ch)
	//close(ch)
	time.Sleep(1000000) // raw nanoseconds: about 1ms
}
As you can see, I run two functions, errName and errEmail, as goroutines, passing each one a channel of type error. Whichever finishes first should send its error through the channel and close it. The other, still-running goroutine should then stop doing any work, because I already have the error and want to terminate it. That is what I'm trying to achieve in the example above.
When I run the program, I get this error:
panic: send on closed channel
goroutine 6 [running]:
main.errEmail(0xc0820101e0)
D:/gocode/src/samples/gorountine2.go:24 +0xfd
created by main.main
D:/gocode/src/samples/gorountine2.go:33 +0x74
goroutine 1 [runnable]:
main.main()
D:/gocode/src/samples/gorountine2.go:34 +0xac
exit status 2
I know that when I remove the close statement it does not panic, but then the losing goroutine is still blocked sending on the channel, which means its memory is wasted for nothing (it waits forever).
Once one of them has sent an error to the channel, I no longer care about the second error; that is my goal.
A standard way to organize this behavior is to use the context package:
package main

import (
	"context" // originally code.google.com/p/go.net/context; in the standard library since Go 1.7
	"fmt"
	"time"
)

func errName(ctx context.Context, cancel context.CancelFunc) {
	for i := 0; i < 10000; i++ {
		select {
		case <-ctx.Done():
			return
		default:
		}
	}
	cancel()
}

func errEmail(ctx context.Context, cancel context.CancelFunc) {
	for i := 0; i < 100; i++ {
		select {
		case <-ctx.Done():
			return
		default:
		}
	}
	cancel()
}

func main() {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	go errName(ctx, cancel)
	go errEmail(ctx, cancel)
	<-ctx.Done()
	if ctx.Err() != nil {
		fmt.Println(ctx.Err())
	}
	time.Sleep(1000000) // give the losing goroutine a moment to observe ctx.Done()
}
You can read two good articles on the matter:
http://blog.golang.org/context
http://blog.golang.org/pipelines
Use another channel to signal done:
package main

import (
	"errors"
	"fmt"
	"time"
)

func errName(ch chan error, done chan struct{}) {
	for i := 0; i < 10000; i++ {
		select {
		case <-done:
			fmt.Println("early return from name")
			return
		default:
		}
	}
	select {
	case ch <- errors.New("Error name"):
	default:
	}
}

func errEmail(ch chan error, done chan struct{}) {
	for i := 0; i < 100; i++ {
		select {
		case <-done:
			fmt.Println("early return from email")
			return
		default:
		}
	}
	select {
	case ch <- errors.New("Error email"):
	default:
	}
}

func main() {
	ch := make(chan error, 1)
	done := make(chan struct{})
	go errName(ch, done)
	go errEmail(ch, done)
	fmt.Println(<-ch)
	close(done)
	time.Sleep(1000000)
}
playground example
To prevent the losing goroutine from blocking forever on the channel send, I created the error channel with capacity 1 and used a select when sending:
select {
case ch <- errors.New("Error email"):
default:
}
If you are working with more than one level of goroutine completion, then you should consider using the golang.org/x/net/context Context (now the standard library's context package).
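As a hedged sketch of why context helps with nested goroutines (the worker layout below is my own illustration, not from the question): a single cancel call on the parent context stops every goroutine derived from it, at every level.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// worker starts a nested goroutine; both levels stop when the
// shared context is cancelled.
func worker(ctx context.Context, wg *sync.WaitGroup, id int) {
	defer wg.Done()
	go func() { // second level
		defer wg.Done()
		<-ctx.Done()
		fmt.Printf("worker %d: child stopped: %v\n", id, ctx.Err())
	}()
	<-ctx.Done()
	fmt.Printf("worker %d stopped: %v\n", id, ctx.Err())
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(2) // one for the worker, one for its child
		go worker(ctx, &wg, i)
	}
	time.Sleep(10 * time.Millisecond)
	cancel() // a single call unwinds every level
	wg.Wait()
}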
The done chan struct{} mentioned above (or its context.Context incarnation) is the idiomatic way to get this behaviour. But an easy way to avoid the panic in your snippet is sync.Once:
import "sync"
var once sync.Once
func errName(ch chan error) {
for i := 0; i < 10000; i++ {
}
once.Do(func() {ch <- errors.New("Error name"); close(ch)}())
}
func errName(ch chan error) {
for i := 0; i < 10000; i++ {
}
once.Do(func() {ch <- errors.New("Error name"); close(ch)}())
}
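For completeness, a minimal runnable sketch of that approach, assuming a main like the one in the question:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var once sync.Once

func errName(ch chan error) {
	for i := 0; i < 10000; i++ {
	}
	// Only the first goroutine to get here sends and closes;
	// the loser's closure is never executed, so no double close.
	once.Do(func() { ch <- errors.New("Error name"); close(ch) })
}

func errEmail(ch chan error) {
	for i := 0; i < 100; i++ {
	}
	once.Do(func() { ch <- errors.New("Error email"); close(ch) })
}

func main() {
	ch := make(chan error)
	go errName(ch)
	go errEmail(ch)
	fmt.Println(<-ch)
	time.Sleep(time.Millisecond)
}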
Related
I am playing around with worker pools and I want to consolidate all errors from the worker pool before I return them. I've written a sample program, but I am running into a deadlock.
What am I trying to achieve?
A client sends 100 requests. I want to first add those requests to a job queue and dispatch them to n goroutines that do the tasks in the background; if there are errors, I want to accumulate all of them before I send them back to the client. I have written a snippet; can someone explain what's wrong and how to mitigate the deadlock?
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/apex/log"
	"github.com/hashicorp/go-multierror"
)

type Manager struct {
	taskChan    chan int
	wg          *sync.WaitGroup
	QuitChan    chan bool
	ErrorChan   chan error
	busyWorkers int64
}

func main() {
	fmt.Println("Hello, 世界")
	m := New()
	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
	//defer cancel()
	for i := 0; i < 3; i++ {
		m.wg.Add(1)
		go m.run(ctx, test)
	}
	for i := 1; i < 5; i++ {
		m.taskChan <- i
	}
	close(m.taskChan)
	go func(*Manager) {
		if len(m.taskChan) == 0 {
			m.QuitChan <- true
		}
	}(m)
	var errors error
	for {
		select {
		case err := <-m.ErrorChan:
			errors = multierror.Append(errors, err)
			if m.busyWorkers == int64(0) {
				break
			}
		default:
			fmt.Println("hello")
		}
	}
	m.wg.Wait()
	fmt.Println(errors)
}

func New() *Manager {
	return &Manager{taskChan: make(chan int),
		wg:        new(sync.WaitGroup),
		QuitChan:  make(chan bool),
		ErrorChan: make(chan error),
	}
}

func (m *Manager) run(ctx context.Context, fn func(a, b int) error) {
	defer m.wg.Done()
	defer fmt.Println("finished working")
	for {
		select {
		case t, ok := <-m.taskChan:
			if ok {
				atomic.AddInt64(&m.busyWorkers, 1)
				err := fn(t, t)
				if err != nil {
					m.ErrorChan <- err
				}
				atomic.AddInt64(&m.busyWorkers, -1)
			}
		case <-ctx.Done():
			log.Infof("closing channel %v", ctx.Err())
			return
		case <-m.QuitChan:
			return
		}
	}
}

// test can return an error or not; it is the main driver func, but I'm
// propagating errors so that I can understand where I am going wrong.
func test(a, b int) error {
	fmt.Println(a, b)
	return fmt.Errorf("dummy error %v", a)
}
You have 3 workers, all of which return errors.
Your main goroutine tries to queue 4 jobs (i runs from 1 to 4). Once the first 3 have been taken by your workers, the main goroutine is stuck waiting for a worker to receive on taskChan, while all 3 workers are stuck trying to send on the unbuffered ErrorChan.
In other words: deadlock.
Maybe you wanted to make taskChan a buffered channel? That way you can send data on it until the buffer is full without blocking.
taskChan: make(chan int, 10)
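Building on that fix, here is a minimal deadlock-free sketch of the consolidation pattern (this is my own reduction of the question's program, with the standard library's errors.Join standing in for multierror.Append): a dedicated goroutine closes the error channel once every worker has returned, so a plain range can collect the errors.

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	taskChan := make(chan int, 10) // buffered: queuing tasks no longer blocks main
	errChan := make(chan error)
	var wg sync.WaitGroup

	// Three workers, as in the question; each task fails on purpose.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for t := range taskChan {
				errChan <- fmt.Errorf("dummy error %v", t)
			}
		}()
	}

	for i := 1; i < 5; i++ {
		taskChan <- i
	}
	close(taskChan) // workers leave their range loops once the queue drains

	// Close errChan after all workers return, so the collecting loop ends.
	go func() {
		wg.Wait()
		close(errChan)
	}()

	var errs error
	for err := range errChan {
		errs = errors.Join(errs, err)
	}
	fmt.Println(errs)
}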
I'm trying to figure out why I have a deadlock with waitgroup.Wait().
package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func foo(c chan int, i int) {
	defer wg.Done()
	c <- i
}

func main() {
	ch := make(chan int)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go foo(ch, i)
	}
	wg.Wait()
	close(ch)
	for item := range ch {
		fmt.Println(item)
	}
}
When I run it like this, it prints fatal error: all goroutines are asleep - deadlock!
I tried changing ch to a buffered channel, and that solved the problem. But I really want to know why there is a deadlock.
I've added comments at the parts where your program's logic is not correct:
package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func foo(c chan int, i int) {
	defer wg.Done()
	c <- i
}

func main() {
	ch := make(chan int) // unbuffered channel
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go foo(ch, i)
	}
	// wg.Wait waits for all goroutines to finish, but that is only
	// possible if the sends to the channel succeed. Here they cannot,
	// because your receiver "for item := range ch" comes after this
	// line. Hence, a deadlock.
	wg.Wait()
	// Ideally it is the sender's duty to close the channel, and
	// closing an unbuffered channel before anything has received
	// from it is not correct.
	close(ch)
	for item := range ch {
		fmt.Println(item)
	}
}
Corrected program:
package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func foo(c chan int, i int) {
	defer wg.Done()
	c <- i
}

func main() {
	ch := make(chan int)
	done := make(chan struct{})
	// Start the receiver before the senders so the unbuffered
	// sends have somewhere to go.
	go func() {
		for item := range ch {
			fmt.Println(item)
		}
		close(done)
	}()
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go foo(ch, i)
	}
	wg.Wait()
	close(ch)
	<-done // wait for the receiver to finish printing before main exits
}
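Alternatively, as noted in the question, a buffered channel also resolves the deadlock: with capacity 10, every send completes even though the receiver only starts after wg.Wait(). A minimal sketch:

package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func foo(c chan int, i int) {
	defer wg.Done()
	c <- i // completes immediately while buffer space remains
}

func main() {
	ch := make(chan int, 10) // room for every send
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go foo(ch, i)
	}
	wg.Wait()
	close(ch) // safe: all sends are done, and range needs the close to end
	for item := range ch {
		fmt.Println(item)
	}
}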
I'm trying to parallelize calls to an API to speed things up, but I'm facing a problem: I need to stop spinning up goroutines to call the API if I receive an error from one of the calls. Since I close the channel twice (once in the error-handling path and once when execution is done), I get a panic: close of closed channel. Is there an elegant way to handle this without the program panicking? Any help would be appreciated!
The following is the pseudo-code snippet.
for i := 0; i < someNumber; i++ {
	go func(num int, q chan<- bool) {
		value, err := callAnAPI()
		if err != nil {
			close(q) // exit from the for-loop
		}
		// process the value here
		wg.Done()
	}(i, quit)
}
close(quit)
To mock my scenario, I have written the following program. Is there any way to exit the for-loop gracefully once the condition (commented out) is satisfied?
package main

import (
	"fmt"
	"sync"
)

func receive(q <-chan bool) {
	for {
		select {
		case <-q:
			return
		}
	}
}

func main() {
	quit := make(chan bool)
	var result []int
	wg := &sync.WaitGroup{}
	wg.Add(10)
	for i := 0; i < 10; i++ {
		go func(num int, q chan<- bool) {
			//if num == 5 {
			//	close(q)
			//}
			result = append(result, num)
			wg.Done()
		}(i, quit)
	}
	close(quit)
	receive(quit)
	wg.Wait()
	fmt.Printf("Result: %v", result)
}
You can use the context package, which defines the Context type; it carries deadlines, cancellation signals, and other request-scoped values across API boundaries and between processes.
package main

import (
	"context"
	"fmt"
	"sync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // cancel when we are finished, even without error
	wg := &sync.WaitGroup{}
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(num int) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return // an error occurred somewhere, terminate
			default: // avoid blocking
			}
			// your code here
			// res, err := callAnAPI()
			// if err != nil {
			//	cancel()
			//	return
			// }
			if num == 5 {
				cancel()
				return
			}
			fmt.Println(num)
		}(i)
	}
	wg.Wait()
	fmt.Println(ctx.Err())
}
Try on: Go Playground
You can also take a look at this answer for a more detailed explanation.
I need to start a number of workers with a single task queue and a single result queue. Each worker should be started in a different goroutine, and I need to wait until all workers are finished and the task queue is empty before exiting the program.
I have prepared a small example of the goroutine synchronization.
The main idea is to count tasks in the queue and wait for all workers to finish their jobs. But the current implementation sometimes misses values.
Why does this happen, and how can I solve the problem?
The sample code:
package main

import (
	"fmt"
	"os"
	"os/signal"
	"strconv"
)

const num_workers = 5

type workerChannel chan uint64

// Make channel for tasks
var workCh workerChannel

// Make channel for task counter
var cntChannel chan int

// Task counter
var tskCnt int64

// Worker function
func InitWorker(input workerChannel, result chan string, num int) {
	for {
		select {
		case inp := <-input:
			getTask()
			result <- ("Worker " + strconv.Itoa(num) + ":" + strconv.FormatUint(inp, 10))
		}
	}
}

// Function to manage the task counter;
// should run in its own goroutine
func taskCounter(inp chan int) {
	for {
		val := <-inp
		tskCnt += int64(val)
	}
}

// Put a task into the queue
func putTask(val uint64) {
	func() {
		fmt.Println("Put ", val)
		cntChannel <- int(1)
		workCh <- val
	}()
}

// Get a task from the queue
func getTask() {
	func() {
		cntChannel <- int(-1)
	}()
}

func main() {
	// Init service channels
	abort := make(chan os.Signal)
	done := make(chan bool)
	// init queue for results
	result := make(chan string)
	// init task queue
	workCh = make(workerChannel)
	// start some workers
	for i := uint(0); i < num_workers; i++ {
		go InitWorker(workCh, result, int(i))
	}
	// init counter for synchro
	cntChannel = make(chan int)
	go taskCounter(cntChannel)
	// goroutine that puts some tasks into the queue
	go func() {
		for i := uint(0); i < 21; i++ {
			putTask(uint64(i))
		}
		// wait for processing of all tasks, then close the application
		for len(cntChannel) != 0 {
		}
		for tskCnt != 0 {
		}
		for len(workCh) != 0 {
		}
		for len(result) != 0 {
		}
		// send signal to close
		done <- true
	}()
	signal.Notify(abort, os.Interrupt)
	for {
		select {
		case <-abort:
			fmt.Println("Aborted.")
			os.Exit(0)
		// print results
		case res := <-result:
			fmt.Println(res)
		case <-done:
			fmt.Println("Done")
			os.Exit(0)
		}
	}
}
Use sync.WaitGroup to wait for goroutines to complete. Close channels to cause loops reading on channels to exit.
package main

import (
	"fmt"
	"sync"
)

type workerChannel chan uint64

const num_workers = 5

func main() {
	results := make(chan string)
	workCh := make(workerChannel)

	// Start workers
	var wg sync.WaitGroup
	wg.Add(num_workers)
	for i := 0; i < num_workers; i++ {
		go func(num int) {
			defer wg.Done()
			// Loop processing work until workCh is closed
			for w := range workCh {
				results <- fmt.Sprintf("worker %d, task %d", num, w)
			}
		}(i)
	}

	// Close the result channel when the workers are done
	go func() {
		wg.Wait()
		close(results)
	}()

	// Send work to be done
	go func() {
		for i := 0; i < 21; i++ {
			workCh <- uint64(i)
		}
		// Closing the channel causes workers to break out of the loop
		close(workCh)
	}()

	// Process results. The loop exits when the result channel is closed.
	for r := range results {
		fmt.Println(r)
	}
}
https://play.golang.org/p/ZifpzsP6fNv
I suggest using close(chan) for this kind of task.
WaitGroup version:
package main

import (
	"log"
	"sync"
)

func worker(in chan int, wg *sync.WaitGroup) {
	defer wg.Done()
	for i := range in {
		log.Println(i)
	}
}

func main() {
	in := make(chan int)
	lc := 25
	maxValue := 30
	wg := sync.WaitGroup{}
	wg.Add(lc)
	for i := 0; i < lc; i++ {
		go worker(in, &wg)
	}
	for c := 0; c <= maxValue; c++ {
		in <- c
	}
	close(in)
	wg.Wait()
}
Channel version:
package main

import (
	"log"
	"os"
)

func worker(in chan int, end chan struct{}) {
	defer func() { end <- struct{}{} }()
	for i := range in {
		log.Println(i)
	}
}

func main() {
	in := make(chan int)
	lc := 25
	maxValue := 30
	end := make(chan struct{})
	var fin int
	go func() {
		for {
			<-end
			fin++
			log.Println(`fin`, fin)
			if fin == lc {
				break
			}
		}
		close(end)
		os.Exit(0)
	}()
	for i := 0; i < lc; i++ {
		go worker(in, end)
	}
	for c := 0; c <= maxValue; c++ {
		in <- c
	}
	close(in)
	<-make(chan struct{}) // block forever; the counting goroutine exits the process
}
I have a problem using sync.WaitGroup and select together. If you take a look at the following HTTP request pool, you will notice that if an error occurs it will never be reported, since the worker blocks sending on the error channel and nothing reads from it anymore.
package pool

import (
	"fmt"
	"log"
	"net/http"
	"sync"
)

var (
	MaxPoolQueue  = 100
	MaxPoolWorker = 10
)

type Pool struct {
	wg     *sync.WaitGroup
	queue  chan *http.Request
	errors chan error
}

func NewPool() *Pool {
	return &Pool{
		wg:     &sync.WaitGroup{},
		queue:  make(chan *http.Request, MaxPoolQueue),
		errors: make(chan error),
	}
}

func (p *Pool) Add(r *http.Request) {
	p.wg.Add(1)
	p.queue <- r
}

func (p *Pool) Run() error {
	for i := 0; i < MaxPoolWorker; i++ {
		go p.doWork()
	}
	select {
	case err := <-p.errors:
		return err
	default:
		p.wg.Wait()
	}
	return nil
}

func (p *Pool) doWork() {
	for r := range p.queue {
		fmt.Printf("Request to %s\n", r.Host)
		p.wg.Done()
		_, err := http.DefaultClient.Do(r)
		if err != nil {
			log.Fatal(err)
			p.errors <- err
		} else {
			fmt.Printf("no error\n")
		}
	}
}
Source can be found here
How can I still use WaitGroup but also get errors from goroutines?
I found the answer myself as I wrote the question, and as I think it is an interesting case I would like to share it with you.
The trick to using sync.WaitGroup and a chan together is to wrap:
select {
case err := <-p.errors:
	return err
default:
	p.wg.Wait()
}
Together in a for loop:
for {
	select {
	case err := <-p.errors:
		return err
	default:
		p.wg.Wait()
	}
}
In this case the select always checks for errors first and waits if nothing has happened :)
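One caveat: as written, the loop only ever returns when an error arrives; for the no-error case you still need a completion signal. A minimal sketch of Run() with an extra done channel closed after wg.Wait() (the done channel is my own addition, not part of the original pool):

func (p *Pool) Run() error {
	for i := 0; i < MaxPoolWorker; i++ {
		go p.doWork()
	}
	// Signal completion by closing a channel once all requests are handled.
	done := make(chan struct{})
	go func() {
		p.wg.Wait()
		close(done)
	}()
	for {
		select {
		case err := <-p.errors:
			return err // fail fast on the first error
		case <-done:
			return nil // all work finished without error
		}
	}
}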
It looks a bit like the fail-fast mechanism enabled by the Tomb library (Tomb V2 GoDoc):
The tomb package handles clean goroutine tracking and termination.
If any of the tracked goroutines returns a non-nil error, or the Kill or Killf method is called by any goroutine in the system (tracked or not), the tomb Err is set, Alive is set to false, and the Dying channel is closed to flag that all tracked goroutines are supposed to willingly terminate as soon as possible.
Once all tracked goroutines terminate, the Dead channel is closed, and Wait unblocks and returns the first non-nil error presented to the tomb via a result or an explicit Kill or Killf method call, or nil if there were no errors.
You can see an example in this playground:
(extract)
// start runs all the given functions concurrently
// until either they all complete or one returns an
// error, in which case it returns that error.
//
// The functions are passed a channel which will be closed
// when the function should stop.
func start(funcs []func(stop <-chan struct{}) error) error {
	var tomb tomb.Tomb
	var wg sync.WaitGroup
	allDone := make(chan struct{})
	// Start all the functions.
	for _, f := range funcs {
		f := f
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := f(tomb.Dying()); err != nil {
				tomb.Kill(err)
			}
		}()
	}
	// Start a goroutine to wait for them all to finish.
	go func() {
		wg.Wait()
		close(allDone)
	}()
	// Wait for them all to finish, or one to fail.
	select {
	case <-allDone:
	case <-tomb.Dying():
	}
	tomb.Done()
	return tomb.Err()
}
A simpler implementation would be like the one below (check it on the Go Playground: https://play.golang.org/p/TYxxsDRt5Wu).
package main

import (
	"fmt"
	"sync"
	"time"
)

type Error struct {
	message string
}

func (e Error) Error() string {
	return e.message
}

func main() {
	var wg sync.WaitGroup
	waitGroupLength := 8
	errChannel := make(chan error, 1)

	// Set up the waitgroup to match the number of goroutines we'll launch off
	wg.Add(waitGroupLength)
	finished := make(chan bool, 1) // this along with wg.Wait() is why the error handling works and doesn't deadlock
	for i := 0; i < waitGroupLength; i++ {
		go func(i int) {
			fmt.Printf("Go routine %d executed\n", i+1)
			time.Sleep(time.Duration(waitGroupLength - i))
			time.Sleep(0) // only here so the time import is needed
			if i%4 == 1 {
				errChannel <- Error{fmt.Sprintf("Errored on routine %d", i+1)}
			}
			// Mark the wait group as done so it does not hang
			wg.Done()
		}(i)
	}

	go func() {
		wg.Wait()
		close(finished)
	}()

L:
	for {
		select {
		case <-finished:
			break L // this will break from the loop
		case err := <-errChannel:
			if err != nil {
				fmt.Println("error ", err)
				// handle your error
			}
		}
	}
	fmt.Println("Executed all go routines")
}