Channel deadlock on workerpool - go

I am playing around with channels by making a worker pool of 1000 workers. Currently I am getting the following error:
fatal error: all goroutines are asleep - deadlock!
Here is my code:
package main

import "fmt"
import "time"

func worker(id int, jobs <-chan int, results chan<- int) {
    for j := range jobs {
        fmt.Println("worker", id, "started job", j)
        time.Sleep(time.Second)
        fmt.Println("worker", id, "finished job", j)
        results <- j * 2
    }
}

func main() {
    jobs := make(chan int, 100)
    results := make(chan int, 100)
    for w := 1; w <= 1000; w++ {
        go worker(w, jobs, results)
    }
    for j := 1; j < 1000000; j++ {
        jobs <- j
    }
    close(jobs)
    fmt.Println("==========CLOSED==============")
    for i := 0; i < len(results); i++ {
        <-results
    }
}
Why is this happening? I am still new to go and I am hoping to make sense of this.

While Thomas' answer is basically correct, I'm posting my version, which is IMO better Go and also works with unbuffered channels:
func main() {
    jobs := make(chan int)
    results := make(chan int)

    var wg sync.WaitGroup

    // You could initialize the WaitGroup's count here with a single call, but that
    // is error prone - if you change the loop's size you could forget to change
    // the WaitGroup's count. So call wg.Add inside the loop instead.
    //wg.Add(1000)

    for w := 1; w <= 1000; w++ {
        wg.Add(1)
        // Pass w as a parameter so each goroutine gets its own copy
        // (closing over the loop variable is a bug before Go 1.22).
        go func(w int) {
            defer wg.Done()
            worker(w, jobs, results)
        }(w)
    }

    go func() {
        for j := 1; j < 2000; j++ {
            jobs <- j
        }
        close(jobs)
        fmt.Println("==========CLOSED==============")
    }()

    // In this goroutine we wait until all "producer" routines are done,
    // then close the results channel so that the consumer loop stops.
    go func() {
        wg.Wait()
        close(results)
    }()

    for i := range results {
        fmt.Print(i, " ")
    }
    fmt.Println("==========DONE==============")
}

The problem is that your channels are filling up. The main() routine tries to put all jobs into the jobs channel before reading any results. But the results channel only has space for 100 results before any write to the channel will block, so all the workers will eventually block waiting for space in this channel – space that will never come, because main() has not started reading from results yet.
To quickly fix this, you can either make jobs big enough to hold all jobs, so the main() function can continue to the reading phase; or you can make results big enough to hold all results, so the workers can output their results without blocking.
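For instance, a minimal sketch of that quick fix: these two lines replace the `make` calls in the question's main. The sizes simply match the 999,999 jobs sent by the question's loop; either buffer alone would be enough to unblock things.
    jobs := make(chan int, 999999)    // big enough for every job the loop sends
    results := make(chan int, 999999) // big enough for every result the workers produce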
A nicer approach is to make another goroutine to fill up the jobs queue, so main() can go straight to reading results:
func main() {
    jobs := make(chan int, 100)
    results := make(chan int, 100)
    for w := 1; w <= 1000; w++ {
        go worker(w, jobs, results)
    }
    go func() {
        for j := 1; j < 1000000; j++ {
            jobs <- j
        }
        close(jobs)
        fmt.Println("==========CLOSED==============")
    }()
    for i := 1; i < 1000000; i++ {
        <-results
    }
}
Note that I had to change the final for loop to a fixed number of iterations, otherwise it might terminate before all the results have been read.
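To spell out why: len() on a channel reports only how many values are buffered at that instant, not how many will ever arrive, so the original read loop can exit early. Compare:
    // Unreliable: len(results) is re-evaluated each iteration and only counts
    // values currently sitting in the buffer, not results still to come.
    for i := 0; i < len(results); i++ {
        <-results
    }

    // Reliable: read exactly as many results as jobs were sent.
    for i := 1; i < 1000000; i++ {
        <-results
    }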

The following code:
    for j := 1; j < 1000000; j++ {
        jobs <- j
    }
should run in a separate goroutine, since all the workers will block waiting for the main goroutine to receive on the results channel, while the main goroutine is stuck in the loop.

package main

import (
    "fmt"
    "sync"
    "time"
)

func worker(id int, jobs <-chan int, results chan<- int, wg *sync.WaitGroup) {
    defer wg.Done()
    for j := range jobs {
        fmt.Println("worker", id, "started job", j)
        time.Sleep(time.Millisecond * time.Duration(10))
        fmt.Println("worker", id, "finished job", j)
        results <- j * 2
    }
}

func main() {
    jobs := make(chan int, 100)
    results := make(chan int, 100)
    wg := new(sync.WaitGroup)
    wg.Add(1000)
    for w := 1; w <= 1000; w++ {
        go worker(w, jobs, results, wg)
    }
    go func() {
        wg.Wait()
        close(results)
    }()
    go func() {
        for j := 1; j < 1000000; j++ {
            jobs <- j
        }
        close(jobs)
    }()
    sum := 0
    for v := range results {
        sum += v
    }
    fmt.Println("==========CLOSED==============")
    fmt.Println("sum", sum)
}

Related

How to make Go channel worker have different result's length?

I made some edits to the worker pool example from gobyexample:
package main

import (
    "fmt"
    "math/rand"
    "time"
)

type DemoResult struct {
    Name string
    Rate int
}

func random(min, max int) int {
    rand.Seed(time.Now().UTC().UnixNano())
    return rand.Intn(max-min) + min
}

func worker(id int, jobs <-chan int, results chan<- DemoResult) {
    for j := range jobs {
        fmt.Println("worker", id, "started job", j)
        time.Sleep(time.Second)
        fmt.Println("worker", id, "finished job", j)
        myrand := random(1, 4)
        if myrand == 2 {
            results <- DemoResult{Name: "succ", Rate: j}
        }
        // else {
        //  results <- DemoResult{Name: "failed", Rate: 999}
        // }
    }
}

func main() {
    const numJobs = 5
    jobs := make(chan int, numJobs)
    results := make(chan DemoResult)
    for w := 1; w <= 3; w++ {
        go worker(w, jobs, results)
    }
    for j := 1; j <= numJobs; j++ {
        jobs <- j
    }
    close(jobs)
    for a := 1; a <= numJobs; a++ {
        out := <-results
        if out.Name == "succ" {
            fmt.Printf("%v\n", out)
        }
    }
}
I commented out the following code intentionally, to make it get stuck forever:
// else {
// results <- DemoResult{Name: "failed", Rate: 999}
// }
It seems like the number of results has to be the same as the number of jobs. I was wondering if we could make it have a different length?
Use a wait group to detect when the workers are done. Close the results channel when the workers are done. Receive results until the channel is closed.
func worker(wg *sync.WaitGroup, id int,
    jobs <-chan int,
    results chan<- DemoResult) {

    // Decrement wait group counter on return from
    // function.
    defer wg.Done()
    ⋮
}

func main() {
    ⋮
    // Declare wait group and increment counter for
    // each worker.
    var wg sync.WaitGroup
    for w := 1; w <= 3; w++ {
        wg.Add(1)
        go worker(&wg, w, jobs, results)
    }
    ⋮
    // Wait for workers to decrement wait group
    // counter to zero and close channel.
    // Execute in goroutine so we can continue on
    // to receiving values from results in main.
    go func() {
        wg.Wait()
        close(results)
    }()
    ⋮
    // Loop until results is closed.
    for out := range results {
        ⋮
    }
}
https://go.dev/play/p/FOQwybMl7tM
I was wondering if we could make it have a different length?
Absolutely, but you need some way of determining when you have reached the end of the results. This is the reason your example fails: currently the function assumes there will be numJobs results (one per job) and waits for that many.
An alternative would be to use the channel's closure to indicate this, i.e. (playground):
package main

import (
    "fmt"
    "math/rand"
    "sync"
    "time"
)

type DemoResult struct {
    Name string
    Rate int
}

func random(min, max int) int {
    rand.Seed(time.Now().UTC().UnixNano())
    return rand.Intn(max-min) + min
}

func worker(id int, jobs <-chan int, results chan<- DemoResult) {
    for j := range jobs {
        fmt.Println("worker", id, "started job", j)
        time.Sleep(time.Second)
        fmt.Println("worker", id, "finished job", j)
        myrand := random(1, 4)
        if myrand == 2 {
            results <- DemoResult{Name: "succ", Rate: j}
        } // else {
        //  results <- DemoResult{Name: "failed", Rate: 999}
        // }
    }
}

func main() {
    const numWorkers = 3
    const numJobs = 5
    jobs := make(chan int, numJobs)
    results := make(chan DemoResult)

    var wg sync.WaitGroup
    wg.Add(numWorkers)
    for w := 1; w <= numWorkers; w++ {
        w := w // capture the loop variable (needed before Go 1.22)
        go func() {
            worker(w, jobs, results)
            wg.Done()
        }()
    }
    go func() {
        wg.Wait() // Wait for goroutines to complete, then close the results channel
        close(results)
    }()
    for j := 1; j <= numJobs; j++ {
        jobs <- j
    }
    close(jobs)
    for out := range results {
        if out.Name == "succ" {
            fmt.Printf("%v\n", out)
        }
    }
}

Deadlocks with buffered channels in Go

For the code below I am encountering: fatal error: all goroutines are asleep - deadlock!
Am I right in using a buffered channel? I would appreciate any pointers; I am unfortunately at the end of my wits.
func main() {
    valueChannel := make(chan int, 2)
    defer close(valueChannel)
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go doNothing(&wg, valueChannel)
    }
    for {
        v, ok := <-valueChannel
        if !ok {
            break
        }
        fmt.Println(v)
    }
    wg.Wait()
}

func doNothing(wg *sync.WaitGroup, numChan chan int) {
    defer wg.Done()
    time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
    numChan <- 12
}
The main goroutine blocks on <- valueChannel after receiving all values. Close the channel to unblock the main goroutine.
func main() {
    valueChannel := make(chan int, 2)
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go doNothing(&wg, valueChannel)
    }

    // Close channel after goroutines complete.
    go func() {
        wg.Wait()
        close(valueChannel)
    }()

    // Receive values until channel is closed.
    // The for / range loop here does the same
    // thing as the for loop in the question.
    for v := range valueChannel {
        fmt.Println(v)
    }
}
Run the example on the playground.
The code above works independent of the number of values sent by the goroutines.
If the main() function can determine the number of values sent by the goroutines, then receive that number of values from main():
func main() {
    const n = 10
    valueChannel := make(chan int, 2)
    for i := 0; i < n; i++ {
        go doNothing(valueChannel)
    }

    // Each call to doNothing sends one value. Receive
    // one value for each call to doNothing.
    for i := 0; i < n; i++ {
        fmt.Println(<-valueChannel)
    }
}

func doNothing(numChan chan int) {
    time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
    numChan <- 12
}
Run the example on the playground.
The main problem is in the for loop that receives from the channel.
The comma-ok idiom works slightly differently on channels: ok reports whether the received value was actually sent on the channel (true) or is a zero value returned because the channel is closed and empty (false).
In this case the receive is waiting for more data to be sent, but the goroutines have already finished sending their ten values: deadlock.
So, leaving the overall design of the code aside, if I just need to make the smallest possible change, here it is:
func main() {
    valueChannel := make(chan int, 2)
    defer close(valueChannel)
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go doNothing(&wg, valueChannel)
    }
    for i := 0; i < 10; i++ {
        v := <-valueChannel
        fmt.Println(v)
    }
    wg.Wait()
}

func doNothing(wg *sync.WaitGroup, numChan chan int) {
    defer wg.Done()
    time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
    numChan <- 12
}
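To see the comma-ok behaviour described above in isolation, here is a minimal sketch (the channel and values are only illustrative):
package main

import "fmt"

func main() {
    ch := make(chan int, 1)
    ch <- 42
    close(ch)

    v, ok := <-ch // ok == true: 42 was actually sent before the close
    fmt.Println(v, ok)

    v, ok = <-ch // ok == false: channel is closed and drained, v is the zero value 0
    fmt.Println(v, ok)
}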

How to properly use channels to control concurrency?

I'm new to concurrency in Go and I'm trying to figure out how to use channels to control concurrency. What I would like to do is have a loop where I can call a function in a new goroutine and continue looping while that function runs, and I would like to limit the number of goroutines running at once to 3. My first attempt was the code below:
func write(val int, ch chan bool) {
    fmt.Println("Processing:", val)
    time.Sleep(2 * time.Second)
    ch <- val%3 == 0
}

func main() {
    ch := make(chan bool, 3) // limit to 3 routines?
    for i := 0; i < 10; i++ {
        go write(i, ch)
        resp := <-ch
        fmt.Println("Divisible by 3:", resp)
    }
    time.Sleep(20 * time.Second)
}
I was under the impression that this would basically make calls to write 3 at a time and then hold off on processing the next 3 until the first 3 had finished. Based on what is logged, it appears to only be processing one at a time. The code can be found and executed here.
What would I need to change in this example to get the functionality that I described above?
The problem here is very simple:
for i := 0; i < 10; i++ {
    go write(i, ch)
    resp := <-ch
    fmt.Println("Divisible by 3:", resp)
}
You spin up a goroutine, then wait for it to respond, before you continue around the loop and spin up the next goroutine. They can't run in parallel because you never run two of them at the same time.
To fix this, you need to spin up all 10 goroutines, then wait on all 10 responses (playground):
for i := 0; i < 10; i++ {
    go write(i, ch)
}
for i := 0; i < 10; i++ {
    resp := <-ch
    fmt.Println("Divisible by 3:", resp)
}
Now you do have 7 goroutines blocking on the channel—but it's so brief that you can't see it happening, so the output won't be very interesting. If you try adding a Processed message at the end of the goroutine, and sleeping between each channel read, you'll see that 3 of them finish immediately (well, after waiting 2 seconds), and then the others unblock and finish one by one (playground).
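The modification described above would look something like this sketch (not the exact playground code the answer links to; the extra Println in write and the reader's sleep are the point):
package main

import (
    "fmt"
    "time"
)

func write(val int, ch chan bool) {
    fmt.Println("Processing:", val)
    time.Sleep(2 * time.Second)
    ch <- val%3 == 0
    fmt.Println("Processed:", val) // printed only once the send has gone through
}

func main() {
    ch := make(chan bool, 3)
    for i := 0; i < 10; i++ {
        go write(i, ch)
    }
    for i := 0; i < 10; i++ {
        fmt.Println("Divisible by 3:", <-ch)
        time.Sleep(time.Second) // slow the reader down so the blocking is visible
    }
}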
Another way to run goroutines in parallel and wait for all of them to return a value on the channel is to use a wait group. It also helps to synchronize goroutines. If you need all goroutines to finish before executing another function, a wait group is the better approach.
package main

import (
    "fmt"
    "sync"
    "time"
)

func write(val int, wg *sync.WaitGroup, ch chan bool) {
    defer wg.Done()
    fmt.Println("Processing:", val)
    time.Sleep(2 * time.Second)
    ch <- val%3 == 0
}

func main() {
    wg := &sync.WaitGroup{}
    ch := make(chan bool, 3)
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go write(i, wg, ch)
    }
    for i := 0; i < 10; i++ {
        fmt.Println("Divisible by 3: ", <-ch)
    }
    close(ch)
    wg.Wait()
    time.Sleep(20 * time.Second)
}
Playground example

In Golang, how to handle many goroutines with channel

I'm thinking of starting 1000 goroutines at the same time using a for loop in Go.
The problem is: I have to make sure that every goroutine has finished executing.
Is it possible to use channels to help me make sure of that?
The structure is kinda like this:
func main() {
    for i ... {
        go ...
        ch?
        ch?
    }
}
As #Andy mentioned, you can use sync.WaitGroup to achieve this. Below is an example; hopefully the code is self-explanatory.
package main

import (
    "fmt"
    "sync"
    "time"
)

func dosomething(millisecs int64, wg *sync.WaitGroup) {
    defer wg.Done()
    duration := time.Duration(millisecs) * time.Millisecond
    time.Sleep(duration)
    fmt.Println("Function in background, duration:", duration)
}

func main() {
    arr := []int64{200, 400, 150, 600}
    var wg sync.WaitGroup
    for _, n := range arr {
        wg.Add(1)
        go dosomething(n, &wg)
    }
    wg.Wait()
    fmt.Println("Done")
}
To make sure the goroutines are done and collect the results, try this example:
package main

import (
    "fmt"
)

const max = 1000

func main() {
    for i := 1; i <= max; i++ {
        go f(i)
    }
    sum := 0
    for i := 1; i <= max; i++ {
        sum += <-ch
    }
    fmt.Println(sum) // 500500
}

func f(n int) {
    // do some job here and return the result:
    ch <- n
}

var ch = make(chan int, max)
In order to wait for 1000 goroutines to finish, try this example:
package main

import (
    "fmt"
    "sync"
)

func main() {
    wg := &sync.WaitGroup{}
    for i := 0; i < 1000; i++ {
        wg.Add(1)
        go f(wg, i)
    }
    wg.Wait()
    fmt.Println("Done.")
}

func f(wg *sync.WaitGroup, n int) {
    defer wg.Done()
    fmt.Print(n, " ")
}
I would suggest that you follow a pattern. Concurrency and channels are good, but if you use them badly your program might become even slower than expected. The simple way to handle multiple goroutines and channels is the worker pool pattern.
Take a close look at the code below
// In this example we'll look at how to implement
// a _worker pool_ using goroutines and channels.
package main

import "fmt"
import "time"

// Here's the worker, of which we'll run several
// concurrent instances. These workers will receive
// work on the `jobs` channel and send the corresponding
// results on `results`. We'll sleep a second per job to
// simulate an expensive task.
func worker(id int, jobs <-chan int, results chan<- int) {
    for j := range jobs {
        fmt.Println("worker", id, "started job", j)
        time.Sleep(time.Second)
        fmt.Println("worker", id, "finished job", j)
        results <- j * 2
    }
}

func main() {

    // In order to use our pool of workers we need to send
    // them work and collect their results. We make 2
    // channels for this.
    jobs := make(chan int, 100)
    results := make(chan int, 100)

    // This starts up 3 workers, initially blocked
    // because there are no jobs yet.
    for w := 1; w <= 3; w++ {
        go worker(w, jobs, results)
    }

    // Here we send 5 `jobs` and then `close` that
    // channel to indicate that's all the work we have.
    for j := 1; j <= 5; j++ {
        jobs <- j
    }
    close(jobs)

    // Finally we collect all the results of the work.
    for a := 1; a <= 5; a++ {
        <-results
    }
}
This simple example is taken from Go by Example. The results channel can also help you keep track of all the goroutines executing the jobs, including failure notices.
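For instance, here is a minimal sketch of carrying failure notices on the results channel; the jobResult type and the even-numbers-fail rule are made up for illustration, not part of the quoted example:
package main

import "fmt"

// jobResult is a hypothetical result type: Err reports whether the job failed.
type jobResult struct {
    Job int
    Val int
    Err error
}

func worker(id int, jobs <-chan int, results chan<- jobResult) {
    for j := range jobs {
        if j%2 == 0 { // pretend even-numbered jobs fail
            results <- jobResult{Job: j, Err: fmt.Errorf("job %d failed", j)}
            continue
        }
        results <- jobResult{Job: j, Val: j * 2}
    }
}

func main() {
    jobs := make(chan int, 5)
    results := make(chan jobResult, 5)
    for w := 1; w <= 3; w++ {
        go worker(w, jobs, results)
    }
    for j := 1; j <= 5; j++ {
        jobs <- j
    }
    close(jobs)
    for a := 1; a <= 5; a++ {
        r := <-results
        if r.Err != nil {
            fmt.Println("failure notice:", r.Err)
            continue
        }
        fmt.Println("job", r.Job, "result", r.Val)
    }
}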

Go: channel many slow API queries into single SQL transaction

I wonder what the idiomatic way to do the following would be.
I have N slow API queries and one database connection. I want to have a buffered channel where responses will come in, and one database transaction which I will use to write the data.
I could only come up with a semaphore approach, as in the following made-up example:
func myFunc() {
    // 10 concurrent API calls
    sem := make(chan bool, 10)
    // A concurrency-safe map as buffer
    var myMap MyConcurrentMap
    for i := 0; i < N; i++ {
        sem <- true
        go func(i int) {
            defer func() { <-sem }()
            resp := slowAPICall(fmt.Sprintf("http://slow-api.me?%d", i))
            myMap.Put(resp)
        }(i)
    }
    for j := 0; j < cap(sem); j++ {
        sem <- true
    }

    tx, _ := db.Begin()
    for data := range myMap {
        tx.Exec("Insert data into database")
    }
    tx.Commit()
}
I am nearly sure there is a simpler, cleaner and more proper solution, but it seems complicated for me to grasp.
EDIT:
Well, I came up with the following solution. This way I do not need the buffer map: once data arrives on the resp channel it is printed, or could be used to insert into a database. It works, though I am still not sure everything is OK; at least there are no races.
package main

import (
    "fmt"
    "math/rand"
    "sync"
    "time"
)

// Global WaitGroup
var wg sync.WaitGroup

func init() {
    // just for fun's sake, seed rand
    rand.Seed(time.Now().UnixNano())
}

// Emulate a slow API call
func verySlowAPI(id int) int {
    n := rand.Intn(5)
    time.Sleep(time.Duration(n) * time.Second)
    return n
}

func main() {
    // Amount of tasks
    N := 100
    // Concurrency level
    concur := 10

    // Channel for tasks
    tasks := make(chan int, N)
    // Channel for responses
    resp := make(chan int, 10)

    // 10 concurrent goroutines
    wg.Add(concur)
    for i := 1; i <= concur; i++ {
        go worker(tasks, resp)
    }

    // Add tasks
    for i := 0; i < N; i++ {
        tasks <- i
    }

    // Collect data from goroutines
    for i := 0; i < N; i++ {
        fmt.Printf("%d\n", <-resp)
    }

    // close the tasks channel
    close(tasks)
    // wait till finished
    wg.Wait()
}

func worker(task chan int, resp chan<- int) {
    defer wg.Done()
    for {
        task, ok := <-task
        if !ok {
            return
        }
        n := verySlowAPI(task)
        resp <- n
    }
}
There's no need to use channels as a semaphore; sync.WaitGroup was made for waiting for a set of goroutines to complete.
If you're using the channel to limit throughput, you're better off with a worker pool, and using the channel to pass jobs to the workers:
type job struct {
    i int
}

func myFunc(N int) {
    // Adjust as needed for total number of tasks
    work := make(chan job, 10)
    // res being whatever type slowAPICall returns
    results := make(chan res, 10)
    resBuff := make([]res, 0, N)

    wg := new(sync.WaitGroup)

    // 10 concurrent API calls
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            for j := range work {
                resp := slowAPICall(fmt.Sprintf("http://slow-api.me?%d", j.i))
                results <- resp
            }
            wg.Done()
        }()
    }

    go func() {
        for r := range results {
            resBuff = append(resBuff, r)
        }
    }()

    for i := 0; i < N; i++ {
        work <- job{i}
    }
    close(work)
    wg.Wait()
    close(results)
}
Maybe this will work for you. Now you can get rid of your concurrent map. Here is a code snippet:
func myFunc() {
    // 10 concurrent API calls
    sem := make(chan bool, 10)
    respCh := make(chan YOUR_RESP_TYPE, 10)
    var responses []YOUR_RESP_TYPE
    for i := 0; i < N; i++ {
        sem <- true
        go func(i int) {
            defer func() {
                <-sem
            }()
            resp := slowAPICall(fmt.Sprintf("http://slow-api.me?%d", i))
            respCh <- resp
        }(i)
    }
    respCollected := make(chan struct{})
    go func() {
        for i := 0; i < N; i++ {
            responses = append(responses, <-respCh)
        }
        close(respCollected)
    }()
    <-respCollected

    tx, _ := db.Begin()
    for _, data := range responses {
        tx.Exec("Insert data into database")
    }
    tx.Commit()
}
Then we use one more goroutine that collects all responses from the response channel into a slice (or a map).
