Get responses from multiple go routines into an array - go

I need to fetch responses from multiple goroutines and put them into an array. I know that channels could be used for this; however, I am not sure how to make sure that all goroutines have finished processing the results. Thus I am using a WaitGroup.
Code
func main() {
	log.Info("Collecting ints")
	var wg sync.WaitGroup
	var results []int32
	for _, broker := range e.BrokersByBrokerID {
		wg.Add(1)
		go getInt32(&wg, broker)
	}
	wg.Wait()
	log.Info("Collected")
}
func getInt32(wg *sync.WaitGroup, broker *sarama.Broker) (int32, error) {
defer wg.Done()
// Just to show that this method may just return an error and no int32
err := broker.Open(config)
if err != nil && err != sarama.ErrAlreadyConnected {
return 0, fmt.Errorf("Cannot connect to broker '%v': %s", broker.ID(), err)
}
defer broker.Close()
return 1003, nil
}
My question
How can I put all the response int32 values (where any call may instead return an error) into my int32 array, making sure that all goroutines have finished their processing work and returned either the error or the int?

If you don't process the return values of the function launched as a goroutine, they are discarded. See What happens to return value from goroutine.
You may use a slice to collect the results, where each goroutine could receive the index to put the results at, or alternatively the address of the element. See Can I concurrently write different slice elements. Note that if you use this, the slice must be pre-allocated and each goroutine may write only its own element; you can't "touch" other elements and you can't append to the slice. A sketch of this approach follows below.
Or you may use a channel, on which the goroutines send values that include the index or ID of the item they processed, so the collecting goroutine can identify or order them. See How to collect values from N goroutines executed in a specific order?
If processing should stop on the first error encountered, see Close multiple goroutine if an error occurs in one in go
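To illustrate the slice approach, here is a minimal sketch (my addition, not taken from the linked answers): the slice is pre-allocated and each goroutine writes only the element at its own index.
package main

import (
	"fmt"
	"sync"
)

func main() {
	tasks := []int32{1, 2, 3, 4}
	// Pre-allocated: len(tasks) elements, one slot per goroutine.
	results := make([]int32, len(tasks))
	var wg sync.WaitGroup
	for i, task := range tasks {
		wg.Add(1)
		go func(i int, task int32) {
			defer wg.Done()
			results[i] = task * 2 // each goroutine writes only its own element
		}(i, task)
	}
	wg.Wait()            // after Wait returns, all writes are visible here
	fmt.Println(results) // [2 4 6 8]
}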
Here's an example of how it could look when using a channel. Note that no WaitGroup is needed here, because we know we expect exactly as many values on the channel as goroutines we launch.
type result struct {
task int32
data int32
err error
}
func main() {
tasks := []int32{1, 2, 3, 4}
ch := make(chan result)
for _, task := range tasks {
go calcTask(task, ch)
}
// Collect results:
results := make([]result, len(tasks))
for i := range results {
results[i] = <-ch
}
fmt.Printf("Results: %+v\n", results)
}
func calcTask(task int32, ch chan<- result) {
if task > 2 {
// Simulate failure
ch <- result{task: task, err: fmt.Errorf("task %v failed", task)}
return
}
// Simulate success
ch <- result{task: task, data: task * 2, err: nil}
}
Output (try it on the Go Playground):
Results: [{task:4 data:0 err:0x40e130} {task:1 data:2 err:<nil>} {task:2 data:4 err:<nil>} {task:3 data:0 err:0x40e138}]
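Since the goroutines finish in arbitrary order, the collected slice is unordered, as the output above shows. If ordered results are needed, one option (my addition, using only the standard sort package) is to sort by the task field after collecting:
	sort.Slice(results, func(i, j int) bool {
		return results[i].task < results[j].task
	})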

I also believe you have to use a channel; it could look something like this:
package main
import (
"fmt"
"log"
"sync"
)
var (
BrokersByBrokerID = []int32{1, 2, 3}
)
type result struct {
data string
err string // you must use error type here
}
func main() {
var wg sync.WaitGroup
var results []result
ch := make(chan result)
for _, broker := range BrokersByBrokerID {
wg.Add(1)
go getInt32(ch, &wg, broker)
}
	// NOTE: this collector goroutine is not synchronized with the final
	// log.Printf below, so main may read results before the last append
	// happens; the follow-up answer below fixes this with a second WaitGroup.
	go func() {
		for v := range ch {
			results = append(results, v)
		}
	}()
wg.Wait()
close(ch)
log.Printf("collected %v", results)
}
func getInt32(ch chan result, wg *sync.WaitGroup, broker int32) {
defer wg.Done()
if broker == 1 {
ch <- result{err: fmt.Sprintf("error: gor broker 1")}
return
}
ch <- result{data: fmt.Sprintf("broker %d - ok", broker)}
}
Result will look like this:
2019/02/05 15:26:28 collected [{broker 3 - ok } {broker 2 - ok } { error: got broker 1}]

package main
import (
"fmt"
"log"
"sync"
)
var (
BrokersByBrokerID = []int{1, 2, 3, 4}
)
func main() {
var wg sync.WaitGroup
var results []int
ch := make(chan int)
	done := make(chan bool, 1) // buffered, so the send below cannot block after main stops receiving
for _, broker := range BrokersByBrokerID {
wg.Add(1)
go func(i int) {
defer wg.Done()
ch <- i
if i == 4 {
done <- true
}
}(broker)
}
L:
for {
select {
case v := <-ch:
results = append(results, v)
if len(results) == 4 {
//<-done
close(ch)
break L
}
		case <-done:
			// nothing else to do; the loop exits via the labeled break above
}
}
fmt.Println("STOPPED")
//<-done
wg.Wait()
log.Printf("collected %v", results)
}

Thanks cn007b and Edenshaw; my answer is based on their answers.
As Edenshaw commented, you need another sync.WaitGroup for the goroutine that collects results from the channel, or you may get an incomplete array.
package main
import (
"fmt"
"sync"
"encoding/json"
)
type Resp struct {
id int
}
func main() {
var wg sync.WaitGroup
chanRes := make(chan interface{}, 3)
for i := 0; i < 3; i++ {
wg.Add(1)
resp := &Resp{}
go func(i int, resp *Resp) {
defer wg.Done()
resp.id = i
chanRes <- resp
}(i, resp)
}
res := make([]interface{}, 0)
var wg2 sync.WaitGroup
wg2.Add(1)
go func() {
defer wg2.Done()
for v := range chanRes {
res = append(res, v.(*Resp).id)
}
}()
wg.Wait()
close(chanRes)
wg2.Wait()
resStr, _ := json.Marshal(res)
fmt.Println(string(resStr))
}
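Since chanRes above is buffered with capacity 3 and exactly 3 values are sent, no sender can ever block, so a variant without the second goroutine and WaitGroup is also possible. A sketch (note this only works because the buffer is at least as large as the number of sends):
package main

import (
	"fmt"
	"sync"
)

type Resp struct {
	id int
}

func main() {
	var wg sync.WaitGroup
	chanRes := make(chan *Resp, 3) // buffer >= number of sends, so sends never block
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			chanRes <- &Resp{id: i}
		}(i)
	}
	wg.Wait()      // every send has landed in the buffer
	close(chanRes) // safe: there are no more senders
	var res []int
	for v := range chanRes {
		res = append(res, v.id)
	}
	fmt.Println(res)
}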

package main
import (
	"fmt"
	"log"
	"sync"
)
var (
BrokersByBrokerID = []int{1, 2, 3, 4}
)
func main() {
	var wg sync.WaitGroup
var results []int
ch := make(chan int)
done := make(chan bool)
for _, broker := range BrokersByBrokerID {
wg.Add(1)
go func(i int) {
defer wg.Done()
ch <- i
if i == 4 {
done <- true
}
}(broker)
}
for v := range ch {
results = append(results, v)
if len(results) == 4 {
close(ch)
}
}
fmt.Println("STOPPED")
<-done
wg.Wait()
log.Printf("collected %v", results)
}

Related

go routine panics when i return multiple errors

I am playing around with worker pools and I want to consolidate all errors from the worker pool before I return the error. I've written a sample program, but I am running into a deadlock.
What am I trying to achieve?
A client sends 100 requests; I want to first add those requests to a job queue and dispatch them to n goroutines that do the tasks in the background. If there are any errors, I want to accumulate them all before I send them to the client. I have written a snippet; can someone explain what's wrong and how to mitigate the deadlock?
package main
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/apex/log"
"github.com/hashicorp/go-multierror"
)
type Manager struct {
taskChan chan int
wg *sync.WaitGroup
QuitChan chan bool
ErrorChan chan error
busyWorkers int64
}
func main() {
fmt.Println("Hello, 世界")
m := New()
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()
for i := 0; i < 3; i++ {
m.wg.Add(1)
go m.run(ctx, test)
}
for i := 1; i < 5; i++ {
m.taskChan <- i
}
close(m.taskChan)
go func(*Manager) {
if len(m.taskChan) == 0 {
m.QuitChan <- true
}
}(m)
var errors error
for {
select {
case err := <-m.ErrorChan:
errors = multierror.Append(errors, err)
if m.busyWorkers == int64(0) {
break
}
default:
fmt.Println("hello")
}
}
m.wg.Wait()
fmt.Println(errors)
}
func New() *Manager {
return &Manager{taskChan: make(chan int),
wg: new(sync.WaitGroup),
QuitChan: make(chan bool),
ErrorChan: make(chan error),
}
}
func (m *Manager) run(ctx context.Context, fn func(a, b int) error) {
defer m.wg.Done()
defer fmt.Println("finished working")
for {
select {
case t, ok := <-m.taskChan:
if ok {
atomic.AddInt64(&m.busyWorkers, 1)
err := fn(t, t)
if err != nil {
m.ErrorChan <- err
}
atomic.AddInt64(&m.busyWorkers, -1)
}
case <-ctx.Done():
log.Infof("closing channel %v", ctx.Err())
return
case <-m.QuitChan:
return
}
}
}
// this can return error or not, this is the main driver func, but i'm propagating
//errors so that i can understand where i am going wrong
func test(a, b int) error {
fmt.Println(a, b)
return fmt.Errorf("dummy error %v", a)
}
You have 3 workers who all return errors.
Your main thread tries to put four jobs in the queue. Once the first 3 have been taken by your workers, the main thread is stuck waiting for a worker to receive on taskChan, and all 3 of your workers are stuck trying to send data on ErrorChan.
In other words: deadlock.
Maybe you wanted to make taskChan a buffered channel? That way you can send data on it until the buffer is full without blocking.
taskChan: make(chan int, 10)
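Building on that suggestion, here is a sketch of the constructor with both channels buffered (buffering ErrorChan is my addition, so workers do not block on reporting errors either):
func New() *Manager {
	return &Manager{
		taskChan:  make(chan int, 10),   // buffered: main can enqueue all jobs without blocking
		wg:        new(sync.WaitGroup),
		QuitChan:  make(chan bool),
		ErrorChan: make(chan error, 10), // buffered: workers can report errors without blocking
	}
}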

Concurrency issues with crawler

I'm trying to build a concurrent crawler based on the Go Tour and some other SO answers on the topic. What I have currently is below, but I think I have two subtle issues here.
Sometimes I get 16 URLs in the response and sometimes 17 (debug print in main). I know it because when I change WriteToSlice to Read, then in Read sometimes 'Read: end, counter = ' is never reached, and it's always when I get 16 URLs.
I have trouble with the err channel: I get no messages in this channel, even when I run my main Crawl method with an address like www.golang.org, so without a valid schema an error should be sent via the err channel.
Concurrency is a really difficult topic; help and advice will be appreciated.
package main
import (
"fmt"
"net/http"
"sync"
"golang.org/x/net/html"
)
type urlCache struct {
urls map[string]struct{}
sync.Mutex
}
func (v *urlCache) Set(url string) bool {
v.Lock()
defer v.Unlock()
_, exist := v.urls[url]
v.urls[url] = struct{}{}
return !exist
}
func newURLCache() *urlCache {
return &urlCache{
urls: make(map[string]struct{}),
}
}
type results struct {
data chan string
err chan error
}
func newResults() *results {
return &results{
data: make(chan string, 1),
err: make(chan error, 1),
}
}
func (r *results) close() {
close(r.data)
close(r.err)
}
func (r *results) WriteToSlice(s *[]string) {
for {
select {
case data := <-r.data:
*s = append(*s, data)
case err := <-r.err:
fmt.Println("e ", err)
}
}
}
func (r *results) Read() {
fmt.Println("Read: start")
counter := 0
for c := range r.data {
fmt.Println(c)
counter++
}
fmt.Println("Read: end, counter = ", counter)
}
func crawl(url string, depth int, wg *sync.WaitGroup, cache *urlCache, res *results) {
defer wg.Done()
if depth == 0 || !cache.Set(url) {
return
}
response, err := http.Get(url)
if err != nil {
res.err <- err
return
}
defer response.Body.Close()
node, err := html.Parse(response.Body)
if err != nil {
res.err <- err
return
}
urls := grablUrls(response, node)
res.data <- url
for _, url := range urls {
wg.Add(1)
go crawl(url, depth-1, wg, cache, res)
}
}
func grablUrls(resp *http.Response, node *html.Node) []string {
var f func(*html.Node) []string
var results []string
f = func(n *html.Node) []string {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key != "href" {
continue
}
link, err := resp.Request.URL.Parse(a.Val)
if err != nil {
continue
}
results = append(results, link.String())
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
f(c)
}
return results
}
res := f(node)
return res
}
// Crawl ...
func Crawl(url string, depth int) []string {
wg := &sync.WaitGroup{}
output := &[]string{}
visited := newURLCache()
results := newResults()
defer results.close()
wg.Add(1)
go crawl(url, depth, wg, visited, results)
go results.WriteToSlice(output)
// go results.Read()
wg.Wait()
return *output
}
func main() {
r := Crawl("https://www.golang.org", 2)
// r := Crawl("www.golang.org", 2) // no schema, error should be generated and send via err
fmt.Println(len(r))
}
Both your questions 1 and 2 are a result of the same bug.
In Crawl() you are not waiting for this goroutine to finish: go results.WriteToSlice(output). On the last crawl() call, the wait group is released, and the output is returned and printed before the WriteToSlice goroutine is finished with the data and err channels. So what has happened is this:
crawl() finishes, placing data in results.data and results.err.
The WaitGroup's Wait() unblocks, causing main() to print the length of the result []string.
WriteToSlice adds the last data (or err) item to the slice.
You need to return from Crawl() not only when the data is done being written to the channel, but also when the channel is done being read in its entirety (including the buffer). A good way to do this is to close channels when you are sure you are done with them. By organizing your code this way, you can block on the goroutine that is draining the channels, and instead of using the wait group to release to main, you wait until the channels are 100% done.
You can see this gobyexample https://gobyexample.com/closing-channels. Remember that when you close a channel, the channel can still be used until the last item is taken. So you can close a buffered channel, and the reader will still get all the items that were queued in the channel.
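A tiny illustration of that property (my addition, nothing beyond the standard library):
package main

import "fmt"

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	close(ch) // the two buffered values remain readable after close
	for v := range ch {
		fmt.Println(v) // prints 1, then 2, then the loop exits
	}
}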
There is some code structure that can change to make this cleaner, but here is a quick way to fix your program. Change Crawl to block on WriteToSlice. Close the data channel when the crawl function finishes, and wait for WriteToSlice to finish.
// Crawl ...
func Crawl(url string, depth int) []string {
wg := &sync.WaitGroup{}
output := &[]string{}
visited := newURLCache()
results := newResults()
go func() {
wg.Add(1)
go crawl(url, depth, wg, visited, results)
wg.Wait()
// All data is written, this makes `WriteToSlice()` unblock
close(results.data)
}()
// This will block until results.data is closed
results.WriteToSlice(output)
close(results.err)
return *output
}
Then on write to slice, you have to check for the closed channel to exit the for loop:
func (r *results) WriteToSlice(s *[]string) {
for {
select {
case data, open := <-r.data:
if !open {
return // All data done
}
*s = append(*s, data)
case err := <-r.err:
fmt.Println("e ", err)
}
}
}
Here is the full code: https://play.golang.org/p/GBpGk-lzrhd (it won't work in the playground)

Golang program hangs without finishing execution

I have the following golang program:
package main
import (
"fmt"
"net/http"
"time"
)
var urls = []string{
"http://www.google.com/",
"http://golang.org/",
"http://yahoo.com/",
}
type HttpResponse struct {
url string
response *http.Response
err error
status string
}
func asyncHttpGets(url string, ch chan *HttpResponse) {
client := http.Client{}
if url == "http://www.google.com/" {
time.Sleep(500 * time.Millisecond) //google is down
}
fmt.Printf("Fetching %s \n", url)
resp, err := client.Get(url)
u := &HttpResponse{url, resp, err, "fetched"}
ch <- u
fmt.Println("sent to chan")
}
func main() {
fmt.Println("start")
ch := make(chan *HttpResponse, len(urls))
for _, url := range urls {
go asyncHttpGets(url, ch)
}
for i := range ch {
fmt.Println(i)
}
fmt.Println("Im done")
}
Run it on Playground
However, when I run it, it hangs (i.e. the last part, which ought to print "Im done", doesn't run).
Here's the terminal output:
$ go run get.go
start
Fetching http://yahoo.com/
Fetching http://golang.org/
Fetching http://www.google.com/
sent to chan
&{http://www.google.com/ 0xc820144120 fetched}
sent to chan
&{http://golang.org/ 0xc82008b710 fetched}
sent to chan
&{http://yahoo.com/ 0xc82008b7a0 fetched}
The problem is that ranging over a channel in a for loop will continue forever unless the channel is closed. If you want to read precisely len(urls) values from the channel, you should loop that many times:
for i := 0; i < len(urls); i++ {
fmt.Println(<-ch)
}
Another good, slightly devious trick is to use a sync.WaitGroup: increment it per goroutine, then monitor it with Wait in a separate goroutine that closes your channel once everything is done, allowing the next block of code to run. The reason I am offering this approach is that it avoids using a static number in the loop, like len(urls), so you can have a dynamic slice that might change.
The reason Wait and close are in their own goroutine is so that your code can reach the for loop that ranges over your channel.
package main
import (
"fmt"
"net/http"
"time"
"sync"
)
var urls = []string{
"http://www.google.com/",
"http://golang.org/",
"http://yahoo.com/",
}
type HttpResponse struct {
url string
response *http.Response
err error
status string
}
func asyncHttpGets(url string, ch chan *HttpResponse, wg *sync.WaitGroup) {
client := http.Client{}
if url == "http://www.google.com/" {
time.Sleep(500 * time.Millisecond) //google is down
}
fmt.Printf("Fetching %s \n", url)
resp, err := client.Get(url)
u := &HttpResponse{url, resp, err, "fetched"}
ch <- u
fmt.Println("sent to chan")
wg.Done()
}
func main() {
fmt.Println("start")
ch := make(chan *HttpResponse, len(urls))
var wg sync.WaitGroup
for _, url := range urls {
wg.Add(1)
go asyncHttpGets(url, ch, &wg)
}
go func() {
wg.Wait()
close(ch)
}()
for i := range ch {
fmt.Println(i)
}
fmt.Println("Im done")
}

How do I handle errors in a worker pool using WaitGroup?

I have a problem using sync.WaitGroup and select together. If you take a look at the following HTTP request pool, you will notice that if an error occurs it will never be reported: once wg.Wait() returns, there is no read from the errors channel anymore, so the workers' sends on it block forever.
package pool
import (
"fmt"
"log"
"net/http"
"sync"
)
var (
MaxPoolQueue = 100
MaxPoolWorker = 10
)
type Pool struct {
wg *sync.WaitGroup
queue chan *http.Request
errors chan error
}
func NewPool() *Pool {
return &Pool{
wg: &sync.WaitGroup{},
queue: make(chan *http.Request, MaxPoolQueue),
errors: make(chan error),
}
}
func (p *Pool) Add(r *http.Request) {
p.wg.Add(1)
p.queue <- r
}
func (p *Pool) Run() error {
for i := 0; i < MaxPoolWorker; i++ {
go p.doWork()
}
select {
case err := <-p.errors:
return err
default:
p.wg.Wait()
}
return nil
}
func (p *Pool) doWork() {
for r := range p.queue {
fmt.Printf("Request to %s\n", r.Host)
p.wg.Done()
_, err := http.DefaultClient.Do(r)
if err != nil {
		log.Fatal(err) // note: log.Fatal exits the program, so the send below is never reached
p.errors <- err
} else {
fmt.Printf("no error\n")
}
}
}
Source can be found here
How can I still use WaitGroup but also get errors from go routines?
Just got the answer myself as I wrote the question, and as I think it is an interesting case I would like to share it with you.
The trick to using sync.WaitGroup and a channel together is that we wrap:
select {
case err := <-p.errors:
	return err
default:
	p.wg.Wait()
}
Together in a for loop:
for {
select {
case err := <-p.errors:
return err
default:
	p.wg.Wait()
}
}
In this case select will always check for errors and wait if nothing happens :)
It looks a bit like the fail-fast mechanism enabled by the Tomb library (Tomb V2 GoDoc):
The tomb package handles clean goroutine tracking and termination.
If any of the tracked goroutines returns a non-nil error, or the Kill or Killf method is called by any goroutine in the system (tracked or not), the tomb Err is set, Alive is set to false, and the Dying channel is closed to flag that all tracked goroutines are supposed to willingly terminate as soon as possible.
Once all tracked goroutines terminate, the Dead channel is closed, and Wait unblocks and returns the first non-nil error presented to the tomb via a result or an explicit Kill or Killf method call, or nil if there were no errors.
You can see an example in this playground:
(extract)
// start runs all the given functions concurrently
// until either they all complete or one returns an
// error, in which case it returns that error.
//
// The functions are passed a channel which will be closed
// when the function should stop.
func start(funcs []func(stop <-chan struct{}) error) error {
var tomb tomb.Tomb
var wg sync.WaitGroup
allDone := make(chan struct{})
// Start all the functions.
for _, f := range funcs {
f := f
wg.Add(1)
go func() {
defer wg.Done()
if err := f(tomb.Dying()); err != nil {
tomb.Kill(err)
}
}()
}
// Start a goroutine to wait for them all to finish.
go func() {
wg.Wait()
close(allDone)
}()
// Wait for them all to finish, or one to fail
select {
case <-allDone:
case <-tomb.Dying():
}
tomb.Done()
return tomb.Err()
}
A simpler implementation would be like below. (Check in play.golang: https://play.golang.org/p/TYxxsDRt5Wu)
package main
import "fmt"
import "sync"
import "time"
type Error struct {
message string
}
func (e Error) Error() string {
return e.message
}
func main() {
var wg sync.WaitGroup
waitGroupLength := 8
errChannel := make(chan error, 1)
// Setup waitgroup to match the number of go routines we'll launch off
wg.Add(waitGroupLength)
finished := make(chan bool, 1) // this along with wg.Wait() are why the error handling works and doesn't deadlock
for i := 0; i < waitGroupLength; i++ {
go func(i int) {
fmt.Printf("Go routine %d executed\n", i+1)
time.Sleep(time.Duration(waitGroupLength - i))
time.Sleep(0) // only here so the time import is needed
if i%4 == 1 {
errChannel <- Error{fmt.Sprintf("Errored on routine %d", i+1)}
}
// Mark the wait group as Done so it does not hang
wg.Done()
}(i)
}
go func() {
wg.Wait()
close(finished)
}()
L:
for {
select {
case <-finished:
break L // this will break from loop
case err := <-errChannel:
if err != nil {
fmt.Println("error ", err)
// handle your error
}
}
}
fmt.Println("Executed all go routines")
}
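As an aside, the golang.org/x/sync/errgroup package now packages up this same wait-for-all, return-first-error bookkeeping; below is a minimal sketch using its documented API (my addition, not part of the original answers):
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	for i := 0; i < 3; i++ {
		i := i // capture loop variable (needed before Go 1.22)
		g.Go(func() error {
			select {
			case <-ctx.Done():
				// another task already failed; stop early
				return ctx.Err()
			default:
			}
			if i == 1 {
				return fmt.Errorf("task %d failed", i)
			}
			return nil
		})
	}
	// Wait blocks until all tasks return, then yields the first non-nil error.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}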

Go: One producer many consumers

So I have seen a lot of ways of implementing one consumer and many producers in Go - the classic fanIn function from the Concurrency in Go talk.
What I want is a fanOut function. It takes as a parameter a channel it reads a value from and returns a slice of channels that it writes copies of this value to.
Is there a correct/recommended way of implementing this?
You pretty much described the best way to do it but here is a small sample of code that does it.
Go playground: https://play.golang.org/p/jwdtDXVHJk
package main
import (
"fmt"
"time"
)
func producer(iters int) <-chan int {
c := make(chan int)
go func() {
for i := 0; i < iters; i++ {
c <- i
time.Sleep(1 * time.Second)
}
close(c)
}()
return c
}
func consumer(cin <-chan int) {
for i := range cin {
fmt.Println(i)
}
}
func fanOut(ch <-chan int, size, lag int) []chan int {
cs := make([]chan int, size)
	for i := range cs {
		// The size of the channel's buffer controls how far the receivers
		// of the fanOut channels can lag behind the other channels.
		cs[i] = make(chan int, lag)
	}
}
go func() {
for i := range ch {
for _, c := range cs {
c <- i
}
}
for _, c := range cs {
// close all our fanOut channels when the input channel is exhausted.
close(c)
}
}()
return cs
}
func fanOutUnbuffered(ch <-chan int, size int) []chan int {
cs := make([]chan int, size)
	for i := range cs {
		// Unbuffered channels: a send blocks until the receiver takes the
		// value, so no fan-out channel can lag behind the others.
		cs[i] = make(chan int)
	}
}
go func() {
for i := range ch {
for _, c := range cs {
c <- i
}
}
for _, c := range cs {
// close all our fanOut channels when the input channel is exhausted.
close(c)
}
}()
return cs
}
func main() {
c := producer(10)
chans := fanOutUnbuffered(c, 3)
go consumer(chans[0])
go consumer(chans[1])
consumer(chans[2])
}
The important part to note is how we close the output channels once the input channel has been exhausted. Also, if one of the output channels blocks on a send, it will hold up the sends on the other output channels. We control the amount of lag by setting the buffer size of the channels.
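For instance, swapping the buffered variant into main above lets each consumer lag up to lag values behind the others (a usage sketch of the fanOut function defined earlier):
	c := producer(10)
	chans := fanOut(c, 3, 4) // 3 output channels, each buffered to allow a lag of 4
	go consumer(chans[0])
	go consumer(chans[1])
	consumer(chans[2])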
This solution below is a bit contrived, but it works for me:
package main
import (
"fmt"
"time"
"crypto/rand"
"encoding/binary"
)
func handleNewChannels(arrchangen chan [](chan uint32),
intchangen chan (chan uint32)) {
currarr := []chan uint32{}
arrchangen <- currarr
for {
newchan := <-intchangen
currarr = append(currarr, newchan)
arrchangen <- currarr
}
}
func sendToChannels(arrchangen chan [](chan uint32)) {
tick := time.Tick(1 * time.Second)
currarr := <-arrchangen
for {
select {
case <-tick:
sent := false
var n uint32
binary.Read(rand.Reader, binary.LittleEndian, &n)
for i := 0 ; i < len(currarr) ; i++ {
currarr[i] <- n
sent = true
}
if sent {
fmt.Println("Sent generated ", n)
}
case newarr := <-arrchangen:
currarr = newarr
}
}
}
func handleChannel(tchan chan uint32) {
for {
val := <-tchan
fmt.Println("Got the value ", val)
}
}
func createChannels(intchangen chan (chan uint32)) {
othertick := time.Tick(5 * time.Second)
for {
<-othertick
fmt.Println("Creating new channel! ")
newchan := make(chan uint32)
intchangen <- newchan
go handleChannel(newchan)
}
}
func main() {
arrchangen := make(chan [](chan uint32))
intchangen := make(chan (chan uint32))
go handleNewChannels(arrchangen, intchangen)
go sendToChannels(arrchangen)
createChannels(intchangen)
}
First, see the related questions What is the neatest idiom for producer/consumer in Go? and One thread showing interest in another thread (consumer / producer). Also, take a look at the producer-consumer problem. About concurrency, see how to achieve concurrency in Google Go.
We can handle multiple consumers without making a copy of the channel data for each consumer.
Go playground: https://play.golang.org/p/yOKindnqiZv
package main
import (
"fmt"
"sync"
)
type data struct {
msg string
consumers int
}
func main() {
ch := make(chan *data) // both block or non-block are ok
var wg sync.WaitGroup
consumerCount := 3 // specify no. of consumers
producer := func() {
obj := &data {
msg: "hello everyone!",
consumers: consumerCount,
}
ch <- obj
}
consumer := func(idx int) {
defer wg.Done()
obj := <-ch
fmt.Printf("consumer %d received data %v\n", idx, obj)
obj.consumers--
if obj.consumers > 0 {
ch <- obj // forward to others
} else {
fmt.Printf("last receiver: %d\n", idx)
}
}
go producer()
	for i := 1; i <= consumerCount; i++ {
wg.Add(1)
go consumer(i)
}
wg.Wait()
}
