On IoT devices, Go applications are running that can receive commands from the cloud. The commands are pushed onto a queue
var queue chan time.Time
and workers on the IoT device process the queue.
The job of a worker is to send data covering a period of time back to the cloud; the time on the channel is the start time of such a period. The IoT devices are on a mobile network connection, so sometimes data gets lost and never arrives at the cloud. The cloud also doesn't know whether the command it sent arrived on the IoT device, so it can get impatient and resend the command.
I want to make sure that if the original command is still in the queue, the same command cannot be pushed onto the queue again. Is there a way to do that?
func addToQueue(periodStart time.Time) error {
    if alreadyOnQueue(queue, periodStart) {
        return errors.New("periodStart was already on the queue, not adding it again")
    }
    queue <- periodStart
    return nil
}

func alreadyOnQueue(queue chan time.Time, t time.Time) bool {
    return false // todo
}
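A channel itself cannot be inspected, so one common way to implement alreadyOnQueue is to track queued values in a set that sits next to the channel. A minimal sketch, assuming a buffered queue; the enqueued map, the mu mutex, and markDone are illustrative names, not from the original code:

package main

import (
    "errors"
    "sync"
    "time"
)

var (
    queue    = make(chan time.Time, 100)
    mu       sync.Mutex
    enqueued = make(map[time.Time]struct{}) // values currently on the queue
)

func addToQueue(periodStart time.Time) error {
    mu.Lock()
    defer mu.Unlock()
    if _, ok := enqueued[periodStart]; ok {
        return errors.New("periodStart was already on the queue, not adding it again")
    }
    enqueued[periodStart] = struct{}{}
    queue <- periodStart // blocks if the buffer is full, while the mutex is held
    return nil
}

// markDone is called by a worker once the period has been processed
// (or, in this scenario, once the cloud has confirmed receipt).
func markDone(periodStart time.Time) {
    mu.Lock()
    defer mu.Unlock()
    delete(enqueued, periodStart)
}

func main() {
    _ = addToQueue(time.Now())
}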
I've created a solution that is available at https://github.com/munnik/uniqueue/:
package uniqueue

import (
    "errors"
    "sync"
)

// UQ is a uniqueue queue. It guarantees that a value is only once in the queue. The queue is thread safe.
// The unique constraint can be temporarily disabled to add multiple instances of the same value to the queue.
type UQ[T comparable] struct {
    back        chan T
    queue       chan T
    front       chan T
    constraints map[T]*constraint
    mu          sync.Mutex

    AutoRemoveConstraint bool // if true, the constraint will be removed when the value is popped from the queue
}

type constraint struct {
    count    uint // number of elements in the queue
    disabled bool
}

func NewUQ[T comparable](size uint) *UQ[T] {
    u := &UQ[T]{
        back:        make(chan T),
        queue:       make(chan T, size),
        front:       make(chan T),
        constraints: map[T]*constraint{},
    }
    go u.linkChannels()
    return u
}

// Back returns the back of the queue; this channel can be used to write values to.
func (u *UQ[T]) Back() chan<- T {
    return u.back
}

// Front returns the front of the queue; this channel can be used to read values from.
func (u *UQ[T]) Front() <-chan T {
    return u.front
}

// IgnoreConstraintFor ignores the constraint for a value v once; when the value is added to the queue again, the constraint is enabled again.
func (u *UQ[T]) IgnoreConstraintFor(v T) {
    u.mu.Lock()
    defer u.mu.Unlock()

    if _, ok := u.constraints[v]; !ok {
        u.constraints[v] = &constraint{}
    }
    u.constraints[v].disabled = true
}

// AddConstraint manually adds a constraint to the queue; only use this in special cases when you want to prevent certain values from entering the queue.
func (u *UQ[T]) AddConstraint(v T) error {
    u.mu.Lock()
    defer u.mu.Unlock()

    if _, ok := u.constraints[v]; !ok {
        u.constraints[v] = &constraint{
            count:    1,
            disabled: false,
        }
        return nil
    }
    if u.constraints[v].disabled {
        u.constraints[v].count += 1
        u.constraints[v].disabled = false
        return nil
    }
    return errors.New("already existing constraint prevents adding new constraint")
}

// RemoveConstraint manually removes a constraint from the queue; this needs to be called when AutoRemoveConstraint is set to false. Useful when you want to remove the constraint only when a worker using the queue has finished processing the value.
func (u *UQ[T]) RemoveConstraint(v T) {
    u.mu.Lock()
    defer u.mu.Unlock()

    if _, ok := u.constraints[v]; ok {
        u.constraints[v].count -= 1
        if u.constraints[v].count == 0 {
            delete(u.constraints, v)
        }
    }
}

func (u *UQ[T]) linkChannels() {
    wg := &sync.WaitGroup{}
    wg.Add(2)
    go u.shiftToFront(wg)
    go u.readFromBack(wg)
    wg.Wait()
}

// shiftToFront moves values from the internal buffer to the front channel.
func (u *UQ[T]) shiftToFront(wg *sync.WaitGroup) {
    for v := range u.queue {
        u.front <- v
        if u.AutoRemoveConstraint {
            u.RemoveConstraint(v)
        }
    }
    close(u.front)
    wg.Done()
}

// readFromBack accepts values on the back channel and drops any value whose constraint is still active.
func (u *UQ[T]) readFromBack(wg *sync.WaitGroup) {
    for v := range u.back {
        if err := u.AddConstraint(v); err == nil {
            u.queue <- v
        }
    }
    close(u.queue)
    wg.Done()
}
I am building a daemon and I have two services that will be sending data to and from each other. Service A is what produces the data, and Service B is a data buffer service, like a queue. So from the main.go file, Service B is instantiated and started. The Start() method runs the buffer() function as a goroutine, because this function waits for data to be passed onto a channel and I don't want the main process to halt waiting for buffer to complete. Then Service A is instantiated and started. It is then also "registered" with Service B.
I created a method called RegisterWithBufferService for Service A that creates two new channels. It will store those channels as its own attributes and also provide them to Service B.
func (s *ServiceA) RegisterWithBufferService(bufService *data.DataBuffer) error {
    newIncomingChan := make(chan *data.DataFrame, 1)
    newOutgoingChan := make(chan []byte, 1)
    s.IncomingBuffChan = newIncomingChan
    s.OutgoingDataChannels = append(s.OutgoingDataChannels, newOutgoingChan)
    bufService.DataProviders[s.ServiceName()] = data.DataProviderInfo{
        IncomingChan: newOutgoingChan, // our outgoing channel is their incoming
        OutgoingChan: newIncomingChan, // our incoming channel is their outgoing
    }
    s.DataBufferService = bufService
    bufService.NewProvider <- s.ServiceName() // the DataBuffer service listens for new services and creates a new goroutine for buffering
    s.Logger.Info().Msg("Registration completed.")
    return nil
}
Buffer essentially listens for incoming data from Service A, decodes it using Decode(), and then appends it to a slice called buf. If the slice grows longer than bufferPeriod, it sends the first item in the slice on the outgoing channel back to Service A.
func (b *DataBuffer) buffer(bufferPeriod int) {
    for {
        select {
        case newProvider := <-b.NewProvider:
            b.wg.Add(1)
            // newProvider is a string.
            // DataProviders is a map; the value it returns is a struct containing
            // the incoming and outgoing channels for this service.
            p := b.DataProviders[newProvider]
            go func(prov string, in chan []byte, out chan *DataFrame) {
                defer b.wg.Done()
                var buf []*DataFrame
                for {
                    select {
                    case rawData := <-in:
                        tmp := Decode(rawData) // custom decoding function, returns a *DataFrame
                        buf = append(buf, tmp)
                        if len(buf) > bufferPeriod {
                            b.Logger.Info().Msg("Sending decoded data out.")
                            out <- buf[0]
                            buf = buf[1:] // pop
                        }
                    case <-b.Quit:
                        return
                    }
                }
            }(newProvider, p.IncomingChan, p.OutgoingChan)
        case <-b.Quit:
            return
        }
    }
}
Now Service A has a method called record that will periodically push data to all the channels in its OutgoingDataChannels attribute.
func (s *ServiceA) record() error {
    ...
    if atomic.LoadInt32(&s.Listeners) != 0 {
        s.Logger.Info().Msg("Sending raw data to data buffer")
        for _, outChan := range s.OutgoingDataChannels {
            outChan <- dataBytes // the receiver (Service B) is already listening and this doesn't hang
        }
        s.Logger.Info().Msg("Raw data sent and received") // the logger outputs this, so I know it's not hanging
    }
}
The problem is that Service A seems to push the data successfully using record, but Service B never enters the case rawData := <-in: case in the buffer sub-goroutine. Is this because I have nested goroutines? In case it's not clear: when Service B is started, it calls buffer, but because it would hang otherwise, I made the call to buffer a goroutine. So when Service A calls RegisterWithBufferService, the buffer goroutine creates a goroutine that listens for new data from Service A and pushes it back to Service A once the buffer is filled. I hope I explained it clearly.
EDIT 1
I've made a minimal, reproducible example.
package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

var (
    defaultBufferingPeriod int   = 3
    DefaultPollingInterval int64 = 10
)

type DataObject struct {
    Data string
}

type DataProvider interface {
    RegisterWithBufferService(*DataBuffer) error
    ServiceName() string
}

type DataProviderInfo struct {
    IncomingChan chan *DataObject
    OutgoingChan chan *DataObject
}

type DataBuffer struct {
    Running       int32 // used atomically
    DataProviders map[string]DataProviderInfo
    Quit          chan struct{}
    NewProvider   chan string
    wg            sync.WaitGroup
}

func NewDataBuffer() *DataBuffer {
    var (
        wg sync.WaitGroup
    )
    return &DataBuffer{
        DataProviders: make(map[string]DataProviderInfo),
        Quit:          make(chan struct{}),
        NewProvider:   make(chan string),
        wg:            wg,
    }
}

func (b *DataBuffer) Start() error {
    if ok := atomic.CompareAndSwapInt32(&b.Running, 0, 1); !ok {
        return fmt.Errorf("Could not start Data Buffer Service.")
    }
    go b.buffer(defaultBufferingPeriod)
    return nil
}

func (b *DataBuffer) Stop() error {
    if ok := atomic.CompareAndSwapInt32(&b.Running, 1, 0); !ok {
        return fmt.Errorf("Could not stop Data Buffer Service.")
    }
    for _, p := range b.DataProviders {
        close(p.IncomingChan)
        close(p.OutgoingChan)
    }
    close(b.Quit)
    b.wg.Wait()
    return nil
}

// buffer creates goroutines for each incoming/outgoing data pair and decodes the incoming bytes into outgoing DataFrames
func (b *DataBuffer) buffer(bufferPeriod int) {
    for {
        select {
        case newProvider := <-b.NewProvider:
            fmt.Println("Received new Data provider.")
            if _, ok := b.DataProviders[newProvider]; ok {
                b.wg.Add(1)
                p := b.DataProviders[newProvider]
                go func(prov string, in chan *DataObject, out chan *DataObject) {
                    defer b.wg.Done()
                    var (
                        buf []*DataObject
                    )
                    fmt.Printf("Waiting for data from: %s\n", prov)
                    for {
                        select {
                        case inData := <-in:
                            fmt.Printf("Received data from: %s\n", prov)
                            buf = append(buf, inData)
                            if len(buf) > bufferPeriod {
                                fmt.Printf("Queue is filled, sending data back to %s\n", prov)
                                out <- buf[0]
                                fmt.Println("Data Sent")
                                buf = buf[1:] // pop
                            }
                        case <-b.Quit:
                            return
                        }
                    }
                }(newProvider, p.IncomingChan, p.OutgoingChan)
            }
        case <-b.Quit:
            return
        }
    }
}

type ServiceA struct {
    Active            int32 // atomic
    Stopping          int32 // atomic
    Recording         int32 // atomic
    Listeners         int32 // atomic
    name              string
    QuitChan          chan struct{}
    IncomingBuffChan  chan *DataObject
    OutgoingBuffChans []chan *DataObject
    DataBufferService *DataBuffer
}

// A compile time check to ensure ServiceA fully implements the DataProvider interface
var _ DataProvider = (*ServiceA)(nil)

func NewServiceA() (*ServiceA, error) {
    var newSliceOutChans []chan *DataObject
    return &ServiceA{
        QuitChan:          make(chan struct{}),
        OutgoingBuffChans: newSliceOutChans,
        name:              "SERVICEA",
    }, nil
}

// Start starts the service. Returns an error if any issues occur
func (s *ServiceA) Start() error {
    atomic.StoreInt32(&s.Active, 1)
    return nil
}

// Stop stops the service. Returns an error if any issues occur
func (s *ServiceA) Stop() error {
    atomic.StoreInt32(&s.Stopping, 1)
    close(s.QuitChan)
    return nil
}

func (s *ServiceA) StartRecording(pol_int int64) error {
    if ok := atomic.CompareAndSwapInt32(&s.Recording, 0, 1); !ok {
        return fmt.Errorf("Could not start recording. Data recording already started")
    }
    ticker := time.NewTicker(time.Duration(pol_int) * time.Second)
    go func() {
        for {
            select {
            case <-ticker.C:
                fmt.Println("Time to record...")
                err := s.record()
                if err != nil {
                    return
                }
            case <-s.QuitChan:
                ticker.Stop()
                return
            }
        }
    }()
    return nil
}

func (s *ServiceA) record() error {
    current_time := time.Now()
    ct := fmt.Sprintf("%02d-%02d-%d", current_time.Day(), current_time.Month(), current_time.Year())
    dataObject := &DataObject{
        Data: ct,
    }
    if atomic.LoadInt32(&s.Listeners) != 0 {
        fmt.Println("Sending data to Data buffer...")
        for _, outChan := range s.OutgoingBuffChans {
            outChan <- dataObject // the receivers should already be listening
        }
        fmt.Println("Data sent.")
    }
    return nil
}

// RegisterWithBufferService satisfies the DataProvider interface. It provides the bufService with new incoming and outgoing channels along with a polling interval
func (s ServiceA) RegisterWithBufferService(bufService *DataBuffer) error {
    if _, ok := bufService.DataProviders[s.ServiceName()]; ok {
        return fmt.Errorf("%v data provider already registered with Data Buffer.", s.ServiceName())
    }
    newIncomingChan := make(chan *DataObject, 1)
    newOutgoingChan := make(chan *DataObject, 1)
    s.IncomingBuffChan = newIncomingChan
    s.OutgoingBuffChans = append(s.OutgoingBuffChans, newOutgoingChan)
    bufService.DataProviders[s.ServiceName()] = DataProviderInfo{
        IncomingChan: newOutgoingChan, // our outgoing channel is their incoming
        OutgoingChan: newIncomingChan, // our incoming channel is their outgoing
    }
    s.DataBufferService = bufService
    bufService.NewProvider <- s.ServiceName() // the DataBuffer service listens for new services and creates a new goroutine for buffering
    return nil
}

// ServiceName satisfies the DataProvider interface. It returns the name of the service.
func (s ServiceA) ServiceName() string {
    return s.name
}

func main() {
    var BufferedServices []DataProvider
    fmt.Println("Instantiating and Starting Data Buffer Service...")
    bufService := NewDataBuffer()
    err := bufService.Start()
    if err != nil {
        panic(fmt.Sprintf("%v", err))
    }
    defer bufService.Stop()
    fmt.Println("Data Buffer Service successfully started.")
    fmt.Println("Instantiating and Starting Service A...")
    serviceA, err := NewServiceA()
    if err != nil {
        panic(fmt.Sprintf("%v", err))
    }
    BufferedServices = append(BufferedServices, *serviceA)
    err = serviceA.Start()
    if err != nil {
        panic(fmt.Sprintf("%v", err))
    }
    defer serviceA.Stop()
    fmt.Println("Service A successfully started.")
    fmt.Println("Registering services with Data Buffer...")
    for _, s := range BufferedServices {
        _ = s.RegisterWithBufferService(bufService) // ignoring error msgs for base case
    }
    fmt.Println("Registration complete.")
    fmt.Println("Beginning recording...")
    _ = atomic.AddInt32(&serviceA.Listeners, 1)
    err = serviceA.StartRecording(DefaultPollingInterval)
    if err != nil {
        panic(fmt.Sprintf("%v", err))
    }
    for {
        select {
        case RTD := <-serviceA.IncomingBuffChan:
            fmt.Println(RTD)
        case <-serviceA.QuitChan:
            atomic.StoreInt32(&serviceA.Listeners, 0)
            bufService.Quit <- struct{}{}
        }
    }
}
Running on Go 1.17. When the example runs, it should print the following every 10 seconds:
Time to record...
Sending data to Data buffer...
Data sent.
But the data buffer never enters the inData := <-in case.
To diagnose this I changed fmt.Println("Sending data to Data buffer...") to fmt.Println("Sending data to Data buffer...", s.OutgoingBuffChans) and the output was:
Time to record...
Sending data to Data buffer... []
So you are not actually sending the data to any channels. The reason for this is:
func (s ServiceA) RegisterWithBufferService(bufService *DataBuffer) error {
Because the receiver is not a pointer, when you do s.OutgoingBuffChans = append(s.OutgoingBuffChans, newOutgoingChan) you are changing s.OutgoingBuffChans in a copy of the ServiceA, which is discarded when the function exits. To fix this, change:
func (s ServiceA) RegisterWithBufferService(bufService *DataBuffer) error {
to
func (s *ServiceA) RegisterWithBufferService(bufService *DataBuffer) error {
and
BufferedServices = append(BufferedServices, *serviceA)
to
BufferedServices = append(BufferedServices, serviceA)
The amended version outputs:
Time to record...
Sending data to Data buffer... [0xc0000d8060]
Data sent.
Received data from: SERVICEA
Time to record...
Sending data to Data buffer... [0xc0000d8060]
Data sent.
Received data from: SERVICEA
So this resolves the reported issue (I would not be surprised if there are other issues, but hopefully this points you in the right direction). I did notice that the code you originally posted does use a pointer receiver, so it might have suffered from another issue (but it's difficult to comment on code fragments in a case like this).
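For anyone unfamiliar with the pitfall, here is a minimal, self-contained sketch (not from the original code) of why the value receiver loses the append:

package main

import "fmt"

type S struct{ items []int }

// value receiver: appends to a copy, so the caller's S is unchanged
func (s S) AddByValue(v int) { s.items = append(s.items, v) }

// pointer receiver: appends to the caller's S
func (s *S) AddByPointer(v int) { s.items = append(s.items, v) }

func main() {
    var s S
    s.AddByValue(1)
    fmt.Println(s.items) // []
    s.AddByPointer(1)
    fmt.Println(s.items) // [1]
}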
I've been searching a lot but could not find an answer to my problem yet.
I need to make multiple calls to an external API, but with different parameters, concurrently.
And then for each call I need to init a struct for each dataset and process the data I receive from the API call. Bear in mind that I read each line of the incoming request and immediately start sending it to the channel.
The first problem I encountered was not obvious at the beginning due to the large quantity of data I'm receiving: each goroutine does not receive all the data that goes through the channel, which I learned through the research I've done (a value sent on a channel is delivered to exactly one receiver, not to all of them). So what I need is a way of requeuing/redirecting that data to the correct goroutine.
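(As a quick illustration of that channel behavior, not from the original post: each value sent on a channel reaches exactly one of the competing receivers.)

package main

import (
    "fmt"
    "sync"
)

func main() {
    ch := make(chan int)
    var wg sync.WaitGroup
    for w := 1; w <= 2; w++ {
        w := w
        wg.Add(1)
        go func() {
            defer wg.Done()
            for v := range ch {
                fmt.Printf("worker %d got %d\n", w, v) // each value reaches one worker only
            }
        }()
    }
    for i := 0; i < 4; i++ {
        ch <- i
    }
    close(ch)
    wg.Wait()
}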
The function that sends the streamed response from a single dataset (I've cut parts of the code that are out of context):
func (api *API) RequestData(ctx context.Context, c chan DWeatherResponse, dataset string, wg *sync.WaitGroup) error {
    for {
        line, err := reader.ReadBytes('\n')
        s := string(line)
        if err != nil {
            log.Printf("End of %s", dataset)
            return err
        }
        data, err := extractDataFromStreamLine(s, dataset)
        if err != nil {
            continue
        }
        c <- *data
    }
}
The function that will process the incoming data
func (s *StrikeStruct) Process(ch, requeue chan dweather.DWeatherResponse) {
    for {
        data, more := <-ch
        if !more {
            break
        }
        // data contains {dataset string, value float64, date time.Time}
        // The s.Parameter needs to match the dataset.
        // IMPORTANT PART: check if the received data is part of this struct's dataset.
        // If not, I want to send it to another goroutine until it gets to the correct
        // one. There will be a max of 4 datasets, but still, this might not be the best approach.
        if !api.GetDataset(s.Parameter, data.Dataset) {
            requeue <- data
            continue
        }
        // Do stuff with the data from this point
    }
}
Now on my own API endpoint I have the following:
ch := make(chan dweather.DWeatherResponse, 2)
requeue := make(chan dweather.DWeatherResponse)
final := make(chan strike.StrikePerYearResponse)
var wg sync.WaitGroup
for _, s := range args.Parameters.Strikes {
    strike := strike.StrikePerYear{
        Parameter:   strike.Parameter(s.Dataset),
        StrikeValue: s.Value,
    }
    // I receive and process the data in here
    go strike.ProcessStrikePerYear(ch, requeue, final, string(s.Dataset))
}
go func() {
    for {
        data, _ := <-requeue
        ch <- data
    }
}()
// Creates a goroutine for each dataset
for _, dataset := range api.Params.Dataset {
    wg.Add(1)
    go api.RequestData(ctx, ch, dataset, &wg)
}
wg.Wait()
close(ch)
// Once the data is all processed it is all appended
var strikes []strike.StrikePerYearResponse
for range args.Fetch.Datasets {
    strikes = append(strikes, <-final)
}
return strikes
The issue with this code is that as soon as I start receiving data from more than one endpoint, the requeue will block and nothing more happens. If I remove the requeue logic, data will be lost if it does not land on the correct goroutine.
My two questions are:
Why is the requeue blocking if it has a goroutine always ready to receive?
Should I take a different approach on how I'm processing the incoming data?
This is not a good way to solve your problem; you should change your solution. (As for why the requeue blocks: you have a cycle. The requeue channel is unbuffered, and the forwarding goroutine sends requeued values back into ch, which has a capacity of only 2. Once ch is full and every Process goroutine is itself blocked sending to requeue, the forwarder can't send either, so nobody receives from requeue and everything stalls.) I suggest an implementation like the one below:
package main

import (
    "fmt"
    "sync"
)

// answer for https://stackoverflow.com/questions/68454226/how-to-handle-multiple-goroutines-that-share-the-same-channel

var (
    finalResult = make(chan string)
)

// IData is used by the message dispatcher; every struct must implement its methods
type IData interface {
    IsThisForMe() bool
    Process(*sync.WaitGroup)
}

// MainData can be your main struct, like StrikePerYear
type MainData struct {
    // add any props
    Id   int
    Name string
}

type DataTyp1 struct {
    MainData *MainData
}

func (d DataTyp1) IsThisForMe() bool {
    // you can check your condition on the incoming data here
    if d.MainData.Id == 2 {
        return true
    }
    return false
}

func (d DataTyp1) Process(wg *sync.WaitGroup) {
    d.MainData.Name = "processed by DataTyp1"
    // send the result to the final channel; you can change this as you want
    finalResult <- d.MainData.Name
    wg.Done()
}

type DataTyp2 struct {
    MainData *MainData
}

func (d DataTyp2) IsThisForMe() bool {
    // you can check your condition on the incoming data here
    if d.MainData.Id == 3 {
        return true
    }
    return false
}

func (d DataTyp2) Process(wg *sync.WaitGroup) {
    d.MainData.Name = "processed by DataTyp2"
    // send the result to the final channel; you can change this as you want
    finalResult <- d.MainData.Name
    wg.Done()
}

// dispatcher runs a new goroutine for each request.
// You can implement a worker pool to prevent running too many goroutines.
func dispatcher(incomingData *MainData, wg *sync.WaitGroup) {
    // based on your requirements you can keep or remove this goroutine
    go func() {
        var p IData
        p = DataTyp1{incomingData}
        if p.IsThisForMe() {
            go p.Process(wg)
            return
        }
        p = DataTyp2{incomingData}
        if p.IsThisForMe() {
            go p.Process(wg)
            return
        }
    }()
}

func main() {
    dummyDataArray := []MainData{
        {Id: 2, Name: "this data #2"},
        {Id: 3, Name: "this data #3"},
    }
    wg := sync.WaitGroup{}
    for i := range dummyDataArray {
        wg.Add(1)
        dispatcher(&dummyDataArray[i], &wg)
    }
    result := make([]string, 0)
    done := make(chan struct{})
    // data collector
    go func() {
    loop:
        for {
            select {
            case <-done:
                break loop
            case r := <-finalResult:
                result = append(result, r)
            }
        }
    }()
    wg.Wait()
    done <- struct{}{}
    for _, s := range result {
        fmt.Println(s)
    }
}
Note: this is just to open your mind to finding a better solution; for sure this is not production-ready code.
I have two goroutines:
the first one adds tasks to the queue
the second cleans up the queue based on status
Add and cleanup might not be simultaneous.
If the status of a task is success, I want to delete the task from the queue; if not, I will retry for the status to become success (with a time limit). If that fails, I will log it and delete the task from the queue.
We can't communicate between add and delete, because that is not how the real-world scenario works.
I want something like a watcher which monitors additions to the queue and does the cleanup afterwards. To increase complexity, Add might be adding even while cleanup is happening (not shown here). I want to implement it without using external packages.
How can I achieve this?
package main

import "time"

type Task struct {
    name   string
    status string // completed, failed
}

var list []*Task

func main() {
    done := make(chan bool)
    go Add()
    time.Sleep(15 * time.Second)
    go clean(done)
    <-done
}

func Add() {
    t1 := &Task{"test1", "completed"}
    t2 := &Task{"test2", "failed"}
    list = append(list, t1, t2)
}

func clean(done chan bool) {
    for k, v := range list {
        if v.status == "completed" {
            list = RemoveIndex(list, k)
        } else {
            // for now consider this as retry
            v.status = "completed"
        }
    }
    if len(list) > 0 {
        clean(done)
        return
    }
    done <- true
}

func RemoveIndex(s []*Task, index int) []*Task {
    return append(s[:index], s[index+1:]...)
}
So I found a solution that works for me, and I'm posting it here for anyone it might be helpful to.
In my main I have added a ticker that fires every x seconds to watch whether something was added to the queue.
package main

import (
    "os"
    "os/signal"
    "syscall"
    "time"
)

type Task struct {
    name   string
    status string // completed, failed
}

var list []*Task

func main() {
    done := make(chan bool)
    c := make(chan os.Signal, 2)
    ticker := time.NewTicker(5 * time.Second) // watch the queue every x seconds
    go Add()
    go func() {
        for {
            select {
            // case <-done:
            //     Cleaner()
            case <-ticker.C:
                Monitor(done)
            }
        }
    }()
    signal.Notify(c, os.Interrupt, syscall.SIGTERM)
    <-c
    // waiting for an interrupt here
}

func Add() {
    t1 := &Task{"test1", "completed"}
    t2 := &Task{"test2", "failed"}
    list = append(list, t1, t2)
}

func Monitor(done chan bool) {
    if len(list) > 0 {
        Cleaner()
    }
}

func Cleaner() {
    // do the cleaning here:
    // pop each element from the queue and delete it
}

func RemoveIndex(s []*Task, index int) []*Task {
    return append(s[:index], s[index+1:]...)
}
So now this solution does not need to depend on communication between goroutines.
In a real-world scenario the program never dies and keeps adding and cleaning based on the use case. You can optimize this further by locking and unlocking around additions to and deletions from the queue, as sketched below.
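A minimal sketch of that locking, amending the Add and Cleaner functions above (the mu mutex is illustrative, not part of the original code):

var mu sync.Mutex

func Add() {
    mu.Lock()
    defer mu.Unlock()
    list = append(list, &Task{"test1", "completed"}, &Task{"test2", "failed"})
}

func Cleaner() {
    mu.Lock()
    defer mu.Unlock()
    for len(list) > 0 {
        t := list[0] // pop the first element
        list = list[1:]
        _ = t // process / log the task here
    }
}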
I have two read-only channels <-chan Event that are used as generators.
type Event struct {
    time int
}
I can read their values as:
for {
    select {
    case <-chan1:
        // do something
    case <-chan2:
        // do something
    }
}
I use those channels for event-driven simulations, so I have to choose the Event with the smaller time field.
Is it possible to inspect which value is waiting in each channel and only then choose which one to read from? Because the operation <-chan1 takes the value from the channel, and it is impossible to push it back (the channel is read-only).
You can implement your own version of the Go channel structure. For example, the following implementation acts like a Go channel without a size limit, and you can inspect its first element:
package buffchan

import (
    "container/list"
    "sync"
)

// BufferedChannel provides a Go channel like interface with unlimited storage
type BufferedChannel struct {
    m *sync.Mutex
    l *list.List
    c *sync.Cond
}

// New creates a new buffered channel
func New() *BufferedChannel {
    m := new(sync.Mutex)
    return &BufferedChannel{
        m: m,
        l: list.New(),
        c: sync.NewCond(m),
    }
}

// Append adds the given data at the end of the channel
func (b *BufferedChannel) Append(v interface{}) {
    b.m.Lock()
    defer b.m.Unlock()

    b.l.PushBack(v)
    b.c.Signal()
}

// Remove removes and returns the first element of the list synchronously, blocking while the list is empty
func (b *BufferedChannel) Remove() interface{} {
    b.m.Lock()
    defer b.m.Unlock()

    for b.l.Len() == 0 {
        b.c.Wait()
    }

    v := b.l.Front()
    b.l.Remove(v)
    return v.Value
}

// Inspect returns the first element of the list without removing it, or nil if the list is empty
func (b *BufferedChannel) Inspect() interface{} {
    b.m.Lock()
    defer b.m.Unlock()

    if b.l.Len() == 0 {
        return nil
    }
    return b.l.Front().Value
}

// AsyncNonBlocking removes and returns the first element of the list, or returns nil if the list is empty
func (b *BufferedChannel) AsyncNonBlocking() interface{} {
    b.m.Lock()
    defer b.m.Unlock()

    if b.l.Len() == 0 {
        return nil
    }

    v := b.l.Front()
    b.l.Remove(v)
    return v.Value
}
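A sketch of how this could drive the event-driven simulation (assuming producer goroutines Append incoming Events into bc1 and bc2, and a single consumer, so a peeked element cannot be stolen between Inspect and Remove; nextEvent is an illustrative helper, not part of the answer above):

// nextEvent peeks at both buffers and pops the Event with the smaller time field.
// The second return value is false if both buffers are currently empty.
func nextEvent(bc1, bc2 *buffchan.BufferedChannel) (Event, bool) {
    v1, v2 := bc1.Inspect(), bc2.Inspect()
    switch {
    case v1 == nil && v2 == nil:
        return Event{}, false
    case v2 == nil || (v1 != nil && v1.(Event).time <= v2.(Event).time):
        return bc1.Remove().(Event), true
    default:
        return bc2.Remove().(Event), true
    }
}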
I would like to broadcast data received from a channel to a list of channels. The list of channels is dynamic and can be modified during the run phase.
As a new developer in Go, I wrote this code. I find it quite heavy for what I want. Is there a better way to do this?
package utils

import "sync"

// StringChannelBroadcaster broadcasts string data from a channel to multiple channels
type StringChannelBroadcaster struct {
    Source      chan string
    Subscribers map[string]*StringChannelSubscriber
    stopChannel chan bool
    mutex       sync.Mutex
    capacity    uint64
}

// NewStringChannelBroadcaster creates a StringChannelBroadcaster
func NewStringChannelBroadcaster(capacity uint64) (b *StringChannelBroadcaster) {
    return &StringChannelBroadcaster{
        Source:      make(chan string, capacity),
        Subscribers: make(map[string]*StringChannelSubscriber),
        capacity:    capacity,
    }
}

// Dispatch starts dispatching messages
func (b *StringChannelBroadcaster) Dispatch() {
    b.stopChannel = make(chan bool)
    for {
        select {
        case val, ok := <-b.Source:
            if ok {
                b.mutex.Lock()
                for _, value := range b.Subscribers {
                    value.Channel <- val
                }
                b.mutex.Unlock()
            }
        case <-b.stopChannel:
            return
        }
    }
}

// Stop stops the Broadcaster
func (b *StringChannelBroadcaster) Stop() {
    close(b.stopChannel)
}

// StringChannelSubscriber defines a subscriber to a StringChannelBroadcaster
type StringChannelSubscriber struct {
    Key     string
    Channel chan string
}

// NewSubscriber returns a new subscriber to the StringChannelBroadcaster
func (b *StringChannelBroadcaster) NewSubscriber() *StringChannelSubscriber {
    key := RandString(20)
    newSubscriber := StringChannelSubscriber{
        Key:     key,
        Channel: make(chan string, b.capacity),
    }
    b.mutex.Lock()
    b.Subscribers[key] = &newSubscriber
    b.mutex.Unlock()
    return &newSubscriber
}

// RemoveSubscriber removes a subscriber from the StringChannelBroadcaster
func (b *StringChannelBroadcaster) RemoveSubscriber(subscriber *StringChannelSubscriber) {
    b.mutex.Lock()
    delete(b.Subscribers, subscriber.Key)
    b.mutex.Unlock()
}
Thank you,
Julien
I think you can simplify it a bit: get rid of stopChannel and the Stop method. You can just close Source instead of calling Stop, and detect that in Dispatch (ok will be false) to quit (you can actually just range over the source channel).
You can get rid of Dispatch and just start a goroutine in NewStringChannelBroadcaster with the for cycle, so external code doesn't have to start the dispatch cycle separately.
You can use a channel type as the map key, so your map can become map[chan string]struct{} (an empty struct, because you don't need the map value). Your NewSubscriber can then take a channel parameter (or create a new channel and return it) and insert it into the map; you don't need the random string or the StringChannelSubscriber type.
I also made some improvements, like closing the subscriber channels:
package main

import (
    "fmt"
    "sync"
    "time"
)
// StringChannelBroadcaster broadcasts string data from a channel to multiple channels
type StringChannelBroadcaster struct {
    Source      chan string
    Subscribers map[chan string]struct{}
    mutex       sync.Mutex
    capacity    uint64
}

// NewStringChannelBroadcaster creates a StringChannelBroadcaster
func NewStringChannelBroadcaster(capacity uint64) *StringChannelBroadcaster {
    b := &StringChannelBroadcaster{
        Source:      make(chan string, capacity),
        Subscribers: make(map[chan string]struct{}),
        capacity:    capacity,
    }
    go b.dispatch()
    return b
}

// dispatch keeps dispatching messages until Source is closed
func (b *StringChannelBroadcaster) dispatch() {
    // for iterates until the channel is closed
    for val := range b.Source {
        b.mutex.Lock()
        for ch := range b.Subscribers {
            ch <- val
        }
        b.mutex.Unlock()
    }
    b.mutex.Lock()
    for ch := range b.Subscribers {
        close(ch)
        // you shouldn't be calling RemoveSubscriber after closing b.Source,
        // but it's better to be safe than sorry
        delete(b.Subscribers, ch)
    }
    b.Subscribers = nil
    b.mutex.Unlock()
}

func (b *StringChannelBroadcaster) NewSubscriber() chan string {
    ch := make(chan string, b.capacity)
    b.mutex.Lock()
    if b.Subscribers == nil {
        panic(fmt.Errorf("NewSubscriber called on closed broadcaster"))
    }
    b.Subscribers[ch] = struct{}{}
    b.mutex.Unlock()
    return ch
}

// RemoveSubscriber removes a subscriber from the StringChannelBroadcaster
func (b *StringChannelBroadcaster) RemoveSubscriber(ch chan string) {
    b.mutex.Lock()
    if _, ok := b.Subscribers[ch]; ok {
        close(ch)                 // this close does have to be inside the if, to prevent closing a closed channel in case RemoveSubscriber is called twice on the same channel
        delete(b.Subscribers, ch) // this line doesn't need to be inside the if
    }
    b.mutex.Unlock()
}

func main() {
    b := NewStringChannelBroadcaster(0)
    var toberemoved chan string
    for i := 0; i < 3; i++ {
        i := i
        ch := b.NewSubscriber()
        if i == 1 {
            toberemoved = ch
        }
        go func() {
            for v := range ch {
                fmt.Printf("receive %v: %v\n", i, v)
            }
            fmt.Printf("Exit %v\n", i)
        }()
    }
    b.Source <- "Test 1"
    b.Source <- "Test 2"
    // This is a race condition: the second reader may or may not receive the first two messages.
    b.RemoveSubscriber(toberemoved)
    b.Source <- "Test 3"
    // let the reader goroutines receive the last message
    time.Sleep(2 * time.Second)
    close(b.Source)
    // let the reader goroutines print the exit message
    time.Sleep(1 * time.Second)
}
https://play.golang.org/p/X-NcikvbDM
Edit: I've added your edit to fix the panic when calling RemoveSubscriber after closing Source, but you shouldn't be doing that; you should let the struct and everything in it be garbage collected after the channel is closed.
I've also added a panic to NewSubscriber if it's called after closing Source. Previously you could do that, and it would leak the created channel and presumably the goroutine that would block forever on that channel.
If you can call NewSubscriber (or RemoveSubscriber) on an already closed broadcaster, that probably means there's an error in your code somewhere, since you're holding on to a broadcaster that you shouldn't be.