I wrote the following Go code.
The idea is to create a done channel and a generator that produces a channel of int,
then link them in a two-stage pipeline:
chanNumbers := pipeb(done, pipea(done, gen(done)))
After a couple of seconds, the done channel is closed.
I expect the generator and both pipeline stages to cancel and return, but the termination messages ("**pipeX terminates" / "has terminated now") only appear some of the time, and I really do not understand why.
Would someone have an idea?
package main
import (
"fmt"
"time"
)
func gen(done <-chan interface{}) <-chan int {
ret := make(chan int)
cx := 0
go func() {
for {
select {
case <-done:
fmt.Println("**Generator Terminates now")
time.Sleep(2 * time.Second)
fmt.Println("**Generator has terminated now")
close(ret)
return
case ret <- cx:
fmt.Printf("Gen : we push %d \n", cx)
cx = cx + 1
}
}
}()
fmt.Println("Generator has created and returned its channel")
return ret
}
func pipea(done <-chan interface{}, in <-chan int) <-chan int {
ret := make(chan int)
go func() {
for {
select {
case <-done:
fmt.Println("**pipeA terminates")
time.Sleep(2 * time.Second)
fmt.Println("**pipeA has terminated now")
close(ret)
return
case tmp, ok := (<-in):
if ok {
fmt.Printf("pipeA : we push %d \n", tmp)
ret <- tmp
} else {
in = nil
}
}
}
}()
return ret
}
func pipeb(done <-chan interface{}, in <-chan int) <-chan int {
ret := make(chan int)
go func() {
for {
select {
case <-done:
fmt.Println("**pipeB terminates")
time.Sleep(2 * time.Second)
fmt.Println("**pipeB has terminated now")
close(ret)
return
case tmp, ok := (<-in):
if ok {
fmt.Printf("pipeB : we push %d \n", tmp)
ret <- tmp
} else {
in = nil
}
}
}
}()
return ret
}
func main() {
done := make(chan interface{})
chanNumbers := pipeb(done, pipea(done, gen(done)))
go func() {
time.Sleep(2 * time.Second)
close(done)
}()
forloop:
for {
select {
case n := <-chanNumbers:
fmt.Printf("Received in main element : %d\n", n)
case <-done:
break forloop
}
}
//end of the main program
fmt.Println("Sleeping some seconds before termiating")
time.Sleep(8 * time.Second)
fmt.Println("exit...")
}
You have four go-routines running:
gen, your generator, writing to an unbuffered output channel until done
pipeA, reading from gen, writing to an unbuffered output channel until done
pipeB, reading from pipeA, writing to an unbuffered output channel until done
main, reading from pipeB until done
Now when you close done, it totally depends on the order in which the go-routines see that.
If main is the first to see that done is closed, it will break the for-loop and stop consuming from pipeB. But if pipeB is still trying to write to the output channel (ret <- tmp), it will block there; so it will never get to the <- done part.
There are two options to fix this:
Only listen to done in your generator, and have the other go-routines use for n := range in { }.
Put your sending logic in a select as well so your generator and pipes can detect when done is closed.
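For example, the second option applied to pipea could look something like the sketch below (gen and pipeb would need the same treatment). This is only a sketch of the idea: the send on ret is itself wrapped in a select, so a blocked send can still observe done being closed.
func pipea(done <-chan interface{}, in <-chan int) <-chan int {
	ret := make(chan int)
	go func() {
		defer close(ret)
		for {
			select {
			case <-done:
				fmt.Println("**pipeA terminates")
				return
			case tmp, ok := <-in:
				if !ok {
					return
				}
				// The send is also guarded by done, so pipeA cannot get
				// stuck here when the consumer has already stopped reading.
				select {
				case ret <- tmp:
					fmt.Printf("pipeA : we push %d \n", tmp)
				case <-done:
					fmt.Println("**pipeA terminates")
					return
				}
			}
		}
	}()
	return ret
}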
Alternatively, you might want to use buffered output channels, but even then this problem can still occur.
Related
I want to process items from a channel when the channel is full or when a certain amount of time has elapsed. My use case is similar to an existing question, and I have tried to modify the existing answer.
My code is at https://go.dev/play/p/HaGZ9HHqj0i:
package main
import (
"fmt"
"sync"
"time"
)
type Audit struct {
ID int
}
const batchSize = 5
var batch = make([]Audit, 0, batchSize)
func upsertBigQueryAudits(audits []Audit) {
fmt.Printf("Processing batch of %d\n", len(audits))
for _, a := range audits {
fmt.Printf("%d ", a.ID)
}
fmt.Println()
batch = []Audit{}
}
func process(full <-chan struct{}) {
ticker := time.NewTicker(1 * time.Nanosecond)
for {
select {
case <-full:
fmt.Println("From full")
upsertBigQueryAudits(batch)
case <-ticker.C:
fmt.Println("From ticker")
if len(batch) > 0 {
fmt.Println("From ticker sending batch")
upsertBigQueryAudits(batch)
}
}
}
}
func processAudits(audits <-chan Audit, full chan<- struct{}, batchSize int) {
for audit := range audits {
batch = append(batch, audit)
if len(batch) == cap(batch) {
// upsertBigQueryAudits(batch)
fmt.Println("Sending full")
full <- struct{}{}
}
}
}
func produceAudits(x int, to chan Audit) {
for i := 0; i < x; i++ {
to <- Audit{
ID: i,
}
}
}
func main() {
var wg sync.WaitGroup
audits := make(chan Audit)
full := make(chan struct{})
wg.Add(1)
go func() {
defer wg.Done()
process(full)
}()
wg.Add(1)
go func() {
defer wg.Done()
processAudits(audits, full, batchSize)
}()
wg.Add(1)
go func() {
defer wg.Done()
produceAudits(25, audits)
close(audits)
}()
wg.Wait()
fmt.Println("Complete")
}
Here I want to process the batch when it is filled with batchSize elements, or when a certain amount of time (for example 1 nanosecond) has elapsed.
Edit: added my concern inline.
This is a sample output:
timeout running program
Sending full <--- this line comes twice
Sending full
From full
Processing batch of 10 <--- instead of 5 it is processing 10 items
0 1 2 3 4 5 6 7 8 9
From full
Processing batch of 1
10
Sending full
Sending full
From full
Processing batch of 1 <-- instead of 5 it is processing 1 item. It never processes more than 1 item.
11
From full
Processing batch of 0
...
...
Sending full
Sending full
From full
Processing batch of 1 <-- instead of 5 it is processing 1 item
24
From full
Processing batch of 0
From ticker <-- this line comes only after consuming 25 items.
From ticker
From ticker
From ticker
I found an answer at https://elliotchance.medium.com/batch-a-channel-by-size-or-time-in-go-92fa3098f65:
package main
import (
"context"
"fmt"
"time"
)
func BatchStringsCtx[T any](ctx context.Context, values <-chan T, maxItems int, maxTimeout time.Duration) chan []T {
batches := make(chan []T)
go func() {
defer close(batches)
for keepGoing := true; keepGoing; {
var batch []T
expire := time.After(maxTimeout)
for {
select {
case <-ctx.Done():
keepGoing = false
goto done
case value, ok := <-values:
if !ok {
keepGoing = false
goto done
}
batch = append(batch, value)
if len(batch) == maxItems {
goto done
}
case <-expire:
goto done
}
}
done:
if len(batch) > 0 {
batches <- batch
}
}
}()
return batches
}
func main() {
strings := make(chan string)
go func() {
strings <- "hello"
strings <- "there" // hit limit of 2
strings <- "how"
time.Sleep(15 * time.Millisecond) // hit timeout
strings <- "are"
strings <- "you" // hit limit of 2
// A really long time without any new values.
// The context was cancelled around 300ms,
// before this sleep finished.
time.Sleep(500 * time.Millisecond)
strings <- "doing?" // never read
close(strings)
}()
//ctx, cancel := context.WithTimeout(context.Background(), 300 * time.Millisecond)
ctx := context.Background()
start := time.Now()
batches := BatchStringsCtx[string](ctx, strings, 2, 10*time.Millisecond)
for batch := range batches {
fmt.Println(time.Now().Sub(start), batch)
}
}
I am new to Go and am trying to use goroutines so that they can talk among themselves. I have some code that starts a goroutine which performs operation 1, which I'll call dancing. When it finishes, it signals another goroutine which performs operation 2, say sleeping.
You can pass a force-dance parameter to the dance goroutine, but if it is already in the dancing state, it should sleep instead.
package main
import (
"fmt"
"time"
)
func main(){
test("notdancing", true)
time.Sleep(10*time.Second)
}
func dance()error{
fmt.Println("Tapping my feet")
time.Sleep(10*time.Second)
return nil
}
func test(status string, forceDance bool) {
// This does not work when the channels are unbuffered:
// startSleep := make(chan bool)
// Why does a channel need to be given a buffer length to make this work? If I
// don't pass 1 as the second parameter, it says all goroutines are asleep (deadlock).
startdance := make(chan bool, 1)
startSleep := make(chan bool, 1)
if status == "dancing" && forceDance {
select {
case startSleep <-true:
fmt.Println("Would start to sleep now")
default:
fmt.Println("Sleep Already started. No need to force")
}
}
if status != "dancing" {
fmt.Println("Startingdance")
startdance <- true
}
go func() {
<-startdance
err := dance()
if err == nil {
select {
case startSleep <- true:
fmt.Println("Starting Sleeping, dancing completed")
default:
fmt.Println("Already started Sleeping")
}
} else {
fmt.Println("Not in a mood to dance today")
}
}()
go func() {
<-startSleep
if forceDance {
fmt.Println("Force sleep because forcing to dance while already dancing")
}
}()
}
I would highly appreciate any corrections to the code as well as the pitfalls of using this approach.
In the case of an unbuffered channel (when no size is specified), the channel cannot hold a value because it has no buffer. Therefore a receiver has to be ready at the moment the data is sent on the channel, or else the send blocks.
func main() {
startDance := make(chan bool)
startDance <- true
}
But when you specify a size in the above code (say 1), it won't deadlock, because the channel has space to hold the value. You can check out https://robertbasic.com/blog/buffered-vs-unbuffered-channels-in-golang/ and https://www.golang-book.com/books/intro/10 to get a better understanding of channels and concurrency.
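For comparison, a minimal sketch of the buffered version of the same snippet, which does not deadlock because the send completes even though no receiver is ready yet:
func main() {
	startDance := make(chan bool, 1) // buffer of 1: the send below has room
	startDance <- true               // does not block; the value sits in the buffer
}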
package main
import (
"fmt"
"time"
)
func main() {
startDance := make(chan bool)
startSleep := make(chan bool)
forceSleep := make(chan bool)
go startDance1(startDance, forceSleep, startSleep)
go performSleep(startSleep, startDance)
startDance <- true
fmt.Println("now dance is started ")
forceSleep <- true
select {}
}
func startDance1(startDance chan bool, forceSleep chan bool, startSleep chan bool) {
fmt.Println("waiting to start dance")
select {
case <-startDance:
fmt.Println("staring dance")
}
for {
select {
case <-startDance:
fmt.Println("starting dance")
case <-forceSleep:
fmt.Println("aleardy dancing going to sleep")
select {
case startSleep <- true:
default:
}
default:
// this default case is just to demonstrate the loop;
// I added it because without it this would eventually deadlock
fmt.Println("dancing")
time.Sleep(time.Second * 1)
}
}
}
func performSleep(startSleep chan bool, startDance chan bool) {
select {
case <-startSleep:
fmt.Println("staring sleep")
}
fmt.Println("sleeping for 5 seconds ")
time.Sleep(time.Second * 5)
select {
case startDance <- true:
fmt.Println("started dance")
default:
fmt.Println("failed to start dance ")
}
}
The above code is a minor improvement over yours (I tried to make it match your requirements). I would suggest going through some books to learn more about Go concurrency, e.g. https://www.golang-book.com/books/intro/10
I'm starting out with Go and I'm now writing a simple program which reads out data from a sensor and puts that into a channel to do some calculations with it. I now have it working as follows:
package main
import (
"fmt"
"time"
"strconv"
)
func get_sensor_data(c chan float64) {
time.Sleep(1 * time.Second) // wait a second before sensor data starts pouring in
c <- 2.1 // Sensor data starts being generated
c <- 2.2
c <- 2.3
c <- 2.4
c <- 2.5
}
func main() {
s := 1.1
c := make(chan float64)
go get_sensor_data(c)
for {
select {
case s = <-c:
fmt.Println("the next value of s from the channel: " + strconv.FormatFloat(s, 'f', 1, 64))
default:
// no new values in the channel
}
fmt.Println(s)
time.Sleep(500 * time.Millisecond) // Do heavy "work"
}
}
This works fine, but the sensor generates a lot of data, and I'm only ever interested in the latest value. With this setup, however, it only reads out the next item on every loop, which means that if the channel at some point contains 20 values, the newest value is only read out after 10 seconds.
Is there a way for a channel to only ever contain one value at a time, so that I always get only the data I'm interested in and no unnecessary memory is used by the channel (although the memory is the least of my worries)?
Channels are best thought of as queues (FIFO), so you can't really skip around. However, there are libraries that do things like this: https://github.com/cloudfoundry/go-diodes is an atomic ring buffer that will overwrite old data. You can set a smaller size if you like.
All that being said, it doesn't sound like you need a queue (or ring buffer). You just need a mutex:
type SensorData struct {
	mu   sync.RWMutex
	last float64
}

func (d *SensorData) Store(data float64) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.last = data
}

func (d *SensorData) Get() float64 {
	d.mu.RLock()
	defer d.mu.RUnlock()
	return d.last
}
This uses an RWMutex, which means many readers can read at the same time while only a single writer can write. It stores a single entry, much as you described.
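A minimal usage sketch of the type above (the sensor values and timings here are made up for illustration):
func main() {
	data := &SensorData{}

	// Producer: store every reading; only the most recent one is kept.
	go func() {
		for _, v := range []float64{2.1, 2.2, 2.3, 2.4, 2.5} {
			data.Store(v)
			time.Sleep(100 * time.Millisecond)
		}
	}()

	// Consumer: always reads the latest value, never a backlog.
	for i := 0; i < 5; i++ {
		fmt.Println("latest:", data.Get())
		time.Sleep(500 * time.Millisecond) // do heavy "work"
	}
}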
No. Channels are FIFO buffers, full stop. That is how channels work and their only purpose. If you only want the latest value, consider just using a single variable protected by a mutex; write to it whenever new data comes in, and whenever you read it, you will always be reading the latest value.
Channels serve a specific purpose. Here you might instead want code that takes a lock and updates a variable whenever a new value arrives.
That way the receiver always gets the latest value.
You cannot get that from one channel directly, but you can use one channel per value and get notified when there are new values:
package main
import (
"fmt"
"strconv"
"sync"
"time"
)
type LatestChannel struct {
n float64
next chan struct{}
mu sync.Mutex
}
func New() *LatestChannel {
return &LatestChannel{next: make(chan struct{})}
}
func (c *LatestChannel) Push(n float64) {
c.mu.Lock()
c.n = n
old := c.next
c.next = make(chan struct{})
c.mu.Unlock()
close(old)
}
func (c *LatestChannel) Get() (float64, <-chan struct{}) {
c.mu.Lock()
n := c.n
next := c.next
c.mu.Unlock()
return n, next
}
func getSensorData(c *LatestChannel) {
time.Sleep(1 * time.Second)
c.Push(2.1)
time.Sleep(100 * time.Millisecond)
c.Push(2.2)
time.Sleep(100 * time.Millisecond)
c.Push(2.3)
time.Sleep(100 * time.Millisecond)
c.Push(2.4)
time.Sleep(100 * time.Millisecond)
c.Push(2.5)
}
func main() {
s := 1.1
c := New()
_, hasNext := c.Get()
go getSensorData(c)
for {
select {
case <-hasNext:
s, hasNext = c.Get()
fmt.Println("the next value of s from the channel: " + strconv.FormatFloat(s, 'f', 1, 64))
default:
// no new values in the channel
}
fmt.Println(s)
time.Sleep(250 * time.Millisecond) // Do heavy "work"
}
}
If you do not need to be notified about every new value, you can look at the "channels inside channels" pattern in Go.
Try this package: https://github.com/subbuv26/chanup
It allows the producer to update the channel with the latest value, which replaces the previous one, and the producer does not get blocked (stale values get overridden).
So on the consumer side, only the latest item is ever read.
import "github.com/subbuv26/chanup"
ch := chanup.GetChan()
_ = ch.Put(testType{
a: 10,
s: "Sample",
})
_ = ch.Update(testType{
a: 20,
s: "Sample2",
})
// Continue updating with latest values
...
...
// On consumer end
val := ch.Get()
// val contains latest value
There is another way (a trick) to solve this problem:
make the sender work faster: the sender removes the oldest message from the channel whenever the channel length is greater than 1.
go func() {
for {
msg:=strconv.Itoa(int(time.Now().Unix()))
fmt.Println("make: ",msg," at:",time.Now())
messages <- msg
if len(messages)>1{
//remove old message
<-messages
}
time.Sleep(2*time.Second)
}
}()
the receiver works more slowly:
go func() {
for {
channLen :=len(messages)
fmt.Println("len is ",channLen)
fmt.Println("received",<-messages)
time.Sleep(10*time.Second)
}
}()
Alternatively, we can drop old messages from the receiver side (reading a message effectively deletes it from the channel).
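A sketch of that receiver-side variant, reusing the messages channel from the snippets above: before doing its slow work, the receiver drains anything still buffered and keeps only the newest message.
go func() {
	for {
		msg := <-messages // wait for at least one message
		// Drain whatever is still buffered; keep only the newest value.
		for len(messages) > 0 {
			msg = <-messages
		}
		fmt.Println("received", msg)
		time.Sleep(10 * time.Second)
	}
}()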
There is an elegant channel-only solution. If you're OK with adding one more channel and goroutine, you can introduce an unbuffered channel and a goroutine that tries to send the latest value from your channel on it:
package main
import (
"fmt"
"time"
)
func wrapLatest(ch <-chan int) <-chan int {
result := make(chan int) // important that this one is unbuffered
go func() {
defer close(result)
value, ok := <-ch
if !ok {
return
}
LOOP:
for {
select {
case value, ok = <-ch:
if !ok {
return
}
default:
break LOOP
}
}
for {
select {
case value, ok = <-ch:
if !ok {
return
}
case result <- value:
if value, ok = <-ch; !ok {
return
}
}
}
}()
return result
}
func main() {
sendChan := make(chan int, 10) // may be buffered or not
for i := 0; i < 10; i++ {
sendChan <- i
}
go func() {
for i := 10; i < 20; i++ {
sendChan <- i
time.Sleep(time.Second)
}
close(sendChan)
}()
recvChan := wrapLatest(sendChan)
for i := range recvChan {
fmt.Println(i)
time.Sleep(time.Second * 2)
}
}
I have the following sample code. I want to keep 4 goroutines running at all times. They have the possibility of panicking. In case of a panic, I have a recover that restarts the goroutine.
The way I implemented it works, but I am not sure whether it is the correct and proper way to do this. Any thoughts?
package main
import (
"fmt"
"time"
)
var gVar string
var pCount int
func pinger(c chan int) {
for i := 0; ; i++ {
fmt.Println("adding ", i)
c <- i
}
}
func printer(id int, c chan int) {
defer func() {
if err := recover(); err != nil {
fmt.Println("HERE", id)
fmt.Println(err)
pCount++
if pCount == 5 {
panic("TOO MANY PANICS")
} else {
go printer(id, c)
}
}
}()
for {
msg := <-c
fmt.Println(id, "- ping", msg, gVar)
if msg%5 == 0 {
panic("PANIC")
}
time.Sleep(time.Second * 1)
}
}
func main() {
var c chan int = make(chan int, 2)
gVar = "Preflight"
pCount = 0
go pinger(c)
go printer(1, c)
go printer(2, c)
go printer(3, c)
go printer(4, c)
var input string
fmt.Scanln(&input)
}
You can extract the recover logic in a function such as:
func recoverer(maxPanics, id int, f func()) {
defer func() {
if err := recover(); err != nil {
fmt.Println("HERE", id)
fmt.Println(err)
if maxPanics == 0 {
panic("TOO MANY PANICS")
} else {
go recoverer(maxPanics-1, id, f)
}
}
}()
f()
}
And then use it like:
go recoverer(5, 1, func() { printer(1, c) })
Like Zan Lynx's answer, I'd like to share another way to do it (although it's pretty similar to the OP's way). I used an additional buffered channel ch. When a goroutine panics, the recovery function inside the goroutine sends its identity i to ch. In the for loop at the bottom of main(), it detects which goroutine panicked and whether to restart it by receiving values from ch.
Run in Go Playground
package main
import (
"fmt"
"time"
)
func main() {
var pCount int
ch := make(chan int, 5)
f := func(i int) {
defer func() {
if err := recover(); err != nil {
ch <- i
}
}()
fmt.Printf("goroutine f(%v) started\n", i)
time.Sleep(1000 * time.Millisecond)
panic("goroutine in panic")
}
go f(1)
go f(2)
go f(3)
go f(4)
for {
i := <-ch
pCount++
if pCount >= 5 {
fmt.Println("Too many panics")
break
}
fmt.Printf("Detected goroutine f(%v) panic, will restart\n", i)
f(i)
}
}
Oh, I am not saying that the following is more correct than your way. It is just another way to do it.
Create another function, call it printerRecover or something like that, and do your defer/recover in there. Then, in printer, just loop on calling printerRecover. Add function return values to check whether the goroutine needs to exit for some reason.
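A rough sketch of that idea, reusing the names from the question (printerRecover and its error return are illustrative, not a fixed API):
// printerRecover does the real work and turns a panic into an error result.
func printerRecover(id int, c chan int) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("printer %d recovered: %v", id, r)
		}
	}()
	for {
		msg := <-c
		fmt.Println(id, "- ping", msg, gVar)
		if msg%5 == 0 {
			panic("PANIC")
		}
		time.Sleep(time.Second)
	}
}

func printer(id int, c chan int) {
	// Loop on the recovering wrapper; stop after too many panics.
	for panics := 0; panics < 5; panics++ {
		if err := printerRecover(id, c); err != nil {
			fmt.Println(err)
		}
	}
	fmt.Println("printer", id, "stopped after too many panics")
}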
The way you implemented it is correct. But to me, the approach of keeping exactly 4 routines running at all times doesn't look very Go-like: you have to track each routine's ID, and respawning inside a defer can lead to an unpredictable stack because of the closure. I don't think you can balance resources efficiently this way. Why not simply spawn a worker when it is needed,
func main() {
...
go func(tasks chan int){ //multiplexer
for {
task := <-tasks //when needed
go printer(task) //just spawns handler
}
}(ch)
...
}
and let the runtime do its job? This is how things are done in stdlib listeners/servers, and they are known to be efficient enough. Goroutines are very lightweight to spawn, and the runtime is quite good at balancing the load. Of course you must recover either way. This is just my personal opinion.
I'm here to find out the most idiomatic way to do the following task.
Task:
Write data from a channel to a file.
Problem:
I have a channel ch := make(chan int, 100)
I need to read from the channel and write the values I read from the channel to a file. My question is basically how do I do so given that
If channel ch is full, write the values immediately
If channel ch is not full, write every 5s.
So essentially, data needs to be written to the file at least every 5s (assuming that data will be filled into the channel at least every 5s)
What's the best way to use select, for and range to do the above task?
Thanks!
There is no such "event" as "buffer of channel is full", so you can't detect that [*]. This means you can't idiomatically solve your problem with language primitives using only 1 channel.
[*] Not entirely true: you could detect if the buffer of a channel is full by using select with default case when sending on the channel, but that requires logic from the senders, and repetitive attempts to send.
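For illustration only, this is the non-blocking send the footnote refers to (v and ch stand for a value and the channel from the question; this is not part of the recommended solution):
select {
case ch <- v:
	// the value was queued; the buffer had room
default:
	// the buffer is full (or no receiver is ready on an unbuffered
	// channel); handle the overflow here
}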
I would use another channel to receive values as they are sent, and "redirect" (store) them in an internal channel which has a buffer of 100, as you mentioned. At each redirection you may check whether the internal channel's buffer is full, and if so, do an immediate write. If not, continue to monitor the "incoming" channel and a timer channel with a select statement, and if the timer fires, do a "regular" write.
You may use len(chInternal) to check how many elements are in the chInternal channel, and cap(chInternal) to check its capacity. Note that this is "safe" as we are the only goroutine handling the chInternal channel. If there would be multiple goroutines, value returned by len(chInternal) could be outdated by the time we use it to something (e.g. comparing it).
In this solution chInternal (as its name says) is for internal use only. Others should only send values on ch. Note that ch may or may not be a buffered channel, solution works in both cases. However, you may improve efficiency if you also give some buffer to ch (so chances that senders get blocked will be lower).
var (
chInternal = make(chan int, 100)
ch = make(chan int) // You may (should) make this a buffered channel too
)
func main() {
delay := time.Second * 5
timer := time.NewTimer(delay)
for {
select {
case v := <-ch:
chInternal <- v
if len(chInternal) == cap(chInternal) {
doWrite() // Buffer is full, we need to write immediately
timer.Reset(delay)
}
case <-timer.C:
doWrite() // "Regular" write: 5 seconds have passed since last write
timer.Reset(delay)
}
}
}
If an immediate write happens (due to a "buffer full" situation), this solution will time the next "regular" write 5 seconds after this. If you don't want this and you want the 5-second regular writes be independent from the immediate writes, simply do not reset the timer following the immediate write.
An implementation of doWrite() may be as follows:
var f *os.File // Make sure to open file for writing
func doWrite() {
for {
select {
case v := <-chInternal:
fmt.Fprintf(f, "%d ", v) // Write v to the file
default: // Stop when no more values in chInternal
return
}
}
}
We can't use for ... range as that only returns when the channel is closed, but our chInternal channel is not closed. So we use a select with a default case so when no more values are in the buffer of chInternal, we return.
Improvements
Using a slice instead of a 2nd channel
Since the chInternal channel is only used by us, and only on a single goroutine, we may also choose to use a single []int slice instead of a channel (reading/writing a slice is much faster than a channel).
Showing only the different / changed parts, it could look something like this:
var (
buf = make([]int, 0, 100)
)
func main() {
// ...
for {
select {
case v := <-ch:
buf = append(buf, v)
if len(buf) == cap(buf) {
// ...
}
}
}
}
func doWrite() {
for _, v := range buf {
fmt.Fprintf(f, "%d ", v) // Write v to the file
}
buf = buf[:0] // "Clear" the buffer
}
With multiple goroutines
If we stick to leave chInternal a channel, the doWrite() function may be called on another goroutine to not block the other one, e.g. go doWrite(). Since data to write is read from a channel (chInternal), this requires no further synchronization.
If all you need is the 5-second write, then to increase file-write performance you can fill the channel any time you need, and a writer goroutine writes that data to a buffered file. See this very simple and idiomatic sample that uses no timer, just for...range:
package main
import (
"bufio"
"fmt"
"os"
"sync"
)
var wg sync.WaitGroup
func WriteToFile(filename string, ch chan int) {
f, e := os.Create(filename)
if e != nil {
panic(e)
}
w := bufio.NewWriterSize(f, 4*1024*1024)
defer wg.Done()
defer f.Close()
defer w.Flush()
for v := range ch {
fmt.Fprintf(w, "%d ", v)
}
}
func main() {
ch := make(chan int, 100)
wg.Add(1)
go WriteToFile("file.txt", ch)
for i := 0; i < 500000; i++ {
ch <- i // do the job
}
close(ch) // Finish the job and close output file
wg.Wait()
}
Notice the order of the defers: deferred calls run last-in-first-out, so w.Flush() runs before f.Close(), which runs before wg.Done().
For the 5-second case, you can add an interval timer just to flush the file's buffer to disk, like this:
package main
import (
"bufio"
"fmt"
"os"
"sync"
"time"
)
var wg sync.WaitGroup
func WriteToFile(filename string, ch chan int) {
f, e := os.Create(filename)
if e != nil {
panic(e)
}
w := bufio.NewWriterSize(f, 4*1024*1024)
ticker := time.NewTicker(5 * time.Second)
quit := make(chan struct{})
go func() {
for {
select {
case <-ticker.C:
if w.Buffered() > 0 {
fmt.Println(w.Buffered())
w.Flush()
}
case <-quit:
ticker.Stop()
return
}
}
}()
defer wg.Done()
defer f.Close()
defer w.Flush()
defer close(quit)
for v := range ch {
fmt.Fprintf(w, "%d ", v)
}
}
func main() {
ch := make(chan int, 100)
wg.Add(1)
go WriteToFile("file.txt", ch)
for i := 0; i < 25; i++ {
ch <- i // do the job
time.Sleep(500 * time.Millisecond)
}
close(ch) // Finish the job and close output file
wg.Wait()
}
Here I used time.NewTicker(5 * time.Second) as the interval timer together with a quit channel; you could also use time.AfterFunc(), time.Tick() or time.Sleep().
With some optimizations (removing the quit channel):
package main
import (
"bufio"
"fmt"
"os"
"sync"
"time"
)
var wg sync.WaitGroup
func WriteToFile(filename string, ch chan int) {
f, e := os.Create(filename)
if e != nil {
panic(e)
}
w := bufio.NewWriterSize(f, 4*1024*1024)
ticker := time.NewTicker(5 * time.Second)
defer wg.Done()
defer f.Close()
defer w.Flush()
for {
select {
case v, ok := <-ch:
if ok {
fmt.Fprintf(w, "%d ", v)
} else {
fmt.Println("done.")
ticker.Stop()
return
}
case <-ticker.C:
if w.Buffered() > 0 {
fmt.Println(w.Buffered())
w.Flush()
}
}
}
}
func main() {
ch := make(chan int, 100)
wg.Add(1)
go WriteToFile("file.txt", ch)
for i := 0; i < 25; i++ {
ch <- i // do the job
time.Sleep(500 * time.Millisecond)
}
close(ch) // Finish the job and close output file
wg.Wait()
}
I hope this helps.