The following snippet validates a phone number and writes the details to CSV.
func Parse(phone Input, output *PhoneNumber) error {
var n PhoneNumber
num, err := phonenumbers.Parse(phone.Number, phone.Prefix)
if err != nil {
    return err
}
n.PhoneNumber = phonenumbers.Format(num, phonenumbers.E164)
n.CountryCode = num.GetCountryCode()
n.PhoneType = phonenumbers.GetNumberType(num)
n.NetworkName, _ = phonenumbers.GetCarrierForNumber(num, "EN")
n.Region = phonenumbers.GetRegionCodeForNumber(num)
*output = n
return nil
}
func createFile(path string) {
    // detect if the file exists
    _, err := os.Stat(path)
    // create the file if it does not exist
    if os.IsNotExist(err) {
        file, err := os.Create(path)
        if err != nil {
            return
        }
        defer file.Close()
    }
}
func worker(ctx context.Context, dst chan string, src chan []string) {
for {
select {
case dataArray, ok := <-src: // you must check whether the channel is still open.
if !ok {
return
}
go processNumber(dataArray[0])
case <-ctx.Done(): // if the context is cancelled, quit.
return
}
}
}
func processNumber(number string) {
num, e := phonenumbers.Parse(number, "")
if e != nil {
return
}
region := phonenumbers.GetRegionCodeForNumber(num)
carrier, _ := phonenumbers.GetCarrierForNumber(num, "EN")
path := "sample_all.csv"
createFile(path)
csvFile, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
    return
}
defer csvFile.Close()
csvwriter := csv.NewWriter(csvFile)
_ = csvwriter.Write([]string{phonenumbers.Format(num, phonenumbers.E164), fmt.Sprintf("%v", num.GetCountryCode()), fmt.Sprintf("%v", phonenumbers.GetNumberType(num)), carrier, region})
csvwriter.Flush()
}
func ParseFile(phone Input, output *PhoneNumber) error {
// create a context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// that cancels at ctrl+C
go onSignal(os.Interrupt, cancel)
numberOfWorkers := 2
start := time.Now()
csvfile, err := os.Open(phone.File)
if err != nil {
log.Fatal(err)
}
defer csvfile.Close()
reader := csv.NewReader(csvfile)
// create the pair of input/output channels for the controller=>workers communication.
src := make(chan []string)
out := make(chan string)
// use a waitgroup to manage synchronization
var wg sync.WaitGroup
// declare the workers
for i := 0; i < numberOfWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
worker(ctx, out, src)
}()
}
// read the csv and write it to src
go func() {
for {
record, err := reader.Read()
if err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
src <- record // you might select on ctx.Done().
}
close(src) // close src to signal workers that no more jobs are incoming.
}()
// wait for worker group to finish and close out
go func() {
wg.Wait() // wait for workers to quit.
close(out) // when you close(out) it breaks the below loop.
}()
// drain the output
for res := range out {
fmt.Println(res)
}
fmt.Printf("\n%.2fs", time.Since(start).Seconds())
return nil
}
In the processNumber function, if I skip writing to the CSV, verifying the numbers completes in 6 seconds, but writing one record at a time to the CSV stretches the run to 15s.
How can I optimize the code?
Can I chunk the records and write them in chunks instead of writing one row at a time?
Do the work directly in the worker goroutine instead of firing off a goroutine per task.
Open the output file once. Flush it once.
func worker(ctx context.Context, dst chan []string, src chan []string) {
for {
select {
case dataArray, ok := <-src: // you must check whether the channel is still open.
if !ok {
return
}
res := processNumber(dataArray[0])
if res != nil { // skip numbers that failed to parse
    dst <- res
}
case <-ctx.Done(): // if the context is cancelled, quit.
return
}
}
}
func processNumber(number string) []string {
num, e := phonenumbers.Parse(number, "")
if e != nil {
return nil
}
region := phonenumbers.GetRegionCodeForNumber(num)
carrier, _ := phonenumbers.GetCarrierForNumber(num, "EN")
return []string{phonenumbers.Format(num, phonenumbers.E164), fmt.Sprintf("%v", num.GetCountryCode()), fmt.Sprintf("%v", phonenumbers.GetNumberType(num)), carrier, region}
}
func ParseFile(phone Input, output *PhoneNumber) error {
// create a context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// that cancels at ctrl+C
go onSignal(os.Interrupt, cancel)
numberOfWorkers := 2
start := time.Now()
csvfile, err := os.Open(phone.File)
if err != nil {
log.Fatal(err)
}
defer csvfile.Close()
reader := csv.NewReader(csvfile)
// create the pair of input/output channels for the controller=>workers communication.
src := make(chan []string)
out := make(chan []string)
// use a waitgroup to manage synchronization
var wg sync.WaitGroup
// declare the workers
for i := 0; i < numberOfWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
worker(ctx, out, src)
}()
}
// read the csv and write it to src
go func() {
for {
record, err := reader.Read()
if err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
src <- record // you might select on ctx.Done().
}
close(src) // close src to signal workers that no more jobs are incoming.
}()
// wait for worker group to finish and close out
go func() {
wg.Wait() // wait for workers to quit.
close(out) // when you close(out) it breaks the below loop.
}()
path := "sample_all.csv"
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
csvwriter := csv.NewWriter(file)
// drain the output
for res := range out {
csvwriter.Write(res)
}
csvwriter.Flush()
fmt.Printf("\n%.2fs", time.Since(start).Seconds())
return nil
}
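On the chunking question: encoding/csv's Writer is already buffered (csv.NewWriter wraps the file in a bufio.Writer), so once the file is opened a single time, each Write is just an in-memory copy and the single Flush at the end pushes everything to disk; explicit chunking buys little. If you prefer to batch anyway, WriteAll takes a [][]string, writes every record, and flushes for you. A minimal sketch of the drain loop in that style, under the same setup as above:
// collect rows from the out channel, then write them in one call
var rows [][]string
for res := range out {
    rows = append(rows, res) // accumulate in memory instead of writing one by one
}
if err := csvwriter.WriteAll(rows); err != nil { // WriteAll flushes internally
    return err
}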
I read this and this and this, but none of them solves my issue.
I'm trying to read 2 files async, so I wrote the below:
//readlines.go
package main
import (
"bufio"
"os"
)
// readLines reads a whole file into memory
// and returns a slice of its lines.
func readLines(path string) ([]string, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
And calling it as:
package main
import (
"fmt"
"os"
"github.com/gocarina/gocsv"
)
func (s *stocks) Read() {
fmt.Println("Reading")
stockFile, err := os.OpenFile("current_invenory.csv", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
panic(err)
}
defer stockFile.Close()
stocks := []systemStock{}
if err := gocsv.UnmarshalFile(stockFile, &stocks); err != nil { // Load stocks from file
panic(err)
}
*s = stocks
}
package main
import (
"fmt"
"os"
"github.com/gocarina/gocsv"
)
func (t *transactions) Read() {
fmt.Println("Reading")
trxFile, err := os.OpenFile("current_transactions.csv", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
panic(err)
}
defer trxFile.Close()
trx := []systemTransactions{}
if err := gocsv.UnmarshalFile(trxFile, &trx); err != nil { // Load stocks from file
panic(err)
}
*t = trx
}
The above works fine with:
stock := stocks{}
trx := transactions{}
stock.Read()
trx.Read()
for _, s := range stock {
fmt.Println("Hello", s.Code)
}
But it gives the error fatal error: all goroutines are asleep - deadlock! when I try to read them as:
cs, ct := readData()
for _, s := range cs {
fmt.Println("Hello", s.Code)
}
for _, t := range ct {
fmt.Println("Hello trx of ", t.Code)
}
Using
import "sync"
//func readData(cs chan stocks, ct chan transactions) (stocks, transactions) {
func readData() (stocks, transactions) {
var wg sync.WaitGroup
defer wg.Done()
stock := stocks{}
trx := transactions{}
wg.Add(1)
go stock.Read()
wg.Add(1)
go trx.Read()
wg.Wait()
return stock, trx
}
So the error is related to something wrong I made (or do not understand) in the last block.
To run the Read methods for stocks and transactions concurrently, these methods need to have a way of signaling when they are finished executing. This can be done in a lot of ways, but here are two which require the least modifications to your code.
Solution 1
Use sync.WaitGroup from the standard sync package. With this approach, the Read methods should call wg.Done() when they are done executing. It should look something like this:
func (s *stocks) Read(wg *sync.WaitGroup) {
defer wg.Done()
fmt.Println("Reading")
stockFile, err := os.OpenFile("current_invenory.csv", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
panic(err)
}
defer stockFile.Close()
stocks := []systemStock{}
if err := gocsv.UnmarshalFile(stockFile, &stocks); err != nil { // Load stocks from file
panic(err)
}
*s = stocks
}
func (t *transactions) Read(wg *sync.WaitGroup) {
defer wg.Done()
fmt.Println("Reading")
trxFile, err := os.OpenFile("current_transactions.csv", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
panic(err)
}
defer trxFile.Close()
trx := []systemTransactions{}
if err := gocsv.UnmarshalFile(trxFile, &trx); err != nil { // Load stocks from file
panic(err)
}
*t = trx
}
func readData() (stocks, transactions) {
var wg sync.WaitGroup
wg.Add(2)
stock := stocks{}
trx := transactions{}
go stock.Read(&wg)
go trx.Read(&wg)
wg.Wait()
return stock, trx
}
Solution 2
This approach uses the golang.org/x/sync/errgroup package. In this case, you do not need to handle the synchronization and signaling yourself, but functions started with the Group.Go method must have the exact signature func() error. Your code should look like this:
func (s *stocks) Read() error {
fmt.Println("Reading")
stockFile, err := os.OpenFile("current_invenory.csv", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
return err
}
defer stockFile.Close()
stocks := []systemStock{}
if err := gocsv.UnmarshalFile(stockFile, &stocks); err != nil { // Load stocks from file
return err
}
*s = stocks
return nil
}
func (t *transactions) Read() error {
fmt.Println("Reading")
trxFile, err := os.OpenFile("current_transactions.csv", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
return err
}
defer trxFile.Close()
trx := []systemTransactions{}
if err := gocsv.UnmarshalFile(trxFile, &trx); err != nil { // Load stocks from file
return err
}
*t = trx
return nil
}
func readData() (stocks, transactions) {
g, _ := errgroup.WithContext(context.Background())
stock := stocks{}
trx := transactions{}
g.Go(stock.Read)
g.Go(trx.Read)
if err := g.Wait(); err != nil {
panic(err)
}
return stock, trx
}
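A side note on Solution 2: the context returned by errgroup.WithContext is discarded here, and the zero value of errgroup.Group is ready to use, so the same code works without the context import. A minimal variant:
func readData() (stocks, transactions) {
    var g errgroup.Group // the zero value is valid; no context needed
    stock := stocks{}
    trx := transactions{}
    g.Go(stock.Read)
    g.Go(trx.Read)
    if err := g.Wait(); err != nil {
        panic(err)
    }
    return stock, trx
}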
Solution 3
You’re (correctly) adding 1 to the wait group when you start reading each CSV, bringing the wait group’s internal counter to 2, but wg.Wait() waits until that counter goes down to zero, and nothing decrements it: the single defer wg.Done() in readData can only run after readData returns, which never happens because wg.Wait() blocks first. I recommend changing go stock.Read() to:
go func() {
defer wg.Done()
stock.Read()
}()
So the full working code would be:
func readData() (stocks, transactions) {
var wg sync.WaitGroup
stock := stocks{}
trx := transactions{}
wg.Add(1)
go func() {
defer wg.Done()
stock.Read()
}()
wg.Add(1)
go func() {
defer wg.Done()
trx.Read()
}()
wg.Wait()
return stock, trx
}
I'm migrating a lot of files that are currently stored in a relational database to Amazon S3. I'm using Go because I had heard about its concurrency support, but I'm getting very low throughput. I'm new to Go, so I'm probably not doing this in the best way possible.
This is what I have at the moment
type Attachment struct {
BinaryData []byte `db:"BinaryData"`
CreatedAt time.Time `db:"CreatedDT"`
Id int `db:"Id"`
}
func main() {
connString := os.Getenv("CONNECTION_STRING")
log.SetFlags(log.Ltime)
db, err := sqlx.Connect("sqlserver", connString)
if err != nil {
panic(err)
}
log.Print("Connected to database")
sql := "SELECT TOP 1000 Id,CreatedDT, BinaryData FROM Attachment"
attachmentsDb := []Attachment{}
err = db.Select(&attachmentsDb, sql)
if err != nil {
log.Fatal(err)
}
session, err := session.NewSession(&aws.Config{
Region: aws.String("eu-west-1"),
})
if err != nil {
log.Fatal(err)
return
}
svc := s3.New(session)
wg := &sync.WaitGroup{}
for _, att := range attachmentsDb {
done := make(chan error)
go func(wg *sync.WaitGroup, att Attachment, out chan error) {
wg.Add(1)
err := <-saveAttachment(&att, svc)
if err == nil {
log.Printf("CV Written %d", att.Id)
}
wg.Done()
out<-err
}(wg, att, done)
<-done
}
wg.Wait()
//close(in)
defer db.Close()
}
func saveAttachment(att *Attachment, svc *s3.S3 )<-chan error {
out := make(chan error)
bucket := os.Getenv("BUCKET")
go func() {
defer close(out)
key := getKey(att)
input := &s3.PutObjectInput{Bucket: &bucket,
Key: &key,
Body: bytes.NewReader(att.BinaryData),
}
_, err := svc.PutObject(input)
if err != nil {
//log.Fatal(err)
log.Printf("Error uploading CV %d error %v", att.Id, err)
}
out <- err
}()
return out
}
func getKey(att *Attachment) string {
return fmt.Sprintf("%s/%d", os.Getenv("KEY"), att.Id)
}
These loops execute sequentially because every iteration waits for the result from the done channel, so there is no benefit from running multiple goroutines. There is also no need to create a new goroutine in func saveAttachment(), because you already create one in the loop.
func main() {
//....
svc := s3.New(session)
wg := &sync.WaitGroup{}
for _, att := range attachmentsDb {
done := make(chan error)
//New goroutine
go func(wg *sync.WaitGroup, att Attachment, out chan error) {
wg.Add(1)
//Already in a goroutine now, yet saveAttachment() creates another goroutine inside itself
err := <-saveAttachment(&att, svc) //There is a goroutine created in this func
if err == nil {
log.Printf("CV Written %d", att.Id)
}
wg.Done()
out<-err
}(wg, att, done)
<-done //This blocks until the result is received; only then does the next iteration start
}
}
func saveAttachment(att *Attachment, svc *s3.S3 )<-chan error {
out := make(chan error)
bucket := os.Getenv("BUCKET")
//Why new goroutine?
go func() {
defer close(out)
key := getKey(att)
input := &s3.PutObjectInput{Bucket: &bucket,
Key: &key,
Body: bytes.NewReader(att.BinaryData),
}
_, err := svc.PutObject(input)
if err != nil {
//log.Fatal(err)
log.Printf("Error uploading CV %d error %v", att.Id, err)
}
out <- err
}()
return out
}
If you want to upload in parallel, don't do that. You can quickly fix it like this
func main() {
//....
svc := s3.New(session)
wg := &sync.WaitGroup{}
//Number of goroutines = number of attachments
for _, att := range attachmentsDb {
wg.Add(1)
//One goroutine to uploads for each Attachment
go func(wg *sync.WaitGroup, att Attachment) {
err := saveAtt(&att, svc)
if err == nil {
log.Printf("CV Written %d", att.Id)
}
wg.Done()
}(wg, att)
//No blocking after creating a goroutine; the loop continues and creates the next one
}
wg.Wait()
fmt.Println("done")
}
//This func is executed in a goroutine, so no need to create another goroutine inside it
func saveAtt(att *Attachment, svc *s3.S3) error {
bucket := os.Getenv("BUCKET")
key := getKey(att)
input := &s3.PutObjectInput{Bucket: &bucket,
Key: &key,
Body: bytes.NewReader(att.BinaryData),
}
_, err := svc.PutObject(input)
if err != nil {
log.Printf("Error uploading CV %d error %v", att.Id, err)
}
return err
}
But this approach isn't good when there are many attachments, because the number of goroutines equals the number of attachments. In this case, you need a goroutine pool so you can limit the number of goroutines that run.
Warning!!! This is just an example to show the goroutine pool logic; you need to adapt it to your own code.
//....
//Create a attachment queue
queue := make(chan *Attachment) //Or use buffered channel: queue := make(chan *Attachment, bufferedSize)
//Send all attachments to the queue
go func() {
    for i := range attachmentsDb {
        queue <- &attachmentsDb[i] //address of the slice element; before Go 1.22, &att would point at one reused loop variable
}
}()
//....
//Create a goroutine pool
svc := s3.New(session)
wg := &sync.WaitGroup{}
//Use this as const
workerCount := 5
//Number of goroutines = workerCount
for i := 1; i <= workerCount; i++ {
//New goroutine
go func() {
//Get an attachment from the queue to upload. When the queue channel is empty, this blocks
for att := range queue {
err := saveAtt(att, svc)
if err == nil {
log.Printf("CV Written %d", att.Id)
}
}
}()
}
//....
//Warning!!! Call close on the channel only WHEN all attachments have been sent to the queue; this code just shows how you can end the goroutine pool
//Once the queue channel is closed, all upload goroutines end (because of `att := range queue`)
close(queue)
//....
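For completeness, here is one way the pieces above can fit together so that close(queue) happens exactly once, after every attachment has been enqueued, and main waits for the pool to drain. This is a sketch under the same assumptions (attachmentsDb, saveAtt, and svc as defined earlier), not the only way to structure it:
queue := make(chan *Attachment)
//producer: enqueue every attachment, then close the queue;
//closing from the only sender is always safe
go func() {
    for i := range attachmentsDb {
        queue <- &attachmentsDb[i]
    }
    close(queue)
}()
wg := &sync.WaitGroup{}
workerCount := 5
for i := 0; i < workerCount; i++ {
    wg.Add(1)
    go func() {
        defer wg.Done()
        for att := range queue { // ends when queue is closed and drained
            if err := saveAtt(att, svc); err == nil {
                log.Printf("CV Written %d", att.Id)
            }
        }
    }()
}
wg.Wait() // all uploads have finished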
I use the library (http://github.com/fsnotify/fsnotify) to monitor the file system.
I'm trying to adjust the repository example to match my requirements, but when I do so the program no longer works.
I commented out the done channel within the ExampleNewWatcher function:
done := make(chan bool)
<-done
As a result, now when I run the example, this channel does not deliver anything anymore:
event, ok := <-watcher.Events
Complete code:
package main
import (
"github.com/fsnotify/fsnotify"
"log"
"os"
"strconv"
"time"
)
func ExampleNewWatcher() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
done := make(chan bool) // if I comment this line out, the `panic` below never fires
go func() {
for {
select {
case event, ok := <-watcher.Events:
if !ok {
return
}
log.Println("event:", event)
if event.Op&fsnotify.Write == fsnotify.Write {
log.Println("modified file:", event.Name)
}
panic("just for test") // new output this line
case err, ok := <-watcher.Errors:
if !ok {
return
}
log.Println("error:", err)
}
}
}()
err = watcher.Add("/tmp/foo")
if err != nil {
log.Fatal(err)
}
<-done // comment this
}
func check(err error) {
if err != nil {
panic(err)
}
}
func main() {
// create the test file
var err error
file, err := os.Create("/tmp/foo")
check(err)
_, err = file.Write([]byte("hello world"))
check(err)
stopchan := make(chan struct{})
// test change file
go func() {
for i := 0; i < 10; i++ {
file1, err := os.OpenFile("/tmp/foo", os.O_RDWR, 0644)
check(err)
d := strconv.Itoa(i) + "hello world"
_, err = file1.Write([]byte(d))
check(err)
err = file1.Close()
check(err)
time.Sleep(2 * time.Second) // wait for the content to be written to the file
}
}()
ExampleNewWatcher() // monitor file
stopchan <- struct{}{}
}
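What happens here follows from the code: once <-done is commented out, ExampleNewWatcher returns immediately, its defer watcher.Close() runs, and the event-reading goroutine exits before any of the test writes happen, so no events are printed and the panic never fires. main then blocks forever on stopchan <- struct{}{}, since nothing ever receives from stopchan. A quick way to keep the watcher alive for the duration of the test writes is to block on a timer instead of done (the 25-second margin is an arbitrary choice covering the ten 2-second writes):
// at the end of ExampleNewWatcher, instead of <-done:
<-time.After(25 * time.Second) // keep the watcher goroutine alive while main writes to /tmp/foo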
In a function definition, if a channel is an argument without a direction, does it have to send or receive something?
func makeRequest(url string, ch chan<- string, results chan<- string) {
start := time.Now()
resp, err := http.Get(url)
if err != nil {
    fmt.Printf("%v", err)
    return
}
defer resp.Body.Close() // close only after confirming err == nil
resp, err = http.Post(url, "text/plain", bytes.NewBuffer([]byte("Hey")))
secs := time.Since(start).Seconds()
if err != nil {
    fmt.Printf("%v", err)
    return
}
defer resp.Body.Close()
// Cannot move past this.
ch <- fmt.Sprintf("%f", secs)
results <- <- ch
}
func MakeRequestHelper(url string, ch chan string, results chan string, iterations int) {
for i := 0; i < iterations; i++ {
makeRequest(url, ch, results)
}
for i := 0; i < iterations; i++ {
fmt.Println(<-ch)
}
}
func main() {
args := os.Args[1:]
threadString := args[0]
iterationString := args[1]
url := args[2]
threads, err := strconv.Atoi(threadString)
if err != nil {
fmt.Printf("%v", err)
}
iterations, err := strconv.Atoi(iterationString)
if err != nil {
fmt.Printf("%v", err)
}
channels := make([]chan string, 100)
for i := range channels {
channels[i] = make(chan string)
}
// results aggregate all the things received by channels in all goroutines
results := make(chan string, iterations*threads)
for i := 0; i < threads; i++ {
go MakeRequestHelper(url, channels[i], results, iterations)
}
resultSlice := make([]string, threads*iterations)
for i := 0; i < threads*iterations; i++ {
resultSlice[i] = <-results
}
}
In the above code,
the sends ch <- … and results <- <-ch
seem to be blocking every goroutine that executes makeRequest.
I am new to Go's concurrency model. I understand that sending to and receiving from a channel blocks, but I find it difficult to see what is blocking what in this code.
I'm not really sure what you are doing... It seems really convoluted. I suggest you read up on how to use channels.
https://tour.golang.org/concurrency/2
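To the title question itself: no. A parameter of type chan T is bidirectional, and the function is free to send on it, receive from it, do both, or ignore it entirely. The direction arrows only restrict what the compiler will accept:
func both(c chan int) { // bidirectional: may send, receive, both, or neither
    c <- 1 // send allowed
    <-c    // receive allowed
}
func sendOnly(c chan<- int) { c <- 1 } // receiving from c would not compile
func recvOnly(c <-chan int) { <-c }    // sending on c would not compile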
That being said, you have so much going on in your code that it was much easier to just gut it down to something a bit simpler. (It can be simplified further.) I left comments to explain the code.
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"sync"
"time"
)
// using structs is a nice way to organize your code
type Worker struct {
wg sync.WaitGroup
semaphore chan struct{}
result chan Result
client http.Client
}
// grouping results into a struct means you don't have to send on many channels
type Result struct {
duration float64
results string
}
// closing your channels will stop the for loop in main
func (w *Worker) Close() {
close(w.semaphore)
close(w.result)
}
func (w *Worker) MakeRequest(url string) {
// a semaphore is a simple way to rate limit the amount of goroutines running at any single point of time
// google them, Go uses them often
w.semaphore <- struct{}{}
defer func() {
w.wg.Done()
<-w.semaphore
}()
start := time.Now()
resp, err := w.client.Get(url)
if err != nil {
log.Println("error", err)
return
}
defer resp.Body.Close()
// don't have any examples where I need to also POST anything but the point should be made
// resp, err = http.Post(url, "text/plain", bytes.NewBuffer([]byte("Hey")))
// if err != nil {
// log.Println("error", err)
// return
// }
// defer resp.Body.Close()
secs := time.Since(start).Seconds()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Println("error", err)
return
}
w.result <- Result{duration: secs, results: string(b)}
}
func main() {
urls := []string{"https://facebook.com/", "https://twitter.com/", "https://google.com/", "https://youtube.com/", "https://linkedin.com/", "https://wordpress.org/",
"https://instagram.com/", "https://pinterest.com/", "https://wikipedia.org/", "https://wordpress.com/", "https://blogspot.com/", "https://apple.com/",
}
workerNumber := 5
worker := Worker{
semaphore: make(chan struct{}, workerNumber),
result: make(chan Result),
client: http.Client{Timeout: 5 * time.Second},
}
// use a WaitGroup to allow your code to wait for
// all your goroutines to finish
for _, url := range urls {
worker.wg.Add(1)
go worker.MakeRequest(url)
}
// by running wait-and-close in a separate goroutine
// I can get to the for loop below and iterate on the results
// in a non-blocking fashion
go func() {
worker.wg.Wait()
worker.Close()
}()
// do something with the results channel
for res := range worker.result {
fmt.Printf("Request took %2.f seconds.\nResults: %s\n\n", res.duration, res.results)
}
}
The sends block because nothing is receiving. MakeRequestHelper calls makeRequest sequentially, and its loop that receives from ch only starts after all the makeRequest calls have returned; since ch is unbuffered, the very first ch <- blocks forever. On top of that, results <- <-ch consumes the value just sent on ch, so the later fmt.Println(<-ch) loop would have nothing left to read anyway. I'm not sure exactly what you're trying to do here, but that's the basic problem.
See https://golang.org/doc/effective_go.html#channels for an explanation of how channels work.
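If you would rather make the smallest change that unblocks the original structure, one option (a sketch, not the only fix) is to buffer each per-thread channel and send the value to both channels instead of forwarding it through ch:
// in main: give each per-thread channel room for all of its sends
for i := range channels {
    channels[i] = make(chan string, iterations)
}
// in makeRequest: send to both channels independently;
// results <- <-ch would otherwise consume what the later loop expects
s := fmt.Sprintf("%f", secs)
ch <- s
results <- s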
I'm trying to adapt this example:
https://gobyexample.com/worker-pools
But I don't know how to stop reading from the channel, because the program doesn't exit at the end of the channel loop.
Can you explain how to make the program exit?
package main
import (
"github.com/SlyMarbo/rss"
"bufio"
"fmt"
"log"
"os"
)
func readLines(path string) ([]string, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
func worker(id int, jobs <-chan string, results chan<- string) {
for url := range jobs {
fmt.Println("worker", id, "processing job", url)
feed, err := rss.Fetch(url)
if err != nil {
fmt.Println("Error on: ", url)
continue
}
borne := 0
for _, value := range feed.Items {
if borne < 5 {
results <- value.Link
borne = borne +1
} else {
continue
}
}
}
}
func main() {
jobs := make(chan string)
results := make(chan string)
for w := 1; w <= 16; w++ {
go worker(w, jobs, results)
}
urls, err := readLines("flux.txt")
if err != nil {
log.Fatalf("readLines: %s", err)
}
for _, url := range urls {
jobs <- url
}
close(jobs)
// it seems the program runs forever here...
for msg := range results {
fmt.Println(msg)
}
}
flux.txt is a flat text file like:
http://blog.case.edu/news/feed.atom
...
The problem is that, in the example you are referring to, the main goroutine reads from results exactly 9 times:
for a := 1; a <= 9; a++ {
<-results
}
Your program, on the other hand, does a range loop over results, which has different semantics in Go: the range operator does not stop until the channel is closed.
for msg := range results {
fmt.Println(msg)
}
To fix your problem you'd need to close the results channel. However, if you just call close(results) before the for loop, you will most probably get a panic, because the workers might still be writing to results.
To fix this, you need a way to be notified when all the workers are done. You can do this either using a sync.WaitGroup:
const (
workers = 16
)
func main() {
jobs := make(chan string, 100)
results := make(chan string, 100)
var wg sync.WaitGroup
for w := 0; w < workers; w++ {
    wg.Add(1) // Add must run before the goroutine starts, or wg.Wait() may return too early
    go func(w int) {
        defer wg.Done()
        worker(w, jobs, results)
    }(w)
}
urls, err := readLines("flux.txt")
if err != nil {
log.Fatalf("readLines: %s", err)
}
for _, url := range urls {
jobs <- url
}
close(jobs)
wg.Wait()
close(results)
// results is closed once the workers finish, so this loop terminates
for msg := range results {
fmt.Println(msg)
}
}
Or a done channel:
package main
import (
"bufio"
"fmt"
"github.com/SlyMarbo/rss"
"log"
"os"
)
func readLines(path string) ([]string, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
func worker(id int, jobs <-chan string, results chan<- string, done chan struct{}) {
for url := range jobs {
fmt.Println("worker", id, "processing job", url)
feed, err := rss.Fetch(url)
if err != nil {
fmt.Println("Error on: ", url)
continue
}
borne := 0
for _, value := range feed.Items {
if borne < 5 {
results <- value.Link
borne = borne + 1
} else {
continue
}
}
}
close(done)
}
const (
workers = 16
)
func main() {
jobs := make(chan string, 100)
results := make(chan string, 100)
dones := make([]chan struct{}, workers)
for w := 0; w < workers; w++ {
dones[w] = make(chan struct{})
go worker(w, jobs, results, dones[w])
}
urls, err := readLines("flux.txt")
if err != nil {
log.Fatalf("readLines: %s", err)
}
for _, url := range urls {
jobs <- url
}
close(jobs)
for _, done := range dones {
<-done
}
close(results)
// results is closed once the workers finish, so this loop terminates
for msg := range results {
fmt.Println(msg)
}
}
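One caveat that applies to both variants above: results has a buffer of 100, and main only starts draining it after wg.Wait() returns (or after all the done channels are closed). If the feeds yield more than 100 links in total, the workers block on results <- before finishing, and the program deadlocks again. The safer shape, as in the worker examples earlier on this page, is to close results from a helper goroutine and drain it in the main flow; a sketch for the WaitGroup variant:
// drain results while the workers are still running;
// a helper goroutine closes results once every worker is done
go func() {
    wg.Wait()
    close(results)
}()
for msg := range results {
    fmt.Println(msg)
}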