Context cascading test - Go

How to improve this context cascading test:
func TestParentTimedoutContext(t *testing.T) {
    ctx1, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
    defer cancel()
    ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
    defer cancel2()
    time.Sleep(15 * time.Millisecond) // make ctx1 timeout
    if ctx2.Err() == nil {
        t.Error("failed")
    }
}
Command:
go test -timeout 30s -count=1 -run ^TestParentTimedoutContext$ my/internal/services
Output:
ok
Sometimes:
failed
go version go1.16.6

Wait for ctx2.Done() and measure the elapsed time, which should be well under ctx2's own deadline.
Try this:
func TestParentTimedoutContext(t *testing.T) {
    t0 := time.Now()
    ctx1, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
    defer cancel()
    ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
    defer cancel2()
    <-ctx2.Done()
    d := time.Since(t0)
    if d > 777*time.Millisecond { // well under ctx2's own 1000ms deadline
        t.Error("failed:" + d.String())
    }
}
Benchmark:
func BenchmarkParentTimedoutContext(b *testing.B) {
    // Saturate all CPUs with busy-spinning goroutines for ~10s.
    for i := 0; i < runtime.NumCPU(); i++ {
        go func() {
            ctx3, cancel3 := context.WithTimeout(context.Background(), 10_000*time.Millisecond)
            defer cancel3()
            for {
                select {
                case <-ctx3.Done():
                    return
                default:
                    // runtime.Gosched()
                }
            }
        }()
    }
    for i := 0; i < b.N; i++ {
        ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Millisecond)
        ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
        <-ctx2.Done()
        cancel1()
        cancel2()
    }
}
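To reproduce (a sketch; run it from whatever package holds the benchmark):
go test -bench=ParentTimedoutContext -run=^$ -count=1 .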
Benchmark result (Linux):
# BenchmarkParentTimedoutContext-8 589 17_178_588 ns/op // 17ms
Histogram code:
package main

import (
    "context"
    "fmt"
    "runtime"
    "time"
)

func main() {
    const n = 1_000
    // Saturate all CPUs with busy-spinning goroutines, as in the benchmark.
    for i := 0; i < runtime.NumCPU(); i++ {
        go func() {
            ctx3, cancel3 := context.WithTimeout(context.Background(), n*time.Millisecond)
            defer cancel3()
            for {
                select {
                case <-ctx3.Done():
                    return
                default:
                    // runtime.Gosched()
                }
            }
        }()
    }
    all := make([]time.Duration, n)
    for i := 0; i < n; i++ {
        t0 := time.Now()
        ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Millisecond)
        ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
        <-ctx2.Done()
        d := time.Since(t0)
        cancel1()
        cancel2()
        all[i] = d
    }
    ave, x, y := histogram(all, 10)
    fmt.Println("min =", y[0])
    fmt.Println("max =", y[len(y)-1])
    fmt.Println("ave =", ave)
    fmt.Println(x)
    fmt.Println(y)
    fmt.Println(runtime.GOOS, runtime.GOARCH, runtime.Version())
}

func histogram(d []time.Duration, n int) (ave time.Duration, x []int, y []time.Duration) {
    x = make([]int, n)
    y = make([]time.Duration, n+1)
    min := d[0]
    max := min
    ave = min
    for _, v := range d[1:] {
        ave += v
        if v < min {
            min = v
        } else if v > max {
            max = v
        }
    }
    ave /= time.Duration(len(d))
    distance := (max - min) / time.Duration(n)
    v := min
    for i := range y {
        y[i] = v
        v += distance
    }
    y[len(y)-1] = max // compensate for integer division rounding
    for _, v := range d {
        i := int((v - min) / distance)
        if i >= n {
            i--
        }
        x[i]++
    }
    return
}
Histogram output (Linux):
go run .
min = 1.046524ms
max = 22.717866ms
ave = 2.090832ms
[950 0 0 0 0 0 1 0 47 2]
[1.046524ms 3.213658ms 5.380792ms 7.547926ms 9.71506ms 11.882194ms 14.049328ms 16.216462ms 18.383596ms 20.55073ms 22.717866ms]
linux amd64 go1.16.6
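The tail above 1ms is consistent with the busy-spinning goroutines saturating every CPU, so the goroutines that fire the context timers get scheduled late. A minimal sketch of the mitigation, assuming the same spin loop as above with the commented-out yield enabled:
for {
    select {
    case <-ctx3.Done():
        return
    default:
        runtime.Gosched() // yield instead of spinning hot, giving timer goroutines a chance to run
    }
}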

Related

How to collect values from a channel into a slice in Go?

Suppose I have a helper function helper(n int) which returns a slice of integers of variable length. I would like to run helper(n) in parallel for various values of n and collect the output in one big slice. My first attempt at this is the following:
package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    out := make([]int, 0)
    ch := make(chan int)
    go func() {
        for i := range ch {
            out = append(out, i)
        }
    }()
    g := new(errgroup.Group)
    for n := 2; n <= 3; n++ {
        n := n
        g.Go(func() error {
            for _, i := range helper(n) {
                ch <- i
            }
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        panic(err)
    }
    close(ch)
    // time.Sleep(time.Second)
    fmt.Println(out) // should have the same elements as [0 1 0 1 2]
}

func helper(n int) []int {
    out := make([]int, 0)
    for i := 0; i < n; i++ {
        out = append(out, i)
    }
    return out
}
However, if I run this example I do not get all 5 expected values; instead I get
[0 1 0 1]
(If I uncomment the time.Sleep I do get all five values, [0 1 2 0 1], but this is not an acceptable solution).
It seems that the problem is that out is being updated in a goroutine, but main prints it before that goroutine has finished draining the channel.
One thing that would work is using a buffered channel of size 5:
func main() {
    ch := make(chan int, 5)
    g := new(errgroup.Group)
    for n := 2; n <= 3; n++ {
        n := n
        g.Go(func() error {
            for _, i := range helper(n) {
                ch <- i
            }
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        panic(err)
    }
    close(ch)
    out := make([]int, 0)
    for i := range ch {
        out = append(out, i)
    }
    fmt.Println(out) // should have the same elements as [0 1 0 1 2]
}
However, although in this simplified example I know what the size of the output should be, in my actual application this is not known a priori. Essentially what I would like is an 'infinite' buffer such that sending to the channel never blocks, or a more idiomatic way to achieve the same thing; I've read https://blog.golang.org/pipelines but wasn't able to find a close match to my use case. Any ideas?
In this version of the code, main blocks until ch is closed.
ch is always closed at the end of the goroutine responsible for pushing into it. Because the program pushes to ch from a goroutine, a buffered channel is not needed.
package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    ch := make(chan int)
    go func() {
        g := new(errgroup.Group)
        for n := 2; n <= 3; n++ {
            n := n
            g.Go(func() error {
                for _, i := range helper(n) {
                    ch <- i
                }
                return nil
            })
        }
        if err := g.Wait(); err != nil {
            panic(err)
        }
        close(ch)
    }()
    out := make([]int, 0)
    for i := range ch {
        out = append(out, i)
    }
    fmt.Println(out) // should have the same elements as [0 1 0 1 2]
}

func helper(n int) []int {
    out := make([]int, 0)
    for i := 0; i < n; i++ {
        out = append(out, i)
    }
    return out
}
Here is the fixed version of the first snippet; it is more convoluted, but it demonstrates the use of sync.WaitGroup.
package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/errgroup"
)

func main() {
    out := make([]int, 0)
    ch := make(chan int)
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        for i := range ch {
            out = append(out, i)
        }
    }()
    g := new(errgroup.Group)
    for n := 2; n <= 3; n++ {
        n := n
        g.Go(func() error {
            for _, i := range helper(n) {
                ch <- i
            }
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        panic(err)
    }
    close(ch)
    wg.Wait()
    // time.Sleep(time.Second)
    fmt.Println(out) // should have the same elements as [0 1 0 1 2]
}

func helper(n int) []int {
    out := make([]int, 0)
    for i := 0; i < n; i++ {
        out = append(out, i)
    }
    return out
}

Implementing a gradient descent

I'm trying to implement gradient descent in Go. My goal is to predict the price of a car from its mileage.
Here is my data set:
km,price
240000,3650
139800,3800
150500,4400
185530,4450
176000,5250
114800,5350
166800,5800
89000,5990
144500,5999
84000,6200
82029,6390
63060,6390
74000,6600
97500,6800
67000,6800
76025,6900
48235,6900
93000,6990
60949,7490
65674,7555
54000,7990
68500,7990
22899,7990
61789,8290
I've tried various approaches, like normalizing the data set, not normalizing it, leaving thetas as is, denormalizing thetas... But I cannot get the correct result.
My maths must be off somewhere, but I cannot figure out where.
The result I'm trying to get should be approximately t0 = 8500, t1 = -0.02
My implementation is the following:
package main

import (
    "encoding/csv"
    "fmt"
    "log"
    "math"
    "os"
    "strconv"
)

const (
    dataFile     = "data.csv"
    iterations   = 20000
    learningRate = 0.1
)

type dataSet [][]float64

var minKm, maxKm, minPrice, maxPrice float64

func (d dataSet) getExtremes(column int) (float64, float64) {
    min := math.Inf(1)
    max := math.Inf(-1)
    for _, row := range d {
        item := row[column]
        if item > max {
            max = item
        }
        if item < min {
            min = item
        }
    }
    return min, max
}

func normalizeItem(item, min, max float64) float64 {
    return (item - min) / (max - min)
}

func (d *dataSet) normalize() {
    minKm, maxKm = d.getExtremes(0)
    minPrice, maxPrice = d.getExtremes(1)
    for _, row := range *d {
        row[0], row[1] = normalizeItem(row[0], minKm, maxKm), normalizeItem(row[1], minPrice, maxPrice)
    }
}

func processEntry(entry []string) []float64 {
    if len(entry) != 2 {
        log.Fatalln("expected two fields")
    }
    km, err := strconv.ParseFloat(entry[0], 64)
    if err != nil {
        log.Fatalln(err)
    }
    price, err := strconv.ParseFloat(entry[1], 64)
    if err != nil {
        log.Fatalln(err)
    }
    return []float64{km, price}
}

func getData() dataSet {
    file, err := os.Open(dataFile)
    if err != nil {
        log.Fatalln(err)
    }
    reader := csv.NewReader(file)
    entries, err := reader.ReadAll()
    if err != nil {
        log.Fatalln(err)
    }
    entries = entries[1:]
    data := make(dataSet, len(entries))
    for k, entry := range entries {
        data[k] = processEntry(entry)
    }
    return data
}

func outputResult(theta0, theta1 float64) {
    file, err := os.OpenFile("weights.csv", os.O_WRONLY, 0644)
    if err != nil {
        log.Fatalln(err)
    }
    defer file.Close()
    file.Truncate(0)
    file.Seek(0, 0)
    file.WriteString(fmt.Sprintf("theta0,%.6f\ntheta1,%.6f\n", theta0, theta1))
}

func estimatePrice(theta0, theta1, mileage float64) float64 {
    return theta0 + theta1*mileage
}

func (d dataSet) computeThetas(theta0, theta1 float64) (float64, float64) {
    dataSize := float64(len(d))
    t0sum, t1sum := 0.0, 0.0
    for _, it := range d {
        mileage := it[0]
        price := it[1]
        err := estimatePrice(theta0, theta1, mileage) - price
        t0sum += err
        t1sum += err * mileage
    }
    return theta0 - (t0sum / dataSize * learningRate), theta1 - (t1sum / dataSize * learningRate)
}

func denormalize(theta, min, max float64) float64 {
    return theta*(max-min) + min
}

func main() {
    data := getData()
    data.normalize()
    theta0, theta1 := 0.0, 0.0
    for k := 0; k < iterations; k++ {
        theta0, theta1 = data.computeThetas(theta0, theta1)
    }
    theta0 = denormalize(theta0, minKm, maxKm)
    theta1 = denormalize(theta1, minPrice, maxPrice)
    outputResult(theta0, theta1)
}
What should I fix in order to properly implement a gradient descent?
Linear Regression is really simple:
// yi = alpha + beta*xi + ei
func linearRegression(x, y []float64) (float64, float64) {
    EX := expected(x)
    EY := expected(y)
    EXY := expectedXY(x, y)
    EXX := expectedXY(x, x)
    covariance := EXY - EX*EY
    variance := EXX - EX*EX
    beta := covariance / variance
    alpha := EY - beta*EX
    return alpha, beta
}
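In symbols, these are the ordinary least squares estimates:
beta = Cov(X, Y) / Var(X) = (E[XY] - E[X]E[Y]) / (E[X^2] - E[X]^2)
alpha = E[Y] - beta*E[X]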
Output:
8499.599649933218 -0.021448963591702314 396270.87871142407
Code:
package main

import (
    "encoding/csv"
    "fmt"
    "strconv"
    "strings"
)

func main() {
    x, y := readXY(`data.csv`)
    alpha, beta := linearRegression(x, y)
    // The third value, -alpha/beta, is the mileage at which the predicted price reaches zero.
    fmt.Println(alpha, beta, -alpha/beta) // 8499.599649933218 -0.021448963591702314 396270.87871142407
}

// https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model
// yi = alpha + beta*xi + ei
func linearRegression(x, y []float64) (float64, float64) {
    EX := expected(x)
    EY := expected(y)
    EXY := expectedXY(x, y)
    EXX := expectedXY(x, x)
    covariance := EXY - EX*EY
    variance := EXX - EX*EX
    beta := covariance / variance
    alpha := EY - beta*EX
    return alpha, beta
}

// E[X]
func expected(x []float64) float64 {
    sum := 0.0
    for _, v := range x {
        sum += v
    }
    return sum / float64(len(x))
}

// E[XY]
func expectedXY(x, y []float64) float64 {
    sum := 0.0
    for i, v := range x {
        sum += v * y[i]
    }
    return sum / float64(len(x))
}

func readXY(filename string) ([]float64, []float64) {
    // file, err := os.Open(filename)
    // if err != nil {
    //     panic(err)
    // }
    // defer file.Close()
    file := strings.NewReader(data)
    reader := csv.NewReader(file)
    records, err := reader.ReadAll()
    if err != nil {
        panic(err)
    }
    records = records[1:]
    size := len(records)
    x := make([]float64, size)
    y := make([]float64, size)
    for i, v := range records {
        val, err := strconv.ParseFloat(v[0], 64)
        if err != nil {
            panic(err)
        }
        x[i] = val
        val, err = strconv.ParseFloat(v[1], 64)
        if err != nil {
            panic(err)
        }
        y[i] = val
    }
    return x, y
}
var data = `km,price
240000,3650
139800,3800
150500,4400
185530,4450
176000,5250
114800,5350
166800,5800
89000,5990
144500,5999
84000,6200
82029,6390
63060,6390
74000,6600
97500,6800
67000,6800
76025,6900
48235,6900
93000,6990
60949,7490
65674,7555
54000,7990
68500,7990
22899,7990
61789,8290`
Gradient descent is based on the observation that if a multi-variable function F(x) is defined and differentiable in a neighborhood of a point a, then F(x) decreases fastest if one goes from a in the direction of the negative gradient of F at a, i.e. -∇F(a). For example:
// F(x)
f := func(x float64) float64 {
    return alpha + beta*x // write your target function here
}
Derivative function:
h := 0.000001
// Derivative function ∇F(x)
df := func(x float64) float64 {
    return (f(x+h) - f(x-h)) / (2 * h) // write your target function derivative here
}
Search:
minimumAt := 1.0       // We start the search here
gamma := 0.01          // Step size multiplier
precision := 0.0000001 // Desired precision of result
max := 100000          // Maximum number of iterations
currentX := 0.0
step := 0.0
for i := 0; i < max; i++ {
    currentX = minimumAt
    minimumAt = currentX - gamma*df(currentX)
    step = minimumAt - currentX
    if math.Abs(step) <= precision {
        break
    }
}
fmt.Printf("Minimum at %.8f value: %v\n", minimumAt, f(minimumAt))

Why does having a default clause in a goroutine's select make it slower?

Referring to the following benchmark test code:
func BenchmarkRuneCountNoDefault(b *testing.B) {
    b.StopTimer()
    var strings []string
    numStrings := 10
    for n := 0; n < numStrings; n++ {
        s := RandStringBytesMaskImprSrc(10)
        strings = append(strings, s)
    }
    jobs := make(chan string)
    results := make(chan int)
    for i := 0; i < runtime.NumCPU(); i++ {
        go RuneCountNoDefault(jobs, results)
    }
    b.StartTimer()
    for n := 0; n < b.N; n++ {
        go func() {
            for n := 0; n < numStrings; n++ {
                <-results
            }
            return
        }()
        for n := 0; n < numStrings; n++ {
            jobs <- strings[n]
        }
    }
    close(jobs)
}

func RuneCountNoDefault(jobs chan string, results chan int) {
    for {
        select {
        case j, ok := <-jobs:
            if ok {
                results <- utf8.RuneCountInString(j)
            } else {
                return
            }
        }
    }
}

func BenchmarkRuneCountWithDefault(b *testing.B) {
    b.StopTimer()
    var strings []string
    numStrings := 10
    for n := 0; n < numStrings; n++ {
        s := RandStringBytesMaskImprSrc(10)
        strings = append(strings, s)
    }
    jobs := make(chan string)
    results := make(chan int)
    for i := 0; i < runtime.NumCPU(); i++ {
        go RuneCountWithDefault(jobs, results)
    }
    b.StartTimer()
    for n := 0; n < b.N; n++ {
        go func() {
            for n := 0; n < numStrings; n++ {
                <-results
            }
            return
        }()
        for n := 0; n < numStrings; n++ {
            jobs <- strings[n]
        }
    }
    close(jobs)
}

func RuneCountWithDefault(jobs chan string, results chan int) {
    for {
        select {
        case j, ok := <-jobs:
            if ok {
                results <- utf8.RuneCountInString(j)
            } else {
                return
            }
        default: //DIFFERENCE
        }
    }
}

//https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

const (
    letterIdxBits = 6                    // 6 bits to represent a letter index
    letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
    letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

var src = rand.NewSource(time.Now().UnixNano())

func RandStringBytesMaskImprSrc(n int) string {
    b := make([]byte, n)
    // A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
    for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
        if remain == 0 {
            cache, remain = src.Int63(), letterIdxMax
        }
        if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
            b[i] = letterBytes[idx]
            i--
        }
        cache >>= letterIdxBits
        remain--
    }
    return string(b)
}
When I benchmark the two functions, where RuneCountNoDefault has no default clause in its select and RuneCountWithDefault does, I get the following results:
BenchmarkRuneCountNoDefault-4 200000 8910 ns/op
BenchmarkRuneCountWithDefault-4 5 277798660 ns/op
Checking the CPU profile generated by the tests, I noticed that the function with the default clause spends a lot of time in channel operations.
Why does having a default clause in the goroutine's select make it slower?
I'm using Go version 1.10 for windows/amd64.
The Go Programming Language Specification, Select statements:
If one or more of the communications can proceed, a single one that can proceed is chosen via a uniform pseudo-random selection. Otherwise, if there is a default case, that case is chosen. If there is no default case, the "select" statement blocks until at least one of the communications can proceed.
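A minimal sketch of that rule (a hypothetical snippet, not part of the benchmark): with nothing ready on ch, the default case is chosen immediately and the select never blocks:
ch := make(chan int)
select {
case v := <-ch:
    fmt.Println("received", v)
default:
    fmt.Println("nothing ready") // chosen immediately; the select does not block
}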
Modifying your benchmark to count the number of proceed and default cases taken:
$ go test default_test.go -bench=.
goos: linux
goarch: amd64
BenchmarkRuneCountNoDefault-4 300000 4108 ns/op
BenchmarkRuneCountWithDefault-4 10 209890782 ns/op
--- BENCH: BenchmarkRuneCountWithDefault-4
default_test.go:90: proceeds 114
default_test.go:91: defaults 128343308
$
While the other cases were unable to proceed, the default case was taken 128343308 times in 209422470 (209890782 - 114*4108) nanoseconds, or about 1.63 nanoseconds per default case. If you do something small a large number of times, it adds up.
default_test.go:
package main

import (
    "math/rand"
    "runtime"
    "sync/atomic"
    "testing"
    "time"
    "unicode/utf8"
)

func BenchmarkRuneCountNoDefault(b *testing.B) {
    b.StopTimer()
    var strings []string
    numStrings := 10
    for n := 0; n < numStrings; n++ {
        s := RandStringBytesMaskImprSrc(10)
        strings = append(strings, s)
    }
    jobs := make(chan string)
    results := make(chan int)
    for i := 0; i < runtime.NumCPU(); i++ {
        go RuneCountNoDefault(jobs, results)
    }
    b.StartTimer()
    for n := 0; n < b.N; n++ {
        go func() {
            for n := 0; n < numStrings; n++ {
                <-results
            }
            return
        }()
        for n := 0; n < numStrings; n++ {
            jobs <- strings[n]
        }
    }
    close(jobs)
}

func RuneCountNoDefault(jobs chan string, results chan int) {
    for {
        select {
        case j, ok := <-jobs:
            if ok {
                results <- utf8.RuneCountInString(j)
            } else {
                return
            }
        }
    }
}

var proceeds, defaults uint64

func BenchmarkRuneCountWithDefault(b *testing.B) {
    b.StopTimer()
    var strings []string
    numStrings := 10
    for n := 0; n < numStrings; n++ {
        s := RandStringBytesMaskImprSrc(10)
        strings = append(strings, s)
    }
    jobs := make(chan string)
    results := make(chan int)
    for i := 0; i < runtime.NumCPU(); i++ {
        go RuneCountWithDefault(jobs, results)
    }
    b.StartTimer()
    for n := 0; n < b.N; n++ {
        go func() {
            for n := 0; n < numStrings; n++ {
                <-results
            }
            return
        }()
        for n := 0; n < numStrings; n++ {
            jobs <- strings[n]
        }
    }
    close(jobs)
    b.Log("proceeds", atomic.LoadUint64(&proceeds))
    b.Log("defaults", atomic.LoadUint64(&defaults))
}

func RuneCountWithDefault(jobs chan string, results chan int) {
    for {
        select {
        case j, ok := <-jobs:
            atomic.AddUint64(&proceeds, 1)
            if ok {
                results <- utf8.RuneCountInString(j)
            } else {
                return
            }
        default: //DIFFERENCE
            atomic.AddUint64(&defaults, 1)
        }
    }
}

//https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

const (
    letterIdxBits = 6                    // 6 bits to represent a letter index
    letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
    letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

var src = rand.NewSource(time.Now().UnixNano())

func RandStringBytesMaskImprSrc(n int) string {
    b := make([]byte, n)
    // A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
    for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
        if remain == 0 {
            cache, remain = src.Int63(), letterIdxMax
        }
        if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
            b[i] = letterBytes[idx]
            i--
        }
        cache >>= letterIdxBits
        remain--
    }
    return string(b)
}
Playground: https://play.golang.org/p/DLnAY0hovQG

Julia set image rendering ruined by concurrency

I have the following code that I am to change into a concurrent program.
// Stefan Nilsson 2013-02-27
// This program creates pictures of Julia sets (en.wikipedia.org/wiki/Julia_set).
package main

import (
    "image"
    "image/color"
    "image/png"
    "log"
    "os"
    "strconv"
)

type ComplexFunc func(complex128) complex128

var Funcs []ComplexFunc = []ComplexFunc{
    func(z complex128) complex128 { return z*z - 0.61803398875 },
    func(z complex128) complex128 { return z*z + complex(0, 1) },
}

func main() {
    for n, fn := range Funcs {
        err := CreatePng("picture-"+strconv.Itoa(n)+".png", fn, 1024)
        if err != nil {
            log.Fatal(err)
        }
    }
}

// CreatePng creates a PNG picture file with a Julia image of size n x n.
func CreatePng(filename string, f ComplexFunc, n int) (err error) {
    file, err := os.Create(filename)
    if err != nil {
        return
    }
    defer file.Close()
    err = png.Encode(file, Julia(f, n))
    return
}

// Julia returns an image of size n x n of the Julia set for f.
func Julia(f ComplexFunc, n int) image.Image {
    bounds := image.Rect(-n/2, -n/2, n/2, n/2)
    img := image.NewRGBA(bounds)
    s := float64(n / 4)
    for i := bounds.Min.X; i < bounds.Max.X; i++ {
        for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
            n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
            r := uint8(0)
            g := uint8(0)
            b := uint8(n % 32 * 8)
            img.Set(i, j, color.RGBA{r, g, b, 255})
        }
    }
    return img
}

// Iterate sets z_0 = z, and repeatedly computes z_n = f(z_{n-1}), n ≥ 1,
// until |z_n| > 2 or n = max and returns this n.
func Iterate(f ComplexFunc, z complex128, max int) (n int) {
    for ; n < max; n++ {
        if real(z)*real(z)+imag(z)*imag(z) > 4 {
            break
        }
        z = f(z)
    }
    return
}
I have decided to try and make the Julia() function concurrent. So I changed it to:
func Julia(f ComplexFunc, n int) image.Image {
    bounds := image.Rect(-n/2, -n/2, n/2, n/2)
    img := image.NewRGBA(bounds)
    s := float64(n / 4)
    for i := bounds.Min.X; i < bounds.Max.X; i++ {
        for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
            go func() {
                n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
                r := uint8(0)
                g := uint8(0)
                b := uint8(n % 32 * 8)
                img.Set(i, j, color.RGBA{r, g, b, 255})
            }()
        }
    }
    return img
}
This change causes the images to look very different. The patterns are essentially the same, but there are a lot of white pixels that were not there before.
What is happening here?
There are two problems:
1. You don't actually wait for your goroutines to finish.
2. You don't pass i and j into the goroutine, so each closure will almost always see the final values of i and j.
Your function should look something like:
func Julia(f ComplexFunc, n int) image.Image {
    var wg sync.WaitGroup
    bounds := image.Rect(-n/2, -n/2, n/2, n/2)
    img := image.NewRGBA(bounds)
    s := float64(n / 4)
    for i := bounds.Min.X; i < bounds.Max.X; i++ {
        for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
            wg.Add(1)
            go func(i, j int) {
                n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
                r := uint8(0)
                g := uint8(0)
                b := uint8(n % 32 * 8)
                img.Set(i, j, color.RGBA{r, g, b, 255})
                wg.Done()
            }(i, j)
        }
    }
    wg.Wait()
    return img
}
A bonus tip: when diving into concurrency, it's usually a good idea to try your code with the race detector.
You might have to use a mutex to call img.Set, but I'm not sure and I can't test at the moment.
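If the race detector does flag img.Set, a minimal sketch of the mutex variant (an assumption; it only serializes the Set calls) would be:
var mu sync.Mutex
go func(i, j int) {
    defer wg.Done()
    n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
    mu.Lock()
    img.Set(i, j, color.RGBA{0, 0, uint8(n % 32 * 8), 255})
    mu.Unlock()
}(i, j)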

Goroutine deadlock?

I am new to Go, and I am puzzled by this deadlock:
package main

import (
    "fmt"
    "runtime"
    "time"
)

func main() {
    c := make(chan string)
    work := make(chan int, 1)
    clvl := runtime.NumCPU()
    count := 0
    for i := 0; i < clvl; i++ {
        go func(i int) {
            for jdId := range work {
                time.Sleep(time.Second * 1)
                c <- fmt.Sprintf("done %d", jdId)
            }
        }(i)
    }
    go func() {
        for i := 0; i < 10; i++ {
            work <- i
        }
        close(work)
    }()
    for resp := range c {
        fmt.Println(resp, count)
        count += 1
    }
}
You never close c, so your for range loop waits forever. Close it like this:
var wg sync.WaitGroup
for i := 0; i < clvl; i++ {
    wg.Add(1)
    go func(i int) {
        defer wg.Done()
        for jdId := range work {
            time.Sleep(time.Second * 1)
            c <- fmt.Sprintf("done %d", jdId)
        }
    }(i)
}
go func() {
    for i := 0; i < 10; i++ {
        work <- i
    }
    close(work)
    wg.Wait()
    close(c)
}()
EDIT: Fixed the panic, thanks Crast
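For completeness, a runnable version of the whole program with that fix applied (a sketch; only the WaitGroup bookkeeping and the close(c) are new):
package main

import (
    "fmt"
    "runtime"
    "sync"
    "time"
)

func main() {
    c := make(chan string)
    work := make(chan int, 1)
    clvl := runtime.NumCPU()
    count := 0
    var wg sync.WaitGroup
    for i := 0; i < clvl; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            for jdId := range work {
                time.Sleep(time.Second * 1)
                c <- fmt.Sprintf("done %d", jdId)
            }
        }(i)
    }
    go func() {
        for i := 0; i < 10; i++ {
            work <- i
        }
        close(work)
        wg.Wait()
        close(c) // now the for range below can terminate
    }()
    for resp := range c {
        fmt.Println(resp, count)
        count += 1
    }
}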
