Implementing gradient descent in Go

I'm trying to implement gradient descent in Go. My goal is to predict the price of a car from its mileage.
Here is my data set:
km,price
240000,3650
139800,3800
150500,4400
185530,4450
176000,5250
114800,5350
166800,5800
89000,5990
144500,5999
84000,6200
82029,6390
63060,6390
74000,6600
97500,6800
67000,6800
76025,6900
48235,6900
93000,6990
60949,7490
65674,7555
54000,7990
68500,7990
22899,7990
61789,8290
I've tried various approaches, like normalizing the data set, not normalizing it, leaving thetas as is, denormalizing thetas... But I cannot get the correct result.
My maths must be off somewhere, but I cannot figure out where.
The result I'm trying to get should be approximately t0 = 8500, t1 = -0.02
My implementation is the following:
package main

import (
    "encoding/csv"
    "fmt"
    "log"
    "math"
    "os"
    "strconv"
)

const (
    dataFile     = "data.csv"
    iterations   = 20000
    learningRate = 0.1
)

type dataSet [][]float64

var minKm, maxKm, minPrice, maxPrice float64

func (d dataSet) getExtremes(column int) (float64, float64) {
    min := math.Inf(1)
    max := math.Inf(-1)
    for _, row := range d {
        item := row[column]
        if item > max {
            max = item
        }
        if item < min {
            min = item
        }
    }
    return min, max
}

func normalizeItem(item, min, max float64) float64 {
    return (item - min) / (max - min)
}

func (d *dataSet) normalize() {
    minKm, maxKm = d.getExtremes(0)
    minPrice, maxPrice = d.getExtremes(1)
    for _, row := range *d {
        row[0], row[1] = normalizeItem(row[0], minKm, maxKm), normalizeItem(row[1], minPrice, maxPrice)
    }
}

func processEntry(entry []string) []float64 {
    if len(entry) != 2 {
        log.Fatalln("expected two fields")
    }
    km, err := strconv.ParseFloat(entry[0], 64)
    if err != nil {
        log.Fatalln(err)
    }
    price, err := strconv.ParseFloat(entry[1], 64)
    if err != nil {
        log.Fatalln(err)
    }
    return []float64{km, price}
}

func getData() dataSet {
    file, err := os.Open(dataFile)
    if err != nil {
        log.Fatalln(err)
    }
    reader := csv.NewReader(file)
    entries, err := reader.ReadAll()
    if err != nil {
        log.Fatalln(err)
    }
    entries = entries[1:]
    data := make(dataSet, len(entries))
    for k, entry := range entries {
        data[k] = processEntry(entry)
    }
    return data
}

func outputResult(theta0, theta1 float64) {
    file, err := os.OpenFile("weights.csv", os.O_WRONLY, 0644)
    if err != nil {
        log.Fatalln(err)
    }
    defer file.Close()
    file.Truncate(0)
    file.Seek(0, 0)
    file.WriteString(fmt.Sprintf("theta0,%.6f\ntheta1,%.6f\n", theta0, theta1))
}

func estimatePrice(theta0, theta1, mileage float64) float64 {
    return theta0 + theta1*mileage
}

func (d dataSet) computeThetas(theta0, theta1 float64) (float64, float64) {
    dataSize := float64(len(d))
    t0sum, t1sum := 0.0, 0.0
    for _, it := range d {
        mileage := it[0]
        price := it[1]
        err := estimatePrice(theta0, theta1, mileage) - price
        t0sum += err
        t1sum += err * mileage
    }
    return theta0 - (t0sum / dataSize * learningRate), theta1 - (t1sum / dataSize * learningRate)
}

func denormalize(theta, min, max float64) float64 {
    return theta*(max-min) + min
}

func main() {
    data := getData()
    data.normalize()
    theta0, theta1 := 0.0, 0.0
    for k := 0; k < iterations; k++ {
        theta0, theta1 = data.computeThetas(theta0, theta1)
    }
    theta0 = denormalize(theta0, minKm, maxKm)
    theta1 = denormalize(theta1, minPrice, maxPrice)
    outputResult(theta0, theta1)
}
What should I fix in order to properly implement gradient descent?

Linear Regression is really simple:
// yi = alpha + beta*xi + ei
func linearRegression(x, y []float64) (float64, float64) {
    EX := expected(x)
    EY := expected(y)
    EXY := expectedXY(x, y)
    EXX := expectedXY(x, x)
    covariance := EXY - EX*EY
    variance := EXX - EX*EX
    beta := covariance / variance
    alpha := EY - beta*EX
    return alpha, beta
}
Output:
8499.599649933218 -0.021448963591702314 396270.87871142407
Code:
package main

import (
    "encoding/csv"
    "fmt"
    "strconv"
    "strings"
)

func main() {
    x, y := readXY(`data.csv`)
    alpha, beta := linearRegression(x, y)
    fmt.Println(alpha, beta, -alpha/beta) // 8499.599649933218 -0.021448963591702314 396270.87871142407
}

// https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model
// yi = alpha + beta*xi + ei
func linearRegression(x, y []float64) (float64, float64) {
    EX := expected(x)
    EY := expected(y)
    EXY := expectedXY(x, y)
    EXX := expectedXY(x, x)
    covariance := EXY - EX*EY
    variance := EXX - EX*EX
    beta := covariance / variance
    alpha := EY - beta*EX
    return alpha, beta
}

// E[X]
func expected(x []float64) float64 {
    sum := 0.0
    for _, v := range x {
        sum += v
    }
    return sum / float64(len(x))
}

// E[XY]
func expectedXY(x, y []float64) float64 {
    sum := 0.0
    for i, v := range x {
        sum += v * y[i]
    }
    return sum / float64(len(x))
}

func readXY(filename string) ([]float64, []float64) {
    // file, err := os.Open(filename)
    // if err != nil {
    //     panic(err)
    // }
    // defer file.Close()
    file := strings.NewReader(data)
    reader := csv.NewReader(file)
    records, err := reader.ReadAll()
    if err != nil {
        panic(err)
    }
    records = records[1:]
    size := len(records)
    x := make([]float64, size)
    y := make([]float64, size)
    for i, v := range records {
        val, err := strconv.ParseFloat(v[0], 64)
        if err != nil {
            panic(err)
        }
        x[i] = val
        val, err = strconv.ParseFloat(v[1], 64)
        if err != nil {
            panic(err)
        }
        y[i] = val
    }
    return x, y
}

var data = `km,price
240000,3650
139800,3800
150500,4400
185530,4450
176000,5250
114800,5350
166800,5800
89000,5990
144500,5999
84000,6200
82029,6390
63060,6390
74000,6600
97500,6800
67000,6800
76025,6900
48235,6900
93000,6990
60949,7490
65674,7555
54000,7990
68500,7990
22899,7990
61789,8290`
Gradient descent is based on the observation that if a multi-variable function F(x) is defined and differentiable in a neighborhood of a point a, then F(x) decreases fastest if one goes from a in the direction of the negative gradient of F at a, that is -∇F(a). For example:
// F(x)
f := func(x float64) float64 {
    return alpha + beta*x // write your target function here
}
Derivative function:
h := 0.000001
// Derivative function ∇F(x)
df := func(x float64) float64 {
    return (f(x+h) - f(x-h)) / (2 * h) // write your target function derivative here
}
Search:
minimumAt := 1.0       // We start the search here
gamma := 0.01          // Step size multiplier
precision := 0.0000001 // Desired precision of result
max := 100000          // Maximum number of iterations
currentX := 0.0
step := 0.0
for i := 0; i < max; i++ {
    currentX = minimumAt
    minimumAt = currentX - gamma*df(currentX)
    step = minimumAt - currentX
    if math.Abs(step) <= precision {
        break
    }
}
fmt.Printf("Minimum at %.8f value: %v\n", minimumAt, f(minimumAt))

Related

Context cascading Test

How to improve this context cascading test:
func TestParentTimedoutContext(t *testing.T) {
    ctx1, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
    defer cancel()
    ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
    defer cancel2()
    time.Sleep(15 * time.Millisecond) // make ctx1 timeout
    if ctx2.Err() == nil {
        t.Error("failed")
    }
}
Command:
go test -timeout 30s -count=1 -run ^TestParentTimedoutContext$ my/internal/services
Output:
ok
Sometimes:
failed
go version go1.16.6
Wait for ctx2.Done() and measure the elapsed time, which should be less than the ctx2 deadline.
Try this:
func TestParentTimedoutContext(t *testing.T) {
    t0 := time.Now()
    ctx1, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
    defer cancel()
    ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
    defer cancel2()
    <-ctx2.Done()
    d := time.Since(t0)
    if d > 777*time.Millisecond {
        t.Error("failed:" + d.String())
    }
}
Benchmark:
func BenchmarkParentTimedoutContext(b *testing.B) {
    for i := 0; i < runtime.NumCPU(); i++ {
        go func() {
            ctx3, cancel3 := context.WithTimeout(context.Background(), 10_000*time.Millisecond)
            defer cancel3()
            for {
                select {
                case <-ctx3.Done():
                    return
                default:
                    // runtime.Gosched()
                }
            }
        }()
    }
    for i := 0; i < b.N; i++ {
        ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Millisecond)
        ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
        <-ctx2.Done()
        cancel1()
        cancel2()
    }
}
Benchmark result (Linux):
# BenchmarkParentTimedoutContext-8 589 17_178_588 ns/op // 17ms
Histogram code:
package main

import (
    "context"
    "fmt"
    "runtime"
    "time"
)

func main() {
    const n = 1_000
    for i := 0; i < runtime.NumCPU(); i++ {
        go func() {
            ctx3, cancel3 := context.WithTimeout(context.Background(), n*time.Millisecond)
            defer cancel3()
            for {
                select {
                case <-ctx3.Done():
                    return
                default:
                    // runtime.Gosched()
                }
            }
        }()
    }
    all := make([]time.Duration, n)
    for i := 0; i < n; i++ {
        t0 := time.Now()
        ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Millisecond)
        ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
        <-ctx2.Done()
        d := time.Since(t0)
        cancel1()
        cancel2()
        all[i] = d
    }
    ave, x, y := histogram(all, 10)
    fmt.Println("min =", y[0])
    fmt.Println("max =", y[len(y)-1])
    fmt.Println("ave =", ave)
    fmt.Println(x)
    fmt.Println(y)
    fmt.Println(runtime.GOOS, runtime.GOARCH, runtime.Version())
}

func histogram(d []time.Duration, n int) (ave time.Duration, x []int, y []time.Duration) {
    x = make([]int, n)
    y = make([]time.Duration, n+1)
    min := d[0]
    max := min
    ave = min
    for _, v := range d[1:] {
        ave += v
        if v < min {
            min = v
        } else if v > max {
            max = v
        }
    }
    ave /= time.Duration(len(d))
    distance := (max - min) / time.Duration(n)
    v := min
    for i := range y {
        y[i] = v
        v += distance
    }
    y[len(y)-1] = max // compensate division error
    for _, v := range d {
        i := int((v - min) / distance)
        if i >= n {
            i--
        }
        x[i]++
    }
    return
}
Histogram output (Linux):
go run .
min = 1.046524ms
max = 22.717866ms
ave = 2.090832ms
[950 0 0 0 0 0 1 0 47 2]
[1.046524ms 3.213658ms 5.380792ms 7.547926ms 9.71506ms 11.882194ms 14.049328ms 16.216462ms 18.383596ms 20.55073ms 22.717866ms]
linux amd64 go1.16.6
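As a side note to the measurements above: once ctx1 times out, the child ctx2 is canceled with the same error. A minimal sketch to observe this (standard context semantics; fmt import assumed):
ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Millisecond)
defer cancel1()
ctx2, cancel2 := context.WithTimeout(ctx1, 1000*time.Millisecond)
defer cancel2()
<-ctx2.Done()
fmt.Println(ctx2.Err()) // context deadline exceeded, inherited from ctx1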

Image.At returns nil

I'm having an issue. Here is my code:
package main

import (
    "fmt"
    "image/color"
    "image/draw"
    "image/png"
    "math/rand"
    "os"
    "strconv"
    "time"
)

func genSites(width, height int) [][]int {
    rand.Seed(time.Now().Unix())
    l, err := strconv.Atoi(os.Args[len(os.Args)-2])
    if err != nil {
        panic(err)
    }
    sites := make([][]int, l)
    for i := range sites {
        sites[i] = make([]int, 2)
        sites[i][0] = rand.Intn(width)
        sites[i][1] = rand.Intn(height)
    }
    return sites
}

func main() {
    inputF, err := os.Open(os.Args[len(os.Args)-3])
    if err != nil {
        panic(err)
    }
    defer inputF.Close()
    inputR, err := png.Decode(inputF)
    if err != nil {
        panic(err)
    }
    input := inputR.(draw.Image)
    minx, miny := input.Bounds().Min.X, input.Bounds().Min.Y
    maxx, maxy := input.Bounds().Max.X-1, input.Bounds().Max.Y-1
    sites := genSites(maxx-minx, maxy-miny)
    siteColors := make([][]color.Color, len(sites))
    // todo: figure out something better than this
    for i := range siteColors {
        siteColors[i] = make([]color.Color, (maxx-minx)*(maxy-miny))
    }
    siteBelongs := make([][]int, maxx-minx)
    for x := range siteBelongs {
        siteBelongs[x] = make([]int, maxy-miny)
        for y := range siteBelongs[x] {
            dmin := (maxx-minx)*(maxx-minx) + (maxy-miny)*(maxy-miny)
            var smin int
            for i, s := range sites {
                d := (s[0]-x)*(s[0]-x) + (s[1]-y)*(s[1]-y)
                if d > dmin {
                    smin = i
                    dmin = d
                }
            }
            siteBelongs[x][y] = smin
            siteColors[smin] = append(siteColors[smin], input.At(x+minx, y+miny))
        }
    }
    siteAvgColors := make([]color.Color, len(sites))
    for i := range siteAvgColors {
        var sR, sG, sB, sA int
        for _, val := range siteColors[i] {
            fmt.Println(val)
            r, g, b, a := val.RGBA()
            sR += int(r)
            sG += int(g)
            sB += int(b)
            sA += int(a)
        }
        siteAvgColors[i] = color.RGBA{
            uint8(sR / len(siteColors[i])),
            uint8(sG / len(siteColors[i])),
            uint8(sB / len(siteColors[i])),
            uint8(sA / len(siteColors[i]))}
    }
    for x := range siteBelongs {
        for y := range siteBelongs[x] {
            input.Set(minx+x, miny+y, siteAvgColors[siteBelongs[x][y]])
        }
    }
    output, err := os.Create(os.Args[len(os.Args)-1])
    if err != nil {
        panic(err)
    }
    defer output.Close()
    err = png.Encode(output, input)
    if err != nil {
        panic(err)
    }
}
The error is this:
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x18 pc=0x4b46e1]
goroutine 1 [running]:
main.main()
/home/victor/programs/goprograms/src/v/imagerandvornoi/main.go:71 +0x7a1
Line 71 is the one that says r, g, b, a := val.RGBA(). That val is appended at line 63, i.e. siteColors[smin] = append(siteColors[smin], input.At(x+minx, y+miny)), which means that input.At is returning nil. Why? How can I fix this?
This has to do with the semantics of the make() builtin in Go. make can take two or three arguments: make(Type, length) or make(Type, length, capacity). With the two-argument form, the length and capacity are set to the same value. If you're going to fill the slice using append, you want the three-argument form with a zero length, e.g. make(Type, 0, 10): the two-argument form make(Type, 10) builds a slice that already contains 10 zero-valued elements, so the first append becomes the 11th item.
You're hitting a nil dereference here because you used the two-argument version of make together with append, so the first items in the slice are nil (the zero value for an interface type like color.Color).
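A minimal standalone sketch of the difference (not from the original code):
package main

import (
    "fmt"
    "image/color"
)

func main() {
    a := make([]color.Color, 2)      // len 2: two nil interface values already inside
    a = append(a, color.White)       // len 3: [nil, nil, White]
    fmt.Println(len(a), a[0] == nil) // 3 true

    b := make([]color.Color, 0, 2)   // len 0, cap 2: empty, ready for append
    b = append(b, color.White)       // len 1: [White]
    fmt.Println(len(b), b[0] == nil) // 1 false
}
So in your code, either index into the preallocated slice instead of appending, or allocate with make([]color.Color, 0, (maxx-minx)*(maxy-miny)).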

How to get this specific shape for my data

In Go, I'm using this function bars, err := custplotter.NewCandlesticks(data)
from here:
https://github.com/pplcc/plotext/tree/master/custplotter
It's expecting this shape for data:
[{2 16435 16458 16435 16446 1} {3 16446 16458 16435.04 16455 1} .....]
But my code below is creating my data in this shape instead:
[[2 16435 16458 16435 16446 1] [3 16446 16458 16435.04 16455 1] .....]
Which gives me this error message:
cannot use data (type [][]string) as type custplotter.TOHLCVer in argument to custplotter.NewCandlesticks:
[][]string does not implement custplotter.TOHLCVer (missing Len method)
I believe the problem is the data shape. How can I change my code to create the required data shape (with { } instead of [ ])?
// read excel file
xlsx, err := excelize.OpenFile("/media/Snaps/test snaps.xlsm")
if err != nil {
    fmt.Println(err)
    return
}
// read all rows into df
df := xlsx.GetRows("ticker_2")
// get only TOHLCV columns and 60 rows
df3 := make([][]string, 60) // create slice for 60 rows
idx := 0
for _, row := range df[1:61] { // read 60 rows
    df3row := make([]string, 6) // create slice for 6 columns
    copy(df3row, row[28:34])    // copy desired columns to new row slice
    df3[idx] = df3row
    idx++
}
All the examples of slices I found in the Go literature use only [ [ ], [ ] ], as in https://github.com/pplcc/plotext/blob/68ab3c6e05c34baf5af21c9f5c3341f527a110ac/examples/tohlcvexampledata.go#L42
It seems that what you need is a custplotter.TOHLCVs, which is just a slice of structs of float64s.
https://github.com/pplcc/plotext/blob/master/custplotter/tohlcv.go:
type TOHLCVer interface {
    // Len returns the number of time, open, high, low, close, volume tuples.
    Len() int
    // TOHLCV returns a time, open, high, low, close, volume tuple.
    TOHLCV(int) (float64, float64, float64, float64, float64, float64)
}

// TOHLCVs implements the TOHLCVer interface using a slice.
type TOHLCVs []struct{ T, O, H, L, C, V float64 }
so basically your solution could resemble this:
df3 := make(TOHLCVs, 60) // create slice for 60 rows
idx := 0
for _, row := range df[1:61] { // read 60 rows
    df3[idx].T, err = strconv.ParseFloat(row[28], 64)
    df3[idx].O, err = strconv.ParseFloat(row[29], 64)
    df3[idx].H, err = strconv.ParseFloat(row[30], 64)
    df3[idx].L, err = strconv.ParseFloat(row[31], 64)
    df3[idx].C, err = strconv.ParseFloat(row[32], 64)
    df3[idx].V, err = strconv.ParseFloat(row[33], 64)
    idx++
}
Or you could just implement the TOHLCVer interface too :)
type SlicesOfTOHLCV [][6]float64

func (s SlicesOfTOHLCV) Len() int {
    return len(s)
}

func (s SlicesOfTOHLCV) TOHLCV(i int) (float64, float64, float64, float64, float64, float64) {
    return s[i][0], s[i][1], s[i][2], s[i][3], s[i][4], s[i][5]
}

mySlice := make(SlicesOfTOHLCV, 60)
i := 0
for _, row := range df[1:61] {
    mySlice[i] = [6]float64{}
    for j := 0; j < 6; j++ {
        mySlice[i][j], err = strconv.ParseFloat(row[28+j], 64)
        if err != nil {
            panic(err)
        }
    }
    i++
}
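Either way, the result satisfies the interface and can be passed straight to the call from the question (a sketch reusing the question's variable names):
bars, err := custplotter.NewCandlesticks(mySlice)
if err != nil {
    panic(err)
}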

Julia set image rendering ruined by concurrency

I have the following code, which I'm supposed to change into a concurrent program.
// Stefan Nilsson 2013-02-27
// This program creates pictures of Julia sets (en.wikipedia.org/wiki/Julia_set).
package main

import (
    "image"
    "image/color"
    "image/png"
    "log"
    "os"
    "strconv"
)

type ComplexFunc func(complex128) complex128

var Funcs []ComplexFunc = []ComplexFunc{
    func(z complex128) complex128 { return z*z - 0.61803398875 },
    func(z complex128) complex128 { return z*z + complex(0, 1) },
}

func main() {
    for n, fn := range Funcs {
        err := CreatePng("picture-"+strconv.Itoa(n)+".png", fn, 1024)
        if err != nil {
            log.Fatal(err)
        }
    }
}

// CreatePng creates a PNG picture file with a Julia image of size n x n.
func CreatePng(filename string, f ComplexFunc, n int) (err error) {
    file, err := os.Create(filename)
    if err != nil {
        return
    }
    defer file.Close()
    err = png.Encode(file, Julia(f, n))
    return
}

// Julia returns an image of size n x n of the Julia set for f.
func Julia(f ComplexFunc, n int) image.Image {
    bounds := image.Rect(-n/2, -n/2, n/2, n/2)
    img := image.NewRGBA(bounds)
    s := float64(n / 4)
    for i := bounds.Min.X; i < bounds.Max.X; i++ {
        for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
            n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
            r := uint8(0)
            g := uint8(0)
            b := uint8(n % 32 * 8)
            img.Set(i, j, color.RGBA{r, g, b, 255})
        }
    }
    return img
}

// Iterate sets z_0 = z, and repeatedly computes z_n = f(z_{n-1}), n ≥ 1,
// until |z_n| > 2 or n = max and returns this n.
func Iterate(f ComplexFunc, z complex128, max int) (n int) {
    for ; n < max; n++ {
        if real(z)*real(z)+imag(z)*imag(z) > 4 {
            break
        }
        z = f(z)
    }
    return
}
I have decided to try and make the Julia() function concurrent. So I changed it to:
func Julia(f ComplexFunc, n int) image.Image {
    bounds := image.Rect(-n/2, -n/2, n/2, n/2)
    img := image.NewRGBA(bounds)
    s := float64(n / 4)
    for i := bounds.Min.X; i < bounds.Max.X; i++ {
        for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
            go func() {
                n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
                r := uint8(0)
                g := uint8(0)
                b := uint8(n % 32 * 8)
                img.Set(i, j, color.RGBA{r, g, b, 255})
            }()
        }
    }
    return img
}
This change causes the images to look very different. The patterns are essentially the same, but there are a lot of white pixels that were not there before.
What is happening here?
There are 2 problems:
1. You don't actually wait for your goroutines to finish.
2. You don't pass i and j to the goroutine, so they will almost always be the last i and j.
Your function should look something like:
func Julia(f ComplexFunc, n int) image.Image {
    var wg sync.WaitGroup
    bounds := image.Rect(-n/2, -n/2, n/2, n/2)
    img := image.NewRGBA(bounds)
    s := float64(n / 4)
    for i := bounds.Min.X; i < bounds.Max.X; i++ {
        for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
            wg.Add(1)
            go func(i, j int) {
                n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
                r := uint8(0)
                g := uint8(0)
                b := uint8(n % 32 * 8)
                img.Set(i, j, color.RGBA{r, g, b, 255})
                wg.Done()
            }(i, j)
        }
    }
    wg.Wait()
    return img
}
A bonus tip: when diving into concurrency, it's usually a good idea to try your code with the race detector (go run -race or go test -race).
You might have to use a mutex to call img.Set, but I'm not sure and I can't test at the moment.
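If the race detector does complain about img.Set, here is a sketch of the goroutine body guarded by a mutex (an assumption, serializing Set, not something verified against this program; mu would be declared next to wg):
var mu sync.Mutex // declared alongside var wg sync.WaitGroup

go func(i, j int) {
    n := Iterate(f, complex(float64(i)/s, float64(j)/s), 256)
    mu.Lock() // serialize writes to the shared image (assumption: contention is acceptable)
    img.Set(i, j, color.RGBA{0, 0, uint8(n % 32 * 8), 255})
    mu.Unlock()
    wg.Done()
}(i, j)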

How to make fmt.Scanln() read into a slice of integers

I have a line containing 3 numbers that I want to read from stdin with fmt.Scanln() but this code won't work:
X := make([]int, 3)
fmt.Scanln(X...)
fmt.Printf("%v\n", X)
I get this error message:
cannot use X (type []int) as type []interface {} in function argument
I don't get it.
Idiomatic Go would be:
func read(n int) ([]int, error) {
    in := make([]int, n)
    for i := range in {
        _, err := fmt.Scan(&in[i])
        if err != nil {
            return in[:i], err
        }
    }
    return in, nil
}
interface{} means nothing. Please don't use it if you don't have to.
For example,
package main

import "fmt"

func intScanln(n int) ([]int, error) {
    x := make([]int, n)
    y := make([]interface{}, len(x))
    for i := range x {
        y[i] = &x[i]
    }
    n, err := fmt.Scanln(y...)
    x = x[:n]
    return x, err
}

func main() {
    x, err := intScanln(3)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("%v\n", x)
}
Input:
1 2 3
Output:
[1 2 3]
I think the correct version should be
X := make([]int, 3)
fmt.Scanln(&X[0], &X[1], &X[2])
fmt.Printf("%v\n", X)
This error message occurs because there's no reasonable way to convert []int to []interface{}. Note that this is in reference to a slice: the syntax you're using is correct, but fmt.Scanln expects []interface{}. This has implications outside of package fmt.
The reason I've seen given for this is that Go gives you control over memory layout, so it currently has no reasonable way to do the slice conversion. This means you'll need to do the conversion manually before passing it to a function expecting a slice of a given type. For example:
package main

import (
    "fmt"
)

func main() {
    x := make([]int, 3)
    y := make([]interface{}, 3)
    y[0] = x[0]
    y[1] = x[1]
    y[2] = x[2]
    fmt.Println(y...)
}
Or something a little more general:
x := make([]int, 3)
y := make([]interface{}, len(x))
for i, v := range x {
    y[i] = v
}
fmt.Println(y...)
Regarding your specific issue, see the following:
x := make([]*int, 3)
for i := range x {
    x[i] = new(int)
}
y := make([]interface{}, 3)
for i, v := range x {
    y[i] = v
}
if _, err := fmt.Scanln(y...); err != nil {
    fmt.Println("Scanln err: ", err)
}
for _, v := range y {
    val := v.(*int)
    fmt.Println(*val)
}
I saw in a comment you said the lines can have different lengths. In that case
you can implement your own fmt.Scanner:
package main

import (
    "bytes"
    "fmt"
)

type slice struct {
    tok []int
}

func (s *slice) Scan(state fmt.ScanState, verb rune) error {
    tok, err := state.Token(false, func(r rune) bool { return r != '\n' })
    if err != nil {
        return err
    }
    if _, _, err := state.ReadRune(); err != nil {
        if len(tok) == 0 {
            panic(err)
        }
    }
    b := bytes.NewReader(tok)
    for {
        var d int
        _, err := fmt.Fscan(b, &d)
        if err != nil {
            break
        }
        s.tok = append(s.tok, d)
    }
    return nil
}

func main() {
    var s slice
    fmt.Scan(&s)
    fmt.Println(s.tok)
}
https://golang.org/pkg/fmt#Scanner
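For example, assuming a single line of space-separated integers:
Input:
1 2 3 4
Output:
[1 2 3 4]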
