I wrote a simple Go program that should start 3 goroutines. However, the goroutines don't seem to start.
Please note that the situation is not identical to the one described in this post:
Why is my goroutine not executed?
The program should wait for the goroutines to end their execution... Thus, the program should wait forever (since the routines never stop).
package main
import (
"fmt"
"net"
"os"
"time"
"sync"
)
func main() {
wg := sync.WaitGroup{}
fmt.Print("Starting 3 clients\n")
for i:=0; i<3; i++ {
client := func(inName string) {
fmt.Printf("Client <%s> started\n", inName)
wg.Add(1)
conn, err := net.Dial("tcp", ":8000")
if err != nil {
fmt.Printf("[%s] Error while connecting to the server: %s", inName, err.Error())
os.Exit(1)
}
n := 0
sleepDuration, _ := time.ParseDuration("2s")
for {
message := fmt.Sprintf("[%s] > message %d\n", inName, n)
fmt.Printf("%s", message)
_, err := conn.Write([]byte(message))
if nil != err {
fmt.Sprintf("[%s] Error while writing data to the socket: %s", inName, err.Error())
os.Exit(1)
}
time.Sleep(sleepDuration)
n++
}
}
name := fmt.Sprintf("Client%d", i)
fmt.Printf("Starting client <%s>...\n", name)
go client(name)
fmt.Print("Done\n")
}
wg.Wait()
}
Result:
Starting 3 clients
Starting client <Client0>...
Done
Starting client <Client1>...
Done
Starting client <Client2>...
Done
As this solution says:
It is important that the wg.Add() happens before the go statement to prevent race conditions.
So you need to move the wg.Add(1) call out of the goroutine itself and make it just before starting the goroutine. Also, call defer wg.Done() at the start of the goroutine's function so that the WaitGroup counter is decremented by one when the goroutine finishes. Take a look at the code below.
package main
import (
"fmt"
"net"
"os"
"time"
"sync"
)
func main() {
wg := sync.WaitGroup{}
fmt.Print("Starting 3 clients\n")
for i:=0; i<3; i++ {
client := func(inName string) {
defer wg.Done() // added this line
fmt.Printf("Client <%s> started\n", inName)
conn, err := net.Dial("tcp", ":8000")
if err != nil {
fmt.Printf("[%s] Error while connecting to the server: %s", inName, err.Error())
os.Exit(1)
}
n := 0
sleepDuration, _ := time.ParseDuration("2s")
for {
message := fmt.Sprintf("[%s] > message %d\n", inName, n)
fmt.Printf("%s", message)
_, err := conn.Write([]byte(message))
if nil != err {
fmt.Sprintf("[%s] Error while writing data to the socket: %s", inName, err.Error())
os.Exit(1)
}
time.Sleep(sleepDuration)
n++
}
}
name := fmt.Sprintf("Client%d", i)
fmt.Printf("Starting client <%s>...\n", name)
wg.Add(1) // moved this line
go client(name)
fmt.Print("Done\n")
}
wg.Wait()
}
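Since the number of clients is fixed, another option is to add all of them to the WaitGroup in a single call before the loop. A minimal sketch of that pattern (the goroutine body here is just a stand-in for the real dial-and-write loop):
package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    wg := sync.WaitGroup{}
    wg.Add(3) // the count is known up front, so Add happens before any goroutine starts
    for i := 0; i < 3; i++ {
        name := fmt.Sprintf("Client%d", i)
        go func(inName string) {
            defer wg.Done()
            fmt.Printf("Client <%s> started\n", inName)
            time.Sleep(2 * time.Second) // placeholder for the real client work
        }(name)
    }
    wg.Wait()
}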
I'm somewhat new to Go and am reworking code that I found somewhere else to fit my needs. Because of that, I don't totally understand what is happening here, although I get the general idea.
I'm running a few websocket clients using goroutines, but I'm getting an unexpected error that causes the program to crash. My program seems to close one too many threads (excuse me if this is the wrong terminology) when there is an error reading a message from the websocket (see the conn.ReadMessage() call in the readHandler func). Any ideas on how I would work around this issue? I would really appreciate anyone taking the time to look through it. Thanks in advance!
package main
import (
"context"
"fmt"
"os"
"time"
"os/signal"
"syscall"
"sync"
"net/url"
"github.com/gorilla/websocket"
"strconv"
"encoding/json"
"log"
"bytes"
"compress/gzip"
"io/ioutil"
)
// Structs
type Ping struct {
Ping int64 `json:"ping"`
}
type Pong struct {
Pong int64 `json:"pong"`
}
type SubParams struct {
Sub string `json:"sub"`
ID string `json:"id"`
}
func InitSub(subType string, pair string, i int) []byte {
var idInt string = "id" + strconv.Itoa(i)
subStr := "market." + pair + "." + subType
sub := &SubParams{
Sub: subStr,
ID: idInt,
}
out, err := json.MarshalIndent(sub, "", " ")
if err != nil {
log.Println(err);
}
//log.Println(string(out))
return out
}
// main func
func main() {
var server string = "api.huobi.pro"
pairs := []string{"btcusdt", "ethusdt", "ltcusdt"}
comms := make(chan os.Signal, 1)
signal.Notify(comms, os.Interrupt, syscall.SIGTERM)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
for x, pair := range pairs {
wg.Add(1)
go control(server, "ws", pair, ctx, &wg, x+1)
}
<-comms
cancel()
wg.Wait()
}
func control(server string, path string, pair string, ctx context.Context, wg *sync.WaitGroup, i int) {
fmt.Printf("Started control for %s\n", server)
url := url.URL {
Scheme: "wss",
Host: server,
Path: path,
}
fmt.Println(url.String())
conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
if err != nil {
panic(err)
}
subscribe(conn, pair, i)
defer conn.Close()
var localwg sync.WaitGroup
localwg.Add(1)
go readHandler(ctx, conn, &localwg, server)
<- ctx.Done()
localwg.Wait()
wg.Done()
return
}
func readHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
for {
select {
case <- ctx.Done():
wg.Done()
return
default:
_, p, err := conn.ReadMessage()
if err != nil {
wg.Done()
fmt.Println(err)
}
r, err := gzip.NewReader(bytes.NewReader(p))
if(err == nil) {
result, err := ioutil.ReadAll(r)
if(err != nil) {
fmt.Println(err)
}
d := string(result)
fmt.Println(d)
var ping Ping
json.Unmarshal([]byte(d), &ping)
if (ping.Ping > 0) {
str := Pong{Pong: ping.Ping}
msg, err := json.Marshal(str)
if (err == nil) {
fmt.Println(string(msg))
conn.WriteMessage(websocket.TextMessage, []byte(msg))
}
}
}
}
}
}
func subscribe(conn *websocket.Conn, pair string, id int) {
sub := string(InitSub("trade.detail", pair, id))
err := conn.WriteMessage(websocket.TextMessage, []byte(sub))
if err != nil {
panic(err)
}
}
Break out of the readHandler loop when the connection fails:
_, p, err := conn.ReadMessage()
if err != nil {
wg.Done()
fmt.Println(err)
return // <--- add this line
}
Without the return, the function spins in a tight loop reading errors and calling wg.Done() on every iteration, which drives the WaitGroup counter negative and causes the panic.
Use defer wg.Done() at the beginning of the goroutine to ensure that Done is called exactly once.
func readHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
defer wg.Done()
for {
select {
case <-ctx.Done():
return
default:
_, p, err := conn.ReadMessage()
if err != nil {
fmt.Println(err)
return
}
...
Update the control function as well.
Because the caller does not execute any code concurrently with readHandler, there is no value in running readHandler as a goroutine. Remove all references to wait groups from readHandler and call the function directly: change go readHandler(ctx, conn, &localwg, server) to readHandler(ctx, conn, server).
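For illustration, control could then look roughly like the sketch below. This is only a sketch: it reuses the imports, subscribe and the simplified readHandler from above, and keeps the original error handling.
func control(server string, path string, pair string, ctx context.Context, wg *sync.WaitGroup, i int) {
    defer wg.Done()
    fmt.Printf("Started control for %s\n", server)
    u := url.URL{
        Scheme: "wss",
        Host:   server,
        Path:   path,
    }
    conn, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    subscribe(conn, pair, i)
    // No local WaitGroup and no extra goroutine: readHandler returns when the
    // context is cancelled or when reading from the connection fails.
    readHandler(ctx, conn, server)
}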
There are more issues, but this should move you further along.
I'm trying to adapt code from the consumer group example for github.com/Shopify/sarama, and am struggling to add a unit test which tests the functionality of session.MarkMessage() in the ConsumeClaim method (https://github.com/Shopify/sarama/blob/5466b37850a38f4ed6d04b94c6f058bd75032c2a/examples/consumergroup/main.go#L160).
Here is my adapted code with a consume() function:
package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"sync"
"syscall"
"github.com/Shopify/sarama"
)
var (
addrs = []string{"localhost:9092"}
topic = "my-topic"
)
func main() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var wg sync.WaitGroup
defer wg.Wait()
consumer := &Consumer{ready: make(chan bool)}
close := consume(ctx, &wg, consumer)
defer close()
<-consumer.ready
log.Println("Sarama consumer up and running!")
sigterm := make(chan os.Signal, 1)
signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
select {
case <-ctx.Done():
log.Println("terminating: context cancelled")
case <-sigterm:
log.Println("terminating: via signal")
}
}
func consume(ctx context.Context, wg *sync.WaitGroup, consumer *Consumer) (close func()) {
config := sarama.NewConfig()
config.Version = sarama.V0_11_0_2 // The version has to be at least V0_10_2_0 to support consumer groups
config.Consumer.Offsets.Initial = sarama.OffsetOldest
consumerGroup, err := sarama.NewConsumerGroup(addrs, "my-group", config)
if err != nil {
log.Fatalf("NewConsumerGroup: %v", err)
}
wg.Add(1)
go func() {
defer wg.Done()
for {
if err := consumerGroup.Consume(ctx, []string{topic}, consumer); err != nil {
log.Panicf("Consume: %v", err)
}
if ctx.Err() != nil {
return
}
consumer.ready = make(chan bool)
}
}()
close = func() {
if err := consumerGroup.Close(); err != nil {
log.Panicf("Close: %v", err)
}
}
return
}
// Consumer represents a Sarama consumer group consumer
type Consumer struct {
ready chan bool
handle func([]byte) error
}
// Setup is run at the beginning of a new session, before ConsumeClaim
func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error {
// Mark the consumer as ready
close(consumer.ready)
return nil
}
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
return nil
}
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for message := range claim.Messages() {
log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", message.Value, message.Timestamp, message.Topic)
if consumer.handle != nil {
if err := consumer.handle(message.Value); err != nil {
return fmt.Errorf("handle message %s: %v", message.Value, err)
}
}
session.MarkMessage(message, "")
}
return nil
}
Here are a couple of unit tests I've written for it:
package main
import (
"context"
"fmt"
"log"
"sync"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/stretchr/testify/require"
"gotest.tools/assert"
)
func TestConsume(t *testing.T) {
config := sarama.NewConfig()
config.Producer.Return.Successes = true
producer, err := sarama.NewSyncProducer(addrs, config)
require.NoError(t, err)
partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
Topic: topic,
Value: sarama.ByteEncoder([]byte("foobar")),
})
require.NoError(t, err)
t.Logf("Sent message to partition %d with offset %d", partition, offset)
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
consumer := &Consumer{ready: make(chan bool)}
close := consume(ctx, &wg, consumer)
<-consumer.ready
log.Println("Sarama consumer up and running!")
time.Sleep(1 * time.Second)
cancel()
wg.Wait()
close()
}
func TestConsumeTwice(t *testing.T) {
config := sarama.NewConfig()
config.Producer.Return.Successes = true
producer, err := sarama.NewSyncProducer(addrs, config)
require.NoError(t, err)
data1, data2 := "foobar1", "foobar2"
for _, data := range []string{data1, data2} {
partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
Topic: topic,
Key: sarama.StringEncoder("foobar"),
Value: sarama.StringEncoder(data),
})
require.NoError(t, err)
t.Logf("Sent message to partition %d with offset %d", partition, offset)
}
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
messageReceived := make(chan []byte)
consumer := &Consumer{
ready: make(chan bool),
handle: func(data []byte) error {
messageReceived <- data
fmt.Printf("Received message: %s\n", data)
return nil
},
}
close := consume(ctx, &wg, consumer)
<-consumer.ready
log.Println("Sarama consumer up and running!")
for i := 0; i < 2; i++ {
data := <-messageReceived
switch i {
case 0:
assert.Equal(t, data1, string(data))
case 1:
assert.Equal(t, data2, string(data))
}
}
cancel()
wg.Wait()
close()
}
The tests can be run after running Kafka and Zookeeper in a Docker container such as johnnypark/kafka-zookeeper like so:
docker run -p 2181:2181 -p 9092:9092 -e ADVERTISED_HOST=127.0.0.1 -e NUM_PARTITIONS=10 johnnypark/kafka-zookeeper
What I'm struggling with is the following: if I comment out the line
session.MarkMessage(message, "")
the tests still pass. According to https://godoc.org/github.com/Shopify/sarama#ConsumerGroupSession, MarkMessage marks a message as consumed, but how would I test this in a unit test?
sarama.ConsumerGroupSession.MarkMessage calls sarama.PartitionOffsetManager.MarkOffset, whose method comment says: "Note: calling MarkOffset does not necessarily commit the offset to the backend store immediately for efficiency reasons, and it may never be committed if your application crashes. This means that you may end up processing the same message twice."
So in unit tests, MarkMessage does not commit the offset fast enough. I faced the same problem and Google brought me here. Sleeping for a second at the end of the test functions can be a workaround.
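As a concrete sketch of that workaround (the test name is made up, and nothing here verifies the commit itself; the sleeps only give the offset manager time to flush the offset marked by session.MarkMessage before the consumer group is closed), the end of such a test could look like:
func TestConsumeWithCommitSlack(t *testing.T) {
    // ... produce a test message and call consume() exactly as in TestConsume ...
    <-consumer.ready
    log.Println("Sarama consumer up and running!")
    time.Sleep(1 * time.Second) // let ConsumeClaim receive the message and call MarkMessage
    cancel()
    wg.Wait()
    close()
    time.Sleep(1 * time.Second) // give the offset manager a moment to commit the marked offset
}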
I'm trying to create a program that will connect to several servers through gorilla websockets. I currently have a program that will iterate over a list of server addresses and create a new goroutine that will create its own websocket.Conn and handle reading and writing.
The problem is that every time a new goroutine is created, the previous goroutines are blocked and only the last one can continue. I believe this is because the gorilla websocket library is blocking each goroutine, but I might be mistaken.
I have tried putting a timer in the server list iterator, and each goroutine will work perfectly, but the moment a new goroutine is made with another address the previous goroutine is blocked.
The relevant bits of my code:
In my main.go
for _, server := range servers {
go control(ctx, server, port)
}
In control()
func control(ctx context.Context, server, port string) {
url := url.URL{
Scheme: "ws",
Host: server + ":" + port,
Path: "",
}
conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
if err != nil {
panic(err)
}
defer conn.Close()
go sendHandler(ctx, conn)
go readHandler(ctx, conn)
}
func readHandler(ctx context.Context, conn *websocket.Conn) {
for {
_, p, err := conn.ReadMessage(); if err != nil {
panic(err)
}
select {
case <-ctx.Done():
goto TERM
default:
// do nothing
}
}
TERM:
// do termination
}
func sendHandler(ctx context.Context, conn *websocket.Conn) {
for _, msg := range msges {
err := conn.WriteMessage(websocket.TextMessage, msg)
if err != nil {
panic(err)
}
}
<-ctx.Done()
}
I removed the parts where I add waitgroups and other unnecessary pieces of code.
So what I expect is for there to be 3n goroutines running (where n is the number of servers) without blocking, but right now I see only 3 goroutines running, which are the ones started by the last iteration of the server list.
Thanks!
EDIT 14/06/2019:
I spent some time making a small working example, and in it the bug did not occur: none of the goroutines blocked each other. I'm still unsure what was causing it, but here is my small working example:
main.go
package main
import (
"context"
"fmt"
"os"
"time"
"os/signal"
"syscall"
"sync"
"net/url"
"github.com/gorilla/websocket"
)
func main() {
servers := []string{"5555","5556", "5557"}
comms := make(chan os.Signal, 1)
signal.Notify(comms, os.Interrupt, syscall.SIGTERM)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
for _, server := range servers {
wg.Add(1)
go control(server,
ctx,
&wg)
}
<-comms
cancel()
wg.Wait()
}
func control(server string, ctx context.Context, wg *sync.WaitGroup) {
fmt.Printf("Started control for %s\n", server)
url := url.URL {
Scheme: "ws",
Host: "0.0.0.0" + ":" + server,
Path: "",
}
conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
if err != nil {
panic(err)
}
defer conn.Close()
var localwg sync.WaitGroup
localwg.Add(1)
go sendHandler(ctx, conn, &localwg, server)
localwg.Add(1)
go readHandler(ctx, conn, &localwg, server)
<- ctx.Done()
localwg.Wait()
wg.Done()
return
}
func sendHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
for i := 0; i < 50; i++ {
err := conn.WriteMessage(websocket.TextMessage, []byte("ping"))
if err != nil {
panic(err)
}
fmt.Printf("sent msg to %s\n", server)
time.Sleep(1 * time.Second)
}
<- ctx.Done()
wg.Done()
}
func readHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
for {
select {
case <- ctx.Done():
wg.Done()
return
default:
_, p, err := conn.ReadMessage()
if err != nil {
wg.Done()
fmt.Println("done")
}
fmt.Printf("Got [%s] from %s\n", string(p), server)
}
}
}
I tested it against dpallot's simple-websocket-server, running one server on each of ports 5555, 5556 and 5557.
This part of your code is causing the problem:
conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
if err != nil {
panic(err)
}
defer conn.Close()
go sendHandler(ctx, conn)
go readHandler(ctx, conn)
You create the connection, defer its close, start two other goroutines and then return from control. As soon as the function returns, the deferred call closes the socket out from under the handler goroutines.
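A minimal way to fix it (just a sketch, assuming control receives the ctx created in main) is to block in control until the context is cancelled, so the deferred Close only runs on shutdown:
func control(ctx context.Context, server, port string) {
    u := url.URL{
        Scheme: "ws",
        Host:   server + ":" + port,
        Path:   "",
    }
    conn, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
    if err != nil {
        panic(err)
    }
    defer conn.Close() // now runs only after the context is cancelled

    go sendHandler(ctx, conn)
    go readHandler(ctx, conn)

    <-ctx.Done() // keep control, and therefore the connection, alive until shutdown
}
This is essentially what your edited working example already does by waiting on ctx.Done() and the local WaitGroup before returning.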
In a function definition, if a channel is an argument without a direction, does it have to send or receive something?
func makeRequest(url string, ch chan<- string, results chan<- string) {
start := time.Now()
resp, err := http.Get(url)
defer resp.Body.Close()
if err != nil {
fmt.Printf("%v", err)
}
resp, err = http.Post(url, "text/plain", bytes.NewBuffer([]byte("Hey")))
defer resp.Body.Close()
secs := time.Since(start).Seconds()
if err != nil {
fmt.Printf("%v", err)
}
// Cannot move past this.
ch <- fmt.Sprintf("%f", secs)
results <- <- ch
}
func MakeRequestHelper(url string, ch chan string, results chan string, iterations int) {
for i := 0; i < iterations; i++ {
makeRequest(url, ch, results)
}
for i := 0; i < iterations; i++ {
fmt.Println(<-ch)
}
}
func main() {
args := os.Args[1:]
threadString := args[0]
iterationString := args[1]
url := args[2]
threads, err := strconv.Atoi(threadString)
if err != nil {
fmt.Printf("%v", err)
}
iterations, err := strconv.Atoi(iterationString)
if err != nil {
fmt.Printf("%v", err)
}
channels := make([]chan string, 100)
for i := range channels {
channels[i] = make(chan string)
}
// results aggregate all the things received by channels in all goroutines
results := make(chan string, iterations*threads)
for i := 0; i < threads; i++ {
go MakeRequestHelper(url, channels[i], results, iterations)
}
resultSlice := make([]string, threads*iterations)
for i := 0; i < threads*iterations; i++ {
resultSlice[i] = <-results
}
}
In the above code, the channel operations in makeRequest (ch <- fmt.Sprintf("%f", secs) and results <- <-ch) seem to be blocking every goroutine that executes makeRequest.
I am new to the concurrency model of Go. I understand that sending to and receiving from a channel blocks, but I find it difficult to see what is blocking what in this code.
I'm not really sure what you are doing... It seems really convoluted. I suggest you read up on how to use channels.
https://tour.golang.org/concurrency/2
That being said, you have so much going on in your code that it was much easier to just gut it down to something a bit simpler (it can be simplified further). I left comments to explain the code.
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"sync"
"time"
)
// using structs is a nice way to organize your code
type Worker struct {
wg sync.WaitGroup
semaphore chan struct{}
result chan Result
client http.Client
}
// group returns so that you don't have to send to many channels
type Result struct {
duration float64
results string
}
// closing your channels will stop the for loop in main
func (w *Worker) Close() {
close(w.semaphore)
close(w.result)
}
func (w *Worker) MakeRequest(url string) {
// a semaphore is a simple way to rate limit the amount of goroutines running at any single point of time
// google them, Go uses them often
w.semaphore <- struct{}{}
defer func() {
w.wg.Done()
<-w.semaphore
}()
start := time.Now()
resp, err := w.client.Get(url)
if err != nil {
log.Println("error", err)
return
}
defer resp.Body.Close()
// don't have any examples where I need to also POST anything but the point should be made
// resp, err = http.Post(url, "text/plain", bytes.NewBuffer([]byte("Hey")))
// if err != nil {
// log.Println("error", err)
// return
// }
// defer resp.Body.Close()
secs := time.Since(start).Seconds()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Println("error", err)
return
}
w.result <- Result{duration: secs, results: string(b)}
}
func main() {
urls := []string{"https://facebook.com/", "https://twitter.com/", "https://google.com/", "https://youtube.com/", "https://linkedin.com/", "https://wordpress.org/",
"https://instagram.com/", "https://pinterest.com/", "https://wikipedia.org/", "https://wordpress.com/", "https://blogspot.com/", "https://apple.com/",
}
workerNumber := 5
worker := Worker{
semaphore: make(chan struct{}, workerNumber),
result: make(chan Result),
client: http.Client{Timeout: 5 * time.Second},
}
// use sync groups to allow your code to wait for
// all your goroutines to finish
for _, url := range urls {
worker.wg.Add(1)
go worker.MakeRequest(url)
}
// by declaring wait and close as a seperate goroutine
// I can get to the for loop below and iterate on the results
// in a non blocking fashion
go func() {
worker.wg.Wait()
worker.Close()
}()
// do something with the results channel
for res := range worker.result {
fmt.Printf("Request took %2.f seconds.\nResults: %s\n\n", res.duration, res.results)
}
}
The channels in channels are nil (no make is executed; you make the slice but not the channels), so any send or receive will block. I'm not sure exactly what you're trying to do here, but that's the basic problem.
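A minimal, standalone illustration of that behavior (a hypothetical program, not taken from the code above): an operation on a channel that was never initialized with make blocks forever, and the runtime reports a deadlock once no goroutine can make progress.
package main

func main() {
    var ch chan string // nil channel: declared but never initialized with make
    ch <- "hello"      // blocks forever; runtime: "all goroutines are asleep - deadlock!"
}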
See https://golang.org/doc/effective_go.html#channels for an explanation of how channels work.
I am trying to build a very simple TCP server/client. And I want the program to close the connection when it is interrupted by Ctrl-C.
If I only send messages or only receive messages in the main thread, everything works just fine.
Here is the code for the client side.
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"net"
"bufio"
"io"
"time"
)
const (
TIMEOUT = 10
)
func main() {
if len(os.Args) < 2 {
fmt.Println(usage(os.Args[0]))
return
}
var timeout time.Duration
if len(os.Args) > 2 {
timeout, _ = time.ParseDuration(os.Args[2])
}
if timeout == 0 {
timeout = time.Duration(TIMEOUT * time.Second)
}
conn, err := net.DialTimeout("tcp", os.Args[1], timeout)
if err != nil {
fmt.Println("Error connecting: ", err.Error())
return
}
defer conn.Close()
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
go func() {
fmt.Println("wait ctrl-c")
for _ = range c {
fmt.Println("close on ctrl-c")
conn.Close()
}
}()
for {
message, err := bufio.NewReader(os.Stdin).ReadString('\n')
if err == io.EOF {
fmt.Fprint(conn, message)
break
} else if err != nil {
fmt.Println("Error reading: ", err.Error())
break
} else {
fmt.Fprintf(conn, message)
}
}
}
func usage(filename string) string {
return fmt.Sprintf("Usage: %s <address (ex. localhost:8016, google.com:http, [2001:db8::1]:http, etc.)> [timeout (ex. 10s, 300ms, etc.)]", filename)
}
But after I added code that just prints what it receives and ran it in another thread, the Ctrl-C handler does not work.
This is the code:
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"net"
"bufio"
"io"
"time"
)
const (
TIMEOUT = 10
)
func main() {
if len(os.Args) < 2 {
fmt.Println(usage(os.Args[0]))
return
}
var timeout time.Duration
if len(os.Args) > 2 {
timeout, _ = time.ParseDuration(os.Args[2])
}
if timeout == 0 {
timeout = time.Duration(TIMEOUT * time.Second)
}
conn, err := net.DialTimeout("tcp", os.Args[1], timeout)
if err != nil {
fmt.Println("Error connecting: ", err.Error())
return
}
defer conn.Close()
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
go func() {
fmt.Println("wait ctrl-c")
for _ = range c {
fmt.Println("close on ctrl-c")
conn.Close()
}
}()
// code added.
go func() {
for {
message, err := bufio.NewReader(conn).ReadString('\n')
if err == io.EOF {
fmt.Print(message)
break
} else if err != nil {
fmt.Println("Error remote reading: ", err.Error())
break
} else {
fmt.Print(message)
}
}
conn.Close()
os.Exit(0)
}()
for {
message, err := bufio.NewReader(os.Stdin).ReadString('\n')
if err == io.EOF {
fmt.Fprint(conn, message)
break
} else if err != nil {
fmt.Println("Error reading: ", err.Error())
break
} else {
fmt.Fprintf(conn, message)
}
}
}
func usage(filename string) string {
return fmt.Sprintf("Usage: %s <address (ex. localhost:8016, google.com:http, [2001:db8::1]:http, etc.)> [timeout (ex. 10s, 300ms, etc.)]", filename)
}
I am working on Windows 7 now.
What is the problem and how can I solve it?
You can find both server side and client side code here:
https://gist.github.com/programus/52591a97def30df9dc81
I did some more investigation and found that the key point of the problem is not the threads, but whether the signal handler has enough time to run.
I made a simple program to reproduce the problem under Windows 7, as well as the solution.
package main
import (
"fmt"
"io"
"os"
"os/signal"
)
func main() {
c := make(chan os.Signal, 1)
//q := make(chan bool, 1)
signal.Notify(c, os.Interrupt, os.Kill)
go func() {
sig := <- c
fmt.Println(sig)
//close(q)
}()
count, err := io.Copy(os.Stdout, os.Stdin)
fmt.Println(count, err)
//select {
//case <- q:
//}
}
You will find that the signal might sometimes not be printed out. But after I uncommented the q channel related code, it always works.
So I think that is because the program quits too quickly to let the signal handler run.
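For reference, here is the same program with the q channel lines uncommented, so main always waits for the signal handler before exiting (note that this version only exits after a signal arrives, which is fine for demonstrating the problem):
package main

import (
    "fmt"
    "io"
    "os"
    "os/signal"
)

func main() {
    c := make(chan os.Signal, 1)
    q := make(chan bool, 1)
    signal.Notify(c, os.Interrupt, os.Kill)
    go func() {
        sig := <-c
        fmt.Println(sig)
        close(q) // let main know the handler has run
    }()
    count, err := io.Copy(os.Stdout, os.Stdin)
    fmt.Println(count, err)
    select {
    case <-q: // block here so the process cannot exit before the handler prints the signal
    }
}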
Hope this Q&A can help others who have the same problem.
Also, I worked out a solution for the simple tcp server/client and updated the gist.
https://gist.github.com/programus/52591a97def30df9dc81