using channels with google pubsub poll subscriber - go

I'm trying to create a Google Pub/Sub subscriber in Go where I take 100 messages at a time and then write them to InfluxDB. I'm trying to use channels to do this, like so:
package main

import (
    "os"
    "fmt"
    "cloud.google.com/go/pubsub"
    "log"
    "sync"
    "golang.org/x/net/context"
    "encoding/json"
    clnt "github.com/influxdata/influxdb/client/v2"
    "time"
)

type SensorData struct {
    Pressure      float64 `json:"pressure"`
    Temperature   float64 `json:"temperature"`
    Dewpoint      float64 `json:"dewpoint"`
    Timecollected int64   `json:"timecollected"`
    Latitude      float64 `json:"latitude"`
    Longitude     float64 `json:"longitude"`
    Humidity      float64 `json:"humidity"`
    SensorID      string  `json:"sensorId"`
    Zipcode       int     `json:"zipcode"`
    Warehouse     string  `json:"warehouse"`
    Area          string  `json:"area"`
}

type SensorPoints struct {
    SensorData []SensorData
}

func main() {
    messages := make(chan SensorData, 100)

    // Create a new Influx HTTPClient
    c, err := clnt.NewHTTPClient(clnt.HTTPConfig{
        Addr:     "http://localhost:8086",
        Username: "user",
        Password: "pass",
    })
    if err != nil {
        log.Fatal(err)
    }

    // Create pubsub subscriber
    ctx := context.Background()
    proj := os.Getenv("GOOGLE_CLOUD_PROJECT")
    if proj == "" {
        fmt.Fprintf(os.Stderr, "GOOGLE_CLOUD_PROJECT environment variable must be set.\n")
        os.Exit(1)
    }
    client, err := pubsub.NewClient(ctx, proj)
    if err != nil {
        log.Fatalf("Could not create pubsub Client: %v", err)
    }
    const sub = "influxwriter"

    // create a blank influx batchpoint set
    bp, err := clnt.NewBatchPoints(clnt.BatchPointsConfig{
        Database:  "sensordata",
        Precision: "s",
    })
    if err != nil {
        log.Fatal(err)
    }

    // Pull messages via the subscription.
    go pullMsgs(client, sub, messages)
    if err != nil {
        log.Fatal(err)
    }
    writeInflux(messages, bp)
    c.Close()
}

func pullMsgs(client *pubsub.Client, name string, messages chan<- SensorData) {
    ctx := context.Background()
    // [START pubsub_subscriber_async_pull]
    // [START pubsub_quickstart_subscriber]
    // Consume 10 messages.
    var mu sync.Mutex
    var sensorinfos SensorPoints
    sensorinfo := &SensorData{}
    received := 0
    sub := client.Subscription(name)
    cctx, _ := context.WithCancel(ctx)
    err := sub.Receive(cctx, func(ctx context.Context, msg *pubsub.Message) {
        msg.Ack()
        json.Unmarshal(msg.Data, sensorinfo)
        //fmt.Println(string(msg.Data))
        //fmt.Println(sensorinfo.SensorID)
        sensorinfos.SensorData = append(sensorinfos.SensorData, *sensorinfo)
        mu.Lock()
        defer mu.Unlock()
        received++
        fmt.Println("rcv: ", received)
        messages <- *sensorinfo
    })
    if err != nil {
        fmt.Println(err)
    }
    // [END pubsub_subscriber_async_pull]
    // [END pubsub_quickstart_subscriber]
}

func writeInflux(sensorpoints <-chan SensorData, bp clnt.BatchPoints) {
    for p := range sensorpoints {
        // Create a point and add to batch
        tags := map[string]string{
            "sensorId":  p.SensorID,
            "warehouse": p.Warehouse,
            "area":      p.Area,
            "zipcode":   string(p.Zipcode),
        }
        fields := map[string]interface{}{
            "pressure":    p.Pressure,
            "humidity":    p.Humidity,
            "temperature": p.Temperature,
            "dewpoint":    p.Dewpoint,
            "longitude":   p.Longitude,
            "latitude":    p.Latitude,
        }
        pt, err := clnt.NewPoint("sensordata", tags, fields, time.Unix(p.Timecollected, 0))
        if err != nil {
            log.Fatal(err)
        }
        bp.AddPoint(pt)
    }
}
but it never seems to get past the initial pullMsgs function and just keeps printing the output in there:
rcv: 1
rcv: 2
rcv: 3
rcv: 4
rcv: 5
rcv: 6
rcv: 7
I thought that once the channel gets full, it would block until the channel is emptied out.
This is the pubsub pull code I'm using as a reference.

When you've sent the desired number of messages on the channel, close the channel and cancel the context. Try the technique demonstrated in the documentation of canceling after receiving some number of messages; since your buffer is 100 and you're trying to consume 100 messages at a time, that's the number to use. If you want your program to exit, close the channel so that the for p := range sensorpoints loop in writeInflux hits a stopping point instead of blocking while waiting for more elements to be added to the channel.
Note this in the Go pubsub API doc:
To terminate a call to Receive, cancel its context.
That is not what's stalling your main goroutine, but your pullMsgs goroutine will not exit on its own without that cancel.
Also, check for errors on Unmarshal. If you don't want to handle unmarshal errors at this point in the code, consider changing the channel type and sending msg or msg.Data instead and unmarshaling upon channel receipt.
cctx, cancel := context.WithCancel(ctx)
err := sub.Receive(cctx, func(ctx context.Context, msg *pubsub.Message) {
    msg.Ack()
    err := json.Unmarshal(msg.Data, sensorinfo)
    if err != nil {
        fmt.Printf("Failed to unmarshal: %s\n", err)
    }
    mu.Lock()
    defer mu.Unlock()
    received++
    fmt.Println("rcv: ", received)
    messages <- *sensorinfo
    if received == 100 {
        close(messages) // no more messages will be sent on channel
        cancel()
    }
})
if err != nil {
    fmt.Println(err)
}
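For the msg.Data variant mentioned above, here is a minimal sketch (not the only way to do it) that reuses the question's SensorData type and clnt alias: declare the channel as make(chan []byte, 100), send msg.Data from the Receive callback, and unmarshal on the consumer side so decoding errors are handled where the point is built:

func writeInflux(raw <-chan []byte, bp clnt.BatchPoints) {
    for data := range raw {
        var p SensorData
        if err := json.Unmarshal(data, &p); err != nil {
            log.Printf("skipping malformed message: %v", err)
            continue // drop the bad payload instead of writing a zero-valued point
        }
        // build tags/fields and call bp.AddPoint(pt) exactly as in the original writeInflux
    }
}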

Related

Golang: how to ensure redis subscribers receive all messages from redis pubsub?

I am trying to publish messages to dynamically generated channels in Redis while subscribing to all messages from the existing channels.
The following seems to work, but it fails to receive some messages, depending on the timing of requests from the client (browser).
I tried "fan-in" for the two go channels in the select statement, but it did not work well.
package main
import (
...
"github.com/go-redis/redis/v8"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{}
var rd = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
})
var ctx = context.Background()
func echo(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println("websocket connection err:", err)
return
}
defer conn.Close()
room := make(chan string)
start := make(chan string)
go func() {
loop:
for {
sub := rd.Subscribe(ctx)
defer sub.Close()
channels := []string{}
for {
select {
//it seems that messages are not received when executing this case sometimes
case channel := <-room:
log.Println("channel", channel)
channels = append(channels, channel)
sub = rd.Subscribe(ctx, channels...)
start <- "ok"
case msg := <-sub.Channel():
log.Println("msg", msg)
err := conn.WriteMessage(websocket.TextMessage, []byte(msg.Payload))
if err != nil {
log.Println("websocket write err:", err)
break loop
}
}
}
}
}()
for {
_, msg, err := conn.ReadMessage()
if err != nil {
log.Println("websocket read err:", err)
break
}
log.Println(string(msg))
chPrefix := strings.Split(string(msg), ":")[0]
ch := chPrefix + "-channel"
if string(msg) == "test" || string(msg) == "yeah" {
room <- ch
log.Println(ch)
log.Println(<-start)
}
if err := rd.Publish(ctx, ch, msg).Err(); err != nil {
log.Println("redis publish err:", err)
break
}
}
}
func main() {
http.Handle("/", http.FileServer(http.Dir("./js")))
http.HandleFunc("/ws", echo)
log.Println("server starting...", "http://localhost:5000")
log.Fatal(http.ListenAndServe("localhost:5000", nil))
}
If by all messages you mean you do not wish to lose any messages, I would recommend using Redis Streams instead of pub/sub. This ensures you are not missing messages and lets you go back over the stream history if necessary.
This is an example of using Go, Streams and websockets that should get you started in that direction
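For a rough idea of what that looks like with the go-redis/v8 client already used above, here is a minimal standalone sketch; the stream name "test-channel" and the "payload" field are invented for the example:

package main

import (
    "context"
    "log"
    "time"

    "github.com/go-redis/redis/v8"
)

func main() {
    ctx := context.Background()
    rd := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

    // Producer: append an entry to the stream instead of publishing to a channel.
    if err := rd.XAdd(ctx, &redis.XAddArgs{
        Stream: "test-channel",
        Values: map[string]interface{}{"payload": "yeah"},
    }).Err(); err != nil {
        log.Fatal("xadd:", err)
    }

    // Consumer: start from "0" to replay history, then resume from the last seen ID,
    // so entries published before (or between) reads are not lost.
    lastID := "0"
    for {
        streams, err := rd.XRead(ctx, &redis.XReadArgs{
            Streams: []string{"test-channel", lastID},
            Block:   5 * time.Second, // wait up to 5s for new entries
        }).Result()
        if err == redis.Nil {
            continue // no new entries within the block window
        }
        if err != nil {
            log.Fatal("xread:", err)
        }
        for _, s := range streams {
            for _, msg := range s.Messages {
                log.Printf("got %v (id %s)", msg.Values, msg.ID)
                lastID = msg.ID // resume from here on the next read
            }
        }
    }
}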

Sarama Kafka library: how to unit test a consumer group's session.MarkMessage()?

I'm trying to adapt code from the consumer group example for github.com/Shopify/sarama, and am struggling to add a unit test which tests the functionality of session.MarkMessage() in the ConsumeClaim method (https://github.com/Shopify/sarama/blob/5466b37850a38f4ed6d04b94c6f058bd75032c2a/examples/consumergroup/main.go#L160).
Here is my adapted code with a consume() function:
package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"sync"
"syscall"
"github.com/Shopify/sarama"
)
var (
addrs = []string{"localhost:9092"}
topic = "my-topic"
)
func main() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var wg sync.WaitGroup
defer wg.Wait()
consumer := &Consumer{ready: make(chan bool)}
close := consume(ctx, &wg, consumer)
defer close()
<-consumer.ready
log.Println("Sarama consumer up and running!")
sigterm := make(chan os.Signal, 1)
signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
select {
case <-ctx.Done():
log.Println("terminating: context cancelled")
case <-sigterm:
log.Println("terminating: via signal")
}
}
func consume(ctx context.Context, wg *sync.WaitGroup, consumer *Consumer) (close func()) {
config := sarama.NewConfig()
config.Version = sarama.V0_11_0_2 // The version has to be at least V0_10_2_0 to support consumer groups
config.Consumer.Offsets.Initial = sarama.OffsetOldest
consumerGroup, err := sarama.NewConsumerGroup(addrs, "my-group", config)
if err != nil {
log.Fatalf("NewConsumerGroup: %v", err)
}
wg.Add(1)
go func() {
defer wg.Done()
for {
if err := consumerGroup.Consume(ctx, []string{topic}, consumer); err != nil {
log.Panicf("Consume: %v", err)
}
if ctx.Err() != nil {
return
}
consumer.ready = make(chan bool)
}
}()
close = func() {
if err := consumerGroup.Close(); err != nil {
log.Panicf("Close: %v", err)
}
}
return
}
// Consumer represents a Sarama consumer group consumer
type Consumer struct {
ready chan bool
handle func([]byte) error
}
// Setup is run at the beginning of a new session, before ConsumeClaim
func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error {
// Mark the consumer as ready
close(consumer.ready)
return nil
}
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
return nil
}
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for message := range claim.Messages() {
log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", message.Value, message.Timestamp, message.Topic)
if consumer.handle != nil {
if err := consumer.handle(message.Value); err != nil {
return fmt.Errorf("handle message %s: %v", message.Value, err)
}
}
session.MarkMessage(message, "")
}
return nil
}
Here are a couple of unit tests I've written for it:
package main
import (
"context"
"fmt"
"log"
"sync"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/stretchr/testify/require"
"gotest.tools/assert"
)
func TestConsume(t *testing.T) {
config := sarama.NewConfig()
config.Producer.Return.Successes = true
producer, err := sarama.NewSyncProducer(addrs, config)
require.NoError(t, err)
partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
Topic: topic,
Value: sarama.ByteEncoder([]byte("foobar")),
})
require.NoError(t, err)
t.Logf("Sent message to partition %d with offset %d", partition, offset)
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
consumer := &Consumer{ready: make(chan bool)}
close := consume(ctx, &wg, consumer)
<-consumer.ready
log.Println("Sarama consumer up and running!")
time.Sleep(1 * time.Second)
cancel()
wg.Wait()
close()
}
func TestConsumeTwice(t *testing.T) {
config := sarama.NewConfig()
config.Producer.Return.Successes = true
producer, err := sarama.NewSyncProducer(addrs, config)
require.NoError(t, err)
data1, data2 := "foobar1", "foobar2"
for _, data := range []string{data1, data2} {
partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
Topic: topic,
Key: sarama.StringEncoder("foobar"),
Value: sarama.StringEncoder(data),
})
require.NoError(t, err)
t.Logf("Sent message to partition %d with offset %d", partition, offset)
}
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
messageReceived := make(chan []byte)
consumer := &Consumer{
ready: make(chan bool),
handle: func(data []byte) error {
messageReceived <- data
fmt.Printf("Received message: %s\n", data)
return nil
},
}
close := consume(ctx, &wg, consumer)
<-consumer.ready
log.Println("Sarama consumer up and running!")
for i := 0; i < 2; i++ {
data := <-messageReceived
switch i {
case 0:
assert.Equal(t, data1, string(data))
case 1:
assert.Equal(t, data2, string(data))
}
}
cancel()
wg.Wait()
close()
}
The tests can be run after running Kafka and Zookeeper in a Docker container such as johnnypark/kafka-zookeeper like so:
docker run -p 2181:2181 -p 9092:9092 -e ADVERTISED_HOST=127.0.0.1 -e NUM_PARTITIONS=10 johnnypark/kafka-zookeeper
What I'm struggling with is the following: if I comment out the line
session.MarkMessage(message, "")
the tests still pass. According to https://godoc.org/github.com/Shopify/sarama#ConsumerGroupSession, MarkMessage marks a message as consumed, but how would I test this in a unit test?
sarama.ConsumerGroupSession.MarkMessage calls sarama.PartitionOffsetManager.MarkOffset, and in the method comment they said: "Note: calling MarkOffset does not necessarily commit the offset to the backend store immediately for efficiency reasons, and it may never be committed if your application crashes. This means that you may end up processing the same message twice."
So in unit tests, MarkMessage does not commit the offset fast enough. I faced the same problem and Google brought me here. Sleeping for a second at the end of the test functions can be a workaround.
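If you want the test to assert the commit rather than just sleep, one option (a sketch, not from the original answer) is to read the committed offset back through Sarama's offset manager after the consumer shuts down. committedOffset is a hypothetical helper; it assumes the broker in addrs is reachable and that "my-group" matches the group used in consume():

func committedOffset(t *testing.T, topic string, partition int32) int64 {
    t.Helper()

    config := sarama.NewConfig()
    config.Version = sarama.V0_11_0_2

    client, err := sarama.NewClient(addrs, config)
    require.NoError(t, err)
    defer client.Close()

    // The offset manager reads the offsets committed for the consumer group.
    om, err := sarama.NewOffsetManagerFromClient("my-group", client)
    require.NoError(t, err)
    defer om.Close()

    pom, err := om.ManagePartition(topic, partition)
    require.NoError(t, err)
    defer pom.Close()

    offset, _ := pom.NextOffset() // next offset the group would consume, i.e. the committed position
    return offset
}

Calling it before producing and again after cancel()/wg.Wait()/close() for the partition returned by SendMessage, and asserting that the offset advanced, makes the MarkMessage behavior observable; with MarkMessage commented out it should stay put. You may still need a short sleep, or a smaller auto-commit interval if your Sarama version exposes config.Consumer.Offsets.AutoCommit, for the commit to land.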

Google Pub/Sub message ordering not working (or increasing latency to over 10 seconds)?

I'm trying to make a simplified example demonstrating the use of Google Pub/Sub's message ordering feature (https://cloud.google.com/pubsub/docs/ordering). From those docs, after message ordering is enabled for a subscription,
After the message ordering property is set, the Pub/Sub service delivers messages with the same ordering key in the order that the Pub/Sub service receives the messages. For example, if a publisher sends two messages with the same ordering key, the Pub/Sub service delivers the oldest message first.
I've used this to write the following example:
package main
import (
"context"
"log"
"time"
"cloud.google.com/go/pubsub"
uuid "github.com/satori/go.uuid"
)
func main() {
client, err := pubsub.NewClient(context.Background(), "my-project")
if err != nil {
log.Fatalf("NewClient: %v", err)
}
topicID := "test-topic-" + uuid.NewV4().String()
topic, err := client.CreateTopic(context.Background(), topicID)
if err != nil {
log.Fatalf("CreateTopic: %v", err)
}
defer topic.Delete(context.Background())
subID := "test-subscription-" + uuid.NewV4().String()
sub, err := client.CreateSubscription(context.Background(), subID, pubsub.SubscriptionConfig{
Topic: topic,
EnableMessageOrdering: true,
})
if err != nil {
log.Fatalf("CreateSubscription: %v", err)
}
defer sub.Delete(context.Background())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
messageReceived := make(chan struct{})
go sub.Receive(ctx, func(ctx context.Context, msg *pubsub.Message) {
log.Printf("Received message with ordering key %s: %s", msg.OrderingKey, msg.Data)
msg.Ack()
messageReceived <- struct{}{}
})
topic.Publish(context.Background(), &pubsub.Message{Data: []byte("Dang1!"), OrderingKey: "foobar"})
topic.Publish(context.Background(), &pubsub.Message{Data: []byte("Dang2!"), OrderingKey: "foobar"})
for i := 0; i < 2; i++ {
select {
case <-messageReceived:
case <-time.After(10 * time.Second):
log.Fatal("Expected to receive a message, but timed out after 10 seconds.")
}
}
}
First, I tried the program without specifying OrderingKey: "foobar" in the topic.Publish() calls. This resulted in the following output:
> go run main.go
2020/08/10 21:40:34 Received message with ordering key : Dang2!
2020/08/10 21:40:34 Received message with ordering key : Dang1!
In other words, messages are not received in the same order as they were published, which in my use case is undesirable and is what I'd like to prevent by specifying an OrderingKey.
However, as soon as I added the OrderingKeys in the publish calls, the program times out after 10 seconds of waiting to receive Pub/Sub messages:
> go run main.go
2020/08/10 21:44:36 Expected to receive a message, but timed out after 10 seconds.
exit status 1
What I would expect is to now first receive the message Dang1! followed by Dang2!, but instead I'm not receiving any messages. Any idea why this is not happening?
The publishes are failing with the following error: Failed to publish: Topic.EnableMessageOrdering=false, but an OrderingKey was set in Message. Please remove the OrderingKey or turn on Topic.EnableMessageOrdering.
You can see this if you change your publish calls to check the error:
res1 := topic.Publish(context.Background(), &pubsub.Message{Data: []byte("Dang1!"), OrderingKey: "foobar"})
res2 := topic.Publish(context.Background(), &pubsub.Message{Data: []byte("Dang2!"), OrderingKey: "foobar"})
_, err = res1.Get(ctx)
if err != nil {
    fmt.Printf("Failed to publish: %v", err)
    return
}
_, err = res2.Get(ctx)
if err != nil {
    fmt.Printf("Failed to publish: %v", err)
    return
}
To fix it, add a line to enable message ordering on your topic. Your topic creation would be as follows:
topic, err := client.CreateTopic(context.Background(), topicID)
if err != nil {
    log.Fatalf("CreateTopic: %v", err)
}
topic.EnableMessageOrdering = true
defer topic.Delete(context.Background())
I independently came up with the same solution as Kamal, just wanted to share the full revised implementation:
package main
import (
"context"
"flag"
"log"
"time"
"cloud.google.com/go/pubsub"
uuid "github.com/satori/go.uuid"
)
var enableMessageOrdering bool
func main() {
flag.BoolVar(&enableMessageOrdering, "enableMessageOrdering", false, "Enable and use Pub/Sub message ordering")
flag.Parse()
client, err := pubsub.NewClient(context.Background(), "fleetsmith-dev")
if err != nil {
log.Fatalf("NewClient: %v", err)
}
topicID := "test-topic-" + uuid.NewV4().String()
topic, err := client.CreateTopic(context.Background(), topicID)
if err != nil {
log.Fatalf("CreateTopic: %v", err)
}
topic.EnableMessageOrdering = enableMessageOrdering
defer topic.Delete(context.Background())
subID := "test-subscription-" + uuid.NewV4().String()
sub, err := client.CreateSubscription(context.Background(), subID, pubsub.SubscriptionConfig{
Topic: topic,
EnableMessageOrdering: enableMessageOrdering,
})
if err != nil {
log.Fatalf("CreateSubscription: %v", err)
}
defer sub.Delete(context.Background())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
messageReceived := make(chan struct{})
go sub.Receive(ctx, func(ctx context.Context, msg *pubsub.Message) {
log.Printf("Received message with ordering key %s: %s", msg.OrderingKey, msg.Data)
msg.Ack()
messageReceived <- struct{}{}
})
msg1, msg2 := &pubsub.Message{Data: []byte("Dang1!")}, &pubsub.Message{Data: []byte("Dang2!")}
if enableMessageOrdering {
msg1.OrderingKey, msg2.OrderingKey = "foobar", "foobar"
}
publishMessage(topic, msg1)
publishMessage(topic, msg2)
for i := 0; i < 2; i++ {
select {
case <-messageReceived:
case <-time.After(10 * time.Second):
log.Fatal("Expected to receive a message, but timed out after 10 seconds.")
}
}
}
func publishMessage(topic *pubsub.Topic, msg *pubsub.Message) {
publishResult := topic.Publish(context.Background(), msg)
messageID, err := publishResult.Get(context.Background())
if err != nil {
log.Fatalf("Get: %v", err)
}
log.Printf("Published message with ID %s", messageID)
}
When called with the enableMessageOrdering flag set to true, I receive Dang1! first, followed by Dang2!:
> go run main.go --enableMessageOrdering
2020/08/11 05:38:07 Published message with ID 1420685949616723
2020/08/11 05:38:08 Published message with ID 1420726763302425
2020/08/11 05:38:09 Received message with ordering key foobar: Dang1!
2020/08/11 05:38:11 Received message with ordering key foobar: Dang2!
whereas without it, I receive them in reverse order as before:
> go run main.go
2020/08/11 05:38:47 Published message with ID 1420687395091051
2020/08/11 05:38:47 Published message with ID 1420693737065665
2020/08/11 05:38:48 Received message with ordering key : Dang2!
2020/08/11 05:38:48 Received message with ordering key : Dang1!

Too Many open files/ No such host error while running a go program which makes concurrent requests

I have a Go program which is supposed to call an API with different payloads. The web application is a Dropwizard application running on localhost, and the Go program is below:
package main
import (
"bufio"
"encoding/json"
"log"
"net"
"net/http"
"os"
"strings"
"time"
)
type Data struct {
PersonnelId string `json:"personnel_id"`
DepartmentId string `json:"department_id"`
}
type PersonnelEvent struct {
EventType string `json:"event_type"`
Data `json:"data"`
}
const (
maxIdleConnections = 20
maxIdleConnectionsPerHost = 20
timeout = time.Duration(5 * time.Second)
)
var transport = http.Transport{
Dial: dialTimeout,
MaxIdleConns: maxIdleConnections,
MaxIdleConnsPerHost: 20,
}
var client = &http.Client{
Transport: &transport,
}
func dialTimeout(network, addr string) (net.Conn, error) {
return net.DialTimeout(network, addr, timeout)
}
func makeRequest(payload string) {
req, _ := http.NewRequest("POST", "http://localhost:9350/v1/provider-location-personnel/index", strings.NewReader(payload))
req.Header.Set("X-App-Token", "TESTTOKEN1")
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
log.Println("Api invocation returned an error ", err)
} else {
defer resp.Body.Close()
log.Println(resp.Body)
}
}
func indexPersonnels(personnelPayloads []PersonnelEvent) {
for _, personnelEvent := range personnelPayloads {
payload, err := json.Marshal(personnelEvent)
if err != nil {
log.Println("Error while marshalling payload ", err)
}
log.Println(string(payload))
// go makeRequest(string(payload))
}
}
func main() {
ch := make(chan PersonnelEvent)
for i := 0; i < 20; i++ {
go func() {
for personnelEvent := range ch {
payload, err := json.Marshal(personnelEvent)
if err != nil {
log.Println("Error while marshalling payload", err)
}
go makeRequest(string(payload))
//log.Println("Payload ", string(payload))
}
}()
}
file, err := os.Open("/Users/tmp/Desktop/personnels.txt")
defer file.Close()
if err != nil {
log.Fatalf("Error opening personnel id file %v", err)
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
go func() {
ch <- PersonnelEvent{EventType: "provider_location_department_personnel_linked", Data: Data{DepartmentId: "2a8d9687-aea8-4a2c-bc08-c64d7716d973", PersonnelId: scanner.Text()}}
}()
}
}
It's reading some IDs from a file, creating a payload out of each, and invoking a POST request on the web server, but when I run the program it gives "too many open files"/"no such host" errors. I feel that the program is too concurrent; how can I make it run gracefully?
Inside your 20 goroutines started in main(), "go makeRequest(...)" again creates one goroutine for each event; you don't need to start an extra goroutine there.
Besides, I think you don't need to start a goroutine in your scan loop either. A buffered channel is enough, because the bottleneck should be the HTTP requests.
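Putting those two suggestions together, a rough sketch of a revised main, reusing the question's types and makeRequest; the WaitGroup and the close(ch) are additions so the program waits for in-flight work before exiting:

func main() {
    ch := make(chan PersonnelEvent, 100) // buffered; the scanner can stay ahead of the workers
    var wg sync.WaitGroup

    // 20 workers call makeRequest directly: no extra goroutine per event,
    // so at most 20 requests are in flight at any time.
    for i := 0; i < 20; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for personnelEvent := range ch {
                payload, err := json.Marshal(personnelEvent)
                if err != nil {
                    log.Println("Error while marshalling payload", err)
                    continue
                }
                makeRequest(string(payload))
            }
        }()
    }

    file, err := os.Open("/Users/tmp/Desktop/personnels.txt")
    if err != nil {
        log.Fatalf("Error opening personnel id file %v", err)
    }
    defer file.Close()

    // Feed events from the main goroutine; no goroutine needed in the scan loop.
    scanner := bufio.NewScanner(file)
    for scanner.Scan() {
        ch <- PersonnelEvent{
            EventType: "provider_location_department_personnel_linked",
            Data: Data{
                DepartmentId: "2a8d9687-aea8-4a2c-bc08-c64d7716d973",
                PersonnelId:  scanner.Text(),
            },
        }
    }

    close(ch) // let the workers drain the channel and exit
    wg.Wait() // wait for all requests to finish before main returns
}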
You can use a buffered channel, A.K.A. counting semaphore, to limit the parallelism.
// The capacity of the buffered channel is 10,
// which means you can have 10 goroutines to
// run the makeRequest function in parallel.
var tokens = make(chan struct{}, 10)
func makeRequest(payload string) {
tokens <- struct{}{} // acquire the token or block here
defer func() { <-tokens }() // release the token to awake another goroutine
// other code...
}

Golang concurrently reading from a tcp connection

I am having some issues with a Go project. The code is way too big to copy and paste, so I will try to explain as well as I can.
My program first connects to a TCP server, then starts a goroutine, passing it the connection object as an argument.
What I'm trying to achieve is having the client read from the TCP connection indefinitely while at the same time taking user input and communicating with the server by sending and retrieving data. I've tried using another goroutine, but the program blocks whenever it tries to retrieve data from the server.
Here is a reproduction of the error on go playground.
https://play.golang.org/p/OD5ozCRmy_4 server
https://play.golang.org/p/t1r_BAQM-jn client
Basically whenever the client tries to read from the connection it gets stuck.
Thank you for your help.
You should use channels.
Here is a sample server that can accept multiple connections, where each connection can send data as it wishes:
package tcp
import (
"bufio"
"fmt"
"net"
"strconv"
"../log"
"../config"
"../controllers"
h "../helpers"
)
type msgFormat struct {
text []byte
net.Conn
}
var accounts = make(map[net.Conn]int)
var conns = make(chan net.Conn)
var dconns = make(chan net.Conn)
var msgs = make(chan msgFormat)
var i int
//Init is first point
func Init() {
startserver()
for {
select {
case conn := <-conns:
handleconnect(conn)
case msg := <-msgs:
go handlemsg(msg)
case dconn := <-dconns:
handlediscounect(dconn)
}
}
}
func handlemsg(incomemsg msgFormat) {
logger.Log.Println(string(incomemsg.text))
resp, err := controllers.Do(incomemsg.text)
if err != nil {
logger.Log.Println(err.Error())
}
strLen := []byte(h.Lpad(string(fmt.Sprintf("%v", len(resp))), "0", 4))
//
fresponse := append(strLen, resp...)
incomemsg.Write(fresponse)
logger.Log.Println("response is %v" , string(fresponse))
}
func startserver() {
conf := config.GetConfigInstance()
ln, err := net.Listen(conf.SERVER.Nettype, conf.SERVER.Address)
if err != nil {
logger.Log.Println(err.Error())
}
logger.Log.Printf("server is serving at %v", conf.SERVER.Address)
go func() {
for {
conn, err := ln.Accept()
if err != nil {
logger.Log.Println(err.Error())
}
conns <- conn
}
}()
}
func readdate(conn net.Conn, i int) {
for {
rd := bufio.NewReader(conn)
dataLen := make([]byte, 4)
_, err := rd.Read(dataLen)
if err != nil {
break
}
intLen, _ := strconv.Atoi(string(dataLen))
data := make([]byte, intLen)
_, err = rd.Read(data)
if err != nil {
break
}
msgs <- msgFormat{data, conn}
}
dconns <- conn
}
func handleconnect(newconnection net.Conn) {
accounts[newconnection] = i
i++
// if addr , ok := newconnection.RemoteAddr().str
logger.Log.Printf("Action: Client_Connected %v is connected via %v \n", i, newconnection.RemoteAddr().(*net.TCPAddr).IP)
go readdate(newconnection, i)
}
func handlediscounect(disconnection net.Conn) {
logger.Log.Printf("Action: Client_Disconnected %v / %v is gone\n", accounts[disconnection] + 1, disconnection.RemoteAddr().(*net.TCPAddr).IP)
delete(accounts, disconnection)
}
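The client side can follow the same idea: dedicate one goroutine to reading from the connection and hand the data to the rest of the program over a channel, so server reads never block the goroutine handling user input. A minimal sketch, where the address and the newline-delimited framing are assumptions for the example rather than taken from the linked playground code:

package main

import (
    "bufio"
    "fmt"
    "log"
    "net"
    "os"
)

func main() {
    conn, err := net.Dial("tcp", "localhost:8080")
    if err != nil {
        log.Fatal("dial:", err)
    }
    defer conn.Close()

    // Reader goroutine: the only place that reads from the connection.
    fromServer := make(chan string)
    go func() {
        defer close(fromServer)
        scanner := bufio.NewScanner(conn)
        for scanner.Scan() {
            fromServer <- scanner.Text()
        }
    }()

    // Print server messages as they arrive, independently of user input.
    go func() {
        for msg := range fromServer {
            fmt.Println("server:", msg)
        }
    }()

    // Main goroutine: read user input and write it to the server.
    stdin := bufio.NewScanner(os.Stdin)
    for stdin.Scan() {
        if _, err := fmt.Fprintln(conn, stdin.Text()); err != nil {
            log.Fatal("write:", err)
        }
    }
}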
