Golang: how to ensure redis subscribers receive all messages from redis pubsub?

I am trying to publish messages to dynamically generated channels in Redis while subscribing to all messages from the existing channels.
The following seems to work, but it fails to receive some messages depending on the timing of requests from the client (browser).
I tried a "fan-in" of the two Go channels in the select statement, but it did not work well.
package main
import (
...
"github.com/go-redis/redis/v8"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{}
var rd = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
})
var ctx = context.Background()
func echo(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println("websocket connection err:", err)
return
}
defer conn.Close()
room := make(chan string)
start := make(chan string)
go func() {
loop:
for {
sub := rd.Subscribe(ctx)
defer sub.Close()
channels := []string{}
for {
select {
//it seems that messages are not received when executing this case sometimes
case channel := <-room:
log.Println("channel", channel)
channels = append(channels, channel)
sub = rd.Subscribe(ctx, channels...)
start <- "ok"
case msg := <-sub.Channel():
log.Println("msg", msg)
err := conn.WriteMessage(websocket.TextMessage, []byte(msg.Payload))
if err != nil {
log.Println("websocket write err:", err)
break loop
}
}
}
}
}()
for {
_, msg, err := conn.ReadMessage()
if err != nil {
log.Println("websocket read err:", err)
break
}
log.Println(string(msg))
chPrefix := strings.Split(string(msg), ":")[0]
ch := chPrefix + "-channel"
if string(msg) == "test" || string(msg) == "yeah" {
room <- ch
log.Println(ch)
log.Println(<-start)
}
if err := rd.Publish(ctx, ch, msg).Err(); err != nil {
log.Println("redis publish err:", err)
break
}
}
}
func main() {
http.Handle("/", http.FileServer(http.Dir("./js")))
http.HandleFunc("/ws", echo)
log.Println("server starting...", "http://localhost:5000")
log.Fatal(http.ListenAndServe("localhost:5000", nil))
}

If by "all messages" you mean you do not wish to lose any messages, I would recommend using Redis Streams instead of pub/sub. Streams persist their entries, so you will not miss messages and you can go back over the stream history if necessary.
This is an example of using Go, Streams and websockets that should get you started in that direction.
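That example is not reproduced here, but a minimal sketch of the Streams idea with the go-redis v8 client from the question may help. The stream name "chat-stream" and the field name "payload" are made up for illustration, error handling is reduced to returning the error, and the same imports as the question (context, github.com/go-redis/redis/v8, github.com/gorilla/websocket) are assumed:

// Producer side: append an entry to a stream instead of PUBLISH.
func publishToStream(ctx context.Context, rd *redis.Client, stream string, payload []byte) error {
    return rd.XAdd(ctx, &redis.XAddArgs{
        Stream: stream,
        Values: map[string]interface{}{"payload": payload},
    }).Err()
}

// Consumer side: block on XREAD and forward entries to the websocket.
// Tracking lastID lets the reader resume without skipping entries.
func streamToWebsocket(ctx context.Context, rd *redis.Client, conn *websocket.Conn, stream string) error {
    lastID := "$" // only entries added after we start; use "0" to replay history
    for {
        res, err := rd.XRead(ctx, &redis.XReadArgs{
            Streams: []string{stream, lastID},
            Block:   0, // XREAD BLOCK 0: wait until something arrives
        }).Result()
        if err != nil {
            return err
        }
        for _, s := range res {
            for _, msg := range s.Messages {
                lastID = msg.ID
                payload, _ := msg.Values["payload"].(string)
                if err := conn.WriteMessage(websocket.TextMessage, []byte(payload)); err != nil {
                    return err
                }
            }
        }
    }
}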

Related

Handling multiple websocket connections

I'm trying to create a program that connects to several servers through Gorilla websockets. It iterates over a list of server addresses and creates a new goroutine for each one, which creates its own websocket.Conn and handles reading and writing.
The problem is that every time a new goroutine is created, the previous goroutines are blocked and only the last one can continue. I believe the gorilla websocket library is blocking each goroutine, but I might be mistaken.
I have tried putting a delay in the loop over the server list; each goroutine then works perfectly until the moment a new goroutine is created for another address, at which point the previous one is blocked.
The relevant bits of my code:
In my main.go
for _, server := range servers {
go control(ctx, server, port)
}
In control()
func control(ctx context.Context, server, port string) {
url := url.URL{
Scheme: "ws",
Host: server + ":" + port,
Path: "",
}
conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
if err != nil {
panic(err)
}
defer conn.Close()
go sendHandler(ctx, conn)
go readHandler(ctx, conn)
}
func readHandler(ctx context.Context, conn *websocket.Conn) {
for {
_, p, err := conn.ReadMessage()
if err != nil {
panic(err)
}
select {
case <-ctx.Done():
goto TERM
default:
// do nothing
}
}
TERM:
// do termination
}
func sendHandler(ctx context.Context, conn *websocket.Conn) {
for _, msg := range msges {
err := conn.WriteMessage(websocket.TextMessage, msg)
if err != nil {
panic(err)
}
}
<-ctx.Done()
}
I removed the parts where I add waitgroups and other unnecessary pieces of code.
So what I expect is for there to be 3n goroutines running (where n is the number of servers) without blocking but right now I see only 3 goroutines running which are the ones called by the last iteration of the server list.
Thanks!
EDIT 14/06/2019:
I spent some time making a small working example and in the example the bug did not occur - none of the threads blocked each other. I'm still unsure what was causing it but here is my small working example:
main.go
package main
import (
"context"
"fmt"
"os"
"time"
"os/signal"
"syscall"
"sync"
"net/url"
"github.com/gorilla/websocket"
)
func main() {
servers := []string{"5555","5556", "5557"}
comms := make(chan os.Signal, 1)
signal.Notify(comms, os.Interrupt, syscall.SIGTERM)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
for _, server := range servers {
wg.Add(1)
go control(server,
ctx,
&wg)
}
<-comms
cancel()
wg.Wait()
}
func control(server string, ctx context.Context, wg *sync.WaitGroup) {
fmt.Printf("Started control for %s\n", server)
url := url.URL {
Scheme: "ws",
Host: "0.0.0.0" + ":" + server,
Path: "",
}
conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
if err != nil {
panic(err)
}
defer conn.Close()
var localwg sync.WaitGroup
localwg.Add(1)
go sendHandler(ctx, conn, &localwg, server)
localwg.Add(1)
go readHandler(ctx, conn, &localwg, server)
<- ctx.Done()
localwg.Wait()
wg.Done()
return
}
func sendHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
for i := 0; i < 50; i++ {
err := conn.WriteMessage(websocket.TextMessage, []byte("ping"))
if err != nil {
panic(err)
}
fmt.Printf("sent msg to %s\n", server)
time.Sleep(1 * time.Second)
}
<- ctx.Done()
wg.Done()
}
func readHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
for {
select {
case <- ctx.Done():
wg.Done()
return
default:
_, p, err := conn.ReadMessage()
if err != nil {
wg.Done()
fmt.Println("done")
return
}
fmt.Printf("Got [%s] from %s\n", string(p), server)
}
}
}
I tested it with dpallot's simple-websocket-server, running a server on ports 5555, 5556 and 5557.
This part of your code is causing the problem:
conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
if err != nil {
panic(err)
}
defer conn.Close()
go sendHandler(ctx, conn)
go readHandler(ctx, conn)
You create the connection, defer its close, start two other goroutines and then return from the function. Returning from control() runs the deferred conn.Close(), so the socket is closed while the handler goroutines are still trying to use it.
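One way around that, sketched below along the lines of the later working example, is to keep control() alive until the context is cancelled, so the deferred Close only runs on shutdown:

func control(ctx context.Context, server, port string) {
    u := url.URL{Scheme: "ws", Host: server + ":" + port}
    conn, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
    if err != nil {
        panic(err)
    }
    // Close only after the context is cancelled, not as soon as the
    // handler goroutines have been started.
    defer conn.Close()

    go sendHandler(ctx, conn)
    go readHandler(ctx, conn)

    <-ctx.Done() // block until shutdown
}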

Golang: concurrently reading from a TCP connection

I am having an issue with a Go project. The code is way too big to copy and paste, so I will try to explain as well as I can.
My program first connects to a TCP server, then it starts a goroutine, passing the connection object as an argument.
What I'm trying to achieve is having the client read from the TCP connection indefinitely while at the same time taking user input and communicating with the server by sending and retrieving data. I've tried using another goroutine, but the program blocks whenever it tries to retrieve data from the server.
Here is a reproduction of the error on go playground.
https://play.golang.org/p/OD5ozCRmy_4 server
https://play.golang.org/p/t1r_BAQM-jn client
Basically whenever the client tries to read from the connection it gets stuck.
Thank you for your help.
You should use channels.
Here is a sample server that accepts multiple connections, where each connection can send data whenever it wishes.
package tcp
import (
"bufio"
"fmt"
"net"
"strconv"
"../log"
"../config"
"../controllers"
h "../helpers"
)
type msgFormat struct {
text []byte
net.Conn
}
var accounts = make(map[net.Conn]int)
var conns = make(chan net.Conn)
var dconns = make(chan net.Conn)
var msgs = make(chan msgFormat)
var i int
//Init is first point
func Init() {
startserver()
for {
select {
case conn := <-conns:
handleconnect(conn)
case msg := <-msgs:
go handlemsg(msg)
case dconn := <-dconns:
handlediscounect(dconn)
}
}
}
func handlemsg(incomemsg msgFormat) {
logger.Log.Println(string(incomemsg.text))
resp, err := controllers.Do(incomemsg.text)
if err != nil {
logger.Log.Println(err.Error())
}
strLen := []byte(h.Lpad(string(fmt.Sprintf("%v", len(resp))), "0", 4))
//
fresponse := append(strLen, resp...)
incomemsg.Write(fresponse)
logger.Log.Println("response is %v" , string(fresponse))
}
func startserver() {
conf := config.GetConfigInstance()
ln, err := net.Listen(conf.SERVER.Nettype, conf.SERVER.Address)
if err != nil {
logger.Log.Println(err.Error())
}
logger.Log.Printf("server is serving at %v", conf.SERVER.Address)
go func() {
for {
conn, err := ln.Accept()
if err != nil {
logger.Log.Println(err.Error())
}
conns <- conn
}
}()
}
func readdate(conn net.Conn, i int) {
// Create the reader once; recreating it inside the loop can silently drop bytes it has already buffered.
rd := bufio.NewReader(conn)
for {
dataLen := make([]byte, 4)
// io.ReadFull guarantees the whole 4-byte length prefix is read.
_, err := io.ReadFull(rd, dataLen)
if err != nil {
break
}
intLen, _ := strconv.Atoi(string(dataLen))
data := make([]byte, intLen)
_, err = io.ReadFull(rd, data)
if err != nil {
break
}
msgs <- msgFormat{data, conn}
}
dconns <- conn
}
func handleconnect(newconnection net.Conn) {
accounts[newconnection] = i
i++
// if addr , ok := newconnection.RemoteAddr().str
logger.Log.Printf("Action: Client_Connected %v is connected via %v \n", i, newconnection.RemoteAddr().(*net.TCPAddr).IP)
go readdate(newconnection, i)
}
func handlediscounect(disconnection net.Conn) {
logger.Log.Printf("Action: Client_Disconnected %v / %v is gone\n", accounts[disconnection] + 1, disconnection.RemoteAddr().(*net.TCPAddr).IP)
delete(accounts, disconnection)
}
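The sample above is the server side; the client can use the same pattern. Here is a minimal, self-contained sketch (the address localhost:8000 and the line-delimited framing are assumptions for illustration, not taken from the playground links) in which one goroutine reads from the TCP connection and another reads user input, so neither blocks the other:

package main

import (
    "bufio"
    "fmt"
    "log"
    "net"
    "os"
)

func main() {
    conn, err := net.Dial("tcp", "localhost:8000") // assumed address
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Reader goroutine: blocks on the connection, not on the main loop.
    serverMsgs := make(chan string)
    go func() {
        sc := bufio.NewScanner(conn)
        for sc.Scan() {
            serverMsgs <- sc.Text()
        }
        close(serverMsgs)
    }()

    // User input goroutine: feeds stdin lines into another channel.
    userInput := make(chan string)
    go func() {
        sc := bufio.NewScanner(os.Stdin)
        for sc.Scan() {
            userInput <- sc.Text()
        }
        close(userInput)
    }()

    // Main loop: select lets server data and user input interleave freely.
    for {
        select {
        case msg, ok := <-serverMsgs:
            if !ok {
                return // server closed the connection
            }
            fmt.Println("server:", msg)
        case line, ok := <-userInput:
            if !ok {
                return
            }
            fmt.Fprintln(conn, line) // send the user's line to the server
        }
    }
}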

Broadcast server end messages to clients using Websockets

I am new to Golang. I am trying to create a WebSocket server which will broadcast messages to the connected clients. The messages here will be generated on the server side (by creating a default client).
Here is my client.go
c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
if err != nil {
log.Fatal("dial:", err)
}
defer c.Close()
done := make(chan struct{})
new_chan := make(chan string)
//defer new_chan.Stop()
go func() {
for {
new_chan <- "my message"
}
}()
hub := newHub()
go hub.run()
client := &Client{hub: hub, ws: c, send: make(chan []byte, 256)}
for {
select {
case <-done:
return
case t := <-new_chan:
err := c.WriteMessage(websocket.TextMessage, []byte(t))
if err != nil {
log.Println("write:", err)
return
}
log.Printf(t)
client.hub.broadcast <- bytes.TrimSpace(bytes.Replace([]byte(t), newline, space, -1))
}
}
This function generates the messages and tries to broadcast them to the other clients.
server.go adds the clients to the hub:
func echo(w http.ResponseWriter, r *http.Request) {
c, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Print("upgrade:", err)
return
}
hub := newHub()
go hub.run()
client := &Client{hub: hub, ws: c, send: make(chan []byte, 256)}
client.hub.register <- client
go client.writePump()
writePump() here listens on the client.send channel and broadcasts messages.
The problem is that the connected client's hub is different from the hub of the client at the server, so when I try to send messages, I don't receive anything.
How can I make them belong to the same hub (context)?
It looks like you are starting from the Gorilla chat example. In that example, use hub.broadcast <- message to broadcast a message to all clients, where hub is the *Hub created in main().
There's no need to create a client connection to send messages that originate from the server.
Here's a modified version of the main() that broadcasts "hello" to all clients every second:
func broadcast(hub *Hub) {
m := []byte("hello")
for range time.NewTicker(time.Second).C {
hub.broadcast <- m
}
}
func main() {
flag.Parse()
hub := newHub()
go hub.run()
go broadcast(hub)
http.HandleFunc("/", serveHome)
http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
serveWs(hub, w, r)
})
err := http.ListenAndServe(*addr, nil)
if err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
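The key point is that every websocket handler must use that one shared hub instead of calling newHub() per connection. Here is a sketch of the handler side, following the structure of the Gorilla chat example (the ws and send field names come from the question, and readPump() is the example's read loop):

// serveWs registers each new websocket client with the single shared hub
// that main() created, instead of creating a new hub per connection.
func serveWs(hub *Hub, w http.ResponseWriter, r *http.Request) {
    c, err := upgrader.Upgrade(w, r, nil)
    if err != nil {
        log.Print("upgrade:", err)
        return
    }
    client := &Client{hub: hub, ws: c, send: make(chan []byte, 256)}
    client.hub.register <- client

    go client.writePump()
    go client.readPump()
}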

Closing a redis subscription and ending the go routine when websocket connection closes

I'm pushing events from a Redis subscription to a client connected via websocket. I'm having trouble unsubscribing and exiting the Redis goroutine when the client disconnects the websocket.
Inspired by this post, here's what I have thus far. I'm able to receive subscription events and send messages to the client via websocket, but when the client closes the websocket and the defer close(done) code fires, my case b, ok := <-done: never fires. It seems to be starved by the default case.
package api
import (
...
"github.com/garyburd/redigo/redis"
"github.com/gorilla/websocket"
)
func wsHandler(w http.ResponseWriter, r *http.Request) {
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
HandleError(w, err)
return
}
defer conn.Close()
done := make(chan bool)
defer close(done)
for {
var req WSRequest
err := conn.ReadJSON(&req)
if err != nil {
HandleWSError(conn, err)
return
}
defer conn.Close()
go func(done chan bool, req *WSRequest, conn *websocket.Conn) {
rc := redisPool.Get()
defer rc.Close()
psc := redis.PubSubConn{Conn: rc}
if err := psc.PSubscribe(req.chanName); err != nil {
HandleWSError(conn, err)
return
}
defer psc.PUnsubscribe()
for {
select {
case b, ok := <-done:
if !ok || b == true {
return
}
default:
switch v := psc.Receive().(type) {
case redis.PMessage:
err := handler(conn, req, v)
if err != nil {
HandleWSError(conn, err)
}
case redis.Subscription:
log.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
case error:
log.Printf("error in redis subscription; err:\n%v\n", v)
HandleWSError(conn, v)
default:
// do nothing...
log.Printf("unknown redis subscription event type; %s\n", reflect.TypeOf(v))
}
}
}
}(done, &req, conn)
}
}
Make these changes to break out of the read loop when done serving the websocket connection:
Maintain a slice of the Redis connections created for this websocket connection.
Unsubscribe all connections when done.
Modify the read loop to return when the subscription count is zero.
Here's the code:
func wsHandler(w http.ResponseWriter, r *http.Request) {
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
HandleError(w, err)
return
}
defer conn.Close()
// Keep slice of all connections. Unsubscribe all connections on exit.
var pscs []redis.PubSubConn
defer func() {
for _, psc := range pscs {
psc.Unsubscribe() // unsubscribe with no args unsubs all channels
}
}()
for {
var req WSRequest
err := conn.ReadJSON(&req)
if err != nil {
HandleWSError(conn, err)
return
}
rc := redisPool.Get()
psc := redis.PubSubConn{Conn: rc}
pscs = append(pscs, psc)
if err := psc.PSubscribe(req.chanName); err != nil {
HandleWSError(conn, err)
return
}
go func(req *WSRequest, conn *websocket.Conn) {
defer rc.Close()
for {
switch v := psc.Receive().(type) {
case redis.PMessage:
err := handler(conn, req, v)
if err != nil {
HandleWSError(conn, err)
}
case redis.Subscription:
log.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
if v.Count == 0 {
return
}
case error:
log.Printf("error in redis subscription; err:\n%v\n", v)
HandleWSError(conn, v)
default:
// do nothing...
log.Printf("unknown redis subscription event type; %s\n", reflect.TypeOf(v))
}
}
}(&req, conn)
}
}
The code in the question and this answer dial multiple Redis connections for each websocket client. A more typical and scalable approach is to share a single Redis pubsub connection across multiple clients. The typical approach may be appropriate for your application given the high-level description, but I am still unsure of what you are trying to do given the code in the question.
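For illustration, here is a rough sketch of that shared-connection approach (a hypothetical hub type, plain Subscribe instead of the question's PSubscribe, and no unregister or per-connection write locking): a single goroutine owns one PubSubConn and fans each message out to the websocket clients that asked for that channel. It assumes the redigo and gorilla/websocket imports from the question plus "sync" and "log".

// hub owns the single Redis pubsub connection and fans messages out to
// websocket clients, keyed by the Redis channel they subscribed to.
type hub struct {
    mu      sync.Mutex
    clients map[string][]*websocket.Conn // Redis channel -> websocket conns
    psc     redis.PubSubConn
}

// subscribe registers a websocket client for a Redis channel and makes sure
// the shared pubsub connection is subscribed to it.
func (h *hub) subscribe(channel string, ws *websocket.Conn) error {
    h.mu.Lock()
    h.clients[channel] = append(h.clients[channel], ws)
    h.mu.Unlock()
    return h.psc.Subscribe(channel)
}

// run receives on the one pubsub connection and forwards each message
// to every websocket client registered for that channel.
func (h *hub) run() {
    for {
        switch v := h.psc.Receive().(type) {
        case redis.Message:
            h.mu.Lock()
            conns := h.clients[v.Channel]
            h.mu.Unlock()
            for _, ws := range conns {
                if err := ws.WriteMessage(websocket.TextMessage, v.Data); err != nil {
                    log.Println("websocket write:", err)
                }
            }
        case redis.Subscription:
            log.Printf("%s: %s %d", v.Channel, v.Kind, v.Count)
        case error:
            log.Println("pubsub receive:", v)
            return
        }
    }
}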

Unable to consume messages from locally running Kafka server, using Golang Sarama Package

I am making a simple Telegram bot that would read messages from a local Kafka server and print it out to a chat.
Both the zookeeper and kafka server config files are at their defaults. The console consumer works. The problem arises when I try to consume messages from code using the Golang Sarama package. Before I added these lines:
case err := <-pc.Errors():
log.Panic(err)
the program only printed the messages once, after which it would stall.
Now it panics, printing this to the log:
kafka: error while consuming test1/0: kafka: broker not connected
Here's the code:
type kafkaResponse struct {
telega *tgbotapi.Message
message []byte
}
type kafkaRequest struct {
telega *tgbotapi.Message
topic string
}
var kafkaBrokers = []string{"localhost:9092"}
func main() {
//channels for request response
var reqChan = make(chan kafkaRequest)
var respChan = make(chan kafkaResponse)
//starting kafka client routine to listen to topic channnel
go consumer(reqChan, respChan, kafkaBrokers)
//bot thingy here
bot, err := tgbotapi.NewBotAPI(token)
if err != nil {
log.Panic(err)
}
bot.Debug = true
log.Printf("Authorized on account %s", bot.Self.UserName)
u := tgbotapi.NewUpdate(0)
u.Timeout = 60
updates, err := bot.GetUpdatesChan(u)
for {
select {
case update := <-updates:
if update.Message == nil {
continue
}
switch update.Message.Text {
case "Topic: test1":
topic := "test1"
reqChan <- kafkaRequest{update.Message, topic}
}
case response := <-respChan:
bot.Send(tgbotapi.NewMessage(response.telega.Chat.ID, string(response.message)))
}
}
}
Here's consumer.go:
func consumer(reqChan chan kafkaRequest, respChan chan kafkaResponse, brokers []string) {
config := sarama.NewConfig()
config.Consumer.Return.Errors = true
// Create new consumer
consumer, err := sarama.NewConsumer(brokers, config)
if err != nil {
panic(err)
}
defer func() {
if err := consumer.Close(); err != nil {
panic(err)
}
}()
select {
case request := <-reqChan:
//get all partitions on the given topic
partitionList, err := consumer.Partitions(request.topic)
if err != nil {
fmt.Println("Error retrieving partitionList ", err)
}
initialOffset := sarama.OffsetOldest
for _, partition := range partitionList {
pc, _ := consumer.ConsumePartition(request.topic, partition, initialOffset)
go func(pc sarama.PartitionConsumer) {
for {
select {
case message := <-pc.Messages():
respChan <- kafkaResponse{request.telega, message.Value}
case err := <-pc.Errors():
log.Panic(err)
}
}
}(pc)
}
}
}
You are closing your consumer right after setting up all the PartitionConsumers: the deferred call below runs as soon as consumer() returns, while the partition consumers are still reading.
defer func() {
if err := consumer.Close(); err != nil {
panic(err)
}
}()
However, the documentation specifies that you should only close the consumer after all the PartitionConsumers have been closed.
// Close shuts down the consumer. It must be called after all child
// PartitionConsumers have already been closed.
Close() error
I would recommend adding a sync.WaitGroup so that consumer() waits for every go func(pc sarama.PartitionConsumer) goroutine to finish before the deferred consumer.Close() runs; see the sketch below.
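A sketch of that change, showing only the partition-consumer part of consumer() (it assumes a "sync" import and that something eventually calls pc.AsyncClose() so the goroutines can exit):

var wg sync.WaitGroup

for _, partition := range partitionList {
    pc, err := consumer.ConsumePartition(request.topic, partition, initialOffset)
    if err != nil {
        log.Println("ConsumePartition error:", err)
        continue
    }

    wg.Add(1)
    go func(pc sarama.PartitionConsumer) {
        defer wg.Done()
        for {
            select {
            case message, ok := <-pc.Messages():
                if !ok {
                    return // channel closed: this partition consumer was shut down
                }
                respChan <- kafkaResponse{request.telega, message.Value}
            case err, ok := <-pc.Errors():
                if !ok {
                    return
                }
                log.Println("partition consumer error:", err)
            }
        }
    }(pc)
}

// Block until every PartitionConsumer goroutine has returned (e.g. after
// pc.AsyncClose() is called on shutdown). Only then does consumer() return
// and the deferred consumer.Close() run, as the docs require.
wg.Wait()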
