Golang Redis PubSub sends duplicate messages

I am trying to build a feature with the Redis pub/sub implementation in Go,
but for every client connected to the (WebSocket) server, the number of messages delivered doubles.
func (c client) Subscribe() {
	con := initRedis()
	defer con.Close()
	psc := redis.PubSubConn{Conn: con}
	defer psc.Close()
	psc.Subscribe(c.Channel)
	for {
		switch v := psc.Receive().(type) {
		case redis.Message:
			fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
			broadcast <- map[string]string{
				"channel": v.Channel,
				"message": string(v.Data),
			}
		case redis.Subscription:
			fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
		case error:
			log.Println(v)
			psc.Unsubscribe()
		}
	}
}
func (m Message) Publish() {
	c := initRedis()
	defer c.Close()
	reply, err := c.Do("PUBLISH", m.Channel, m.Msg)
	if err != nil {
		log.Println(err)
	}
	fmt.Println("Publishing", m, reply)
}
func initRedis() redis.Conn {
	if err := godotenv.Load(); err != nil {
		panic(err)
	}
	// fmt.Println(os.Getenv("REDIS_URL"))
	c, err := redis.DialURL(os.Getenv("REDIS_URL"), redis.DialTLSSkipVerify(true))
	if err != nil {
		panic(err)
	}
	// defer c.Close()
	return c
}
How do I stop the duplicates? Or, what is causing them?
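One possible cause: if Subscribe() runs once per connected websocket client, Redis delivers each published message once per subscriber connection, and each copy goes into the shared broadcast channel, so a broadcast-to-all hub multiplies every message by the number of clients. Below is a minimal sketch of subscribing once at startup instead; startSubscriber and its channels parameter are illustrative names, not from the original code:

// A minimal sketch, assuming the duplicates come from calling Subscribe()
// once per websocket client: subscribe once at process startup and let the
// existing broadcast fan-out deliver to every client.
func startSubscriber(channels ...string) {
	con := initRedis()
	defer con.Close()
	psc := redis.PubSubConn{Conn: con}
	if err := psc.Subscribe(redis.Args{}.AddFlat(channels)...); err != nil {
		log.Println(err)
		return
	}
	for {
		switch v := psc.Receive().(type) {
		case redis.Message:
			// Push each message into the shared broadcast channel exactly once.
			broadcast <- map[string]string{
				"channel": v.Channel,
				"message": string(v.Data),
			}
		case error:
			log.Println(v)
			return
		}
	}
}

Then run go startSubscriber("events") once from main rather than once per websocket connection.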

Related

redigo error log: write: connection reset by peer

At almost the same point in time as the redigo error log ("write: connection reset by peer"), the Redis server error log shows:
Client id=45183 addr=127.0.0.1:40420 fd=39 name= age=39706 idle=46 flags=N db=0 sub=8 psub=0 multi=-1 qbuf=0 qbuf-free=0 obl=16114 oll=528 omem=8545237 events=rw cmd=ping scheduled to be closed ASAP for overcoming of output buffer limits.
Go error log:
write tcp 127.0.0.1:40806->127.0.0.1:6379: write: connection reset by peer
Before that, the Go program had not received any subscription messages for about 7 minutes. I presume the output buffer overflowed because messages were not being consumed.
The Redis client-output-buffer-limit is at its default configuration.
The Linux fd and connection counts are normal, and I can't find a reason why messages stopped being consumed.
Here is my code:
server.go
func WaitFroMsg(ctx context.Context, pool *redis.Pool, onMessage func(channel string, data []byte) error, channel ...string) (err error) {
	conn := pool.Get()
	defer conn.Close() // return the pooled connection when this function exits
	psc := redis.PubSubConn{Conn: conn}
	if err := psc.Subscribe(redis.Args{}.AddFlat(channel)...); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() {
		for {
			switch n := psc.Receive().(type) {
			case error:
				done <- fmt.Errorf("redis pubsub receive err: %v", n)
				return
			case redis.Message:
				if err = onMessage(n.Channel, n.Data); err != nil {
					done <- err
					return
				}
			case redis.Subscription:
				if n.Count == 0 {
					fmt.Println("all channels are unsubscribed", channel)
					done <- nil
					return
				}
			}
		}
	}()
	const healthCheck = time.Minute
	ticker := time.NewTicker(healthCheck)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err = psc.Ping(""); err != nil {
				fmt.Println("healthCheck ", err, channel)
				return err
			}
		case err := <-done:
			return err
		case <-ctx.Done():
			if err := psc.Unsubscribe(); err != nil {
				return fmt.Errorf("redis unsubscribe failed: %v", err)
			}
			return nil
		}
	}
}
pool.go
func NewPool(addr string, db int) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			c, err := redis.Dial("tcp", addr)
			if err != nil {
				return nil, err
			}
			if _, err = c.Do("SELECT", db); err != nil {
				c.Close()
				return nil, err
			}
			return c, nil
		},
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			if time.Since(t) < time.Minute {
				return nil
			}
			_, err := c.Do("PING")
			fmt.Println("PING error", err)
			return err
		},
	}
}
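Given omem=8545237 in the log above, just over the default 8mb pubsub soft limit, one thing worth checking is the pubsub class of client-output-buffer-limit. Below is a hedged sketch using the standard CONFIG GET/SET commands via redigo; the address and the 64mb/16mb values are illustrative, not a recommendation:

// Inspect and (temporarily) raise the pubsub output buffer limits.
c, err := redis.Dial("tcp", "127.0.0.1:6379")
if err != nil {
	log.Fatal(err)
}
defer c.Close()

// CONFIG GET returns [name, value] pairs; the stock pubsub class is
// "pubsub 33554432 8388608 60" (32mb hard, 8mb soft sustained for 60s).
limits, err := redis.Strings(c.Do("CONFIG", "GET", "client-output-buffer-limit"))
if err != nil {
	log.Fatal(err)
}
fmt.Println(limits)

// Raise the pubsub limits if publishes can burst faster than the
// subscriber drains them.
if _, err := c.Do("CONFIG", "SET", "client-output-buffer-limit", "pubsub 64mb 16mb 60"); err != nil {
	log.Fatal(err)
}

Raising the limit only buys headroom for temporary bursts; if onMessage can block for minutes, the buffer will eventually overflow again, so the subscriber also has to drain messages at least as fast as they are published.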

How can I close the connection by timeout if the client doesn't respond within 10 seconds?

I have code (I use https://github.com/fiorix/go-smpp):
// -----------------------------------------------
// handleConnection new clients.
// -----------------------------------------------
func (_srv *ServerSmpp) handleConnection(_cfg *ConfigSmpp, c *conn) {
	defer c.Close()
	if err := _srv.auth(_cfg, c); err != nil {
		if err != io.EOF {
			log.Printf("smpp_server: server auth failed: %s\n", err)
		}
		return
	}
	notify := make(chan error)
	go func() {
		for {
			pb, err := c.Read()
			if err != nil {
				notify <- err
				return
			}
			err = _srv.Handler(_srv.RemoteProvider, c, pb)
			if err != nil {
				fmt.Printf("%s\n", err)
				notify <- err
				return
			}
		}
	}()
	for {
		select {
		case err := <-notify:
			if io.EOF == err {
				fmt.Printf("Smpp server (read): %s\n", err)
				return
			}
		case <-time.After(time.Second * 10):
			fmt.Printf("Client disconnected by timeout.\n")
			return
		}
	}
}
Code that invokes handleConnection:
func (_srv *ServerSmpp) Serve(_cfg *ConfigSmpp) {
	for {
		client, err := _srv.NetListener.Accept()
		if err != nil {
			break
		}
		c := newConn(client)
		go _srv.handleConnection(_cfg, c)
	}
}
With this code the server disconnects every client after the 10-second timeout, but how can I disconnect only a client that has sent nothing for 10 seconds?
Your client object seems to wrap a net.Conn, so call client.SetReadDeadline() with the appropriate time.Time value before blocking on client.Read():
c.client.SetReadDeadline(time.Now().Add(10 * time.Second))
pb, err := c.Read()
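A sketch of how this slots into the reader goroutine from the question, replacing the time.After select; c.client (the wrapped net.Conn) is an assumption about the conn type's internals:

// Reset the deadline before every Read so only genuinely idle clients
// time out; a read error with Timeout() means 10s of silence.
go func() {
	for {
		if err := c.client.SetReadDeadline(time.Now().Add(10 * time.Second)); err != nil {
			notify <- err
			return
		}
		pb, err := c.Read()
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Timeout() {
				fmt.Printf("Client disconnected by timeout.\n")
			}
			notify <- err
			return
		}
		if err := _srv.Handler(_srv.RemoteProvider, c, pb); err != nil {
			fmt.Printf("%s\n", err)
			notify <- err
			return
		}
	}
}()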

Go TCP server always throwing EOF after connecting

I have a Go routine running a concurrent TCP4 server, but every time I connect to it via Dial, the server throws an error saying only "EOF". Interestingly, it processes the first line of data, but then the connection handler throws the error.
TCP server instance:
if err != nil {
	fmt.Println("an error occured: ")
	fmt.Println(err)
	return
}
defer l.Close()
for {
	conn, err_handle := l.Accept()
	if err_handle != nil {
		fmt.Println("node err: ", err_handle)
		return
	}
	go handle_Connection(conn, topics)
}
handle_Connection:
func handle_Connection(conn net.Conn, topics map[string][]string) {
	defer conn.Close()
	fmt.Println("client connected")
	for {
		data, err_handle := bufio.NewReader(conn).ReadString('\n')
		if err_handle != nil {
			fmt.Println("Err: ", err_handle)
			return
		}
client:
conn, err := net.Dial("tcp4", ":"+strconv.Itoa(port))
if err != nil {
	fmt.Println("an error occured: ")
	fmt.Println(err)
}
The simple fix to the problem is that the bufio reader is declared inside the loop that processes the data. A bufio.Reader reads ahead, buffering more bytes than the line it returns, so each new reader created on the next iteration has lost whatever the previous one buffered, and once the client closes the connection the next read sees EOF.
func handle_Connection(conn net.Conn, topics map[string][]string) {
	defer conn.Close()
	fmt.Println("client connected")
	for {
		data, err_handle := bufio.NewReader(conn).ReadString('\n')
		if err_handle != nil {
			fmt.Println("Err: ", err_handle)
			return
		}
should be:
func handle_Connection(conn net.Conn, topics map[string][]string) {
	defer conn.Close()
	fmt.Println("client connected")
	reader := bufio.NewReader(conn)
	for {
		data, err_handle := reader.ReadString('\n')
		if err_handle != nil {
			fmt.Println("Err: ", err_handle)
			return
		}
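For completeness, a runnable version of the corrected handler; the fmt.Print of each line and the port are placeholders, since the original snippet is truncated before showing what is done with data:

package main

import (
	"bufio"
	"fmt"
	"net"
)

func handle_Connection(conn net.Conn, topics map[string][]string) {
	defer conn.Close()
	fmt.Println("client connected")
	// One reader per connection: bytes bufio reads ahead stay buffered
	// here instead of being lost with a discarded reader.
	reader := bufio.NewReader(conn)
	for {
		data, err_handle := reader.ReadString('\n')
		if err_handle != nil {
			fmt.Println("Err: ", err_handle)
			return
		}
		fmt.Print("received: ", data) // placeholder for the real processing
	}
}

func main() {
	l, err := net.Listen("tcp4", ":9000") // port 9000 is illustrative
	if err != nil {
		fmt.Println("an error occured: ")
		fmt.Println(err)
		return
	}
	defer l.Close()
	for {
		conn, err_handle := l.Accept()
		if err_handle != nil {
			fmt.Println("node err: ", err_handle)
			return
		}
		go handle_Connection(conn, map[string][]string{})
	}
}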

Kafka messages not delivered to consumer

I am facing a very weird problem here. I started a Kafka consumer using the sarama-cluster library in Go to consume messages from a Kafka topic, but the messages are not being received by the consumer.
However, a very weird thing happens: if I start another consumer in parallel, the messages are suddenly delivered to both consumers.
I cannot think of a logical explanation. Any pointers will be appreciated.
Note: this problem started after the Kafka and Zookeeper servers were restarted non-gracefully.
Here is the Go code of the consumer that is not receiving messages:
if err := consumer.Start(); err != nil {
	return err
}
updChan, err := consumer.Consume()
if err != nil {
	return err
}
go func() {
	for {
		select {
		case msg, ok := <-updChan:
			if !ok {
				return
			}
			var message liveupdater.KafkaMessage
			err := json.Unmarshal(msg.Msg, &message)
			if err != nil {
				fmt.Println(err)
			}
			err = handleMessaege(message)
			if err != nil {
				logrus.Println("encountered error:" + err.Error())
			}
			consumer.MarkProcessed(msg, string(message.Type))
		}
	}
}()
The following is the Go code where the consumer does receive messages (the only difference from the previous code is another consumer running in parallel on the same topic):
consumeMessages(config)
if err := consumer.Start(); err != nil {
	return err
}
updChan, err := consumer.Consume()
if err != nil {
	return err
}
go func() {
	for {
		select {
		case msg, ok := <-updChan:
			if !ok {
				return
			}
			var message liveupdater.KafkaMessage
			err := json.Unmarshal(msg.Msg, &message)
			if err != nil {
				fmt.Println(err)
			}
			err = handleMessaege(message)
			if err != nil {
				logrus.Println("encountered error:" + err.Error())
			}
			consumer.MarkProcessed(msg, string(message.Type))
		}
	}
}()

func consumeMessages(config *rakshak_config.Config) {
	kafkaConfig := kafka.Config{Brokers: strings.Split(config.Kafka.Brokers, ",")}
	logrus.Println("brokers %s", config.Kafka.Brokers)
	hermesConsumer, err := hermes.NewConsumer(hermes.Kafka, []string{config.Kafka.Topic}, kafkaConfig)
	if err != nil {
		logrus.Println("could not get consumer through hermes %s", err)
	}
	err = hermesConsumer.Start()
	if err != nil {
		logrus.Println("could not start consumer through hermes %s", err)
	}
	conChan, err := hermesConsumer.Consume()
	if err != nil {
		logrus.Println("not able to start consumer channel %s", err)
	}
	go func() {
		for {
			select {
			case msg, ok := <-conChan:
				if !ok {
					logrus.Println("could not consume message")
				}
				logrus.Println("kafka msg string: %s", string(msg.Msg[:]))
				hermesConsumer.MarkProcessed(msg, "")
			}
		}
	}()
}
Thanks in advance.

Closing a redis subscription and ending the go routine when websocket connection closes

I'm pushing events from a redis subscription to a client who is connected via websocket. I'm having trouble unsubscribing and exiting the redis go routine when the client disconnects the websocket.
Inspired by this post, here's what I have thus far. I'm able to receive subscription events and send messages to the client via websocket, but when the client closes the websocket and the defer close(done) code fires, my case b, ok := <-done: doesn't fire. It seems to be starved by the blocking default case?
package api

import (
	...
	"github.com/garyburd/redigo/redis"
	"github.com/gorilla/websocket"
)

func wsHandler(w http.ResponseWriter, r *http.Request) {
	var upgrader = websocket.Upgrader{
		ReadBufferSize:  1024,
		WriteBufferSize: 1024,
	}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		HandleError(w, err)
		return
	}
	defer conn.Close()
	done := make(chan bool)
	defer close(done)
	for {
		var req WSRequest
		err := conn.ReadJSON(&req)
		if err != nil {
			HandleWSError(conn, err)
			return
		}
		defer conn.Close()
		go func(done chan bool, req *WSRequest, conn *websocket.Conn) {
			rc := redisPool.Get()
			defer rc.Close()
			psc := redis.PubSubConn{Conn: rc}
			if err := psc.PSubscribe(req.chanName); err != nil {
				HandleWSError(conn, err)
				return
			}
			defer psc.PUnsubscribe()
			for {
				select {
				case b, ok := <-done:
					if !ok || b == true {
						return
					}
				default:
					switch v := psc.Receive().(type) {
					case redis.PMessage:
						err := handler(conn, req, v)
						if err != nil {
							HandleWSError(conn, err)
						}
					case redis.Subscription:
						log.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
					case error:
						log.Printf("error in redis subscription; err:\n%v\n", v)
						HandleWSError(conn, v)
					default:
						// do nothing...
						log.Printf("unknown redis subscription event type; %s\n", reflect.TypeOf(v))
					}
				}
			}
		}(done, &req, conn)
	}
}
Make these changes to break out of the read loop when done serving the websocket connection:
Maintain a slice of the Redis connections created for this websocket connection.
Unsubscribe all connections when done.
Modify the read loop to return when the subscription count is zero.
Here's the code:
func wsHandler(w http.ResponseWriter, r *http.Request) {
	var upgrader = websocket.Upgrader{
		ReadBufferSize:  1024,
		WriteBufferSize: 1024,
	}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		HandleError(w, err)
		return
	}
	defer conn.Close()

	// Keep a slice of all connections. Unsubscribe all connections on exit.
	var pscs []redis.PubSubConn
	defer func() {
		for _, psc := range pscs {
			psc.Unsubscribe() // Unsubscribe with no args unsubs all channels.
		}
	}()

	for {
		var req WSRequest
		err := conn.ReadJSON(&req)
		if err != nil {
			HandleWSError(conn, err)
			return
		}
		rc := redisPool.Get()
		psc := redis.PubSubConn{Conn: rc}
		pscs = append(pscs, psc)
		if err := psc.PSubscribe(req.chanName); err != nil {
			HandleWSError(conn, err)
			return
		}
		go func(req *WSRequest, conn *websocket.Conn) {
			defer rc.Close()
			for {
				switch v := psc.Receive().(type) {
				case redis.PMessage:
					err := handler(conn, req, v)
					if err != nil {
						HandleWSError(conn, err)
					}
				case redis.Subscription:
					log.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
					if v.Count == 0 {
						return
					}
				case error:
					log.Printf("error in redis subscription; err:\n%v\n", v)
					HandleWSError(conn, v)
				default:
					// do nothing...
					log.Printf("unknown redis subscription event type; %s\n", reflect.TypeOf(v))
				}
			}
		}(&req, conn)
	}
}
The code in the question and this answer dial multiple Redis connections for each websocket client. A more typical and scalable approach is to share a single Redis pubsub connection across multiple clients. The typical approach may be appropriate for your application given the high-level description, but I am still unsure of what you are trying to do given the code in the question.
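To make that last suggestion concrete, here is a rough sketch of sharing one pubsub connection across all websocket clients. The Hub type and its methods are illustrative assumptions; only redis.PubSubConn, Subscribe/Receive, and gorilla's WriteMessage are the real APIs:

package api

import (
	"log"
	"sync"

	"github.com/garyburd/redigo/redis"
	"github.com/gorilla/websocket"
)

// Hub fans messages from one shared pubsub connection out to websockets.
type Hub struct {
	mu   sync.Mutex
	subs map[string]map[*websocket.Conn]bool // channel -> clients
}

func NewHub() *Hub {
	return &Hub{subs: make(map[string]map[*websocket.Conn]bool)}
}

// Add registers a websocket client for a channel's messages.
func (h *Hub) Add(channel string, ws *websocket.Conn) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.subs[channel] == nil {
		h.subs[channel] = make(map[*websocket.Conn]bool)
	}
	h.subs[channel][ws] = true
}

// Remove drops a websocket client, e.g. when its read loop sees an error.
func (h *Hub) Remove(channel string, ws *websocket.Conn) {
	h.mu.Lock()
	defer h.mu.Unlock()
	delete(h.subs[channel], ws)
}

// Run owns the single pubsub connection and fans each message out to
// every websocket registered for that message's channel.
func (h *Hub) Run(pool *redis.Pool, channels ...string) {
	rc := pool.Get()
	defer rc.Close()
	psc := redis.PubSubConn{Conn: rc}
	if err := psc.Subscribe(redis.Args{}.AddFlat(channels)...); err != nil {
		log.Println("subscribe:", err)
		return
	}
	for {
		switch v := psc.Receive().(type) {
		case redis.Message:
			h.mu.Lock()
			for ws := range h.subs[v.Channel] {
				// A failed write means the client went away; drop it.
				if err := ws.WriteMessage(websocket.TextMessage, v.Data); err != nil {
					delete(h.subs[v.Channel], ws)
				}
			}
			h.mu.Unlock()
		case error:
			log.Println("receive:", v)
			return
		}
	}
}

Each websocket handler then only calls hub.Add and hub.Remove, and the number of Redis connections stays constant no matter how many clients connect.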
