Websocket freezes if disconnected abnormally - go

I've created a simple websocket that publishes a JSON stream. It works fine most of the time, except for a few cases where I think that, while looping through the clients to send them a message, it gets hung up on a client that is being disconnected abnormally. What measures can I add to this code to mitigate that?
Client.go
import (
"github.com/gorilla/websocket"
)
type client struct {
socket *websocket.Conn
send chan *Message
}
func (c *client) read() {
defer c.socket.Close()
for {
_, _, err := c.socket.ReadMessage()
if err != nil {
log.Info("Websocket: %s", err)
break
}
}
}
func (c *client) write() {
defer c.socket.Close()
for msg := range c.send {
err := c.socket.WriteJSON(msg)
if err != nil {
break
}
}
}
Stream.go
import (
"net/http"
"github.com/gorilla/websocket"
)
const (
socketBufferSize = 1024
messageBufferSize = 256
)
var upgrader = &websocket.Upgrader{
ReadBufferSize: socketBufferSize,
WriteBufferSize: socketBufferSize,
}
type Stream struct {
Send chan *Message
join chan *client
leave chan *client
clients map[*client]bool
}
func (s *Stream) Run() {
for {
select {
case client := <-s.join: // joining
s.clients[client] = true
case client := <-s.leave: // leaving
delete(s.clients, client)
close(client.send)
case msg := <-s.Send: // send message to all clients
for client := range s.clients {
client.send <- msg
}
}
}
}
func (s *Stream) ServeHTTP(w http.ResponseWriter, res *http.Request) {
socket, err := upgrader.Upgrade(w, res, nil)
if err != nil {
log.Error(err)
return
}
defer func() {
socket.Close()
}()
client := &client{
socket: socket,
send: make(chan *Message, messageBufferSize),
}
s.join <- client
defer func() { s.leave <- client }()
go client.write()
client.read()
}

See the Gorilla Chat Application for an example of how to avoid blocking on a client.
The key parts are:
Use a buffered channel for sending to the client. Your application is already doing this.
Send to the client using select with a default case to avoid blocking. Assume that the client is blocked on a write when it cannot immediately receive a message; close the client's channel in this situation to cause the client's write loop to exit (see the sketch after this list).
Write with a deadline.
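For reference, here is a minimal sketch of those points applied to the Run and write loops from the question. It follows the pattern of the Gorilla chat example; the 10-second write deadline is an arbitrary value you would tune, the leave case now checks membership so a channel is never closed twice, and the time package must be imported.
func (s *Stream) Run() {
    for {
        select {
        case client := <-s.join:
            s.clients[client] = true
        case client := <-s.leave:
            // only close the channel if the client is still registered,
            // so it is never closed twice
            if _, ok := s.clients[client]; ok {
                delete(s.clients, client)
                close(client.send)
            }
        case msg := <-s.Send:
            for client := range s.clients {
                select {
                case client.send <- msg: // queued for this client's write loop
                default: // client cannot keep up; assume it is stuck and drop it
                    delete(s.clients, client)
                    close(client.send)
                }
            }
        }
    }
}

func (c *client) write() {
    defer c.socket.Close()
    for msg := range c.send {
        // give each write a deadline so a dead peer cannot block this loop forever
        c.socket.SetWriteDeadline(time.Now().Add(10 * time.Second))
        if err := c.socket.WriteJSON(msg); err != nil {
            break
        }
    }
}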

Related

Go websocket test acting strange

So basically I'm writing a Go test for my chat application, and for some reason, if I put the Test_saveMessage function at the top of this file, my tests go through and work fine; however, if I put Test_InitRouter at the top of this file, my server opens and the test doesn't finish, as if it were listening for more requests. Does anyone know why this could be happening? Here is the code that does not work:
package messenger
import (
"fmt"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
"net/http/httptest"
"strings"
"testing"
)
var testMessage = Message{
Username: "Name",
Message: "Test message"}
//Tests InitRouter both sending and receiving messages
func Test_InitRouter(t *testing.T) {
var receivedMessage Message
//Create test server with the InitRouter handler
s := httptest.NewServer(InitRouter())
defer s.Close()
// Convert URL from http to ws
u := "ws" + strings.TrimPrefix(s.URL, "http")
fmt.Println(u)
// Connect to the test server
ws, _, err := websocket.DefaultDialer.Dial(u, nil)
if err != nil {
t.Fatalf("%v", err)
}
defer ws.Close()
//Send message to the server read received message and see if it's the same
if err != ws.WriteJSON(testMessage) {
t.Fatalf("%v", err)
}
err = ws.ReadJSON(&receivedMessage)
if err != nil {
t.Fatalf("%v", err)
}
if receivedMessage != testMessage {
t.Fatalf("%v", err)
}
}
//Test for the saveMessage function
func Test_saveMessage(t *testing.T) {
saveMessage(testMessage)
assert.Equal(t, 1, len(messages), "Expected to have 1 message")
}
As soon as I move the Test_saveMessage function to the top it starts working properly.
Here is the code for the handler:
package messenger
import (
"fmt"
"github.com/go-chi/chi"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
"net/http"
)
func InitRouter() http.Handler {
r := chi.NewRouter()
r.Get("/", GetWebsocket)
return r
}
var clients = make(map[*websocket.Conn]bool) // connected clients
var broadcast = make(chan Message) // broadcast channel
var messages = []Message{}
func GetWebsocket(w http.ResponseWriter, r *http.Request) {
// Upgrade initial GET request to a websocket
upgrader := websocket.Upgrader{}
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Error(err)
}
// Close the connection when the function returns
defer ws.Close()
// Register our new client and send him the chat history
clients[ws] = true
serveInitialMessages(ws)
//initialize message sending logic
sendMessages(ws)
}
// Sends messages from a particular websocket to the channel
func sendMessages(ws *websocket.Conn){
for {
var msg Message
// Read in a new message as JSON and map it to a Message object
err := ws.ReadJSON(&msg)
if err != nil {
log.Info(err)
delete(clients, ws)
break
}
// Send the newly received message to the broadcast channel
broadcast <- msg
saveMessage(msg)
}
}
func HandleMessages() {
for {
// Grab the next message from the broadcast channel
msg := <-broadcast
fmt.Println(msg)
// Send it out to every client that is currently connected
for client := range clients {
err := client.WriteJSON(msg)
if err != nil {
log.Printf("error: %v", err)
client.Close()
delete(clients, client)
}
}
}
}
func saveMessage(m Message) {
if len(messages) >= 50 {
messages = messages[1:]
}
messages = append(messages, m)
}
func serveInitialMessages(ws *websocket.Conn) {
for _, m := range messages {
err := ws.WriteJSON(m)
if err != nil {
log.Error(err)
}
}
}

Sending Websocket messages to new clients

I am creating a chat API using Go and Gorilla WebSocket. I would like my users to receive the last 10 messages on establishing a websocket connection. However, I can't find a simple way to do that. I would just like to send every message from my messages array to the new client. Is there a simple way to edit my code without hubs? Here is my code:
package messenger
import (
"../config"
"fmt"
"github.com/go-chi/chi"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
"net/http"
)
func InitRouter() http.Handler {
r := chi.NewRouter()
r.Get("/", getWebsocket)
return r
}
var clients = make(map[*websocket.Conn]bool) // connected clients
var broadcast = make(chan Message) // broadcast channel
var messages = []Message{}
// Configure the upgrader
var upgrader = websocket.Upgrader{}
func getWebsocket(w http.ResponseWriter, r *http.Request) {
// Upgrade initial GET request to a websocket
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Fatal(err)
}
// Make sure we close the connection when the function returns
defer ws.Close()
// Register our new client
clients[ws] = true
for {
var msg Message
// Read in a new message as JSON and map it to a Message object
err := ws.ReadJSON(&msg)
if err != nil {
log.Printf("error: %v", err)
delete(clients, ws)
break
}
// Send the newly received message to the broadcast channel
broadcast <- msg
saveMessage(msg)
}
}
func HandleMessages() {
for {
// Grab the next message from the broadcast channel
msg := <-broadcast
// Send it out to every client that is currently connected
for client := range clients {
err := client.WriteJSON(msg)
if err != nil {
log.Printf("error: %v", err)
client.Close()
delete(clients, client)
}
}
}
}
func saveMessage(m Message) {
if len(messages) >= config.Conf.MessageAmount {
messages = messages[1:]
}
messages = append(messages, m)
fmt.Println(messages)
}
Okay, looks like I did it. I just created a new function and called it from the getWebsocket function, passing the newly created websocket. Here is the new function:
func serveInitialMessages(ws *websocket.Conn) {
for _, m := range messages {
fmt.Println(m)
err := ws.WriteJSON(m)
if err != nil {
fmt.Println(err)
}
}
}
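So the only change in the handler is to call it right after the new client is registered, roughly:
// inside getWebsocket, right after the upgrade succeeds
clients[ws] = true
serveInitialMessages(ws) // replay the stored history to the new client
// ... the existing ReadJSON loop follows ...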

WebSocket Server that feeds messages to clients in a round robin fashion

I have a websocket server in Go using the Gorilla websocket package. At this stage, I will have only one server serving 5 clients. I am getting some messages from upstream into the WebSocket server. My intention is to NOT BROADCAST all the messages to the connected clients. I would like to send only one copy of the message to the connected clients in a round robin fashion. It doesn't matter which client gets it as long as there is only one that gets it.
My attempted solution
I have a simple Go server and have created a Pool of the clients (websocket connections) that I am receiving. However, I do not see any option to round robin the messages as I mentioned above; all my clients are getting the message. How can I send only one copy of the message to the connected clients instead of broadcasting it to all of them?
Disclaimer
The code I have is taken from online sources and modified to my requirements. I am relatively new to Go and websockets. Is this even possible using websockets?
main.go
package main
import (
"fmt"
"net/http"
"github.com/realtime-chat-go-react/backend/pkg/websocket"
)
func serveWs(pool *websocket.Pool, w http.ResponseWriter, r *http.Request) {
fmt.Println("WebSocket Endpoint Hit")
conn, err := websocket.Upgrade(w, r)
if err != nil {
fmt.Fprintf(w, "%+v\n", err)
}
client := &websocket.Client{
Conn: conn,
Pool: pool,
}
pool.Register <- client
client.Read()
}
func setupRoutes() {
pool := websocket.NewPool()
go pool.Start()
http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
serveWs(pool, w, r)
})
}
func main() {
setupRoutes()
err := http.ListenAndServe(":8080",nil)
if err != nil {
fmt.Println(err)
}
}
websocket.go
package websocket
import (
"log"
"net/http"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
var wsList []*websocket.Conn
func Upgrade(w http.ResponseWriter, r *http.Request) (*websocket.Conn, error) {
upgrader.CheckOrigin = func(r *http.Request) bool { return true }
conn, err := upgrader.Upgrade(w, r, nil)
wsList = append(wsList, conn) //Creating a list here to store all websocket clients.
if err != nil {
log.Println(err)
return nil, err
}
return conn, nil
}
pool.go
package websocket
import "fmt"
type Pool struct {
Register chan *Client
Unregister chan *Client
Clients map[*Client]bool
Broadcast chan Message
}
func NewPool() *Pool {
return &Pool{
Register: make(chan *Client),
Unregister: make(chan *Client),
Clients: make(map[*Client]bool),
Broadcast: make(chan Message),
}
}
func (pool *Pool) Start() {
for {
select {
case client := <-pool.Register:
pool.Clients[client] = true
fmt.Println("Size of Connection Pool: ", len(pool.Clients))
for client, _ := range pool.Clients {
fmt.Println(client)
client.Conn.WriteJSON(Message{Type: 1, Body: "New User Joined..."})
}
break
case client := <-pool.Unregister:
delete(pool.Clients, client)
fmt.Println("Size of Connection Pool: ", len(pool.Clients))
for client, _ := range pool.Clients {
client.Conn.WriteJSON(Message{Type: 1, Body: "User Disconnected..."})
}
break
case message := <-pool.Broadcast: //This is where I need to modify the code but not sure how
fmt.Println("Sending message to all clients in Pool")
for client, _ := range pool.Clients {
if err := client.Conn.WriteJSON(message); err != nil {
fmt.Println(err)
return
}
}
}
}
}
client.go
package websocket
import (
"fmt"
"log"
"sync"
"github.com/gorilla/websocket"
)
type Client struct {
ID string
Conn *websocket.Conn
Pool *Pool
mu sync.Mutex
}
type Message struct {
Type int `json:"type"`
Body string `json:"body"`
}
func (c *Client) Read() {
defer func() {
c.Pool.Unregister <- c
c.Conn.Close()
}()
for {
messageType, p, err := c.Conn.ReadMessage()
if err != nil {
log.Println(err)
return
}
message := Message{Type: messageType, Body: string(p)}
c.Pool.Broadcast <- message
fmt.Printf("Message Received: %+v\n", message)
}
}
Modify the pool to store the clients in a slice instead of a map, and add a field to record the index of the previous client used.
type Pool struct {
Register chan *Client
Unregister chan *Client
Clients []*Client
Broadcast chan Message
PrevClientIndex int
}
Round robin instead of broadcasting:
case message := <-pool.Broadcast:
if len(pool.Clients) == 0 {
continue
}
pool.PrevClientIndex++
if pool.PrevClientIndex >= len(pool.Clients) {
pool.PrevClientIndex = 0
}
client := pool.Clients[pool.PrevClientIndex]
if err := client.Conn.WriteJSON(message); err != nil {
// handle error
...
Register appends to the slice:
case client := <-pool.Register:
pool.Clients = append(pool.Clients, client)
...
Unregister removes the client from the slice:
case client := <-pool.Unregister:
j := 0
for _, c := range pool.Clients {
if c != client {
pool.Clients[j] = c
j++
}
}
pool.Clients = pool.Clients[:j]
...
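Putting those fragments together, a Start loop for this Pool could look roughly like the following (the join/leave notification messages from the original code and most error handling are left out to keep the sketch short):
func (pool *Pool) Start() {
    for {
        select {
        case client := <-pool.Register:
            pool.Clients = append(pool.Clients, client)
        case client := <-pool.Unregister:
            // remove the client while keeping the slice order
            j := 0
            for _, c := range pool.Clients {
                if c != client {
                    pool.Clients[j] = c
                    j++
                }
            }
            pool.Clients = pool.Clients[:j]
        case message := <-pool.Broadcast:
            if len(pool.Clients) == 0 {
                continue
            }
            // advance the round-robin index, wrapping around the slice
            pool.PrevClientIndex++
            if pool.PrevClientIndex >= len(pool.Clients) {
                pool.PrevClientIndex = 0
            }
            client := pool.Clients[pool.PrevClientIndex]
            if err := client.Conn.WriteJSON(message); err != nil {
                fmt.Println(err) // on error you would typically also unregister this client
            }
        }
    }
}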

Using golang's defer in a separate method

I'm using the Go RabbitMQ library in a project, and I have a Connect function in a separate package. I'm calling Connect in my main function; however, because I connect to RabbitMQ in a separate function, the deferred conn.Close() runs when Connect returns, which closes the connection inside the Connect function. That makes perfect sense, but it begs the question: where, then, do I call conn.Close()?
package drivers
import (
// Core
"log"
"os"
"time"
// Third party
"github.com/streadway/amqp"
)
type Queue struct {
Channel *amqp.Channel
}
func NewQueue() *Queue {
return &Queue{}
}
// Queue interface
type IQueue interface {
Connect(args ...interface{})
Publish(queue string, payload []byte) error
Listen(queue string) (<-chan amqp.Delivery, error)
Declare(queue string) (amqp.Queue, error)
}
// Connect - Connects to RabbitMQ
func (queue *Queue) Connect(args ...interface{}) {
var uri string
if args == nil {
// Get from env vars
uri = os.Getenv("RABBIT_MQ_URI")
if uri == "" {
log.Panic("No uri for queue given")
}
} else {
uri = args[0].(string)
}
// Make max 5 connection attempts, with a 1 second timeout
for i := 0; i < 5; i++ {
log.Println("Connecting to:", uri)
// If connection is successful, return new instance
conn, err := amqp.Dial(uri)
defer conn.Close()
if err == nil {
log.Println("Successfully connected to queue!")
channel, _ := conn.Channel()
queue.Channel = channel
return
}
log.Println("Failed to connect to queue, retrying...", err)
// Wait 1 second
time.Sleep(5 * time.Second)
}
}
// Declare a new queue
func (queue *Queue) Declare(queueName string) (amqp.Queue, error) {
return queue.Channel.QueueDeclare(
queueName,
true,
false,
false,
false,
nil,
)
}
// Publish a message
func (queue *Queue) Publish(queueName string, payload []byte) error {
return queue.Channel.Publish(
"",
queueName,
false,
false,
amqp.Publishing{
DeliveryMode: amqp.Persistent,
ContentType: "application/json",
Body: payload,
},
)
}
// Listen for a new message
func (queue *Queue) Listen(queueName string) (<-chan amqp.Delivery, error) {
return queue.Channel.Consume(
queueName,
"",
true,
false,
false,
false,
nil,
)
}
As you can see in the code above, I'm calling defer conn.Close() after making a connection; however, this immediately closes the connection again once Connect returns.
Here's a Go Playground spoofing what I'm talking about... https://play.golang.org/p/5cz2D4gDgn
The simple solution is to call conn.Close() from elsewhere. This might just be me, but I think it's kinda odd that you wouldn't expose the connection elsewhere, i.e. as a field in Queue. Exposing the ability to close the connection from the Queue would solve this and give you more flexibility.
So this:
type Queue struct {
// your original fields
Conn *amqp.Connection
}
// Somewhere else
queue.Conn.Close()
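If you go that route, Connect itself would just store the successful connection on the Queue instead of deferring the close. A rough sketch based on the question's Connect loop (with the env-var handling trimmed down):
func (queue *Queue) Connect(args ...interface{}) {
    uri := os.Getenv("RABBIT_MQ_URI")
    if args != nil {
        uri = args[0].(string)
    }
    // Make max 5 connection attempts, with a delay between attempts
    for i := 0; i < 5; i++ {
        conn, err := amqp.Dial(uri)
        if err == nil {
            channel, _ := conn.Channel()
            queue.Conn = conn // keep the connection so the caller can close it later
            queue.Channel = channel
            return
        }
        log.Println("Failed to connect to queue, retrying...", err)
        time.Sleep(5 * time.Second)
    }
}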
Your other option is connecting, then doing all the actions you want with that connection, then closing. I'm thinking something like:
func action(conn *amqp.Connection, args ...interface{}) <-chan bool {
done := make(chan bool)
go func(amqpConn *amqp.Connection, dChan chan bool) {
// Do what you want with the connection
dChan <- true
}(conn, done)
return done
}
func (queue *Queue) Connect(args ...interface{}) {
// your connection code
doneChans := make([]<-chan bool, 0, 5)
for i := 0; i < 5; i++ {
conn, err := amqp.Dial(uri)
if err != nil {
// handle error
continue
}
defer conn.Close()
doneChans = append(doneChans, action(conn))
}
// This for loop will block until the action calls are done
for j := range doneChans {
isFinish := <-doneChans[j]
if !isFinish {
// handle bad state
}
}
}
One option is to have Connect return conn, and call defer conn.Close() in the caller.
package driver
// imports, etc
func (queue *Queue) Connect(args ...interface{}) (*amqp.Connection, error) {
// ...
conn, err := amqp.Dial(uri)
if err != nil {
return nil, err
}
// ...
return conn, nil
}
Then in another package:
package stuff
// imports, etc
func doStuff() {
queue := driver.NewQueue()
conn, err := queue.Connect(args...)
if err != nil {
log.Fatalf("oh no! %v!", err)
}
defer conn.Close()
// Do stuff
}

Multiple connections to a TCP server

I've developed a small Go TCP server to make a chat application. The server works fine with two clients, but whenever I try to connect a third client, it does not connect to the server. I am running on Windows. What could be the issue?
package main
import (
"bufio"
"fmt"
"net"
)
var allClients map[*Client]int
type Client struct {
// incoming chan string
outgoing chan string
reader *bufio.Reader
writer *bufio.Writer
conn net.Conn
connection *Client
}
func (client *Client) Read() {
for {
line, err := client.reader.ReadString('\n')
if err == nil {
if client.connection != nil {
client.connection.outgoing <- line
}
fmt.Println(line)
} else {
break
}
}
client.conn.Close()
delete(allClients, client)
if client.connection != nil {
client.connection.connection = nil
}
client = nil
}
func (client *Client) Write() {
for data := range client.outgoing {
client.writer.WriteString(data)
client.writer.Flush()
}
}
func (client *Client) Listen() {
go client.Read()
go client.Write()
}
func NewClient(connection net.Conn) *Client {
writer := bufio.NewWriter(connection)
reader := bufio.NewReader(connection)
client := &Client{
// incoming: make(chan string),
outgoing: make(chan string),
conn: connection,
reader: reader,
writer: writer,
}
client.Listen()
return client
}
func main() {
allClients = make(map[*Client]int)
listener, _ := net.Listen("tcp", ":8080")
for {
conn, err := listener.Accept()
if err != nil {
fmt.Println(err.Error())
}
client := NewClient(conn)
for clientList, _ := range allClients {
if clientList.connection == nil {
client.connection = clientList
clientList.connection = client
fmt.Println("Connected")
}
}
allClients[client] = 1
fmt.Println(len(allClients))
}
}
Your code is fine. I compiled it on Linux and tried it with 4 connections. Everything worked as expected.

Resources