I am new to Go and studying concurrency, so I tried to write a simple crawler demo. When I have read all the given URLs, I push a single false onto processChannel, and this push executes only once.
Then, in another goroutine, I select on processChannel; when I get false, I close the channel for the application. But in that select case I receive false twice, and I get a panic: "panic: close of closed channel".
So I cannot understand why I pushed false once but the select case received false twice.
All the code is below:
package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "sync"
    "time"
)

var applicationStatus bool
var urls []string
var urlsProcessed int
var foundUrls []string
var fullText string
var totalURLCount int
var wg sync.WaitGroup
var v1 int

func main() {
    applicationStatus = true
    statusChannel := make(chan int)
    textChannel := make(chan string)
    processChannel := make(chan bool)
    totalURLCount = 0

    urls = append(urls, "https://www.msn.cn/zh-cn/news/other/nasa%E7%AC%AC%E4%BA%94%E6%AC%A1%E8%A7%82%E5%AF%9F%E5%88%B0%E9%BB%91%E6%B4%9E%E5%90%83%E6%8E%89%E4%B8%80%E9%A2%97%E6%B5%81%E6%B5%AA%E7%9A%84%E6%81%92%E6%98%9F/ar-AA15ybhx?cvid=0eaf927e48604c0588413d393c788a8f&ocid=winp2fptaskbarent")
    urls = append(urls, "https://www.msn.cn/zh-cn/news/other/nasa%E7%AC%AC%E4%BA%94%E6%AC%A1%E8%A7%82%E5%AF%9F%E5%88%B0%E9%BB%91%E6%B4%9E%E5%90%83%E6%8E%89%E4%B8%80%E9%A2%97%E6%B5%81%E6%B5%AA%E7%9A%84%E6%81%92%E6%98%9F/ar-AA15ybhx?cvid=0eaf927e48604c0588413d393c788a8f&ocid=winp2fptaskbarent")

    fmt.Println("Starting spider")
    urlsProcessed = 0
    totalURLCount = len(urls)

    go evaluateStatus(statusChannel, processChannel)
    go readURLs(statusChannel, textChannel)
    go appendToFullText(textChannel, processChannel)

    for {
        if applicationStatus == false {
            fmt.Println(fullText)
            fmt.Println("Done!")
            break
        }
        //select {
        //case sC := <-statusChannel:
        //    fmt.Println("Message on statusChannel", sC)
        //}
    }
}

func evaluateStatus(statusChannel chan int, processChannel chan bool) {
    for {
        select {
        case status := <-statusChannel:
            urlsProcessed++
            if status == 0 {
                fmt.Println("got url")
            }
            if status == 1 {
                close(statusChannel)
            }
            if urlsProcessed == totalURLCount {
                fmt.Println("=============>>>>urlsProcessed")
                fmt.Println(urlsProcessed)
                fmt.Println("read all top-level url")
                processChannel <- false
                applicationStatus = false
            }
        }
    }
}

func readURLs(statusChannel chan int, textChannel chan string) {
    time.Sleep(time.Millisecond * 1)
    fmt.Println("grabbing ", len(urls), " urls")
    for _, url := range urls {
        resp, _ := http.Get(url)
        text, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            fmt.Println("No HTML body")
        }
        textChannel <- string(text)
        statusChannel <- 0
    }
}

func appendToFullText(textChannel chan string, processChannel chan bool) {
    for {
        select {
        case pC := <-processChannel:
            fmt.Println("pc==============>>>")
            fmt.Println(pC)
            if pC == true {
                // hang out
            }
            if pC == false {
                // all url got
                close(textChannel)
                close(processChannel)
            }
        case tC := <-textChannel:
            fmt.Println("text len: ")
            fmt.Println(len(tC))
            fullText += tC
        }
    }
}
Thanks for your help.
As per the Go Programming Language Specification
A receive operation on a closed channel can always proceed immediately, yielding the element type's zero value after any previously sent values have been received.
This can be seen in the following (playground) demonstration (the comments show what is output):
func main() {
    processChannel := make(chan bool)
    go func() {
        processChannel <- true
        processChannel <- false
        close(processChannel)
    }()

    fmt.Println(<-processChannel) // true
    fmt.Println(<-processChannel) // false
    fmt.Println(<-processChannel) // false

    select {
    case x := <-processChannel:
        fmt.Println(x) // false
    }
}
In your code you are closing processChannel, so future receives on it will immediately return the zero value (false). One solution is to set processChannel = nil after closing it, because:
A nil channel is never ready for communication.
However, in your case appendToFullText closes both channels when pC == false; as such, you should probably just return after doing so (with both channels closed, there is no point in keeping the loop running).
Please note that I have only scanned your code
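For illustration, here is a minimal sketch of appendToFullText with that fix applied (untested, trimmed down to the relevant parts):

func appendToFullText(textChannel chan string, processChannel chan bool) {
    for {
        select {
        case pC := <-processChannel:
            if pC == false {
                // all urls received; close both channels and return so this
                // select never runs again (alternatively, setting
                // processChannel = nil here would park this case forever)
                close(textChannel)
                close(processChannel)
                return
            }
        case tC := <-textChannel:
            fullText += tC
        }
    }
}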
I'm using Go to run two websocket clients (one for private and one for public data) simultaneously using goroutines. On the surface everything seems to work fine: both clients receive data transmitted from the websocket server. I believe I may have set something up wrong, however, since when I check Activity Monitor my program consistently shows between 500 and 1500 idle wake-ups and uses over 200% of my CPU. That doesn't seem normal for something as simple as two websocket clients.
I've put the code in snippets so there's less to read (hopefully that makes it easier to understand), but if you need the entire code I can post that as well. Here is the code in my main func that runs the ws clients:
comms := make(chan os.Signal, 1)
signal.Notify(comms, os.Interrupt, syscall.SIGTERM)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
wg.Add(1)
go pubSocket.PubListen(ctx, &wg, &activeSubs, testing)
wg.Add(1)
go privSocket.PrivListen(ctx, &wg, &activeSubs, testing)
<-comms
cancel()
wg.Wait()
Here is the code for how the clients run the goroutines:
func (socket *Socket) PubListen(ctx context.Context, wg *sync.WaitGroup, subManager *ConnStatus, testing bool) {
    defer wg.Done()
    for {
        select {
        case <-ctx.Done():
            log.Println("closing public socket")
            socket.Close()
            return
        default:
            socket.OnTextMessage = func(message string, socket Socket) {
                log.Println(message)
                pubJsonDecoder(message, testing)
                //tradesParser(message);
            }
        }
    }
}

func (socket *Socket) PrivListen(ctx context.Context, wg *sync.WaitGroup, subManager *ConnStatus, testing bool) {
    defer wg.Done()
    for {
        select {
        case <-ctx.Done():
            log.Println("closing private socket")
            socket.Close()
            return
        default:
            socket.OnTextMessage = func(message string, socket Socket) {
                log.Println(message)
            }
        }
    }
}
Any ideas on why the idle wake-ups are so high? Should I be using multithreading instead of concurrency? Thanks in advance for any help!
You're wasting CPU here (superfluous loop):
for {
    select {
    // ...
    default:
        // High CPU usage here.
    }
}
Try something like this:
func (socket *Socket) PubListen(ctx context.Context, wg *sync.WaitGroup, subManager *ConnStatus, testing bool) {
    defer wg.Done()
    defer socket.Close()
    socket.OnTextMessage = func(message string, socket Socket) {
        log.Println(message)
        pubJsonDecoder(message, testing)
        //tradesParser(message);
    }
    <-ctx.Done()
    log.Println("closing public socket")
}

func (socket *Socket) PrivListen(ctx context.Context, wg *sync.WaitGroup, subManager *ConnStatus, testing bool) {
    defer wg.Done()
    defer socket.Close()
    socket.OnTextMessage = func(message string, socket Socket) {
        log.Println(message)
    }
    <-ctx.Done()
    log.Println("closing private socket")
}
Also this may help:
https://github.com/gorilla/websocket/blob/master/examples/chat/client.go
tl;dr: websockets are hard :)
It looks like you might have a couple of spinners. You are assigning the handler function for OnTextMessage() in the default case of a for-select statement. The default case always executes when no other case is ready. Because nothing in the default case blocks, that for loop just spins out of control. Both goroutines spinning like this will likely peg 2 cores. Websockets are network IO, and those goroutines are likely to run in parallel; this is why you are seeing 200% utilization.
Take a look at the gorilla/websocket library. I'm not going to say that it is better or worse than any other websocket library, but I have a lot of experience with it.
https://github.com/gorilla/websocket
Below is an implementation that I have used many times.
The way it is set up is you register handler functions that are triggered when a certain message is received. Say one of the values in your message is "type": "start-job"; the websocket server will then call the handler you assigned to the "start-job" websocket message. It feels like writing endpoints for an HTTP router.
Package serverws
context.go
package serverws

import (
    "errors"
    "fmt"
    "strings"
    "sync"
)

// ConnContext is the connection context to track a connected websocket user
type ConnContext struct {
    specialKey  string
    supportGzip string
    UserID      string
    mu          sync.Mutex // Websockets are not thread safe, we'll use a mutex to lock writes.
}

// HashKeyAsCtx returns a ConnContext based on the hash provided
func HashKeyAsCtx(hashKey string) (*ConnContext, error) {
    values := strings.Split(hashKey, ":")
    if len(values) != 3 {
        return nil, errors.New("Invalid Key received: " + hashKey)
    }
    return &ConnContext{values[0], values[1], values[2], sync.Mutex{}}, nil
}

// AsHashKey returns the hash key for a given connection context ConnContext
func (ctx *ConnContext) AsHashKey() string {
    return strings.Join([]string{ctx.specialKey, ctx.supportGzip, ctx.UserID}, ":")
}

// String returns a string of the hash of a given connection context ConnContext
func (ctx *ConnContext) String() string {
    return fmt.Sprint("specialkey: ", ctx.specialKey, " gzip ", ctx.supportGzip, " auth ", ctx.UserID)
}
wshandler.go
package serverws

import (
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "sync"
    "time"

    "github.com/gorilla/websocket"
    "github.com/rs/zerolog/log"
)

var (
    receiveFunctionMap = make(map[string]ReceiveObjectFunc)
    ctxHashMap         sync.Map
)

// ReceiveObjectFunc is a function signature for a websocket request handler
type ReceiveObjectFunc func(conn *websocket.Conn, ctx *ConnContext, t map[string]interface{})

// WebSocketHandler does what it says, handles WebSockets (makes them easier for us to deal with)
type WebSocketHandler struct {
    wsupgrader websocket.Upgrader
}

// WebSocketMessage that is sent over a websocket. Messages must have a conversation type so the server and the client JS know
// what is being discussed and what signals to raise on the server and the client.
// The "Notification" message instructs the client to display an alert popup.
type WebSocketMessage struct {
    MessageType string      `json:"type"`
    Message     interface{} `json:"message"`
}

// NewWebSocketHandler sets up a new websocket.
func NewWebSocketHandler() *WebSocketHandler {
    wsh := new(WebSocketHandler)
    wsh.wsupgrader = websocket.Upgrader{
        ReadBufferSize:  4096,
        WriteBufferSize: 4096,
    }
    return wsh
}

// RegisterMessageType sets up an event bus for a message type. When messages arrive from the client that match messageTypeName,
// the function you wrote to handle that message is then called.
func (wsh *WebSocketHandler) RegisterMessageType(messageTypeName string, f ReceiveObjectFunc) {
    receiveFunctionMap[messageTypeName] = f
}

// onMessage triggers when the underlying websocket has received a message.
func (wsh *WebSocketHandler) onMessage(conn *websocket.Conn, ctx *ConnContext, msg []byte, msgType int) {
    // Handling text messages or binary messages. Binary is usually some gzip text.
    if msgType == websocket.TextMessage {
        wsh.processIncomingTextMsg(conn, ctx, msg)
    }
    if msgType == websocket.BinaryMessage {
    }
}

// onOpen triggers when the underlying websocket has established a connection.
func (wsh *WebSocketHandler) onOpen(conn *websocket.Conn, r *http.Request) (ctx *ConnContext, err error) {
    //user, err := gothic.GetFromSession("ID", r)
    user := "TestUser"
    if err := r.ParseForm(); err != nil {
        return nil, errors.New("parameter check error")
    }
    specialKey := r.FormValue("specialKey")
    supportGzip := r.FormValue("support_gzip")
    if user != "" && err == nil {
        ctx = &ConnContext{specialKey, supportGzip, user, sync.Mutex{}}
    } else {
        ctx = &ConnContext{specialKey, supportGzip, "", sync.Mutex{}}
    }
    keyString := ctx.AsHashKey()
    if oldConn, ok := ctxHashMap.Load(keyString); ok {
        wsh.onClose(oldConn.(*websocket.Conn), ctx)
        oldConn.(*websocket.Conn).Close()
    }
    ctxHashMap.Store(keyString, conn)
    return ctx, nil
}

// onClose triggers when the underlying websocket has been closed down
func (wsh *WebSocketHandler) onClose(conn *websocket.Conn, ctx *ConnContext) {
    //log.Info().Msg(("client close itself as " + ctx.String()))
    wsh.closeConnWithCtx(ctx)
}

// onError triggers when a websocket connection breaks
func (wsh *WebSocketHandler) onError(errMsg string) {
    //log.Error().Msg(errMsg)
}

// HandleConn happens when a user connects to us at the listening point. We ask
// the user to authenticate and then send the required HTTP Upgrade return code.
func (wsh *WebSocketHandler) HandleConn(w http.ResponseWriter, r *http.Request) {
    user := ""
    if r.URL.Path == "/websocket" {
        user = "TestUser" // authenticate however you want
        if user == "" {
            fmt.Println("UNAUTHENTICATED USER TRIED TO CONNECT TO WEBSOCKET FROM ", r.Header.Get("X-Forwarded-For"))
            return
        }
    }
    // don't do this. You need to check the origin, but this is here as a place holder
    wsh.wsupgrader.CheckOrigin = func(r *http.Request) bool {
        return true
    }
    conn, err := wsh.wsupgrader.Upgrade(w, r, nil)
    if err != nil {
        log.Error().Msg("Failed to set websocket upgrade: " + err.Error())
        return
    }
    defer conn.Close()
    ctx, err := wsh.onOpen(conn, r)
    if err != nil {
        log.Error().Msg("Open connection failed " + err.Error() + r.URL.RawQuery)
        if user != "" {
            ctx.UserID = user
        }
        return
    }
    if user != "" {
        ctx.UserID = user
    }
    conn.SetPingHandler(func(message string) error {
        conn.WriteControl(websocket.PongMessage, []byte(message), time.Now().Add(time.Second))
        return nil
    })
    // Message pump for the underlying websocket connection
    for {
        t, msg, err := conn.ReadMessage()
        if err != nil {
            // Read errors are when the user closes the tab. Ignore.
            wsh.onClose(conn, ctx)
            return
        }
        switch t {
        case websocket.TextMessage, websocket.BinaryMessage:
            wsh.onMessage(conn, ctx, msg, t)
        case websocket.CloseMessage:
            wsh.onClose(conn, ctx)
            return
        case websocket.PingMessage:
        case websocket.PongMessage:
        }
    }
}

func (wsh *WebSocketHandler) closeConnWithCtx(ctx *ConnContext) {
    keyString := ctx.AsHashKey()
    ctxHashMap.Delete(keyString)
}

func (wsh *WebSocketHandler) processIncomingTextMsg(conn *websocket.Conn, ctx *ConnContext, msg []byte) {
    //log.Debug().Msg("CLIENT SAID " + string(msg))
    data := WebSocketMessage{}
    // try to turn this into data
    err := json.Unmarshal(msg, &data)
    // And try to get at the data underneath
    var raw = make(map[string]interface{})
    terr := json.Unmarshal(msg, &raw)
    if err == nil {
        // What kind of message is this?
        if receiveFunctionMap[data.MessageType] != nil {
            // We'll try to cast this message and call the handler for it
            if terr == nil {
                if v, ok := raw["message"].(map[string]interface{}); ok {
                    receiveFunctionMap[data.MessageType](conn, ctx, v)
                } else {
                    log.Debug().Msg("Nonsense sent over the websocket.")
                }
            } else {
                log.Debug().Msg("Nonsense sent over the websocket.")
            }
        }
    } else {
        // Received garbage from the transmitter.
    }
}

// SendJSONToSocket sends a specific message to a specific websocket
func (wsh *WebSocketHandler) SendJSONToSocket(socketID string, msg interface{}) {
    fields := strings.Split(socketID, ":")
    message, _ := json.Marshal(msg)
    ctxHashMap.Range(func(key interface{}, value interface{}) bool {
        if ctx, err := HashKeyAsCtx(key.(string)); err != nil {
            wsh.onError(err.Error())
        } else {
            if ctx.specialKey == fields[0] {
                ctx.mu.Lock()
                if value != nil {
                    err = value.(*websocket.Conn).WriteMessage(websocket.TextMessage, message)
                }
                ctx.mu.Unlock()
            }
            if err != nil {
                ctx.mu.Lock() // We'll lock here even though we're going to destroy this
                wsh.onClose(value.(*websocket.Conn), ctx)
                value.(*websocket.Conn).Close()
                ctxHashMap.Delete(key) // Remove the websocket immediately
                //wsh.onError("WRITE ERR TO USER " + key.(string) + " ERR: " + err.Error())
            }
        }
        return true
    })
}
Package wsocket
types.go
package wsocket

// Acknowledgement is for ACKing simple messages and sending errors
type Acknowledgement struct {
    ResponseID string `json:"responseId"`
    Status     string `json:"status"`
    IPAddress  string `json:"ipaddress"`
    ErrorText  string `json:"errortext"`
}
wsocket.go
package wsocket

import (
    "fmt"
    "sync"
    "time"

    server "project/serverws"
    "project/utils"

    "github.com/gin-gonic/gin"
    "github.com/gorilla/websocket"
    // "github.com/mitchellh/mapstructure"
    "github.com/inconshreveable/log15"
)

var (
    WebSocket         *server.WebSocketHandler // So other packages can send out websocket messages
    WebSocketLocation string
    Log               log15.Logger = log15.New("package", "wsocket")
)

func SetupWebsockets(r *gin.Engine, socket *server.WebSocketHandler, debug_mode bool) {
    WebSocket = socket
    WebSocketLocation = "example.mydomain.com"
    //WebSocketLocation = "example.mydomain.com"

    r.GET("/websocket", func(c *gin.Context) {
        socket.HandleConn(c.Writer, c.Request)
    })

    socket.RegisterMessageType("Hello", func(conn *websocket.Conn, ctx *server.ConnContext, data map[string]interface{}) {
        response := Acknowledgement{
            ResponseID: "Hello",
            Status:     fmt.Sprintf("OK/%v", ctx.UserID),
            IPAddress:  conn.RemoteAddr().String(),
        }
        // mapstructure.Decode(data, &request) -- used if we wanted to read what was fed in
        socket.SendJSONToSocket(ctx.AsHashKey(), &response)
    })

    socket.RegisterMessageType("start-job", func(conn *websocket.Conn, ctx *server.ConnContext, data map[string]interface{}) {
        response := Acknowledgement{
            ResponseID: "starting_job",
            Status:     fmt.Sprintf("%s is being dialed.", data["did"]),
            IPAddress:  conn.RemoteAddr().String(),
        }
        // mapstructure.Decode(data, &request) -- used if we wanted to read what was fed in to a struct.
        socket.SendJSONToSocket(ctx.AsHashKey(), &response)
    })
}
This implementation was for a web application. Below is a simplified version of the client side in JavaScript. You can handle many concurrent connections with this implementation, and all you do to communicate is define objects/structs that contain a responseID matching a case in the switch below (it is basically one long switch statement), serialize them, and send them to the other side; the other side will ack. I have some version of this running in several production environments.
websocket.js
$(() => {
    function wsMessage(object) {
        switch (object.responseId) {
            case "Hello": // HELLO! :-)
                console.log("Heartbeat received, we're connected.");
                break;
            case "Notification":
                if (object.errortext != "") {
                    $.notify({
                        // options
                        message: '<center><B><i class="fas fa-exclamation-triangle"></i> ' + object.errortext + '</B></center>',
                    }, {
                        // settings
                        type: 'danger',
                        offset: 50,
                        placement: {
                            align: 'center',
                        }
                    });
                } else {
                    $.notify({
                        // options
                        message: '<center><B>' + object.status + '</B></center>',
                    }, {
                        // settings
                        type: 'success',
                        offset: 50,
                        placement: {
                            align: 'center',
                        }
                    });
                }
                break;
        }
    }

    $(document).ready(function () {
        function heartbeat() {
            if (!websocket) return;
            if (websocket.readyState !== 1) return;
            websocket.send("{\"type\": \"Hello\", \"message\": { \"RequestID\": \"Hello\", \"User\":\"" + /*getCookie("_loginuser")*/"TestUser" + "\"} }");
            setTimeout(heartbeat, 24000);
        }

        //TODO: CHANGE TO WSS once tls is enabled.
        function wireUpWebsocket() {
            websocket = new WebSocket('wss://' + WEBSOCKET_LOCATION + '/websocket?specialKey=' + WEBSOCKET_KEY + '&support_gzip=0');

            websocket.onopen = function (event) {
                console.log("Websocket connected.");
                heartbeat();
                //if it exists
                if (typeof (wsReady) !== 'undefined') {
                    //execute it
                    wsReady();
                }
            };
            websocket.onerror = function (event) {
                console.log("WEBSOCKET ERROR " + event.data);
            };
            websocket.onmessage = function (event) {
                wsMessage(JSON.parse(event.data));
            };
            websocket.onclose = function () {
                // Don't close!
                // Replace key
                console.log("WEBSOCKET CLOSED");
                WEBSOCKET_KEY = Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15);
                websocketreconnects++;
                if (websocketreconnects > 30) { // Too much, time to bounce
                    // location.reload(); Don't reload the page anymore, just re-connect.
                }
                setTimeout(function () { wireUpWebsocket(); }, 3000);
            };
        }

        wireUpWebsocket();
    });
});

function getCookie(name) {
    var value = "; " + document.cookie;
    var parts = value.split("; " + name + "=");
    if (parts.length == 2) return parts.pop().split(";").shift();
}

function setCookie(cname, cvalue, exdays) {
    var d = new Date();
    d.setTime(d.getTime() + (exdays * 24 * 60 * 60 * 1000));
    var expires = "expires=" + d.toUTCString();
    document.cookie = cname + "=" + cvalue + ";" + expires + ";path=/";
}
Assigning handler functions over and over again in an infinite loop is definitely not going to work.
https://github.com/gorilla/websocket
I have a use case where I want a pool of N integers (0 to N-1) shared by N workers (N <= 100). Each worker claims an integer from the pool, "works" (for this example, sleeps for a random duration), returns it to the pool, and starts the process again. Each worker can take an arbitrary amount of time to return its key. I've quickly thrown together the following two solutions and would like to know if there's a "best" or "safest" one, and whether I'm missing a better approach. For the moment, these workers will never stop unless the application is killed, and we will have a fixed number of workers for the life of the application.
Single Buffered Channel
type Worker struct {
    ID       int
    KeyIndex int
    KeyChan  chan int
}

func (w *Worker) GetKey() {
    w.KeyIndex = <-w.KeyChan
}

func (w *Worker) ReturnKey() {
    w.KeyChan <- w.KeyIndex
}

func (w *Worker) Work() {
    for {
        w.GetKey()
        rand.Seed(time.Now().UnixNano())
        n := rand.Intn(10)
        time.Sleep(time.Duration(n) * time.Second)
        w.ReturnKey()
    }
}

func main() {
    numWorkers := 5
    c := make(chan int, numWorkers)
    for i := 0; i < numWorkers; i++ {
        c <- i
    }

    workers := make([]*Worker, numWorkers)
    for i := range workers {
        workers[i] = &Worker{
            ID:      i,
            KeyChan: c,
        }
    }

    for _, w := range workers {
        go w.Work()
    }

    ch := make(chan byte, 1)
    <-ch
}
Broker w/ Array + Mutex
type KeyBrokerMutex struct {
    mu   sync.Mutex
    keys []bool
}

func (kb *KeyBrokerMutex) GetKey() int {
    kb.mu.Lock()
    defer kb.mu.Unlock()
    for i, k := range kb.keys {
        if k {
            kb.keys[i] = false
            return i
        }
    }
    return -1
}

func (kb *KeyBrokerMutex) ReturnKey(index int) {
    kb.mu.Lock()
    defer kb.mu.Unlock()
    kb.keys[index] = true
}

type Worker struct {
    ID        int
    KeyIndex  int
    KeyBroker *KeyBrokerMutex
}

func (w *Worker) GetKeyBrokerMutex() {
    w.KeyIndex = w.KeyBroker.GetKey()
}

func (w *Worker) ReturnKeyBrokerMutex() {
    w.KeyBroker.ReturnKey(w.KeyIndex)
    w.KeyIndex = -1
}

func (w *Worker) WorkMutex() {
    for {
        w.GetKeyBrokerMutex()
        rand.Seed(time.Now().UnixNano())
        n := rand.Intn(10)
        time.Sleep(time.Duration(n) * time.Second)
        w.ReturnKeyBrokerMutex()
    }
}

func main() {
    numWorkers := 5
    keyBroker := KeyBrokerMutex{keys: make([]bool, numWorkers)}
    for i := range keyBroker.keys {
        keyBroker.keys[i] = true
    }

    workers := make([]*Worker, numWorkers)
    for i := range workers {
        workers[i] = &Worker{
            ID:        i,
            KeyBroker: &keyBroker,
        }
    }

    for _, w := range workers {
        go w.WorkMutex()
    }

    ch := make(chan byte, 1)
    <-ch
}
I also have a broker approach using 2 separate channels for getting and returning keys (sketched below); however, I don't think that offers any benefits over the above solutions.
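Roughly, the two-channel broker idea looks like this (a rough sketch of the idea only, not my exact code):

type KeyBrokerChan struct {
    get chan int // broker hands out free keys here
    ret chan int // workers return keys here
}

func (kb *KeyBrokerChan) Run(n int) {
    free := make([]int, 0, n)
    for i := 0; i < n; i++ {
        free = append(free, i)
    }
    for {
        if len(free) == 0 {
            // nothing to hand out; block until a key comes back
            free = append(free, <-kb.ret)
            continue
        }
        select {
        case kb.get <- free[0]:
            free = free[1:]
        case k := <-kb.ret:
            free = append(free, k)
        }
    }
}

// workers do: key := <-kb.get; ...work...; kb.ret <- key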
I like the simplicity of the single channel approach, but is there any downside to having multiple consumers and producers to a single buffered channel?
I am implementing code that prints Lamport logical time upon completion of sending messages to servers and broadcasting to nodes. My program ran fine before I implemented the Lamport logical time code. Upon printing "Stopping Server...", the program breaks and reports a deadlock. Can anyone help me spot my mistake?
package main

import (
    "fmt"
    "math/rand"
    "time"
)

const num_nodes int = 3
const num_messages int = 2

// some arbitrary large number
const buffer_channel = 10000

type Server struct {
    serverChannel chan Message
    nodeArray     []Node
    timestamp     int
}

type Node struct {
    nodeId       int
    nodeChannel  chan Message
    server       Server
    closeChannel chan int
    readyChannel chan int
    timestamp    int
}

type Message struct {
    senderId  int
    messageId int
    timestamp int
}

func max(x int, y int) int {
    if x > y {
        return x
    }
    return y
}

func broadcast(m Message, s Server) {
    for _, n := range s.nodeArray {
        if n.nodeId != m.senderId {
            broadcastMessage := Message{
                m.senderId,
                m.messageId,
                s.timestamp,
            }
            go s.broadcastMessage(n.nodeChannel, broadcastMessage)
        }
    }
}

func (s Server) broadcastMessage(nodeChannel chan Message, broadcastMessage Message) {
    fmt.Printf("[Server] is sending Message %d.%d to [Node %d]\n", broadcastMessage.senderId, broadcastMessage.messageId, broadcastMessage.senderId)
    nodeChannel <- broadcastMessage
}

func (s Server) listen(messagesBufferChannel chan Message) {
    numCompletedNodes := 0
    for {
        nodeMessage := <-s.serverChannel
        s.timestamp = max(s.timestamp, nodeMessage.timestamp) + 1
        nodeMessage.timestamp = s.timestamp
        fmt.Printf("TS: %d -- [Server] has received Message %d.%d from [Node %d]\n", s.timestamp, nodeMessage.senderId, nodeMessage.messageId, nodeMessage.senderId)
        messagesBufferChannel <- nodeMessage
        s.timestamp += 1
        broadcast(nodeMessage, s)
        if nodeMessage.messageId == num_messages-1 {
            numCompletedNodes += 1
            if numCompletedNodes == num_nodes {
                fmt.Println("Server finish broadcasting all messages. Stopping Server...")
                return
            }
        }
        numMilliSeconds := rand.Intn(1000) + 2000
        time.Sleep(time.Duration(numMilliSeconds) * time.Millisecond)
    }
}

func (n Node) preSendMessage() {
    for i := 1; i <= num_messages; i++ {
        numMilliSeconds := rand.Intn(1000) + 2000
        time.Sleep(time.Duration(numMilliSeconds) * time.Millisecond)
        n.readyChannel <- i
    }
}

func (n Node) listenSendMessages(messagesBufferChannel chan Message) {
    for {
        select {
        case receivedMessage := <-n.nodeChannel:
            n.timestamp = max(n.timestamp, receivedMessage.timestamp) + 1
            receivedMessage.timestamp = n.timestamp
            fmt.Printf("TS: %d -- [Node %d] has received Message %d.%d from [Server]\n", n.timestamp, n.nodeId, receivedMessage.senderId, receivedMessage.messageId)
            messagesBufferChannel <- receivedMessage
        case nodeMessageId := <-n.readyChannel:
            n.timestamp += 1
            fmt.Printf("TS: %d -- [Node %d] is sending Message %d.%d to [Server]\n", n.timestamp, n.nodeId, n.nodeId, nodeMessageId)
            nodeMessage := Message{
                n.nodeId,
                nodeMessageId,
                n.timestamp,
            }
            n.server.serverChannel <- nodeMessage
        case <-n.closeChannel:
            fmt.Printf("Stopping [node %d]\n", n.nodeId)
            return
        default:
        }
    }
}

func main() {
    fmt.Println("Start of Program...")

    server := Server{
        serverChannel: make(chan Message),
        nodeArray:     []Node{},
        timestamp:     0,
    }
    for i := 1; i <= num_nodes; i++ {
        newNode := Node{
            nodeId:       i,
            nodeChannel:  make(chan Message),
            server:       server,
            readyChannel: make(chan int),
            closeChannel: make(chan int),
            timestamp:    0,
        }
        server.nodeArray = append(server.nodeArray, newNode)
    }

    var messagesBufferChannel chan Message = make(chan Message, buffer_channel)

    for _, n := range server.nodeArray {
        go n.preSendMessage()
        go n.listenSendMessages(messagesBufferChannel)
    }
    server.listen(messagesBufferChannel)

    time.Sleep(time.Second)
    for _, n := range server.nodeArray {
        n.closeChannel <- 1
    }
    time.Sleep(time.Second)
    close(messagesBufferChannel)
}
I am implementing a simple gRPC service where the summary of a task is sent to the gRPC server. Everything works fine if I send a small number of messages, but when I begin to send around 5000 messages the server stalls and the client gets a deadline exceeded error. When I tried to reconnect and send again, I got this error:
rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: timed out waiting for server handshake
The server shows no error and is still alive.
I tried setting GRPC_GO_REQUIRE_HANDSHAKE=off as well, but the error persists. I also implemented sending the summary in batches, but the same scenario repeated.
Is there a limit to the number of messages that can be sent over gRPC?
Here is my service proto:

// The Result service definition.
service Result {
  rpc ConntectMaster(ConnectionRequest) returns (stream ExecutionCommand) {}
  rpc postSummary(Summary) returns (ExecutionCommand) {}
}

message Summary {
  int32 successCount = 1;
  int32 failedCount = 2;
  int32 startTime = 3;
  repeated TaskResult results = 4;
  bool isLast = 5;
  string id = 6;
}
postSummary implementation on the server:
// PostSummary posts the summary to the master
func (server *Server) PostSummary(ctx context.Context, in *pb.Summary) (*pb.ExecutionCommand, error) {
    for i := 0; i < len(in.Results); i++ {
        res := in.Results[i]
        log.Printf("%s --> %d Res :: %s, len : %d", in.Id, i, res.Id, len(in.Results))
    }
    return &pb.ExecutionCommand{Type: stopExec}, nil
}

func postSummaryInBatch(executor *Executor, index int) {
    summary := pb.Summary{
        SuccessCount: int32(executor.summary.successCount),
        FailedCount:  int32(executor.summary.failedCount),
        Results:      []*pb.TaskResult{},
        IsLast:       false,
    }

    if index >= len(executor.summary.TaskResults) {
        summary.IsLast = true
        return
    }

    var to int
    batch := 500
    if (index + batch) <= len(executor.summary.TaskResults) {
        to = index + batch
    } else {
        to = len(executor.summary.TaskResults)
    }

    for i := index; i < to; i++ {
        result := executor.summary.TaskResults[i]
        taskResult := pb.TaskResult{
            Id:   result.id,
            Msg:  result.msg,
            Time: result.time,
        }
        // log.Printf("adding res : %s ", taskResult.Id)
        if result.err != nil {
            taskResult.IsError = true
        }
        summary.Results = append(summary.Results, &taskResult)
    }
    summary.Id = fmt.Sprintf("%d-%d", index, to)
    log.Printf("sent from %d to %d ", index, to)
    postSummary(executor, &summary, 0)
    postSummaryInBatch(executor, to)
}

func postSummary(executor *Executor, summary *pb.Summary, retryCount int) {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    cmd, err := client.PostSummary(ctx, summary)
    if err != nil {
        if retryCount < 3 {
            reconnect(executor)
            postSummary(executor, summary, retryCount+1)
        }
        log.Printf(err.Error())
        // log.Fatal("cannot send summary report")
    } else {
        processServerCommand(executor, cmd)
    }
}
grpc's default maxReceiveMessageSize is 4 MB; your grpc client probably went over that limit.
grpc uses HTTP/2 in the transport layer, which opens only one TCP connection and multiplexes "requests" over it, significantly reducing overhead compared to HTTP/1. I wouldn't worry too much about batching and would just make individual calls to the grpc server.
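If a batched Summary really is crossing that 4 MB default, the limits can also be raised on both sides. A minimal sketch (the 16 MB figure and the address are illustrative, not from the original code):

// Server side: accept inbound messages larger than the 4 MB default.
s := grpc.NewServer(grpc.MaxRecvMsgSize(16 * 1024 * 1024))

// Client side: raise the default per-call limits when dialing.
conn, err := grpc.Dial("localhost:50051",
    grpc.WithInsecure(),
    grpc.WithDefaultCallOptions(
        grpc.MaxCallRecvMsgSize(16*1024*1024),
        grpc.MaxCallSendMsgSize(16*1024*1024),
    ),
)
if err != nil {
    log.Fatal(err)
}
defer conn.Close()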