How to close HTTP/2 client stream when server closes request stream? - go

I created an HTTP/2 client and server program using Go's net/http library for streaming request/response between client and server. I took the reference code from https://github.com/golang/go/issues/13444#issuecomment-161115822 .
Server code
func handle_stream_request(w http.ResponseWriter, r *http.Request) {
    buf := make([]byte, 1024)
    i := 0
    for {
        i = i + 1
        if i == 10 {
            break
        }
        n, _ := r.Body.Read(buf)
        response_str := "Server ACK for: " + string(buf[:n])
        w.Write([]byte(response_str))
        if f, ok := w.(http.Flusher); ok {
            f.Flush()
        }
    }
}
Client code
func send_stream_request(client *http.Client) {
    pr, pw := io.Pipe()
    req, _ := http.NewRequest("PUT", "https://localhost:8000/", ioutil.NopCloser(pr))
    var res *http.Response
    go func() {
        for {
            time.Sleep(2 * time.Second)
            s := "Client ping#" + get_time()
            pw.Write([]byte(s))
            if res != nil {
                buf := make([]byte, 1024)
                n, _ := res.Body.Read(buf)
                log.Printf("Response is: %s", string(buf[:n]))
            }
        }
    }()
    go func() {
        response, _ := client.Do(req)
        res = response
        log.Printf("\n Got: %#v", res)
    }()
    select {}
}
What did you expect to see?
After 10 pings from the client, the client program should exit successfully.
What did you see instead?
The client program blocks on the pipe write pw.Write([]byte(s)) after the server has finished its handler, because there is no longer a reader on the other end of the pipe.
I am not able to understand how to stop such a client program from hanging. What signal does the server send to the client to indicate that the request stream has been closed and that it should not try to write anything more to the pipe?
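One way out (a sketch based on how io.Pipe interacts with the HTTP transport, not a tested fix): pass the *io.PipeReader to the request directly instead of wrapping it in ioutil.NopCloser. NopCloser swallows the Close call that the transport makes on the request body once the server ends the stream, so the pipe never closes and pw.Write blocks forever; with the pipe reader passed as-is, that Close propagates and later pw.Write calls fail with io.ErrClosedPipe, which the writing goroutine can treat as the "server is done" signal:

req, _ := http.NewRequest("PUT", "https://localhost:8000/", pr) // *io.PipeReader already implements io.ReadCloser

go func() {
    for {
        time.Sleep(2 * time.Second)
        s := "Client ping#" + get_time()
        if _, err := pw.Write([]byte(s)); err != nil {
            // io.ErrClosedPipe: the transport closed the request body
            // because the server finished the stream; stop writing.
            log.Printf("request stream closed: %v", err)
            return
        }
    }
}()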

Related

Golang: how to ensure redis subscribers receive all messages from redis pubsub?

I am trying to publish messages to dynamically generated channels in Redis while subscribing to all messages from the existing channels.
The following seems to work, but it fails to receive some messages, depending on the timing of requests from the client (browser).
I tried a "fan-in" of the two Go channels in the select statement, but it did not work well.
package main

import (
    ...
    "github.com/go-redis/redis/v8"
    "github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{}

var rd = redis.NewClient(&redis.Options{
    Addr: "localhost:6379",
})

var ctx = context.Background()

func echo(w http.ResponseWriter, r *http.Request) {
    conn, err := upgrader.Upgrade(w, r, nil)
    if err != nil {
        log.Println("websocket connection err:", err)
        return
    }
    defer conn.Close()

    room := make(chan string)
    start := make(chan string)

    go func() {
    loop:
        for {
            sub := rd.Subscribe(ctx)
            defer sub.Close()
            channels := []string{}
            for {
                select {
                // it seems that messages are sometimes not received when executing this case
                case channel := <-room:
                    log.Println("channel", channel)
                    channels = append(channels, channel)
                    sub = rd.Subscribe(ctx, channels...)
                    start <- "ok"
                case msg := <-sub.Channel():
                    log.Println("msg", msg)
                    err := conn.WriteMessage(websocket.TextMessage, []byte(msg.Payload))
                    if err != nil {
                        log.Println("websocket write err:", err)
                        break loop
                    }
                }
            }
        }
    }()

    for {
        _, msg, err := conn.ReadMessage()
        if err != nil {
            log.Println("websocket read err:", err)
            break
        }
        log.Println(string(msg))
        chPrefix := strings.Split(string(msg), ":")[0]
        ch := chPrefix + "-channel"
        if string(msg) == "test" || string(msg) == "yeah" {
            room <- ch
            log.Println(ch)
            log.Println(<-start)
        }
        if err := rd.Publish(ctx, ch, msg).Err(); err != nil {
            log.Println("redis publish err:", err)
            break
        }
    }
}

func main() {
    http.Handle("/", http.FileServer(http.Dir("./js")))
    http.HandleFunc("/ws", echo)
    log.Println("server starting...", "http://localhost:5000")
    log.Fatal(http.ListenAndServe("localhost:5000", nil))
}
If by "all messages" you mean that you do not wish to lose any messages, I would recommend using Redis Streams instead of pub/sub. This ensures you do not miss messages and can go back over the stream history if necessary.
An example of combining Go, Streams, and websockets should get you started in that direction; a minimal sketch follows.
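A sketch under the question's setup (the go-redis v8 client rd and ctx from above; the stream name room-stream and the field msg are placeholders): the producer appends entries with XAdd, and the consumer tails the stream with a blocking XRead, resuming from the last ID it has seen, so entries published between reads are not lost.

// Producer: append each message to a stream instead of publishing to a channel.
err := rd.XAdd(ctx, &redis.XAddArgs{
    Stream: "room-stream",
    Values: map[string]interface{}{"msg": "hello"},
}).Err()
if err != nil {
    log.Println("xadd err:", err)
}

// Consumer: block until new entries arrive, starting after lastID.
lastID := "0" // "0" replays history; "$" would start from now
for {
    streams, err := rd.XRead(ctx, &redis.XReadArgs{
        Streams: []string{"room-stream", lastID},
        Block:   0, // block indefinitely
    }).Result()
    if err != nil {
        log.Println("xread err:", err)
        return
    }
    for _, s := range streams {
        for _, m := range s.Messages {
            lastID = m.ID // resume point: nothing between reads is missed
            log.Println("msg", m.Values["msg"])
        }
    }
}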

socket: too many open files Error for goroutines in indefinite loop

I have a requirement in my program to send metrics to Datadog indefinitely (for continuous app monitoring in Datadog). The program runs for a while and then exits with the error "dial udp 127.0.0.1:18125: socket: too many open files".
func sendData(name []string, channel chan []string) {
    c, err := statsd.New("127.0.0.1:18125")
    if err != nil {
        log.Fatal(err)
    }
    v := versionDetails()
    tag := "tag:" + v
    final_tag := []string{tag}
    appEpochTimeList := epochTime()
    rate := float64(1)
    for i, app := range name {
        e := c.Gauge(app, float64(appEpochTimeList[i]), final_tag, rate)
        if e != nil {
            log.Println(e)
            channel <- name
        }
        channel <- name
        log.Printf("Metrics Sent !!")
    }
}
The app names are read from a config.toml file
The problem is your sendData() function. It is called in your for loop and contains the following line:
c, err := statsd.New("127.0.0.1:18125")
This line creates a new Datadog client, which opens a UDP socket (hence the "dial udp" in your error message).
With every iteration of your loop a new socket is allocated, and after enough iterations no more file descriptors can be opened, resulting in:
socket: too many open files
To fix this, create the client only once and pass it to your function as a parameter.
func sendData(client *statsd.Client, name []string, channel chan []string) {
    // do something with client...
}

func main() {
    client, err := statsd.New("127.0.0.1:18125")
    if err != nil {
        log.Fatal(err)
    }
    // do something else ...
    for res := range channel {
        go func(client *statsd.Client, appName []string) {
            time.Sleep(5 * time.Second)
            sendData(client, appName, channel)
        }(client, res)
    }
}

Too many open files serving http

I have the following code
package main

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil" // needed for ioutil.Discard / ioutil.ReadAll below
    "log"
    "net/http"
    "strings" // needed for strings.Replace below
    "time"

    "github.com/gorilla/mux"

    httprouter "github.com/fasthttp/router"
    "github.com/valyala/fasthttp"
)
func main() {
    router := mux.NewRouter().StrictSlash(true)
    /*router := NewRouter()*/
    router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        _, _ = fmt.Fprintf(w, "Hello!!!")
    })
    router.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
        vars := mux.Vars(r)
        prepare(w, r, vars["name"])
    }).Methods("POST")
    log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", 8080), router))
}

// using fasthttp
func _() {
    router := httprouter.New()
    router.GET("/", func(w *fasthttp.RequestCtx) {
        _, _ = fmt.Fprintf(w, "Hello!!!")
    })
    router.POST("/:name", func(w *fasthttp.RequestCtx) {
        prepareRequest(w, w.UserValue("name").(string))
    })
    log.Fatal(fasthttp.ListenAndServe(fmt.Sprintf(":%d", 8080), router.Handler))
}

//func prepare(w *fasthttp.RequestCtx, name string)
func prepare(w http.ResponseWriter, r *http.Request, name string) {
    // other part of the code and call to goroutine
    var urls []string
    // let's say all the urls are loaded; call the goroutine func, wait for the
    // channel to respond, and then proceed with the responses of all urls
    results := callUrls(urls) // there are at least 10 urls to call simultaneously for each request every time
    process(w, results)
}

type Response struct {
    status int
    url    string
    body   string
}
func callUrls(urls []string) []*Response {
    ch := make(chan *Response, len(urls))
    for _, url := range urls {
        go func(url string) {
            // http post on url,
            // based on the status code of the url call, add to status code
            // something like
            req, err := http.NewRequest("POST", url, bytes.NewBuffer(somePostData))
            req.Header.Set("Content-Type", "application/json")
            req.Close = true
            client := &http.Client{
                Timeout: 100 * time.Millisecond,
            }
            response, err := client.Do(req)

            // Using the fasthttp client
            /*req := fasthttp.AcquireRequest()
            req.SetRequestURI(url)
            req.Header.Set("Content-Type", "application/json")
            req.Header.SetMethod("POST")
            req.SetBody(somePostData)
            response := fasthttp.AcquireResponse()
            client := &fasthttp.Client{
                ReadTimeout: 100 * time.Millisecond,
            }
            err := client.Do(req, response)*/

            if err != nil {
                // on error there is no usable response body to drain or close
            } else {
                // success: read the body once, then close it so the
                // underlying connection can be reused or released
                body, _ := ioutil.ReadAll(response.Body)
                _ = response.Body.Close()
                strBody := string(body)
                strBody = strings.Replace(strBody, "\r", "", -1)
                strBody = strings.Replace(strBody, "\n", "", -1)
            }
            // return to channel accordingly
            ch <- &Response{200, "url", "response body"}
        }(url)
    }
    var results []*Response
    for r := range ch {
        results = append(results, r)
        if len(results) == len(urls) {
            // Done
            close(ch)
            return results
        }
    }
    return results
}

//func process(w *fasthttp.RequestCtx, results []*Response)
func process(w http.ResponseWriter, results []*Response) {
    fmt.Println("response", "response body")
}
After serving a few requests on a multi-core CPU (around 4000-6000 requests coming in per second), I get a "too many open files" error, and response time and CPU usage go beyond all limits. (Could the CPU be high because I convert bytes to string a few times to replace a few characters? Any suggestions?)
I have seen other questions referring to closing the request/response body and/or setting sysctl or ulimit to higher values; I followed those, but I always end up with the error.
Config on the server:
/etc/sysctl.conf: net.ipv4.tcp_tw_recycle = 1
open files (-n): 65535
I need the code to respond in milliseconds, but it takes up to 50 seconds when the CPU is high.
I have tried both net/http and fasthttp, but with no improvement. My Node.js request npm module does everything perfectly on the same server. What is the best way to handle those connections, or what changes to the code are needed for improvement?
You can use the following library:
Requests: A Go library for reducing the headache when making HTTP requests (20k req/s)
https://github.com/alessiosavi/Requests
It was developed to solve the "too many open files" problem that comes with parallel requests.
The idea is to allocate a list of requests, then send them with a configurable "parallel" factor that allows only "N" requests to run at a time.
Initialize the requests (you already have a set of urls):
// This array will contain the list of requests
var reqs []requests.Request
// N is the number of requests to run in parallel; in order to avoid
// "too many open files", N has to be lower than the ulimit threshold
var N int = 12
// Create the list of requests
for i := 0; i < 1000; i++ {
    // In this case, we init 1000 requests with the same URL, METHOD, BODY, HEADERS
    req, err := requests.InitRequest("https://127.0.0.1:5000", "GET", nil, nil, true)
    if err != nil {
        // The request is not compliant and will not be added to the list
        log.Println("Skipping request [", i, "]. Error: ", err)
    } else {
        // If no error occurs, append the request to the list of requests that we need to send
        reqs = append(reqs, *req)
    }
}
At this point, we have a list that contains the requests that have to be sent.
Let's send them in parallel!
// This array will contain the responses of the given requests
var response []datastructure.Response
// Send the requests with N of them running in parallel
response = requests.ParallelRequest(reqs, N)
// Print the responses
for i := range response {
    // Dump is a method that prints all the information related to the response
    log.Println("Request [", i, "] -> ", response[i].Dump())
    // Or use the data present in the response
    log.Println("Headers: ", response[i].Headers)
    log.Println("Status code: ", response[i].StatusCode)
    log.Println("Time elapsed: ", response[i].Time)
    log.Println("Error: ", response[i].Error)
    log.Println("Body: ", string(response[i].Body))
}
You can find example usage in the example folder of the repository.
SPOILER:
I'm the author of this little library
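For comparison, the same bounded-parallelism idea can be sketched with nothing but the standard library, using a buffered channel as a counting semaphore (a sketch, not the Requests library's implementation; limitedFetch is a hypothetical helper, and it assumes imports of sync, net/http, io, io/ioutil, log, and time):

// limitedFetch fetches each url with at most N requests in flight.
func limitedFetch(urls []string, N int) {
    sem := make(chan struct{}, N) // buffered channel as a counting semaphore
    var wg sync.WaitGroup
    client := &http.Client{Timeout: 5 * time.Second} // one shared client, so connections are reused

    for _, u := range urls {
        wg.Add(1)
        go func(u string) {
            defer wg.Done()
            sem <- struct{}{}        // acquire a slot (blocks while N are in flight)
            defer func() { <-sem }() // release the slot
            resp, err := client.Get(u)
            if err != nil {
                log.Println("request err:", err)
                return
            }
            io.Copy(ioutil.Discard, resp.Body) // drain so the connection can be reused
            resp.Body.Close()
        }(u)
    }
    wg.Wait()
}

This caps open sockets at N regardless of how many urls arrive, which is the same property the library provides.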

How to use SingleFlight to share a large downloaded file?

I'm proxying a bunch of HTTP GET calls through singleflight, but the returned response is only seen by the first request.
I also noticed a problem in my test: if the first request times out, the response is lost.
Let's say r1, r2, r3 are requests that come in that order. They are all grouped under one group key. If r1 times out, r2 and r3 will wait until the shared HTTP call returns or until their own timeouts.
proxy code (credits to here)
// add auth to the request and proxy to the target host
var serveReverseProxy = func(target string, res http.ResponseWriter, req *http.Request) {
    log.Println("new request!")
    requestURL, _ := url.Parse(target)
    proxy := httputil.NewSingleHostReverseProxy(requestURL)
    req1, _ := http.NewRequest(req.Method, req.RequestURI, req.Body)
    for k, v := range req.Header {
        for _, vv := range v {
            req1.Header.Add(k, vv)
        }
    }
    req1.Header.Set("Authorization", "Bearer "+"some token")
    req1.Host = requestURL.Host
    proxy.ServeHTTP(res, req1)
}

var requestGroup singleflight.Group

mockBackend := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
    groupKey := req.Host + req.RequestURI
    name := req.Header.Get("From")
    ch := requestGroup.DoChan(groupKey, func() (interface{}, error) {
        // increase key retention to 20s to make sure r1, r2, r3 are all in one group
        go func() {
            time.Sleep(20 * time.Second)
            requestGroup.Forget(groupKey)
            log.Println("Key deleted :", groupKey)
        }()
        // proxy to some host and expect the result to be written to res
        serveReverseProxy("https://somehost.com", res, req)
        return nil, nil
    })
    timeout := time.After(15 * time.Second)
    var result singleflight.Result
    select {
    case <-timeout: // Timeout elapsed, send a timeout message (504)
        log.Println(name, " timed out")
        http.Error(res, "request timed out", http.StatusGatewayTimeout)
        return
    case result = <-ch: // Received result from channel
    }
    if result.Err != nil {
        http.Error(res, result.Err.Error(), http.StatusInternalServerError)
        return
    }
    if result.Shared {
        log.Println(name, " is shared")
    } else {
        log.Println(name, " not shared")
    }
}))
I'd expect r2 and r3 to either:
at least see the result from their own ResponseWriter, or
time out along with r1.
https://github.com/golang/net/blob/master/http2/h2demo/h2demo.go#L181-L219
This works. It turns out I need to return the handler's output from singleflight.Group.Do instead of writing the response directly; see the sketch below.
The likely reason: singleflight runs the function only once and hands the same return value to every waiting caller, so anything written straight to the first caller's ResponseWriter never reaches r2 and r3, while a returned value can be replayed by each caller onto its own writer.
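A minimal sketch of that shape, using the question's names (res, req, groupKey, requestGroup, serveReverseProxy): record the proxied response inside the singleflight function and return the recorder, so the single shared execution produces a value every caller can replay. httptest.NewRecorder is used for illustration; its buffer can be read by many callers because Bytes() does not consume it.

ch := requestGroup.DoChan(groupKey, func() (interface{}, error) {
    // Write the proxied response into a recorder instead of the first
    // caller's ResponseWriter, and return it as the shared value.
    rec := httptest.NewRecorder()
    serveReverseProxy("https://somehost.com", rec, req)
    return rec, nil
})

result := <-ch
if result.Err != nil {
    http.Error(res, result.Err.Error(), http.StatusInternalServerError)
    return
}
// Every caller, shared or not, replays the recorded response itself.
rec := result.Val.(*httptest.ResponseRecorder)
for k, vals := range rec.Header() {
    for _, v := range vals {
        res.Header().Add(k, v)
    }
}
res.WriteHeader(rec.Code)
res.Write(rec.Body.Bytes())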

Golang concurrently reading from a tcp connection

I am having some issues with a Go project. The code is way too big to copy and paste, so I will try to explain as well as I can.
My program first connects to a TCP server, then starts a goroutine, passing the connection object as an argument.
What I'm trying to achieve is to have the client read from the TCP connection indefinitely while at the same time taking user input and communicating with the server by sending and retrieving data. I've tried using another goroutine, but the program blocks whenever it tries to retrieve data from the server.
Here is a reproduction of the error on go playground.
server: https://play.golang.org/p/OD5ozCRmy_4
client: https://play.golang.org/p/t1r_BAQM-jn
Basically whenever the client tries to read from the connection it gets stuck.
Thank you for your help.
You should use channels.
Here is a sample that accepts multiple connections, where each connection can send data as it wishes:
package tcp

import (
    "bufio"
    "fmt"
    "io"
    "net"
    "strconv"

    "../config"
    "../controllers"
    h "../helpers"
    "../log"
)

type msgFormat struct {
    text []byte
    net.Conn
}

var accounts = make(map[net.Conn]int)
var conns = make(chan net.Conn)
var dconns = make(chan net.Conn)
var msgs = make(chan msgFormat)
var i int

// Init is the entry point: it starts the listener and then multiplexes
// connection, message, and disconnection events over the channels above.
func Init() {
    startserver()
    for {
        select {
        case conn := <-conns:
            handleconnect(conn)
        case msg := <-msgs:
            go handlemsg(msg)
        case dconn := <-dconns:
            handlediscounect(dconn)
        }
    }
}

func handlemsg(incomemsg msgFormat) {
    logger.Log.Println(string(incomemsg.text))
    resp, err := controllers.Do(incomemsg.text)
    if err != nil {
        logger.Log.Println(err.Error())
    }
    // frame the response: 4-digit zero-padded length prefix, then the payload
    strLen := []byte(h.Lpad(fmt.Sprintf("%v", len(resp)), "0", 4))
    fresponse := append(strLen, resp...)
    incomemsg.Write(fresponse)
    logger.Log.Printf("response is %v", string(fresponse))
}

func startserver() {
    conf := config.GetConfigInstance()
    ln, err := net.Listen(conf.SERVER.Nettype, conf.SERVER.Address)
    if err != nil {
        logger.Log.Println(err.Error())
    }
    logger.Log.Printf("server is serving at %v", conf.SERVER.Address)
    go func() {
        for {
            conn, err := ln.Accept()
            if err != nil {
                logger.Log.Println(err.Error())
            }
            conns <- conn
        }
    }()
}

func readdate(conn net.Conn, i int) {
    // one buffered reader per connection; creating it inside the loop
    // could silently drop data it had already buffered
    rd := bufio.NewReader(conn)
    for {
        dataLen := make([]byte, 4)
        // io.ReadFull instead of a bare Read: a single Read may return
        // fewer bytes than the buffer holds
        if _, err := io.ReadFull(rd, dataLen); err != nil {
            break
        }
        intLen, _ := strconv.Atoi(string(dataLen))
        data := make([]byte, intLen)
        if _, err := io.ReadFull(rd, data); err != nil {
            break
        }
        msgs <- msgFormat{data, conn}
    }
    dconns <- conn
}

func handleconnect(newconnection net.Conn) {
    accounts[newconnection] = i
    i++
    logger.Log.Printf("Action: Client_Connected %v is connected via %v \n", i, newconnection.RemoteAddr().(*net.TCPAddr).IP)
    go readdate(newconnection, i)
}

func handlediscounect(disconnection net.Conn) {
    logger.Log.Printf("Action: Client_Disconnected %v / %v is gone\n", accounts[disconnection]+1, disconnection.RemoteAddr().(*net.TCPAddr).IP)
    delete(accounts, disconnection)
}
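To talk to this server, the client has to speak the same framing in both directions: a 4-digit zero-padded length followed by the payload. A hypothetical client sketch (the address and payload are placeholders, not part of the sample above):

package main

import (
    "fmt"
    "io"
    "log"
    "net"
    "strconv"
)

func main() {
    conn, err := net.Dial("tcp", "localhost:8080") // placeholder address
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // send: 4-digit zero-padded length prefix, then the payload
    msg := []byte(`{"action":"ping"}`) // placeholder payload
    frame := append([]byte(fmt.Sprintf("%04d", len(msg))), msg...)
    if _, err := conn.Write(frame); err != nil {
        log.Fatal(err)
    }

    // receive: read the 4-byte length, then exactly that many payload bytes
    head := make([]byte, 4)
    if _, err := io.ReadFull(conn, head); err != nil {
        log.Fatal(err)
    }
    n, _ := strconv.Atoi(string(head))
    payload := make([]byte, n)
    if _, err := io.ReadFull(conn, payload); err != nil {
        log.Fatal(err)
    }
    log.Println("server replied:", string(payload))
}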
