Why is my Go HTTP streaming server not sending lines successfully?

I have a very basic HTTP server and HTTP client. The server is not sending out the data when the response is chunked, despite flushing it. I have validated this with both a Go HTTP client and curl.
Here is the server:
func main() {
    r := mux.NewRouter()
    baseURL := ""
    r.HandleFunc(baseURL+"/", handler).Methods("GET")
    listener, _ := net.Listen("tcp", "127.0.0.1:0")
    http.Handle("/", r)
    go http.Serve(listener, nil)
    url = "http://" + listener.Addr().String() + "/"
    fmt.Println(url)
    TestSend()
}

func writeHeaders(w http.ResponseWriter, code int) {
    w.Header().Set("Content-Type", "text/plain")
    w.WriteHeader(code)
}

func handler(w http.ResponseWriter, r *http.Request) {
    fmt.Println("handler")
    writeHeaders(w, 200)
    rsc := bufio.NewScanner(strings.NewReader(response))
    for rsc.Scan() {
        sender <- rsc.Text()
    }
    for d := range sender {
        w.Write([]byte(d))
        w.(http.Flusher).Flush()
        fmt.Println("wrote and flushed " + d)
    }
}
And the client:
res, _ := http.Get(url)
reader := bufio.NewReader(res.Body)
for {
    fmt.Println("ReadBytes")
    data, err := reader.ReadBytes('\r')
    fmt.Println("read")
    if err != nil {
        fmt.Println("error!")
        fmt.Println(err)
        return
    }
    fmt.Println("read line " + string(data))
}
The server output says it is writing and flushing, and the client prints "ReadBytes" to show it is waiting to read, but it never reads any lines:
$ go run httptest.go
http://127.0.0.1:55967/
handler
wrote and flushed abc
wrote and flushed def
wrote and flushed ghi
wrote and flushed jkl
wrote and flushed mno
ReadBytes
(here it just waits)
If, while it is running, I curl, I get the right headers, but no data:
$ curl -i http://127.0.0.1:55957/
HTTP/1.1 200 OK
Content-Type: text/plain
Date: Sun, 02 Aug 2015 07:01:21 GMT
Transfer-Encoding: chunked
(no data here at all)
Clearly the data is not leaving the server, but why? I am flushing it.

Related

Transfer-encoding chunked in golang proxy

I'm new to Go and I'm trying to build a small local proxy. The request kind of works from Postman -> localhost:9097 -> localhost:9098 and back, but the Content-Length is 120 and the response body is just gibberish.
I expect to get a JSON body like { "result": { "id": "1", "price": 100, "quantity": 1 } }.
If I make a request directly to :9098, I see that the response header Transfer-Encoding is chunked. Any idea how to adjust my code to parse the response body from the server properly and send it back to the client?
func httpHandler(w http.ResponseWriter, req *http.Request) {
    reqURL := fmt.Sprint(req.URL)
    newUrl := "http://localhost:9098" + reqURL

    // forward request
    client := http.Client{}
    freq, reqerror := http.NewRequest(req.Method, newUrl, nil)
    if reqerror != nil {
        log.Fatalln(reqerror)
    }
    freq.Header = req.Header
    freq.Body = req.Body

    resp, resperr := client.Do(freq)
    if resperr != nil {
        log.Println(resperr)
        fmt.Fprintf(w, "Error. No response")
        return
    }
    defer resp.Body.Close()

    body, ioerr := io.ReadAll(resp.Body)
    if ioerr != nil {
        log.Println(ioerr)
        fmt.Fprintf(w, "IO Error (Response body)")
        return
    }

    w.Header().Set("Content-Type", resp.Header.Get("Content-Type"))
    w.WriteHeader(resp.StatusCode)
    fmt.Fprintf(w, string(body))
}
Managed to solve this now! Thanks to Steffen Ullrich for pointing out that the issue could be "about compressed content".
Removing the Accept-Encoding header as mentioned here worked like a charm.
...
// If you manually set the Accept-Encoding request header, the gzipped response will not be automatically decompressed.
req.Header.Del("Accept-Encoding")
freq.Header = req.Header
...
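For context, here is a minimal sketch of the whole handler with that fix applied. It keeps the names from the question and uses the same imports (fmt, io, log, net/http); the io.Copy streaming and the error responses are my own simplifications, not part of the original answer:

func httpHandler(w http.ResponseWriter, req *http.Request) {
    newUrl := "http://localhost:9098" + fmt.Sprint(req.URL)

    freq, err := http.NewRequest(req.Method, newUrl, req.Body)
    if err != nil {
        log.Println(err)
        http.Error(w, "could not build request", http.StatusInternalServerError)
        return
    }

    // Drop Accept-Encoding before copying the incoming headers, so the default
    // transport negotiates gzip itself and transparently decompresses the body.
    req.Header.Del("Accept-Encoding")
    freq.Header = req.Header

    resp, err := http.DefaultClient.Do(freq)
    if err != nil {
        log.Println(err)
        http.Error(w, "no response from backend", http.StatusBadGateway)
        return
    }
    defer resp.Body.Close()

    w.Header().Set("Content-Type", resp.Header.Get("Content-Type"))
    w.WriteHeader(resp.StatusCode)
    io.Copy(w, resp.Body) // body is already decompressed by the transport
}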

Too many open files serving http

I have the following code:
package main

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "strings"
    "time"

    httprouter "github.com/fasthttp/router"
    "github.com/gorilla/mux"
    "github.com/valyala/fasthttp"
)
func main() {
    router := mux.NewRouter().StrictSlash(true)
    /*router := NewRouter()*/
    router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        _, _ = fmt.Fprintf(w, "Hello!!!")
    })
    router.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
        vars := mux.Vars(r)
        prepare(w, r, vars["name"])
    }).Methods("POST")
    log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", 8080), router))
}

//using fast http
func _() {
    router := httprouter.New()
    router.GET("/", func(w *fasthttp.RequestCtx) {
        _, _ = fmt.Fprintf(w, "Hello!!!")
    })
    router.POST("/:name", func(w *fasthttp.RequestCtx) {
        prepareRequest(w, w.UserValue("name").(string))
    })
    log.Fatal(fasthttp.ListenAndServe(fmt.Sprintf(":%d", 8080), router.Handler))
}
//func prepare(w *fasthttp.RequestCtx, name string)
func prepare(w http.ResponseWriter, r *http.Request, name string) {
    //other part of the code and call to goroutine
    var urls []string
    //lets say all the url loaded, call the go routine func and wait for channel to respond and then proceed with the response of all url
    results := callUrls(urls) //there are 10 urls atleast to call simultaneously for each request everytime
    process(w, results)
}

type Response struct {
    status int
    url    string
    body   string
}
func callUrls(urls []string) []*Response {
    ch := make(chan *Response, len(urls))
    for _, url := range urls {
        go func(url string) {
            //http post on url,
            //base on status code of url call, add to status code
            //some thing like
            req, err := http.NewRequest("POST", url, bytes.NewBuffer(somePostData))
            req.Header.Set("Content-Type", "application/json")
            req.Close = true
            client := &http.Client{
                Timeout: time.Duration(time.Duration(100) * time.Millisecond),
            }
            response, err := client.Do(req)

            //Using fast http client
            /*req := fasthttp.AcquireRequest()
            req.SetRequestURI(url)
            req.Header.Set("Content-Type", "application/json")
            req.Header.SetMethod("POST")
            req.SetBody(somePostData)
            response := fasthttp.AcquireResponse()
            client := &fasthttp.Client{
                ReadTimeout: time.Duration(time.Duration(100) * time.Millisecond),
            }
            err := client.Do(req, response)*/

            if err != nil {
                //do other thing with the response received
                _, _ = io.Copy(ioutil.Discard, response.Body)
                _ = response.Body.Close()
            } else {
                //success response
                _, _ = io.Copy(ioutil.Discard, response.Body)
                _ = response.Body.Close()
                body, _ := ioutil.ReadAll(response.Body)
                strBody := string(body)
                strBody = strings.Replace(strBody, "\r", "", -1)
                strBody = strings.Replace(strBody, "\n", "", -1)
            }
            // return to channel accordingly
            ch <- &Response{200, "url", "response body"}
        }(url)
    }

    var results []*Response
    for {
        select {
        case r := <-ch:
            results = append(results, r)
            if len(results) == len(urls) {
                //Done
                close(ch)
                return results
            }
        }
    }
}
//func process(w *fasthttp.RequestCtx, results []*Response)
func process(w http.ResponseWriter, results []*Response) {
    fmt.Println("response", "response body")
}
After serving a few requests on a multi-core CPU (around 4000-6000 requests coming in per second) I get a "too many open files" error, and response time and CPU usage go beyond acceptable limits. (Could the CPU usage be high because I convert bytes to a string a few times to replace a few characters? Any suggestions?)
I have seen other questions about closing the request/response body and/or setting sysctl or ulimit to higher values; I followed those suggestions, but I always end up with the same error.
Config on the server:
/etc/sysctl.conf net.ipv4.tcp_tw_recycle = 1
open files (-n) 65535
I need the code to respond in milliseconds, but it takes up to 50 seconds when CPU usage is high.
I have tried both net/http and fasthttp, but with no improvement. My Node.js implementation using the request npm package handles this perfectly on the same server. What would be the best way to handle those connections, or what changes to the code are needed?
You can use the following library:
Requests: a Go library to reduce the headache of making HTTP requests (20k req/s)
https://github.com/alessiosavi/Requests
It was developed to solve the "too many open files" problem that comes with parallel requests.
The idea is to allocate a list of requests, then send them with a configurable "parallel" factor that allows only N requests to run at a time.
Initialize the requests (you already have a set of URLs):
// This slice will contain the list of requests
var reqs []requests.Request
// N is the number of requests to run in parallel; to avoid "too many open files",
// N has to be lower than the ulimit threshold
var N int = 12
// Create the list of requests
for i := 0; i < 1000; i++ {
    // In this case, we init 1000 requests with the same URL, METHOD, BODY, HEADERS
    req, err := requests.InitRequest("https://127.0.0.1:5000", "GET", nil, nil, true)
    if err != nil {
        // Request is not compliant and will not be added to the list
        log.Println("Skipping request [", i, "]. Error: ", err)
    } else {
        // If no error occurs, append the request to the list of requests that we need to send
        reqs = append(reqs, *req)
    }
}
At this point, we have a list that contains the requests that have to be sent.
Let's send them in parallel!
// This slice will contain the responses for the given requests
var response []datastructure.Response
// Send the requests, with N requests running in parallel
response = requests.ParallelRequest(reqs, N)

// Print the responses
for i := range response {
    // Dump is a method that prints every piece of information related to the response
    log.Println("Request [", i, "] -> ", response[i].Dump())
    // Or use the data present in the response
    log.Println("Headers: ", response[i].Headers)
    log.Println("Status code: ", response[i].StatusCode)
    log.Println("Time elapsed: ", response[i].Time)
    log.Println("Error: ", response[i].Error)
    log.Println("Body: ", string(response[i].Body))
}
You can find example usage in the example folder of the repository.
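The "run only N at a time" idea described above can also be sketched with just the standard library, using a shared http.Client and a buffered channel as a counting semaphore. This is a generic pattern, not this library's implementation; the URL and the limit of 12 are placeholders:

package main

import (
    "io"
    "log"
    "net/http"
    "sync"
)

// fetchAll issues the given requests with at most `limit` in flight at once,
// reusing a single http.Client so connections are pooled instead of leaked.
func fetchAll(client *http.Client, urls []string, limit int) {
    sem := make(chan struct{}, limit) // counting semaphore
    var wg sync.WaitGroup

    for _, u := range urls {
        wg.Add(1)
        sem <- struct{}{} // acquire a slot; blocks when `limit` requests are in flight
        go func(u string) {
            defer wg.Done()
            defer func() { <-sem }() // release the slot

            resp, err := client.Get(u)
            if err != nil {
                log.Println(u, err)
                return
            }
            // Drain and close the body so the connection can be reused.
            io.Copy(io.Discard, resp.Body)
            resp.Body.Close()
        }(u)
    }
    wg.Wait()
}

func main() {
    client := &http.Client{}
    fetchAll(client, []string{"http://example.com/"}, 12)
}

Reusing one http.Client matters here: it pools connections, whereas creating a new client per goroutine (as in the question) leaves sockets lingering and contributes to the "too many open files" error.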
SPOILER:
I'm the author of this little library

How to close HTTP/2 client stream when server closes request stream?

I created an HTTP/2 client and server program using Go's net/http library for streaming request/response between client and server. I took the reference code from https://github.com/golang/go/issues/13444#issuecomment-161115822.
Server code
func handle_stream_request(w http.ResponseWriter, r *http.Request) {
    buf := make([]byte, 1024)
    i := 0
    for {
        i = i + 1
        if i == 10 {
            break
        }
        len, _ := r.Body.Read(buf)
        response_str := "Server ACK for: " + string(buf[:len])
        w.Write([]byte(response_str))
        if f, ok := w.(http.Flusher); ok {
            f.Flush()
        }
    }
}
Client Code
func send_stream_request(client *http.Client) {
    pr, pw := io.Pipe()
    req, _ := http.NewRequest("PUT", "https://localhost:8000/", ioutil.NopCloser(pr))
    var res *http.Response
    go func() {
        for {
            time.Sleep(2 * time.Second)
            s := "Client ping#" + get_time()
            pw.Write([]byte(s))
            if res != nil {
                buf := make([]byte, 1024)
                len, _ := res.Body.Read(buf)
                log.Printf("Response is: %s", string(buf[:len]))
            }
        }
    }()
    go func() {
        response, _ := client.Do(req)
        res = response
        log.Printf("\n Got: %#v", res)
    }()
    select {}
}
What did you expect to see?
After 10 requests from the client, the client program should exit successfully.
What did you see instead?
The client program blocks writing to the pipe at pw.Write([]byte(s)) after the server has finished the handler function, because there is no reader left to read the data on the pipe.
I am not able to understand how I can stop such a client program from hanging. What signal does the server send to the client to indicate that the request stream has been closed and that it shouldn't try to write more data to the pipe?

Handling request with chunked transfer-encoding

Does Go's net/http package support requests with chunked transfer-encoding? So far I have been able to use the Hijacker interface (https://golang.org/src/net/http/server.go?s=6173:6875#L156) to at least keep the connection open and receive the full chunked request, but I am not yet parsing the chunks and suspect I may be going down the wrong path with this.
From https://golang.org/src/net/http/httputil/httputil.go?s=688:732#L10, I see there is a chunked reader, but it appears to be for internal use.
Essentially, I'm trying to accept an HTTP PUT with 'chunked' transfer-encoding and send it off to a backend server 'on-the-fly' (i.e. without buffering the full request in golang). I have no control over the upstream request. Is there a recommended way to handle such a request, or is Hijacker the way to do it?
The net/http client and server transparently read and write chunked bodies.
To accept a chunked request and send it to another HTTP server, pass the server request body as the client request body. Here's how to forward the body to another server as a PUT:
func handler(w http.ResponseWriter, r *http.Request) {
    creq, err := http.NewRequest("PUT", url, r.Body)
    if err != nil {
        // handle error
    }
    if ct := r.Header.Get("Content-Type"); ct != "" {
        creq.Header.Set("Content-Type", ct)
    }
    cresp, err := http.DefaultClient.Do(creq)
    if err != nil {
        // handle error
    }
    // ... do something with cresp
}
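If "do something with cresp" means relaying the backend's answer to the original caller, one way to complete the handler is to stream the response body back with io.Copy. A sketch, assuming the same url variable as above; the status-code relaying and early returns are my additions, not part of the original answer:

func handler(w http.ResponseWriter, r *http.Request) {
    creq, err := http.NewRequest("PUT", url, r.Body)
    if err != nil {
        // handle error
        return
    }
    if ct := r.Header.Get("Content-Type"); ct != "" {
        creq.Header.Set("Content-Type", ct)
    }
    cresp, err := http.DefaultClient.Do(creq)
    if err != nil {
        // handle error
        return
    }
    defer cresp.Body.Close()

    // Relay the backend's status and stream its body straight back to the
    // original client; io.Copy never buffers the whole response in memory.
    w.WriteHeader(cresp.StatusCode)
    if _, err := io.Copy(w, cresp.Body); err != nil {
        // handle error (the response is already partially written at this point)
    }
}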
If you want to copy to a file, then io.Copy the request body to the file.
func handler(w http.ResponseWriter, r *http.Request) {
    f, err := os.Create("fname")
    if err != nil {
        // handle error
    }
    defer f.Close()
    _, err = io.Copy(f, r.Body)
    if err != nil {
        // handle error
    }
    ...
}
These snippets copy the body 'on the fly'.

Detect gzip encoding to manually decompress response, but 'Content-Encoding' header missing

I am using the net/http library in Go to make an HTTP GET request. In the response, I get 12 headers, but when I run the exact same query through Postman, I get 16 headers. One of the missing ones is Content-Encoding. I understand this must be a CORS issue.
But since I have not set the header Accept-Encoding: gzip in my request, and I am still getting gzip encoding in the response, the Go transport is not automatically decompressing the response for me. So I need to be able to manually detect the encoding and then decompress it, but I cannot detect it if the Content-Encoding header is missing from the response.
Here is my code where I try to do this:
func calcDistanceAndDurationWithUberApi(originLat float64, originLon float64, destinationLat float64, destinationLon float64) (float64, float64, error) {
    endpoint := "https://api.uber.com/v1.2/estimates/price"
    parameters := fmt.Sprintf("?start_latitude=%v&start_longitude=%v&end_latitude=%v&end_longitude=%v", originLat, originLon, destinationLat, destinationLon)
    req, err := http.NewRequest("GET", endpoint+parameters, nil)
    if err != nil {
        return 0, 0, err
    }
    req.Header.Add("Authorization", "Token "+getUberApiKey())
    req.Header.Add("Accept-Language", "en_US")
    req.Header.Add("Content-Type", "application/json")

    httpClient := &http.Client{}
    resp, err := httpClient.Do(req)
    if err != nil {
        return 0, 0, err
    }
    if resp.StatusCode != 200 {
        return 0, 0, errors.NotFound("Response: %v", resp.StatusCode)
    }
    defer resp.Body.Close()

    pretty.Println("- REQUEST: ")
    pretty.Println(req)

    // Check if server sent gzipped response. Decompress if yes.
    var respReader io.ReadCloser
    switch resp.Header.Get("Content-Encoding") {
    case "gzip":
        fmt.Println("Content-Encoding is gzip")
        respReader, err = gzip.NewReader(resp.Body)
        defer respReader.Close()
    default:
        fmt.Println("Content-Encoding is Not gzip")
        respReader = resp.Body
    }

    pretty.Println("- RESPONSE HEADER: ")
    pretty.Println(resp.Header)
    pretty.Println("- RESPONSE BODY: ")
    pretty.Println(respReader)
    return 0, 0, nil
}
The response status is '200 OK'. Here is the output (Response):
- RESPONSE HEADER:
http.Header{
    "Content-Language": {"en"},
    "Cache-Control": {"max-age=0"},
    "X-Uber-App": {"uberex-nonsandbox", "optimus"},
    "Strict-Transport-Security": {"max-age=604800", "max-age=2592000"},
    "X-Content-Type-Options": {"nosniff"},
    "Date": {"Fri, 19 May 2017 07:52:17 GMT"},
    "Content-Geo-System": {"wgs-84"},
    "Connection": {"keep-alive"},
    "X-Frame-Options": {"SAMEORIGIN"},
    "X-Xss-Protection": {"1; mode=block"},
    "Server": {"nginx"},
    "Content-Type": {"application/json"},
}
- RESPONSE BODY:
&http.gzipReader{
body: &http.bodyEOFSignal{
body: &http.body{
src: &internal.chunkedReader{
r: &bufio.Reader{
buf: {0x48, 0x54, .......... }
I gave in to the stubbornness of the Uber API and added another request header, req.Header.Add("Accept-Encoding", "gzip").
Now I am getting the response header "Content-Encoding": "gzip", although I am still getting an undecipherable response body, but that's beyond the scope of this question.
If you don't disable compression [1], and you don't manually request compression with Accept-Encoding: gzip, then what I call "automatic mode" is used. In automatic mode, Go automatically adds Accept-Encoding: gzip, and if the server responds with Content-Encoding: gzip, Go wraps the response body in a gzip reader and removes the Content-Encoding and Content-Length response headers [2]. I disagree with this practice, as the end user is essentially being lied to about what the true response was. Contrast this with cURL, which gives you the pure response, regardless of what you do:
PS C:\> curl -v --compressed https://github.com/manifest.json
< content-encoding: gzip
< content-length: 345
To deal with this, I wrote a wrapper for http.Transport:
package mech

import (
    "compress/gzip"
    "io"
    "net/http"
    "strings"
)

type Transport struct{ http.Transport }

func (t Transport) RoundTrip(req *http.Request) (*http.Response, error) {
    if !t.DisableCompression {
        req.Header.Set("Accept-Encoding", "gzip")
    }
    res, err := t.Transport.RoundTrip(req)
    if err != nil {
        return nil, err
    }
    if strings.EqualFold(res.Header.Get("Content-Encoding"), "gzip") {
        gz, err := gzip.NewReader(res.Body)
        if err != nil {
            return nil, err
        }
        res.Body = readCloser{gz, res.Body}
    }
    return res, nil
}

type readCloser struct {
    io.Reader
    io.Closer
}
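A minimal usage sketch of the wrapper above (my addition; the import path and target URL are placeholders):

package main

import (
    "fmt"
    "io"
    "log"
    "net/http"

    "example.com/yourmodule/mech" // placeholder import path for the package above
)

func main() {
    // Use the wrapper as the client's transport; the body arrives decompressed,
    // while the Content-Encoding and Content-Length headers are left untouched.
    client := &http.Client{Transport: &mech.Transport{}}

    res, err := client.Get("https://github.com/manifest.json")
    if err != nil {
        log.Fatal(err)
    }
    defer res.Body.Close()

    fmt.Println("Content-Encoding:", res.Header.Get("Content-Encoding"))
    body, err := io.ReadAll(res.Body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(body), "bytes decoded")
}

Unlike automatic mode, the response headers still reflect what the server actually sent, which is the point of the wrapper.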
[1] https://golang.org/pkg/net/http#Transport.DisableCompression
[2] https://github.com/golang/go/blob/go1.16.5/src/net/http/transport.go#L2186-L2192
