I am developing an API that calls client URLs using the net/http package. Between 1 and 8 URLs are called concurrently in goroutines for each incoming request (a POST call), depending on user country/OS. The app works fine at a low rate of around 1000-1500 requests per second, but when scaled to 3k requests per second memory rises sharply, even if only 1 client URL is called, and the app stops responding after a few minutes (response time well above 50 sec). I am using the native net/http package along with the gorilla/mux router. Other questions on this issue say to close the response body, which I have done:
req, err := http.NewRequest("POST", "client_post_url", bytes.NewBuffer(requestBody))
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Connection", "Keep-Alive")
response, err := common.Client.Do(req)
status := 0
if err != nil {
	// handle and return
}
defer response.Body.Close() //used with/without io.Copy
status = response.StatusCode
body, _ := ioutil.ReadAll(response.Body)
_, err = io.Copy(ioutil.Discard, response.Body)
I need to reuse connections, so I have made the http client and transport global variables, initialized in an init method like this:
common.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DialContext: (&net.Dialer{
//Timeout: time.Duration(300) * time.Millisecond,
KeepAlive: 30 * time.Second,
}).DialContext,
//ForceAttemptHTTP2: true,
DisableKeepAlives: false,
//MaxIdleConns: 0,
//IdleConnTimeout: 0,
//TLSHandshakeTimeout: time.Duration(300) * time.Millisecond,
//ExpectContinueTimeout: 1 * time.Second,
}
common.Client = &http.Client{
Timeout: time.Duration(300) * time.Millisecond,
Transport: common.Transport,
}
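For reference, net/http keeps only two idle connections per host by default (http.DefaultMaxIdleConnsPerHost is 2), which matters a lot when trying to reuse connections at thousands of requests per second. A sketch of the same transport with the idle-pool knobs set explicitly; the values here are purely illustrative:

common.Transport = &http.Transport{
	TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	DialContext: (&net.Dialer{
		KeepAlive: 30 * time.Second,
	}).DialContext,
	// Without these, at most 2 idle connections per host are kept, so under
	// load most requests open a brand-new connection instead of reusing one.
	MaxIdleConns:        1000,             // illustrative
	MaxIdleConnsPerHost: 100,              // illustrative
	IdleConnTimeout:     90 * time.Second, // illustrative
}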
I have read that keep-alive can cause memory to leak, and I have tried a few combinations of disabling keep-alive and setting the Close flag on the request, but nothing seems to work. Also, if I don't make any http call and instead use time.Sleep(300 * time.Millisecond) in the goroutine that calls each URL concurrently, the app works without any leak.
So I am fairly sure it has something to do with the http client: under high load, connections are not released or not reused properly.
What should my approach be here?
Would creating a custom server and a custom handler type to accept and route requests work, as mentioned in several articles on the C10K approach?
I can share the sample code with all details if required; above I have only included the part where I think the issue lies.
Here is representative code:
main.go
package main
import (
"./common"
"bytes"
"crypto/tls"
"fmt"
"github.com/gorilla/mux"
"io"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"net/http/pprof"
"os"
"runtime"
"strconv"
"sync"
"time"
)
func init() {
//Get Any command line argument passed
args := os.Args[1:]
numCPU := runtime.NumCPU()
if len(args) > 1 {
numCPU, _ = strconv.Atoi(args[0])
}
common.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DialContext: (&net.Dialer{
//Timeout: time.Duration() * time.Millisecond,
KeepAlive: 30 * time.Second,
}).DialContext,
//ForceAttemptHTTP2: true,
DisableKeepAlives: false,
//MaxIdleConns: 0,
//IdleConnTimeout: 0,
//TLSHandshakeTimeout: time.Duration(300) * time.Millisecond,
//ExpectContinueTimeout: 1 * time.Second,
}
common.Client = &http.Client{
Timeout: time.Duration(300) * time.Millisecond,
Transport: common.Transport,
}
runtime.GOMAXPROCS(numCPU)
rand.Seed(time.Now().UTC().UnixNano())
}
func main() {
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, _ = fmt.Fprintf(w, "Hello!!!")
})
router.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
prepareRequest(w, r, vars["name"])
}).Methods("POST")
// Register pprof handlers
router.HandleFunc("/debug/pprof/", pprof.Index)
router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
router.HandleFunc("/debug/pprof/profile", pprof.Profile)
router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
router.HandleFunc("/debug/pprof/trace", pprof.Trace)
routerMiddleWare := http.TimeoutHandler(router, 500*time.Millisecond, "Timeout")
srv := &http.Server{
Addr: "0.0.0.0:" + "80",
/*ReadTimeout: 500 * time.Millisecond,
WriteTimeout: 500 * time.Millisecond,
IdleTimeout: 10 * time.Second,*/
Handler: routerMiddleWare,
}
log.Fatal(srv.ListenAndServe())
}
func prepareRequest(w http.ResponseWriter, r *http.Request, name string) {
//other part of the code and call to goroutine
var urls []string
results, s, c := callUrls(urls)
finalCall(w, results, s, c)
}
type Response struct {
Status int
Url string
Body string
}
func callUrls(urls []string) ([]*Response, []string, []string) {
var wg sync.WaitGroup
wg.Add(len(urls))
ch := make(chan func() (*Response, string, string), len(urls))
for _, url := range urls {
go func(url string) {
//decide if request is valid for client to make http call using country/os
isValid := true //assuming url to be called
if isValid {
//make post call
//request body have many more paramter, just sample included.
//instead of creating new request, time.Sleep for 300ms doesn't cause any memory leak.
req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte(`{"body":"param"}`)))
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Connection", "Keep-Alive")
//req.Close = true
response, err := common.Client.Do(req)
if err != nil {
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 500, Url: url, Body: ""}, "error", "500"
}
return
}
defer response.Body.Close()
body, _ := ioutil.ReadAll(response.Body)
_, err = io.Copy(ioutil.Discard, response.Body)
//Close the body, forced this
//Also tried without defer, and only without the following line
response.Body.Close()
//do something with response body replace a few string etc.
//and return
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 200, Url: url, Body: string(body)}, "success", "200"
}
} else {
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 500, Url: url, Body: ""}, "invalid", "500"
}
}
}(url)
}
wg.Wait()
var (
results []*Response
msg []string
status []string
)
for {
r, x, y := (<-ch)()
if r != nil {
results = append(results, r)
msg = append(msg, x)
status = append(status, y)
}
if len(results) == len(urls) {
return results, msg, status
}
}
}
func finalCall(w http.ResponseWriter, results []*Response, msg []string, status []string){
fmt.Println("response", "response body", results, msg, status)
}
vars.go
package common
import (
"net/http"
)
var (
//http client
Client *http.Client
//http Transport
Transport *http.Transport
)
pprof: profiled the app with 4 client URLs at an average of around 2500 qps.
top output after 2 minutes: (screenshot omitted)
Without calling the client URLs, i.e. keeping isValid = false and using time.Sleep(300 * time.Millisecond), no leak happens.
This code is not leaking.
To demonstrate, let's update it slightly** so the post is reproducible.
main.go
package main
import (
"bytes"
"crypto/tls"
_ "expvar"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
_ "net/http/pprof"
"os"
"runtime"
"strconv"
"sync"
"time"
"github.com/gorilla/mux"
)
var (
//http client
Client *http.Client
//http Transport
Transport *http.Transport
)
func init() {
go http.ListenAndServe("localhost:6060", nil)
//Get Any command line argument passed
args := os.Args[1:]
numCPU := runtime.NumCPU()
if len(args) > 1 {
numCPU, _ = strconv.Atoi(args[0])
}
Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DialContext: (&net.Dialer{
//Timeout: time.Duration() * time.Millisecond,
KeepAlive: 30 * time.Second,
}).DialContext,
//ForceAttemptHTTP2: true,
DisableKeepAlives: false,
//MaxIdleConns: 0,
//IdleConnTimeout: 0,
//TLSHandshakeTimeout: time.Duration(300) * time.Millisecond,
//ExpectContinueTimeout: 1 * time.Second,
}
Client = &http.Client{
// Timeout: time.Duration(300) * time.Millisecond,
Transport: Transport,
}
runtime.GOMAXPROCS(numCPU)
rand.Seed(time.Now().UTC().UnixNano())
}
func main() {
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, _ = fmt.Fprintf(w, "Hello!!!")
})
router.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
prepareRequest(w, r, vars["name"])
}).Methods("POST", "GET")
// Register pprof handlers
// router.HandleFunc("/debug/pprof/", pprof.Index)
// router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
// router.HandleFunc("/debug/pprof/profile", pprof.Profile)
// router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
// router.HandleFunc("/debug/pprof/trace", pprof.Trace)
routerMiddleWare := http.TimeoutHandler(router, 500*time.Millisecond, "Timeout")
srv := &http.Server{
Addr: "localhost:8080",
/*ReadTimeout: 500 * time.Millisecond,
WriteTimeout: 500 * time.Millisecond,
IdleTimeout: 10 * time.Second,*/
Handler: routerMiddleWare,
}
log.Fatal(srv.ListenAndServe())
}
func prepareRequest(w http.ResponseWriter, r *http.Request, name string) {
// go func() {
// make(chan []byte) <- make([]byte, 10024)
// }()
//other part of the code and call to goroutine
var urls []string
urls = append(urls,
"http://localhost:7000/",
"http://localhost:7000/",
)
results, s, c := callUrls(urls)
finalCall(w, results, s, c)
}
type Response struct {
Status int
Url string
Body string
}
func callUrls(urls []string) ([]*Response, []string, []string) {
var wg sync.WaitGroup
wg.Add(len(urls))
ch := make(chan func() (*Response, string, string), len(urls))
for _, url := range urls {
go func(url string) {
//decide if request is valid for client to make http call using country/os
isValid := true //assuming url to be called
if isValid {
//make post call
//request body have many more paramter, just sample included.
//instead of creating new request, time.Sleep for 300ms doesn't cause any memory leak.
req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte(`{"body":"param"}`)))
if err != nil {
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 500, Url: url, Body: ""}, err.Error(), "500"
}
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Connection", "Keep-Alive")
//req.Close = true
response, err := Client.Do(req)
if err != nil {
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 500, Url: url, Body: ""}, err.Error(), "500"
}
return
}
defer response.Body.Close()
body, _ := ioutil.ReadAll(response.Body)
io.Copy(ioutil.Discard, response.Body)
//Close the body, forced this
//Also tried without defer, and only without the following line
response.Body.Close()
//do something with response body replace a few string etc.
//and return
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 200, Url: url, Body: string(body)}, "success", "200"
}
} else {
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 500, Url: url, Body: ""}, "invalid", "500"
}
}
}(url)
}
wg.Wait()
var (
results []*Response
msg []string
status []string
)
for {
r, x, y := (<-ch)()
if r != nil {
results = append(results, r)
msg = append(msg, x)
status = append(status, y)
}
if len(results) == len(urls) {
return results, msg, status
}
}
}
func finalCall(w http.ResponseWriter, results []*Response, msg []string, status []string) {
fmt.Println("response", "response body", results, msg, status)
}
k/main.go
package main
import "net/http"
func main() {
y := make([]byte, 100)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write(y)
})
http.ListenAndServe(":7000", nil)
}
Install an additional visualization tool, and use ab to simulate some load; that will do for an intuitive demonstration.
go get -u github.com/divan/expvarmon
go run main.go &
go run k/main.go &
ab -n 50000 -c 2500 http://localhost:8080/y
# in a different window, for live preview
expvarmon -ports=6060 -i 500ms
At that point, read the output of expvarmon; a live view would show the values oscillating as the GC actively does its work.
While the app is loaded, memory is being consumed; wait for the server to release its connections and for the GC to clean them up.
You can then see that memstats.Alloc, memstats.HeapAlloc and memstats.HeapInuse are reduced again, as expected when the GC does its job and no leak exists.
If you check go tool pprof -inuse_space -web http://localhost:6060/debug/pprof/heap right after ab has run, it shows the app using 177MB of memory.
Most of it, 102MB, is used by net/http.Transport.getConn.
Your handler accounts for 1MB; the rest is various required machinery.
If you took the same snapshot after the server had released its connections and the GC had run, you would see an even smaller graph (not demonstrated here).
Now let us generate a leak and see it using both tools again.
In the code, uncomment:
func prepareRequest(w http.ResponseWriter, r *http.Request, name string) {
go func() {
make(chan []byte) <- make([]byte, 10024)
}()
//...
Restart the apps (press q in expvarmon, although that is not required):
go get -u github.com/divan/expvarmon
go run main.go &
go run k/main.go &
ab -n 50000 -c 2500 http://localhost:8080/y
# in a different window, for live preview
expvarmon -ports=6060 -i 500ms
In expvarmon you can see the same behavior, only the numbers have changed; at rest, after the GC has run, the app still holds a lot of memory, far more than an empty Go HTTP server would, as a point of comparison.
Screenshot the heap again: it shows that your handler is now consuming most of the memory, ~450MB. Notice the arrows: there are 452MB of 10KB allocations and 4.50MB of 96B allocations; they correspond respectively to the []byte slices being pushed into the chan []byte.
Finally, you can check the stack traces to look for dead goroutines, and thus leaking memory: open http://localhost:6060/debug/pprof/goroutine?debug=1
goroutine profile: total 50012
50000 # 0x43098f 0x4077fa 0x4077d0 0x4074bb 0x76b85d 0x45d281
# 0x76b85c main.prepareRequest.func1+0x4c /home/mh-cbon/gow/src/test/oom/main.go:101
4 # 0x43098f 0x42c09a 0x42b686 0x4c3a3b 0x4c484b 0x4c482c 0x57d94f 0x590d79 0x6b4c67 0x5397cf 0x53a51d 0x53a754 0x6419ef 0x6af18d 0x6af17f 0x6b5f33 0x6ba4fd 0x45d281
# 0x42b685 internal/poll.runtime_pollWait+0x55 /home/mh-cbon/.gvm/gos/go1.12.7/src/runtime/netpoll.go:182
# 0x4c3a3a internal/poll.(*pollDesc).wait+0x9a /home/mh-cbon/.gvm/gos/go1.12.7/src/internal/poll/fd_poll_runtime.go:87
// more...
It tells us that the program is hosting 50,012 goroutines, then lists them grouped by file position, where the first number is the count of running instances, 50,000 in the first group of this example, followed by the stack trace that led to each goroutine's creation.
You can see there is a bunch of system-level stuff which, in your case, you should not worry much about.
Look for the goroutines that you believe should not be alive if your program were working as you think it should.
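If you prefer to watch this from code rather than from the pprof endpoint, a tiny helper that periodically logs the goroutine count is often enough to spot the trend; this is only a sketch and not part of the original program:

package main

import (
	"log"
	"net/http"
	"runtime"
	"time"
)

func main() {
	// Log the goroutine count every few seconds; a number that only ever
	// grows under steady load is a strong hint of leaked goroutines.
	go func() {
		for range time.Tick(5 * time.Second) {
			log.Printf("goroutines=%d", runtime.NumGoroutine())
		}
	}()
	// ...the rest of the server setup would go here...
	log.Fatal(http.ListenAndServe(":8080", nil))
}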
That said, your code is not satisfying overall; it could, and probably should, be improved with a thorough review of its allocations and its general design.
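As one illustration of what such a review might produce, here is a sketch of callUrls without the WaitGroup plus closure-channel indirection, with a bounded body read and a shared client passed in. It keeps the Response type from the post, collapses the three return slices into one (status and message already live in Response), and assumes the usual io, io/ioutil, net/http and strings imports:

func callUrls(client *http.Client, urls []string) []*Response {
	ch := make(chan *Response, len(urls))
	for _, url := range urls {
		go func(url string) {
			req, err := http.NewRequest("POST", url, strings.NewReader(`{"body":"param"}`))
			if err != nil {
				ch <- &Response{Status: 500, Url: url}
				return
			}
			req.Header.Set("Content-Type", "application/json")
			resp, err := client.Do(req)
			if err != nil {
				ch <- &Response{Status: 500, Url: url}
				return
			}
			defer resp.Body.Close()
			// Read the body once, with an upper bound, so the connection
			// can go back to the pool.
			body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
			ch <- &Response{Status: resp.StatusCode, Url: url, Body: string(body)}
		}(url)
	}
	results := make([]*Response, 0, len(urls))
	for range urls {
		results = append(results, <-ch)
	}
	return results
}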
** This is a summary of the changes applied to the original source code.
It adds a new program k/main.go to act as a backend server.
It adds an _ "expvar" import statement.
It starts, during the init phase, the standard HTTP server instance that pprof registers onto, with go http.ListenAndServe("localhost:6060", nil).
The client timeout (Timeout: time.Duration(300) * time.Millisecond) is disabled, otherwise the load test does not return 200s.
The server address is set to Addr: "localhost:8080".
The urls value created within prepareRequest is set to a static list of len=2.
It adds error checking for the req, err := http.NewRequest("POST", url, ...) call.
It drops error checking on io.Copy(ioutil.Discard, response.Body).
I have solved it by replacing the net/http package with fasthttp. Earlier I had not used it because I could not find a timeout method on the fasthttp client, but it turns out there is a DoTimeout method which times out the request after the specified duration.
Here is the updated code:
In vars.go: ClientFastHttp *fasthttp.Client
main.go
package main
import (
"./common"
"crypto/tls"
"fmt"
"github.com/gorilla/mux"
"github.com/valyala/fasthttp"
"log"
"math/rand"
"net"
"net/http"
"net/http/pprof"
"os"
"runtime"
"strconv"
"sync"
"time"
)
func init() {
//Get Any command line argument passed
args := os.Args[1:]
numCPU := runtime.NumCPU()
if len(args) > 1 {
numCPU, _ = strconv.Atoi(args[0])
}
common.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DialContext: (&net.Dialer{
//Timeout: time.Duration() * time.Millisecond,
KeepAlive: 30 * time.Second,
}).DialContext,
//ForceAttemptHTTP2: true,
DisableKeepAlives: false,
//MaxIdleConns: 0,
//IdleConnTimeout: 0,
//TLSHandshakeTimeout: time.Duration(300) * time.Millisecond,
//ExpectContinueTimeout: 1 * time.Second,
}
common.Client = &http.Client{
Timeout: time.Duration(300) * time.Millisecond,
Transport: common.Transport,
}
runtime.GOMAXPROCS(numCPU)
rand.Seed(time.Now().UTC().UnixNano())
}
func main() {
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, _ = fmt.Fprintf(w, "Hello!!!")
})
router.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
prepareRequest(w, r, vars["name"])
}).Methods("POST")
// Register pprof handlers
router.HandleFunc("/debug/pprof/", pprof.Index)
router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
router.HandleFunc("/debug/pprof/profile", pprof.Profile)
router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
router.HandleFunc("/debug/pprof/trace", pprof.Trace)
routerMiddleWare := http.TimeoutHandler(router, 500*time.Millisecond, "Timeout")
srv := &http.Server{
Addr: "0.0.0.0:" + "80",
/*ReadTimeout: 500 * time.Millisecond,
WriteTimeout: 500 * time.Millisecond,
IdleTimeout: 10 * time.Second,*/
Handler: routerMiddleWare,
}
log.Fatal(srv.ListenAndServe())
}
func prepareRequest(w http.ResponseWriter, r *http.Request, name string) {
//other part of the code and call to goroutine
var urls []string
results, s, c := callUrls(urls)
finalCall(w, results, s, c)
}
type Response struct {
Status int
Url string
Body string
}
func callUrls(urls []string) ([]*Response, []string, []string) {
var wg sync.WaitGroup
wg.Add(len(urls))
ch := make(chan func() (*Response, string, string), len(urls))
for _, url := range urls {
go func(url string) {
//decide if request is valid for client to make http call using country/os
isValid := true //assuming url to be called
if isValid {
//make post call
//request body have many more paramter, just sample included.
//instead of creating new request, time.Sleep for 300ms doesn't cause any memory leak.
req := fasthttp.AcquireRequest()
req.SetRequestURI(url)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Connection", "Keep-Alive")
req.Header.SetMethod("POST")
req.SetBody([]byte(`{"body":"param"}`))
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseRequest(req) // <- do not forget to release
defer fasthttp.ReleaseResponse(resp) // <- do not forget to release
//err := clientFastHttp.Do(req, response)
//endregion
t := time.Duration(300)
err := common.ClientFastHttp.DoTimeout(req, resp, t*time.Millisecond)
body := resp.Body()
if err != nil {
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 500, Url: url, Body: ""}, "error", "500"
}
return
}
/*defer response.Body.Close()
body, _ := ioutil.ReadAll(response.Body)
_, err = io.Copy(ioutil.Discard, response.Body)
//Close the body, forced this
//Also tried without defer, and only without the following line
response.Body.Close()*/
//do something with response body replace a few string etc.
//and return
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 200, Url: url, Body: string(body)}, "success", "200"
}
} else {
wg.Done()
ch <- func() (*Response, string, string) {
return &Response{Status: 500, Url: url, Body: ""}, "invalid", "500"
}
}
}(url)
}
wg.Wait()
var (
results []*Response
msg []string
status []string
)
for {
r, x, y := (<-ch)()
if r != nil {
results = append(results, r)
msg = append(msg, x)
status = append(status, y)
}
if len(results) == len(urls) {
return results, msg, status
}
}
}
func finalCall(w http.ResponseWriter, results []*Response, msg []string, status []string) {
fmt.Println("response", "response body", results, msg, status)
}
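Note that common.ClientFastHttp is used above but its initialization is not shown. A minimal sketch of how it could be set up in init; the field values are illustrative and fasthttp exposes more tuning knobs:

common.ClientFastHttp = &fasthttp.Client{
	TLSConfig: &tls.Config{InsecureSkipVerify: true},
	// Illustrative sizing; fasthttp keeps per-host connection pools.
	MaxConnsPerHost: 512,
	ReadTimeout:     300 * time.Millisecond,
}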
Related
I have created some Go functions that make HTTP GET calls to services that are out there on the internet and parse the results.
I am now working on writing test-cases for these functions.
In my test cases, I'm using the go package httptest to simulate calls to these external services. Below is my code. Error checking is purposefully removed for brevity. Here is the go-playground.
package main
import (
"fmt"
"io"
"context"
"net/http"
"net/http/httptest"
)
func handlerResponse() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"A":"B"}`))
})
}
func buildMyRequest(ctx context.Context, url string) *http.Request {
request, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
return request
}
func myPrint(response *http.Response) {
b := make([]byte, 60000)
for {
_, err := response.Body.Read(b)
if err == io.EOF {
break
}
}
fmt.Println(string(b))
}
func main() {
srv := httptest.NewServer(handlerResponse())
client := http.Client{}
myResponse1, _ := client.Do(buildMyRequest(context.Background(), srv.URL))
fmt.Println("myResponse1:")
myPrint(myResponse1)
myResponse2, _ := client.Do(buildMyRequest(context.Background(), srv.URL))
fmt.Println("myResponse2:")
myPrint(myResponse2)
}
This is the output it produces:
myResponse1:
{"A":"B"}
myResponse2:
{"A":"B"}
As you can see, I have created some dummy HTTP response data {"A":"B"} and when you send an HTTP request to srv.URL, it actually hits an ephemeral HTTP server which responds with the dummy data. Cool!
When you send the second HTTP request to srv.URL, it again responds with the same dummy data. But this is where my problem arises: I want the ephemeral HTTP server to return different data the second time ({"C":"D"}) and the third time ({"E":"F"}) it receives a request.
How can I change the first line of the main() function so that the server responds with my desired data on subsequent HTTP calls?
You could use a hack like the following (playground: here):
package main
import (
"fmt"
"io"
"context"
"net/http"
"net/http/httptest"
"sync"
)
type responseWriter struct{
resp map[int]string
count int
lock *sync.Mutex
}
func NewResponseWriter()*responseWriter{
r := new(responseWriter)
r.lock = new(sync.Mutex)
r.resp = map[int]string{
0: `{"E":"F"}`,
1: `{"A":"B"}`,
2: `{"C":"D"}`,
}
r.count = 0
return r
}
func (r *responseWriter)GetResp()string{
r.lock.Lock()
defer r.lock.Unlock()
r.count ++
return r.resp[r.count%3]
}
func handlerResponse(rr *responseWriter) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(rr.GetResp()))
})
}
func buildMyRequest(ctx context.Context, url string) *http.Request {
request, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
return request
}
func myPrint(response *http.Response) {
b := make([]byte, 60000)
for {
_, err := response.Body.Read(b)
if err == io.EOF {
break
}
}
fmt.Println(string(b))
}
func main() {
rr := NewResponseWriter()
srv := httptest.NewServer(handlerResponse(rr))
client := http.Client{}
myResponse1, err := client.Do(buildMyRequest(context.Background(), srv.URL))
if err != nil{
fmt.Println(err)
return
}
defer myResponse1.Body.Close()
fmt.Println("myResponse1:")
myPrint(myResponse1)
myResponse2, err := client.Do(buildMyRequest(context.Background(), srv.URL))
if err != nil{
fmt.Println(err)
return
}
defer myResponse2.Body.Close()
fmt.Println("myResponse2:")
myPrint(myResponse2)
}
After about 3~4 minutes, some errors show up in my log:
net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
I tried to find out where the time goes using httptrace:
httptrace.GetConn
httptrace.GotConn
I think it runs out of time before httptrace.GotConn, so the error happens:
request canceled while waiting for connection
My machine is fine, and this is my netstat output:
LAST_ACK 2
CLOSE_WAIT 7
ESTABLISHED 108
SYN_SENT 3
TIME_WAIT 43
package main
import (
"crypto/md5"
"encoding/hex"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/httptrace"
"os"
"sync"
"time"
)
var Client *http.Client = &http.Client{
Transport: &http.Transport{
DisableKeepAlives:true,
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 3 * time.Second, // connection timeout
KeepAlive: 10 * time.Second,
DualStack: true,
}).DialContext,
IdleConnTimeout: 120 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
Timeout: 500 * time.Millisecond,
}
func GenLogId() string {
h2 := md5.New()
rand.Seed(time.Now().Unix())
str := fmt.Sprintf("%d%d%d", os.Getpid(), time.Now().UnixNano(), rand.Int())
h2.Write([]byte(str))
uniqid := hex.EncodeToString(h2.Sum(nil))
return uniqid
}
func main() {
var (
wg sync.WaitGroup
maxParallel int = 50
parallelChan chan bool = make(chan bool, maxParallel)
)
for {
parallelChan <- true
wg.Add(1)
go func() {
defer func() {
wg.Done()
<-parallelChan
}()
testHttp2()
}()
}
wg.Wait()
}
func testHttp2() {
url := "http://10.33.108.39:11222/index.php"
req, _ := http.NewRequest("GET", url, nil)
uniqId := GenLogId()
trace := &httptrace.ClientTrace{
GetConn: func(hostPort string) {
fmt.Println("GetConn id:", uniqId, time.Now().UnixNano(), hostPort)
},
GotConn: func(connInfo httptrace.GotConnInfo) {
fmt.Println("GotConn id:", uniqId, time.Now().UnixNano(), connInfo.Conn.LocalAddr())
},
ConnectStart: func(network, addr string) {
fmt.Println("ConnectStart id:", uniqId, time.Now().UnixNano(), network, addr)
},
ConnectDone: func(network, addr string, err error) {
fmt.Println("ConnectDone id:", uniqId, time.Now().UnixNano(), network, addr, err)
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
resp, err := Client.Do(req)
if err != nil {
fmt.Println("err: id", uniqId, time.Now().UnixNano(), err)
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println("error", string(body))
}
return
}
You can reproduce it using my code. I am quite confused about this bug...
Thank you.
You need to increase the client Timeout value for your test.
net/http: request canceled (Client.Timeout exceeded while awaiting headers)
This means your Client.Timeout value is less than your server's response time, which can happen for many reasons (e.g. the server is busy, the CPU is overloaded, or simply the many requests per second generated here).
Here is a simple way to explain and reproduce it:
Run this server (which waits for 2 * time.Second then sends back the response):
package main
import (
"io"
"log"
"net/http"
"time"
)
func main() {
http.HandleFunc(`/`, func(w http.ResponseWriter, r *http.Request) {
log.Println("wait a couple of seconds ...")
time.Sleep(2 * time.Second)
io.WriteString(w, `Hi`)
log.Println("Done.")
})
log.Println(http.ListenAndServe(":8080", nil))
}
Then run this client which times out in 1 * time.Second:
package main
import (
"io/ioutil"
"log"
"net/http"
"time"
)
func main() {
log.Println("HTTP GET")
client := &http.Client{
Timeout: 1 * time.Second,
}
r, err := client.Get(`http://127.0.0.1:8080/`)
if err != nil {
log.Fatal(err)
}
defer r.Body.Close()
bs, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Fatal(err)
}
log.Println("HTTP Done.")
log.Println(string(bs))
}
The output is (Client.Timeout exceeded while awaiting headers):
2019/10/30 11:05:08 HTTP GET
2019/10/30 11:05:09 Get http://127.0.0.1:8080/: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
exit status 1
Note:
You need to change these two settings accordingly (http.Transport.ResponseHeaderTimeout and http.Client.Timeout).
You have set ResponseHeaderTimeout: 60 * time.Second, while Client.Timeout is half a second.
If anyone wants to capture these errors, use:
os.IsTimeout(err) -> returns true when the context deadline was exceeded.
To capture a dial i/o timeout:
netErr, ok := err.(net.Error); ok && netErr.Timeout() -> returns true for a dial i/o timeout.
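A small self-contained sketch of that classification; errors.As is used so the *url.Error wrapping returned by the client is handled as well, and the target address is just an unroutable placeholder:

package main

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"os"
	"time"
)

func main() {
	client := &http.Client{Timeout: 500 * time.Millisecond}
	_, err := client.Get("http://10.255.255.1/") // unroutable, will time out
	if err == nil {
		fmt.Println("unexpected success")
		return
	}
	// true for Client.Timeout / context deadline exceeded
	if os.IsTimeout(err) {
		fmt.Println("timeout:", err)
	}
	// true for dial i/o timeouts and other net-level timeouts
	var netErr net.Error
	if errors.As(err, &netErr) && netErr.Timeout() {
		fmt.Println("net timeout:", err)
	}
}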
I have the following code
package main
import (
"bytes"
"fmt"
"github.com/gorilla/mux"
"log"
"net/http"
"time"
"io"
httprouter "github.com/fasthttp/router"
"github.com/valyala/fasthttp"
)
func main() {
router := mux.NewRouter().StrictSlash(true)
/*router := NewRouter()*/
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, _ = fmt.Fprintf(w, "Hello!!!")
})
router.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
prepare(w, r, vars["name"])
}).Methods("POST")
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", 8080), router))
}
//using fast http
func _() {
router := httprouter.New()
router.GET("/", func(w *fasthttp.RequestCtx) {
_, _ = fmt.Fprintf(w, "Hello!!!")
})
router.POST("/:name", func(w *fasthttp.RequestCtx) {
prepareRequest(w, w.UserValue("name").(string))
})
log.Fatal(fasthttp.ListenAndServe(fmt.Sprintf(":%d", 8080), router.Handler))
}
//func prepare(w *fasthttp.RequestCtx, name string)
func prepare(w http.ResponseWriter, r *http.Request, name string) {
//other part of the code and call to goroutine
var urls []string
//lets say all the url loaded, call the go routine func and wait for channel to respond and then proceed with the response of all url
results := callUrls(urls) //there are 10 urls atleast to call simultaneously for each request everytime
process(w, results)
}
type Response struct {
status int
url string
body string
}
func callUrls(urls []string) []*Response {
ch := make(chan *Response, len(urls))
for _, url := range urls {
go func(url string) {
//http post on url,
//base on status code of url call, add to status code
//some thing like
req, err := http.NewRequest("POST", url, bytes.NewBuffer(somePostData))
req.Header.Set("Content-Type", "application/json")
req.Close = true
client := &http.Client{
Timeout: time.Duration(time.Duration(100) * time.Millisecond),
}
response, err := client.Do(req)
//Using fast http client
/*req := fasthttp.AcquireRequest()
req.SetRequestURI(url)
req.Header.Set("Content-Type", "application/json")
req.Header.SetMethod("POST")
req.SetBody(somePostData)
response := fasthttp.AcquireResponse()
client := &fasthttp.Client{
ReadTimeout: time.Duration(time.Duration(100) * time.Millisecond),
}
err := client.Do(req, response)*/
if err != nil {
//do other thing with the response received
_, _ = io.Copy(ioutil.Discard, response.Body)
_ = response.Body.Close()
} else {
//success response
_, _ = io.Copy(ioutil.Discard, response.Body)
_ = response.Body.Close()
body, _:= ioutil.ReadAll(response.Body)
strBody := string(body)
strBody = strings.Replace(strBody, "\r", "", -1)
strBody = strings.Replace(strBody, "\n", "", -1)
}
// return to channel accordingly
ch <- &Response{200, "url", "response body"}
}(url)
}
var results []*Response
for {
select {
case r := <-ch:
results = append(results, r)
if len(results) == len(urls) {
//Done
close(ch)
return results
}
}
}
}
//func process(w *fasthttp.RequestCtx,results []*Response){
func process(w http.ResponseWriter, results []*Response){
fmt.Println("response", "response body")
}
After serving a few requests on a multi-core CPU (around 4000-6000 requests coming in per second) I get a "too many open files" error, and response time and CPU go beyond acceptable limits. (Could the CPU be high because I convert bytes to string a few times to replace a few characters? Any suggestion?)
I have seen other questions referring to closing the request/response body and/or raising sysctl or ulimit values; I followed those, but I always end up with the error.
Config on the server:
/etc/sysctl.conf net.ipv4.tcp_tw_recycle = 1
open files (-n) 65535
I need the code to respond in milliseconds, but it takes up to 50 sec when CPU is high.
I have tried both net/http and fasthttp with no improvement. My Node.js request npm package handles this perfectly on the same server. What is the best way to handle those connections, or what change in the code is needed for improvement?
You can use the following library:
Requests: a Go library to reduce the headache when making HTTP requests (20k req/s)
https://github.com/alessiosavi/Requests
It was developed to solve the "too many open files" problem that comes up when making many parallel requests.
The idea is to allocate a list of requests, then send them with a configurable "parallel" factor that allows only "N" requests to run at a time.
Initialize the requests (you already have a set of URLs):
// This array will contain the list of requests
var reqs []requests.Request
// N is the number of requests to run in parallel; to avoid "too many open files", N has to be lower than the ulimit threshold
var N int = 12
// Create the list of requests
for i := 0; i < 1000; i++ {
// In this case, we init 1000 request with same URL,METHOD,BODY,HEADERS
req, err := requests.InitRequest("https://127.0.0.1:5000", "GET", nil, nil, true)
if err != nil {
// Request is not compliant, and will not be added to the list
log.Println("Skipping request [", i, "]. Error: ", err)
} else {
// If no error occurs, we can append the request created to the list of request that we need to send
reqs = append(reqs, *req)
}
}
At this point, we have a list that contains the requests that have to be sent.
Let's send them in parallel!
// This array will contain the responses from the given requests
var response []datastructure.Response
// send the requests, running N of them in parallel
response = requests.ParallelRequest(reqs, N)
// Print the response
for i := range response {
// Dump is a method that print every information related to the response
log.Println("Request [", i, "] -> ", response[i].Dump())
// Or use the data present in the response
log.Println("Headers: ", response[i].Headers)
log.Println("Status code: ", response[i].StatusCode)
log.Println("Time elapsed: ", response[i].Time)
log.Println("Error: ", response[i].Error)
log.Println("Body: ", string(response[i].Body))
}
You can find example usage in the example folder of the repository.
SPOILER:
I'm the author of this little library
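Independent of that library, the usual cure for "too many open files" with plain net/http is to reuse a single http.Client for the whole process and to bound the number of in-flight requests with a semaphore. A minimal sketch of that pattern, assuming the usual imports (bytes, io, io/ioutil, net/http, sync, time); the limit of 100 is just an illustrative value:

var (
	sharedClient = &http.Client{Timeout: 100 * time.Millisecond}
	sem          = make(chan struct{}, 100) // at most 100 requests in flight
)

func postAll(urls []string, body []byte) {
	var wg sync.WaitGroup
	for _, url := range urls {
		wg.Add(1)
		go func(url string) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release it
			resp, err := sharedClient.Post(url, "application/json", bytes.NewReader(body))
			if err != nil {
				return
			}
			defer resp.Body.Close()
			io.Copy(ioutil.Discard, resp.Body) // drain so the connection can be reused
		}(url)
	}
	wg.Wait()
}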
I'm writing a small package which does a GET request to an external API every 2 seconds. It takes the value from this request and passes it into a channel. I have made this channel available to an http.Handler (chi router) which upgrades to a websocket, where the front-end will grab the value in real time. The panic output is many lines long, but I guess the most important part is this:
2018/11/14 16:47:55 http: response.WriteHeader on hijacked connection
2018/11/14 16:47:55 http: response.Write on hijacked connection
Aside from that I'm sure there is a better way of doing this. Any experienced Gophers out there have any pointers to help a noob such as myself improve this?
package currencyticker
import (
"bitbucket.org/special/api/config"
"encoding/json"
"fmt"
"github.com/go-chi/chi"
"github.com/go-chi/render"
"github.com/gorilla/websocket"
"github.com/leekchan/accounting"
"io/ioutil"
"log"
"math/big"
"net/http"
"time"
)
var (
ac = accounting.Accounting{Precision: 2}
from = "USD"
to = "EUR,SWK"
url = "https://min-api.currencyapi.com/data/price?fsym=" + from + "&tsyms=" + to
messages = make(chan float64)
)
var wsupgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool {
return true // Disable CORS for testing
},
}
// Config - init
type Config struct {
*config.Config
}
type result map[string]float64
// New - init the configs
func New(configuration *config.Config) *Config {
return &Config{configuration}
}
// Routes - api urls
func (config *Config) Routes() *chi.Mux {
router := chi.NewRouter()
router.Use(
render.SetContentType(render.ContentTypeHTML), // Set content-Type headers as application/json
)
router.Get("/", config.GetPrice) // subscribe to new tweets
return router
}
func (config *Config) GetPrice(w http.ResponseWriter, r *http.Request) {
conn, err := wsupgrader.Upgrade(w, r, nil)
if err != nil {
fmt.Println(fmt.Printf("Failed to set websocket upgrade: %+v ", err))
return
}
for {
time.Sleep(1 * time.Second)
price := <-messages
w, err := conn.NextWriter(websocket.TextMessage)
if err != nil {
fmt.Println("ws error", err)
}
currVal := ac.FormatMoneyBigFloat(big.NewFloat(price))
if _, err := w.Write([]byte(currVal)); err != nil {
fmt.Printf("w.Write() returned %v", err)
}
w.Close()
}
}
// start getting the price of ether as soon as they ap starts
func init() {
go startPollingPriceAPI()
}
// Go Routine to start polling
func startPollingPriceAPI() {
for {
time.Sleep(2 * time.Second)
go getPriceFromAPI()
}
}
func getPriceFromAPI() {
w := http.Client{
// Timeout: time.Second * 3,
}
req, _ := http.NewRequest(http.MethodGet, url, nil)
res, err := w.Do(req)
if err != nil {
log.Println("err getting price [req]: ", err)
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Println("err getting price [io-read]: ", err)
}
r := result{}
if jsonErr := json.Unmarshal(body, &r); jsonErr != nil {
log.Println("err getting price [json]: ", jsonErr)
}
fmt.Println("1 Dollar = €", r["EUR"])
messages <- r["EUR"]
}
I only want to support HTTP/2 for a new project; the client is not a browser, so it's not a problem if we don't support HTTP/1.x at all.
From what I see in golang.org/x/net/http2, I can use tls.Listen and pass the net.Conn to http2.Server.ServeConn.
But I'm a bit confused about how to use http2.Transport here; can anyone give me an example?
Thanks
UPDATE:
This is the server part, pretty simple, it's an echo server
package main
import (
"fmt"
"io"
"net"
"net/http"
"golang.org/x/net/http2"
)
func main() {
l, err := net.Listen("tcp4", ":1234")
panicIfNotNil(err)
s := &http2.Server{}
sopt := &http2.ServeConnOpts{
BaseConfig: &http.Server{},
Handler: http.HandlerFunc(handler),
}
for {
c, err := l.Accept()
panicIfNotNil(err)
go serve(s, sopt, c)
}
}
func serve(s *http2.Server, sopt *http2.ServeConnOpts, c net.Conn) {
defer c.Close()
s.ServeConn(c, sopt)
}
func handler(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor != 2 {
w.WriteHeader(500)
fmt.Fprintln(w, "Not HTTP/2")
return
}
f, ok := w.(http.Flusher)
if !ok {
w.WriteHeader(500)
fmt.Fprintln(w, "Not Flusher")
return
}
w.Header().Set("Content-Type", "application/octet-stream")
fmt.Fprintln(w, "Hello World, Echo Server")
buf := [1024]byte{}
for {
n, err := r.Body.Read(buf[:])
if err == io.EOF {
break
}
panicIfNotNil(err)
_, err = w.Write(buf[:n])
f.Flush()
panicIfNotNil(err)
}
}
func panicIfNotNil(err error) {
if err != nil {
panic(err)
}
}
tested with curl --http2-prior-knowledge http://127.0.0.1:1234 -d a=b -d c=d -d e=f
For the client part, I'm still trying; I will update this post again when I get something.
UPDATE:
For the sake of simplicity, I don't use TLS here.
UPDATE:
This is the client part
package main
import (
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"time"
"golang.org/x/net/http2"
)
func main() {
t := &http2.Transport{
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
return net.Dial(network, addr)
},
AllowHTTP: true,
}
c := &http.Client{
Transport: t,
}
pr, pw := io.Pipe()
req := &http.Request{
Method: "POST",
URL: mustUrl("http://127.0.0.1:1234/"),
Body: pr,
}
resp, err := c.Do(req)
panicIfNotNil(err)
defer resp.Body.Close()
if resp.StatusCode != 200 {
panic(fmt.Errorf("Server return non 200, %d", resp.StatusCode))
}
wchan := make(chan struct{})
go func() {
buf := [1024]byte{}
for {
n, err := resp.Body.Read(buf[:])
if err == io.EOF {
break
}
panicIfNotNil(err)
fmt.Printf("GOT DATA %s\n", string(buf[:n]))
}
close(wchan)
}()
time.Sleep(1 * time.Second)
pw.Write([]byte("hai AAA"))
time.Sleep(1 * time.Second)
pw.Write([]byte("hai BBB"))
time.Sleep(1 * time.Second)
pw.Write([]byte("hai CCC"))
time.Sleep(1 * time.Second)
pw.Write([]byte("hai CCC"))
time.Sleep(1 * time.Second)
pw.Close()
<-wchan
}
func mustUrl(s string) *url.URL {
r, err := url.Parse(s)
panicIfNotNil(err)
return r
}
func panicIfNotNil(err error) {
if err != nil {
panic(err)
}
}
But somehow it doesn't work.
You can see network traffic in https://imgur.com/EJV0uGI
After looking into Wireshark more closely I found the problem: it happens because the server didn't send any HEADERS frame, so the client cannot continue with more data. Just printing to the http.ResponseWriter doesn't ensure it is written to the network; it gets buffered instead, so we need to flush it explicitly.
This fixes the problem:
--- main.go 2018-07-25 22:31:44.092823590 +0700
+++ main2.go 2018-07-25 22:32:50.586179879 +0700
@@ -43,6 +43,9 @@
return
}
w.Header().Set("Content-Type", "application/octet-stream")
+ w.WriteHeader(200)
+ f.Flush()
+
fmt.Fprintln(w, "Hello World, Echo Server")
buf := [1024]byte{}