Can a TCP connection still get EAGAIN after receiving a FIN packet? - Go

This is my code. First, I initialize a TCP server:
type server struct {
	serverConns chan net.Conn
	addr        string
}

func (s *server) init() error {
	s.serverConns = make(chan net.Conn)
	l, err := net.Listen("tcp", ":8888")
	if err != nil {
		return err
	}
	s.addr = l.Addr().String()
	fmt.Println(s.addr)
	go func() {
		for {
			conn, err := l.Accept()
			if err != nil {
				panic(err)
			}
			s.serverConns <- conn
		}
	}()
	return nil
}
Then I create a TCP connection and close it from the server side. checkConnErr calls syscall.Read to read data from the connection. In my opinion, after the server closes the connection, the client receives the FIN packet and syscall.Read should return EOF. But in this case, syscall.Read returns EAGAIN, which suggests the connection is still alive.
func TestRemoteEOF(t *testing.T) {
	var s server
	require.Nil(t, s.init())
	// dial to server
	n, err := net.Dial("tcp", s.addr)
	require.Nil(t, err)
	// close connection from server
	serverConn := <-s.serverConns
	require.Nil(t, serverConn.Close())
	buf := make([]byte, 100)
	hit := time.Now()
	t.Logf("hit: %v", hit) // compared against the FIN arrival time in the capture
	err2 := checkConnErr(n, buf)
	require.Equal(t, io.EOF, err2)
}
I also checked with Wireshark: the hit time is later than the FIN arrival time. So what is the cause of this phenomenon?
[Wireshark captures: the FIN from the server arrives before the recorded hit timestamp]
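For reference, checkConnErr is not shown in the question. A minimal sketch of what it might look like (an assumption on my part), using the connection's syscall.RawConn to issue a single non-blocking syscall.Read; it needs the io, net, and syscall imports:

	// Hypothetical reconstruction of checkConnErr. Returning true from the
	// callback tells the runtime not to wait for readability, so a
	// syscall.EAGAIN from the non-blocking fd surfaces directly.
	func checkConnErr(conn net.Conn, buf []byte) error {
		rawConn, err := conn.(syscall.Conn).SyscallConn()
		if err != nil {
			return err
		}
		var readErr error
		ctrlErr := rawConn.Read(func(fd uintptr) bool {
			n, e := syscall.Read(int(fd), buf)
			if n == 0 && e == nil {
				readErr = io.EOF // zero bytes and no error means the peer sent FIN
			} else {
				readErr = e // nil, syscall.EAGAIN, or another errno
			}
			return true
		})
		if ctrlErr != nil {
			return ctrlErr
		}
		return readErr
	}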

Related

GoLang TCP Connection - Remote Network is down check

I am creating a GoLang application whose clients are Android phones. I am able to handle connections. If the user closes the Android application, the connection is dropped with EOF.
My problem is that if the client just turns off Wi-Fi, the connection stays alive.
Here is my code:
func main() {
	fmt.Println("Starting server...")
	listener, err := net.Listen("tcp", ":4406")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer listener.Close()
	manager := ClientManager{
		clients:   make(map[*Client]bool),
		broadcast: make(chan []byte),
		register:  make(chan *Client),
	}
	go manager.start()
	for {
		conn, err := listener.Accept() // don't shadow the listener or discard the error here
		if err != nil {
			fmt.Println(err)
			continue
		}
		client := &Client{socket: conn, data: make(chan []byte), uuid: conn.RemoteAddr().String()}
		manager.register <- client
		go manager.receive(client)
		go handleConnection(client)
	}
}
Handling connections:
func handleConnection(client *Client) {
	conn := client.socket
	defer conn.Close()
	notify := make(chan error)
	go func() {
		buf := make([]byte, 1024)
		for {
			n, err := conn.Read(buf)
			if err != nil {
				notify <- err
				return
			}
			if n > 0 {
				fmt.Printf("unexpected data: %s\n", buf[:n])
			}
		}
	}()
	for {
		select {
		case err := <-notify:
			if err != nil {
				fmt.Println("connection dropped message", err)
				return
			}
		case <-time.After(time.Second * 1):
			fmt.Println("timeout 1, still alive")
		}
	}
}
When the remote Wi-Fi is off (or the cable is removed), I want to disconnect the user. I tried reading a byte every second, and the read still succeeds; I sent a byte and it was sent as well.
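A half-open connection like this is expected TCP behavior: with nothing in flight, neither side notices the peer vanish. One common remedy (my suggestion, not part of the original post) is to enforce a read deadline so a silent peer turns into a timeout:

	// Sketch: require the client to send something (e.g. a heartbeat) at
	// least every 30 seconds; a silently vanished peer becomes a timeout.
	func readWithDeadline(conn net.Conn) error {
		buf := make([]byte, 1024)
		for {
			if err := conn.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil {
				return err
			}
			if _, err := conn.Read(buf); err != nil {
				return err // io.EOF, i/o timeout, or another error: disconnect the user
			}
		}
	}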

How to make net.Dial in Go reconnect if connection is lost?

I have an app in Go that connects to an XMPP host over TCP and then uses an xml.Decoder to speak XMPP. How can I make net.Dial reconnect if the TCP connection is dropped?
I am getting the following error on my error channel when the connection is dropped:
write tcp client:port->xmpp_server:5222: write: broken pipe. However, I'm not sure how to properly handle it in my Dial function to make it reconnect.
// package xmpp

// Conn represents a connection
type Conn struct {
	incoming *xml.Decoder
	outgoing net.Conn
	errchan  chan error
}

// SetErrorChannel sets the channel for handling errors
func (c *Conn) SetErrorChannel(channel chan error) {
	c.errchan = channel
}

// Dial dials an xmpp host
func Dial(host string) (*Conn, error) {
	c := new(Conn)
	var err error
	c.outgoing, err = net.Dial("tcp", host+":5222")
	if err != nil {
		log.Printf("Can't dial %s:5222: %s", host, err)
		return c, err
	}
	// TCP Keep Alive
	// Note: c.errchan is still nil here unless SetErrorChannel was called
	// before Dial, so a send on it would block forever on a keep-alive error.
	err = c.outgoing.(*net.TCPConn).SetKeepAlive(true)
	if err != nil {
		c.errchan <- err
	}
	err = c.outgoing.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second)
	if err != nil {
		c.errchan <- err
	}
	c.incoming = xml.NewDecoder(c.outgoing)
	log.Printf("Connected to: %s", c.outgoing.RemoteAddr())
	return c, nil
}
// In a separate package
func NewXMPPClient(config) (*Client, error) {
	errchannel := make(chan error)
	connection, err := xmpp.Dial(host)
	if err != nil {
		return nil, err
	}
	connection.SetErrorChannel(errchannel)
	// Do XMPP auth, receive messages, etc...
}
Figured it out: I just close the current TCP connection on any error from my error channel, then re-create both the TCP and XMPP (auth + listen) connections.
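As a sketch of that approach (assuming a Close method on Conn that closes the underlying net.Conn, which the original code doesn't show):

	// Reconnect loop: on any error reported on the channel, tear down the
	// TCP connection and dial + authenticate again from scratch.
	func maintainConnection(host string) {
		for {
			errchan := make(chan error)
			conn, err := xmpp.Dial(host)
			if err != nil {
				time.Sleep(5 * time.Second) // back off before retrying
				continue
			}
			conn.SetErrorChannel(errchan)
			// ... do XMPP auth and start the reader goroutines here ...
			<-errchan    // block until any read/write error (e.g. broken pipe)
			conn.Close() // hypothetical Close method; drops the broken connection
		}
	}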

Accept a persistent tcp connection in Golang Server

I am experimenting with Go and would like to create a TCP server which I can telnet to, send commands to, and receive responses from.
const (
	CONN_HOST = "localhost"
	CONN_PORT = "3333"
	CONN_TYPE = "tcp"
)

func main() {
	listener, err := net.Listen(CONN_TYPE, fmt.Sprintf("%s:%s", CONN_HOST, CONN_PORT))
	if err != nil {
		log.Panicln(err)
	}
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Panicln(err)
		}
		go handleRequest(conn)
	}
}

func handleRequest(conn net.Conn) {
	buffer := make([]byte, 1024)
	length, err := conn.Read(buffer)
	if err != nil {
		log.Panicln(err)
	}
	str := string(buffer[:length])
	fmt.Println(conn.RemoteAddr().String())
	fmt.Printf("Received command %d\t:%s\n", length, str)
	switch str {
	case "PING\r\n":
		sendResponse("PONG", conn)
	case "PUSH\r\n":
		sendResponse("GOT PUSH", conn)
	default:
		conn.Write([]byte(fmt.Sprintf("UNKNOWN_COMMAND: %s\n", str)))
	}
	conn.Close() // closes the connection
}

func sendResponse(res string, conn net.Conn) {
	conn.Write([]byte(res + "\n"))
}
The above snippet closes the connection every time, kicking me out of the telnet session. What I actually want is to keep the connection open for further I/O operations. If I simply remove the conn.Close(), the server appears to hang somewhere, as it never responds again.
The way I have resolved this is to have my handleRequest method loop endlessly, never exiting until it receives a QUIT\r\n message. Is this appropriate, or is there a better way of achieving it?
func handleRequest(conn net.Conn) {
	for {
		log.Println("Handling Request")
		buffer := make([]byte, 1024)
		length, err := conn.Read(buffer)
		if err != nil {
			log.Panicln(err)
		}
		str := string(buffer[:length])
		fmt.Println(conn.RemoteAddr().String())
		fmt.Printf("Received command %d\t:%s\n", length, str)
		switch str {
		case "PING\r\n":
			sendResponse("PONG", conn)
		case "PUSH\r\n":
			sendResponse("GOT PUSH", conn)
		case "QUIT\r\n":
			sendResponse("Goodbye", conn)
			conn.Close()
			return // without this, the next Read on the closed conn panics
		default:
			conn.Write([]byte(fmt.Sprintf("UNKNOWN_COMMAND: %s\n", str)))
		}
	}
}
Your second example with the loop is already what you want. You simply loop and read as long as you want (or until some read/write timeout or an external cancellation signal).
However, it still has an error in it:
TCP gives you a stream of bytes, and it is not guaranteed that one write from one side yields exactly one read on the other side with the same data length. This means that if the client writes PING\r\n, you could still receive only PI in the first read. You can fix that by using a bufio.Scanner and always reading up to the first newline.
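A sketch of that fix: bufio.Scanner's default line splitter reassembles a full line across reads and strips the trailing \r\n, so the commands are matched without their line endings:

	func handleRequest(conn net.Conn) {
		defer conn.Close()
		scanner := bufio.NewScanner(conn) // buffers until a complete line arrives
		for scanner.Scan() {
			str := scanner.Text() // "PING\r\n" arrives here as "PING"
			fmt.Printf("Received command: %s\n", str)
			switch str {
			case "PING":
				sendResponse("PONG", conn)
			case "PUSH":
				sendResponse("GOT PUSH", conn)
			case "QUIT":
				sendResponse("Goodbye", conn)
				return
			default:
				conn.Write([]byte(fmt.Sprintf("UNKNOWN_COMMAND: %s\n", str)))
			}
		}
	}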
Not sure if this is what you're looking for. Taken from the net/http implementation, wrapping your *net.TCPListener's Accept method:
tcpKeepAliveListener{listener.(*net.TCPListener)}
type tcpKeepAliveListener struct {
	*net.TCPListener
}

func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
	tc, err := ln.AcceptTCP()
	if err != nil {
		return
	}
	tc.SetKeepAlive(true)
	tc.SetKeepAlivePeriod(3 * time.Minute)
	return tc, nil
}
Refer: Link 1 & Link 2

How support concurrent connections with a UDP Server using go

I'm playing with my first basic UDP server and I'm wondering how to support concurrent connections. I think with my code I can only handle one datagram at a time. With a simple TCP server things seem clearer: you throw a goroutine at each connection to process the data. But here I'm quite lost. Thanks in advance.
func main() {
	ListenerUDP("127.0.0.1", 1111)
}

func ListenerUDP(ip string, port int) {
	buffer := make([]byte, 1024)
	log.Println("Listener Started!")
	addr := net.UDPAddr{
		Port: port,
		IP:   net.ParseIP(ip),
	}
	conn, err := net.ListenUDP("udp", &addr)
	if err != nil {
		log.Fatalf("Error Listening:%s\n", err.Error()) // Fatalf exits; a panic after it would be unreachable
	}
	defer conn.Close()
	for {
		_, remoteAddr, err := conn.ReadFromUDP(buffer[0:])
		if err != nil {
			log.Fatalf("Error:%s\n", err)
		}
		// Process data here? using a > go something()?
		fmt.Printf("Data:%s From:%v\n", buffer, remoteAddr)
	}
}
UDP is a connectionless protocol: hosts send packets without establishing a connection first.
To get multiple cores handling UDP packets in parallel, you might start a bunch of goroutines that each do the ReadFromUDP loop:
package main

import (
	"fmt"
	"net"
	"runtime"
)

func listen(connection *net.UDPConn, quit chan struct{}) {
	buffer := make([]byte, 1024)
	n, remoteAddr, err := 0, new(net.UDPAddr), error(nil)
	for err == nil {
		n, remoteAddr, err = connection.ReadFromUDP(buffer)
		// you might copy out the contents of the packet here, to
		// `var r myapp.Request`, say, and `go handleRequest(r)` (or
		// send it down a channel) to free up the listening
		// goroutine. you do *need* to copy then, though,
		// because you've only made one buffer per listen().
		fmt.Println("from", remoteAddr, "-", buffer[:n])
	}
	fmt.Println("listener failed - ", err)
	quit <- struct{}{}
}

func main() {
	addr := net.UDPAddr{
		Port: 12345,
		IP:   net.IP{127, 0, 0, 1},
	}
	connection, err := net.ListenUDP("udp", &addr)
	if err != nil {
		panic(err)
	}
	quit := make(chan struct{})
	for i := 0; i < runtime.NumCPU(); i++ {
		go listen(connection, quit)
	}
	<-quit // hang until an error
}
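Expanding the comment inside the read loop: if per-packet work is slow, copy the datagram out of the shared buffer and hand it to another goroutine. handlePacket is a hypothetical name:

	// Inside the read loop, instead of printing inline:
	//	pkt := make([]byte, n)
	//	copy(pkt, buffer[:n])
	//	go handlePacket(pkt, remoteAddr)

	// handlePacket owns its copy of the data, so the listener's buffer can
	// be reused immediately for the next ReadFromUDP.
	func handlePacket(pkt []byte, remoteAddr *net.UDPAddr) {
		fmt.Println("from", remoteAddr, "-", pkt)
	}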

How to reuse listener/connection ? Golang

I'm trying to connect a computer behind NAT to the internet through a 3rd-party server (aka a reverse connection). I'm listening on two ports: on one port (dstNet) the machine behind NAT connects, and on the other port internet clients connect.
The issue is that I don't know how to handle the disconnection of the machine behind NAT. Even if the machine connects again, the traffic is not sent/written anymore... I get [DEBUG] socks: Copied 0 bytes to client, which is my warning of course. Below is the code. It's quite long, but I can't find what to trim.
// Make a bridge between dstNet which is
// usually behind NAT and srcNet which is usually a client
// which wants to route the traffic through the NAT machine.
package main

import (
	"bufio"
	"errors"
	log "github.com/golang/glog"
	"io"
	"net"
	"time"
)

const (
	// listen on dstNet so that we can
	// create a connection with the NAT client
	dstNet = "0.0.0.0:9000"
	// listen on srcNet so that we can get traffic
	// to forward to dstNet
	srcNet = "0.0.0.0:9001"
)

var errCh = make(chan error, 1)

// make a channel to send the reverse connections
var lrCh = make(chan net.Conn, 1)

func listenDst() {
	// Listen on dstNet
	lr, err := net.Listen("tcp", dstNet)
	if err != nil {
		log.Error(err)
		errCh <- err
		return
	}
	// accept the connection
	for {
		lrConn, err := lr.Accept()
		if err != nil {
			log.Error(err)
			errCh <- err
			return
		}
		log.Errorf("sent connection")
		// lrConn.SetReadDeadline(time.Now().Add(10 * time.Second))
		lrCh <- lrConn
	}
}
func main() {
	go func() {
		for err := range errCh {
			if err != nil {
				panic(err)
			}
		}
	}()
	// listen for the nat server
	go listenDst()
	// listen for clients to connect
	l, err := net.Listen("tcp", srcNet)
	if err != nil {
		log.Error(err)
		panic(err)
	}
	// accept the connection
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Error(err)
			panic(err)
		}
		// serve the connection
		go func(conn net.Conn) {
			defer conn.Close()
			bufConn := bufio.NewReader(conn)
			dst := <-lrCh
			defer dst.Close()
			// Start proxying
			errCh2 := make(chan error, 2)
			go proxy("target", dst, bufConn, errCh2)
			go proxy("client", conn, dst, errCh2)
			// Wait
			var ei int
			for err = range errCh2 {
				switch {
				case err != nil && err.Error() == "no byte":
					log.Error(err)
				case err != nil && err.Error() == "use of closed network connection":
					// if the connection is closed we restart it.
					log.Error(err)
					// BUG() attempt to write again the bytes
				case err != nil:
					log.Error(err)
					errCh <- err
				}
				if ei == 1 {
					log.Errorf("done with errors")
					close(errCh2)
				}
				ei++
			}
		}(conn)
	}
}
// proxy is used to shuffle data from src to destination, and sends errors
// down a dedicated channel
func proxy(name string, dst io.Writer, src io.Reader, errCh2 chan error) {
	n, err := io.Copy(dst, src)
	// Log, and sleep. This is jank but allows the other side
	// to finish a pending copy
	log.Errorf("[DEBUG] socks: Copied %d bytes to %s", n, name)
	time.Sleep(10 * time.Millisecond)
	// Send any errors
	switch {
	case err != nil:
		log.Error(err)
		errCh2 <- err
	case n < 1:
		errCh2 <- errors.New("no byte")
	default:
		errCh2 <- nil
	}
}
The only time you can reuse a connection after an error is if it is a temporary condition.

	if err, ok := err.(net.Error); ok && err.Temporary() {
	}

If you are trying to proxy a TCP connection and there is any other error (checking for Temporary may not even be that useful), you need to drop the whole thing and start over. You have no idea what the state of the remote server is, or how many packets are in flight or lost, and it's only going to cause more difficult bugs the harder you try. (Tip: don't hide concurrency or timing problems with a sleep; that just makes them harder to find in the long run.)
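For example, an accept loop might retry only on temporary conditions and treat everything else as fatal (a sketch, not from the original answer):

	func acceptLoop(l net.Listener, handle func(net.Conn)) error {
		for {
			conn, err := l.Accept()
			if err != nil {
				if ne, ok := err.(net.Error); ok && ne.Temporary() {
					time.Sleep(100 * time.Millisecond) // transient: back off and retry
					continue
				}
				return err // permanent: drop the whole thing and start over
			}
			go handle(conn)
		}
	}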
Here is a much simpler proxy pattern for go if you want to reference it:
https://gist.github.com/jbardin/821d08cb64c01c84b81a
func Proxy(srvConn, cliConn *net.TCPConn) {
	// channels to wait on the close event for each connection
	serverClosed := make(chan struct{}, 1)
	clientClosed := make(chan struct{}, 1)
	go broker(srvConn, cliConn, clientClosed)
	go broker(cliConn, srvConn, serverClosed)
	// wait for one half of the proxy to exit, then trigger a shutdown of the
	// other half by calling CloseRead(). This will break the read loop in the
	// broker and allow us to fully close the connection cleanly without a
	// "use of closed network connection" error.
	var waitFor chan struct{}
	select {
	case <-clientClosed:
		// the client closed first and any more packets from the server aren't
		// useful, so we can optionally SetLinger(0) here to recycle the port
		// faster.
		srvConn.SetLinger(0)
		srvConn.CloseRead()
		waitFor = serverClosed
	case <-serverClosed:
		cliConn.CloseRead()
		waitFor = clientClosed
	}
	// Wait for the other connection to close.
	// This "waitFor" pattern isn't required, but gives us a way to track the
	// connection and ensure all copies terminate correctly; we can trigger
	// stats on entry and deferred exit of this function.
	<-waitFor
}
// This does the actual data transfer.
// The broker only closes the Read side.
func broker(dst, src net.Conn, srcClosed chan struct{}) {
	// We can handle errors in a finer-grained manner by inlining io.Copy (it's
	// simple, and we drop the ReaderFrom or WriterTo checks for
	// net.Conn->net.Conn transfers, which aren't needed). This would also let
	// us adjust buffersize.
	_, err := io.Copy(dst, src)
	if err != nil {
		log.Printf("Copy error: %s", err)
	}
	if err := src.Close(); err != nil {
		log.Printf("Close error: %s", err)
	}
	srcClosed <- struct{}{}
}
It turned out that I had to restart the listener, not just close the connection. I've modified the broker function to reset the dstNet listener if it can't write (i.e. writes 0 bytes) to src. I'm still not sure this is the right way to do it (closing the listener seems bad in a multi-connection scenario, since I guess it resets all the client connections dialing that address), but so far this is the best I could do to fix it.

	if n == 0 {
		lrNewCh <- 1
	}

Here is all the code. All the credit goes to @JimB.
// Make a bridge between dstNet which is
// usually behind NAT and srcNet which is usually a client
// which wants to route the traffic through the NAT machine.
package main

import (
	log "github.com/golang/glog"
	"io"
	"net"
)

// listen on dstNet so that we can
// create a connection with the NAT client
var dstNet *net.TCPAddr = &net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 9000}

// listen on srcNet so that we can get traffic
// to forward to dstNet
var srcNet *net.TCPAddr = &net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 9001}

var errCh = make(chan error, 1)

// make a channel to send the reverse connections
var lrCh = make(chan *net.TCPConn, 1)
var lrNewCh = make(chan int, 1)

func listenDst() {
	// Listen on dstNet
	lr, err := net.ListenTCP("tcp", dstNet)
	if err != nil {
		log.Error(err)
		errCh <- err
		return
	}
	// accept the connection
	for {
		lrConn, err := lr.AcceptTCP()
		if err != nil {
			log.Error(err)
			//errCh <- err
			//return
		}
		status := <-lrNewCh
		log.Errorf("status request is %v", status)
		if status == 1 {
			log.Errorf("we close and restart the listener and the connection")
			if err = lrConn.Close(); err != nil {
				log.Error(err)
			}
			if err = lr.Close(); err != nil {
				log.Error(err)
			}
			lr, err = net.ListenTCP("tcp", dstNet)
			if err != nil {
				log.Error(err)
				errCh <- err
				return
			}
			lrConn, err = lr.AcceptTCP()
			if err != nil {
				log.Error(err)
				errCh <- err
			}
		} else {
			log.Errorf("new connection on its way")
			lrCh <- lrConn
		}
		// default:
		// log.Errorf("accepting new connections")
	}
}
func main() {
	go func() {
		for err := range errCh {
			if err != nil {
				panic(err)
			}
		}
	}()
	// listen for the nat server
	go listenDst()
	// listen for clients to connect
	l, err := net.ListenTCP("tcp", srcNet)
	if err != nil {
		log.Error(err)
		panic(err)
	}
	// accept the connection
	for {
		conn, err := l.AcceptTCP()
		if err != nil {
			log.Error(err)
			panic(err)
		}
		// serve the connection
		go func(conn *net.TCPConn) {
			defer conn.Close()
			lrNewCh <- 0
			dst := <-lrCh
			defer dst.Close()
			proxy(dst, conn)
		}(conn)
	}
}
func proxy(srvConn, cliConn *net.TCPConn) {
	// channels to wait on the close event for each connection
	serverClosed := make(chan struct{}, 1)
	clientClosed := make(chan struct{}, 1)
	go broker(srvConn, cliConn, clientClosed)
	go broker(cliConn, srvConn, serverClosed)
	// wait for one half of the proxy to exit, then trigger a shutdown of the
	// other half by calling CloseRead(). This will break the read loop in the
	// broker and allow us to fully close the connection cleanly without a
	// "use of closed network connection" error.
	var waitFor chan struct{}
	select {
	case <-clientClosed:
		// the client closed first and any more packets from the server aren't
		// useful, so we can optionally SetLinger(0) here to recycle the port
		// faster.
		srvConn.SetLinger(0)
		srvConn.CloseRead()
		waitFor = serverClosed
	case <-serverClosed:
		cliConn.CloseRead()
		waitFor = clientClosed
	}
	// Wait for the other connection to close.
	// This "waitFor" pattern isn't required, but gives us a way to track the
	// connection and ensure all copies terminate correctly; we can trigger
	// stats on entry and deferred exit of this function.
	<-waitFor
}
// This does the actual data transfer.
// The broker only closes the Read side.
func broker(dst, src net.Conn, srcClosed chan struct{}) {
	// We can handle errors in a finer-grained manner by inlining io.Copy (it's
	// simple, and we drop the ReaderFrom or WriterTo checks for
	// net.Conn->net.Conn transfers, which aren't needed). This would also let
	// us adjust buffersize.
	n, err := io.Copy(dst, src)
	log.Errorf(" %v bytes copied", n)
	if err != nil {
		log.Errorf("Copy error: %s", err)
		// errCh <- err
	}
	if err := src.Close(); err != nil {
		log.Errorf("Close error: %s", err)
		errCh <- err
	}
	if n == 0 {
		lrNewCh <- 1
	}
	srcClosed <- struct{}{}
}
