I'm trying to use gopacket to parse the packets of a .pcap file and extract pretty much all the information in them. So far I either get truncated information, or an error if I try to use a filter.
package main
import (
"fmt"
"github.com/google/gopacket"
"github.com/google/gopacket/pcap"
//"github.com/google/gopacket/layers"
"log"
)
var (
pcapFile string = "myFile.pcap"
handle *pcap.Handle
err error
)
func main() {
// Open file instead of device
handle, err = pcap.OpenOffline(pcapFile)
if err != nil { log.Fatal(err) }
defer handle.Close()
// Loop through packets in file
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
for packet := range packetSource.Packets() {
		fmt.Println(packet)
}
}
This returns:
PACKET: 122 bytes, wire length 122 cap length 122 # 2017-06-11 02:57:03.133873 +0100 WEST
- Layer 1 (36 bytes) = RadioTap {Contents=[..36..] Payload=[..86..] Version=0 Length=36 Present=2684370991 TSFT=661956589449 Flags=FCS Rate=1 Mb/s ChannelFrequency=2412 MHz ChannelFlags=CCK,Ghz2 FHSS=0 DBMAntennaSignal=-91 DBMAntennaNoise=0 LockQuality=0 TxAttenuation=0 DBTxAttenuation=0 DBMTxPower=0 Antenna=0 DBAntennaSignal=0 DBAntennaNoise=0 RxFlags= TxFlags= RtsRetries=0 DataRetries=0 MCS= AMPDUStatus=ref#0 VHT=}
- Layer 2 (24 bytes) = Dot11 {Contents=[..24..] Payload=[..58..] Type=DataQOSData Proto=0 Flags=TO-DS,WEP DurationID=0 Address1=11:22:33:44:55:66 Address2=00:11:22:33:44:55 Address3=11:22:33:44:55:66 Address4= SequenceNumber=0 FragmentNumber=0 Checksum=4262477891}
- Layer 3 (58 bytes) = Dot11WEP {Contents=[..58..] Payload=[]}
PACKET: 116 bytes, wire length 116 cap length 116 # 2017-06-11 02:57:03.243457 +0100 WEST
- Layer 1 (18 bytes) = RadioTap {Contents=[..18..] Payload=[..102..] Version=0 Length=18 Present=18478 TSFT=0 Flags= Rate=1 Mb/s ChannelFrequency=2417 MHz ChannelFlags=CCK,Ghz2 FHSS=0 DBMAntennaSignal=-25 DBMAntennaNoise=0 LockQuality=0 TxAttenuation=0 DBTxAttenuation=0 DBMTxPower=0 Antenna=1 DBAntennaSignal=0 DBAntennaNoise=0 RxFlags= TxFlags= RtsRetries=0 DataRetries=0 MCS= AMPDUStatus=ref#0 VHT=}
- Layer 2 (24 bytes) = Dot11 {Contents=[..24..] Payload=[..74..] Type=DataQOSData Proto=0 Flags=TO-DS,WEP DurationID=314 Address1=00:11:22:33:44:55 Address2=11:22:33:44:55:66 Address3=00:11:22:33:44:55 Address4= SequenceNumber=0 FragmentNumber=0 Checksum=412506031}
- Layer 3 (74 bytes) = Dot11WEP {Contents=[..74..] Payload=[]}
I would like to see, for example, the SSID of the packets, or more information inside each layer, but every time I try to drill down into the items I only get:
RadioTap
Dot11
Dot11WEP
RadioTap
Dot11
Dot11WEP
CODE FOR THE ABOVE OUTPUT
package main
import (
"fmt"
"github.com/google/gopacket"
"github.com/google/gopacket/pcap"
//"github.com/google/gopacket/layers"
"log"
)
var (
pcapFile string = "myFile.pcap"
handle *pcap.Handle
err error
)
func main() {
// Open file instead of device
handle, err = pcap.OpenOffline(pcapFile)
if err != nil { log.Fatal(err) }
defer handle.Close()
// Loop through packets in file
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
for packet := range packetSource.Packets() {
		for _, layer := range packet.Layers() {
			fmt.Println(layer.LayerType())
		}
}
}
But what I would really like to know is the SSID/BSSID and the flags inside the packet, from the Dot11 layer.
package main
import (
"fmt"
"github.com/google/gopacket"
"github.com/google/gopacket/pcap"
"github.com/google/gopacket/layers"
"log"
)
var (
pcapFile string = "Network_Join_Nokia_Mobile.pcap"
handle *pcap.Handle
err error
)
func main() {
// Open file instead of device
handle, err = pcap.OpenOffline(pcapFile)
if err != nil { log.Fatal(err) }
defer handle.Close()
// Loop through packets in file
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
for packet := range packetSource.Packets() {
fmt.Println(packet.Metadata().CaptureInfo.Timestamp)
dot11 := packet.Layer(layers.LayerTypeDot11)
if nil != dot11 {
dot11, _ := dot11.(*layers.Dot11)
// the flags are empty in many of the packets of this example capture file
fmt.Printf("BSSID: %v Flags: %+v\n", dot11.Address3, dot11.Flags)
}
dot11info := packet.Layer(layers.LayerTypeDot11InformationElement)
// some wlan frames contain these with the SSID, usually beacons, probes and association requests
if nil != dot11info {
dot11info, _ := dot11info.(*layers.Dot11InformationElement)
if dot11info.ID == layers.Dot11InformationElementIDSSID {
fmt.Printf("SSID: %q\n", dot11info.Info)
}
}
fmt.Printf("\n")
}
}
Part of output with example file Network_Join_Nokia_Mobile.pcap from Wireshark:
2000-01-01 00:05:04.913478 +0000 UTC
BSSID: 00:01:e3:41:bd:6e Flags: Retry
SSID: "martinet3"
Note that the capture file you used in your question does not contain a frame with the layer that contains the SSID.
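As for the error when applying a filter: pcap.Handle accepts standard BPF filter expressions on offline handles as well, via SetBPFFilter, called before iterating the packet source. A minimal sketch; the filter expression is only an illustrative assumption for an 802.11/RadioTap capture, adjust it to what you want to keep:
// Sketch: restrict the offline handle to 802.11 beacon frames.
// "type mgt subtype beacon" is standard tcpdump/BPF syntax for wireless captures.
if err := handle.SetBPFFilter("type mgt subtype beacon"); err != nil {
	log.Fatal(err)
}
If the expression does not match the capture's link type, SetBPFFilter returns an error, which is one common cause of the filter failures mentioned in the question.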
Problem
I am sending packets over TCP, where the first 8 bytes hold, as a long, the length of the actual packet. While receiving, after some point the server reads a wrong packet length, which causes a "slice out of range" error because the received packet length is far too big; yet with tcpdump I can see that the correct packet size is arriving.
Client TCP Code
package main
import (
"fmt"
"net"
"ByteBuffer"
"log"
"sync"
)
func main() {
conn, err := net.Dial("tcp", "192.168.90.116:8300")
if err != nil {
fmt.Println(err)
return
}
byteBuffer := ByteBuffer.Buffer{
Endian:"big",
}
msg := "Hello World"
totalByteLen := len(msg)
byteBuffer.PutLong(totalByteLen)
byteBuffer.Put([]byte(msg))
log.Println(byteBuffer.Array())
	for i := 0; i < 1000000000000; i++ {
go write(conn, byteBuffer.Array())
}
}
var lck = &sync.Mutex{}
func write(conn net.Conn, data []byte){
lck.Lock()
_, err := conn.Write(data)
lck.Unlock()
if err != nil{
return
}
}
Server TCP Code
func HandleRequest(conn net.Conn){
defer conn.Close()
for {
// creating a 8 byte buffer array
sizeBuf := make([]byte, 8)
// reading from tcp sockets
_, err := conn.Read(sizeBuf)
// converting the packet size to int64
packetSize := int64(binary.BigEndian.Uint64(sizeBuf))
log.Println(packetSize)
if packetSize < 0 {
continue
}
// reading more bytes from tcp pipe of packetSize length
/*
		Here it catches an error because the packet size is incorrect, but the error only appears after receiving around 4-5K messages.
*/
completePacket := make([]byte, packetSize)
_, err = conn.Read(completePacket)
// checking error type
if err == io.EOF{
break
}
if err != nil{
break
}
fmt.Println(completePacket)
}
}
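The usual cause of this symptom is short reads: conn.Read may return fewer bytes than requested, and once that happens the stream is no longer aligned on message boundaries, so the next "length prefix" is read from the middle of a payload. Below is a minimal sketch of the read side using io.ReadFull, assuming the same 8-byte big-endian length prefix the client writes above.
// Drop-in sketch replacing HandleRequest above; needs "encoding/binary",
// "fmt", "io", "log" and "net" imported.
func handleFramed(conn net.Conn) {
	defer conn.Close()
	sizeBuf := make([]byte, 8)
	for {
		// io.ReadFull keeps reading until all 8 prefix bytes have arrived.
		if _, err := io.ReadFull(conn, sizeBuf); err != nil {
			if err != io.EOF {
				log.Println("read length:", err)
			}
			return
		}
		packetSize := binary.BigEndian.Uint64(sizeBuf)
		payload := make([]byte, packetSize)
		// Likewise, read the entire payload before looking for the next prefix.
		if _, err := io.ReadFull(conn, payload); err != nil {
			log.Println("read payload:", err)
			return
		}
		fmt.Printf("received %d bytes\n", len(payload))
	}
}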
The first TCP connection, running on localhost on macOS, always parses the binary sent to it correctly. Subsequent requests lose the binary data, seeing only the first byte [8]. How have I failed to set up my Reader?
package main
import (
"fmt"
"log"
"net"
"os"
"app/src/internal/handler"
"github.com/golang-collections/collections/stack"
)
func main() {
port := os.Getenv("SERVER_PORT")
s := stack.New()
ln, err := net.Listen("tcp", ":8080")
if err != nil {
log.Fatalf("net.Listen: %v", err)
}
fmt.Println("Serving on " + port)
for {
conn, err := ln.Accept()
// defer conn.Close()
if err != nil {
log.Fatal("ln.Accept")
}
go handler.Handle(conn, s)
}
}
package handler
import (
"fmt"
"io"
"log"
"net"
"github.com/golang-collections/collections/stack"
)
func Handle(c net.Conn, s *stack.Stack) {
fmt.Printf("Serving %s\n", c.RemoteAddr().String())
buf := make([]byte, 0, 256)
tmp := make([]byte, 128)
n, err := c.Read(tmp)
if err != nil {
if err != io.EOF {
log.Fatalf("connection Read() %v", err)
}
return
}
buf = append(buf, tmp[:n]...)
}
log:
Serving [::1]:51699
------------- value ---------------:QCXhoy5t
Buffer Length: 9. First Value: 8
Serving [::1]:51700
------------- value ---------------:
Buffer Length: 1. First Value: 8
Serving [::1]:51701
test sent over:
push random string:
QCXhoy5t
push random string:
GPh0EnbS
push random string:
4kJ0wN0R
The docs for Reader say:
Read reads up to len(p) bytes into p. It returns the number of bytes read (0 <= n
<= len(p)) and any error encountered. Even if Read returns n < len(p), it may use
all of p as scratch space during the call. If some data is available but not
len(p) bytes, Read conventionally returns what is available instead of waiting
for more.
So the most likely cause of your issue is that Read is returning only the data currently available (in this case a single byte). You can fix this by using ioutil.ReadAll, or by performing the read in a loop (the fact that the data is being appended to a buffer suggests that was the original intention), with something like:
for {
n, err := c.Read(tmp)
if err != nil {
if err != io.EOF {
// Note that data might have also been received - you should process that
// if appropriate.
log.Fatalf("connection Read() %v", err)
return
}
break // All data received so process it
}
buf = append(buf, tmp[:n]...)
}
Note: There is no guarantee that any data is received; you should check the length before trying to access it (i.e. buf[0] may panic)
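For completeness, the ioutil.ReadAll route mentioned above looks roughly like the sketch below inside Handle (it needs the io/ioutil import). Note that it only returns once the client closes the connection or an error occurs, so it suits one-shot request patterns:
// Sketch: read everything the client sends in one call (io.ReadAll in Go 1.16+).
buf, err := ioutil.ReadAll(c)
if err != nil {
	log.Fatalf("connection Read() %v", err)
}
if len(buf) > 0 {
	fmt.Printf("Buffer Length: %d. First Value: %d\n", len(buf), buf[0])
}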
I need to read a file at a specific location, given by a byte offset.
filePath := "test_file.txt"
byteOffset := 6
// Read file
How can I achieve this, if possible without reading the whole file in memory ?
Package os
import "os"
func (*File) Seek
func (f *File) Seek(offset int64, whence int) (ret int64, err error)
Seek sets the offset for the next Read or Write on file to offset,
interpreted according to whence: 0 means relative to the origin of the
file, 1 means relative to the current offset, and 2 means relative to
the end. It returns the new offset and an error, if any. The behavior
of Seek on a file opened with O_APPEND is not specified.
Package io
import "io"
Seek whence values.
const (
SeekStart = 0 // seek relative to the origin of the file
SeekCurrent = 1 // seek relative to the current offset
SeekEnd = 2 // seek relative to the end
)
For example,
package main
import (
"fmt"
"io"
"os"
)
func main() {
filePath := "test.file"
byteOffset := 6
f, err := os.Open(filePath)
if err != nil {
panic(err)
}
defer f.Close()
_, err = f.Seek(int64(byteOffset), io.SeekStart)
if err != nil {
panic(err)
}
buf := make([]byte, 16)
n, err := f.Read(buf[:cap(buf)])
buf = buf[:n]
if err != nil {
if err != io.EOF {
panic(err)
}
}
fmt.Printf("%s\n", buf)
}
Output:
$ cat test.file
0123456789
$ go run seek.go
6789
$
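If you prefer not to move the file's offset at all, *os.File also implements io.ReaderAt, so you can read from an absolute position in one call; a minimal sketch reusing f and byteOffset from the example above:
// Sketch: ReadAt reads len(buf) bytes starting at the given absolute offset,
// without changing the file's current position.
buf := make([]byte, 16)
n, err := f.ReadAt(buf, int64(byteOffset))
if err != nil && err != io.EOF {
	panic(err)
}
fmt.Printf("%s\n", buf[:n])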
I have the following proto file:
package dogs;
enum Breed {
terrier = 0;
shepherd = 1;
hound = 2;
};
message Dog {
required int64 nbLegs = 1;
optional int64 nbTeeth = 2 [default=24];
optional Breed breed = 3;
optional string name = 4;
}
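(For context, the Go code protoc generates for this proto2 file is roughly shaped like the sketch below; the exact fields and helpers are assumptions based on standard proto2 generation, shown only to explain the Get* calls used in the program.)
// Rough sketch of the generated type, not the exact protoc output:
// optional/required scalar fields become pointers, with Get* helpers
// that return the value or the field's default when unset.
type Breed int32

type Dog struct {
	NbLegs  *int64  // required int64 nbLegs = 1
	NbTeeth *int64  // optional int64 nbTeeth = 2 [default=24]
	Breed   *Breed  // optional Breed breed = 3
	Name    *string // optional string name = 4
}

func (d *Dog) GetName() string {
	if d != nil && d.Name != nil {
		return *d.Name
	}
	return ""
}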
And the following Go program, written using the goprotobuf package. The program reads a Varint from stdin to get the length of the encoded message, reads that number of bytes from stdin into a buffer, and attempts to unmarshal the buffer into a Dog.
--START CODE--
package main
import "bufio"
import "encoding/binary"
import "os"
import "log"
import "fmt"
import "dogs"
import "code.google.com/p/goprotobuf/proto"
func render(dog *dogs.Dog) string {
return fmt.Sprintf("Dog: %v %v %v %v", dog.GetName(), dog.GetBreed(), dog.GetNbLegs(), dog.GetNbTeeth())
}
func main() {
var dog = new(dogs.Dog)
stdin := bufio.NewReader(os.Stdin)
sz, _ := binary.ReadVarint(stdin)
bytes := make([]byte, sz)
os.Stdin.Read(bytes)
buf := proto.NewBuffer(bytes)
err := buf.Unmarshal(dog)
if err != nil {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "Receiving %s of length %d\n", render(dog), sz)
}
--END CODE--
And this encoded message (preceded by a Varint), in hexdump form:
0000000: 1408 0418 0222 0446 6964 6f .....".Fido
which i can successfully decode using c++ and libprotobuf as a Dog with
name: Fido
breed: hound
nbLegs: 4
nbTeeth: 24
Unmarshaling in the go program, however, always returns the error "illegal tag 0".
I suspect that I've misunderstood the decoding API but, so far, I can't see how.
Try just unmarshalling the bytes directly instead of creating a buffer. You may also want to check that the read actually fills all sz bytes; note that os.Stdin has already been wrapped in a bufio.Reader, which may have buffered the remaining input, so the follow-up read should come from stdin (the bufio.Reader) rather than from os.Stdin:
func main() {
	var dog = new(dogs.Dog)
	stdin := bufio.NewReader(os.Stdin)
	sz, _ := binary.ReadVarint(stdin)
	bytes := make([]byte, sz)
	// Read from the bufio.Reader, not os.Stdin, and insist on all sz bytes
	// (requires the "io" import).
	if _, err := io.ReadFull(stdin, bytes); err != nil {
		log.Fatal(err)
	}
	if err := proto.Unmarshal(bytes, dog); err != nil {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "Receiving %s of length %d\n", render(dog), sz)
}
I'm wondering if it's possible to count and print the number of bytes downloaded while the file is being downloaded.
out, err := os.Create("file.txt")
if err != nil {
	fmt.Println(fmt.Sprint(err))
	panic(err)
}
defer out.Close()

resp, err := http.Get("http://example.com/zip")
if err != nil {
	fmt.Println(fmt.Sprint(err))
	panic(err)
}
defer resp.Body.Close()

n, err := io.Copy(out, resp.Body)
if err != nil {
	fmt.Println(fmt.Sprint(err))
}
fmt.Println(n, "bytes ")
If I understand you correctly, you wish to display the number of bytes read while the data is transferring, presumably to maintain some kind of progress bar. In that case, you can use Go's compositional data structures to wrap the reader or writer in a custom io.Reader or io.Writer implementation.
It simply forwards the respective Read or Write call to the underlying stream, while doing some additional work with the (int, error) values returned by it. Here is an example you can run on the Go playground.
package main
import (
"bytes"
"fmt"
"io"
"os"
"strings"
)
// PassThru wraps an existing io.Reader.
//
// It simply forwards the Read() call, while displaying
// the results from individual calls to it.
type PassThru struct {
io.Reader
total int64 // Total # of bytes transferred
}
// Read 'overrides' the underlying io.Reader's Read method.
// This is the one that will be called by io.Copy(). We simply
// use it to keep track of byte counts and then forward the call.
func (pt *PassThru) Read(p []byte) (int, error) {
n, err := pt.Reader.Read(p)
pt.total += int64(n)
if err == nil {
fmt.Println("Read", n, "bytes for a total of", pt.total)
}
return n, err
}
func main() {
var src io.Reader // Source file/url/etc
var dst bytes.Buffer // Destination file/buffer/etc
// Create some random input data.
src = bytes.NewBufferString(strings.Repeat("Some random input data", 1000))
// Wrap it with our custom io.Reader.
src = &PassThru{Reader: src}
count, err := io.Copy(&dst, src)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println("Transferred", count, "bytes")
}
The output it generates is this:
Read 512 bytes for a total of 512
Read 1024 bytes for a total of 1536
Read 2048 bytes for a total of 3584
Read 4096 bytes for a total of 7680
Read 8192 bytes for a total of 15872
Read 6128 bytes for a total of 22000
Transferred 22000 bytes
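To hook this into the download code from the question, the same wrapper can sit directly around the HTTP response body; a rough sketch reusing the PassThru type above (it additionally needs the net/http import, and the URL and file name are just the placeholders from the question):
// Sketch: count bytes as they are downloaded by wrapping resp.Body in PassThru.
resp, err := http.Get("http://example.com/zip")
if err != nil {
	panic(err)
}
defer resp.Body.Close()

out, err := os.Create("file.txt")
if err != nil {
	panic(err)
}
defer out.Close()

n, err := io.Copy(out, &PassThru{Reader: resp.Body})
if err != nil {
	panic(err)
}
fmt.Println(n, "bytes transferred")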
The stdlib now provides something like jimt's PassThru: io.TeeReader. It helps simplify things a bit:
// WriteCounter counts the number of bytes written to it.
type WriteCounter struct {
Total int64 // Total # of bytes transferred
}
// Write implements the io.Writer interface.
//
// Always completes and never returns an error.
func (wc *WriteCounter) Write(p []byte) (int, error) {
n := len(p)
wc.Total += int64(n)
fmt.Printf("Read %d bytes for a total of %d\n", n, wc.Total)
return n, nil
}
func main() {
// ...
// Wrap it with our custom io.Reader.
src = io.TeeReader(src, &WriteCounter{})
// ...
}
playground
The grab Go package implements progress updates (and many other features) for file downloads.
An example of printing progress updates while a download is in process is included in the following walkthrough: http://cavaliercoder.com/blog/downloading-large-files-in-go.html
You can basically call grab.GetAsync, which downloads in a new goroutine, and then monitor the BytesTransferred or Progress of the returned grab.Response from the calling thread.
Other answers have explained PassThru. Here is a full example with a callback function, based on Dave Jack's answer.
package main
import (
"fmt"
"io"
"net/http"
"os"
"strconv"
)
// writeCounter counts the number of bytes written to it.
type writeCounter struct {
total int64 // total size
downloaded int64 // downloaded # of bytes transferred
onProgress func(downloaded int64, total int64)
}
// Write implements the io.Writer interface.
//
// Always completes and never returns an error.
func (wc *writeCounter) Write(p []byte) (n int, e error) {
n = len(p)
wc.downloaded += int64(n)
wc.onProgress(wc.downloaded, wc.total)
return
}
func newWriter(size int64, onProgress func(downloaded, total int64)) io.Writer {
return &writeCounter{total: size, onProgress: onProgress}
}
func main() {
client := http.DefaultClient
url := "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
saveTo := "/Users/tin/Desktop/ForBiggerFun.mp4"
download(client, url, saveTo, func(downloaded, total int64) {
fmt.Printf("Downloaded %d bytes for a total of %d\n", downloaded, total)
})
}
func download(client *http.Client, url, filePath string, onProgress func(downloaded, total int64)) (err error) {
// Create file writer
file, err := os.Create(filePath)
if err != nil {
return
}
defer file.Close()
	// Determine the file size
resp, err := client.Head(url)
if err != nil {
return
}
contentLength := resp.Header.Get("content-length")
length, err := strconv.Atoi(contentLength)
if err != nil {
return
}
// Make request
resp, err = client.Get(url)
if err != nil {
return
}
defer resp.Body.Close()
// pipe stream
body := io.TeeReader(resp.Body, newWriter(int64(length), onProgress))
_, err = io.Copy(file, body)
return err
}
Based on Dave Jack's answer, I added a transfer-rate readout, and the file data is received from nc (a direct TCP data transfer).
// WriteCounter counts the number of bytes written to it.
type WriteCounter struct {
Total int64 // Total # of bytes transferred
Last int64
LastUpdate time.Time
}
// Write implements the io.Writer interface.
//
// Always completes and never returns an error.
func (wc *WriteCounter) Write(p []byte) (int, error) {
n := len(p)
wc.Total += int64(n)
now := time.Now()
duration := now.Sub(wc.LastUpdate).Seconds()
if duration > 1 {
wc.LastUpdate = now
rate := float64(wc.Total-wc.Last) / (duration) / 1024.0
wc.Last = wc.Total
fmt.Printf("Read %d bytes for a total of %d , Rate %.1fKb/s \n", n, wc.Total, rate)
}
return n, nil
}
func Server(dest string) {
outputFile, err := os.Create(dest)
if err != nil {
fmt.Println(err)
}
defer outputFile.Close()
fileWriter := bufio.NewWriter(outputFile)
serverListener, err := net.Listen("tcp", "0.0.0.0:"+PORT)
if err != nil {
fmt.Println(err)
}
defer serverListener.Close()
serverConn, err := serverListener.Accept()
if err != nil {
fmt.Println(err)
}
defer serverConn.Close()
wc := &WriteCounter{}
reader := io.TeeReader(serverConn, wc)
serverConnReader := bufio.NewReaderSize(reader, 32*1024*1024)
io.Copy(fileWriter, serverConnReader)
fileWriter.Flush()
outputFile.Sync()
fmt.Println("Done: Writer")
}