I want to get an auto-incrementing ID from a file; code as below:
// get increment id
func GetID() uint64 {
	appIdLock.Lock()
	defer appIdLock.Unlock()
	f, err := os.OpenFile(idPath, os.O_RDWR, 0666)
	if err != nil {
		return 0
	}
	defer f.Close()
	// Read
	bufferTemp := make([]byte, 16)
	bufferResult := make([]byte, 0)
	for {
		n, _ := f.Read(bufferTemp)
		if n > 0 {
			bufferResult = append(bufferResult, bufferTemp[:n]...)
		} else {
			break
		}
	}
	if len(bufferResult) == 0 {
		return 0
	}
	s := common.StringToUint64(string(bufferResult))
	s += 1
	// Write (how to overwrite?)
	f.WriteString(strconv.FormatUint(s, 10))
	return s
}
The f.WriteString call appends instead of overwriting. For example, if my file contains 123 and I run GetID(), I expect the file to contain 124, but the result is 123124.
Without changing much of your code, here is a solution that does what you want, with no loops and only a single byte slice.
func GetID() uint64 {
	appIdLock.Lock()
	defer appIdLock.Unlock()
	// Added os.O_CREATE to create the file if it doesn't exist.
	f, err := os.OpenFile(idPath, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		fmt.Println(err)
		return 0
	}
	defer f.Close()
	// Stat the file first so a suitably sized byte slice can be allocated.
	fileStat, err := f.Stat()
	if err != nil {
		fmt.Println(err)
		return 0
	}
	buffer := make([]byte, fileStat.Size())
	_, err = f.Read(buffer)
	if err != nil {
		fmt.Println(err)
		return 0
	}
	s, _ := strconv.ParseUint(string(buffer), 10, 64)
	s += 1
	// The magic is here: WriteAt writes the bytes at offset 0 of the file.
	_, err = f.WriteAt([]byte(strconv.FormatUint(s, 10)), 0)
	if err != nil {
		fmt.Println(err)
		return 0
	}
	return s
}
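If reopening the file on every call is acceptable, a simpler alternative is a truncating rewrite of the whole file. A minimal sketch, assuming Go 1.16+ for os.ReadFile/os.WriteFile (older versions can use ioutil.ReadFile/ioutil.WriteFile), with the same idPath and appIdLock as in the question:

func GetID() uint64 {
	appIdLock.Lock()
	defer appIdLock.Unlock()
	data, err := os.ReadFile(idPath)
	if err != nil && !os.IsNotExist(err) {
		return 0
	}
	// An empty or missing file parses as 0, so the first ID handed out is 1.
	s, _ := strconv.ParseUint(string(data), 10, 64)
	s++
	// os.WriteFile truncates before writing, so no stale digits are left behind.
	if err := os.WriteFile(idPath, []byte(strconv.FormatUint(s, 10)), 0666); err != nil {
		return 0
	}
	return s
}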
Hope it helps!
I have a function in which I take in a base64 string and get the content of it (PDF or JPEG).
I read in the base64 content, convert it to bytes and decode it into the file that it is.
I then create a file where I will output the decoded file (JPEG or PDF).
Then I write the bytes to it.
Then I call my GetFileContentType on it, and it returns an empty string.
If I run the functions separately, that is, I first run the function that creates the decoded file and let it return, and then call the second function to get the content type, it works and returns JPEG or PDF.
What am I doing wrong here?
And is there a better way to do this?
func ConvertToJPEGBase64(
	src string,
	dst string,
) error {
	b, err := ioutil.ReadFile(src)
	if err != nil {
		return err
	}
	str := string(b)
	byteArray, err := base64.StdEncoding.DecodeString(str)
	if err != nil {
		return err
	}
	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	if _, err := f.Write(byteArray); err != nil {
		return err
	}
	f.Sync()
	filetype, err := client.GetFileContentType(f)
	if err != nil {
		return err
	}
	if strings.Contains(filetype, "jpeg") {
		// do something
	} else {
		// do something else
	}
	return nil
}
// GetFileContentType tells us the type of file
func GetFileContentType(out *os.File) (string, error) {
	// Only the first 512 bytes are used to sniff the content type.
	buffer := make([]byte, 512)
	_, err := out.Read(buffer)
	if err != nil {
		return "", err
	}
	contentType := http.DetectContentType(buffer)
	return contentType, nil
}
The problem is that GetFileContentType reads from the file's current position, which is at the end of the file after the write. Fix this by seeking back to the beginning of the file before calling GetFileContentType (note that os.File.Seek takes the offset first and the whence constant second):

if _, err := f.Seek(0, io.SeekStart); err != nil {
	return err
}
A better fix is to use the file data that's already in memory. This simplifies the code to the point where there's no need for the GetFileContentType function.
func ConvertToJPEGBase64(
	src string,
	dst string,
) error {
	b, err := ioutil.ReadFile(src)
	if err != nil {
		return err
	}
	str := string(b)
	byteArray, err := base64.StdEncoding.DecodeString(str)
	if err != nil {
		return err
	}
	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close() // <-- Close the file on return.
	if _, err := f.Write(byteArray); err != nil {
		return err
	}
	fileType := http.DetectContentType(byteArray) // <-- use data in memory
	if strings.Contains(fileType, "jpeg") {
		// do something
	} else {
		// do something else
	}
	return nil
}
More code can be eliminated by using ioutil.WriteFile:
func ConvertToJPEGBase64(src, dst string) error {
	b, err := ioutil.ReadFile(src)
	if err != nil {
		return err
	}
	byteArray, err := base64.StdEncoding.DecodeString(string(b))
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(dst, byteArray, 0666); err != nil {
		return err
	}
	fileType := http.DetectContentType(byteArray)
	if strings.Contains(fileType, "jpeg") {
		// do something
	} else {
		// do something else
	}
	return nil
}
I can successfully write a byte array to the port, but reading from it fails: the Read function returns a "read /dev/tty.usbserial1: interrupted system call" error. I'm running macOS Sierra 10.12.6 and using "github.com/tarm/serial". Has anyone met the same problem?
Here is the code:
func TestTarmSerialDataRecvAndSend(t *testing.T) {
	c := &serial.Config{
		Name: "/dev/cu.usbserial1",
		Baud: 9600,
	}
	s, err := serial.OpenPort(c)
	if err != nil {
		fmt.Println(err)
		return
	}
	for i := 0; i < 10; i++ {
		n, err := s.Write([]byte("test"))
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Printf("send byte : %v", n)
	}
	fmt.Println("send over")
	buf := make([]byte, 128)
	n, err := s.Read(buf)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%q", buf[:n])
}
func sendCommand(p *serial.Port, command string, waitForOk bool) string {
	log.Println("--- SendCommand: ", command)
	var status string = ""
	p.Flush()
	_, err := p.Write([]byte(command))
	if err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 32)
	var loop int = 1
	if waitForOk {
		loop = 10
	}
	for i := 0; i < loop; i++ {
		// ignoring error as EOF raises error on Linux
		n, _ := p.Read(buf)
		if n > 0 {
			status = string(buf[:n])
			log.Printf("SendCommand: rcvd %d bytes: %s\n", n, status)
			if strings.HasSuffix(status, "OK\r\n") || strings.HasSuffix(status, "ERROR\r\n") {
				break
			}
		}
	}
	return status
}
The solution is here: https://golang.hotexamples.com/examples/github.com.tarm.serial/Port/Read/golang-port-read-method-examples.html
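For reference, a minimal read-loop sketch based on those examples. The ReadTimeout field of serial.Config makes Read return after the timeout instead of blocking forever; the device path here is the question's and is an assumption:

package main

import (
	"log"
	"time"

	"github.com/tarm/serial"
)

func main() {
	c := &serial.Config{
		Name:        "/dev/cu.usbserial1", // assumed device path, as in the question
		Baud:        9600,
		ReadTimeout: 2 * time.Second, // Read returns after this instead of blocking forever
	}
	s, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	buf := make([]byte, 128)
	var resp []byte
	for {
		n, err := s.Read(buf)
		if n > 0 {
			resp = append(resp, buf[:n]...)
		}
		if err != nil {
			// On a timeout with no data, Read reports an error (io.EOF on some
			// platforms, as the comment in sendCommand notes); stop here.
			break
		}
	}
	log.Printf("received %q", resp)
}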
I'm coding a little Go program.
It reads the files in a directory line by line, keeps only lines with a certain prefix, normalizes the data, and writes each record to one of two files, depending on whether the normalized record has a certain number of elements.
Data is written to the data file, but errors are never written to the errors file.
Debugging, I see no issue.
Any help is much appreciated.
Thanks,
Martin
package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"strings"
)

func main() {
	//Output file - Data
	if _, err := os.Stat("allData.txt"); os.IsNotExist(err) {
		var file, err = os.Create("allData.txt")
		if err != nil {
			fmt.Println(err)
			return
		}
		defer file.Close()
	}
	file, err := os.OpenFile("allData.txt", os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	w := bufio.NewWriter(file)
	//Output file - Errors
	if _, err := os.Stat("errorData.txt"); os.IsNotExist(err) {
		var fileError, err = os.Create("errorData.txt")
		if err != nil {
			fmt.Println(err)
			return
		}
		defer fileError.Close()
	}
	fileError, err := os.OpenFile("errorData.txt", os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	z := bufio.NewWriter(fileError)
	//Read Directory
	files, err := ioutil.ReadDir("../")
	if err != nil {
		log.Fatal(err)
	}
	//Build file path
	for _, f := range files {
		fName := string(f.Name())
		sPath := string("../" + fName)
		sFile, err := os.Open(sPath)
		if err != nil {
			fmt.Println(err)
			return
		}
		//Create scanner
		scanner := bufio.NewScanner(sFile)
		scanner.Split(bufio.ScanLines)
		var lines []string
		// This is the buffer now
		for scanner.Scan() {
			lines = append(lines, scanner.Text())
		}
		for _, line := range lines {
			sRecordC := strings.HasPrefix((line), "DATA:")
			if sRecordC {
				splitted := strings.Split(line, " ")
				splittedNoSpaces := deleteEmpty(splitted)
				if len(splittedNoSpaces) == 11 {
					splittedString := strings.Join(splittedNoSpaces, " ")
					sFinalRecord := string(splittedString + "\r\n")
					if _, err = fmt.Fprintf(w, sFinalRecord); err != nil {
					}
				}
				if len(splittedNoSpaces) < 11 {
					splitted := strings.Split(line, " ")
					splittedNoSpaces := deleteEmpty(splitted)
					splittedString := strings.Join(splittedNoSpaces, " ")
					sFinalRecord := string(splittedString + "\r\n")
					if _, err = fmt.Fprintf(z, sFinalRecord); err != nil {
					}
					err = fileError.Sync()
					if err != nil {
						log.Fatal(err)
					}
				}
			}
		}
	}
	err = file.Sync()
	if err != nil {
		log.Fatal(err)
	}
}

//Delete Empty array elements
func deleteEmpty(s []string) []string {
	var r []string
	for _, str := range s {
		if str != "" {
			r = append(r, str)
		}
	}
	return r
}
Don't open the file multiple times, and don't check for the file's existence before creating it; just use the os.O_CREATE flag. You're also not deferring the correct os.File.Close call, because the file is opened multiple times.
When using a bufio.Writer, you should always call Flush() to ensure that all data has been written to the underlying io.Writer.
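For illustration, a minimal sketch of that advice applied to the two output files (same file names as in the question; error handling kept short):

package main

import (
	"bufio"
	"os"
)

func main() {
	// One OpenFile call per file: os.O_CREATE creates it if missing,
	// so no separate os.Stat/os.Create dance is needed.
	file, err := os.OpenFile("allData.txt", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	w := bufio.NewWriter(file)
	defer w.Flush() // deferred after Close, so it runs first (LIFO) and flushes buffered data

	fileError, err := os.OpenFile("errorData.txt", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	defer fileError.Close()
	z := bufio.NewWriter(fileError)
	defer z.Flush()

	// ... scan input files and write records to w and z as before ...
	w.WriteString("data record\r\n")
	z.WriteString("error record\r\n")
}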
Is there an implementation of io.ReaderAt that can be created from an implementation of io.Reader without first being read into a []byte or string?
Something like the below. Note that bytes.Reader implements the ReadAt method: https://golang.org/pkg/bytes/#Reader.ReadAt. So the bytes.NewReader line is essentially what you are looking for.
Getting a bytes.Reader:
var ioReader io.Reader
...
buff := bytes.NewBuffer([]byte{})
if _, err := io.Copy(buff, ioReader); err != nil {
	return err
}
reader := bytes.NewReader(buff.Bytes())
// Do something with `reader`
Yes, this is possible. As mentioned in my comment above, the implementation is limited in that you cannot seek backward, nor can you re-read a section that has already been read.
Here is an example implementation:
type unbufferedReaderAt struct {
	R io.Reader
	N int64
}

func NewUnbufferedReaderAt(r io.Reader) io.ReaderAt {
	return &unbufferedReaderAt{R: r}
}

func (u *unbufferedReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
	if off < u.N {
		return 0, errors.New("invalid offset")
	}
	diff := off - u.N
	written, err := io.CopyN(ioutil.Discard, u.R, diff)
	u.N += written
	if err != nil {
		return 0, err
	}
	n, err = u.R.Read(p)
	u.N += int64(n)
	return
}
Example usage:
s := strings.NewReader("hello world")
var b [5]byte
ura := NewUnbufferedReaderAt(s)
if _, err := ura.ReadAt(b[:], 0); err != nil {
	panic(err)
}
fmt.Printf("%s\n", b[:]) // prints "hello"
/*
	if _, err := ura.ReadAt(b[:], 0); err != nil {
		panic(err) // panics
	}
	fmt.Printf("%s\n", b[:])
*/
if _, err := ura.ReadAt(b[:], 6); err != nil {
	panic(err)
}
fmt.Printf("%s\n", b[:]) // prints "world"
I have a Go client for a custom protocol. The protocol is lz4-compressed JSON-RPC with a four-byte header giving the length of the compressed JSON.
func ReceiveMessage(conn net.Conn) ([]byte, error) {
	start := time.Now()
	bodyLen := 0
	body := make([]byte, 0, 4096)
	buf := make([]byte, 0, 256)
	for bodyLen == 0 || len(body) < bodyLen {
		if len(body) > 4 {
			header := body[:4]
			body = body[:4]
			bodyLen = int(unpack(header))
		}
		n, err := conn.Read(buf[:])
		if err != nil {
			if err != io.EOF {
				return body, err
			}
		}
		body = append(body, buf[0:n]...)
		now := time.Now()
		if now.Sub(start) > time.Duration(readTimeout)*time.Millisecond {
			return body, fmt.Errorf("Timed-out while reading from socket.")
		}
		time.Sleep(time.Duration(1) * time.Millisecond)
	}
	return lz4.Decode(nil, body)
}
The client:
func main() {
	address := os.Args[1]
	msg := []byte(os.Args[2])
	fmt.Printf("Sending %s to %s\n", msg, address)
	conn, err := net.Dial(address)
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// Another library call
	_, err = SendMessage(conn, []byte(msg))
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	response, err := ReceiveMessage(conn)
	conn.Close()
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	fmt.Printf("Response: %s\n", response)
}
When I call it, I get no response and it just times out. (If I do not explicitly ignore the EOF, it returns there with an io.EOF error.) I have another library for this written in Python that also works against the same endpoint with the same payload. Do you see anything immediately?
[JimB just beat me to an answer but here goes anyway.]
The root issue is that you did body = body[:4] when you wanted body = body[4:]. The former keeps only the first four header bytes, while the latter tosses the four header bytes just decoded.
Here is a self-contained version with some debug logs that works. It has some of the other changes I mentioned. (I guessed at various things that you didn't include, like the lz4 package used, the timeout, unpack, etc.)
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"time"

	"github.com/bkaradzic/go-lz4"
)

const readTimeout = 30 * time.Second // XXX guess

func ReceiveMessage(conn net.Conn) ([]byte, error) {
	bodyLen := 0
	body := make([]byte, 0, 4096)
	var buf [256]byte
	conn.SetDeadline(time.Now().Add(readTimeout))
	defer conn.SetDeadline(time.Time{}) // disable deadline
	for bodyLen == 0 || len(body) < bodyLen {
		if bodyLen == 0 && len(body) >= 4 {
			bodyLen = int(unpack(body[:4]))
			body = body[4:]
			if bodyLen <= 0 {
				return nil, errors.New("invalid body length")
			}
			log.Println("read bodyLen:", bodyLen)
			continue
		}
		n, err := conn.Read(buf[:])
		body = append(body, buf[:n]...)
		log.Printf("appended %d bytes, len(body) now %d", n, len(body))
		// Note, this is checked *after* handling any n bytes.
		// An io.Reader is allowed to return data with an error.
		if err != nil {
			if err != io.EOF {
				return nil, err
			}
			break
		}
	}
	if len(body) != bodyLen {
		return nil, fmt.Errorf("got %d bytes, expected %d",
			len(body), bodyLen)
	}
	return lz4.Decode(nil, body)
}

const address = ":5678"

var msg = []byte(`{"foo":"bar"}`)

func main() {
	//address := os.Args[1]
	//msg := []byte(os.Args[2])
	fmt.Printf("Sending %s to %s\n", msg, address)
	conn, err := net.Dial("tcp", address)
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// Another library call
	_, err = SendMessage(conn, msg)
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	response, err := ReceiveMessage(conn)
	conn.Close()
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	fmt.Printf("Response: %s\n", response)
}

// a guess at what your `unpack` does
func unpack(b []byte) uint32 {
	return binary.LittleEndian.Uint32(b)
}

func SendMessage(net.Conn, []byte) (int, error) {
	// stub
	return 0, nil
}
func init() {
	// start a simple test server in the same process as a go-routine.
	ln, err := net.Listen("tcp", address)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		defer ln.Close()
		for {
			conn, err := ln.Accept()
			if err != nil {
				log.Fatalln("accept:", err)
			}
			go Serve(conn)
		}
	}()
}

func Serve(c net.Conn) {
	defer c.Close()
	// skip reading the initial request/message and just respond
	const response = `{"somefield": "someval"}`
	// normally (de)compression in Go is done streaming via
	// an io.Reader or io.Writer but we need the final length.
	data, err := lz4.Encode(nil, []byte(response))
	if err != nil {
		log.Println("lz4 encode:", err)
		return
	}
	log.Println("sending len:", len(data))
	if err = binary.Write(c, binary.LittleEndian, uint32(len(data))); err != nil {
		log.Println("writing len:", err)
		return
	}
	log.Println("sending data")
	if _, err = c.Write(data); err != nil {
		log.Println("writing compressed response:", err)
		return
	}
	log.Println("Serve done, closing connection")
}
Playground (but not runnable there).
You have a number of issues with the server code. Without a full reproducing case, it's hard to tell if these will fix everything.
for bodyLen == 0 || len(body) < bodyLen {
	if len(body) > 4 {
		header := body[:4]
		body = body[:4]
		bodyLen = int(unpack(header))
	}
Every iteration, if len(body) > 4, you slice body back to the first 4 bytes, so body might never get to be >= bodyLen.
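For illustration, a corrected version of that header handling (it mirrors the fix in the other answer):

if bodyLen == 0 && len(body) >= 4 {
	bodyLen = int(unpack(body[:4]))
	body = body[4:] // drop the header bytes, keep any payload already read
}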
n, err := conn.Read(buf[:])
You don't need to re-slice buf here; use conn.Read(buf). (Note too that buf was created with make([]byte, 0, 256), so it has length zero and every Read will read 0 bytes; it needs a non-zero length, e.g. make([]byte, 256).)
if err != nil {
	if err != io.EOF {
		return body, err
	}
}
io.EOF is the end of the stream, and you need to handle it. Note that n might still be > 0 when you get an EOF. Check for io.EOF after processing the body, or you could loop indefinitely.
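A minimal sketch of that read pattern:

n, err := conn.Read(buf)
body = append(body, buf[:n]...) // consume the n bytes first, even on error
if err == io.EOF {
	break // end of stream
} else if err != nil {
	return body, err
}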
body = append(body, buf[0:n]...)
now := time.Now()
if now.Sub(start) > time.Duration(readTimeout)*time.Millisecond {
	return body, fmt.Errorf("Timed-out while reading from socket.")
You would be better off using conn.SetReadDeadline before each read, so a stalled Read can be interrupted.
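A sketch, reusing the question's readTimeout value:

// before each Read:
if err := conn.SetReadDeadline(time.Now().Add(time.Duration(readTimeout) * time.Millisecond)); err != nil {
	return body, err
}
n, err := conn.Read(buf)
// a stalled Read now fails with a timeout error instead of blocking forever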