Go program memory consumption keeps increasing on Windows Server 2016

This is my Go code. I am using version 1.13.
Once the code starts running, the memory consumption continuously increases and never decreases. I don't think I am creating new variables; I am only reusing global variables for all the storage.
I read online that the GC in Go should kick in every 2 minutes...?
EDIT: I am monitoring the memory consumption of the program in the Windows Task Manager. The memory starts at ~5 MB when the program starts running and increases by 0.3-0.5 MB every minute.
package main

import (
    "context"
    "database/sql"
    "encoding/csv"
    "fmt"
    "log"
    "os"
    "path/filepath"
    "runtime"
    "runtime/debug"
    "strings"
    "time"

    _ "github.com/denisenkom/go-mssqldb"
)

var path = os.Args[1]
var table = os.Args[4]
var backupPath = os.Args[2]
var location = os.Args[5]
var server = "localhost"
var port = 1433
var user = "###"
var password = "###"
var database = os.Args[3]
var ctx = context.Background()
var insertQuery = "INSERT INTO [" + database + "].[dbo].[" + table + "] ([meter_id],[time_stamp],[r_voltage],[y_voltage],[b_voltage],[ry_voltage],[yb_voltage],[br_voltage],[r_current],[y_current],[b_current],[kva],[kw],[kvar],[power_factor],[freq],[kwh],[run_hr],[updated_date]) VALUES"
var updateStatusQuery = "UPDATE EMS_Location_Status SET active='100',mailed='N',Updated_Date=getdate(),Error_desc='Data is coming from FTPS' where Location_Id ='" + location + "'"
var db *sql.DB
var connString = fmt.Sprintf("server=%s;user id=%s;password=%s;port=%d;database=%s;",
    server, user, password, port, database)
var err error
var files []string
var tempInsertQuery string
var csvFile *os.File
var csvLines [][]string
var i int
var line []string
var point string
var processedFile []string
var fileName string
var iterFile string
var c = 10

func processFile(file string, table string) {
    fmt.Println("Processing file...", file)
    tempInsertQuery = insertQuery + " "
    csvFile, err = os.Open(file)
    if err != nil {
        for err != nil {
            fmt.Println("Encountered error in opening file.. trying again in 10 seconds.")
            time.Sleep(10 * time.Second)
            csvFile, err = os.Open(file)
        }
        fmt.Println(err)
    }
    csvLines, err = csv.NewReader(csvFile).ReadAll()
    if err != nil {
        fmt.Println(err)
    }
    for i, line = range csvLines {
        if i == 0 {
            continue
        } else {
            tempInsertQuery = tempInsertQuery + "("
            for _, point = range line {
                tempInsertQuery = tempInsertQuery + "'" + point + "'" + ","
            }
            tempInsertQuery = tempInsertQuery[0:len(tempInsertQuery)-1] + ", GETDATE()" + "), "
        }
    }
    tempInsertQuery = tempInsertQuery[0 : len(tempInsertQuery)-2]
    // Execute query
    //fmt.Println(tempInsertQuery)
    _, err = db.QueryContext(ctx, tempInsertQuery)
    if err != nil {
        fmt.Println(err)
        return
    }
    _, err = db.QueryContext(ctx, updateStatusQuery)
    if err != nil {
        fmt.Println(err)
        return
    }
    csvFile.Close()
    fmt.Println("Done processing file ", file)
    backupFile(file)
    runtime.GC()
    debug.FreeOSMemory()
}

func backupFile(file string) {
    fmt.Println("Backing up...", file)
    processedFile = strings.Split(file, "\\")
    fileName = processedFile[len(processedFile)-1]
    err = os.Rename(file, backupPath+fileName)
    if err != nil {
        log.Fatal(err)
    }
}

func scanner(scanPath string, table string) {
    err = filepath.Walk(scanPath, func(path string, info os.FileInfo, err error) error {
        if strings.Contains(path, ".csv") {
            files = append(files, path)
        }
        return nil
    })
    if err != nil {
        panic(err)
    }
    for _, iterFile = range files {
        time.Sleep(2 * time.Second)
        processFile(iterFile, table)
    }
    files = files[:0]
}

func main() {
    fmt.Println(connString)
    // Create connection pool
    db, err = sql.Open("sqlserver", connString)
    if err != nil {
        fmt.Println("Error creating connection pool: ", err.Error())
    }
    err = db.PingContext(ctx)
    if err != nil {
        fmt.Println(err.Error())
        return
    }
    fmt.Printf("Connected to server!\n")
    fmt.Printf("Initiating...\n")
    for c > 0 {
        scanner(path, table)
        time.Sleep(1 * time.Second)
    }
    fmt.Println("Quitting..")
}
Any help would be appreciated! Thanks.

A general concept in Go: whatever implements io.Closer (i.e., has a Close() method) should be closed once you no longer intend to use it. Calling Close() frees resources that otherwise may not be freed immediately, or at all.
Your db.QueryContext() call is sql.DB.QueryContext(), which returns an *sql.Rows value that has a Close() method. Do call it. Currently you're not even storing the returned *sql.Rows.
Do it like this:
var rows *sql.Rows
rows, err = db.QueryContext(ctx, tempInsertQuery)
if err != nil {
    fmt.Println(err)
    return
}
if err2 := rows.Close(); err2 != nil {
    fmt.Println("Error closing rows:", err2)
}
Apply this at all uses of QueryContext().
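Also note that for statements like INSERT and UPDATE that return no result set, you can use ExecContext() instead of QueryContext(); then there is no *sql.Rows to close at all. A minimal sketch, reusing the names from your code:

res, err := db.ExecContext(ctx, tempInsertQuery)
if err != nil {
    fmt.Println(err)
    return
}
// RowsAffected is supported by most drivers for INSERT/UPDATE.
if n, err := res.RowsAffected(); err == nil {
    fmt.Println(n, "rows inserted")
}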
Whenever it makes sense, use defer to call such Close() methods, so they are executed even if your code panics or returns before reaching the "manual" call.
One such example in your code is csvFile.Close(). There are returns before it, and if one of those branches is taken, csvFile will never be closed.
Do it like this:
csvFile, err = os.Open(file)
if err != nil {
    for err != nil {
        fmt.Println("Encountered error in opening file.. trying again in 10 seconds.")
        time.Sleep(10 * time.Second)
        csvFile, err = os.Open(file)
    }
    fmt.Println(err)
}
defer func() {
    if err2 := csvFile.Close(); err2 != nil {
        fmt.Println("Error closing csvFile:", err2)
    }
}()
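Finally, keep in mind that Task Manager shows what the OS has given the process, and the Go runtime returns freed memory to the OS lazily. To see what the Go heap itself is doing, you can log runtime.MemStats periodically. A minimal, self-contained sketch:

package main

import (
    "fmt"
    "runtime"
    "time"
)

func main() {
    var m runtime.MemStats
    for {
        runtime.ReadMemStats(&m)
        // HeapAlloc is live heap memory; Sys is what was obtained from the OS.
        fmt.Printf("HeapAlloc=%d KB Sys=%d KB NumGC=%d\n",
            m.HeapAlloc/1024, m.Sys/1024, m.NumGC)
        time.Sleep(time.Minute)
    }
}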

Related

Why does golang concurrent uploading of files to S3 bucket result in canceled, context deadline exceeded

I have written a small Go program that recursively traverses a directory and uploads the files in it. There are approximately 93K+ items in the directory.
After a while I get the following error:
Got error uploading file: /Users/randolphhill/Fat-Tree-Business/SandBox/DDD/heydoc/ios/Pods/gRPC-Core/src/core/ext/transport/chttp2/alpn/alpn.h
operation error S3: PutObject, https response error StatusCode: 0, RequestID: , HostID: , canceled, context deadline exceeded.
Below is the code snippet
func PutFile(c context.Context, api S3PutObjectAPI, input *s3.PutObjectInput) (*s3.PutObjectOutput, error) {
    return api.PutObject(c, input)
}

func PutFileS3(dir, filename, bucket, reg string) error {
    var cfg aws.Config
    st, err := fthash.Filehash(dir + filename)
    if err != nil {
        panic("configuration error, " + err.Error())
        return err
    }
    m := make(map[string]string)
    m["hashcode"] = st
    cfg, err = config.LoadDefaultConfig(context.TODO(), config.WithRegion(reg))
    if err != nil {
        panic("configuration error, " + err.Error())
    }
    client := s3.NewFromConfig(cfg)
    tmp := "backup" + dir + filename
    uri := strings.Replace(tmp, " ", "##,##", -1)
    if checkFileOnS3(client, bucket, uri, st) {
        fmt.Println(" FILE EXIST")
        return nil
    }
    file, err2 := os.Open(dir + filename)
    defer file.Close()
    if err2 != nil {
        fmt.Println("Unable to open file " + filename)
        return err2
    }
    tmp = "backup" + dir + filename
    //uri := "backup" + dir + filename
    uri = strings.Replace(tmp, " ", "##,##", -1)
    input := &s3.PutObjectInput{
        Bucket:   &bucket,
        Key:      aws.String(uri),
        //Key:    &filename,
        Body:     file,
        Metadata: m,
    }
    ctx, cancelFn := context.WithTimeout(context.TODO(), 10*time.Second)
    defer cancelFn()
    _, err2 = PutFile(ctx, client, input)
    if err2 != nil {
        fmt.Println("Got error uploading file:", dir+filename)
        fmt.Println(err2)
        return err2
    }
    return nil
}
You've added a 10 second timeout here:
ctx, cancelFn := context.WithTimeout(context.TODO(), 10*time.Second)
defer cancelFn()
_, err2 = PutFile(ctx, client, input)
if err2 != nil {
    fmt.Println("Got error uploading file:", dir+filename)
    fmt.Println(err2)
    return err2
}
After 10 seconds, the call to PutFile will exit with a context error. You likely just need to increase the timeout if you have files that take longer to upload.
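For example, a sketch with a larger timeout (5 minutes is an arbitrary value here; pick one that matches your file sizes and bandwidth):

// Give each upload more headroom than 10 seconds.
ctx, cancelFn := context.WithTimeout(context.TODO(), 5*time.Minute)
defer cancelFn()
_, err2 = PutFile(ctx, client, input)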
package main

import (
    // "html/template"
    "log"
    "net/http"
    "os"

    "github.com/gin-gonic/gin"
    "github.com/joho/godotenv"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
)

var AccessKeyID string
var SecretAccessKey string
var MyRegion string
var MyBucket string
var filepath string

// GetEnvWithKey : get env value
func GetEnvWithKey(key string) string {
    return os.Getenv(key)
}

func LoadEnv() {
    err := godotenv.Load(".env")
    if err != nil {
        log.Fatalf("Error loading .env file")
        os.Exit(1)
    }
}

func ConnectAws() *session.Session {
    AccessKeyID = GetEnvWithKey("AWS_ACCESS_KEY_ID")
    SecretAccessKey = GetEnvWithKey("AWS_SECRET_ACCESS_KEY")
    MyRegion = GetEnvWithKey("AWS_REGION")
    sess, err := session.NewSession(
        &aws.Config{
            Region: aws.String(MyRegion),
            Credentials: credentials.NewStaticCredentials(
                AccessKeyID,
                SecretAccessKey,
                "", // a token will be created when the session is used
            ),
        })
    if err != nil {
        panic(err)
    }
    return sess
}

func SetupRouter(sess *session.Session) {
    router := gin.Default()
    router.Use(func(c *gin.Context) {
        c.Set("sess", sess)
        c.Next()
    })
    // router.Get("/upload", Form)
    router.POST("/upload", UploadImage)
    // router.GET("/image", controllers.DisplayImage)
    _ = router.Run(":4000")
}

func UploadImage(c *gin.Context) {
    sess := c.MustGet("sess").(*session.Session)
    uploader := s3manager.NewUploader(sess)
    MyBucket = GetEnvWithKey("BUCKET_NAME")
    file, header, err := c.Request.FormFile("photo")
    filename := header.Filename
    // upload to the s3 bucket
    up, err := uploader.Upload(&s3manager.UploadInput{
        Bucket: aws.String(MyBucket),
        // ACL: aws.String("public-read"),
        Key:  aws.String(filename),
        Body: file,
    })
    if err != nil {
        c.JSON(http.StatusInternalServerError, gin.H{
            "error":    "Failed to upload file",
            "uploader": up,
        })
        return
    }
    filepath = "https://" + MyBucket + "." + "s3-" + MyRegion + ".amazonaws.com/" + filename
    c.JSON(http.StatusOK, gin.H{
        "filepath": filepath,
    })
}

func main() {
    LoadEnv()
    sess := ConnectAws()
    router := gin.Default()
    router.Use(func(c *gin.Context) {
        c.Set("sess", sess)
        c.Next()
    })
    router.POST("/upload", UploadImage)
    //router.LoadHTMLGlob("templates/*")
    //router.GET("/image", func(c *gin.Context) {
    //    c.HTML(http.StatusOK, "index.tmpl", gin.H{
    //        "title": "Main website",
    //    })
    //})
    _ = router.Run(":4000")
}

No output to error file

I'm coding a little Go program.
It reads files in a directory line by line, reading only lines with a certain prefix, normalizes the data, and outputs it to one of two files, depending on whether the normalized record has a certain number of elements.
Data is being written to the data file, but errors are not being written to the errors file.
Debugging, I see no issue.
Any help is much appreciated.
Thanks,
Martin
package main

import (
    "bufio"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "strings"
)

func main() {
    // Output file - Data
    if _, err := os.Stat("allData.txt"); os.IsNotExist(err) {
        var file, err = os.Create("allData.txt")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer file.Close()
    }
    file, err := os.OpenFile("allData.txt", os.O_WRONLY|os.O_APPEND, 0644)
    if err != nil {
        panic(err)
    }
    w := bufio.NewWriter(file)
    // Output file - Errors
    if _, err := os.Stat("errorData.txt"); os.IsNotExist(err) {
        var fileError, err = os.Create("errorData.txt")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer fileError.Close()
    }
    fileError, err := os.OpenFile("errorData.txt", os.O_WRONLY|os.O_APPEND, 0644)
    if err != nil {
        panic(err)
    }
    z := bufio.NewWriter(fileError)
    // Read directory
    files, err := ioutil.ReadDir("../")
    if err != nil {
        log.Fatal(err)
    }
    // Build file path
    for _, f := range files {
        fName := string(f.Name())
        sPath := string("../" + fName)
        sFile, err := os.Open(sPath)
        if err != nil {
            fmt.Println(err)
            return
        }
        // Create scanner
        scanner := bufio.NewScanner(sFile)
        scanner.Split(bufio.ScanLines)
        var lines []string
        // This is the buffer now
        for scanner.Scan() {
            lines = append(lines, scanner.Text())
        }
        for _, line := range lines {
            sRecordC := strings.HasPrefix((line), "DATA:")
            if sRecordC {
                splitted := strings.Split(line, " ")
                splittedNoSpaces := deleteEmpty(splitted)
                if len(splittedNoSpaces) == 11 {
                    splittedString := strings.Join(splittedNoSpaces, " ")
                    sFinalRecord := string(splittedString + "\r\n")
                    if _, err = fmt.Fprintf(w, sFinalRecord); err != nil {
                    }
                }
                if len(splittedNoSpaces) < 11 {
                    splitted := strings.Split(line, " ")
                    splittedNoSpaces := deleteEmpty(splitted)
                    splittedString := strings.Join(splittedNoSpaces, " ")
                    sFinalRecord := string(splittedString + "\r\n")
                    if _, err = fmt.Fprintf(z, sFinalRecord); err != nil {
                    }
                    err = fileError.Sync()
                    if err != nil {
                        log.Fatal(err)
                    }
                }
            }
        }
    }
    err = file.Sync()
    if err != nil {
        log.Fatal(err)
    }
}

// Delete empty array elements
func deleteEmpty(s []string) []string {
    var r []string
    for _, str := range s {
        if str != "" {
            r = append(r, str)
        }
    }
    return r
}
Don't open the file multiple times, and don't check for the file's existence before creating it; just use the os.O_CREATE flag. You're also not deferring the correct os.File.Close call, because the file is opened multiple times.
When using a bufio.Writer, you should always call Flush() to ensure that all buffered data has been written to the underlying io.Writer.
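Putting that together, the open/write pattern could look like this sketch (same file names as in the question):

file, err := os.OpenFile("allData.txt", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
    panic(err)
}
defer file.Close()

w := bufio.NewWriter(file)
defer w.Flush() // deferred last, so it runs before file.Close, flushing buffered data

// ... write records to w ...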

Golang TCP File Transfer Gets Stuck In the Middle

I'm having a file transfer issue over TCP in Go. The transfer works sometimes, and sometimes it gets stuck in the middle. When it gets stuck, it looks like it is expecting data on the connection, but there is no data and no error, so it blocks indefinitely. To make things more confusing, it shows this behavior for the same file, i.e., for the same file it sometimes works and sometimes doesn't.
This is how my program works: it listens for incoming requests, which are in JSON format, and performs different operations based on the request type. I'm posting the code segment related to file transfer.
server.go
package main

import (
    "bufio"
    "encoding/json"
    "fmt"
    _ "io"
    "net"
    "os"
)

const (
    COMMAND_RECEIVE_FILE = "TRANSFER_FILE"
    COMMAND_EXIT         = "EXIT"
    CONNECTION_TYPE      = "tcp"
    CONNECTION_PORT      = "3645"
    CONNECTION_HOST      = ""
    BUFFER_SIZE          = 1024
)

type Command struct {
    Identifier string `json:"identifier"`
    Name       string `json:"name"`
    Size       int64  `json:"size"`
}

type Result struct {
    Message string `json:"message"`
}

func receiveFile(connection net.Conn, fileName string, fileSize int64) Result {
    fmt.Println("Receiving file")
    result := Result{Message: ""}
    file, err := os.Create(fileName)
    if err != nil {
        fmt.Println(err)
        result.Message = "Error opening file: " + fileName
        return result
    }
    defer file.Close()
    fileBuffer := make([]byte, BUFFER_SIZE)
    bytesRead := int64(0)
    count := 0
    for {
        if fileSize-bytesRead < int64(BUFFER_SIZE) {
            fileBuffer = make([]byte, fileSize-bytesRead)
        }
        fmt.Println("Reading ", BUFFER_SIZE, " bytes of data")
        n, err := connection.Read(fileBuffer)
        count++
        fmt.Println("Completed reading", n, " bytes of data, count=", count)
        file.Write(fileBuffer[0:n])
        bytesRead += int64(n)
        if err != nil {
            result.Message = "File transfer incomplete"
            break
        }
        if bytesRead >= fileSize {
            result.Message = "File transfer complete"
            break
        }
    }
    file.Chmod(0777)
    return result
}

func main() {
    ln, err := net.Listen(CONNECTION_TYPE, CONNECTION_HOST+":"+CONNECTION_PORT)
    if err != nil {
        fmt.Println("error opening a tcp connection")
    }
    for {
        fmt.Println("waiting for new connection")
        conn, err := ln.Accept()
        if err != nil {
        } else {
            var commandStr string
            reader := bufio.NewReader(conn)
            var exitStatus = 1
            for exitStatus == 1 {
                fmt.Println("Waiting for new command: ")
                line, _, err := reader.ReadLine()
                if err != nil {
                    conn.Close()
                    exitStatus = 0
                    break
                } else {
                    fmt.Println("Size read :", len(line))
                }
                commandStr = string(line)
                fmt.Println("CommandStr: ", commandStr)
                var msg Command
                err = json.Unmarshal([]byte(commandStr), &msg)
                if err != nil {
                    fmt.Println("Error")
                    conn.Close()
                    break
                }
                result := Result{}
                fmt.Println("Received new command: ", msg.Identifier)
                switch msg.Identifier {
                case COMMAND_RECEIVE_FILE:
                    result = receiveFile(conn, msg.Name, msg.Size)
                case COMMAND_EXIT:
                    exitStatus = 0
                    conn.Close()
                default:
                    result = Result{Message: "Unrecognized command"}
                }
                out, _ := json.Marshal(result)
                fmt.Fprint(conn, string(out)+"\n")
            }
        }
    }
}
test.go
package main

import (
    "bufio"
    "encoding/json"
    "fmt"
    "io"
    "log"
    "net"
    "os"
    "strings"
    _ "time"
)

const (
    COMMAND_TRANSFER_FILE = "TRANSFER_FILE"
    COMMAND_EXIT          = "EXIT"
    CONNECTION_TYPE       = "tcp"
    CONNECTION_PORT       = "3645"
    CONNECTION_HOST       = ""
)

type Command struct {
    Identifier string `json:"identifier"`
    Name       string `json:"name"`
    Size       int64  `json:"size"`
}

type Result struct {
    Message string `json:"message"`
}

func main() {
    conn, _ := net.Dial(CONNECTION_TYPE, CONNECTION_HOST+":"+CONNECTION_PORT)
    decoder := json.NewDecoder(conn)
    com := Command{}
    sourceFileName := ""
    destinationFileName := ""
    for {
        com = Command{}
        reader := bufio.NewReader(os.Stdin)
        identifier, _ := reader.ReadString('\n')
        com.Identifier = strings.TrimSpace(identifier)
        switch com.Identifier {
        case COMMAND_TRANSFER_FILE:
            fmt.Print("Source file name:")
            sourceFileName, _ = reader.ReadString('\n')
            sourceFileName = strings.TrimSpace(sourceFileName)
            fmt.Print("Destination file name:")
            destinationFileName, _ = reader.ReadString('\n')
            com.Name = strings.TrimSpace(destinationFileName)
            file, err := os.Open(sourceFileName)
            if err != nil {
                log.Fatal(err)
            }
            defer file.Close()
            fileInfo, err := file.Stat()
            fileSize := fileInfo.Size()
            com.Size = fileSize
        case COMMAND_EXIT:
            conn.Close()
            os.Exit(0)
        }
        out, _ := json.Marshal(com)
        conn.Write([]byte(string(out) + "\n"))
        if strings.Compare(com.Identifier, COMMAND_TRANSFER_FILE) == 0 {
            file, err := os.Open(sourceFileName)
            if err != nil {
                log.Fatal(err)
            }
            defer file.Close()
            n, err := io.Copy(conn, file)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(n, "bytes sent")
        }
        var msg Result
        err := decoder.Decode(&msg)
        if err != nil {
            fmt.Println(err)
        }
        fmt.Println(msg)
    }
}
I tested it on both Linux and Windows and it shows the same behavior on both systems. The only thing I can think of is that the sender is faster than the receiver, even though I'm running both on the same machine. If that is the case, what would be a good way to solve it, other than a handshaking mechanism?
You can't wrap the net.Conn in a bufio.Reader and then continue to read from the net.Conn directly. The reason your function blocks is that data is left buffered in the reader, so the reads from the connection never reach the expected message size.
You need to pass the reader to the receiveFile function in order not to lose the buffered data.
You are also ignoring the isPrefix return value from ReadLine. I would follow the documentation and use ReadBytes instead if you're not going to handle all the cases of that method.
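A sketch of what that could look like, using io.CopyN so the already-buffered bytes are consumed too (error handling trimmed to the essentials):

// receiveFile now reads from the same reader that parsed the command line,
// so bytes already buffered by bufio are not lost.
func receiveFile(r io.Reader, fileName string, fileSize int64) Result {
    result := Result{}
    file, err := os.Create(fileName)
    if err != nil {
        result.Message = "Error opening file: " + fileName
        return result
    }
    defer file.Close()
    // io.CopyN copies exactly fileSize bytes from r, or returns an error.
    if _, err := io.CopyN(file, r, fileSize); err != nil {
        result.Message = "File transfer incomplete"
        return result
    }
    result.Message = "File transfer complete"
    return result
}

and at the call site:

result = receiveFile(reader, msg.Name, msg.Size)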

A function by type? How to refactor it?

I'm reading JSON data from a file and sending it to a remote server using gob encoding, but I'm not happy with my code. I tried several ways to write a more generic function but failed; the only way my code works is with identical functions for every type.
I tried using a type switch, but that still requires repeating the code that unmarshals and gob-encodes the data.
Please, could somebody help me understand how to improve this?
Two types:
type Data1 struct {
    ID      int
    Message string
}

type Data2 struct {
    Serial int
    Height float64
    Loss   float64
    Temp   float64
    Oil    float64
}
Function for the Data1 type:
func SenderData1(address string, buff *filebuffer.Buffer) {
    var conn net.Conn
    var err error
    var line string
    var obj Data1
    for {
        line, err = buff.Pop()
        if err != nil {
            log.Critical("Error Poping:", err.Error())
            continue
        }
        if len(line) == 0 {
            time.Sleep(1 * time.Second)
            continue
        }
        if err := json.Unmarshal([]byte(line), &obj); err != nil {
            log.Critical("Error Unmarshaling:", err.Error())
            continue
        }
        for {
            log.Info("Trying to connect with Server...")
            conn, err = net.Dial(PROTO, address)
            // If err try to connect again
            if err != nil {
                log.Error("Error connecting:", err.Error())
                time.Sleep(1 * time.Second)
                continue
            }
            // If connected break the loop
            break
        }
        log.Debug("Sending ", obj, " to:", address)
        encoder := gob.NewEncoder(conn)
        err := encoder.Encode(obj)
        if err != nil {
            log.Critical("Error Encoding Gob:", err.Error())
        }
        // Timer between every sending, i.e. reading from buffer
        time.Sleep(300 * time.Millisecond)
        conn.Close()
    }
}
The same function, but for the Data2 type:
func SenderData2(address string, buff *filebuffer.Buffer) {
    var conn net.Conn
    var err error
    var line string
    var obj Data2
    for {
        line, err = buff.Pop()
        if err != nil {
            log.Critical("Error Poping:", err.Error())
            continue
        }
        if len(line) == 0 {
            time.Sleep(1 * time.Second)
            continue
        }
        if err := json.Unmarshal([]byte(line), &obj); err != nil {
            log.Critical("Error Unmarshaling:", err.Error())
            continue
        }
        for {
            log.Info("Trying to connect with Server...")
            conn, err = net.Dial(PROTO, address)
            // If err try to connect again
            if err != nil {
                log.Error("Error connecting:", err.Error())
                time.Sleep(1 * time.Second)
                continue
            }
            // If connected break the loop
            break
        }
        log.Debug("Sending ", obj, " to:", address)
        encoder := gob.NewEncoder(conn)
        err := encoder.Encode(obj)
        if err != nil {
            log.Critical("Error Encoding Gob:", err.Error())
        }
        // Timer between every sending, i.e. reading from buffer
        time.Sleep(300 * time.Millisecond)
        conn.Close()
    }
}
Add a parameter that allocates a new value of the type to receive and send:
func SenderData1(address string, buff *filebuffer.Buffer) {
    SenderData(address, buff, func() interface{} { return new(Data1) })
}

func SenderData2(address string, buff *filebuffer.Buffer) {
    SenderData(address, buff, func() interface{} { return new(Data2) })
}

func SenderData(address string, buff *filebuffer.Buffer, newfn func() interface{}) {
    var conn net.Conn
    var err error
    var line string
    for {
        line, err = buff.Pop()
        if err != nil {
            log.Critical("Error Poping:", err.Error())
            continue
        }
        if len(line) == 0 {
            time.Sleep(1 * time.Second)
            continue
        }
        obj := newfn()
        if err := json.Unmarshal([]byte(line), obj); err != nil {
            log.Critical("Error Unmarshaling:", err.Error())
            continue
        }
        for {
            log.Info("Trying to connect with Server...")
            conn, err = net.Dial(PROTO, address)
            // If err try to connect again
            if err != nil {
                log.Error("Error connecting:", err.Error())
                time.Sleep(1 * time.Second)
                continue
            }
            // If connected break the loop
            break
        }
        log.Debug("Sending ", obj, " to:", address)
        encoder := gob.NewEncoder(conn)
        err := encoder.Encode(obj)
        if err != nil {
            log.Critical("Error Encoding Gob:", err.Error())
        }
        // Timer between every sending, i.e. reading from buffer
        time.Sleep(300 * time.Millisecond)
        conn.Close()
    }
}
The code in this answer allocates a new value every time through the loop while the code in the question allocates the object once. Allocating each time through the loop prevents crosstalk between received JSON objects.
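On Go 1.18 and later, a type parameter can replace the factory function. A sketch of the same idea (the dial-retry loop from the answer is simplified to a single attempt here):

// A generic variant: the type parameter replaces the factory function,
// and a fresh *T is still allocated on every iteration.
func SenderData[T any](address string, buff *filebuffer.Buffer) {
    for {
        line, err := buff.Pop()
        if err != nil {
            log.Critical("Error Poping:", err.Error())
            continue
        }
        if len(line) == 0 {
            time.Sleep(1 * time.Second)
            continue
        }
        obj := new(T) // fresh value each iteration, no crosstalk
        if err := json.Unmarshal([]byte(line), obj); err != nil {
            log.Critical("Error Unmarshaling:", err.Error())
            continue
        }
        conn, err := net.Dial(PROTO, address)
        if err != nil {
            log.Error("Error connecting:", err.Error())
            continue
        }
        if err := gob.NewEncoder(conn).Encode(obj); err != nil {
            log.Critical("Error Encoding Gob:", err.Error())
        }
        conn.Close()
        time.Sleep(300 * time.Millisecond)
    }
}

Callers then pick the type at the call site:

go SenderData[Data1](address, buff)
go SenderData[Data2](address, buff)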

Reading CSV file in Go

Here is a code snippet that reads a CSV file:
func parseLocation(file string) (map[string]Point, error) {
    f, err := os.Open(file)
    defer f.Close()
    if err != nil {
        return nil, err
    }
    lines, err := csv.NewReader(f).ReadAll()
    if err != nil {
        return nil, err
    }
    locations := make(map[string]Point)
    for _, line := range lines {
        name := line[0]
        lat, laterr := strconv.ParseFloat(line[1], 64)
        if laterr != nil {
            return nil, laterr
        }
        lon, lonerr := strconv.ParseFloat(line[2], 64)
        if lonerr != nil {
            return nil, lonerr
        }
        locations[name] = Point{lat, lon}
    }
    return locations, nil
}
Is there a way to improve the readability of this code? There is a lot of if and nil noise.
Go now has a csv package for this: encoding/csv. You can find the docs here: https://golang.org/pkg/encoding/csv/
There are a couple of good examples in the docs. Here is a helper function I created that reads a CSV file and returns its records.
package main

import (
    "encoding/csv"
    "fmt"
    "log"
    "os"
)

func readCsvFile(filePath string) [][]string {
    f, err := os.Open(filePath)
    if err != nil {
        log.Fatal("Unable to read input file "+filePath, err)
    }
    defer f.Close()
    csvReader := csv.NewReader(f)
    records, err := csvReader.ReadAll()
    if err != nil {
        log.Fatal("Unable to parse file as CSV for "+filePath, err)
    }
    return records
}

func main() {
    records := readCsvFile("../tasks.csv")
    fmt.Println(records)
}
Go is a very verbose language; however, you could use something like this:
// predeclare err
func parseLocation(file string) (locations map[string]*Point, err error) {
    f, err := os.Open(file)
    if err != nil {
        return nil, err
    }
    defer f.Close() // this needs to be after the err check
    lines, err := csv.NewReader(f).ReadAll()
    if err != nil {
        return nil, err
    }
    // already defined in declaration, no need for :=
    locations = make(map[string]*Point, len(lines))
    var lat, lon float64 // predeclare lat, lon
    for _, line := range lines {
        // shorter, cleaner, and since we already have lat and err declared, we can do this
        if lat, err = strconv.ParseFloat(line[1], 64); err != nil {
            return nil, err
        }
        if lon, err = strconv.ParseFloat(line[2], 64); err != nil {
            return nil, err
        }
        locations[line[0]] = &Point{lat, lon}
    }
    return locations, nil
}
Edit: a more efficient and proper version was posted by @Dustin in the comments; I'm adding it here for completeness' sake:
func parseLocation(file string) (map[string]*Point, error) {
    f, err := os.Open(file)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    csvr := csv.NewReader(f)
    locations := map[string]*Point{}
    for {
        row, err := csvr.Read()
        if err != nil {
            if err == io.EOF {
                err = nil
            }
            return locations, err
        }
        p := &Point{}
        if p.lat, err = strconv.ParseFloat(row[1], 64); err != nil {
            return nil, err
        }
        if p.lon, err = strconv.ParseFloat(row[2], 64); err != nil {
            return nil, err
        }
        locations[row[0]] = p
    }
}
playground
I basically copied my answer from here: https://www.dotnetperls.com/csv-go. For me, this was a better answer than what I found on stackoverflow.
package main

import (
    "encoding/csv"
    "fmt"
    "io"
    "os"
)

func ReadCsvFile(filePath string) {
    // Load a csv file.
    f, _ := os.Open(filePath)
    // Create a new reader.
    r := csv.NewReader(f)
    for {
        record, err := r.Read()
        // Stop at EOF.
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        // Display the record, its length, and each individual element of the slice.
        fmt.Println(record)
        fmt.Println(len(record))
        for value := range record {
            fmt.Printf("  %v\n", record[value])
        }
    }
}
I also dislike the verbosity of the default Reader, so I made a new type that is similar to bufio.Scanner:
package main

import "encoding/csv"
import "io"

type Scanner struct {
    Reader *csv.Reader
    Head   map[string]int
    Row    []string
}

func NewScanner(o io.Reader) Scanner {
    csv_o := csv.NewReader(o)
    a, e := csv_o.Read()
    if e != nil {
        return Scanner{}
    }
    m := map[string]int{}
    for n, s := range a {
        m[s] = n
    }
    return Scanner{Reader: csv_o, Head: m}
}

func (o *Scanner) Scan() bool {
    a, e := o.Reader.Read()
    o.Row = a
    return e == nil
}

func (o Scanner) Text(s string) string {
    return o.Row[o.Head[s]]
}
Example:
package main

import "strings"

func main() {
    s := `Month,Day
January,Sunday
February,Monday`
    o := NewScanner(strings.NewReader(s))
    for o.Scan() {
        println(o.Text("Month"), o.Text("Day"))
    }
}
https://golang.org/pkg/encoding/csv
You can also read the contents of a directory to load all the CSV files, and then read each of those CSV files with goroutines:
csv file:
101,300.00,11000901,1155686400
102,250.99,11000902,1432339200
main.go file:
const sourcePath string = "./source"

func main() {
    dir, _ := os.Open(sourcePath)
    files, _ := dir.Readdir(-1)
    for _, file := range files {
        fmt.Println("SINGLE FILE: ")
        fmt.Println(file.Name())
        filePath := sourcePath + "/" + file.Name()
        f, _ := os.Open(filePath)
        defer f.Close()
        // os.Remove(filePath)
        go func(file io.Reader) {
            records, _ := csv.NewReader(file).ReadAll()
            for _, row := range records {
                fmt.Println(row)
            }
        }(f)
        time.Sleep(10 * time.Millisecond) // give the goroutines some time to execute
    }
}
And the OUTPUT will be:
$ go run main.go
SINGLE FILE:
batch01.csv
[101 300.00 11000901 1155686400]
[102 250.99 11000902 1432339200]
Below is an example with the Invoice struct:
func main() {
    dir, _ := os.Open(sourcePath)
    files, _ := dir.Readdir(-1)
    for _, file := range files {
        fmt.Println("SINGLE FILE: ")
        fmt.Println(file.Name())
        filePath := sourcePath + "/" + file.Name()
        f, _ := os.Open(filePath)
        defer f.Close()
        go func(file io.Reader) {
            records, _ := csv.NewReader(file).ReadAll()
            for _, row := range records {
                invoice := new(Invoice)
                invoice.InvoiceNumber = row[0]
                invoice.Amount, _ = strconv.ParseFloat(row[1], 64)
                invoice.OrderID, _ = strconv.Atoi(row[2])
                unixTime, _ := strconv.ParseInt(row[3], 10, 64)
                invoice.Date = time.Unix(unixTime, 0)
                fmt.Printf("Received invoice `%v` for $ %.2f \n", invoice.InvoiceNumber, invoice.Amount)
            }
        }(f)
        time.Sleep(10 * time.Millisecond)
    }
}

type Invoice struct {
    InvoiceNumber string
    Amount        float64
    OrderID       int
    Date          time.Time
}
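A caveat about the examples above: time.Sleep is not a reliable way to wait for goroutines, and defer f.Close() inside a loop only runs when main returns. A sync.WaitGroup is the usual way to wait; here is a sketch of the reading loop (reusing files and sourcePath from the example; add "sync" and "log" to the imports):

var wg sync.WaitGroup
for _, file := range files {
    filePath := sourcePath + "/" + file.Name()
    wg.Add(1)
    go func(path string) {
        defer wg.Done()
        f, err := os.Open(path)
        if err != nil {
            log.Println(err)
            return
        }
        defer f.Close() // closed when this goroutine finishes
        records, err := csv.NewReader(f).ReadAll()
        if err != nil {
            log.Println(err)
            return
        }
        for _, row := range records {
            fmt.Println(row)
        }
    }(filePath)
}
wg.Wait() // block until every file has been processed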
