Golang sqlite database is locked (5) (SQLITE_BUSY) Error/Warning - windows

The following simplified code just goes over a sample table and updates it row by row (assume this update cannot be done with a single SQL query, and that it might apply to only some of the rows). However, when using the sqlite driver, it fails on the update with the "database is locked" warning.
I am running on Windows with the Go sqlite driver. This error does not happen with a PostgreSQL database.
package main

import (
    "fmt"
    "log"

    "github.com/glebarez/sqlite"
    "gorm.io/gorm"
    // "modernc.org/sqlite"
)

type Tbl struct {
    ID   uint `gorm:"primaryKey;autoIncrement"`
    Name string
    Code int
}

var dbConn *gorm.DB

func main() {
    db, err := gorm.Open(sqlite.Open("sqlite.db"), &gorm.Config{})
    if err != nil {
        log.Fatal(err)
    }
    err = db.AutoMigrate(&Tbl{})
    if err != nil {
        log.Fatal(err)
    }

    // Clear the table and add sample data.
    db.Exec(`delete from tbls where 1=1`)
    tblslice := make([]Tbl, 50)
    for i := range tblslice {
        tblslice[i].Name = fmt.Sprintf("This is line %d", i)
        tblslice[i].Code = i * 10
    }
    db.Create(&tblslice)

    // Update each line (assume it cannot be done in plain SQL; this is just a sample).
    rows, err := db.Model(&Tbl{}).Rows()
    if err != nil {
        log.Fatal("Error scanning DB rows: ", err)
    }
    defer rows.Close()

    var t Tbl
    for rows.Next() {
        // ScanRows is a method of gorm.DB; it scans a row into a struct.
        err := db.ScanRows(rows, &t)
        if err != nil {
            log.Fatal("Error scanning DB rows: ", err)
        }
        t.Name = " NAME " + t.Name
        // ---- this throws the warning: database is locked (5) (SQLITE_BUSY) ----
        db.Save(&t)
    }
}
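A common workaround for this pattern (a sketch, not from the original post): SQLite holds a read lock while the rows cursor is open, so a write issued mid-iteration can collide with it. Reading everything first and only then writing avoids the collision; setting a busy timeout on the connection is another frequently suggested mitigation.

// updateAll is a sketch: load all rows before updating, so the read
// cursor is closed before any write begins. Reuses db and Tbl from the
// question above.
func updateAll(db *gorm.DB) error {
    var all []Tbl
    if err := db.Find(&all).Error; err != nil {
        return err
    }
    for i := range all {
        all[i].Name = " NAME " + all[i].Name
        if err := db.Save(&all[i]).Error; err != nil {
            return err
        }
    }
    return nil
}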

Related

rows.Next() halts after some number of rows

I'm a newbie in Golang, so this may be simple for professionals, but I got stuck with no idea what to do next.
I'm writing a migration app that extracts data from an Oracle DB and, after some conversion, inserts it into Postgres one by one.
The same query run natively in the DB console returns about 400k rows and takes about 13 seconds to finish.
Extracting the data from Oracle with rows.Next() shows some strange behavior:
the first 25 rows are extracted quickly enough, then there is a pause of a few seconds, then 25 more rows, and so on, until it pauses "forever".
Here is the function:
func GetHrTicketsFromOra() (*sql.Rows, error) {
    rows, err := oraDB.Query("select id,STATE_ID,REMEDY_ID,HEADER,CREATE_DATE,TEXT,SOLUTION,SOLUTION_USER_LOGIN,LAST_SOLUTION_DATE from TICKET where SOLUTION_GROUP_ID = 5549")
    if err != nil {
        println("Error while getting rows from Ora")
        return nil, err
    }
    log.Println("Finished legacy tickets export")
    return rows, err
}
And here I export data:
func ConvertRows(rows *sql.Rows, c chan util.ArchTicket, m chan int) error {
    log.Println("Conversion start")
    defer func(rows *sql.Rows) {
        err := rows.Close()
        if err != nil {
            log.Println("ORA connection closed", err)
            return
        }
    }(rows)
    for rows.Next() {
        log.Println("Reading the ticket")
        ot := util.OraTicket{}
        at := util.ArchTicket{}
        err := rows.Scan(&ot.ID, &ot.StateId, &ot.RemedyId, &ot.Header, &ot.CreateDate, &ot.Text, &ot.Solution, &ot.SolutionUserLogin, &ot.LastSolutionDate)
        if err != nil {
            log.Println("Error while reading row", err)
            return err
        }
        at = convertLegTOArch(ot)
        c <- at
    }
    if err := rows.Err(); err != nil {
        log.Println("Error while reading row", err)
        return err
    }
    m <- 1
    return nil
}
UPD: I use the "github.com/sijms/go-ora/v2" driver.
UPD2: It seems the root cause of the problem is in the TEXT and SOLUTION fields of the result rows. They are varchar and can be quite large. Removing them from the query changes the execution time from 13 s to 258 ms. But I still have no idea what to do about it.
UPD3.
Minimal reproducible example
package main

import (
    "database/sql"
    "log"

    _ "github.com/sijms/go-ora/v2"
)

var oraDB *sql.DB
var con = "oracle://login:password@ora_db:1521/database"

func InitOraDB(dataSourceName string) error {
    var err error
    oraDB, err = sql.Open("oracle", dataSourceName)
    if err != nil {
        return err
    }
    return oraDB.Ping()
}

func GetHrTicketsFromOra() {
    var ot string
    rows, err := oraDB.Query("select TEXT from TICKET where SOLUTION_GROUP_ID = 5549")
    if err != nil {
        println("Error while getting rows from Ora")
        return
    }
    defer rows.Close()
    for rows.Next() {
        log.Println("Reading the ticket")
        err := rows.Scan(&ot)
        if err != nil {
            log.Println("Reading failed", err)
        }
        log.Println("Read:")
    }
    log.Println("Finished legacy tickets export")
}

func main() {
    err := InitOraDB(con)
    if err != nil {
        log.Println("Error connection Ora")
    }
    GetHrTicketsFromOra()
}
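One knob worth trying (an assumption based on go-ora's connection-string options, not something from the original post): the driver fetches rows from Oracle in batches, and the default batch size lines up with the 25-row bursts described above. Raising it via the PREFETCH_ROWS URL option may change the picture:

package main

import (
    "database/sql"
    "log"

    go_ora "github.com/sijms/go-ora/v2"
)

func main() {
    // Sketch (assumption): build a connection string with a larger
    // prefetch size; host, service, and credentials are placeholders.
    urlOptions := map[string]string{
        "PREFETCH_ROWS": "500",
    }
    connStr := go_ora.BuildUrl("ora_db", 1521, "database", "login", "password", urlOptions)
    db, err := sql.Open("oracle", connStr)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
}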

Reading BigQuery in Golang. Not all expected results are given. What to do?

The SQL runs perfectly in the Query Editor, yet after assigning the results to a struct the data has different values. Why is that?
var RunQuery = func(req *http.Request, query string) (*bigquery.RowIterator, error) {
    ctx := appengine.NewContext(req)
    ctxWithDeadline, cancel := context.WithTimeout(ctx, 30*time.Minute)
    defer cancel()
    bqClient, bqErr := bigquery.NewClient(ctxWithDeadline, project, option.WithCredentialsFile(serviceAccount))
    if bqErr != nil {
        log.Errorf(ctx, "%v", bqErr)
        return nil, bqErr
    }
    q := bqClient.Query(query)
    job, err := q.Run(ctx)
    if err != nil {
        log.Errorf(ctx, "%v", err)
        return nil, err
    }
    status, err := job.Wait(ctx)
    if err != nil {
        log.Errorf(ctx, "%v", err)
        return nil, err
    }
    if err := status.Err(); err != nil {
        log.Errorf(ctx, "%v", err)
        return nil, err
    }
    it, err := job.Read(ctx)
    if err != nil {
        log.Errorf(ctx, "%v", err)
        return nil, err
    }
    log.Infof(ctx, "Total Rows: %v", it.TotalRows)
    return it, nil
}
type Customers struct {
    CustomerName string `bigquery:"customer_name"`
    CustomerAge  int    `bigquery:"customer_age"`
}

var rowsRead int

func main() {
    query := `SELECT
        name as customer_name,
        age as customer_age
    FROM customer_table
    WHERE customerStatus = '0'`
    customerInformation, customerInfoErr := RunQuery(req, query)
    if customerInfoErr != nil {
        log.Errorf(ctx, "Fetching customer information error :: %v", customerInfoErr)
        return
    }
    for {
        var row Customers
        err := customerInformation.Next(&row)
        log.Infof(ctx, "row %v", row)
        if err == iterator.Done {
            log.Infof(ctx, "ITERATION COMPLETE. Rows read %v", rowsRead)
            break
        }
        rowsRead++
    }
}
Let's say I have query results of:
customer_name | customer_age
cat           | 2
dog           | 3
horse         | 10
But after assigning them to the struct, the results were:
customer_name | customer_age
""            | 2
dog           | ""
""            | ""
Why is it like this? I even tested it in chunks with the limit set to 1000, with the same results. But the query results in the Query Editor are what I expect.
Solved it using the value loader bigquery.Value: instead of mapping the query results to the expected struct, I used map[string]bigquery.Value. I still don't know why mapping the query results to the struct was not working properly. Here is my solution.
for {
    row := make(map[string]bigquery.Value)
    err := customerInformation.Next(&row)
    log.Infof(ctx, "row %v", row)
    if err == iterator.Done {
        log.Infof(ctx, "ITERATION COMPLETE. Rows read %v", rowsRead)
        break
    }
    rowsRead++
}
From the documentation:
If dst is a pointer to a struct, each column in the schema will be matched with an exported field of the struct that has the same name, ignoring the case. Unmatched schema columns and struct fields will be ignored.
cloud.google.com/go/bigquery
Here you are trying to resolve customer_age to a struct field named CustomerAge. If you rename the field to Customer_Age it should match (note the quoted docs: the field must stay exported, so an all-lowercase customer_age would be ignored).
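A minimal sketch of the rename suggested above (my illustration, not from the original answer); the exported field names now match the schema columns case-insensitively, so no tags are needed:

type Customers struct {
    Customer_Name string // matches column "customer_name", ignoring case
    Customer_Age  int    // matches column "customer_age", ignoring case
}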

How to bundle an SQLite database in a Go binary?

I have tried go-bindata and packr, but those packages do not show how to pack an SQLite database file into a binary file.
I don't need to update the database in any way, I just want to read the data from it on startup.
How can I embed an SQLite database file in a Go binary file?
The SQLite driver can't read a database file from memory (e.g. from a byte slice). But you can write the data to a temporary file, and open that:
//go:generate go run gen.go

package main

import (
    "database/sql"
    "fmt"
    "io/ioutil"
    "log"
    "os"

    _ "github.com/mattn/go-sqlite3"
)

func main() {
    // Create a temporary file for the database.
    tmpDB, err := ioutil.TempFile("", "db*.sqlite3")
    if err != nil {
        log.Fatal(err)
    }

    // Remove the file on exit.
    defer func() {
        err := os.Remove(tmpDB.Name())
        if err != nil {
            log.Print(err)
        }
    }()

    // Write the embedded database to the file.
    _, err = tmpDB.Write(sqlDB)
    if err != nil {
        log.Print(err)
    }
    err = tmpDB.Close()
    if err != nil {
        log.Print(err)
    }

    // Open the DB.
    db, err := sql.Open("sqlite3", tmpDB.Name()+"?mode=ro")
    if err != nil {
        log.Fatal(err)
    }

    // Make sure it loaded correctly.
    rows, err := db.Query("select * from test")
    if err != nil {
        log.Fatal(err)
    }
    for rows.Next() {
        var c string
        err := rows.Scan(&c)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(c)
    }
}
And you can write the database to db.go with something like:
// +build generate

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "strings"
)

func main() {
    // Read the source database file.
    d, err := ioutil.ReadFile("source.sqlite3")
    if err != nil {
        log.Fatal(err)
    }
    fp, err := os.Create("db.go")
    if err != nil {
        log.Fatal(err)
    }
    _, err = fmt.Fprintf(fp, "// Code generated by gen.go; DO NOT EDIT.\n\n"+
        "package main\n\n"+
        "var sqlDB = %s\n", asbyte(d))
    if err != nil {
        log.Fatal(err)
    }
}

// asbyte renders data as a Go []byte literal.
func asbyte(s []byte) string {
    var b strings.Builder
    for i, c := range s {
        if i%19 == 0 {
            b.WriteString("\n\t\t")
        }
        b.WriteString(fmt.Sprintf("%#x, ", c))
    }
    return "[]byte{" + b.String() + "}"
}
You can also use go-bindata or packr for that if you prefer, but I don't really see an advantage.
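A later note, not part of the original answer: since Go 1.16 the standard embed package can replace the generator step entirely. This file stands in for the generated db.go:

package main

import _ "embed"

// sqlDB holds the raw database file, embedded at build time.
//
//go:embed source.sqlite3
var sqlDB []byte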
An alternative is to use an in-memory database, which may be faster depending on what you want to do.
Embed the SQL schema and the rows you want in your Go binary as strings, open a new memory database when your program starts (sql.Open("sqlite3", ":memory:")), and create the schema and insert the rows.
There is no disk access with this method, so querying it will probably be a bit faster, at the expense of slower startup times (benchmark to be sure!).
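A minimal sketch of that approach (my illustration; the schema and rows here are placeholders for the embedded strings):

package main

import (
    "database/sql"
    "log"

    _ "github.com/mattn/go-sqlite3"
)

func main() {
    // Open an in-memory database; it lives only as long as this process.
    db, err := sql.Open("sqlite3", ":memory:")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Create the schema and insert the rows from embedded strings.
    if _, err := db.Exec(`create table test (c text)`); err != nil {
        log.Fatal(err)
    }
    if _, err := db.Exec(`insert into test (c) values ('hello'), ('world')`); err != nil {
        log.Fatal(err)
    }
}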

How to use rows.Scan of Go's database/sql

I use database/sql and define a struct that maps to the DB table columns (via tag fields):
// Users ...
type Users struct {
    ID       int64  `field:"id"`
    Username string `field:"username"`
    Password string `field:"password"`
    Tel      string `field:"tel"`
}
then I query:
rows, err := db.Query(sql) // select * from users
if err != nil {
    fmt.Println(err)
}
defer rows.Close()
for rows.Next() {
    user := new(Users)
    // Works, but is unwieldy with many columns.
    err = rows.Scan(&user.ID, &user.Username, &user.Password, &user.Tel)
    // TODO: How to scan in a simpler way?
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println("user: ", user)
    list = append(list, *user)
}
if err := rows.Err(); err != nil {
    fmt.Println(err)
}
As you can see, for rows.Scan() I have to write out every column, which doesn't scale well to 20 or more columns. How can I scan in a cleaner way?
This is a good use case for reflect:
for rows.Next() {
    user := Users{}
    s := reflect.ValueOf(&user).Elem()
    numCols := s.NumField()
    // Build a slice of pointers to each struct field, in declaration order.
    columns := make([]interface{}, numCols)
    for i := 0; i < numCols; i++ {
        field := s.Field(i)
        columns[i] = field.Addr().Interface()
    }
    // Note: this assumes the query returns columns in the same order
    // as the struct fields.
    err := rows.Scan(columns...)
    if err != nil {
        log.Fatal(err)
    }
    log.Println(user)
}
You may consider using jmoiron's sqlx package. It has support for assigning to a struct.
Excerpt from the readme:
type Place struct {
    Country string
    City    sql.NullString
    TelCode int
}

places := []Place{}
err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
    fmt.Println(err)
    return
}
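When the column names differ from the field names, sqlx also honors db struct tags (my note, not part of the readme excerpt; the column names here are hypothetical):

type Office struct {
    Country string         `db:"country_name"`
    City    sql.NullString `db:"city_name"`
    TelCode int            `db:"tel_code"`
}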

many2many in Gorm, really

I'm trying to use the many-to-many relationship in gorm. However, the example is a partial snippet, and my attempt at creating a similar example snippet is failing.
package main

import (
    "github.com/jinzhu/gorm"
    _ "github.com/mattn/go-sqlite3"
)

type Part struct {
    gorm.Model
    Name string
}

type Machine struct {
    gorm.Model
    Name     string
    Subtasks []Part `gorm:"many2many:parts;"`
}

func main() {
    // Connect to the database.
    db, err := gorm.Open("sqlite3", "example.db")
    if err != nil {
        panic(err)
    }
    defer db.Close()
    db.LogMode(true)

    // Set up associations.
    if err := db.CreateTable(&Part{}).Error; err != nil {
        panic(err)
    }
    if err := db.CreateTable(&Machine{}).Related(&[]Part{}).Error; err != nil {
        panic(err)
    }
}
This panics on the last CreateTable call: panic: invalid association []
I think you have to drop the Related part; CreateTable doesn't need it as far as I can see:
if err := db.CreateTable(&Machine{}).Error; err != nil {
    panic(err)
}
Works for me
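For completeness, a sketch using AutoMigrate instead (my addition, not from the answer above); it creates both tables plus the parts join table in one call:

if err := db.AutoMigrate(&Part{}, &Machine{}).Error; err != nil {
    panic(err)
}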
