I am trying to load 300 million records from a stored procedure cursor (godror + database/sql).
"database/sql" + "github.com/godror/godror"
Making sql rows
// OpenCursorTX executes a PL/SQL block inside tx that returns a SYS_REFCURSOR
// through an OUT bind, and wraps that cursor as *sql.Rows so the caller can
// iterate it with the standard database/sql API.
//
// Returns the wrapped rows, or a non-nil error if preparing, executing or
// wrapping fails.
func (o *OracleDb) OpenCursorTX(tx *sql.Tx, sqlStmt string) (*sql.Rows, error) {
	var cur driver.Rows
	ctx := context.Background()
	stmt, err := tx.PrepareContext(ctx, sqlStmt)
	if err != nil {
		// Was logrus.Fatalf: Fatalf calls os.Exit, which killed the process on
		// any recoverable error and made the returns below unreachable.
		logrus.Errorf("error parsing cursor %s: %s", sqlStmt, err.Error())
		return nil, err
	}
	// PrefetchCount/FetchArraySize tune how many rows the driver pulls per
	// round trip while draining the ref cursor.
	if _, err := stmt.ExecContext(ctx, sql.Out{Dest: &cur}, godror.PrefetchCount(100000), godror.FetchArraySize(100000)); err != nil {
		logrus.Errorf("error exec cursor %s: %s", sqlStmt, err.Error())
		stmt.Close() // previously leaked on this path
		return nil, err
	}
	rows, err := godror.WrapRows(ctx, tx, cur)
	if err != nil {
		logrus.Errorf("error cursor wrap rows %s: %s", sqlStmt, err.Error())
		stmt.Close() // previously leaked on this path
		return nil, err
	}
	// NOTE(review): closing the statement here, before the caller has drained
	// rows, may invalidate the underlying cursor in some driver versions —
	// verify against godror's docs; consider keeping stmt open until the
	// caller is done with rows.
	if err := stmt.Close(); err != nil {
		return nil, err
	}
	return rows, nil
}
Reading rows in code
// Open the ref cursor returned by the stored procedure and stream its rows.
rows, err := l.oracleDb.OpenCursorTX(tx, "begin drs_router_read.get_rate_b_groups(po_rate_b_groups => :po_rate_b_groups); end;")
if err != nil {
return err
}
// Scan targets are reused across iterations; rbg is copied by value on
// append, so reusing the struct is safe here.
var rbg domain.RmsGroupHist
var gwgrId, direction int
var dialCode, key, rbgDBegin, rbgDEnd string
rbgMap := make(map[string][]domain.RmsGroupHist)
i:=0
for rows.Next() {
if err := rows.Scan(&gwgrId, &direction, &dialCode, &rbg.RmsgId, &rbgDBegin, &rbgDEnd); err != nil {
return fmt.Errorf("error scan db rows loadRateBGroups %v", err)
}
// Date boundaries arrive as strings and are converted to int64.
rbg.DBegin = util.StrToInt64(rbgDBegin)
rbg.DEnd = util.StrToInt64(rbgDEnd)
// Group rows by a composite key: prefix:gwgrId:direction:dialCode.
key = domain.RBObjectKey + ":" + strconv.Itoa(gwgrId) + ":" + strconv.Itoa(direction) + ":" + dialCode
rbgMap[key] = append(rbgMap[key], rbg)
i++
// Progress log every 100k rows.
if i%100000 == 0 {
logrus.Infof("rows %d\n", i)
}
}
// NOTE(review): rows.Err() is never checked after the loop — a driver-side
// error would end iteration silently and leave rbgMap partially filled (or
// empty), which matches the reported symptom; verify rows.Err() here.
rows.Next() hangs for 17 minutes, and after that len(rbgMap) = 0.
I think I'm missing something, it works fine on 9 million.
Related
I'm building a web based data browser called Mavgo Flight.
I want large tables from sqlite to print continuously instead of the default behavior of only printing when all the data is available. I tried running the template per row of data which fails.
// renderHTMLTable streams a query result through the 2d.html template, one
// Execute per row, so output is written incrementally rather than buffered
// until the whole result set is available.
func renderHTMLTable(w http.ResponseWriter, result *sqlx.Rows) {
	cols, err := result.Columns()
	if err != nil {
		log.Println(err, "renderHTMLTable")
		return
	}
	tmpl, err := template.ParseFiles("./templates/2d.html")
	if err != nil {
		log.Println("template failed", err)
		return
	}
	for result.Next() {
		values, err := result.SliceScan()
		if err != nil {
			// Was log.Println(w, err): that printed the ResponseWriter value to
			// the server log instead of reporting the scan error usefully.
			log.Println("SliceScan failed:", err)
			break
		}
		s := make([]string, len(values))
		for i, v := range values {
			s[i] = fmt.Sprint(v)
		}
		// Was ignored: executing a full-page template per row emits a complete
		// document for every row, and any failure went unnoticed.
		if err := tmpl.Execute(w, s); err != nil {
			log.Println("template execute failed:", err)
			return
		}
	}
	// Surface iteration errors that end the loop silently.
	if err := result.Err(); err != nil {
		log.Println("rows iteration failed:", err)
	}
}
I gave up on being clever and did exactly what Cerise suggested.
The function that writes rows incrementally:
// renderHTMLTable streams a query result as an HTML table: the head template
// once, the row template per row, then a literal footer — flushing after each
// row (when the writer supports it) so the client renders progressively.
func renderHTMLTable(w http.ResponseWriter, result *sqlx.Rows) {
	cols, err := result.Columns()
	if err != nil {
		log.Println(err, "renderHTMLTable")
		return
	}
	head, err := template.ParseFiles("./templates/head.html")
	if err != nil {
		log.Println("template failed", err)
		return
	}
	row, err := template.ParseFiles("./templates/row.html")
	if err != nil {
		log.Println("template failed", err)
		return
	}
	foot := ` </tbody>
</table>
</div>
</body>
</html>`
	// Without flushing, many ResponseWriters buffer output until the handler
	// returns, defeating the incremental-rendering goal.
	flusher, canFlush := w.(http.Flusher)
	if err := head.Execute(w, cols); err != nil {
		log.Println("head template failed:", err)
		return
	}
	s := make([]string, len(cols))
	for result.Next() {
		values, err := result.SliceScan()
		if err != nil {
			// Was log.Println(w, err), which logged the writer value itself.
			log.Println("SliceScan failed:", err)
			break
		}
		for i, v := range values {
			s[i] = fmt.Sprint(v)
		}
		// Was ignored: a row-template failure went unnoticed.
		if err := row.Execute(w, s); err != nil {
			log.Println("row template failed:", err)
			return
		}
		if canFlush {
			flusher.Flush()
		}
	}
	// Surface iteration errors that end the loop silently.
	if err := result.Err(); err != nil {
		log.Println("rows iteration failed:", err)
	}
	fmt.Fprint(w, foot)
}
I'm a newbie in Go, so this may be simple for professionals, but I got stuck with no idea what to do next.
I'm making a migration app that extracts data from an Oracle DB and, after some conversion, inserts it into Postgres one by one.
Running the same query natively in the DB console returns about 400k rows and takes about 13 seconds to complete.
The data from Oracle extracts with rows.Next() with some strange behavior:
The first 25 rows are extracted quickly, then there is a pause of a few seconds, then 25 new rows, and so on until it pauses "forever".
Here is the function:
// GetHrTicketsFromOra issues the legacy-ticket query and returns the rows
// handle. Note that database/sql rows are lazy: no data is transferred until
// the caller iterates with Next.
func GetHrTicketsFromOra() (*sql.Rows, error) {
	rows, err := oraDB.Query("select id,STATE_ID,REMEDY_ID,HEADER,CREATE_DATE,TEXT,SOLUTION,SOLUTION_USER_LOGIN,LAST_SOLUTION_DATE from TICKET where SOLUTION_GROUP_ID = 5549")
	if err != nil {
		// Was the builtin println, which is meant only for bootstrap debugging
		// and dropped the error value entirely.
		log.Println("Error while getting rows from Ora:", err)
		return nil, err
	}
	// NOTE(review): this logs before any row has been fetched — Query only
	// opens the cursor, so the export is not actually "finished" here.
	log.Println("Finished legacy tickets export")
	return rows, err
}
And here I export data:
// ConvertRows drains the Oracle result set, converts each legacy ticket to the
// archive format, and sends it on c; m receives 1 once the export completes.
// The rows handle is closed before returning, on every path.
func ConvertRows(rows *sql.Rows, c chan util.ArchTicket, m chan int) error {
	log.Println("Conversion start")
	defer func() {
		if closeErr := rows.Close(); closeErr != nil {
			log.Println("ORA connection closed", closeErr)
		}
	}()
	for rows.Next() {
		log.Println("Reading the ticket")
		var legacy util.OraTicket
		if scanErr := rows.Scan(&legacy.ID, &legacy.StateId, &legacy.RemedyId, &legacy.Header, &legacy.CreateDate, &legacy.Text, &legacy.Solution, &legacy.SolutionUserLogin, &legacy.LastSolutionDate); scanErr != nil {
			log.Println("Error while reading row", scanErr)
			return scanErr
		}
		// Convert and hand the ticket off to the consumer goroutine.
		c <- convertLegTOArch(legacy)
	}
	// A driver error terminates the loop silently; report it to the caller.
	if iterErr := rows.Err(); iterErr != nil {
		log.Println("Error while reading row", iterErr)
		return iterErr
	}
	// Signal successful completion.
	m <- 1
	return nil
}
UPD. I use "github.com/sijms/go-ora/v2" driver
UPD2. Seems like the root cause of the problem is in TEXT and SOLUTION fields of the result rows. They are varchar and can be big enough. Deleting them from the direct query changes the time of execution from 13sec to 258ms. But I still have no idea what to do with that.
UPD3.
Minimal reproducible example
package main
import (
"database/sql"
_ "github.com/sijms/go-ora/v2"
"log"
)
// Package-level connection pool, initialized by InitOraDB.
var oraDB *sql.DB
// NOTE(review): the userinfo separator in a go-ora URL is normally '@' — the
// '#' here looks like a paste artifact; verify the DSN should read
// "oracle://login:password@ora_db:1521/database".
var con = "oracle://login:password#ora_db:1521/database"
// InitOraDB opens the package-level Oracle pool with the go-ora driver and
// verifies connectivity with a Ping.
func InitOraDB(dataSourceName string) error {
	var err error
	if oraDB, err = sql.Open("oracle", dataSourceName); err != nil {
		return err
	}
	// sql.Open does not dial; Ping forces a real connection attempt.
	return oraDB.Ping()
}
// GetHrTicketsFromOra reads every TEXT value for solution group 5549, logging
// one line per row. Errors are logged rather than returned (demo code).
func GetHrTicketsFromOra() {
	var ot string
	rows, err := oraDB.Query("select TEXT from TICKET where SOLUTION_GROUP_ID = 5549")
	if err != nil {
		// Previously fell through with a nil rows handle, which would panic
		// in rows.Next(); bail out instead.
		println("Error while getting rows from Ora")
		return
	}
	// Release the cursor/connection even if iteration stops early.
	defer rows.Close()
	for rows.Next() {
		log.Println("Reading the ticket")
		err := rows.Scan(&ot)
		if err != nil {
			log.Println("Reading failed", err)
		}
		log.Println("Read:")
	}
	// A driver error (e.g. a stalled large-VARCHAR fetch) ends the loop
	// silently; surface it — this is the symptom being debugged.
	if err := rows.Err(); err != nil {
		log.Println("rows iteration failed:", err)
	}
	log.Println("Finished legacy tickets export")
}
// main connects to Oracle and runs the ticket export. A failed init is fatal:
// there is nothing useful to do without a working connection.
func main() {
	if err := InitOraDB(con); err != nil {
		// Was log.Println + fall-through: the program went on to query a dead
		// connection. Stop here and show the actual error.
		log.Fatalln("Error connection Ora:", err)
	}
	GetHrTicketsFromOra()
}
I'm trying to get and return all the nodes of NodeClass Variable of my OPC UA Simulation Server starting at the root node and going down all folder nodes and object nodes that have childs. I tried to use browse-example of the gopcua repo but whenever the program gets to attrs, err := n.Attributes(...) for the second time, it returns an EOF error.
I tried to recreate a minimal example:
package main
import (
"context"
"fmt"
"log"
"github.com/gopcua/opcua"
"github.com/gopcua/opcua/id"
"github.com/gopcua/opcua/ua"
)
const (
endpoint string = "opc.tcp://<ServerAddress>"
rootNodeId string = "i=85"
)
// browse recursively walks the address space below n, following HasChild and
// Organizes references, and prints every node found.
//
// The node descriptions returned by ReferencedNodes can carry an invalid
// NodeIDType, which makes the next Attributes call fail with EOF; re-parsing
// each ID before recursing gives it a valid type
// (https://github.com/gopcua/opcua/issues/550).
func browse(c *opcua.Client, n *opcua.Node) error {
	_, err := n.Attributes(ua.AttributeIDNodeClass, ua.AttributeIDDataType)
	if err != nil {
		return err
	}
	browseChildren := func(refType uint32) error {
		refs, err := n.ReferencedNodes(refType, ua.BrowseDirectionForward, ua.NodeClassAll, true)
		if err != nil {
			return fmt.Errorf("references: %d: %s", refType, err)
		}
		fmt.Printf("found %d child refs\n", len(refs))
		for _, rn := range refs {
			// Re-parse the referenced node's ID before use (see doc comment).
			refNode := c.Node(ua.MustParseNodeID(rn.ID.String()))
			if err := browse(c, refNode); err != nil {
				return fmt.Errorf("browse children: %s", err)
			}
			fmt.Printf("Found a Node: %s\n", refNode.ID.String())
		}
		return nil
	}
	if err := browseChildren(id.HasChild); err != nil {
		return err
	}
	return browseChildren(id.Organizes)
}
// main connects to the OPC UA server and browses the subtree under rootNodeId.
func main() {
	ctx := context.Background()
	c := opcua.NewClient(endpoint)
	if err := c.Connect(ctx); err != nil {
		// Was log.Fatal followed by panic(err): Fatal exits first, so the
		// panic was unreachable and the connect error was never shown.
		log.Fatal("Could not connect: ", err)
	}
	defer c.Close()
	// Renamed from "id", which shadowed the imported gopcua id package; also
	// the parse error was silently discarded before.
	rootID, err := ua.ParseNodeID(rootNodeId)
	if err != nil {
		log.Fatal(err)
	}
	if err := browse(c, c.Node(rootID)); err != nil {
		log.Fatal(err)
	}
}
This is how the Prosys OPC UA Simulation Server looks like:
And this the output I get from the program:
found 0 child refs
found 5 child refs
2022/01/21 14:26:06 browse children: EOF
exit status 1
tl;dr
// The fix: materialize each referenced node via a freshly parsed NodeID before
// browsing it. IDs returned by ReferencedNodes can carry an invalid
// NodeIDType, which breaks the subsequent Attributes call with EOF.
browseChildren := func(refType uint32) error {
refs, err := n.ReferencedNodes(refType, ua.BrowseDirectionForward, ua.NodeClassAll, true)
if err != nil {
return fmt.Errorf("references: %d: %s", refType, err)
}
fmt.Printf("found %d child refs\n", len(refs))
for _, rn := range refs {
refNodeID := ua.MustParseNodeID(rn.ID.String())
refNode := c.Node(refNodeID) // parse the referenced nodes before usage
err := browse(c, refNode)
if err != nil {
return fmt.Errorf("browse children: %s", err)
}
fmt.Printf("Found a Node: %s\n", refNode.ID.String())
}
return nil
}
Reason
The referenced nodes that are returned by n.ReferencedNodes have an invalid NodeIDType (64). The only supported NodeIDTypes are 0-5. Reparsing them gives them a proper NodeIDType. (Related GitHub issue: https://github.com/gopcua/opcua/issues/550)
I'm trying to use Tinygo Bluetooth to connect to a Timeflip v2 (https://github.com/DI-GROUP/TimeFlip.Docs/blob/master/Hardware/TimeFlip%20BLE%20protocol%20ver4_02.06.2020.md).
The API documentation says, that I need to login (which is a write to f1196f57-71a4-11e6-bdf4-0800200c9a66, followed by a read on f1196f53-71a4-11e6-bdf4-0800200c9a66 for checking the result of the login).
The only thing I see in TinyGo for DeviceCharacteristics is WriteWithoutResponse (https://pkg.go.dev/github.com/tinygo-org/bluetooth#DeviceCharacteristic.WriteWithoutResponse).
When I use this, it appears to have no effect on the result.
package main
import (
"log"
"strconv"
"tinygo.org/x/bluetooth"
)
// adapter is the platform's default BLE adapter.
var adapter = bluetooth.DefaultAdapter
// UUIDs from the TimeFlip v2 BLE protocol: the service, the command-result
// characteristic (read back to check a command's outcome), and the password
// characteristic (login = write password, then read commandOut for the
// result code).
const (
timeFlipService = "f1196f50-71a4-11e6-bdf4-0800200c9a66"
commandOutCharacteristic = "f1196f53-71a4-11e6-bdf4-0800200c9a66"
passwordCharacteristic = "f1196f57-71a4-11e6-bdf4-0800200c9a66"
)
// main scans for a TimeFlip v2, connects, discovers the password and
// command-result characteristics, then writes a correct and an incorrect
// password and reads the result byte after each write.
func main() {
if err := adapter.Enable(); err != nil {
log.Fatalln(err)
}
// Buffered so the scan callback can hand off the device without blocking.
ch := make(chan bluetooth.ScanResult, 1)
log.Println("scanning...")
if err := adapter.Scan(func(adapter *bluetooth.Adapter, device bluetooth.ScanResult) {
log.Println(device)
if device.LocalName() == "TimeFlip v2.0" {
log.Println("found timeflip")
if err := adapter.StopScan(); err != nil {
log.Println(err)
}
ch <- device
}
}); err != nil {
log.Fatalln(err)
}
var device *bluetooth.Device
// NOTE(review): a select with a single case is equivalent to a plain
// receive, and it blocks forever if the device is never found — consider a
// timeout case.
select {
case result := <-ch:
var err error
device, err = adapter.Connect(result.Address, bluetooth.ConnectionParams{})
if err != nil {
log.Fatalln(err)
}
log.Println("connected to ", result.Address.String())
}
defer func() {
if err := device.Disconnect(); err != nil {
log.Println(err)
} else {
log.Println("disconnected")
}
}()
// Parse errors are discarded here; the UUID literals are compile-time
// constants above, so failures are unexpected but would surface later as
// empty discovery results.
tfService, _ := bluetooth.ParseUUID(timeFlipService)
srvcs, err := device.DiscoverServices([]bluetooth.UUID{tfService})
if err != nil {
log.Fatalln(err)
}
buf := make([]byte, 20)
password, _ := bluetooth.ParseUUID(passwordCharacteristic)
cmdOut, _ := bluetooth.ParseUUID(commandOutCharacteristic)
for _, srvc := range srvcs {
log.Println("- service", srvc.UUID().String())
chars, err := srvc.DiscoverCharacteristics([]bluetooth.UUID{password, cmdOut})
if err != nil {
log.Println(err)
}
// Match discovered characteristics to the two we need by UUID.
var pwChar bluetooth.DeviceCharacteristic
var cmdOutChar bluetooth.DeviceCharacteristic
for _, char := range chars {
log.Println("-- characteristic", char.UUID().String())
switch char.UUID() {
case password:
pwChar = char
case cmdOut:
cmdOutChar = char
}
}
// Login attempt 1: the correct password ("000000" as ASCII bytes).
rightPassword := []byte{0x30, 0x30, 0x30, 0x30, 0x30, 0x30}
// NOTE(review): WriteWithoutResponse gives no acknowledgment that the
// peripheral processed the write before the Read below — the identical
// results for right and wrong passwords suggest the write may not take
// effect (or a with-response write is required); verify against the
// TimeFlip protocol and the tinygo bluetooth API.
_, err = pwChar.WriteWithoutResponse(rightPassword)
if err != nil {
log.Println("  ", err.Error())
}
n, err := cmdOutChar.Read(buf)
if err != nil {
log.Println("  ", err.Error())
} else {
log.Println("  data bytes", strconv.Itoa(n))
log.Println("  value =", buf[:n])
}
// Login attempt 2: a wrong password; the device should report 0x02
// (invalid password) on the next read.
wrongPassword := []byte{0x30, 0x30, 0x30, 0x30, 0x30, 0x31}
_, err = pwChar.WriteWithoutResponse(wrongPassword)
if err != nil {
log.Println("  ", err.Error())
}
n, err = cmdOutChar.Read(buf)
if err != nil {
log.Println("  ", err.Error())
} else {
log.Println("  data bytes", strconv.Itoa(n))
log.Println("  value =", buf[:n])
}
}
}
results in
scanning...
{7A:05:BF:06:97:D6 -61 0xc00029c1e0}
{C6:D8:C0:F7:A7:3C -57 0xc00029c480}
found timeflip
connected to C6:D8:C0:F7:A7:3C
- service f1196f50-71a4-11e6-bdf4-0800200c9a66
-- characteristic f1196f57-71a4-11e6-bdf4-0800200c9a66
-- characteristic f1196f53-71a4-11e6-bdf4-0800200c9a66
data bytes 1
value = [1]
data bytes 1
value = [1]
disconnected
At least one of the results should be 0x02 (invalid password).
Could somebody give me a hint?
I have a task to pull data from an Oracle Database and I am trying to pull huge data > 6MM records with 100 columns for processing.
Need to convert the data to a Map. I was successfully able to process them for 350K records in less than 35 seconds. After that the server hangs and does not proceed further.
Is there a way I can batch these based on the row size, or batch them to free up memory?
// FetchUsingGenericResult opens a godror connection, runs sqlStatement, and
// converts every row to a map[string]string, returning them in a collector.
//
// NOTE(review): columnTypes is returned empty — nothing populates it here;
// confirm whether callers expect real column-type info.
func FetchUsingGenericResult(ctx context.Context, dsConnection *string, sqlStatement string) (*entity.GenericResultCollector, error) {
	columnTypes := make(map[string]string)
	var resultCollection entity.GenericResultCollector
	db, err := sql.Open("godror", *dsConnection)
	if err != nil {
		return &resultCollection, errors.Wrap(err, "error connecting to Oracle")
	}
	// The pool was previously leaked on every call.
	defer db.Close()
	log := logger.FromContext(ctx).Sugar()
	log.Infof("start querying from Oracle at :%v", time.Now())
	rows, err := db.Query(sqlStatement, godror.FetchRowCount(defaultFetchCount))
	if err != nil {
		return &resultCollection, errors.Wrap(err, "error querying")
	}
	defer rows.Close()
	objects, err := rows2Strings(ctx, rows)
	if err != nil {
		// Previously ignored: on a conversion error, objects is nil and the
		// len(*objects) below panics.
		return &resultCollection, errors.Wrap(err, "error converting rows")
	}
	log.Infof("total Rows converted are :%v by %v", len(*objects), time.Now())
	resultCollection = entity.GenericResultCollector{
		Columns: columnTypes,
		Rows:    objects,
	}
	return &resultCollection, nil
}
// rows2Strings converts every row of the result set into a
// map[string]string, sequentially.
//
// The original spawned an errgroup goroutine per row and then called Wait
// inside the loop: rows.Scan must not run concurrently with rows.Next, the
// shared result/resultsSlice variables were written from multiple goroutines
// (a data race), and the per-iteration Wait serialized everything anyway.
// Inline processing is both correct and no slower.
func rows2Strings(ctx context.Context, rows *sql.Rows) (*[]map[string]string, error) {
	resultsSlice := []map[string]string{}
	fields, err := rows.Columns()
	if err != nil {
		return nil, err
	}
	log := logger.FromContext(ctx).Sugar()
	counter := 0
	for rows.Next() {
		counter++
		// Periodic progress log, one line per fetch batch.
		if counter%defaultFetchCount == 0 {
			log.Infof("finished converting %v rows by %v", counter, time.Now())
		}
		result, err := row2mapStr(rows, fields)
		if err != nil {
			return nil, err
		}
		resultsSlice = append(resultsSlice, result)
	}
	// Surface any driver error that ended the loop early.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return &resultsSlice, nil
}
// row2mapStr scans the current row into a map keyed by column name, rendering
// every value as a string; NULL columns become "".
//
// The named returns (resultsMap, err) in the original were misleading — the
// function always returned the local "result" instead — so the signature now
// uses plain unnamed results (callers are unaffected).
func row2mapStr(rows *sql.Rows, fields []string) (map[string]string, error) {
	result := make(map[string]string)
	// One *interface{} holder per column lets Scan accept any column type.
	scanResultContainers := make([]interface{}, len(fields))
	for i := range fields {
		var scanResultContainer interface{}
		scanResultContainers[i] = &scanResultContainer
	}
	if err := rows.Scan(scanResultContainers...); err != nil {
		return nil, err
	}
	for i, key := range fields {
		rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[i]))
		// SQL NULL arrives as a nil interface; map it to the empty string.
		if rawValue.Interface() == nil {
			result[key] = ""
			continue
		}
		data, err := value2String(&rawValue)
		if err != nil {
			return nil, err
		}
		result[key] = data
	}
	return result, nil
}
// value2String renders a reflected scan value as a string: integers and
// floats via strconv, []byte as raw text (a lone NUL byte becomes "0"),
// time.Time-convertible structs as RFC3339Nano, plus bool and complex.
// Unsupported kinds return an error.
func value2String(rawValue *reflect.Value) (str string, err error) {
	aa := reflect.TypeOf((*rawValue).Interface())
	vv := reflect.ValueOf((*rawValue).Interface())
	switch aa.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		str = strconv.FormatInt(vv.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		str = strconv.FormatUint(vv.Uint(), 10)
	case reflect.Float32:
		// bitSize 32: formatting a float32 with bitSize 64 (as the original
		// did) prints noise digits, e.g. 0.1 -> "0.10000000149011612".
		str = strconv.FormatFloat(vv.Float(), 'f', -1, 32)
	case reflect.Float64:
		str = strconv.FormatFloat(vv.Float(), 'f', -1, 64)
	case reflect.String:
		str = vv.String()
	case reflect.Array, reflect.Slice:
		switch aa.Elem().Kind() {
		case reflect.Uint8:
			data := rawValue.Interface().([]byte)
			str = string(data)
			if str == "\x00" {
				str = "0"
			}
		default:
			err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
		}
	// time type
	case reflect.Struct:
		if aa.ConvertibleTo(timeType) {
			str = vv.Convert(timeType).Interface().(time.Time).Format(time.RFC3339Nano)
		} else {
			err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
		}
	case reflect.Bool:
		str = strconv.FormatBool(vv.Bool())
	case reflect.Complex128, reflect.Complex64:
		str = fmt.Sprintf("%v", vv.Complex())
	default:
		err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
	}
	return
}
Has anyone encountered a similar problem?
Modified the code as below:
// FetchUsingGenericResult opens a godror connection, runs sqlStatement, and
// converts every row to a map[string]string, returning them in a collector.
//
// NOTE(review): columnTypes is returned empty — nothing populates it here;
// confirm whether callers expect real column-type info.
func FetchUsingGenericResult(ctx context.Context, dsConnection *string, sqlStatement string) (*entity.GenericResultCollector, error) {
	columnTypes := make(map[string]string)
	var resultCollection entity.GenericResultCollector
	db, err := sql.Open("godror", *dsConnection)
	if err != nil {
		return &resultCollection, errors.Wrap(err, "error connecting to Oracle")
	}
	// The pool was previously leaked on every call.
	defer db.Close()
	log := logger.FromContext(ctx).Sugar()
	log.Infof("start querying from Oracle at :%v", time.Now())
	rows, err := db.Query(sqlStatement, godror.FetchRowCount(defaultFetchCount))
	if err != nil {
		return &resultCollection, errors.Wrap(err, "error querying")
	}
	defer rows.Close()
	objects, err := rows2Strings(ctx, rows)
	if err != nil {
		// Previously ignored: on a conversion error, objects is nil and the
		// len(*objects) below panics.
		return &resultCollection, errors.Wrap(err, "error converting rows")
	}
	log.Infof("total Rows converted are :%v by %v", len(*objects), time.Now())
	resultCollection = entity.GenericResultCollector{
		Columns: columnTypes,
		Rows:    objects,
	}
	return &resultCollection, nil
}
// rows2Strings walks the result set and converts each row into a
// map[string]string, logging progress once per fetch batch.
func rows2Strings(ctx context.Context, rows *sql.Rows) (*[]map[string]string, error) {
	fields, err := rows.Columns()
	if err != nil {
		return nil, err
	}
	log := logger.FromContext(ctx).Sugar()
	converted := []map[string]string{}
	rowCount := 0
	for rows.Next() {
		rowCount++
		if rowCount%defaultFetchCount == 0 {
			log.Infof("finished converting %v rows by %v", rowCount, time.Now())
		}
		rowMap, convErr := row2mapStr(rows, fields)
		if convErr != nil {
			return nil, convErr
		}
		converted = append(converted, rowMap)
	}
	return &converted, nil
}
// row2mapStr converts the current row into a column-name -> string map; SQL
// NULL values are rendered as the empty string.
func row2mapStr(rows *sql.Rows, fields []string) (resultsMap map[string]string, err error) {
	out := make(map[string]string)
	// One *interface{} holder per column so Scan can accept any column type.
	holders := make([]interface{}, len(fields))
	for i := range holders {
		var cell interface{}
		holders[i] = &cell
	}
	if err = rows.Scan(holders...); err != nil {
		return nil, err
	}
	for idx, name := range fields {
		val := reflect.Indirect(reflect.ValueOf(holders[idx]))
		// NULL arrives as a nil interface value.
		if val.Interface() == nil {
			out[name] = ""
			continue
		}
		rendered, convErr := value2String(&val)
		if convErr != nil {
			return nil, convErr
		}
		out[name] = rendered
	}
	return out, nil
}
// value2String renders a reflected scan value as a string: integers and
// floats via strconv, []byte as raw text (a lone NUL byte becomes "0"),
// time.Time-convertible structs as RFC3339Nano, plus bool and complex.
// Unsupported kinds return an error.
func value2String(rawValue *reflect.Value) (str string, err error) {
	aa := reflect.TypeOf((*rawValue).Interface())
	vv := reflect.ValueOf((*rawValue).Interface())
	switch aa.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		str = strconv.FormatInt(vv.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		str = strconv.FormatUint(vv.Uint(), 10)
	case reflect.Float32:
		// bitSize 32: formatting a float32 with bitSize 64 (as the original
		// did) prints noise digits, e.g. 0.1 -> "0.10000000149011612".
		str = strconv.FormatFloat(vv.Float(), 'f', -1, 32)
	case reflect.Float64:
		str = strconv.FormatFloat(vv.Float(), 'f', -1, 64)
	case reflect.String:
		str = vv.String()
	case reflect.Array, reflect.Slice:
		switch aa.Elem().Kind() {
		case reflect.Uint8:
			data := rawValue.Interface().([]byte)
			str = string(data)
			if str == "\x00" {
				str = "0"
			}
		default:
			err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
		}
	// time type
	case reflect.Struct:
		if aa.ConvertibleTo(timeType) {
			str = vv.Convert(timeType).Interface().(time.Time).Format(time.RFC3339Nano)
		} else {
			err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
		}
	case reflect.Bool:
		str = strconv.FormatBool(vv.Bool())
	case reflect.Complex128, reflect.Complex64:
		str = fmt.Sprintf("%v", vv.Complex())
	default:
		err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
	}
	return
}
As the server memory was limited, the program got stuck growing the result array without proceeding further.
I started processing the data as it gets scanned instead of accumulating it all, and this solved my problem.