Why am I always getting random results regardless of seed value? - go

I have a fairly complex Go application. It generates many random results in a long chain. It is seeded only once - when an HTTP request comes in.
No matter what the seed is - whether it's unix time, or whether it's my own alphanumeric seed function - it always generates completely random results.
I've attempted cutting out my alphanumeric seed function, but that doesn't alter the behavior. I have also tried setting the seed always to 1111. This has no effect.
Here is a sample (verbose and lifted directly from the source, since that's what was requested):
func main() {
	sentryDSN := os.Getenv("SENTRY_DSN")
	sentry.Init(sentry.ClientOptions{
		Dsn: sentryDSN,
	})
	sentryHandler := sentryhttp.New(sentryhttp.Options{
		Repanic: true,
	})

	r := chi.NewRouter()
	r.Use(middleware.RequestID)
	r.Use(middleware.RealIP)
	r.Use(middleware.Logger)
	r.Use(middleware.Recoverer)
	r.Use(middleware.URLFormat)
	r.Use(middleware.SetHeader("Content-Type", "application/json"))
	r.Use(middleware.Timeout(60 * time.Second))

	r.Get("/buildingstyle", sentryHandler.HandleFunc(getBuildingStyleRandom))
	r.Get("/buildingstyle/{id}", sentryHandler.HandleFunc(getBuildingStyle))
	r.Get("/character", sentryHandler.HandleFunc(getCharacterRandom))
	r.Get("/character/{id}", sentryHandler.HandleFunc(getCharacter))
	r.Get("/climate", sentryHandler.HandleFunc(getClimateRandom))
	r.Get("/climate/{id}", sentryHandler.HandleFunc(getClimate))

	port := 7531
	fmt.Printf("World Generator API is running on http://localhost:%d.\n", port)
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), r))
}
func SeedFromString(source string) error {
	h := md5.New()
	_, err := io.WriteString(h, source)
	if err != nil {
		err = fmt.Errorf("Failed to seed random number generator: %w", err)
		return err
	}
	seed := binary.BigEndian.Uint64(h.Sum(nil))
	rand.Seed(int64(seed))
	return nil
}
func getClimate(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	var o climate.SimplifiedClimate

	err := random.SeedFromString(id)
	if err != nil {
		handleError(w, r, err)
		return
	}

	randomClimate, err := climate.Random()
	if err != nil {
		handleError(w, r, err)
		return
	}
	o = randomClimate.Simplify()

	json.NewEncoder(w).Encode(o)
}
// Generate generates a climate with a given name
func Generate(name string) (Climate, error) {
	rawClimate, err := ByName(name)
	if err != nil {
		err = fmt.Errorf("Could not generate climate by name: %w", err)
		return Climate{}, err
	}

	climate, err := rawClimate.populate()
	if err != nil {
		err = fmt.Errorf("Could not generate climate by name: %w", err)
		return Climate{}, err
	}

	return climate, nil
}
func (climate Climate) populate() (Climate, error) {
	gems := mineral.Gems()
	insects := climate.getFilteredInsects()
	metals := mineral.Metals()
	stones := mineral.Stones()
	trees := climate.getFilteredTrees()

	climate.Seasons = climate.getSeasons()

	lakeChance := rand.Intn(100)
	riverChance := rand.Intn(100)
	oceanChance := rand.Intn(100)
	wetlandsChance := rand.Intn(100)

	if lakeChance > 30 {
		climate.HasLakes = true
	}
	if riverChance > 20 {
		climate.HasRivers = true
	}
	if oceanChance > 80 {
		climate.HasOcean = true
	}
	if wetlandsChance > 80 {
		climate.HasWetlands = true
	}

	soils := climate.getFilteredSoils()

	if climate.HasLakes || climate.HasRivers || climate.HasOcean {
		climate.Fish = climate.getFish()
	} else {
		climate.Fish = []fish.Fish{}
	}

	climate.Insects = insect.RandomSubset(7, insects)

	filteredMetals, err := mineral.RandomWeightedSet(climate.MaxMetals, metals)
	if err != nil {
		err = fmt.Errorf("Could not populate climate: %w", err)
		return Climate{}, err
	}
	climate.Metals = filteredMetals

	climate.Gems = mineral.Random(climate.MaxGems, gems)
	climate.OtherMinerals = mineral.OtherMinerals()

	climate.Animals, err = climate.getAnimals()
	if err != nil {
		err = fmt.Errorf("Could not populate climate: %w", err)
		return Climate{}, err
	}

	climate.Plants, err = climate.getPlants()
	if err != nil {
		err = fmt.Errorf("Could not populate climate: %w", err)
		return Climate{}, err
	}

	climate.Soils = soil.Random(climate.MaxSoils, soils)
	climate.Stones = mineral.Random(climate.MaxStones, stones)
	climate.Trees = tree.RandomSubset(climate.MaxTrees, trees)

	resources := climate.getResources()
	climate.Resources = resources

	description, err := climate.getDescription()
	if err != nil {
		err = fmt.Errorf("Could not populate climate: %w", err)
		return Climate{}, err
	}
	climate.Description = description

	climate.Habitability = climate.calculateHabitability()

	return climate, nil
}
Many of these functions, like RandomSubset(), return n random elements from a slice, with some filtering applied.
I expect all of this to be consistent and identical when the same seed is used across multiple runs. However, this is not the case: the results are different every single time, regardless of the seed value.
Is there some fundamental piece of rand.Intn()'s or rand.Seed()'s operation that I'm unaware of?

You're seeding the default source, which is shared by the entire program. In a large, complicated project, it is very likely that some other part is consuming an indeterminate number of random values, depending on the environment. It's even possible that somewhere else in your code there is another call to rand.Seed().
If you want your random values to be independent, create your own rand.Rand and use that for the pieces you want to be determined by your seed.
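For example, here is a minimal sketch of that approach, adapted from your SeedFromString (the name rngFromString and the per-request wiring are mine, not from your code):

import (
	"crypto/md5"
	"encoding/binary"
	"io"
	"math/rand"
)

// rngFromString returns a private generator seeded from the string, so
// other consumers of the package-level rand can't disturb its sequence.
func rngFromString(source string) *rand.Rand {
	h := md5.New()
	io.WriteString(h, source)
	seed := binary.BigEndian.Uint64(h.Sum(nil))
	return rand.New(rand.NewSource(int64(seed)))
}

In the handler you would create one generator per request and pass it down to the functions that draw random numbers, e.g. rng := rngFromString(chi.URLParam(r, "id")) and later rng.Intn(100). Note that a *rand.Rand is not safe for concurrent use, so don't share one across goroutines without locking.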
If you can reproduce this in a self-contained piece of code, then we can explore the specific case, but I expect when you create a self-contained piece of code the problem will go away because you will have removed the other value consumer(s).

Related

rows.Next() halts after some number of rows

I'm a newbie in Go, so this may be simple for professionals, but I'm stuck with no idea what to do next.
I'm writing a migration app that extracts data from an Oracle DB and, after some conversion, inserts it into a Postgres one row at a time.
The equivalent native query in the DB console returns about 400k rows and takes about 13 seconds to finish.
Extracting the data from Oracle with rows.Next() shows some strange behavior:
the first 25 rows come back quickly enough, then there is a pause of a few seconds, then another 25 rows, and so on until it pauses "forever".
Here is the function:
func GetHrTicketsFromOra() (*sql.Rows, error) {
	rows, err := oraDB.Query("select id,STATE_ID,REMEDY_ID,HEADER,CREATE_DATE,TEXT,SOLUTION,SOLUTION_USER_LOGIN,LAST_SOLUTION_DATE from TICKET where SOLUTION_GROUP_ID = 5549")
	if err != nil {
		println("Error while getting rows from Ora")
		return nil, err
	}
	log.Println("Finished legacy tickets export")
	return rows, err
}
And here I export data:
func ConvertRows(rows *sql.Rows, c chan util.ArchTicket, m chan int) error {
	log.Println("Conversion start")
	defer func(rows *sql.Rows) {
		err := rows.Close()
		if err != nil {
			log.Println("ORA connection closed", err)
			return
		}
	}(rows)

	for rows.Next() {
		log.Println("Reading the ticket")
		ot := util.OraTicket{}
		at := util.ArchTicket{}
		err := rows.Scan(&ot.ID, &ot.StateId, &ot.RemedyId, &ot.Header, &ot.CreateDate, &ot.Text, &ot.Solution, &ot.SolutionUserLogin, &ot.LastSolutionDate)
		if err != nil {
			log.Println("Error while reading row", err)
			return err
		}
		at = convertLegTOArch(ot)
		c <- at
	}
	if err := rows.Err(); err != nil {
		log.Println("Error while reading row", err)
		return err
	}
	m <- 1
	return nil
}
UPD. I use the "github.com/sijms/go-ora/v2" driver.
UPD2. It seems the root cause of the problem lies in the TEXT and SOLUTION fields of the result rows. They are varchar and can be quite big. Removing them from the query cuts the execution time from 13 sec to 258 ms. But I still have no idea what to do about that.
UPD3.
Minimal reproducible example
package main

import (
	"database/sql"
	"log"

	_ "github.com/sijms/go-ora/v2"
)

var oraDB *sql.DB
var con = "oracle://login:password@ora_db:1521/database"

func InitOraDB(dataSourceName string) error {
	var err error
	oraDB, err = sql.Open("oracle", dataSourceName)
	if err != nil {
		return err
	}
	return oraDB.Ping()
}

func GetHrTicketsFromOra() {
	var ot string
	rows, err := oraDB.Query("select TEXT from TICKET where SOLUTION_GROUP_ID = 5549")
	if err != nil {
		println("Error while getting rows from Ora")
		return
	}
	defer rows.Close()
	for rows.Next() {
		log.Println("Reading the ticket")
		err := rows.Scan(&ot)
		if err != nil {
			log.Println("Reading failed", err)
		}
		log.Println("Read:")
	}
	if err := rows.Err(); err != nil {
		log.Println("Rows error", err)
	}
	log.Println("Finished legacy tickets export")
}

func main() {
	err := InitOraDB(con)
	if err != nil {
		log.Println("Error connection Ora")
		return
	}
	GetHrTicketsFromOra()
}
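Given UPD2, the slowdown looks like it comes from how the driver fetches the long varchar values. If I remember go-ora correctly, the driver accepts connection URL options that control this; something like the following sketch (BuildUrl and the "lob fetch" option are from go-ora's README, but treat the exact option name and value as an assumption to verify against your driver version):

// import go_ora "github.com/sijms/go-ora/v2"
urlOptions := map[string]string{
	"lob fetch": "pre", // ask the driver to fetch long values together with the row
}
// equivalent to the "oracle://login:password@ora_db:1521/database" string above
connStr := go_ora.BuildUrl("ora_db", 1521, "database", "login", "password", urlOptions)
oraDB, err := sql.Open("oracle", connStr)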

Atomically Execute commands across Redis Data Structures

I want to execute some Redis commands atomically (HDEL, SADD, HSET, etc.). I see the Watch feature in go-redis for implementing transactions; however, since I am not going to read a key's value and then modify it based on the result (i.e. a GET followed by a SET), does it make sense to use Watch, or is just wrapping the commands in a TxPipeline good enough?
Approach 1 : Using Watch
func sampleTransaction() error {
	transactionFunc := func(tx *redis.Tx) error {
		// Commands queued on the Pipeliner are sent as one MULTI/EXEC block;
		// their results are only available after Exec, so there is nothing
		// useful to check per command here.
		_, err := tx.TxPipelined(context.Background(), func(pipe redis.Pipeliner) error {
			pipe.SAdd(context.Background(), "redis-set-key", "value1")
			pipe.HDel(context.Background(), "redis-hash-key", "value1")
			return nil
		})
		return err
	}

	retries := 10
	// Retry if the watched keys have been changed.
	for i := 0; i < retries; i++ {
		fmt.Println("tries", i)
		// Pass the function itself (not its result), plus the keys to watch.
		err := redisClient.Watch(context.Background(), transactionFunc, "redis-set-key", "redis-hash-key")
		if err == nil {
			// Success.
			return nil
		}
		if err == redis.TxFailedErr {
			continue
		}
		return err
	}
	return fmt.Errorf("sampleTransaction: reached maximum number of retries")
}
Approach 2: Just wrapping in TxPipelined
func sampleTransaction() error {
	// No Watch here: the commands are still sent as a single MULTI/EXEC
	// block, they just aren't conditional on any watched keys.
	_, err := redisClient.TxPipelined(context.Background(), func(pipe redis.Pipeliner) error {
		pipe.SAdd(context.Background(), "redis-set-key", "value1")
		pipe.HDel(context.Background(), "redis-hash-key", "value1")
		return nil
	})
	return err
}
As far as I know, pipelines do not guarantee atomicity. If you need atomicity, use a Lua script.
https://pkg.go.dev/github.com/mediocregopher/radix.v3#NewEvalScript
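If you do want a Lua script with go-redis (the library used in the question), a sketch could look like this (key and member names are taken from the question; the script body is an assumption about what "atomically" should cover):

// atomicMove adds a member to a set and removes the matching hash field
// as one atomic server-side operation.
var atomicMove = redis.NewScript(`
redis.call("SADD", KEYS[1], ARGV[1])
redis.call("HDEL", KEYS[2], ARGV[1])
return 1
`)

func sampleAtomic(ctx context.Context, rdb *redis.Client) error {
	// EVAL runs the whole script atomically on the server.
	return atomicMove.Run(ctx, rdb, []string{"redis-set-key", "redis-hash-key"}, "value1").Err()
}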

How to write clean code without all these cascading error Christmas trees? [duplicate]

This question already has answers here:
Go Error Handling Techniques [closed]
(11 answers)
Closed 2 years ago.
I wrote a function that should do a simple thing:
look up a specific address in a table and return its ID, if it
already exists
if not, create a new record for this particular address
return the ID of this newly created record
As the RDBMS I use MySQL here. I put everything in a transaction to avoid race conditions between the concurrent goroutines that call this function.
However, the tons of constant err checks make the code ugly and full test coverage hard to achieve.
Is there anything I can improve here in terms of better code quality?
func getAddressId(db *sql.DB, address string) (int64, error) {
	tx, err := db.Begin()
	if err != nil {
		tx.Rollback()
		return 0, err
	}
	stmt, err := tx.Prepare("SELECT id FROM address WHERE `address`=?")
	if err != nil {
		tx.Rollback()
		return 0, err
	}
	defer stmt.Close()
	var result sql.NullInt64
	err = stmt.QueryRow(address).Scan(&result)
	if err != nil && err != sql.ErrNoRows {
		tx.Rollback()
		return 0, err
	}
	if result.Valid {
		tx.Commit()
		return result.Int64, nil
	}
	stmt, err = tx.Prepare("INSERT INTO address (address) VALUES (?)")
	if err != nil {
		tx.Rollback()
		return 0, err
	}
	var res sql.Result = nil
	res, err = stmt.Exec(address)
	if err != nil {
		tx.Rollback()
		return 0, err
	}
	tx.Commit()
	var id int64 = 0
	id, err = res.LastInsertId()
	return id, err
}
First, and most importantly, there's very little wrong with the above code. There are a few pieces I'd adjust (and will below), but generally it is very clear, straightforward, and (almost) hard to get wrong. There is nothing ugly about it.
Second, see Error Handling and Go for thoughts on error handling in Go, though I won't be using those techniques here because they're not necessary.
Now there is one thing that's a bit bad, which is that it's easy to forget to call tx.Rollback() or tx.Commit() in the right places. In my opinion, that's reasonable to fix (but it's really more style than substance). The below isn't tested.
// Name your return values so that we can use bare returns.
func getAddressId(db *sql.DB, address string) (id int64, err error) {
	tx, err := db.Begin()
	if err != nil {
		return // This is a bare return. No need to write "0, err" everywhere.
	}
	// From this point on, if we exit with an error, then rollback, otherwise commit.
	defer func() {
		if err != nil {
			tx.Rollback()
		} else {
			tx.Commit()
		}
	}()

	stmt, err := tx.Prepare("SELECT id FROM address WHERE `address`=?")
	if err != nil {
		return
	}
	defer stmt.Close() // I'm not sure this is correct, because you reuse stmt

	// This is purely style, but you can tighten up `err = ...; if err` logic like this:
	var result sql.NullInt64
	if err = stmt.QueryRow(address).Scan(&result); err != nil && err != sql.ErrNoRows {
		return
	}
	if result.Valid {
		id = result.Int64
		return
	}

	if stmt, err = tx.Prepare("INSERT INTO address (address) VALUES (?)"); err != nil {
		return
	}
	res, err := stmt.Exec(address)
	if err != nil {
		return
	}
	id, err = res.LastInsertId()
	return
}
That said, I think this function is doing way too much, and if you break it up, it becomes easier to understand. For example (again, untested):
func getExistingAddressId(tx *sql.Tx, address string) (id int64, err error) {
	stmt, err := tx.Prepare("SELECT id FROM address WHERE `address`=?")
	if err != nil {
		return
	}
	// I believe you need to close both statements, and splitting it up makes that clearer.
	defer stmt.Close()
	var result sql.NullInt64
	if err = stmt.QueryRow(address).Scan(&result); err != nil && err != sql.ErrNoRows {
		return
	}
	// This is probably over-complicated. If !Valid, then .Int64 is 0.
	if result.Valid {
		return result.Int64, nil
	}
	return 0, nil
}
func insertNewAddress(tx *sql.Tx, address string) (id int64, err error) {
	stmt, err := tx.Prepare("INSERT INTO address (address) VALUES (?)")
	if err != nil {
		return
	}
	defer stmt.Close()
	res, err := stmt.Exec(address)
	if err != nil {
		return
	}
	return res.LastInsertId()
}
func getAddressId(db *sql.DB, address string) (id int64, err error) {
	tx, err := db.Begin()
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			tx.Rollback()
		} else {
			tx.Commit()
		}
	}()
	if id, err = getExistingAddressId(tx, address); err != nil || id != 0 {
		return
	}
	return insertNewAddress(tx, address)
}
Using named return values like this is a matter of style, and you could certainly not do it that way and it would be just as clear. But the point is that (a) defer is a powerful way to avoid duplicating logic that must always run, and (b) if a function becomes a mess of error handling, it is probably doing too much.
As a side note, I strongly suspect you could get rid of the Prepare calls here, which would simplify things significantly. You only use each statement once; Prepare only makes sense if you cache the statements and reuse them. If you drop it, the code simplifies to:
func getExistingAddressId(tx *sql.Tx, address string) (int64, error) {
	var result sql.NullInt64
	if err := tx.QueryRow("SELECT id FROM address WHERE `address`=?", address).
		Scan(&result); err != nil && err != sql.ErrNoRows {
		return 0, err
	}
	return result.Int64, nil
}
func insertNewAddress(tx *sql.Tx, address string) (int64, error) {
	res, err := tx.Exec("INSERT INTO address (address) VALUES (?)", address)
	if err != nil {
		return 0, err
	}
	return res.LastInsertId()
}
func getAddressId(db *sql.DB, address string) (id int64, err error) {
	tx, err := db.Begin()
	if err != nil {
		return 0, err
	}
	defer func() {
		if err != nil {
			tx.Rollback()
		} else {
			tx.Commit()
		}
	}()
	if id, err = getExistingAddressId(tx, address); err != nil || id != 0 {
		return
	}
	return insertNewAddress(tx, address)
}
Rather than trying to simplify Go syntax, this simplifies the operation, which as a side effect makes the syntax simpler.
A small subtlety that may go overlooked if you're not very familiar with named return values. In return insertNewAddress(...), the return value of the function call gets assigned to id and err before the defer runs, so the if err != nil check will correctly reflect the returned value. This can be a bit tricky, so you may prefer to write this all more explicitly, especially now that the function is so much shorter.
func getAddressId(db *sql.DB, address string) (int64, error) {
	tx, err := db.Begin()
	if err != nil {
		return 0, err
	}
	var id int64
	id, err = getExistingAddressId(tx, address)
	if err == nil && id == 0 {
		id, err = insertNewAddress(tx, address)
	}
	if err != nil {
		tx.Rollback()
		return 0, err
	}
	tx.Commit()
	return id, nil
}
And now the code is very straightforward, with no tricks, which IMO is Go at its best.

Need faster way to list all datasets/tables in project

I am creating a utility that needs to be aware of all the datasets/tables that exist in my BigQuery project. My current code for getting this information is as follows (using Go API):
func populateExistingTableMap(service *bigquery.Service, cloudCtx context.Context, projectId string) (map[string]map[string]bool, error) {
	tableMap := map[string]map[string]bool{}
	call := service.Datasets.List(projectId)
	//call.Fields("datasets/datasetReference")
	if err := call.Pages(cloudCtx, func(page *bigquery.DatasetList) error {
		for _, v := range page.Datasets {
			if tableMap[v.DatasetReference.DatasetId] == nil {
				tableMap[v.DatasetReference.DatasetId] = map[string]bool{}
			}
			table_call := service.Tables.List(projectId, v.DatasetReference.DatasetId)
			//table_call.Fields("tables/tableReference")
			if err := table_call.Pages(cloudCtx, func(page *bigquery.TableList) error {
				for _, t := range page.Tables {
					tableMap[v.DatasetReference.DatasetId][t.TableReference.TableId] = true
				}
				return nil
			}); err != nil {
				return errors.New("Error Parsing Table")
			}
		}
		return nil
	}); err != nil {
		return tableMap, err
	}
	return tableMap, nil
}
For a project with about 5000 datasets, each with up to 10 tables, this code takes almost 15 minutes to return. Is there a faster way to iterate through the names of all existing datasets/tables? I have tried using the Fields method to return only the fields I need (you can see those lines commented out above), but that results in only 50 (exactly 50) of my datasets being returned.
Any ideas?
Here is an updated version of my code, with concurrency, that reduced the processing time from about 15 minutes to 3 minutes.
func populateExistingTableMap(service *bigquery.Service, cloudCtx context.Context, projectId string) (map[string]map[string]bool, error) {
	tableMap := map[string]map[string]bool{}
	call := service.Datasets.List(projectId)
	//call.Fields("datasets/datasetReference")
	if err := call.Pages(cloudCtx, func(page *bigquery.DatasetList) error {
		var wg sync.WaitGroup
		wg.Add(len(page.Datasets))
		for _, v := range page.Datasets {
			if tableMap[v.DatasetReference.DatasetId] == nil {
				tableMap[v.DatasetReference.DatasetId] = map[string]bool{}
			}
			// Hand each goroutine its own inner map; the outer map is only
			// written from this loop, which avoids concurrent map access.
			go func(service *bigquery.Service, datasetID string, projectId string, tables map[string]bool) {
				defer wg.Done()
				table_call := service.Tables.List(projectId, datasetID)
				//table_call.Fields("tables/tableReference")
				if err := table_call.Pages(cloudCtx, func(page *bigquery.TableList) error {
					for _, t := range page.Tables {
						tables[t.TableReference.TableId] = true
					}
					return nil // NOTE: returning a non-nil error stops pagination.
				}); err != nil {
					// TODO: Handle error.
					fmt.Println(err)
				}
			}(service, v.DatasetReference.DatasetId, projectId, tableMap[v.DatasetReference.DatasetId])
		}
		wg.Wait()
		return nil // NOTE: returning a non-nil error stops pagination.
	}); err != nil {
		// TODO: Handle error.
		return tableMap, err
	}
	return tableMap, nil
}
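On the Fields problem: when you request a partial response, the page token has to be requested explicitly, otherwise the pagination helper never sees a nextPageToken and stops after the first page, which would explain getting back exactly 50 datasets (the default page size). A sketch of the likely fix, untested:

call.Fields("datasets/datasetReference", "nextPageToken")
table_call.Fields("tables/tableReference", "nextPageToken")

With the tokens included, the Fields optimization should combine with the concurrency above for a further speedup.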

How to query Redis db from golang using redigo library

I am trying to figure out the best way to query a Redis db for multiple keys in one command.
I have seen MGET, which can be used from redis-cli. But how do you do that with the redigo library from Go code? Imagine I have a slice of keys and I want to fetch the values for all of those keys from Redis in one query.
Thanks in advance!
Assuming that c is a Redigo connection and keys is a []string of your keys:
var args []interface{}
for _, k := range keys {
	args = append(args, k)
}
values, err := redis.Strings(c.Do("MGET", args...))
if err != nil {
	// handle error
}
for _, v := range values {
	fmt.Println(v)
}
The Go FAQ explains why you need to copy the keys. The spec describes how to pass a slice to a variadic param.
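redigo also ships an Args helper that does this flattening for you, so the loop above can be replaced with a one-liner (same assumptions about c and keys as above):

values, err := redis.Strings(c.Do("MGET", redis.Args{}.AddFlat(keys)...))

AddFlat appends each element of the slice as a separate argument, which is exactly the copy described in the FAQ.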
http://play.golang.org/p/FJazj_PuCq
func main() {
	// connect to localhost, make sure to have redis-server running on the default port
	conn, err := redis.Dial("tcp", ":6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// add some keys
	if _, err = conn.Do("SET", "k1", "a"); err != nil {
		log.Fatal(err)
	}
	if _, err = conn.Do("SET", "k2", "b"); err != nil {
		log.Fatal(err)
	}
	// for fun, let's leave k3 non-existing

	// get many keys in a single MGET, ask redigo for []string result
	strs, err := redis.Strings(conn.Do("MGET", "k1", "k2", "k3"))
	if err != nil {
		log.Fatal(err)
	}
	// prints [a b ]
	fmt.Println(strs)

	// now what if we want some integers instead?
	if _, err = conn.Do("SET", "k4", "1"); err != nil {
		log.Fatal(err)
	}
	if _, err = conn.Do("SET", "k5", "2"); err != nil {
		log.Fatal(err)
	}

	// get the keys, but ask redigo to give us a []interface{}
	// (it doesn't have a redis.Ints helper).
	vals, err := redis.Values(conn.Do("MGET", "k4", "k5", "k6"))
	if err != nil {
		log.Fatal(err)
	}

	// scan the []interface{} slice into a []int slice
	var ints []int
	if err = redis.ScanSlice(vals, &ints); err != nil {
		log.Fatal(err)
	}
	// prints [1 2 0]
	fmt.Println(ints)
}
UPDATE March 10th 2015: redigo now has a redis.Ints helper.
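With that helper, the second half of the example gets shorter; a sketch (I've left out the non-existing k6 here because I'm not certain how redis.Ints treats nil replies):

ints, err := redis.Ints(conn.Do("MGET", "k4", "k5"))
if err != nil {
	log.Fatal(err)
}
// prints [1 2]
fmt.Println(ints)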
