I have a for loop that inserts data into two different tables. How can I use ants (the package below) in this case?
GH Package Ref: https://github.com/panjf2000/ants
for _, row := range rows {
user := User{}
user.Name = row.Name
user.Email = row.Email
err := dm.Insert(&user)
if err != nil {
panic(err)
}
address := Address{}
address.Address1 = row.Address1
address.Address2 = row.Address2
address.PinCode = row.PinCode
address.City = row.City
err = dm.Insert(&address)
if err != nil {
panic(err)
}
}
Something like:
func insertRow() {
// TODO add code to get 'rows'
const workerCount = 10
p, err := ants.NewPool(workerCount)
if err != nil {
panic(err)
}
defer p.Release()
rowChan := make(chan RowType)
var wg sync.WaitGroup
insertRecords := func() {
defer wg.Done()
row := <-rowChan
user := User{}
user.Name = row.Name
user.Email = row.Email
err := dm.Insert(&user)
if err != nil {
panic(err)
}
address := Address{}
address.Address1 = row.Address1
address.Address2 = row.Address2
address.PinCode = row.PinCode
address.City = row.City
err = dm.Insert(&address)
if err != nil {
panic(err)
}
}
for _, row := range rows {
wg.Add(1)
_ = p.Submit(insertRecords)
rowChan <- row
}
wg.Wait()
}
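For reference, here is a rough sketch of how the loop above could be wired to an ants pool: each row is captured in a closure and submitted to the pool, and the WaitGroup is only used to wait for completion. It assumes the types and dm from the question, that dm.Insert is safe for concurrent use, and it logs worker errors instead of panicking; it is not the only possible shape.

func insertRows(rows []RowType) {
    const workerCount = 10

    p, err := ants.NewPool(workerCount)
    if err != nil {
        panic(err)
    }
    defer p.Release()

    var wg sync.WaitGroup
    for _, row := range rows {
        row := row // copy for the closure (needed before Go 1.22)
        wg.Add(1)
        if err := p.Submit(func() {
            defer wg.Done()
            user := User{Name: row.Name, Email: row.Email}
            if err := dm.Insert(&user); err != nil {
                log.Println(err) // report instead of panicking inside a worker
                return
            }
            address := Address{
                Address1: row.Address1,
                Address2: row.Address2,
                PinCode:  row.PinCode,
                City:     row.City,
            }
            if err := dm.Insert(&address); err != nil {
                log.Println(err)
            }
        }); err != nil {
            wg.Done() // the task was never queued
            log.Println(err)
        }
    }
    wg.Wait()
}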
Related
For example
func Query(myvarlist []string) {
stmt, err := tx.Prepare("SELECT * FROM mytable WHERE myvar = $1 AND myvar2 = $2".......)
defer stmt.Close()
if _, err := stmt.Exec(myvarlist....); err != nil {
}
}
Can we pass a variable-length list of arguments to Exec?
You can do something like this:
func Query() {
db, err := sql.Open("pgx", `postgresql://CONNSTRING`)
if err != nil {
panic(err)
}
queryParamMap := map[string]string{
"id": "6",
"name": "1",
}
// Build up statement and params
cols := make([]string, len(queryParamMap))
args := make([]any, len(queryParamMap))
i := 0
for k, v := range queryParamMap {
cols[i] = fmt.Sprintf("%s = $%d", k, i+1) // WARNING - SQL Injection possible here if col names are not sanitised
args[i] = v
i++
}
// Using Prepare because the question used it but this is only worthwhile if you will run stmt multiple times
stmt, err := db.Prepare(`SELECT id FROM devices WHERE ` + strings.Join(cols, " and "))
if err != nil {
panic(err)
}
defer stmt.Close()
rows, err := stmt.Query(args...)
if err != nil {
panic(err)
}
defer rows.Close()
for rows.Next() {
var id int
if err = rows.Scan(&id); err != nil {
panic(err)
}
fmt.Println("value:", id)
}
// Should check rows.Err() etc. but this is just an example...
}
I've put the column names and values into a map because it was not clear from your question where any extra column names would come from, but hopefully this provides the info you need.
This example is also using Query rather than Exec (because it's easier to test) but the same approach will work with Exec.
Note: Take a look at squirrel for an example of how to take this a lot further....
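For instance, here is a rough sketch of the same filter built with squirrel (github.com/Masterminds/squirrel), reusing the example table and columns above; check the squirrel docs for the exact API:

import sq "github.com/Masterminds/squirrel"

// Build the WHERE clause from a map of column -> value, with $n placeholders.
q := sq.Select("id").
    From("devices").
    Where(sq.Eq{"id": "6", "name": "1"}).
    PlaceholderFormat(sq.Dollar) // $1, $2, ... for PostgreSQL

sqlStr, args, err := q.ToSql()
if err != nil {
    panic(err)
}
rows, err := db.Query(sqlStr, args...)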
I use the following code, which works and installs Helm charts.
I have a list of charts. The code installs each chart in a loop and waits (upgradeAction.Wait = true, see below) until the chart is up and running (using Helm's wait flag) before installing the next one. The problem is that waiting for each chart to become healthy before moving on to the next takes a lot of time. Is there a way to install them all in parallel and only verify at the end (after all the chart installations) that everything works, like how the wait works but for a list of charts?
Here is the code:
tmpfile, err := ioutil.TempFile(kp, kcp)
if err != nil {
log.Error(err, "error")
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write(cfg); err != nil {
return err
}
if err := tmpfile.Close(); err != nil {
return err
}
kcfgFilePath := tmpfile.Name()
settings := cli.New()
ac := new(action.Configuration)
clientGetter := genericclioptions.NewConfigFlags(false)
clientGetter.KubeConfig = &kcfgFilePath
for _, chartInstallation := range charts {
chart, err := loader.Load(chartInstallation.Path)
if err != nil {
return err
}
releaseName := releaseName + "-" + chartInstallation.Name
if err := ac.Init(clientGetter, settings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
}); err != nil {
return err
}
releasePresent := true
statusAction := action.NewStatus(ac)
status, err := statusAction.Run(releaseName)
if err != nil {
if strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error()) {
releasePresent = false
} else {
return err
}
}
if !releasePresent {
// install chart
installAction := action.NewInstall(ac)
installAction.CreateNamespace = true
installAction.Namespace = chartInstallation.Namespace
installAction.ReleaseName = releaseName
_, err := installAction.Run(chart, nil)
if err != nil {
return err
}
log.Info("chart installed: ", "releaseName", releaseName)
}
if status != nil {
if releasePresent && status.Info.Status.String() == release.StatusFailed.String() {
upgradeAction := action.NewUpgrade(ac)
// HERE IT WAIT FOR THE CHART TO VERIFY THAT EVERYTHING IS UP
upgradeAction.Wait = true
upgradeAction.ReuseValues = false
upgradeAction.Recreate = false
_, err := upgradeAction.Run(releaseName, chart, nil)
if err != nil {
return err
}
}
}
}
If I change it to upgradeAction.Wait = false, it starts installing all the charts without waiting for each one's health checks; however, I'm not sure how I can verify them at the end of all the chart installations.
You could start a goroutine for each chart you're installing (wrapping the chart install code inside goroutines) and then use sync.WaitGroup to wait for all the goroutines to finish. Something like this:
package main
import (
"fmt"
"os"
"strings"
"sync"
)
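// Note: this is only a sketch. It assumes tmpfile, charts, releaseName, ac, settings,
// clientGetter and the Helm/Kubernetes imports from the question are in scope; in a real
// program they would be passed into installChart or captured in a closure. Also note that
// the error returned by installChart is lost because it is launched with `go`; see the
// errgroup sketch after this answer for one way to collect those errors.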
func main() {
kcfgFilePath := tmpfile.Name()
settings := cli.New()
ac := new(action.Configuration)
clientGetter := genericclioptions.NewConfigFlags(false)
clientGetter.KubeConfig = &kcfgFilePath
var wg sync.WaitGroup
for _, chartInstallation := range charts {
wg.Add(1)
go installChart(&wg, chartInstallation.Path)
}
fmt.Println("Installing...")
wg.Wait()
fmt.Println("Installed!")
}
func installChart(wg *sync.WaitGroup, chartInstallationPath string) error {
defer wg.Done()
chart, err := loader.Load(chartInstallationPath)
if err != nil {
return err
}
releaseName := releaseName + "-" + chartInstallation.Name
if err := ac.Init(clientGetter, settings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
}); err != nil {
return err
}
releasePresent := true
statusAction := action.NewStatus(ac)
status, err := statusAction.Run(releaseName)
if err != nil {
if strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error()) {
releasePresent = false
} else {
return err
}
}
if !releasePresent {
// install chart
installAction := action.NewInstall(ac)
installAction.CreateNamespace = true
installAction.Namespace = chartInstallation.Namespace
installAction.ReleaseName = releaseName
_, err := installAction.Run(chart, nil)
if err != nil {
return err
}
log.Info("chart installed: ", "releaseName", releaseName)
}
if status != nil {
if releasePresent && status.Info.Status.String() == release.StatusFailed.String() {
upgradeAction := action.NewUpgrade(ac)
// HERE IT WAIT FOR THE CHART TO VERIFY THAT EVERYTHING IS UP
upgradeAction.Wait = true
upgradeAction.ReuseValues = false
upgradeAction.Recreate = false
_, err := upgradeAction.Run(releaseName, chart, nil)
if err != nil {
return err
}
}
}
return nil
}
Here's a good resource for that: https://goinbigdata.com/golang-wait-for-all-goroutines-to-finish/
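One caveat with the sketch above: installChart returns an error, but that return value is lost because the function is launched with go. If you want the errors as well as the waiting, golang.org/x/sync/errgroup behaves like a WaitGroup that also collects the first error. A minimal sketch, assuming installChart is refactored to take the chart installation (the ChartInstallation type name here is hypothetical) and everything else it needs as parameters or via a closure:

import "golang.org/x/sync/errgroup"

func installAll(charts []ChartInstallation) error {
    var g errgroup.Group
    for _, ci := range charts {
        ci := ci // copy for the closure (needed before Go 1.22)
        g.Go(func() error {
            // installChart is assumed to do the status check / install / upgrade logic
            // from the answer above (with Wait = true) and to return any error.
            return installChart(ci)
        })
    }
    // Wait blocks until every goroutine has returned and reports the first non-nil error.
    return g.Wait()
}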
Here is a snippet of my code so far
func DownloadFile(filepath string, url string) {
resp, err := http.Get(url)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
out, err := os.Create(filepath)
if err != nil {
log.Fatal(err)
}
defer out.Close()
_, err = io.Copy(out, resp.Body)
if err != nil {
log.Fatal(err)
}
wg.Done()
}
func main() {
for {
fp := <computes filepath>
u := <computes url>
zero := len(x.String())
j := 1
for {
wg.Add(1)
pad := fmt.Sprintf("%0" + strconv.Itoa(zero) + "d", j)
go DownloadFile(fp + string(os.PathSeparator) + pad + ".txt", u)
j = j + 1
}
wg.Wait()
}
}
Right now, goroutines are launched in the inner for loop. I want to make my program faster by adding goroutines in the outer for loop as well.
EDIT: Progress bar
The progress bar is incremented each time the outer for loop iterates. I want it to increment only once the inner for loop has completed.
func DownloadFile(filepath string, url string) {
resp, err := http.Get(url)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
out, err := os.Create(filepath)
if err != nil {
log.Fatal(err)
}
defer out.Close()
_, err = io.Copy(out, resp.Body)
if err != nil {
log.Fatal(err)
}
wg.Done()
}
func main() {
for {
fp := <computes filepath>
u := <computes url>
zero := len(x.String())
count := int(x.Int())
bar := pb.StartNew(count)
j := 1
for {
bar.Increment()
wg.Add(1)
pad := fmt.Sprintf("%0" + strconv.Itoa(zero) + "d", j)
go DownloadFile(fp + string(os.PathSeparator) + pad + ".txt", u)
j = j + 1
}
}
wg.Wait()
bar.Finish()
}
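For reference, one way to get that behaviour is to give each outer iteration its own WaitGroup and only call bar.Increment() after that batch has finished. This is only a sketch: computeDir/computeURL, totalOuter, count and width are placeholders for the elided "<computes ...>" parts and the values derived from x, it assumes DownloadFile no longer calls wg.Done() itself (the wrapping goroutine handles that), and pb is whatever progress-bar package is already in use.

// downloadAll runs the outer loop: each iteration computes its target directory and URL,
// downloads the batch, and only then ticks the progress bar.
func downloadAll(totalOuter, count, width int) {
    bar := pb.StartNew(totalOuter)
    for i := 0; i < totalOuter; i++ {
        dir := computeDir(i) // placeholder for <computes filepath>
        u := computeURL(i)   // placeholder for <computes url>
        downloadBatch(dir, u, count, width)
        bar.Increment() // one tick per completed inner loop
    }
    bar.Finish()
}

// downloadBatch downloads `count` files into dir concurrently and returns when all have finished.
func downloadBatch(dir, url string, count, width int) {
    var batch sync.WaitGroup
    for j := 1; j <= count; j++ {
        batch.Add(1)
        name := filepath.Join(dir, fmt.Sprintf("%0"+strconv.Itoa(width)+"d", j)+".txt")
        go func(name string) {
            defer batch.Done()
            DownloadFile(name, url)
        }(name)
    }
    batch.Wait()
}

If the outer iterations should also run concurrently, each downloadBatch call can be launched in its own goroutine behind another WaitGroup, as long as the progress bar in use is safe for concurrent Increment calls.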
Following some of @Sam Whited's advice and doing some research on Stack Overflow, I've rewritten my code (see below). This version seems more stable; however, every once in a while I get a slew of TCP errors, as if I'm not closing my requests. I've throttled the requests by adding a sleep, which seems to help a bit.
func main() {
runtime.GOMAXPROCS(maxParallelism())
var file = flag.String("f", "", "Enter new line deliminated text file")
var fileName = flag.String("s", "contact_bot.csv", "Enter new line deliminated text file")
flag.Parse()
if *file != "" {
counter := 0
filters = []string{"info", "ads", "sales", "sale", "info", "media", "mediarelations", "media_relations", "contact", "contacts", "contactus", "contact_us", "contact-us", "about_us", "general", "advertise", "support", "systems", "system"}
emailRE = regexp.MustCompile(`([a-z0-9!#$%&'*+\/=?^_{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_{|}~-]+)*(#|\sat\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\.|\sdot\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)`)
seedUrls, err := readLines(*file)
checkErr(err)
numberOfUrls := len(seedUrls)
usr, err := user.Current()
checkErr(err)
parentPath := filepath.Join(usr.HomeDir, "/Desktop/"+*fileName)
file, err := os.Create(parentPath)
checkErr(err)
defer file.Close()
writer := csv.NewWriter(file)
defer writer.Flush()
var header = []string{"URL", "EMAILS"}
err = writer.Write(header)
checkErr(err)
data = make(chan *HTTPResponse)
go asyncHTTPGets(seedUrls)
loop:
for result := range data {
counter++
emails := findEmails(result.HTML, filters)
fmt.Printf("%s, %s, %s\n", result.URL, emails, strconv.Itoa(numberOfUrls))
var row = []string{result.URL, strings.Join(emails, ",")}
err := writer.Write(row)
// writer.Flush()
checkErr(err)
if counter == len(seedUrls) {
break loop
}
numberOfUrls--
}
}
}
// AsyncHTTPGets ...
func asyncHTTPGets(urls []string) {
counter := 0
for _, url := range urls {
counter++
if counter%10 == 0 {
time.Sleep(1 * time.Second)
}
go func(url string) {
fmt.Printf("Fetching %s \n", url)
resp, err := http.Get(url)
if err != nil {
fmt.Println(err.Error())
data <- &HTTPResponse{url, err.Error()}
return
}
b := resp.Body
buf := new(bytes.Buffer)
buf.ReadFrom(b)
resp.Body.Close()
myHTML := buf.String()
data <- &HTTPResponse{url, myHTML}
}(url)
}
}
func findEmails(html string, filters []string) []string {
emails := emailRE.FindAllString(html, -1)
filteredEmails := []string{}
for _, email := range emails {
if stringInSlice(email, filters) {
if !stringInSlice(email, filteredEmails) {
filteredEmails = append(filteredEmails, email)
}
}
}
sort.Strings(filteredEmails)
return filteredEmails
}
The application will open a large number of sockets and possibly breach file descriptor limits. I suggest limiting the number of concurrent requests to prevent this issue:
var (
requestMu sync.Mutex // protects requestCount
requestCount int // incremented on each request
)
// Create 10 workers. Adjust up or down as needed.
for w := 0; w < 10; w++ {
go func() {
for {
// Increment request count. Exit at end.
requestMu.Lock()
i := requestCount
requestCount++
requestMu.Unlock()
if i >= len(seedUrls) {
return
}
// Fetch the current URL.
myURL := seedUrls[i]
resp, err := http.Get(myURL)
if err != nil {
fmt.Println(myURL, err.Error(), i)
data <- &HTTPResponse{myURL, err.Error()}
continue
}
// Read body and close.
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
fmt.Println(myURL, err.Error(), i)
data <- &HTTPResponse{myURL, err.Error()}
continue
}
myHTML := string(b)
data <- &HTTPResponse{myURL, myHTML}
}
}()
}
// Receive the expected number of results
for i := 0; i < len(seedUrls); i++ {
result := <-data
emails := findEmails(result.HTML, filters)
fmt.Printf("%s, %s, %d\n", result.URL, emails, i)
var row = []string{result.URL, strings.Join(emails, ",")}
err := writer.Write(row)
writer.Flush()
if err != nil {
panic(err)
}
}
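A variation on the same worker idea, in case the shared counter and mutex feel heavy: feed the URLs to the workers over a channel and let range distribute them. This is just an alternative sketch of the pattern above, not a drop-in replacement for the rest of the program:

urls := make(chan string)

// Start a fixed number of workers that pull URLs from the channel.
for w := 0; w < 10; w++ {
    go func() {
        for u := range urls {
            resp, err := http.Get(u)
            if err != nil {
                data <- &HTTPResponse{u, err.Error()}
                continue
            }
            b, err := ioutil.ReadAll(resp.Body)
            resp.Body.Close()
            if err != nil {
                data <- &HTTPResponse{u, err.Error()}
                continue
            }
            data <- &HTTPResponse{u, string(b)}
        }
    }()
}

// Feed the URLs and close the channel so the workers exit when the work runs out.
go func() {
    for _, u := range seedUrls {
        urls <- u
    }
    close(urls)
}()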
I have a loop that is apparently causing a data race. It's near the bottom of this function and I have it marked:
func (p *PartialParty) SendReadyCheck(party PartialParty) {
msg, err := json.Marshal(&ReadyCheckMsg{"ReadyCheck", ""})
if err != nil {
log.Println(err)
}
for _, member := range party.Members {
member.Conn.send <- msg
}
counter := 0
loopBreaker := true
for {
select {
case <-p.Accept:
counter++
resp, err := json.Marshal(&ReadyCheckMsg{"ReadyAccepted", ""})
if err != nil {
log.Println(err)
}
for _, member := range party.Members {
member.Conn.send <- resp
}
if counter == 2 {
// Create a new party with all members
partyid := PartyID(feeds.NewUUID().String())
db := common.Db()
newParty := &Party{
Active: true,
Members: p.Members,
Broadcast: make(chan []byte),
PartyID: partyid,
}
// Insert the new party into the database
_, err := db.Exec("INSERT INTO party SET party_id = ?, active = ?", partyid.String(), true)
if err != nil {
log.Println(err)
}
// Go through the members and update the database
var wg sync.WaitGroup
for _, member := range party.Members {
wg.Add(1)
m := member
go func() {
_, err := db.Exec("UPDATE party_members SET active = ? WHERE steamid = ?", false, m.SteamID)
if err != nil {
log.Println(err)
}
_, err = db.Exec("INSERT INTO party_members SET belongs_to =?, active = ?, steamid = ?", partyid.String(), true, m.SteamID)
if err != nil {
log.Println(err)
}
wg.Done()
}()
}
// Wait for all the database stuff to finish
wg.Wait()
PHub.AddNewParty(newParty)
loopBreaker = false
break
}
case conn := <-p.Decline:
if conn.Ready {
break
}
conn.Ready = false
conn.InQueue = false
conn.CurrentParty = ""
resp, err := json.Marshal(&ReadyCheckMsg{"ReadyCheckDeclined", ""})
if err != nil {
log.Println(err)
}
p.Accepting = true
identifier := conn.Identifier
if _, ok := party.Members[identifier]; ok {
delete(p.Members, identifier)
}
for _, m := range party.Members {
member := m
member.Conn.send <- resp
}
log.Println("Here")
loopBreaker = false
break
case <-time.After(30 * time.Second):
if counter == 2 {
return
}
p.Accepting = true
failedMsg, err := json.Marshal(&ReadyCheckMsg{"FailedToReady", ""})
if err != nil {
log.Println(err)
}
somebodyDeclinedMsg, err := json.Marshal(&ReadyCheckMsg{"ReadyCheckDeclined", ""})
if err != nil {
log.Println(err)
}
>>>> for _, member := range party.Members { ***<<<< This Line***
m := member
if !m.Conn.Ready {
m.Conn.Ready = false
m.Conn.InQueue = false
m.Conn.CurrentParty = ""
m.Conn.send <- failedMsg
} else {
m.Conn.Ready = false
m.Conn.send <- somebodyDeclinedMsg
}
}
loopBreaker = false
break
}
if !loopBreaker {
break
}
}
}
It is apparently conflicting with this:
// AddNewMember will add a new user to the party
func (p *PartyHub) AddNewMember(member *Member, partyid PartyID) {
p.Lock()
defer p.Unlock()
>>> p.PartialPartys[partyid].Members[member.Conn.Identifier] = member
}
type PartialParty struct {
Accepting bool
Members map[Identifier]*Member
Accept chan *Connection
Decline chan *Connection
PartyID PartyID
sync.Mutex
}
Right now it is impossible to call AddNewMember while the SendReadyCheck goroutine is running, because it is protected by an if statement that checks whether the goroutine is running, so I'm not sure why the race detector says they are racing each other. Any help clearing this up would be great. I've tried setting a variable inside the loop to try to get around it, but it doesn't seem to fix it.
Right now it is impossible to call AddNewMember while the SendReadyCheck goroutine is running, because it is protected by an if statement that checks whether the goroutine is running
You didn't actually show that part of the code, but presumably it's not impossible. What if SendReadyCheck starts running after the if test but before AddNewMember does its modification?
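To make the race concrete: both paths touch the same Members map, the select loop reads and deletes from it without any lock, and AddNewMember writes to it while holding only the hub's lock. A minimal sketch of one way to fix it, assuming you keep the embedded sync.Mutex on PartialParty and take it on every access to Members (a channel-based handoff would be another option):

// AddNewMember locks the party whose map it mutates, not just the hub.
func (p *PartyHub) AddNewMember(member *Member, partyid PartyID) {
    p.Lock()
    party := p.PartialPartys[partyid]
    p.Unlock()

    party.Lock()
    party.Members[member.Conn.Identifier] = member
    party.Unlock()
}

// snapshotMembers copies the map under the lock so SendReadyCheck can range over the
// copy (and send on member channels) without holding the lock.
func (p *PartialParty) snapshotMembers() []*Member {
    p.Lock()
    defer p.Unlock()
    members := make([]*Member, 0, len(p.Members))
    for _, m := range p.Members {
        members = append(members, m)
    }
    return members
}

Every "for _, member := range party.Members" in SendReadyCheck would then range over snapshotMembers(), and the delete(p.Members, identifier) would also be wrapped in Lock/Unlock.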