I have a loop that is apparently causing a data race. It's near the bottom of this function and I have marked it:
func (p *PartialParty) SendReadyCheck(party PartialParty) {
    msg, err := json.Marshal(&ReadyCheckMsg{"ReadyCheck", ""})
    if err != nil {
        log.Println(err)
    }
    for _, member := range party.Members {
        member.Conn.send <- msg
    }
    counter := 0
    loopBreaker := true
    for {
        select {
        case <-p.Accept:
            counter++
            resp, err := json.Marshal(&ReadyCheckMsg{"ReadyAccepted", ""})
            if err != nil {
                log.Println(err)
            }
            for _, member := range party.Members {
                member.Conn.send <- resp
            }
            if counter == 2 {
                // Create a new party with all members
                partyid := PartyID(feeds.NewUUID().String())
                db := common.Db()
                newParty := &Party{
                    Active:    true,
                    Members:   p.Members,
                    Broadcast: make(chan []byte),
                    PartyID:   partyid,
                }
                // Insert the new party into the database
                _, err := db.Exec("INSERT INTO party SET party_id = ?, active = ?", partyid.String(), true)
                if err != nil {
                    log.Println(err)
                }
                // Go through the members and update the database
                var wg sync.WaitGroup
                for _, member := range party.Members {
                    wg.Add(1)
                    m := member
                    go func() {
                        _, err := db.Exec("UPDATE party_members SET active = ? WHERE steamid = ?", false, m.SteamID)
                        if err != nil {
                            log.Println(err)
                        }
                        _, err = db.Exec("INSERT INTO party_members SET belongs_to = ?, active = ?, steamid = ?", partyid.String(), true, m.SteamID)
                        if err != nil {
                            log.Println(err)
                        }
                        wg.Done()
                    }()
                }
                // Wait for all the database stuff to finish
                wg.Wait()
                PHub.AddNewParty(newParty)
                loopBreaker = false
                break
            }
        case conn := <-p.Decline:
            if conn.Ready {
                break
            }
            conn.Ready = false
            conn.InQueue = false
            conn.CurrentParty = ""
            resp, err := json.Marshal(&ReadyCheckMsg{"ReadyCheckDeclined", ""})
            if err != nil {
                log.Println(err)
            }
            p.Accepting = true
            identifier := conn.Identifier
            if _, ok := party.Members[identifier]; ok {
                delete(p.Members, identifier)
            }
            for _, m := range party.Members {
                member := m
                member.Conn.send <- resp
            }
            log.Println("Here")
            loopBreaker = false
            break
        case <-time.After(30 * time.Second):
            if counter == 2 {
                return
            }
            p.Accepting = true
            failedMsg, err := json.Marshal(&ReadyCheckMsg{"FailedToReady", ""})
            if err != nil {
                log.Println(err)
            }
            somebodyDeclinedMsg, err := json.Marshal(&ReadyCheckMsg{"ReadyCheckDeclined", ""})
            if err != nil {
                log.Println(err)
            }
            for _, member := range party.Members { // <<<< This line
                m := member
                if !m.Conn.Ready {
                    m.Conn.Ready = false
                    m.Conn.InQueue = false
                    m.Conn.CurrentParty = ""
                    m.Conn.send <- failedMsg
                } else {
                    m.Conn.Ready = false
                    m.Conn.send <- somebodyDeclinedMsg
                }
            }
            loopBreaker = false
            break
        }
        if !loopBreaker {
            break
        }
    }
}
It is apparently conflicting with this:
// AddNewMember will add a new user to the party
func (p *PartyHub) AddNewMember(member *Member, partyid PartyID) {
    p.Lock()
    defer p.Unlock()
    p.PartialPartys[partyid].Members[member.Conn.Identifier] = member // <<<< This line
}

type PartialParty struct {
    Accepting bool
    Members   map[Identifier]*Member
    Accept    chan *Connection
    Decline   chan *Connection
    PartyID   PartyID
    sync.Mutex
}
Right now it is impossible to call AddNewMember while the SendReadyCheck goroutine is running, because it is protected by an if statement that checks whether the goroutine is running, so I'm not sure why the race detector says they are racing each other. Any help clearing this up would be great. I've tried setting a variable inside the loop to try to get around it, but it doesn't seem to help.

"Right now it is impossible to call AddNewMember while the SendReadyCheck goroutine is running, because it is protected by an if statement that checks whether the goroutine is running."
You didn't actually show that part of the code, but presumably it's not impossible. What if SendReadyCheck starts running after the if test but before AddNewMember does its modification?
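For reference, a minimal sketch of one way to remove the race, assuming PartialPartys holds *PartialParty values and the embedded sync.Mutex on PartialParty is meant to guard Members: take that lock around every access to the map, in AddNewMember as well as inside SendReadyCheck.

// Sketch only: guard every access to the Members map with the party's mutex.
func (p *PartyHub) AddNewMember(member *Member, partyid PartyID) {
    p.Lock()
    party := p.PartialPartys[partyid]
    p.Unlock()

    party.Lock()
    party.Members[member.Conn.Identifier] = member
    party.Unlock()
}

// Inside SendReadyCheck, copy the members under the lock before ranging,
// so the channel sends happen outside the critical section.
func (p *PartialParty) membersSnapshot() []*Member {
    p.Lock()
    defer p.Unlock()
    members := make([]*Member, 0, len(p.Members))
    for _, m := range p.Members {
        members = append(members, m)
    }
    return members
}

The marked loop (and the other ranges over party.Members) would then iterate the snapshot instead of the live map, and the delete(p.Members, identifier) call would likewise happen under the lock.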
Related
I have a for loop which inserts data into 2 different tables. How can I use ants (the package below) in this case?
GH Package Ref: https://github.com/panjf2000/ants
for _, row := range rows {
    user := User{}
    user.Name = row.Name
    user.Email = row.Email
    err := dm.Insert(&user)
    if err != nil {
        panic(err)
    }

    address := Address{}
    address.Address1 = row.Address1
    address.Address2 = row.Address2
    address.PinCode = row.PinCode
    address.City = row.City
    err = dm.Insert(&address)
    if err != nil {
        panic(err)
    }
}
Something like:
func insertRow() {
    // TODO add code to get 'rows'
    const workerCount = 10
    p, err := ants.NewPool(workerCount)
    if err != nil {
        panic(err)
    }
    defer p.Release()

    rowChan := make(chan RowType)
    var wg sync.WaitGroup

    // Each submitted task receives one row from the channel and inserts it.
    insertRecords := func() {
        defer wg.Done()
        row := <-rowChan
        user := User{}
        user.Name = row.Name
        user.Email = row.Email
        err := dm.Insert(&user)
        if err != nil {
            panic(err)
        }
        address := Address{}
        address.Address1 = row.Address1
        address.Address2 = row.Address2
        address.PinCode = row.PinCode
        address.City = row.City
        err = dm.Insert(&address)
        if err != nil {
            panic(err)
        }
    }

    for _, row := range rows {
        wg.Add(1)
        if err := p.Submit(insertRecords); err != nil {
            panic(err)
        }
        rowChan <- row
    }
    wg.Wait()
}
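A variant worth considering: instead of handing each row over an unbuffered channel, capture the row in the closure submitted to the pool, which removes the coordination between Submit and the channel send. A minimal sketch, keeping the hypothetical RowType, dm, User, and Address from the question:

import (
    "log"
    "sync"

    "github.com/panjf2000/ants/v2" // or github.com/panjf2000/ants for v1
)

func insertRows(rows []RowType) error {
    const workerCount = 10
    p, err := ants.NewPool(workerCount)
    if err != nil {
        return err
    }
    defer p.Release()

    var wg sync.WaitGroup
    for _, row := range rows {
        row := row // capture the loop variable (needed before Go 1.22)
        wg.Add(1)
        if err := p.Submit(func() {
            defer wg.Done()
            user := User{Name: row.Name, Email: row.Email}
            if err := dm.Insert(&user); err != nil {
                log.Println(err)
                return
            }
            address := Address{Address1: row.Address1, Address2: row.Address2, PinCode: row.PinCode, City: row.City}
            if err := dm.Insert(&address); err != nil {
                log.Println(err)
            }
        }); err != nil {
            wg.Done() // the task was never queued
            return err
        }
    }
    wg.Wait()
    return nil
}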
I use the following code which works and installs helm charts.
I have a list of charts and I install each chart in a loop, waiting (upgradeAction.Wait = true, see below) until the chart is up and running (using Helm's wait=true flag) before installing the next one. The problem is that waiting for each chart to become healthy before moving on takes a lot of time. Is there a way to install them all in parallel and only verify at the end (after all the chart installations) that everything works, like the wait flag does but for a list of charts?
Here is the code:
tmpfile, err := ioutil.TempFile(kp, kcp)
if err != nil {
    log.Error(err, "error")
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write(cfg); err != nil {
    return err
}
if err := tmpfile.Close(); err != nil {
    return err
}
kcfgFilePath := tmpfile.Name()
settings := cli.New()
ac := new(action.Configuration)
clientGetter := genericclioptions.NewConfigFlags(false)
clientGetter.KubeConfig = &kcfgFilePath

for _, chartInstallation := range charts {
    chart, err := loader.Load(chartInstallation.Path)
    if err != nil {
        return err
    }
    releaseName := releaseName + "-" + chartInstallation.Name
    if err := ac.Init(clientGetter, settings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
    }); err != nil {
        return err
    }
    releasePresent := true
    statusAction := action.NewStatus(ac)
    status, err := statusAction.Run(releaseName)
    if err != nil {
        if strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error()) {
            releasePresent = false
        } else {
            return err
        }
    }
    if !releasePresent {
        // install chart
        installAction := action.NewInstall(ac)
        installAction.CreateNamespace = true
        installAction.Namespace = chartInstallation.Namespace
        installAction.ReleaseName = releaseName
        _, err := installAction.Run(chart, nil)
        if err != nil {
            return err
        }
        log.Info("chart installed: ", "releaseName", releaseName)
    }
    if status != nil {
        if releasePresent && status.Info.Status.String() == release.StatusFailed.String() {
            upgradeAction := action.NewUpgrade(ac)
            // HERE IT WAITS FOR THE CHART TO VERIFY THAT EVERYTHING IS UP
            upgradeAction.Wait = true
            upgradeAction.ReuseValues = false
            upgradeAction.Recreate = false
            _, err := upgradeAction.Run(releaseName, chart, nil)
            if err != nil {
                return err
            }
        }
    }
}
If I change it to upgradeAction.Wait = false, it starts installing all the charts without waiting for each one's health checks; however, I'm not sure how to verify them at the end of all the chart installations.
You could start a goroutine for each chart you're installing (wrapping the chart install code inside goroutines) and then use sync.WaitGroup to wait for all the goroutines to finish. Something like this:
package main

import (
    "fmt"
    "os"
    "strings"
    "sync"
    // plus the Helm and client-go imports used below (action, cli,
    // chart/loader, release, storage/driver, genericclioptions),
    // unchanged from the code in the question.
)

func main() {
    kcfgFilePath := tmpfile.Name()
    settings := cli.New()
    clientGetter := genericclioptions.NewConfigFlags(false)
    clientGetter.KubeConfig = &kcfgFilePath

    var wg sync.WaitGroup
    for _, chartInstallation := range charts {
        wg.Add(1)
        go installChart(&wg, clientGetter, settings, chartInstallation)
    }
    fmt.Println("Installing...")
    wg.Wait()
    fmt.Println("Installed!")
}

// installChart runs one chart installation in its own goroutine.
// ChartInstallation stands for whatever element type the charts slice has
// in the question.
func installChart(wg *sync.WaitGroup, clientGetter *genericclioptions.ConfigFlags, settings *cli.EnvSettings, chartInstallation ChartInstallation) error {
    defer wg.Done()
    chart, err := loader.Load(chartInstallation.Path)
    if err != nil {
        return err
    }
    releaseName := releaseName + "-" + chartInstallation.Name
    // Give every goroutine its own action.Configuration.
    ac := new(action.Configuration)
    if err := ac.Init(clientGetter, settings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
    }); err != nil {
        return err
    }
    releasePresent := true
    statusAction := action.NewStatus(ac)
    status, err := statusAction.Run(releaseName)
    if err != nil {
        if strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error()) {
            releasePresent = false
        } else {
            return err
        }
    }
    if !releasePresent {
        // install chart
        installAction := action.NewInstall(ac)
        installAction.CreateNamespace = true
        installAction.Namespace = chartInstallation.Namespace
        installAction.ReleaseName = releaseName
        if _, err := installAction.Run(chart, nil); err != nil {
            return err
        }
        log.Info("chart installed: ", "releaseName", releaseName)
    }
    if status != nil && releasePresent && status.Info.Status.String() == release.StatusFailed.String() {
        upgradeAction := action.NewUpgrade(ac)
        // Each goroutine can keep Wait = true; they now wait in parallel.
        upgradeAction.Wait = true
        upgradeAction.ReuseValues = false
        upgradeAction.Recreate = false
        if _, err := upgradeAction.Run(releaseName, chart, nil); err != nil {
            return err
        }
    }
    return nil
}
Here's a good resource for that: https://goinbigdata.com/golang-wait-for-all-goroutines-to-finish/
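One drawback of the plain WaitGroup version is that the error each installChart call returns is discarded, so a failed install goes unnoticed. A sketch of the same fan-out using golang.org/x/sync/errgroup, assuming installChart is adjusted to drop the WaitGroup parameter and simply return its error; this also covers the "verify at the end" part of the question, since each goroutine can keep Wait = true and the caller checks g.Wait():

import "golang.org/x/sync/errgroup"

func installAllCharts(clientGetter *genericclioptions.ConfigFlags, settings *cli.EnvSettings) error {
    var g errgroup.Group
    for _, chartInstallation := range charts {
        chartInstallation := chartInstallation // capture the loop variable (needed before Go 1.22)
        g.Go(func() error {
            // Same body as installChart above, minus the WaitGroup.
            return installChart(clientGetter, settings, chartInstallation)
        })
    }
    // Wait blocks until every install has finished and returns the first error.
    return g.Wait()
}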
I use gorose in a Go web project, with code like:
var tablecheckrequest = "checkrequest"

func (mysqldao *MysqlDao) GetAllCheckRulesByRequestId(id int) []map[string]interface{} {
    result, _ := mysqldao.connection.Table(tablecheckrequest).
        Where("requestid", "=", id).Get()
    return result
}
After a while I get this error:
Can't create more than max_prepared_stmt_count statements (current value: 16382)
Why is this error happening?
I finally found it!
Here is the source code of gorose:
func (dba *Database) Execute(args ...interface{}) (int64, error) {
    //defer DB.Close()
    lenArgs := len(args)
    var sqlstring string
    var vals []interface{}
    sqlstring = args[0].(string)
    if lenArgs > 1 {
        for k, v := range args {
            if k > 0 {
                vals = append(vals, v)
            }
        }
    }
    // record the SQL log
    //Connect.SqlLog = append(Connect.SqlLog, fmt.Sprintf(sqlstring, vals...))
    dba.LastSql = fmt.Sprintf(sqlstring, vals...)
    dba.SqlLogs = append(dba.SqlLogs, dba.LastSql)
    var operType string = strings.ToLower(sqlstring[0:6])
    if operType == "select" {
        return 0, errors.New("this method does not allow select operations, use Query")
    }
    if dba.trans == true {
        stmt, err := tx.Prepare(sqlstring)
        if err != nil {
            return 0, err
        }
        return dba.parseExecute(stmt, operType, vals)
    }
    stmt, err := DB.Prepare(sqlstring)
    if err != nil {
        return 0, err
    }
    return dba.parseExecute(stmt, operType, vals)
}
In this method a statement is prepared but never closed!
To fix it, we just need to add a defer stmt.Close() inside parseExecute(), like this:
func (dba *Database) parseExecute(stmt *sql.Stmt, operType string, vals []interface{}) (int64, error) {
    defer stmt.Close()
    var rowsAffected int64
    var err error
    result, errs := stmt.Exec(vals...)
    if errs != nil {
        return 0, errs
    }
    switch operType {
    case "insert":
        // get rows affected
        rowsAffected, err = result.RowsAffected()
        dba.RowsAffected = int(rowsAffected)
        // get last insert id
        rowsAffected, err = result.LastInsertId()
        dba.LastInsertId = int(rowsAffected)
    case "update":
        rowsAffected, err = result.RowsAffected()
    case "delete":
        rowsAffected, err = result.RowsAffected()
    }
    return rowsAffected, err
}
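As a side note, the leak exists because gorose prepares a new server-side statement for every call. With database/sql directly, passing the query and arguments straight to Exec avoids it, since Exec manages the prepared statement's lifetime itself. A minimal sketch of that pattern (the table and column names are only placeholders):

// db is a *sql.DB. Exec prepares, runs, and cleans up the statement
// internally, so nothing accumulates on the MySQL server.
_, err := db.Exec("UPDATE checkrequest SET active = ? WHERE requestid = ?", false, id)
if err != nil {
    log.Println(err)
}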
I am trying to achieve some sort of multi-thread processing over here.
func (m *Map) Parse(mapData Node) error {
    wg := &sync.WaitGroup{}
    for _, node := range mapData.child {
        wg.Add(1)
        go parseChild(node, m, wg)
    }
    wg.Wait()
    close(errors)
    return nil
}

func parseChild(node Node, m *Map, wg *sync.WaitGroup) {
    defer wg.Done()
    var nodeType uint8
    if err := binary.Read(node.data, binary.LittleEndian, &nodeType); err != nil {
        errors <- err
    }
    if nodeType == OTBMNodeTowns {
        for _, town := range node.child {
            var nodeType uint8
            if err := binary.Read(town.data, binary.LittleEndian, &nodeType); err != nil {
                errors <- err
                return
            }
            if nodeType != OTBMNodeTown {
                errors <- fmt.Errorf("Parsing map towns: expected %v got %v", OTBMNodeTown, nodeType)
                return
            }
            currentTown := Town{}
            if err := binary.Read(town.data, binary.LittleEndian, &currentTown.ID); err != nil {
                errors <- err
                return
            } else if currentTown.Name, err = town.ReadString(); err != nil {
                errors <- err
                return
            } else if currentTown.TemplePosition, err = town.ReadPosition(); err != nil {
                errors <- err
                return
            }
            m.Towns = append(m.Towns, currentTown)
            errors <- fmt.Errorf("This should be called: %v", nodeType)
            return
        }
    }
}
But my goroutines never send anything to the errors channel. It seems the main thread is not even waiting for the goroutines to finish.
I have no idea what I'm missing here. I'm waiting for all routines to finish using wg.Wait, but it doesn't seem to be working as I think it should.
And yes, the slice is populated with at least 3 results. This is the errors channel:
var (
    errors = make(chan error, 0)
)

func init() {
    go errChannel()
}

func errChannel() {
    for {
        select {
        case err := <-errors:
            log.Println(err)
        }
    }
}
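As a general note (not necessarily the cause of the symptom above), a common way to structure this kind of fan-out so errors are collected reliably is golang.org/x/sync/errgroup, which replaces both the WaitGroup and the long-lived errors channel. A minimal sketch against the types above, assuming parseChild is changed to return an error instead of sending on the channel:

import "golang.org/x/sync/errgroup"

func (m *Map) Parse(mapData Node) error {
    var g errgroup.Group
    for _, node := range mapData.child {
        node := node // capture the loop variable (needed before Go 1.22)
        g.Go(func() error {
            // parseChild would return its error here instead of sending it
            // on the errors channel; appends to m.Towns from multiple
            // goroutines still need their own synchronization.
            return parseChild(node, m)
        })
    }
    // Wait blocks until every goroutine returns and yields the first error.
    return g.Wait()
}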
Following some of @Sam Whited's advice and doing some research on Stack Overflow, I've rewritten my code, see below. This version seems more stable; however, every once in a while I get a slew of TCP errors, as if I'm not closing my requests. I've throttled the requests by adding a sleep, which seems to help a bit.
func main() {
    runtime.GOMAXPROCS(maxParallelism())
    var file = flag.String("f", "", "Enter newline delimited text file")
    var fileName = flag.String("s", "contact_bot.csv", "Enter newline delimited text file")
    flag.Parse()
    if *file != "" {
        counter := 0
        filters = []string{"info", "ads", "sales", "sale", "info", "media", "mediarelations", "media_relations", "contact", "contacts", "contactus", "contact_us", "contact-us", "about_us", "general", "advertise", "support", "systems", "system"}
        emailRE = regexp.MustCompile(`([a-z0-9!#$%&'*+\/=?^_{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_{|}~-]+)*(@|\sat\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\.|\sdot\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)`)
        seedUrls, err := readLines(*file)
        checkErr(err)
        numberOfUrls := len(seedUrls)
        usr, err := user.Current()
        checkErr(err)
        parentPath := filepath.Join(usr.HomeDir, "/Desktop/"+*fileName)
        file, err := os.Create(parentPath)
        checkErr(err)
        defer file.Close()
        writer := csv.NewWriter(file)
        defer writer.Flush()
        var header = []string{"URL", "EMAILS"}
        err = writer.Write(header)
        checkErr(err)
        data = make(chan *HTTPResponse)
        go asyncHTTPGets(seedUrls)
    loop:
        for result := range data {
            counter++
            emails := findEmails(result.HTML, filters)
            fmt.Printf("%s, %s, %s\n", result.URL, emails, strconv.Itoa(numberOfUrls))
            var row = []string{result.URL, strings.Join(emails, ",")}
            err := writer.Write(row)
            // writer.Flush()
            checkErr(err)
            if counter == len(seedUrls) {
                break loop
            }
            numberOfUrls--
        }
    }
}

// AsyncHTTPGets ...
func asyncHTTPGets(urls []string) {
    counter := 0
    for _, url := range urls {
        counter++
        if counter%10 == 0 {
            time.Sleep(1 * time.Second)
        }
        go func(url string) {
            fmt.Printf("Fetching %s \n", url)
            resp, err := http.Get(url)
            if err != nil {
                fmt.Println(err.Error())
                data <- &HTTPResponse{url, err.Error()}
                return
            }
            b := resp.Body
            buf := new(bytes.Buffer)
            buf.ReadFrom(b)
            resp.Body.Close()
            myHTML := buf.String()
            data <- &HTTPResponse{url, myHTML}
        }(url)
    }
}

func findEmails(html string, filters []string) []string {
    emails := emailRE.FindAllString(html, -1)
    filteredEmails := []string{}
    for _, email := range emails {
        if stringInSlice(email, filters) {
            if !stringInSlice(email, filteredEmails) {
                filteredEmails = append(filteredEmails, email)
            }
        }
    }
    sort.Strings(filteredEmails)
    return filteredEmails
}
The application will open a large number of sockets and possibly breach file descriptor limits. I suggest limiting the number of concurrent requests to prevent this issue:
var (
    requestMu    sync.Mutex // protects requestCount
    requestCount int        // incremented on each request
)

// Create 10 workers. Adjust up or down as needed.
for w := 0; w < 10; w++ {
    go func() {
        for {
            // Increment request count. Exit at end.
            requestMu.Lock()
            i := requestCount
            requestCount++
            requestMu.Unlock()
            if i >= len(seedUrls) {
                return
            }

            // Fetch the current URL.
            myURL := seedUrls[i]
            resp, err := http.Get(myURL)
            if err != nil {
                fmt.Println(myURL, err.Error(), i)
                data <- &HTTPResponse{myURL, err.Error()}
                continue
            }

            // Read body and close.
            b, err := ioutil.ReadAll(resp.Body)
            resp.Body.Close()
            if err != nil {
                fmt.Println(myURL, err.Error(), i)
                data <- &HTTPResponse{myURL, err.Error()}
                continue
            }
            myHTML := string(b)

            data <- &HTTPResponse{myURL, myHTML}
        }
    }()
}

// Receive the expected number of results.
for i := 0; i < len(seedUrls); i++ {
    result := <-data
    emails := findEmails(result.HTML, filters)
    fmt.Printf("%s, %s, %d\n", result.URL, emails, i)
    var row = []string{result.URL, strings.Join(emails, ",")}
    err := writer.Write(row)
    writer.Flush()
    if err != nil {
        panic(err)
    }
}
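The mutex-guarded counter works, but an equally common shape is to feed the URLs to the fixed set of workers over a channel and close it once the input is exhausted. A minimal sketch of that variant, reusing the data channel and HTTPResponse type from above:

urls := make(chan string)

// Start a fixed number of workers that drain the urls channel.
for w := 0; w < 10; w++ {
    go func() {
        for u := range urls {
            resp, err := http.Get(u)
            if err != nil {
                data <- &HTTPResponse{u, err.Error()}
                continue
            }
            b, err := ioutil.ReadAll(resp.Body)
            resp.Body.Close()
            if err != nil {
                data <- &HTTPResponse{u, err.Error()}
                continue
            }
            data <- &HTTPResponse{u, string(b)}
        }
    }()
}

// Feed the workers and close the channel when every URL has been sent,
// which lets the worker loops exit cleanly.
go func() {
    for _, u := range seedUrls {
        urls <- u
    }
    close(urls)
}()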