package main
import (
"encoding/json"
"fmt"
"/something/models"
"os"
"path/filepath"
"runtime"
)
func WriteDeviceToFile(d chan *models.Device, fileName string) {
_, b, _, _ := runtime.Caller(0)
basepath := filepath.Dir(b)
filePath := basepath + "/dataFile/" + fileName
var f *os.File
var err error
f, _ = os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0600)
defer f.Close()
for device := range d {
deviceB, err := json.Marshal(device)
fmt.Println(string(deviceB))
if err == nil {
if _, err = f.WriteString(string(deviceB)); err != nil {
panic(err)
}
} else {
panic(err)
}
}
}
func main() {
deviceChan := make(chan *models.Device)
go WriteDeviceToFile(deviceChan, "notalive.txt")
d := models.NewDevice("12346", "")
deviceChan <- d
d = models.NewDevice("abcd", "")
deviceChan <- d
close(deviceChan)
}
This only works with at least two devices sent to channel. With only one device in deviceChan, the function does not receive anything. Is the channel gone before the WriteDeviceToFile gets to it?
The program exits when main returns. Nothing prevents main from exiting before the files are written
Related
I have the following script that generates random sequences and hashes them on the cpu with several threads in go.
package main
import(
"fmt"
"crypto/sha256"
"encoding/hex"
"math/rand"
"time"
"log"
"os"
)
func randChr(i int)(string){
i = i + 65
if i>90 {
i = i - 43
}
return string(i)
}
func randStr(random *rand.Rand, length int)(string){
result := ""
for len(result)<length{
result = result + randChr(random.Intn(36))
}
return result
}
func HashPass(data []byte) (bool,[32]byte){
hash := sha256.Sum256(data)
s := hex.EncodeToString(hash[:])
pass := true
for i := 0; i<7; i++ {
if s[i] != s[i+1]{
pass = false
break;
}
}
return pass,hash
}
func getAPassingHash()(string){
randSource := rand.NewSource(time.Now().UnixNano())
random := rand.New(randSource)
passes := false
s := ""
for !passes {
s=randStr(random,64)
passes,_ = HashPass([]byte(s))
}
return(s)
}
func worker(ch chan string){
ch <- getAPassingHash()
}
func timer(ch chan string,wait time.Duration){
time.Sleep(wait)
ch <- "End"
}
func append(fn string,conts string){
f, err := os.OpenFile(fn,
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Println(err)
}
defer f.Close()
if _, err := f.WriteString(conts); err != nil {
log.Println(err)
}
}
func main(){
ch := make(chan string)
go timer(ch,6*time.Hour)
for i:=0;i<9;i++{
time.Sleep(time.Second)
go worker(ch)
}
for true{
result := <-ch
if result == "End"{
break;
}
go worker(ch)
fmt.Println(result)
hash := sha256.Sum256([]byte(result))
fmt.Println(hex.EncodeToString(hash[:]))
fmt.Println()
append("hashes.txt","\n"+result+"\n"+hex.EncodeToString(hash[:])+"\n")
}
fmt.Println("done")
}
For some reason the script hangs up occasionally until I click on the command prompt and hit enter. I'm not sure where it is getting stuck but I can see that my system CPU utilization goes down and I know I have results somehow blocking out the program, I hit enter and results print and cpu usage spikes back up. I know this might be hard to replicate but I would really appreciate any suggestions.
It doesn't hang, it waits for the value of passes to be true
func getAPassingHash()(string){
randSource := rand.NewSource(time.Now().UnixNano())
random := rand.New(randSource)
passes := false
s := ""
for !passes { // wait for value is true
s=randStr(random,64)
passes,_ = HashPass([]byte(s))
}
return(s)
}
I want to have the limited number of goroutines that make some computation (func worker(), it makes some computation and places the result in a channel). Also a have another channel, that has "jobs" for my workers. As a result I can see that all jobs were computed correctly, but after computation executions stucks.
package main
import (
"bufio"
"fmt"
"os"
"net/http"
"io/ioutil"
"strings"
"time"
)
func worker(id int, urls <- chan string, results chan<- int) {
var data string
for url := range urls {
fmt.Println("worker", id, "started job", url)
if (strings.HasPrefix(url, "http") || strings.HasPrefix(url, "https")) {
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
}
data = string(body)
} else {
body, err := ioutil.ReadFile(url)
if err != nil {
fmt.Println(err)
}
data = string(body)
}
number := strings.Count(data, "Go")
fmt.Println("worker", id, "finished job", url, "Number of Go is", number)
results <- number
}
return
}
func main() {
final_result := 0
maxNbConcurrentGoroutines := 5
numJobs := 0
urls := make(chan string)
results := make(chan int)
scanner := bufio.NewScanner(os.Stdin)
start := time.Now()
for w := 1; w <= maxNbConcurrentGoroutines; w++ {
go worker(w, urls, results)
}
for scanner.Scan() {
url := (scanner.Text())
urls <- url
numJobs += 1
}
close(urls)
for num := range results {
final_result += num
}
t := time.Now()
elapsed := t.Sub(start)
for i := 1; i <= numJobs; i++ {
one_result := <- results
final_result += one_result
}
fmt.Println("Number = ", final_result)
fmt.Println("Time = ", elapsed)
if err := scanner.Err(); err != nil {
fmt.Fprintln(os.Stderr, "error:", err)
os.Exit(1)
}
}
I tried to use https://gobyexample.com/worker-pools to extract all the values from results channel, but was not succeed. What should I do to have it unstacked and gone further. Here is an example of how to run it:
echo -e 'https://golang.org\n/etc/passwd\nhttps://golang.org\nhttps://golang.org' | go run 1.go
Your program doesn't return because it waits the closed status of results channel.
In https://gobyexample.com/worker-pools the loop for getting results is different:
for a := 1; a <= numJobs; a++ {
<-results
}
If you want to use for num := range results you need close(results) and determine when to call it.
You can view another example using WaitGroup at https://gobyexample.com/waitgroups
Example script is just wrapper to "wc -m" command, simple symbol counter.
I trying just feed input with "teststrings" slice elements. And receive number of symbol of each string at output listener goroutine. Looking for a way to make "wc" listen forever for input at all. I'v notice when i increase sleep to
time.Sleep(6000 * time.Nanosecond)
wc don't wait for input.
package main
import (
"bytes"
"fmt"
"os/exec"
"time"
)
func main() {
BashCommand := exec.Command("wc", "-m")
InputBytes := &bytes.Buffer{}
OutputBytes := &bytes.Buffer{}
BashCommand.Stdin = InputBytes
BashCommand.Stdout = OutputBytes
e := BashCommand.Start()
time.Sleep(1 * time.Nanosecond)
_, _ = InputBytes.Write([]byte("13symbolsting"))
if e != nil {
fmt.Println(e)
}
fmt.Println("after run")
teststrings := []string{
"one",
"twoo",
"threeeee",
}
for _, s := range teststrings {
_, _ = InputBytes.Write([]byte(s))
}
//result printer
go func() {
for {
line, _ := OutputBytes.ReadString('\n')
if line != "" {
fmt.Println(line)
}
}
}()
var input string
fmt.Scanln(&input) //dont exit until keypress
}
If you increase the sleep to a large value, the goroutine started by the command to pump InputBytes to the process runs before data is written to InputBytes. The goroutine closes the pipe to the child and exits without having read any data.
Use pipes instead of bytes.Buffer:
c := exec.Command("wc", "-m")
w, _ := c.StdinPipe()
r, _ := c.StdoutPipe()
if err := c.Start(); err != nil {
log.Fatal(err)
}
w.Write([]byte("13symbolsting"))
teststrings := []string{
"one",
"twoo",
"threeeee",
}
for _, s := range teststrings {
w.Write([]byte(s))
}
w.Close() // Close pipe to indicate input is done.
var wg sync.WaitGroup
wg.Add(1)
go func() {
s := bufio.NewScanner(r)
for s.Scan() {
fmt.Println(s.Text())
}
wg.Done()
}()
wg.Wait()
Another option is to write to the bytes.Buffer before starting the command and wait for command to complete before reading the output:
c := exec.Command("wc", "-m")
var w, r bytes.Buffer
c.Stdin = &w
c.Stdout = &r
// Write data before starting command.
w.Write([]byte("13symbolsting"))
teststrings := []string{
"one",
"twoo",
"threeeee",
}
for _, s := range teststrings {
w.Write([]byte(s))
}
if err := c.Start(); err != nil {
log.Fatal(err)
}
// Wait for command to complete before reading data.
if err := c.Wait(); err != nil {
log.Fatal(err)
}
s := bufio.NewScanner(&r)
for s.Scan() {
fmt.Println(s.Text())
}
I am new to Go and have been working through different issues I am having with the code I am trying to write. One problem though has me scratching my head. I have been searching net but so far did not find a way to solve this.
As you will see in the below code, I am using flag to specify whether to create log file or not. The problem I am running into is that if I put w := bufio.NewWriter(f) inside the if loop then w is inaccessible from the following for loop. If I leave it outside of the if loop then buffio cannot access f.
I know I am missing something dead simple, but I am at a loss at this moment.
Does anyone have any suggestions?
package main
import (
"bufio"
"flag"
"fmt"
"os"
"time"
"path/filepath"
"strconv"
)
var (
logFile = flag.String("file", "yes", "Save output into file")
t = time.Now()
dir, _ = filepath.Abs(filepath.Dir(os.Args[0]))
)
func main() {
flag.Parse()
name := dir + "/" + "output_" + strconv.FormatInt(t.Unix(), 10) + ".log"
if *logFile == "yes" {
f, err := os.Create(name)
if err != nil {
panic(err)
}
defer f.Close()
}
w := bufio.NewWriter(f)
for _, v := range my_slice {
switch {
case *logFile == "yes":
fmt.Fprintln(w, v)
case *logFile != "yes":
fmt.Println(v)
}
}
w.Flush()
}
os.Stdout is an io.Writer too, so you can simplify your code to
w := bufio.NewWriter(os.Stdout)
if *logFile == "yes" {
// ...
w = bufio.NewWriter(f)
}
for _, v := range mySlice {
fmt.Fprintln(w, v)
}
w.Flush()
such as, I write 'A' but in file it is '1000001' ,
how can I do ?
I have tried
buf := new(bytes.Buffer)
data := []int8{65, 80}
for _, i := range data {
binary.Write(buf, binary.LittleEndian, i)
fp.Write(buf.Bytes())
}
but I got string 'AP' in file not a binary code
I didn't really understand the question, but perhaps you want something like:
package main
import (
"fmt"
"log"
"os"
)
func main() {
f, err := os.OpenFile("out.txt", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
log.Fatal(err)
}
for _, v := range "AP" {
fmt.Fprintf(f, "%b\n", v)
}
f.Close()
}
which gives:
$ cat out.txt
1000001
1010000