How to connect xml decoder to stdoutPipe() of go exec - go

I'm having some trouble linking things up here.
What am I doing incorrectly?
package main

import (
    "encoding/xml"
    "fmt"
    "log"
    "os/exec"
)

func checkError(err error) {
    if err != nil {
        log.Fatalf("Error: %s", err)
    }
}

func metrics() {
    cmd := exec.Command(
        "nvidia-smi",
        "--query",
        "--xml-format")
    out, err := cmd.StdoutPipe()
    checkError(err)
    cmd.Start()
    defer cmd.Wait()
    go func() {
        var data interface{}
        dec := xml.NewDecoder(out)
        dec.Decode(&data)
        fmt.Printf("Data: %+v\n", data)
    }()
    //go io.Copy(os.Stdout, out)
}

func main() {
    metrics()
}
The result after running the program is:
Data:

Things seem to be "linked" correctly.
The problem is likely here:
var data interface{}
You then do:
dec.Decode(&data)
But that won't work.
You need to pass in a struct that can actually be used to decode the fields in the XML that the nvidia-smi command returns.
Find below a modified example (replacing your nvidia-smi with an echo command so that it returns a sample XML).
You should adjust the struct to be able to map to the actual XML you'll receive.
By the way:
You should check the error returned by Decode, just in case.
I don't understand why you are decoding in a separate goroutine. I left it like that in the modified example, but it would work just as well if you decode in the same goroutine.
Example:
package main

import (
    "encoding/xml"
    "fmt"
    "log"
    "os/exec"
)

func checkError(err error) {
    if err != nil {
        log.Fatalf("Error: %s", err)
    }
}

type Result struct {
    Value int `xml:"value"`
}

func metrics() {
    cmd := exec.Command(
        "echo", "-n",
        `<result><value>1</value></result>`)
    out, err := cmd.StdoutPipe()
    checkError(err)
    cmd.Start()
    defer cmd.Wait()
    go func() {
        var data Result
        dec := xml.NewDecoder(out)
        err = dec.Decode(&data)
        checkError(err)
        fmt.Printf("Data: %+v\n", data)
    }()
    //go io.Copy(os.Stdout, out)
}

func main() {
    metrics()
}
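If you want to decode the real nvidia-smi output instead of the echo sample, the struct has to mirror the XML that the --query/--xml-format invocation actually emits. Here is a sketch of what that could look like; the element names (nvidia_smi_log, driver_version, gpu, product_name) are assumptions based on typical nvidia-smi XML and should be checked against the output on your machine:
package main

import (
    "encoding/xml"
    "fmt"
    "log"
    "os/exec"
)

// Field names below are assumptions; run nvidia-smi --query --xml-format
// yourself and adjust the struct tags to the elements you actually see.
type SMILog struct {
    XMLName       xml.Name `xml:"nvidia_smi_log"`
    DriverVersion string   `xml:"driver_version"`
    GPUs          []GPU    `xml:"gpu"`
}

type GPU struct {
    ID          string `xml:"id,attr"`
    ProductName string `xml:"product_name"`
}

func main() {
    cmd := exec.Command("nvidia-smi", "--query", "--xml-format")
    out, err := cmd.StdoutPipe()
    if err != nil {
        log.Fatalf("Error: %s", err)
    }
    if err := cmd.Start(); err != nil {
        log.Fatalf("Error: %s", err)
    }
    var data SMILog
    if err := xml.NewDecoder(out).Decode(&data); err != nil {
        log.Fatalf("Error: %s", err)
    }
    fmt.Printf("Data: %+v\n", data)
    cmd.Wait()
}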

Related

Go: negative WaitGroup counter

I'm somewhat new to go and am reworking code that I found somewhere else to fit my needs. Because of that, I don't totally understand what is happening here, although I get the general idea.
I'm running a few websocket clients using go routines, but I'm getting an unexpected error that causes the program to crash. My program seems to close one too many threads (excuse me if this is the wrong terminology) when there is an error reading a message from the websocket (check the conn.ReadMessage() func in the readHandler func). Any ideas on how would I work around this issue? I would really appreciate anyone taking the time to look through it. Thanks in advance!
package main

import (
    "bytes"
    "compress/gzip"
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "net/url"
    "os"
    "os/signal"
    "strconv"
    "sync"
    "syscall"
    "time"

    "github.com/gorilla/websocket"
)

// Structs
type Ping struct {
    Ping int64 `json:"ping"`
}

type Pong struct {
    Pong int64 `json:"pong"`
}

type SubParams struct {
    Sub string `json:"sub"`
    ID  string `json:"id"`
}

func InitSub(subType string, pair string, i int) []byte {
    var idInt string = "id" + strconv.Itoa(i)
    subStr := "market." + pair + "." + subType
    sub := &SubParams{
        Sub: subStr,
        ID:  idInt,
    }
    out, err := json.MarshalIndent(sub, "", " ")
    if err != nil {
        log.Println(err)
    }
    //log.Println(string(out))
    return out
}

// main func
func main() {
    var server string = "api.huobi.pro"
    pairs := []string{"btcusdt", "ethusdt", "ltcusdt"}
    comms := make(chan os.Signal, 1)
    signal.Notify(comms, os.Interrupt, syscall.SIGTERM)
    ctx := context.Background()
    ctx, cancel := context.WithCancel(ctx)
    var wg sync.WaitGroup
    for x, pair := range pairs {
        wg.Add(1)
        go control(server, "ws", pair, ctx, &wg, x+1)
    }
    <-comms
    cancel()
    wg.Wait()
}

func control(server string, path string, pair string, ctx context.Context, wg *sync.WaitGroup, i int) {
    fmt.Printf("Started control for %s\n", server)
    url := url.URL{
        Scheme: "wss",
        Host:   server,
        Path:   path,
    }
    fmt.Println(url.String())
    conn, _, err := websocket.DefaultDialer.Dial(url.String(), nil)
    if err != nil {
        panic(err)
    }
    subscribe(conn, pair, i)
    defer conn.Close()
    var localwg sync.WaitGroup
    localwg.Add(1)
    go readHandler(ctx, conn, &localwg, server)
    <-ctx.Done()
    localwg.Wait()
    wg.Done()
    return
}

func readHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
    for {
        select {
        case <-ctx.Done():
            wg.Done()
            return
        default:
            _, p, err := conn.ReadMessage()
            if err != nil {
                wg.Done()
                fmt.Println(err)
            }
            r, err := gzip.NewReader(bytes.NewReader(p))
            if err == nil {
                result, err := ioutil.ReadAll(r)
                if err != nil {
                    fmt.Println(err)
                }
                d := string(result)
                fmt.Println(d)
                var ping Ping
                json.Unmarshal([]byte(d), &ping)
                if ping.Ping > 0 {
                    str := Pong{Pong: ping.Ping}
                    msg, err := json.Marshal(str)
                    if err == nil {
                        fmt.Println(string(msg))
                        conn.WriteMessage(websocket.TextMessage, []byte(msg))
                    }
                }
            }
        }
    }
}

func subscribe(conn *websocket.Conn, pair string, id int) {
    sub := string(InitSub("trade.detail", pair, id))
    err := conn.WriteMessage(websocket.TextMessage, []byte(sub))
    if err != nil {
        panic(err)
    }
}
Break out of the readHandler loop when the connection fails:
_, p, err := conn.ReadMessage()
if err != nil {
    wg.Done()
    fmt.Println(err)
    return // <--- add this line
}
Without the return, the function spins in a tight loop reading errors and calling wg.Done() until the negative WaitGroup counter panic.
Use defer wg.Done() at the beginning of the goroutine to ensure that Done is called exactly once.
func readHandler(ctx context.Context, conn *websocket.Conn, wg *sync.WaitGroup, server string) {
    defer wg.Done()
    for {
        select {
        case <-ctx.Done():
            return
        default:
            _, p, err := conn.ReadMessage()
            if err != nil {
                fmt.Println(err)
                return
            }
            ...
Update the control function as well; a sketch is shown below.
Because the caller does not execute any code concurrently with readHandler, there's no value in running readHandler in a goroutine. Remove all references to wait groups from readHandler and call the function directly: change go readHandler(ctx, conn, &localwg, server) to readHandler(ctx, conn, server).
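A sketch of control after those changes, keeping the question's signature and the rest of the program as-is (one possible adjustment, not the only one):
func control(server string, path string, pair string, ctx context.Context, wg *sync.WaitGroup, i int) {
    defer wg.Done() // exactly one Done per control goroutine
    fmt.Printf("Started control for %s\n", server)
    u := url.URL{Scheme: "wss", Host: server, Path: path}
    fmt.Println(u.String())
    conn, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    subscribe(conn, pair, i)
    // readHandler now runs in this goroutine and returns on ctx.Done()
    // or on a read error, so no local WaitGroup is needed.
    readHandler(ctx, conn, server)
}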
There are more issues, but this should move you further along.

Writing to file from cmd output

I am trying to write a small code in Go that will collect and save stats from IPFS.
So my Go code will execute IPFS command and save its output in .txt file and keep updating that .txt file.
I am having trouble doing that.
This is my code:
package main

import (
    "fmt"
    "io"
    "log"
    "os"
    "os/exec"
    "time"
)

func ipfsCommand() (ipfsOutput string) {
    // output and error
    out, err := exec.Command("ipfs", "stats", "bitswap", "--human").Output()
    // if there are errors, print/log them
    if err != nil {
        log.Printf("error!")
        log.Fatal(err)
    } else {
        log.Printf("no error, printing output")
        fmt.Printf("%s", out)
    }
    return
}

func writeToFile(message string) error {
    f, err := os.Create("outputTest2_2.txt")
    if err != nil {
        return err
    }
    defer f.Close()
    l, err := io.WriteString(f, message)
    if err != nil {
        fmt.Println(err)
        f.Close()
        return err
    }
    fmt.Println(l, "bytes written successfully")
    return f.Sync()
}

func main() {
    // get current time
    currentTime := time.Now()
    fmt.Println("YYYY.MM.DD : ", currentTime.Format("2006.01.02 15:04:05"))
    writeToFile(currentTime)
    // get output from ipfs command
    msg := ipfsCommand()
    // write the output to file
    writeToFile(msg)
    fmt.Println("file written!!!")
    /* // write to file many times
    for i := 0; i < 3; i++ {
        // get output from ipfs command
        msg := ipfsCommand()
        // write the output to file
        writeToFile(msg)
    }*/
}
When the above code is run, this is the error:
# command-line-arguments
.\test2.go:49:13: cannot use currentTime (type time.Time) as type string in argument to writeToFile
Again, I want to get output from IPFS and save it to .txt file along with current time. I want to do this in loop because I want to save output from IPFS over a long period of time.
I tried to fix your script as is, but it just has too many issues. Here is a rewrite; maybe you can use it as a new starting point:
package main

import (
    "os"
    "os/exec"
    "time"
)

func main() {
    f, err := os.Create("outputTest2_2.txt")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    currentTime, err := time.Now().MarshalText()
    if err != nil {
        panic(err)
    }
    f.Write(append(currentTime, '\n'))

    msg, err := exec.Command("go", "env").Output()
    if err != nil {
        panic(err)
    }
    f.Write(msg)
}
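If you then want the periodic collection the question asks for, a minimal sketch along the same lines could be the following; it swaps go env back for the ipfs command from the question, and the file name, iteration count, and sampling interval are arbitrary choices:
package main

import (
    "os"
    "os/exec"
    "time"
)

func main() {
    // Open in append mode so every iteration adds to the same file.
    f, err := os.OpenFile("outputTest2_2.txt", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    for i := 0; i < 3; i++ { // or `for {` to collect indefinitely
        ts, err := time.Now().MarshalText()
        if err != nil {
            panic(err)
        }
        f.Write(append(ts, '\n'))

        out, err := exec.Command("ipfs", "stats", "bitswap", "--human").Output()
        if err != nil {
            panic(err)
        }
        f.Write(out)

        time.Sleep(10 * time.Second) // arbitrary sampling interval
    }
}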

Print the console log to the screen of a webpage continuously using Go Routines

I got the go routine below to work but the problem is that it prints to the console instead of to the screen. My idea is to have a running log of what commands or output is happening in a script show on a webpage where it can be watched in real time. Using fmt.Fprint doesn't do the trick. All that happens is that my webpage will never fully load. Help please?
Running external python in Golang, Catching continuous exec.Command Stdout
go code
package main

import (
    "bufio"
    "fmt"
    "io"
    "log"
    "net/http"
    "os/exec"

    "github.com/gorilla/mux"
)

func main() {
    r := mux.NewRouter()
    s := r.PathPrefix("/api/").Subrouter()
    s.HandleFunc("/export", export).Methods("GET")
    http.Handle("/", r)
    log.Panic(http.ListenAndServe(":80", nil))
}

func export(w http.ResponseWriter, r *http.Request) {
    cmd := exec.Command("python", "game.py")
    stdout, err := cmd.StdoutPipe()
    if err != nil {
        panic(err)
    }
    stderr, err := cmd.StderrPipe()
    if err != nil {
        panic(err)
    }
    err = cmd.Start()
    if err != nil {
        panic(err)
    }
    go copyOutput(stdout, w)
    go copyOutput(stderr, w)
    cmd.Wait()
}

func copyOutput(r io.Reader, w http.ResponseWriter) {
    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        fmt.Fprint(w, scanner.Text()) // line I expect to print to the screen, but doesn't
    }
}
python script
import time
import sys

while True:
    print "Hello"
    sys.stdout.flush()
    time.sleep(1)
There's a lot more to the site, so I know the route is configured correctly, because printing to the screen works when I'm not using the go routine.
UPDATE:
Here is my updated export function, which prints to the screen, but only after the entire script has run, not as it goes:
func export(w http.ResponseWriter, r *http.Request) {
    cmd := exec.Command("python", "game.py")
    cmd.Stdout = w
    cmd.Start()
    cmd.Wait()
}
I believe I may still need a go routine in order to get it to print as I go, but putting cmd.Start and/or cmd.Wait in one doesn't work
UPDATE:
So even with everything given, I have not been able to get the output to show in the browser as it is produced. It simply locks up the browser, even with the headers and flush. I will hopefully have time to give a complete, working answer to this, but for now the code above prints the output to the browser correctly after the script has run. I found a repo that I think may be what I'm looking for, and maybe it will help others who come across this question as well.
https://github.com/yudai/gotty
This is a very basic (naive) example, but it can give you an idea of how to stream data continuously:
https://play.golang.org/p/vtXPEHSv-Sg
The code for game.py is:
import time
import sys
while True:
print("Hello")
sys.stdout.flush()
time.sleep(1)
The web app code:
package main

import (
    "bufio"
    "fmt"
    "io"
    "log"
    "net/http"
    "os"
    "os/exec"

    "github.com/nbari/violetear"
)

func stream(w http.ResponseWriter, r *http.Request) {
    cmd := exec.Command("python", "game.py")
    rPipe, wPipe, err := os.Pipe()
    if err != nil {
        log.Fatal(err)
    }
    cmd.Stdout = wPipe
    cmd.Stderr = wPipe
    if err := cmd.Start(); err != nil {
        log.Fatal(err)
    }
    go writeOutput(w, rPipe)
    cmd.Wait()
    wPipe.Close()
}

func writeOutput(w http.ResponseWriter, input io.ReadCloser) {
    flusher, ok := w.(http.Flusher)
    if !ok {
        http.Error(w, "Streaming not supported", http.StatusInternalServerError)
        return
    }
    // Important to make it work in browsers
    w.Header().Set("Content-Type", "text/event-stream")
    w.Header().Set("Cache-Control", "no-cache")
    w.Header().Set("Connection", "keep-alive")
    in := bufio.NewScanner(input)
    for in.Scan() {
        fmt.Fprintf(w, "data: %s\n", in.Text())
        flusher.Flush()
    }
    input.Close()
}

func main() {
    router := violetear.New()
    router.HandleFunc("/", stream, "GET")
    log.Fatal(http.ListenAndServe(":8080", router))
}
The key part here is the use of http.Flusher and some headers to make it work within a browser:
w.Header().Set("Content-Type", "text/event-stream")
Note that the problem with this code is that once a request arrives it will exec the command, which loops forever, so wPipe.Close() will never be called:
cmd.Wait()
wPipe.Close()
To be more verbose, you could print the output to the terminal as well as to the browser:
for in.Scan() {
    data := in.Text()
    log.Printf("data: %s\n", data)
    fmt.Fprintf(w, "data: %s\n", data)
    flusher.Flush()
}
If you have more than one request, you will notice the terminal output getting faster; that's not a problem in itself, but you will also notice that if the client closes the connection or the browser, data keeps going out.
A better way is to execute the command within a context, as in the example from https://golang.org/pkg/os/exec/#CommandContext:
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
if err := exec.CommandContext(ctx, "sleep", "5").Run(); err != nil {
    // This will fail after 100 milliseconds. The 5 second sleep
    // will be interrupted.
}
Also take a look at the request context (https://stackoverflow.com/a/44146619/1135424), which now replaces http.CloseNotifier; it can be useful for terminating the process once the client closes the browser or disconnects.
In the end it depends on your needs, but I hope this gives you an idea of how to stream data in an easy way by using the http.Flusher interface.
Just for fun here is an example using the context:
https://play.golang.org/p/V69BuDUceBA
Still very basic, but in this case if the client closes the browser, the program also terminates. As an exercise, it could be nice to improve it and share back ;-). Notice the use of CommandContext and ctx.Done().
package main

import (
    "bufio"
    "fmt"
    "io"
    "log"
    "net/http"
    "os"
    "os/exec"

    "github.com/nbari/violetear"
)

func stream(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    ch := make(chan struct{})
    cmd := exec.CommandContext(ctx, "python", "game.py")
    rPipe, wPipe, err := os.Pipe()
    if err != nil {
        log.Fatal(err)
    }
    cmd.Stdout = wPipe
    cmd.Stderr = wPipe
    if err := cmd.Start(); err != nil {
        log.Fatal(err)
    }
    go writeOutput(w, rPipe)
    go func(ch chan struct{}) {
        cmd.Wait()
        wPipe.Close()
        ch <- struct{}{}
    }(ch)
    select {
    case <-ch:
    case <-ctx.Done():
        err := ctx.Err()
        log.Printf("Client disconnected: %s\n", err)
    }
}

func writeOutput(w http.ResponseWriter, input io.ReadCloser) {
    flusher, ok := w.(http.Flusher)
    if !ok {
        http.Error(w, "Streaming not supported", http.StatusInternalServerError)
        return
    }
    // Important to make it work in browsers
    w.Header().Set("Content-Type", "text/event-stream")
    w.Header().Set("Cache-Control", "no-cache")
    w.Header().Set("Connection", "keep-alive")
    in := bufio.NewScanner(input)
    for in.Scan() {
        data := in.Text()
        log.Printf("data: %s\n", data)
        fmt.Fprintf(w, "data: %s\n", data)
        flusher.Flush()
    }
    input.Close()
}

func main() {
    router := violetear.New()
    router.HandleFunc("/", stream, "GET")
    log.Fatal(http.ListenAndServe(":8080", router))
}

golang: net.Conn: check conn status

I encountered a strange behavior of the conn.Read:
let's presume that I have a couple of functions for testing net.Conn:
package example

import (
    "io"
    "log"
    "net"
    "os"
    "time"
)

func CheckConn(conn net.Conn) (net.Conn, error) {
    conn.SetReadDeadline(time.Now())
    var one = []byte{}
    _, err := conn.Read(one)
    if err != nil {
        log.Println("Net err: ", err)
    }
    if err == io.EOF {
        return conn, err
    }
    var zero time.Time
    conn.SetReadDeadline(zero)
    return conn, nil
}

func CheckConnWithTimeout(conn net.Conn) (net.Conn, error) {
    ch := make(chan bool, 1)
    defer func() {
        ch <- true
    }()
    go func() {
        select {
        case <-ch:
        case <-time.After(1 * time.Second):
            log.Println("It works too long")
            os.Exit(1)
        }
    }()
    return CheckConn(conn)
}
And I want to implement tests for it; let's start with this one:
package example

import (
    "io"
    "net"
    "testing"
)

func TestClosedConn(t *testing.T) {
    server, client := net.Pipe()
    client.Close()
    defer server.Close()
    _, err := CheckConn(server)
    if err != io.EOF {
        t.Errorf("Not equal:\nExpected: %v\nactual: %v", io.EOF, err)
    }
}
This works pretty well; we will receive io.EOF from the CheckConn function. Let's add one more test:
func TestClosedConnAfterWrite(t *testing.T) {
    server, client := net.Pipe()
    go func() {
        client.Write([]byte{0xb})
    }()
    client.Close()
    defer server.Close()
    _, err := CheckConn(server)
    err = nil
    if err != io.EOF {
        t.Errorf("Not equal:\nExpected: %v\nactual: %v", io.EOF, err)
    }
}
looks like the first test, but we wrote to the client before(?) it was closed.
And this will not pass!
conn.Read will return &errors.errorString{s:"EOF"} instead of io.EOF, so CheckConn will return a nil error.
It looks so weird!
But let's continue with the tests; now I want to check unclosed connections:
func TestActiveConn(t *testing.T) {
    server, client := net.Pipe()
    defer client.Close()
    defer server.Close()
    _, err := CheckConnWithTimeout(server)
    if err != nil {
        t.Errorf("Not equal:\nExpected: %v\nactual: %v", nil, err)
    }
}
I think you noticed that I use the function with a timeout just because SetReadDeadline will not work in this case (I have no idea why!).
So what is going wrong in the last two test cases? Is there a normal way to test the connection? Why is SetReadDeadline not working in this case?
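For reference, one way to make the EOF check in CheckConn more robust is to classify the read error with errors.Is instead of comparing it with ==, and to treat a read-deadline timeout as "nothing to read, connection still open". This is only a sketch of that idea; it assumes Go 1.15+ for os.ErrDeadlineExceeded and keeps the zero-length read from the question, whose probing behaviour depends on the net.Conn implementation:
package example

import (
    "errors"
    "io"
    "net"
    "os"
    "time"
)

// CheckConnStrict is a sketch, not code from the question: it classifies the
// error from a deadline-bounded read instead of comparing it to io.EOF with ==.
func CheckConnStrict(conn net.Conn) (net.Conn, error) {
    conn.SetReadDeadline(time.Now())
    defer conn.SetReadDeadline(time.Time{}) // clear the deadline again

    var one = []byte{} // zero-length read, as in the question
    _, err := conn.Read(one)
    switch {
    case err == nil:
        return conn, nil
    case errors.Is(err, io.EOF):
        return conn, io.EOF // peer closed the connection
    case errors.Is(err, os.ErrDeadlineExceeded):
        return conn, nil // read timed out: nothing to read, but still open
    default:
        var nerr net.Error
        if errors.As(err, &nerr) && nerr.Timeout() {
            return conn, nil
        }
        return conn, err
    }
}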

Proper pattern to encapsulate log setup in golang

When trying to move log setup code into a separate function, I ran into the inability to hide the destination file object from the main function. In the following INCORRECT simplified example, the attempt is made to set up log writing to both Stderr and a file via a single function call:
package main

import (
    "io"
    "log"
    "os"
)

func SetupLogging() {
    logFile, err := os.OpenFile("test.log", os.O_APPEND|os.O_CREATE, 0666)
    if err != nil {
        log.Panicln(err)
    }
    defer logFile.Close()
    log.SetOutput(io.MultiWriter(os.Stderr, logFile))
}

func main() {
    SetupLogging()
    log.Println("Test message")
}
Clearly it does not work, because defer closes the log file at the end of the SetupLogging function.
The working example below adds extra code and IMHO loses some clarity if repeated as a pattern in a larger application:
package main

import (
    "io"
    "log"
    "os"
)

func SetupLogging() *os.File {
    logFile, err := os.OpenFile("test.log", os.O_APPEND|os.O_CREATE, 0666)
    if err != nil {
        log.Panicln(err)
    }
    log.SetOutput(io.MultiWriter(os.Stderr, logFile))
    return logFile
}

func main() {
    logf := SetupLogging()
    defer logf.Close()
    log.Println("Test message")
}
Is there a different way to fully encapsulate open file management into a function, yet still nicely release the handle?
I have now successfully used the below approach for about a year in multiple projects. The idea is to return a function from the setup call. That resulting function contains the destruction logic. Here is an example:
package main

import (
    "fmt"
    "io"
    "log"
    "os"
)

func LogSetupAndDestruct() func() {
    logFile, err := os.OpenFile("test.log", os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
    if err != nil {
        log.Panicln(err)
    }
    log.SetOutput(io.MultiWriter(os.Stderr, logFile))
    return func() {
        e := logFile.Close()
        if e != nil {
            fmt.Fprintf(os.Stderr, "Problem closing the log file: %s\n", e)
        }
    }
}

func main() {
    defer LogSetupAndDestruct()()
    log.Println("Test message")
}
It is using a closure around the cleanup logic being deferred.
A somewhat more elaborate public example of using this approach is in the Viper code, where a test initializer returns such a cleanup closure, which is then used to encapsulate the cleanup logic and objects.
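For illustration only, a sketch of the same pattern in a test initializer (not Viper's actual code; it assumes the testing, os, io, and log imports):
func setupTestLogging(t *testing.T) func() {
    // Write the test log to a temporary file that the test framework cleans up.
    logFile, err := os.CreateTemp(t.TempDir(), "test-*.log")
    if err != nil {
        t.Fatal(err)
    }
    log.SetOutput(io.MultiWriter(os.Stderr, logFile))
    // The returned closure is the teardown; in a test you would write:
    // defer setupTestLogging(t)()
    return func() {
        if err := logFile.Close(); err != nil {
            t.Errorf("problem closing the log file: %v", err)
        }
    }
}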
The proper way of doing this is passing the handle in main to SetupLogging:
func SetupLogging(logFile *os.File) {
    log.SetOutput(io.MultiWriter(os.Stderr, logFile))
    log.Println("Started")
}
func main() {
    logFile, err := os.OpenFile("test.log", os.O_APPEND|os.O_CREATE, 0666)
    if err != nil {
        log.Panicln(err)
    }
    defer logFile.Close()
    SetupLogging(logFile)
    log.Println("Test message")
}
Another option is to use runtime.SetFinalizer, but it's not always guaranteed to run before main exits.
func SetupLogging() {
    logFile, err := os.OpenFile("test.log", os.O_APPEND|os.O_CREATE, 0666)
    if err != nil {
        log.Panicln(err)
    }
    runtime.SetFinalizer(logFile, func(h *os.File) {
        h.Close()
    })
    log.SetOutput(io.MultiWriter(os.Stderr, logFile))
}
You can do this using channels; here is my approach:
type InfoLog struct {
    InfoChan  chan string
    CloseChan chan struct{} // empty signal
    log       *log.Logger
    file      *os.File
}

func NewInfoLog(file *os.File) *InfoLog {
    return &InfoLog{
        InfoChan:  make(chan string),
        CloseChan: make(chan struct{}),
        log:       log.New(file, "TAG", log.Ldate|log.Ltime),
        file:      file,
    }
}

func (i *InfoLog) listen() {
    for {
        select {
        case infoMsg := <-i.InfoChan:
            i.log.Println(infoMsg)
        case <-i.CloseChan:
            i.file.Close()
            close(i.InfoChan)
            return
        }
    }
}
Then in main:
func main() {
    infoLog := NewInfoLog(ANY_OPEN_FILE_HERE)
    go infoLog.listen()
    infoLog.InfoChan <- "msg"
    infoLog.InfoChan <- "msg"
    infoLog.InfoChan <- "msg"
    infoLog.CloseChan <- struct{}{}
    // exits normally
}
For a complete example, you can see an asynchronous log system I have made: https://github.com/sescobb27/ciudad-gourmet/blob/master/services/log_service.go
In cases where multiple "teardown" processes are needed, a great solution is to use the Google context package (https://blog.golang.org/context). The advantage is that you can tear down all currently executing procedures with a single context. Something like this:
package main

import (
    "fmt"
    "io"
    "log"
    "os"

    "golang.org/x/net/context"
)

func LogSetup(ctx context.Context) error {
    logFile, err := os.OpenFile("test.log", os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
    if err != nil {
        return err
    }
    log.SetOutput(io.MultiWriter(os.Stderr, logFile))
    // here we could f.ex. execute:
    // sendLogOutputToExternalService(ctx)
    // and it could have its own teardown procedure
    // which would be called on main context's expiration
    go func() {
        <-ctx.Done()
        if err := logFile.Close(); err != nil {
            fmt.Fprintf(os.Stderr, "Problem closing the log file: %s\n", err)
        }
    }()
    return nil
}

func main() {
    mainContext, stopAll := context.WithCancel(context.Background())
    defer stopAll()
    err := LogSetup(mainContext)
    if err != nil {
        log.Fatal("error while initializing logging")
    }
    log.Println("Test message")
}
