Here is a snippet of my code that does a GET request, and streams the response into cmd.Stdin.
resp, err = httpClient.Get(url)
if err != nil {
err = errors.Wrap(err, "HTTP request failed")
return
}
reader = bufio.NewReader(resp.Body)
args = append(args, "-") // Keep listening on stdin for file data
cmd := exec.Command("exiftool", args...)
stdout, err := cmd.StdoutPipe()
if err != nil {
return
}
cmd.Stdin = reader
err = cmd.Start()
if err != nil {
return
}
I want to know how much data has been streamed by the time the command finishes executing.
So I need to capture what is being read while it streams, or at least the size of what is read.
Wrap the reader. Count in the wrapper.
// wrapper counts the bytes read through the embedded io.Reader.
type wrapper struct {
io.Reader
n int
}
func (w *wrapper) Read(p []byte) (int, error) {
n, err := w.Reader.Read(p)
w.n += n
return n, err
}
Plug it into your application like this:
args = append(args, "-")
cmd := exec.Command("exiftool", args...)
stdout, err := cmd.StdoutPipe()
if err != nil {
return
}
reader := &wrapper{Reader: resp.Body}
cmd.Stdin = reader
err = cmd.Run()
if err != nil {
return
}
fmt.Println(reader.n) // prints number of bytes read.
Because the exec package uses a buffer when copying from the response body to the command's stdin, a bufio.Reader is unlikely to provide a benefit. In case there is some benefit, use one of these options:
reader := &wrapper{Reader: bufio.NewReader(resp.Body)} // Option 1
cmd.Stdin = bufio.NewReader(reader) // Option 2
I'm trying to figure out how to copy files from a remote host over SCP and get the data as a []byte from the buffer.
I managed to implement the upload by following this guide: https://chuacw.ath.cx/development/b/chuacw/archive/2019/02/04/how-the-scp-protocol-works.aspx
Inside the go func is the upload part of the SCP exchange, but I have no idea how to change it for downloading.
Any advice?
func download(con *ssh.Client, buf bytes.Buffer, path string) ([]byte, error) {
//https://chuacw.ath.cx/development/b/chuacw/archive/2019/02/04/how-the-scp-protocol-works.aspx
session, err := con.NewSession()
if err != nil {
return nil,err
}
buf.WriteString("sudo scp -f " + path + "\n")
stdin, err := session.StdinPipe()
if err != nil {
return nil,err
}
go func() {
defer stdin.Close()
fmt.Fprint(stdin, "C0660 "+strconv.Itoa(len(content))+" file\n")
stdin.Write(content)
fmt.Fprint(stdin, "\x00")
}()
output, err := session.CombinedOutput("sudo scp -f " + path)
buf.Write(output)
if err != nil {
return nil,&DeployError{
Err: err,
Output: buf.String(),
}
}
session.Close()
session, err = con.NewSession()
if err != nil {
return nil,err
}
defer session.Close()
return output,nil
}
The sink side is significantly more difficult than the source side. I made an example which should get you close to what you want. Note that I have not tested this code, the error handling is suboptimal, and it only supports a quarter of the protocol messages SCP may use, so you will still need to do some work to get it perfect.
With all that said, this is what I came up with:
func download(con *ssh.Client, path string) ([]byte, error) {
//https://chuacw.ath.cx/development/b/chuacw/archive/2019/02/04/how-the-scp-protocol-works.aspx
session, err := con.NewSession()
if err != nil {
return nil, err
}
defer session.Close()
// Local -> remote
stdin, err := session.StdinPipe()
if err != nil {
return nil, err
}
defer stdin.Close()
// Remote -> local
stdout, err := session.StdoutPipe()
if err != nil {
return nil, err
}
// Start scp on the remote side in source (-f) mode; note that
// directories will require different handling
err = session.Start("sudo scp -f " + path)
if err != nil {
return nil, err
}
// Tell the source we are ready for its first protocol message
_, err = stdin.Write([]byte{0})
if err != nil {
return nil, err
}
// Make a buffer for the protocol messages
const megabyte = 1 << 20
b := make([]byte, megabyte)
// Offset into the buffer
off := 0
var filesize int64
// SCP may send multiple protocol messages, so keep reading
for {
n, err := stdout.Read(b[off:])
if err != nil {
return nil, err
}
nl := bytes.Index(b[:off+n], []byte("\n"))
// If there is no newline in the buffer, we need to read more
if nl == -1 {
off = off + n
continue
}
// We read a full message, reset the offset
off = 0
// We did get a newline, so we have the full protocol message
msg := string(b[:nl])
// Send back a zero byte (OK); the SCP source will not send the next message otherwise
_, err = stdin.Write([]byte{0})
if err != nil {
return nil, err
}
// First char is the mode (C=file, D=dir, E=End of dir, T=Time metadata)
mode := msg[0]
if mode != 'C' {
// Ignore other messages for now.
continue
}
// File message = Cmmmm <length> <filename>
msgParts := strings.Split(msg, " ")
if len(msgParts) > 1 {
// Parse the second part <length> as a base-10 integer
filesize, err = strconv.ParseInt(msgParts[1], 10, 64)
if err != nil {
return nil, err
}
}
// The file message will be followed with binary data containing the file
break
}
// Wrap the stdout reader in a limit reader so we will not read more than the filesize
fileReader := io.LimitReader(stdout, filesize)
// Reuse b's backing array for the buffer (length 0), which saves an extra allocation if the file is <= 1mb
buf := bytes.NewBuffer(b[:0])
// Copy the file into the bytes buffer
_, err = io.Copy(buf, fileReader)
return buf.Bytes(), err
}
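For completeness, a rough usage sketch (client being an already-established *ssh.Client and the path just a placeholder):
data, err := download(client, "/tmp/example.txt")
if err != nil {
	log.Fatal(err)
}
fmt.Printf("downloaded %d bytes\n", len(data))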
I come from a C# background and am used to IO methods like File.ReadAllLines and File.WriteAllLines from the System.IO namespace. I was a bit surprised to learn that Go doesn't have convenience functions for these IO operations. In an effort to avoid code duplication, I wrote the helpers below. Is there any reason not to do this?
// WriteBytes writes the passed in bytes to the specified file. Before writing,
// if the file already exists, deletes all of its content; otherwise, creates
// the file.
func WriteBytes(filepath string, bytes []byte) (err error) {
file, err := os.Create(filepath)
if err != nil {
return err
}
defer closeWithErrorPropagation(file, &err)
_, err = file.Write(bytes)
if err != nil {
return err
}
return err
}
// WriteString writes the passed in string to the specified file. Before writing,
// if the file already exists, deletes all of its content; otherwise, creates
// the file.
func WriteString(filepath string, text string) (err error) {
file, err := os.Create(filepath)
if err != nil {
return err
}
defer closeWithErrorPropagation(file, &err)
_, err = file.WriteString(text)
if err != nil {
return err
}
return err
}
// WriteLines writes the passed in lines to the specified file. Before writing,
// if the file already exists, deletes all of its content; otherwise, creates
// the file.
func WriteLines(filepath string, lines []string) (err error) {
file, err := os.Create(filepath)
if err != nil {
return err
}
defer closeWithErrorPropagation(file, &err)
for _, line := range lines {
_, err := file.WriteString(fmt.Sprintln(line))
if err != nil {
return err
}
}
return err
}
func closeWithErrorPropagation(c io.Closer, err *error) {
if closerErr := c.Close(); closerErr != nil && *err == nil { // Only propagate the closer error if there isn't already an earlier error.
*err = closerErr
}
}
os.WriteFile provides the equivalent of the WriteBytes and WriteString functions:
// func WriteBytes(filepath string, bytes []byte) (err error)
err = os.WriteFile("testdata/hello", []byte("Hello, Gophers!"), 0666)
// func WriteString(filepath string, text string) (err error)
text := "Hello, Gophers!"
err = os.WriteFile("testdata/hello", []byte(text), 0666)
and combined with strings.Join can handle WriteLines:
//func WriteLines(filepath string, lines []string) (err error)
lines := []string{"hello", "gophers!"}
err = os.WriteFile("testdata/hello", []byte(strings.Join(lines, "\n")), 0666)
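One difference to be aware of: WriteLines ends the file with a trailing newline (fmt.Sprintln appends one per line), while the strings.Join version does not. Appending one restores the exact behaviour:
err = os.WriteFile("testdata/hello", []byte(strings.Join(lines, "\n")+"\n"), 0666)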
The goal: I want to capture all the bytes of cmd.Stdin and process them with this rot13 function: https://play.golang.org/p/VX2pwaIqhmT
The story: I'm coding a small tool which will be cross compiled for both win/ linux, so I'm trying to make it as simple as possible. This tool connects to the server from which I can execute commands on the client.
Since I had to do the same thing for cmd.Stdout, I used this:
.......
conn, err := net.Dial(nObj.Type, nObj.TCPIndirizzo)
......
cmd := exec.Command("/bin/sh", "-i") // please keep in mind that this is an ***interactive***
// ***shell***, and not just a simple command
cmd.Stdin = conn
cmdStdout, err := cmd.StdoutPipe() // works fine
if err != nil {
fmt.Fprintf(os.Stderr, "error creating shell stdout pipe: %s\n", err)
}
cmd.Stderr = conn
err = cmd.Start()
if err != nil {
fmt.Fprintf(os.Stderr, "error starting shell: %s\n", err)
}
.....
err = OBFprocessStream(cmdStdout, conn) // works fine
....
Where the OBFprocessStream function is based on this one: https://play.golang.org/p/j_TKZWuhGaK. Everything works fine here.
So, I tried to replicate the same thing for cmd.Stdin:
.......
conn, err := net.Dial(nObj.Type, nObj.TCPIndirizzo)
......
cmd := exec.Command("/bin/sh", "-i")
cmdStdin, err := cmd.StdinPipe()
if err != nil {
fmt.Fprintf(os.Stderr, "error creating shell stdin pipe: %s\n", err)
}
cmdStdout, err := cmd.StdoutPipe()
if err != nil {
fmt.Fprintf(os.Stderr, "error creating shell stdout pipe: %s\n", err)
}
cmd.Stderr = conn
err = cmd.Start()
if err != nil {
fmt.Fprintf(os.Stderr, "error starting shell: %s\n", err)
}
.....
err = INOBFprocessStream(cmdStdin, conn)
....
.....
err = OBFprocessStream(cmdStdout, conn)
....
But cmdStdin is an io.WriteCloser, and I don't really know what to do with it to capture the bytes.
Can you please help me?
So it seems what you actually want is to read the data from conn, filter it with ROT13, and then pass it to cmd.Stdin (which accepts an io.Reader).
And your rot13Reader already implements io.Reader:
type rot13Reader struct {
r io.Reader
}
func (r13 *rot13Reader) Read(b []byte) (int, error) {
n, err := r13.r.Read(b)
for i := 0; i < n; i++ {
b[i] = rot13(b[i])
}
return n, err
}
So a quick solution can be to construct a small filter chain out of it like so:
cmd.Stdin = &rot13Reader{conn}
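The rot13 helper itself is only behind the playground link; a minimal sketch of such a function (ASCII letters only) could look like this:
func rot13(b byte) byte {
	// Rotate ASCII letters by 13 positions; leave every other byte untouched.
	switch {
	case b >= 'a' && b <= 'z':
		return 'a' + (b-'a'+13)%26
	case b >= 'A' && b <= 'Z':
		return 'A' + (b-'A'+13)%26
	}
	return b
}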
I am executing some exec.Commands that output sensitive data, and I want to filter that data out. Since you can set the stdout writer on the Command struct, my idea is to write a custom io.Writer that consumes the output and filters it by a given word.
type passwordFilter struct {
keyWord string
}
func (pf passwordFilter) Write(p []byte) (n int, err error) {
// this is where I have no idea what to do
// I think I should somehow use a scanner and then filter
// out = strings.Replace(out, pf.keyWord, "*******", -1)
// something like this
// but I have to deal with byte array here
}
func main() {
pf := passwordFilter{keyWord: "password123"}
cmd := exec.Command(someBinaryFile)
cmd.Stdout = pf
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
}
I'm not sure if I'm headed the right way here, but I'm sure I can somehow reuse the existing io.Writers or scanners here.
Use Cmd.StdoutPipe to get a reader on the program output. Use a scanner on that reader.
cmd := exec.Command(someBinaryFile)
r, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
if err := cmd.Start(); err != nil {
log.Fatal(err)
}
s := bufio.NewScanner(r)
for s.Scan() {
out := s.Text()
out = strings.Replace(out, pf.keyWord, "*******", -1)
// write out to destination
}
if s.Err() != nil {
log.Fatal(s.Err())
}
if err := cmd.Wait(); err != nil {
log.Fatal(err)
}
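If you would still prefer the io.Writer shape from your question, here is a rough sketch (the dst field is my addition for the destination writer; note that a keyword split across two Write calls would slip through, which is why the scanner approach above is simpler):
type passwordFilter struct {
	keyWord string
	dst     io.Writer // where the filtered output goes, e.g. os.Stdout
}

func (pf passwordFilter) Write(p []byte) (int, error) {
	// Mask the keyword, then forward the chunk to the destination writer.
	// Usage: cmd.Stdout = passwordFilter{keyWord: "password123", dst: os.Stdout}
	masked := bytes.ReplaceAll(p, []byte(pf.keyWord), []byte("*******"))
	if _, err := pf.dst.Write(masked); err != nil {
		return 0, err
	}
	// Report the full input length as consumed; the masked output may differ in size.
	return len(p), nil
}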
I've got a command I have to run via the OS, let's call it 'login', that is interactive, so I have to read from stdin and pass the input to the command's stdin pipe for it to work correctly. The only problem is that the goroutine blocks on the read from stdin, and I haven't found a way to cancel a Reader in Go so that it doesn't hang on that blocking call. For example, from the user's perspective, after the command appears to have completed, you still get the opportunity to write to stdin once more (only then does the goroutine move past the blocking read and exit).
Ideally I would like to avoid parsing output from the command's StdoutPipe, as that makes my code fragile and prone to error if the login command's output strings were to change.
loginCmd := exec.Command("login")
stdin , err := loginCmd.StdinPipe()
if err != nil {
return err
}
out, err := loginCmd.StdoutPipe()
if err != nil {
return err
}
if err := loginCmd.Start(); err != nil {
return err
}
ctx, cancel := context.WithCancel(context.TODO())
var done sync.WaitGroup
done.Add(1)
ready := make(chan bool, 1)
defer cancel()
go func(ctx context.Context) {
reader := bufio.NewReader(os.Stdin)
for {
select {
case <- ctx.Done():
done.Done()
return
default:
//blocks on this line, if a close can unblock the read, then it should exit normally via the ctx.Done() case
line, err := reader.ReadString('\n')
if err != nil {
fmt.Println("Error: ", err.Error())
}
stdin.Write([]byte(line))
}
}
}(ctx)
var bytesRead = 4096
output := make([]byte, bytesRead)
reader := bufio.NewReader(out)
for err == nil {
bytesRead, err = reader.Read(output)
if err != nil && err != io.EOF {
return err
}
fmt.Printf("%s", output[:bytesRead])
}
if err := loginCmd.Wait(); err != nil {
return err
}
cancel()
done.Wait()
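One common way to sidestep this, sketched here reusing stdin, ctx and done from the snippet above: keep the blocking ReadString in its own goroutine that feeds a channel, and let the forwarding goroutine select between that channel and ctx.Done(). The read itself still blocks until the user presses enter or stdin is closed (the reader goroutine is deliberately allowed to leak), but the forwarder can exit promptly once cancel() is called:
lines := make(chan string)

go func() {
	// May stay blocked in ReadString (or on the channel send) after
	// cancellation; nothing waits on this goroutine, so it simply leaks.
	reader := bufio.NewReader(os.Stdin)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			close(lines)
			return
		}
		lines <- line
	}
}()

go func(ctx context.Context) {
	defer done.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case line, ok := <-lines:
			if !ok {
				return
			}
			stdin.Write([]byte(line))
		}
	}
}(ctx)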