package main
import (
"fmt"
"io"
"io/ioutil"
"os"
)
func main() {
file, err := os.Open("HelloWorld")
if nil != err {
fmt.Println(err)
}
defer file.Close()
fileTo, err := os.Create("fileTo")
if nil != err {
fmt.Println(err)
}
defer file.Close()
_, err = io.Copy(fileTo, file)
if nil != err {
fmt.Println(err)
}
fileByteOne, err := ioutil.ReadAll(file)
if nil != err {
fmt.Println(err)
}
fmt.Println(fileByteOne)
}
io.Copy() will erase the file content, the output is :
[]
Copy(dst Writer, src Reader) copies from src to dst, it will erase the src content. Is there
any way to avoid erasing?
io.Copy(fileTo, file) will erase the file content
It won't. But it will move the read position to EOF, meaning the next ioutil.ReadAll() will start at ... EOF.
You could close it and re-open 'file' before your ioutil.ReadAll().
By the way, you have two defer file.Close() instances: the second one should be defer fileTo.Close().
Or, simpler, reset it with a SectionReader.Seek(), as suggested by PeterSO's answer.
_, err = file.Seek(0, io.SeekStart)
It is also illustrated in GoByExamples Reading Files:
There is no built-in rewind, but Seek(0, 0) accomplishes this.
(os.SEEK_SET is defined in the os package constants, as 0)
const SEEK_SET int = 0 // seek relative to the origin of the file
Now (2020) deprecated and replaced with io.SeekStart.
See also "Golang, a proper way to rewind file pointer".
Reset from the end of the file to the start of the file with a seek. For example.
package main
import (
"fmt"
"io"
"io/ioutil"
"os"
)
func main() {
file, err := os.Open("HelloWorld")
if err != nil {
fmt.Println(err)
}
defer file.Close()
fileTo, err := os.Create("fileTo")
if err != nil {
fmt.Println(err)
}
defer fileTo.Close()
_, err = io.Copy(fileTo, file)
if err != nil {
fmt.Println(err)
}
_, err = file.Seek(0, os.SEEK_SET) // start of file
if err != nil {
fmt.Println(err)
}
fileByteOne, err := ioutil.ReadAll(file)
if err != nil {
fmt.Println(err)
}
fmt.Println(fileByteOne)
}
Output:
[72 101 108 108 111 44 32 87 111 114 108 100 33 10]
Related
I just tried to download webp image from url, but I found something different when I try to process the stored image.
If I download the image from the browser, it can be decoded using x/image/webp package, but if I store the image using http.Get() then create a new file then io.Copy() the image, it says:
"missing RIFF chunk header"
I assume that I need to write some RIFF chunk header when I store it using golang code.
func main(){
response, e := http.Get(URL)
if e != nil {
log.Fatal(e)
}
defer response.Body.Close()
//open a file for writing
file, err := os.Create('tv.webp')
if err != nil {
log.Fatal(err)
}
defer file.Close()
// Use io.Copy to just dump the response body to the file. This supports huge files
_, err = io.Copy(file, response.Body)
if err != nil {
log.Fatal(err)
}
fmt.Println("Success!")
imgData, err := os.Open("tv.webp")
if err != nil {
fmt.Println(err)
return
}
log.Printf("%+v", imgData)
image, err := webp.Decode(imgData)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(image.Bounds())
}
Here is the URL IMG URL
The downloaded file is not webp; it is actually png.
package main
import (
"fmt"
"image"
"io"
"log"
"net/http"
"os"
_ "image/png"
)
// main downloads the Sony product image, writes it to a local file
// named "dump", and prints the decoded image format and bounds.
func main() {
	resp, err := http.Get("https://www.sony.com/is/image/gwtprod/0abe7672ff4c6cb4a0a4d4cc143fd05b?fmt=png-alpha")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("dump")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Stream the body straight to disk.
	if _, err = io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Success!")

	// Re-open the stored copy and let image.Decode sniff the format
	// (the blank image/png import registers the PNG decoder).
	in, err := os.Open("dump")
	if err != nil {
		panic(err)
	}
	decoded, format, err := image.Decode(in)
	if err != nil {
		panic(err)
	}
	fmt.Println("image type is ", format, decoded.Bounds())
}
Let's say I have an io.ReadWriteSeeker that is reading an writing from a certain file.
At some point I decide I want to remove some bytes from the file (specifically the end) and reduce its length.
Whats the right way to go about this?
I can't just overwrite it by seeking because I want to end up with a shorter file.
io.CopyN will similarly not work (as src is smaller than dest)
If the io.ReadWriteSeeker value also supports a Truncate method, like an *os.File value.
For example,
package main
import (
"fmt"
"io"
"io/ioutil"
"os"
)
func truncate(rws io.ReadWriteSeeker, size int64) error {
type Truncater interface {
Truncate(size int64) error
}
t, ok := rws.(Truncater)
if !ok {
return fmt.Errorf("truncate: unable to truncate")
}
return t.Truncate(size)
}
// main demonstrates truncating a file through the io.ReadWriteSeeker
// interface: it writes "test data", prints it, chops off the final
// byte via truncate, prints the result, and removes the file.
func main() {
	filename := `/tmp/truncate.test.file`
	f, err := os.Create(filename)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	n, err := f.Write([]byte("test data"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	// show prints the file's current contents; false means it failed
	// (the error has already been reported to stderr).
	show := func() bool {
		data, err := ioutil.ReadFile(filename)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return false
		}
		fmt.Println(string(data))
		return true
	}
	if !show() {
		return
	}

	// truncate io.ReadWriteSeeker
	if err = truncate(f, int64(n-1)); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if !show() {
		return
	}
	os.Remove(filename)
}
Playground: https://play.golang.org/p/pp4IUSoKo4M
Output:
test data
test dat
I am trying to make a program which writes at provided offsets in the file, like i can start writing from 20th offset etc.
here is one of sample code i was using as reference
package main
import (
"fmt"
"io/ioutil"
"os"
)
const (
filename = "sample.txt"
start_data = "12345"
)
// printContents reads all of filename and echoes it to stdout with a
// "CONTENTS:" prefix, panicking if the file cannot be read.
func printContents() {
	body, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	fmt.Println("CONTENTS:", string(body))
}
// main seeds sample.txt with start_data and then demonstrates that
// WriteAt writes at its own absolute offset, not at the position set
// by Seek.
func main() {
	if err := ioutil.WriteFile(filename, []byte(start_data), 0644); err != nil {
		panic(err)
	}
	printContents()

	f, err := os.OpenFile(filename, os.O_RDWR, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// This seek does NOT influence the WriteAt below: WriteAt takes
	// an absolute offset and ignores the current file position.
	if _, err = f.Seek(20, 0); err != nil {
		panic(err)
	}
	if _, err = f.WriteAt([]byte("A"), 15); err != nil {
		panic(err)
	}
	printContents()
}
But i am always getting the same file content which is beginning from start like
12345A
I tried changing the seek values to (0,0) and (20,0) and (10,1) randomly which results in same output
Also i tried changing WriteAt offset to other offset like 10, 20 but this also resulted in same.
I want to get a solution so that i can write at any specified position in file, suggest me what is wrong in this code.
It works as expected.
After running your code, your "sample.txt" file content is (16 bytes):
[49 50 51 52 53 0 0 0 0 0 0 0 0 0 0 65]
try:
package main
import (
"fmt"
"io/ioutil"
)
const (
filename = "sample.txt"
start_data = "12345"
)
// printContents dumps the raw bytes of filename to stdout (no string
// conversion, so NUL padding is visible), panicking on read error.
func printContents() {
	raw, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	fmt.Println(raw)
}
func main() {
// Print the current raw bytes of sample.txt (left over from the
// previous program run), revealing any NUL padding between the
// original data and the byte written by WriteAt.
printContents()
}
You need to write enough bytes first, then use the WriteAt offset:
e.g. edit :
start_data = "1234567890123456789012345678901234567890"
then test your code:
package main
import (
"fmt"
"io/ioutil"
"os"
)
const (
filename = "sample.txt"
start_data = "1234567890123456789012345678901234567890"
)
// printContents prints the contents of filename as a string,
// panicking if the file cannot be read.
func printContents() {
	body, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
// main writes 40 bytes of start_data to sample.txt and then shows
// that WriteAt([]byte("A"), 15) lands at absolute index 15,
// regardless of the preceding Seek to offset 20.
func main() {
	if err := ioutil.WriteFile(filename, []byte(start_data), 0644); err != nil {
		panic(err)
	}
	printContents()

	f, err := os.OpenFile(filename, os.O_RDWR, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Seek is irrelevant to WriteAt, which uses an absolute offset.
	if _, err = f.Seek(20, 0); err != nil {
		panic(err)
	}
	if _, err = f.WriteAt([]byte("A"), 15); err != nil {
		panic(err)
	}
	printContents()
}
output:
1234567890123456789012345678901234567890
123456789012345A789012345678901234567890
I've written the following code to tar a file, code works but strangely if I untar the archive the file permissions are gone so I can't read it unless I then chmod the file:
package main
import (
"archive/tar"
"io/ioutil"
"log"
"os"
)
// main archives sample.txt into test.tar, preserving the source
// file's permissions in the tar header.
func main() {
	c, err := os.Create("/path/to/tar/file/test.tar")
	if err != nil {
		log.Fatalln(err)
	}
	// BUG FIX: the original never closed either file; leak the
	// descriptors no longer.
	defer c.Close()

	tw := tar.NewWriter(c)
	f, err := os.Open("sample.txt")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		log.Fatalln(err)
	}

	// BUG FIX: building the header by hand recorded only Name and
	// Size, so Mode defaulted to 0 and the extracted file had no
	// permissions. FileInfoHeader copies mode (and timestamps) from
	// the source file's FileInfo.
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatalln(err)
	}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatalln(err)
	}

	r, err := ioutil.ReadFile("sample.txt")
	if err != nil {
		log.Fatalln(err)
	}
	if _, err := tw.Write(r); err != nil {
		log.Fatalln(err)
	}
	// Close flushes buffered data and writes the archive footer.
	if err := tw.Close(); err != nil {
		log.Fatalln(err)
	}
}
Any idea what I'm doing wrong?
You're not preserving the original permissions of the file. You're manually creating a header, and specifying only the name and size. Instead, use tar.FileInfoHeader to build the header.
package main
import (
"archive/tar"
"io"
"log"
"os"
)
// main tars sample.txt into test.tar. The header is derived from the
// file's FileInfo so the original permissions survive extraction, and
// the payload is streamed with io.Copy instead of being buffered in
// memory.
func main() {
	c, err := os.Create("/path/to/tar/file/test.tar")
	if err != nil {
		log.Fatalln(err)
	}
	defer c.Close()

	tw := tar.NewWriter(c)
	f, err := os.Open("sample.txt")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		log.Fatalln(err)
	}

	// create header from FileInfo (preserves mode, timestamps, ...)
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatalln(err)
	}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatalln(err)
	}

	// instead of reading the whole file into memory, prefer io.Copy
	r, err := io.Copy(tw, f)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("Wrote %d bytes\n", r)

	// BUG FIX: tar.Writer buffers data and only writes the archive
	// footer on Close; without this the resulting tar is truncated
	// and unreadable by extractors.
	if err := tw.Close(); err != nil {
		log.Fatalln(err)
	}
}
Also note that I used io.Copy to copy data from the file (an io.Reader) to the tar writer (an io.Writer). This will work much better for larger files.
Also - pay special attention to this note from the docs:
Because os.FileInfo's Name method returns only the base name of the file it describes, it may be necessary to modify the Name field of the returned header to provide the full path name of the file.
In this simple example, you're just using sample.txt so you shouldn't run into trouble. If you wanted to preserve a directory structure in your tar, you may have to modify the Name field in the header.
Is it possible to extract a tar.xz package in golang? My understanding is it's possible to use the library for tar and sending it to an xz go library.
I recently created an XZ decompression package so it is now
possible to extract a tar.xz using only Go code.
The following code extracts the file myfile.tar.xz to the current
directory:
package main
import (
"archive/tar"
"fmt"
"io"
"log"
"os"
"github.com/xi2/xz"
)
// main extracts myfile.tar.xz into the current directory by chaining
// the pure-Go xi2/xz decompressor into archive/tar.
func main() {
// Open a file
f, err := os.Open("myfile.tar.xz")
if err != nil {
log.Fatal(err)
}
// Create an xz Reader
// NOTE(review): the second argument (0) presumably selects the
// reader's default memory limit — confirm against the xi2/xz docs.
r, err := xz.NewReader(f, 0)
if err != nil {
log.Fatal(err)
}
// Create a tar Reader
tr := tar.NewReader(r)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
log.Fatal(err)
}
switch hdr.Typeflag {
case tar.TypeDir:
// create a directory
fmt.Println("creating: " + hdr.Name)
err = os.MkdirAll(hdr.Name, 0777)
if err != nil {
log.Fatal(err)
}
// tar.TypeRegA is deprecated in modern archive/tar; kept here for
// compatibility with older archives.
case tar.TypeReg, tar.TypeRegA:
// write a file
// SECURITY NOTE(review): hdr.Name is used verbatim. A malicious
// archive with "../" entries could write outside the target
// directory (path traversal); sanitize hdr.Name before trusting it.
fmt.Println("extracting: " + hdr.Name)
w, err := os.Create(hdr.Name)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(w, tr)
if err != nil {
log.Fatal(err)
}
w.Close()
}
}
f.Close()
}
http://golang.org/pkg/archive/tar/#example_
also you can do
import "os/exec"
cmd := exec.Command("tar", "-x", "/your/archive.tar.xz")
err := cmd.Run()
There is no Lempel-Ziv-Markow encoder or decoder in the Go standard library. If you are allowed to assume that the platform your code runs on provides the xz utility, you could use stub functions like these:
import "os/exec"
// decompress xz compressed data stream r.
func UnxzReader(r io.Reader) (io.ReadCloser, error) {
unxz := exec.Command("xz", "-d")
unxz.Stdin = r
out, err := unxz.StdoutPipe()
if err != nil {
return nil, err
}
err = unxz.Start()
if err != nil {
return nil, err
}
// we are not interested in the exit status, but we should really collect
// that zombie process
go unxz.Wait()
return out, nil
}