This code gets all objects from S3 and then deletes them.
getAllObject is called from DeletePhotosFromS3.
In DeletePhotosFromS3 I can get 2 different keys back in objects.
But deleteObjects ends up with 2 identical keys, e.g. [{Key: 1}, {Key: 1}].
Why does deleteObjects contain 2 identical keys, and how do I set the objects in []*s3.ObjectIdentifier?
func getAllObject(userID string) (*[]string, error) {
    var objects []string
    svc := initS3()
    config := model.NewConfig()
    input := &s3.ListObjectsInput{
        Bucket:  aws.String(config.AWSS3Bucket),
        Prefix:  aws.String(userID),
        MaxKeys: aws.Int64(2), // default 1000
    }
    result, err := svc.ListObjects(input)
    if err != nil {
        return &objects, err
    }
    for _, v := range result.Contents {
        objects = append(objects, *v.Key)
    }
    return &objects, nil
}

func DeletePhotosFromS3(userID string) (error) {
    var deleteObjects []*s3.ObjectIdentifier
    svc := initS3()
    config := model.NewConfig()
    objects, err := getAllObject(userID) // called getAllObject
    for _, v := range *objects {
        deleteObjects = append(deleteObjects, &s3.ObjectIdentifier{Key: &v}) // Er
    }
    ...
}
The iteration variable v in your for loop is reused on every iteration, so the pointer &v is the same for every item appended to the list (this is the pre-Go 1.22 loop-variable behaviour; since Go 1.22 each iteration gets its own variable). Fixed snippet:
for _, v := range *objects {
    vcopy := v
    deleteObjects = append(deleteObjects, &s3.ObjectIdentifier{Key: &vcopy})
}
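For completeness, a minimal sketch of how those identifiers could then be passed to DeleteObjects, assuming the AWS SDK for Go v1 that the question uses and its config/svc helpers:

input := &s3.DeleteObjectsInput{
    Bucket: aws.String(config.AWSS3Bucket),
    Delete: &s3.Delete{
        Objects: deleteObjects, // the deduplicated identifiers built above
        Quiet:   aws.Bool(true),
    },
}
if _, err := svc.DeleteObjects(input); err != nil {
    return err
}
return nil

Quiet is optional; it just suppresses the per-key results in the response.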
Related
I have implemented a very simple Decode method (using gob.Decoder for now). This works well for single responses, and it would even work for slices, but I need to implement a DecodeMany method that can decode a set of individual responses (not a slice).
Working Decode method:
var v MyType
_ = Decode(&v)
...
func Decode(v interface{}) error {
    buf, _ := DoSomething() // func DoSomething() ([]byte, error)
    // error handling omitted for brevity
    return gob.NewDecoder(bytes.NewReader(buf)).Decode(v)
}
What I'm trying to do for a DecodeMany method is to deal with a response that isn't necessarily a slice:
var vv []MyType
_ = DecodeMany(&vv)
...
func DecodeMany(vv []interface{}) error {
    for _, g := range DoSomething() { // func DoSomething() []struct{ Buf []bytes }
        // Use g.Buf as an individual "interface{}"
        // want something like:
        var v interface{} /* Somehow create instance of single vv type? */
        _ = gob.NewDecoder(bytes.NewReader(g.Buf)).Decode(v)
        vv = append(vv, v)
    }
    return
}
Besides not compiling, the above also produces the error:
cannot use &vv (value of type *[]MyType) as type []interface{} in argument to DecodeMany
If you want to modify the passed slice, it must be a pointer; otherwise you must return a new slice. Also, if the function is declared with a parameter of type []interface{}, you can only pass a value of type []interface{} and no other slice type... unless you use generics.
This is a perfect example to start using generics introduced in Go 1.18.
Change DecodeMany() to be generic, with a type parameter T for the slice element type:
When taking a pointer
func DecodeMany[T any](vv *[]T) error {
    for _, g := range DoSomething() {
        var v T
        if err := gob.NewDecoder(bytes.NewReader(g.Buf)).Decode(&v); err != nil {
            return err
        }
        *vv = append(*vv, v)
    }
    return nil
}
Here's a simple app to test it:
type MyType struct {
    S int64
}

func main() {
    var vv []MyType
    if err := DecodeMany(&vv); err != nil {
        panic(err)
    }
    fmt.Println(vv)
}

func DoSomething() (result []struct{ Buf []byte }) {
    for i := 3; i < 6; i++ {
        buf := &bytes.Buffer{}
        v := MyType{S: int64(i)}
        if err := gob.NewEncoder(buf).Encode(v); err != nil {
            panic(err)
        }
        result = append(result, struct{ Buf []byte }{buf.Bytes()})
    }
    return
}
This outputs (try it on the Go Playground):
[{3} {4} {5}]
When returning a slice
If you choose to return the slice, you don't have to pass anything, but you need to assign the result:
func DecodeMany[T any]() ([]T, error) {
    var result []T
    for _, g := range DoSomething() {
        var v T
        if err := gob.NewDecoder(bytes.NewReader(g.Buf)).Decode(&v); err != nil {
            return result, err
        }
        result = append(result, v)
    }
    return result, nil
}
Using it:
vv, err := DecodeMany[MyType]()
if err != nil {
    panic(err)
}
fmt.Println(vv)
Try this one on the Go Playground.
Let's say the input can contain string or integer values:
names = ["rahul", "rohit", "srujan", "rahul"] --> output = ["rahul", "rohit", "srujan"]
age = [12, 18, 12, 21] --> output = [12, 18, 21]
We can use the following function to filter out duplicates:
package main

import (
    "fmt"
)

func unique(intSlice []int) []int {
    keys := make(map[int]bool)
    list := []int{}
    for _, entry := range intSlice {
        if _, value := keys[entry]; !value {
            keys[entry] = true
            list = append(list, entry)
        }
    }
    return list
}

func main() {
    intSlice := []int{1, 5, 3, 6, 9, 9, 4, 2, 3, 1, 5}
    fmt.Println(intSlice)
    uniqueSlice := unique(intSlice)
    fmt.Println(uniqueSlice)
}
This works only if the input is all strings or all integers, but not both.
How can I make sure this function works for a slice of interface{}?
Use the reflect package to write a function that works with any slice type:
func unique(src interface{}) interface{} {
    srcv := reflect.ValueOf(src)
    dstv := reflect.MakeSlice(srcv.Type(), 0, 0)
    visited := make(map[interface{}]struct{})
    for i := 0; i < srcv.Len(); i++ {
        elemv := srcv.Index(i)
        if _, ok := visited[elemv.Interface()]; ok {
            continue
        }
        visited[elemv.Interface()] = struct{}{}
        dstv = reflect.Append(dstv, elemv)
    }
    return dstv.Interface()
}
Use it like this:
uniqueIntSlice := unique(intSlice).([]int)
Run the code on the Go Playground.
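If you are on Go 1.18 or later, a generic version avoids reflection entirely. This is a sketch of my own, not part of the original answer, and it only works for comparable element types:

func unique[T comparable](src []T) []T {
    seen := make(map[T]struct{}, len(src))
    dst := make([]T, 0, len(src))
    for _, v := range src {
        if _, ok := seen[v]; ok {
            continue
        }
        seen[v] = struct{}{}
        dst = append(dst, v)
    }
    return dst
}

Used for example as uniqueInts := unique(intSlice); the element type is inferred, so no type assertion is needed.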
How to make sure this function works for an (unsorted) slice of empty interface{}:
Given that empty interface{} values are comparable (https://stackoverflow.com/a/54003329/4466350), it is very simple to rewrite your original code:
package main

import (
    "fmt"
)

func main() {
    intSlice := []interface{}{1, 5, 3, 6, 9, 9, 4, 2, 3, 1, 5}
    fmt.Println(unique(intSlice))
}

func unique(src []interface{}) []interface{} {
    keys := make(map[interface{}]bool)
    list := []interface{}{}
    for _, entry := range src {
        if _, value := keys[entry]; !value {
            keys[entry] = true
            list = append(list, entry)
        }
    }
    return list
}
https://play.golang.org/p/vW7vgwz9yc1
If your question becomes how to remove duplicates from a slice of any type, please check this other answer: https://stackoverflow.com/a/65191679/4466350
Nothing elegant and very prone to errors, but you can use a function that receives two interface{} arguments: the first one is the slice to filter and the second one is a pointer to the filtered slice. Obviously, if the first parameter is a slice of int, the second one MUST be a pointer to a slice of int.
Inside the function you can check the types of the parameters and treat them separately.
package main

import (
    "fmt"
)

func unique(slice interface{}, filtered interface{}) error {
    // Check for slice of string
    if sliceOfString, ok := slice.([]string); ok {
        // If slice is a slice of string, filtered MUST also be a pointer to a slice of string
        filteredAsSliceOfString, ok := filtered.(*[]string)
        if !ok {
            return fmt.Errorf("filtered should be of type %T, got %T instead", &[]string{}, filtered)
        }
        keys := make(map[string]bool)
        for _, entry := range sliceOfString {
            if _, value := keys[entry]; !value {
                keys[entry] = true
                *filteredAsSliceOfString = append(*filteredAsSliceOfString, entry)
            }
        }
    } else if sliceOfInt, ok := slice.([]int); ok {
        // If slice is a slice of int, filtered MUST also be a pointer to a slice of int
        filteredAsInt, ok := filtered.(*[]int)
        if !ok {
            return fmt.Errorf("filtered should be of type %T, got %T instead", &[]int{}, filtered)
        }
        keys := make(map[int]bool)
        for _, entry := range sliceOfInt {
            if _, value := keys[entry]; !value {
                keys[entry] = true
                *filteredAsInt = append(*filteredAsInt, entry)
            }
        }
    } else {
        return fmt.Errorf("only slice of int or slice of string is supported")
    }
    return nil
}
func main() {
    intSlice := []int{1, 5, 3, 6, 9, 9, 4, 2, 3, 1, 5}
    intSliceFiltered := make([]int, 0)
    stringSlice := []string{"a", "b", "b", "c", "c", "c", "d"}
    stringSliceFiltered := make([]string, 0)
    fmt.Println(intSlice)
    err := unique(intSlice, &intSliceFiltered) // Very important to send a pointer as the second parameter
    if err != nil {
        fmt.Printf("error filtering int slice: %v\n", err)
    }
    fmt.Println(intSliceFiltered)
    fmt.Println(stringSlice)
    err = unique(stringSlice, &stringSliceFiltered) // Very important to send a pointer as the second parameter
    if err != nil {
        fmt.Printf("error filtering string slice: %v\n", err)
    }
    fmt.Println(stringSliceFiltered)
}
As I said, it's not elegant, and I haven't checked this for errors.
Here it is running.
For example, we have 3 CSV files and the column common to all of them is Email. The first file has Name and Email columns; the other two have Email (plus different info) but no Name field. So I need to fill in the Name field in files 2 and 3 based on the Name/Email correspondence from the first file. I wrote code like this:
package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "path/filepath"
    "strings"

    "github.com/jszwec/csvutil"
)

type User struct {
    Name  string `csv:"name"`
    Email string `csv:"email"`
}

type Good struct {
    User
    Dt string `csv:"details"`
}

type Strange struct {
    User
    St string `csv:"status"`
    Dt string `csv:"details"`
}

var lst map[string]string

func readCSV(fn string, dat interface{}) error {
    raw, err := ioutil.ReadFile(fn)
    if err != nil {
        return fmt.Errorf("Cannot read CSV: %w", err)
    }
    if err := csvutil.Unmarshal(raw, dat); err != nil {
        return fmt.Errorf("Cannot unmarshal CSV: %w", err)
    }
    return nil
}

func fixNames(fl string, in interface{}) error {
    if err := readCSV(fl, in); err != nil {
        return fmt.Errorf("CSV: %w", err)
    }
    switch in.(type) {
    case *[]Good:
        var vals []Good
        for _, v := range *in.(*[]Good) {
            v.Name = lst[strings.TrimSpace(strings.ToLower(v.Email))]
            vals = append(vals, v)
        }
        in = vals
    case *[]Strange:
        var vals []Strange
        for _, v := range *in.(*[]Strange) {
            v.Name = lst[strings.TrimSpace(strings.ToLower(v.Email))]
            vals = append(vals, v)
        }
        in = vals
    }
    b, err := csvutil.Marshal(in)
    if err != nil {
        return fmt.Errorf("Cannot marshal CSV: %w", err)
    }
    ext := filepath.Ext(fl)
    bas := filepath.Base(fl)
    err = ioutil.WriteFile(bas[:len(bas)-len(ext)]+"-XIAOSE"+ext, b, 0644)
    if err != nil {
        return fmt.Errorf("Cannot save CSV: %w", err)
    }
    return nil
}

func main() {
    var users []User
    if err := readCSV("./Guitar_Contacts.csv", &users); err != nil {
        log.Fatalf("CSV: %s", err)
    }
    lst = make(map[string]string)
    for _, v := range users {
        lst[strings.TrimSpace(strings.ToLower(v.Email))] = v.Name
    }
    var usersGood []Good
    if err := fixNames("./Guitar-Good.csv", &usersGood); err != nil {
        log.Fatalf("fix: %s", err)
    }
    var usersStrange []Strange
    if err := fixNames("./Guitar-Uknown.csv", &usersStrange); err != nil {
        log.Fatalf("fix: %s", err)
    }
    fmt.Println("OK")
}
In this code I don't like the part of func fixNames with the switch:
switch in.(type) {
case *[]Good:
    var vals []Good
    for _, v := range *in.(*[]Good) {
        v.Name = lst[strings.TrimSpace(strings.ToLower(v.Email))]
        vals = append(vals, v)
    }
    in = vals
case *[]Strange:
    var vals []Strange
    for _, v := range *in.(*[]Strange) {
        v.Name = lst[strings.TrimSpace(strings.ToLower(v.Email))]
        vals = append(vals, v)
    }
    in = vals
}
because I just repeat the code in the *in.(SOME_TYPE) branches. I want one loop and one action for the different types, i.e. structs that have Name and Email fields.
I also had the idea to do it with reflection, something like this:
v := reflect.ValueOf(in)
v = v.Elem()
for i := 0; i < v.Len(); i++ {
    fmt.Println(v.Index(i))
}
but I do not know what to do next, i.e. how to set the Name value on that v.
You don't need reflection for this particular case. You can clean the code up by realizing that you are only working on the User part of the structs, and that you can simplify the type switch:
fix := func(in *User) {
    in.Name = lst[strings.TrimSpace(strings.ToLower(in.Email))]
}

switch k := in.(type) {
case *[]Good:
    for i := range *k {
        fix(&(*k)[i].User)
    }
case *[]Strange:
    for i := range *k {
        fix(&(*k)[i].User)
    }
}
You have to repeat the for loop, but the above code does the correction in place.
You can clean up a bit more by not passing a reference to the slice.
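A minimal sketch of that idea (a hypothetical helper, not from the original answer): once the CSV has been unmarshalled, the fixing step can take the slices by value, because updating elements in place does not change the slice length and the slice header shares the same backing array:

func fixUsers(in interface{}) {
    fix := func(u *User) {
        u.Name = lst[strings.TrimSpace(strings.ToLower(u.Email))]
    }
    switch k := in.(type) {
    case []Good:
        for i := range k {
            fix(&k[i].User)
        }
    case []Strange:
        for i := range k {
            fix(&k[i].User)
        }
    }
}

This keeps the in-place update from the previous snippet but works on plain slice values instead of pointers to slices.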
With reflect package, you can do that like this.
func fixNames(fl string, in interface{}) error {
    // other code
    v := reflect.ValueOf(in)
    if v.Kind() == reflect.Ptr {
        arr := v.Elem()
        fmt.Println(arr.Len())
        if arr.Kind() == reflect.Slice || arr.Kind() == reflect.Array {
            for i := 0; i < arr.Len(); i++ {
                elem := arr.Index(i)
                f := elem.FieldByName("Name")
                f.SetString("NameOfUser")
            }
        }
    }
    // other code
}
Also playground example: https://play.golang.org/p/KrGvLVprslH
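A small extension of that loop, my own assumption rather than part of the answer: instead of hard-coding the name, read the promoted Email field and use the lst map from the question. reflect's FieldByName also finds fields of embedded structs, so this works for both Good and Strange:

for i := 0; i < arr.Len(); i++ {
    elem := arr.Index(i)
    // look up the real name by the (normalized) email instead of a fixed string
    email := elem.FieldByName("Email").String()
    elem.FieldByName("Name").SetString(lst[strings.TrimSpace(strings.ToLower(email))])
}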
I am using redigo to save some structs in Redis. The thing is that for the same key I need to append new structs, but when I try to recover them I cannot unmarshal them into a slice.
I.e. (ignoring the errors intentionally):
type ADTO struct {
    Value string
}

func main() {
    pool := redis.Pool{
        Dial: func() (conn redis.Conn, e error) {
            return redis.Dial("tcp", "localhost:6379")
        },
        MaxIdle:   80,
        MaxActive: 12000,
    }
    conn := pool.Get()
    defer conn.Close()
    key := "some-key"
    defer conn.Do("DEL", key)
    a := ADTO{Value: "a"}
    bytes, _ := json.Marshal(a)
    conn.Do("APPEND", key, bytes)
    b := ADTO{Value: "b"}
    bytes, _ = json.Marshal(b)
    conn.Do("APPEND", key, bytes)
    c := ADTO{Value: "c"}
    bytes, _ = json.Marshal(c)
    conn.Do("APPEND", key, bytes)
    bytes, _ = redis.Bytes(conn.Do("GET", key))
    adtos := make([]ADTO, 0)
    // the following does not work
    if err := json.Unmarshal(bytes, &adtos); err != nil {
        return
    }
}
If I only append a single struct and retrieve it, then it works fine.
I have tried with redis.ByteSlices with no luck.
APPEND will only append to a string; it will not build a JSON array. After the first insert, you'll have
{"Value":"a"}
Then after the second one, you'll have
{"Value":"a"}{"Value":"b"}
That's not a JSON array.
You can try using json.Decoder, and do something like:
b, _ = redis.Bytes(conn.Do("GET", key))
dec := json.NewDecoder(bytes.NewReader(b))
items := []ADTO{}
var x ADTO
for dec.Decode(&x) == nil {
    items = append(items, x)
}
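If you control how the data is written, an alternative sketch (my assumption, not part of the original answer) is to store each struct as its own element of a Redis list; that is the kind of reply redis.ByteSlices is meant for:

// Write: one list element per struct, instead of APPEND-ing to a single string.
for _, v := range []ADTO{{Value: "a"}, {Value: "b"}, {Value: "c"}} {
    data, _ := json.Marshal(v)
    conn.Do("RPUSH", key, data)
}

// Read: every element comes back as a complete JSON document.
raw, _ := redis.ByteSlices(conn.Do("LRANGE", key, 0, -1))
adtos := make([]ADTO, 0, len(raw))
for _, r := range raw {
    var a ADTO
    if err := json.Unmarshal(r, &a); err == nil {
        adtos = append(adtos, a)
    }
}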
Assuming we have two yaml files
master.yaml
someProperty: "someVaue"
anotherProperty: "anotherValue"
override.yaml
someProperty: "overriddenVaue"
Is it possible to unmarshal, merge, and then write those changes to a file without having to define a struct for every property in the yaml file?
The master file has over 500 properties that are not at all important to the service at this point of execution, so ideally I'd be able to just unmarshal into a map, do a merge, and write it out as yaml again, but I'm relatively new to Go and wanted some opinions.
I've got some code that reads the yaml into an interface, but I'm unsure of the best approach to then merge the two.
var masterYaml interface{}
yamlBytes, _ := ioutil.ReadFile("master.yaml")
yaml.Unmarshal(yamlBytes, &masterYaml)
var overrideYaml interface{}
yamlBytes, _ = ioutil.ReadFile("override.yaml")
yaml.Unmarshal(yamlBytes, &overrideYaml)
I've looked into libraries like mergo, but I'm not sure if that's the right approach.
I'm hoping that after the merge I would be able to write out a file with the properties:
someProperty: "overriddenVaue"
anotherProperty: "anotherValue"
Assuming that you just want to merge at the top level, you can unmarshal into maps of type map[string]interface{}, as follows:
package main

import (
    "io/ioutil"

    "gopkg.in/yaml.v2"
)

func main() {
    var master map[string]interface{}
    bs, err := ioutil.ReadFile("master.yaml")
    if err != nil {
        panic(err)
    }
    if err := yaml.Unmarshal(bs, &master); err != nil {
        panic(err)
    }

    var override map[string]interface{}
    bs, err = ioutil.ReadFile("override.yaml")
    if err != nil {
        panic(err)
    }
    if err := yaml.Unmarshal(bs, &override); err != nil {
        panic(err)
    }

    for k, v := range override {
        master[k] = v
    }

    bs, err = yaml.Marshal(master)
    if err != nil {
        panic(err)
    }
    if err := ioutil.WriteFile("merged.yaml", bs, 0644); err != nil {
        panic(err)
    }
}
For a broader solution (with n input files), you can use this function. I based it on @robox's answer:
func ReadValues(filenames ...string) (string, error) {
    if len(filenames) <= 0 {
        return "", errors.New("You must provide at least one filename for reading Values")
    }
    var resultValues map[string]interface{}
    for _, filename := range filenames {
        var override map[string]interface{}
        bs, err := ioutil.ReadFile(filename)
        if err != nil {
            log.Info(err)
            continue
        }
        if err := yaml.Unmarshal(bs, &override); err != nil {
            log.Info(err)
            continue
        }

        // check if it is nil. This will only happen for the first filename
        if resultValues == nil {
            resultValues = override
        } else {
            for k, v := range override {
                resultValues[k] = v
            }
        }
    }
    bs, err := yaml.Marshal(resultValues)
    if err != nil {
        log.Info(err)
        return "", err
    }
    return string(bs), nil
}
So for this example you would call it in this order:
result, _ := ReadValues("master.yaml", "overwrite.yaml")
In the case you have an extra file newFile.yaml, you could also use this function:
result, _ := ReadValues("master.yaml", "overwrite.yaml", "newFile.yaml")
DEEP MERGE TWO YAML FILES
package main

import (
    "fmt"
    "io/ioutil"

    "sigs.k8s.io/yaml"
)

func main() {
    // declare two maps to hold the yaml content
    base := map[string]interface{}{}
    currentMap := map[string]interface{}{}

    // read one yaml file
    data, _ := ioutil.ReadFile("conf.yaml")
    if err := yaml.Unmarshal(data, &base); err != nil {
    }

    // read another yaml file
    data1, _ := ioutil.ReadFile("conf1.yaml")
    if err := yaml.Unmarshal(data1, &currentMap); err != nil {
    }

    // merge both yaml data recursively
    base = mergeMaps(base, currentMap)

    // print merged map
    fmt.Println(base)
}

func mergeMaps(a, b map[string]interface{}) map[string]interface{} {
    out := make(map[string]interface{}, len(a))
    for k, v := range a {
        out[k] = v
    }
    for k, v := range b {
        if v, ok := v.(map[string]interface{}); ok {
            if bv, ok := out[k]; ok {
                if bv, ok := bv.(map[string]interface{}); ok {
                    out[k] = mergeMaps(bv, v)
                    continue
                }
            }
        }
        out[k] = v
    }
    return out
}
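To illustrate the recursive behaviour with a self-contained, hypothetical example (the data is made up; it only relies on mergeMaps above):

base := map[string]interface{}{
    "server":   map[string]interface{}{"host": "localhost", "port": 8080},
    "logLevel": "info",
}
override := map[string]interface{}{
    "server":   map[string]interface{}{"port": 9090},
    "logLevel": "debug",
}
fmt.Println(mergeMaps(base, override))
// map[logLevel:debug server:map[host:localhost port:9090]]

Note that the map[string]interface{} type assertion in mergeMaps matches what sigs.k8s.io/yaml produces for nested mappings (it decodes via JSON); with gopkg.in/yaml.v2 nested mappings come back as map[interface{}]interface{}, so the recursive branch would not trigger.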