Consider a text file like this:
Some text
here.
---
More text
another line.
---
Third part of text.
I want to split it into three parts, divided by the --- separator. The parts should be stored in a map.
Below are two programs that are identical except for the type used to store the parts.
When I use string, everything works fine:
KEY: 0
Some text
here.
KEY: 1
More text
another line.
KEY: 2
Third part of text.
https://play.golang.org/p/IcGdoUNcTEe
When I use []byte, things get messed up:
KEY: 0
Third part of teKEY: 1
Third part of text.
ne.
KEY: 2
Third part of text.
https://play.golang.org/p/jqLhCrqsvOs
Why?
Program 1 (string):
func main() {
	parts := parseParts([]byte(input))
	for k, v := range parts {
		fmt.Printf("KEY: %d\n%s", k, v)
	}
}

func parseParts(input []byte) map[int]string {
	parts := map[int]string{}
	s := bufio.NewScanner(bytes.NewReader(input))
	buf := bytes.Buffer{}
	i := 0
	for s.Scan() {
		if s.Text() == "---" {
			parts[i] = buf.String()
			buf.Reset()
			i++
			continue
		}
		buf.Write(s.Bytes())
		buf.WriteString("\n")
	}
	parts[i] = buf.String()
	return parts
}
Program 2 ([]byte):
func main() {
	parts := parseParts([]byte(input))
	for k, v := range parts {
		fmt.Printf("KEY: %d\n%s", k, v)
	}
}

func parseParts(input []byte) map[int][]byte {
	parts := map[int][]byte{}
	s := bufio.NewScanner(bytes.NewReader(input))
	buf := bytes.Buffer{}
	i := 0
	for s.Scan() {
		if s.Text() == "---" {
			parts[i] = buf.Bytes()
			buf.Reset()
			i++
			continue
		}
		buf.Write(s.Bytes())
		buf.WriteString("\n")
	}
	parts[i] = buf.Bytes()
	return parts
}
In the string version,
parts[i] = buf.String()
sets parts[i] to a new string every time. In the []byte version,
parts[i] = buf.Bytes()
sets parts[i] to a byte slice backed by the buffer's single underlying array every time. All three slices share that backing array, so they all see its final contents, but each slice keeps the length it had when it was created, which is why all three show the same text cut off at different places.
You could replace the byte slice line
parts[i] = buf.Bytes()
with something like this:
bb := buf.Bytes()
b := make([]byte, len(bb))
copy(b, bb)
parts[i] = b
in order to get the behavior to match the string version. But the string version is easier and better matches what you seem to be trying to do.
The difference is that bytes.Buffer.String copies the memory, while bytes.Buffer.Bytes does not. Quoting the documentation for Bytes:
The slice is valid for use only until the next buffer modification (that is, only until the next call to a method like Read, Write, Reset, or Truncate).
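To see the aliasing in isolation, here is a minimal standalone sketch (the variable names and strings are illustrative, not from the question; it relies on bytes.Buffer reusing its storage after Reset, which the Reset documentation says it does):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	buf.WriteString("first")
	aliased := buf.Bytes() // shares the buffer's backing array
	copied := buf.String() // copies the bytes into a new string

	buf.Reset()               // length drops to 0, storage is retained
	buf.WriteString("second") // overwrites the retained storage

	fmt.Println(string(aliased)) // "secon" - old length, new contents
	fmt.Println(copied)          // "first" - unaffected
}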
Related
I am trying to do something very simple in Go, but I cannot find any resources.
I receive a hex dump and I want to write it to a file, but the contents of the two files (src and dst) do not match at all. Currently the only way I have found is to manually add \x every 2 characters.
I tried to loop over my string and add \x; the string looks identical, but the output is very different.
This code, written out manually, works:
binary.Write(f, binary.LittleEndian, []byte("\x00\x00\x00\x04\x0A\xFA\x64\xA7\x00\x03\x31\x30"))
But I did not manage to make it from string "000000040afa64a700033130"...
What I currently do (this mirrors what I do in Python 3):
text := "000000040afa64a700033130"
j := 0
var s []int64
f, _ := os.OpenFile("gotest", os.O_WRONLY|os.O_CREATE, 0600)
for i := 0; i < len(text); i += 2 {
	if (i + 2) <= len(text) {
		j = i + 2
	}
	value, _ := strconv.ParseInt(text[i:j], 16, 8)
	binary.Write(f, binary.LittleEndian, value)
	s = append(s, value)
}
If your hex data is in the form of a string and you want to write the raw bytes, you'll have to convert it first; the easiest way would be to use hex.Decode.
import (
	"encoding/hex"
	"io/ioutil"
)

func foo() {
	stringData := []byte("48656c6c6f20476f7068657221")
	hexData := make([]byte, hex.DecodedLen(len(stringData)))

	_, err := hex.Decode(hexData, stringData)
	// handle err

	err = ioutil.WriteFile("filename", hexData, 0644)
	// handle err
}
Based on your use case, you could switch to ioutil.WriteFile. It writes the given byte slice to a file, creating the file if it doesn't exist or truncating it if it already exists.
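Applied to the exact string from the question, a minimal sketch might look like this (the output filename is taken from the question; error handling is kept deliberately short):

package main

import (
	"encoding/hex"
	"io/ioutil"
	"log"
)

func main() {
	// Decode the hex string from the question into raw bytes.
	raw, err := hex.DecodeString("000000040afa64a700033130")
	if err != nil {
		log.Fatal(err)
	}
	// Write the raw bytes out; this replaces the manual \x escaping.
	if err := ioutil.WriteFile("gotest", raw, 0600); err != nil {
		log.Fatal(err)
	}
}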
I was doing Day 07 of last year's Advent of Code and solved it in Python (like this).
I am trying to learn Go and tried a similar approach in Go as well. I am storing all the relations in a map like this:
map[baseColour1:[Colours{subColour1, noOfBags}, Colours{subColour2, noOfBags}...], baseColour2:...
Here is the complete code so far:
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strconv"
	"strings"
)

// Part 1
// Find relation between all the colours
type Colours struct {
	subColour string
	noOfBag   int
}

// To reuse for Part 2
func relationBetweenColours(input [][]string) (relation map[string][]Colours) {
	relation = make(map[string][]Colours)
	for _, rule := range input {
		baseColour := strings.Join(rule[:2], " ")
		relation[baseColour] = make([]Colours, 0)
		for i := range rule {
			match, _ := regexp.MatchString("\\d", rule[i])
			if match {
				subColour := strings.Join(rule[i+1:i+3], " ")
				noOfBag, _ := strconv.Atoi(rule[i])
				relation[baseColour] = append(relation[baseColour], Colours{subColour, noOfBag})
			}
		}
	}
	return
}

func checkMembership(input []string, element string) bool {
	for _, value := range input {
		if value == element {
			return true
		}
	}
	return false
}

func part1(relation map[string][]Colours) (count int) {
	// Strip relation of extra information
	relevantRelation := make(map[string][]string)
	for baseColour, arrayOfColours := range relation {
		relevantRelation[baseColour] = make([]string, 0)
		for _, colour := range arrayOfColours {
			relevantRelation[baseColour] = append(relevantRelation[baseColour], colour.subColour)
		}
	}
	// Add indirectly related colours
	for baseColour, arrayOfColours := range relevantRelation {
		for _, colour := range arrayOfColours {
			// Get an array of all colours which is not linked to baseColour
			for _, subColour := range relevantRelation[colour] {
				if !checkMembership(relevantRelation[baseColour], subColour) {
					relevantRelation[baseColour] = append(relevantRelation[baseColour], subColour)
				}
			}
		}
	}
	// Add no. of colours which can eventually contain a "shiny gold" bag
	for _, arrayOfColours := range relevantRelation {
		if checkMembership(arrayOfColours, "shiny gold") {
			count++
		}
	}
	return
}

func main() {
	// Read input
	file, _ := os.Open("input.txt")
	defer file.Close()
	scanner := bufio.NewScanner(file)
	var input [][]string
	for scanner.Scan() {
		rule := strings.Split(scanner.Text(), " ")
		input = append(input, rule)
	}
	relation := relationBetweenColours(input)
	fmt.Println(part1(relation))
}
When I test my code against the small sample input given in the question, it produces the correct result.
However, running it against my full puzzle input produces a seemingly different, wrong result each time. The output is anywhere between 10 and 50 and changes on each run (the correct answer for my input for Part 1 is 213).
My question is:
What exactly is going on here which causes different result each time?
What can I do to overcome this problem and solve the question?
Go maps are not ordered, so the order in which you store and retrieve values has no guaranteed relationship to the order in which you iterate over them. One thing you can do is sort the map keys and then iterate over the keys in that order. Alternatively, you can keep a separate slice of keys alongside the map; that slice preserves whatever order you give it.
func getOrderedKeys(myMap map[string][]Colours) []string {
	keys := make([]string, 0, len(myMap))
	for k := range myMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
Now, every time you use this map, get the sorted keys and then use them in order to look up the map values.
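For example, a usage sketch (assuming the relation map built by relationBetweenColours in the question):

for _, k := range getOrderedKeys(relation) {
	fmt.Println(k, relation[k])
}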
If you do not want sorted keys, you can instead build a slice of keys in insertion order and use that to drive the iteration.
func relationBetweenColours(input [][]string) (relation map[string][]Colours, keys []string) {
	relation = make(map[string][]Colours)
	for _, rule := range input {
		baseColour := strings.Join(rule[:2], " ")
		relation[baseColour] = make([]Colours, 0)
		keys = append(keys, baseColour)
		...
	}
	return
}
Then iterate over the returned keys slice and use it to index into the map.
The bottom line is that Go maps are not ordered.
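A small illustration of the point (the map and loop here are made up purely for demonstration): iteration order over a Go map is randomized and can differ on every run, which is consistent with the varying results you are seeing.

m := map[string]int{"red": 1, "green": 2, "blue": 3}
for i := 0; i < 3; i++ {
	for k := range m {
		fmt.Print(k, " ") // key order may differ on every pass and every run
	}
	fmt.Println()
}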
I am trying to figure out why my code is not working. I want to take a slice of numbers and strings and separate it into three slices: for each element, if it is a string, append it to the strings slice; if it is a positive number, append it to the positives; and likewise for negatives. Yet here is the output:
Names:
EvTremblay
45.39934611083154
-75.71148292845268
[Crestview -75.73795670904249
BellevueManor -75.73886856878032
Dutchie'sHole -75.66809864107668 ...
Positives:[45.344387632924054 45.37223315413918 ... ]
Negatives: []
Here is my code. Can someone tell me what is causing the Negatives array to not have any values?
func main() {
	fmt.Printf("%q\n", strings.Split("a,b,c", ","))
	var names []string
	var positives, negatives []float64
	bs, err := ioutil.ReadFile("poolss.txt")
	if err != nil {
		return
	}
	str := string(bs)
	fmt.Println(str)
	tokens := strings.Split(str, ",")
	for _, token := range tokens {
		if num, err := strconv.ParseFloat(token, 64); err == nil {
			if num > 0 {
				positives = append(positives, num)
			} else {
				negatives = append(negatives, num)
			}
		} else {
			names = append(names, token)
		}
		fmt.Println(token)
	}
	fmt.Println(fmt.Sprintf("Strings: %v", names))
	fmt.Println(fmt.Sprintf("Positives: %v", positives))
	fmt.Println(fmt.Sprintf("Negatives: %v", negatives))
	for i := range names {
		fmt.Println(names[i])
		fmt.Println(positives[i])
		fmt.Println(negatives[i])
	}
}
Your code has strings as a variable name:
var strings []string
and strings as a package name:
tokens := strings.Split(str, ",")
Don't do that!
strings.Split undefined (type []string has no field or method Split)
Playground: https://play.golang.org/p/HfZGj0jOT-P
Your problem above, I think, lies with the extra \n attached to each value: because you split only on commas, each negative longitude ends up glued by a newline to the name that follows it, so strconv.ParseFloat fails on it and the token lands in names instead of negatives. You get no negative entries at all if the input ends in a linefeed (or exactly one if it doesn't). Insert a Printf so that you can see the errors you're getting from strconv.ParseFloat and all will become clear.
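For example, a sketch of that debugging suggestion applied to the loop from the question:

if num, err := strconv.ParseFloat(token, 64); err == nil {
	// append to positives or negatives as before
} else {
	fmt.Printf("ParseFloat(%q): %v\n", token, err) // %q makes the stray newline in the token visible
	names = append(names, token)
}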
Some small points which may help:
Check errors, and don't assume an error can only mean one thing (this is what is confusing you here) - always print the error when you get one, particularly when debugging
Don't use the name of a package for a variable (strings), it won't end well
Use a data structure which reflects your data
Use the CSV package to read CSV data
So, for example, for storing the data you might want:
type Place struct {
	Name      string
	Latitude  float64
	Longitude float64
}
Then read the data into that, relying on the columns being in a fixed order, and store it in a []Place.
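A minimal sketch of that suggestion (the filename, column order, and error handling are assumptions, not from the original answer; the struct is repeated to keep the example self-contained):

package main

import (
	"encoding/csv"
	"fmt"
	"log"
	"os"
	"strconv"
)

type Place struct {
	Name      string
	Latitude  float64
	Longitude float64
}

func main() {
	f, err := os.Open("poolss.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	records, err := csv.NewReader(f).ReadAll()
	if err != nil {
		log.Fatal(err)
	}

	var places []Place
	for _, rec := range records {
		// Columns assumed to be: name, latitude, longitude.
		lat, latErr := strconv.ParseFloat(rec[1], 64)
		long, longErr := strconv.ParseFloat(rec[2], 64)
		if latErr != nil || longErr != nil {
			log.Printf("skipping %v: %v %v", rec, latErr, longErr)
			continue
		}
		places = append(places, Place{Name: rec[0], Latitude: lat, Longitude: long})
	}
	fmt.Println(places)
}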
Here's what I tried, it works now! Thanks for the help, everyone!
func main() {
	findRoute("poolss.csv", 5)
}

func findRoute(filename string, num int) []Edge {
	var route []Edge
	csvFile, err := os.Open(filename)
	if err != nil {
		return route
	}
	reader := csv.NewReader(bufio.NewReader(csvFile))
	var pools []Pool
	for {
		line, error := reader.Read()
		if error == io.EOF {
			break
		} else if error != nil {
			log.Fatal(error)
		}
		lat, err := strconv.ParseFloat(line[1], 64)
		long, err := strconv.ParseFloat(line[2], 64)
		if err == nil {
			pools = append(pools, Pool{
				name:      line[0],
				latitude:  lat,
				longitude: long,
			})
		}
	}
	return route
}
How can I convert a []interface{} to a []string, or just join all the elements of the []interface{} into a single string? In my case the value of "scope" is a []interface{} of length 2; it is handled in the reflect.Slice case in the code below. Currently I am doing this using reflect, but it does not look good, and I am wondering whether there is a better way to join a slice/array of interface elements.
I also tried using json.Unmarshal, like json.Unmarshal([]byte(jwtResp), &mymap), but I am having trouble with type issues when converting jwt.MapClaims to []byte.
parsedkey, _ := jwt.ParseRSAPublicKeyFromPEM([]byte(key))
parsedToken, jwtErr := jwt.Parse(bearerToken, func(token *jwt.Token) (interface{}, error) {
	return parsedkey, nil
})
jwtValues = make(map[string]string)
// we don't know which data types we are dealing with here, so switch on the value's data type
jwtResp := parsedToken.Claims.(jwt.MapClaims)
for k, v := range jwtResp {
	switch reflect.TypeOf(v).Kind() {
	case reflect.Slice:
		fmt.Println("tp is : ", reflect.TypeOf(v))          // this is []interface{}
		fmt.Println("type is : ", reflect.TypeOf(v).Kind()) // this is slice
		s := reflect.ValueOf(v)
		for i := 0; i < s.Len(); i++ {
			jwtValues[k] += s.Index(i).Interface().(string)
			jwtValues[k] += "|"
		}
	case reflect.String:
		jwtValues[k] = v.(string)
	default:
		fmt.Println("unknown datatype")
	}
}
Thanks for the suggestions. Below is the closest solution I found, replacing the switch with if/else and type assertions, since reflect is costly. The code below checks whether each value in jwtResp is a string, a slice, or something else; if it is a slice, it walks the []interface{} (whose elements are strings) and joins them into one concatenated string.
for k, v := range jwtResp {
	if s, ok := v.(string); ok {
		JwtValues[k] = s
	} else if s, ok := v.([]interface{}); ok {
		sslice := make([]string, len(s))
		for i, v := range s {
			sslice[i] = v.(string)
		}
		JwtValues[k] = strings.Join(sslice, "|")
	} else {
		logger.Log(util.LogDebug, "unknown data type")
	}
}
I am not sure I have fully understood your question, but it seems you are on the right track.
parts := make([]string, 0, s.Len())
for i := 0; i < s.Len(); i++ {
	parts = append(parts, s.Index(i).Interface().(string))
}
jwtValues[k] = strings.Join(parts, "|")
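If the slice might contain elements that are not strings, a variation on that snippet (purely a sketch, not from the original answer) could use fmt.Sprint instead of a type assertion that would panic on other types:

parts := make([]string, 0, s.Len())
for i := 0; i < s.Len(); i++ {
	parts = append(parts, fmt.Sprint(s.Index(i).Interface()))
}
jwtValues[k] = strings.Join(parts, "|")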
I've got a Go text/template that renders a file; however, I've found it difficult to structure the template cleanly while preserving the line breaks in the output.
I'd like to have additional, unnecessary newlines in the template to make it more readable, but strip them from the output. Any run of newlines longer than a normal paragraph break should be condensed to a normal paragraph break, e.g. lines separated by too many blank lines should come out separated by a single blank line.
The string is potentially too large to store safely in memory, so I want to keep it as an output stream.
My first attempt:
type condensingWriter struct {
	writer          io.Writer
	lastLineIsEmpty bool
}

func (c condensingWriter) Write(b []byte) (n int, err error) {
	thisLineIsEmpty := strings.TrimSpace(string(b)) == ""
	defer func() {
		c.lastLineIsEmpty = thisLineIsEmpty
	}()
	if c.lastLineIsEmpty && thisLineIsEmpty {
		return 0, nil
	} else {
		return c.writer.Write(b)
	}
}
This doesn't work because I naively assumed that it would buffer on newline characters, but it doesn't.
Any suggestions on how to get this to work?
Inspired by zmb's approach, I've come up with the following package:
// Package striplines strips runs of consecutive empty lines from an output stream.
package striplines

import (
	"io"
	"strings"
)

// Striplines wraps an output stream, stripping runs of consecutive empty lines.
// You must call Close before the output stream will be complete.
// Implements io.WriteCloser, Writer, Closer.
type Striplines struct {
	Writer      io.Writer
	lastLine    []byte
	currentLine []byte
}

func (w *Striplines) Write(p []byte) (int, error) {
	totalN := 0
	s := string(p)
	if !strings.Contains(s, "\n") {
		w.currentLine = append(w.currentLine, p...)
		return 0, nil
	}
	cur := string(append(w.currentLine, p...))
	lastN := strings.LastIndex(cur, "\n")
	s = cur[:lastN]
	for _, line := range strings.Split(s, "\n") {
		n, err := w.writeLn(line + "\n")
		w.lastLine = []byte(line)
		if err != nil {
			return totalN, err
		}
		totalN += n
	}
	rem := cur[(lastN + 1):]
	w.currentLine = []byte(rem)
	return totalN, nil
}

// Close flushes the last of the output into the underlying writer.
func (w *Striplines) Close() error {
	_, err := w.writeLn(string(w.currentLine))
	return err
}

func (w *Striplines) writeLn(line string) (n int, err error) {
	if strings.TrimSpace(string(w.lastLine)) == "" && strings.TrimSpace(line) == "" {
		return 0, nil
	} else {
		return w.Writer.Write([]byte(line))
	}
}
See it in action here: http://play.golang.org/p/t8BGPUMYhb
The general idea is you'll have to look for consecutive newlines anywhere in the input slice and if such cases exist, skip over all but the first newline character.
Additionally, you have to track whether the last byte written was a newline, so the next call to Write will know to eliminate a newline if necessary. You were on the right track by adding a bool to your writer type. However, you'll want to use a pointer receiver instead of a value receiver here, otherwise you'll be modifying a copy of the struct.
You would want to change
func (c condensingWriter) Write(b []byte)
to
func (c *condensingWriter) Write(b []byte)
You could try something like this. You'll have to test with larger inputs to make sure it handles all cases correctly.
package main

import (
	"bytes"
	"io"
	"os"
)

var Newline byte = byte('\n')

type ReduceNewlinesWriter struct {
	w               io.Writer
	lastByteNewline bool
}

func (r *ReduceNewlinesWriter) Write(b []byte) (int, error) {
	// if the previous call to Write ended with a \n
	// then we have to skip over any leading newlines here
	if r.lastByteNewline {
		i := 0
		for i < len(b) && b[i] == Newline {
			i++
		}
		b = b[i:]
	}
	if len(b) == 0 {
		return 0, nil
	}

	i := bytes.IndexByte(b, Newline)
	if i == -1 {
		// no newlines - just write the entire thing
		r.lastByteNewline = false
		return r.w.Write(b)
	}

	// write up to and including the first newline
	i++
	n, err := r.w.Write(b[:i])
	if err != nil {
		return n, err
	}
	r.lastByteNewline = true

	// skip over any immediately following newlines and recurse on the rest
	for i < len(b) && b[i] == Newline {
		i++
	}
	// note: the returned count is the number of bytes passed to the
	// underlying writer, which may be fewer than len(b)
	m, err := r.Write(b[i:])
	return n + m, err
}

func main() {
	r := ReduceNewlinesWriter{
		w: os.Stdout,
	}
	io.WriteString(&r, "this\n\n\n\n\n\n\nhas\nmultiple\n\n\nnewline\n\n\n\ncharacters")
}
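To connect this back to the original text/template question, a usage sketch (tmpl and data stand in for whatever template and value you are executing):

r := &ReduceNewlinesWriter{w: os.Stdout}
err := tmpl.Execute(r, data)
// handle err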