Can someone please point me at a more efficient version of the following?
b := make([]byte, 0, sizeTotal)
b = append(b, size...)
b = append(b, contentType...)
b = append(b, lenCallbackid...)
b = append(b, lenTarget...)
b = append(b, lenAction...)
b = append(b, lenContent...)
b = append(b, callbackid...)
b = append(b, target...)
b = append(b, action...)
b = append(b, content...)
Every variable is a byte slice, apart from sizeTotal, which is an int.
Update:
Code:
type Message struct {
    size        uint32
    contentType uint8
    callbackId  string
    target      string
    action      string
    content     string
}

var res []byte
var b []byte = make([]byte, 0, 4096)

func (m *Message) ToByte() []byte {
    callbackIdIntLen := len(m.callbackId)
    targetIntLen := len(m.target)
    actionIntLen := len(m.action)
    contentIntLen := len(m.content)
    lenCallbackid := make([]byte, 4)
    binary.LittleEndian.PutUint32(lenCallbackid, uint32(callbackIdIntLen))
    callbackid := []byte(m.callbackId)
    lenTarget := make([]byte, 4)
    binary.LittleEndian.PutUint32(lenTarget, uint32(targetIntLen))
    target := []byte(m.target)
    lenAction := make([]byte, 4)
    binary.LittleEndian.PutUint32(lenAction, uint32(actionIntLen))
    action := []byte(m.action)
    lenContent := make([]byte, 4)
    binary.LittleEndian.PutUint32(lenContent, uint32(contentIntLen))
    content := []byte(m.content)
    sizeTotal := 21 + callbackIdIntLen + targetIntLen + actionIntLen + contentIntLen
    size := make([]byte, 4)
    binary.LittleEndian.PutUint32(size, uint32(sizeTotal))
    b = b[:0]
    b = append(b, size...)
    b = append(b, byte(m.contentType))
    b = append(b, lenCallbackid...)
    b = append(b, lenTarget...)
    b = append(b, lenAction...)
    b = append(b, lenContent...)
    b = append(b, callbackid...)
    b = append(b, target...)
    b = append(b, action...)
    b = append(b, content...)
    res = b
    return b
}
func FromByte(bytes []byte) *Message {
    size := binary.LittleEndian.Uint32(bytes[0:4])
    contentType := bytes[4]
    lenCallbackid := binary.LittleEndian.Uint32(bytes[5:9])
    lenTarget := binary.LittleEndian.Uint32(bytes[9:13])
    lenAction := binary.LittleEndian.Uint32(bytes[13:17])
    lenContent := binary.LittleEndian.Uint32(bytes[17:21])
    callbackid := string(bytes[21 : 21+lenCallbackid])
    target := string(bytes[21+lenCallbackid : 21+lenCallbackid+lenTarget])
    action := string(bytes[21+lenCallbackid+lenTarget : 21+lenCallbackid+lenTarget+lenAction])
    content := string(bytes[size-lenContent : size])
    return &Message{size, contentType, callbackid, target, action, content}
}
Benchmarks:
func BenchmarkMessageToByte(b *testing.B) {
    m := NewMessage(uint8(3), "agsdggsdasagdsdgsgddggds", "sometarSFAFFget", "somFSAFSAFFSeaction", "somfasfsasfafsejsonzhit")
    for n := 0; n < b.N; n++ {
        m.ToByte()
    }
}

func BenchmarkMessageFromByte(b *testing.B) {
    m := NewMessage(uint8(1), "sagdsgaasdg", "soSASFASFASAFSFASFAGmetarget", "adsgdgsagdssgdsgd", "agsdsdgsagdsdgasdg").ToByte()
    for n := 0; n < b.N; n++ {
        FromByte(m)
    }
}

func BenchmarkStringToByte(b *testing.B) {
    for n := 0; n < b.N; n++ {
        _ = []byte("abcdefghijklmnoqrstuvwxyz")
    }
}

func BenchmarkStringFromByte(b *testing.B) {
    s := []byte("abcdefghijklmnoqrstuvwxyz")
    for n := 0; n < b.N; n++ {
        _ = string(s)
    }
}

func BenchmarkUintToByte(b *testing.B) {
    for n := 0; n < b.N; n++ {
        i := make([]byte, 4)
        binary.LittleEndian.PutUint32(i, uint32(99))
    }
}

func BenchmarkUintFromByte(b *testing.B) {
    i := make([]byte, 4)
    binary.LittleEndian.PutUint32(i, uint32(99))
    for n := 0; n < b.N; n++ {
        binary.LittleEndian.Uint32(i)
    }
}
Bench results:
BenchmarkMessageToByte 10000000 280 ns/op
BenchmarkMessageFromByte 10000000 293 ns/op
BenchmarkStringToByte 50000000 55.1 ns/op
BenchmarkStringFromByte 50000000 49.7 ns/op
BenchmarkUintToByte 1000000000 2.14 ns/op
BenchmarkUintFromByte 2000000000 1.71 ns/op
Provided the memory is already allocated, a sequence of x = append(x, a...) calls is rather efficient in Go.
In your example, the initial allocation (make) probably costs more than the sequence of appends. It depends on the size of the fields. Consider the following benchmark:
package main
import (
"testing"
)
const sizeTotal = 25
var res []byte // To enforce heap allocation
func BenchmarkWithAlloc(b *testing.B) {
a := []byte("abcde")
for i := 0; i < b.N; i++ {
x := make([]byte, 0, sizeTotal)
x = append(x, a...)
x = append(x, a...)
x = append(x, a...)
x = append(x, a...)
x = append(x, a...)
res = x // Make sure x escapes, and is therefore heap allocated
}
}
func BenchmarkWithoutAlloc(b *testing.B) {
a := []byte("abcde")
x := make([]byte, 0, sizeTotal)
for i := 0; i < b.N; i++ {
x = x[:0]
x = append(x, a...)
x = append(x, a...)
x = append(x, a...)
x = append(x, a...)
x = append(x, a...)
res = x
}
}
On my box, the result is:
testing: warning: no tests to run
PASS
BenchmarkWithAlloc 10000000 116 ns/op 32 B/op 1 allocs/op
BenchmarkWithoutAlloc 50000000 24.0 ns/op 0 B/op 0 allocs/op
Systematically reallocating the buffer (even a small one) makes this benchmark at least 5 times slower.
So your best hope to optimize this code is to make sure you do not reallocate a buffer for each packet you build. Instead, you should keep your buffer and reuse it for each marshalling operation.
You can reset a slice while keeping its underlying buffer allocated with the following statement:
x = x[:0]
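Applied to the Message type from the question, that could look like the following sketch, where the caller owns the buffer and passes it back in for every message. AppendTo is a name I made up for illustration, and the small [4]byte scratch array replaces the four temporary make([]byte, 4) slices:

func (m *Message) AppendTo(b []byte) []byte {
    var u [4]byte
    put := func(n int) {
        binary.LittleEndian.PutUint32(u[:], uint32(n))
        b = append(b, u[:]...)
    }
    // Header: total size, content type, then the four field lengths.
    put(21 + len(m.callbackId) + len(m.target) + len(m.action) + len(m.content))
    b = append(b, byte(m.contentType))
    put(len(m.callbackId))
    put(len(m.target))
    put(len(m.action))
    put(len(m.content))
    // Payload: the four string fields, appended directly.
    b = append(b, m.callbackId...)
    b = append(b, m.target...)
    b = append(b, m.action...)
    b = append(b, m.content...)
    return b
}

Typical usage would then allocate the buffer once and reuse it, for example buf = m.AppendTo(buf[:0]) inside the send loop.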
I looked carefully at that and made the following benchmarks.
package append
import "testing"
func BenchmarkAppend(b *testing.B) {
as := 1000
a := make([]byte, as)
s := make([]byte, 0, b.N*as)
for i := 0; i < b.N; i++ {
s = append(s, a...)
}
}
func BenchmarkCopy(b *testing.B) {
as := 1000
a := make([]byte, as)
s := make([]byte, 0, b.N*as)
for i := 0; i < b.N; i++ {
copy(s[i*as:(i+1)*as], a)
}
}
The results are
grzesiek#klapacjusz ~/g/s/t/append> go test -bench . -benchmem
testing: warning: no tests to run
PASS
BenchmarkAppend 10000000 202 ns/op 1000 B/op 0 allocs/op
BenchmarkCopy 10000000 201 ns/op 1000 B/op 0 allocs/op
ok test/append 4.564s
If sizeTotal is big enough, then your code makes no extra memory allocations. It copies only the bytes it needs to copy. It is perfectly fine.
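Since the total size is known before any copying starts, another option is to allocate (or reuse) the destination at full length and write every field at its fixed offset, which avoids the temporary length slices entirely. A sketch against the Message layout shown above; ToByteDirect is an illustrative name, not from the question:

func (m *Message) ToByteDirect() []byte {
    total := 21 + len(m.callbackId) + len(m.target) + len(m.action) + len(m.content)
    b := make([]byte, total) // or a reused buffer of at least this size
    binary.LittleEndian.PutUint32(b[0:4], uint32(total))
    b[4] = byte(m.contentType)
    binary.LittleEndian.PutUint32(b[5:9], uint32(len(m.callbackId)))
    binary.LittleEndian.PutUint32(b[9:13], uint32(len(m.target)))
    binary.LittleEndian.PutUint32(b[13:17], uint32(len(m.action)))
    binary.LittleEndian.PutUint32(b[17:21], uint32(len(m.content)))
    // Copy the string payloads directly after the 21-byte header.
    n := 21
    n += copy(b[n:], m.callbackId)
    n += copy(b[n:], m.target)
    n += copy(b[n:], m.action)
    copy(b[n:], m.content)
    return b
}

The offsets mirror FromByte from the question, so the two sides stay in sync.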
Related
I discovered some very strange behaviour with Go maps recently. The use case is to create a group of integers and have an O(1) check for IsMember(id int).
The current implementation is:
func convertToMap(v []int64) map[int64]void {
out := make(map[int64]void, len(v))
for _, i := range v {
out[i] = void{}
}
return out
}
type Group struct {
members map[int64]void
}
type void struct{}
func (g *Group) IsMember(input string) (ok bool) {
memberID, _ := strconv.ParseInt(input, 10, 64)
_, ok = g.members[memberID]
return
}
When I benchmark the IsMember method, everything looks fine up to 6 million members. But above that, the map lookup is taking 1 second for each lookup!!
The benchmark test:
func BenchmarkIsMember(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
g := &Group{}
g.members = convertToMap(benchmarkV)
for N := 0; N < b.N && N < sizeOfGroup; N++ {
g.IsMember(benchmarkKVString[N])
}
}
var benchmarkV, benchmarkKVString = func(size int) ([]int64, []string) {
v := make([]int64, size)
s := make([]string, size)
for i := range v {
val := rand.Int63()
v[i] = val
s[i] = strconv.FormatInt(val, 10)
}
return v, s
}(sizeOfGroup)
Benchmark numbers:
const sizeOfGroup = 6000000
BenchmarkIsMember-8 2000000 568 ns/op 50 B/op 0 allocs/op
const sizeOfGroup = 6830000
BenchmarkIsMember-8 1 1051725455 ns/op 178767208 B/op 25 allocs/op
Anything above group size of 6.8 million gives the same result.
Can someone help me explain why this is happening, and can anything be done to make this performant while still using maps?
Also, I don't understand why so much memory is being allocated. Even if the time taken were due to collisions and the resulting traversal, there shouldn't be any memory allocation; is my thought process wrong?
There is no need to measure the cost of converting the slice to a map, because we only want to measure the lookup operation. In your benchmark, b.ResetTimer() is called before convertToMap, so building the ~6.8-million-entry map is included in the measured time and allocations; that construction, not the lookups, is what accounts for the ~1 second and the ~178 MB reported for the single iteration.
I've slightly modified the benchmark:
func BenchmarkIsMember(b *testing.B) {
fn := func(size int) ([]int64, []string) {
v := make([]int64, size)
s := make([]string, size)
for i := range v {
val := rand.Int63()
v[i] = val
s[i] = strconv.FormatInt(val, 10)
}
return v, s
}
for _, size := range []int{
6000000,
6800000,
6830000,
60000000,
} {
b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
var benchmarkV, benchmarkKVString = fn(size)
g := &Group{}
g.members = convertToMap(benchmarkV)
b.ReportAllocs()
b.ResetTimer()
for N := 0; N < b.N && N < size; N++ {
g.IsMember(benchmarkKVString[N])
}
})
}
}
And got the following results:
go test ./... -bench=. -benchtime=10s -cpu=1
goos: linux
goarch: amd64
pkg: trash
BenchmarkIsMember/size=6000000 2000000000 0.55 ns/op 0 B/op 0 allocs/op
BenchmarkIsMember/size=6800000 1000000000 1.27 ns/op 0 B/op 0 allocs/op
BenchmarkIsMember/size=6830000 1000000000 1.23 ns/op 0 B/op 0 allocs/op
BenchmarkIsMember/size=60000000 100000000 136 ns/op 0 B/op 0 allocs/op
PASS
ok trash 167.578s
The degradation isn't as significant as in your example.
func Benchmark_foreach1(b *testing.B) {
var test map[int]int
test = make(map[int]int)
for i := 0; i < 100000; i++ {
test[i] = 1
}
for i := 0; i < b.N; i++ {
for i, _ := range test {
if test[i] != 1 {
panic("ds")
}
}
}
}
func Benchmark_foreach2(b *testing.B) {
var test map[int]int
test = make(map[int]int)
for i := 0; i < 100000; i++ {
test[i] = 1
}
for i := 0; i < b.N; i++ {
for _, v := range test {
if v != 1 {
panic("heh")
}
}
}
}
Running it gives the results below:
goos: linux
goarch: amd64
Benchmark_foreach1-2 500 3172323 ns/op
Benchmark_foreach2-2 1000 1707214 ns/op
why is foreach-2 slow?
I think Benchmark_foreach2-2 is about 2 times faster: it requires 1707214 nanoseconds per operation, while the first one takes 3172323, so the second one is 3172323 / 1707214 = 1.85 times faster.
Reason: the second benchmark doesn't need to fetch the value from memory again; it already has the value in the v variable.
The test[k] statement in BenchmarkForeachK performs an extra random-access map read for each key, so BenchmarkForeachK takes more time than BenchmarkForeachV: 9362945 ns/op versus 4213940 ns/op.
For example,
package main
import "testing"
func testMap() map[int]int {
test := make(map[int]int)
for i := 0; i < 100000; i++ {
test[i] = 1
}
return test
}
func BenchmarkForeachK(b *testing.B) {
test := testMap()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for k := range test {
if test[k] != 1 {
panic("eh")
}
}
}
}
func BenchmarkForeachV(b *testing.B) {
test := testMap()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, v := range test {
if v != 1 {
panic("heh")
}
}
}
}
Output:
$ go test foreach_test.go -bench=.
BenchmarkForeachK-4 200 9362945 ns/op 0 B/op 0 allocs/op
BenchmarkForeachV-4 300 4213940 ns/op 0 B/op 0 allocs/op
I've been trying to understand slice preallocation with make and why it's a good idea. I noticed a large performance difference between preallocating a slice and appending to it vs just initializing it with 0 length/capacity and then appending to it. I wrote a set of very simple benchmarks:
import "testing"
func BenchmarkNoPreallocate(b *testing.B) {
for i := 0; i < b.N; i++ {
// Don't preallocate our initial slice
init := []int64{}
init = append(init, 5)
}
}
func BenchmarkPreallocate(b *testing.B) {
for i := 0; i < b.N; i++ {
// Preallocate our initial slice
init := make([]int64, 0, 1)
init = append(init, 5)
}
}
and was a little puzzled by the results:
$ go test -bench=. -benchmem
goos: linux
goarch: amd64
BenchmarkNoPreallocate-4 30000000 41.8 ns/op 8 B/op 1 allocs/op
BenchmarkPreallocate-4 2000000000 0.29 ns/op 0 B/op 0 allocs/op
I have a couple of questions:
Why are there no allocations (it shows 0 allocs/op) in the preallocation benchmark case? Certainly we're preallocating, but the allocation had to have happened at some point.
I imagine this may become clearer after the first question is answered, but how is the preallocation case so much quicker? Am I misinterpreting this benchmark?
Please let me know if anything is unclear. Thank you!
Go has an optimizing compiler. Constants are evaluated at compile time. Variables are evaluated at runtime. Constant values can be used to optimize compiler-generated code. For example,
package main
import "testing"
func BenchmarkNoPreallocate(b *testing.B) {
for i := 0; i < b.N; i++ {
// Don't preallocate our initial slice
init := []int64{}
init = append(init, 5)
}
}
func BenchmarkPreallocateConst(b *testing.B) {
const (
l = 0
c = 1
)
for i := 0; i < b.N; i++ {
// Preallocate our initial slice
init := make([]int64, l, c)
init = append(init, 5)
}
}
func BenchmarkPreallocateVar(b *testing.B) {
var (
l = 0
c = 1
)
for i := 0; i < b.N; i++ {
// Preallocate our initial slice
init := make([]int64, l, c)
init = append(init, 5)
}
}
Output:
$ go test alloc_test.go -bench=. -benchmem
BenchmarkNoPreallocate-4 50000000 39.3 ns/op 8 B/op 1 allocs/op
BenchmarkPreallocateConst-4 2000000000 0.36 ns/op 0 B/op 0 allocs/op
BenchmarkPreallocateVar-4 50000000 28.2 ns/op 8 B/op 1 allocs/op
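If you want to confirm what the compiler decided for each make, you can ask for its escape analysis report via the -gcflags build flag (the exact diagnostics depend on the compiler version):

$ go test -gcflags='-m' -bench=. -benchmem alloc_test.go

Roughly speaking, when the length and capacity are constants and the slice never leaves the loop body, the compiler can keep the one-element backing array on the stack (or optimize it away entirely), so nothing shows up under -benchmem; with variables it cannot prove that, and the 8 bytes are heap allocated on every iteration.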
Another interesting set of benchmarks:
package main
import "testing"
func BenchmarkNoPreallocate(b *testing.B) {
const (
l = 0
c = 8 * 1024
)
for i := 0; i < b.N; i++ {
// Don't preallocate our initial slice
init := []int64{}
for j := 0; j < c; j++ {
init = append(init, 42)
}
}
}
func BenchmarkPreallocateConst(b *testing.B) {
const (
l = 0
c = 8 * 1024
)
for i := 0; i < b.N; i++ {
// Preallocate our initial slice
init := make([]int64, l, c)
for j := 0; j < cap(init); j++ {
init = append(init, 42)
}
}
}
func BenchmarkPreallocateVar(b *testing.B) {
var (
l = 0
c = 8 * 1024
)
for i := 0; i < b.N; i++ {
// Preallocate our initial slice
init := make([]int64, l, c)
for j := 0; j < cap(init); j++ {
init = append(init, 42)
}
}
}
Output:
$ go test peter_test.go -bench=. -benchmem
BenchmarkNoPreallocate-4 20000 75656 ns/op 287992 B/op 19 allocs/op
BenchmarkPreallocateConst-4 100000 22386 ns/op 65536 B/op 1 allocs/op
BenchmarkPreallocateVar-4 100000 22112 ns/op 65536 B/op 1 allocs/op
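For completeness, the stack allocation in the constant case also relies on the slice not escaping the loop body. A small variant (sink and the benchmark name are mine, not from the question) that leaks the slice to a package-level variable should bring the allocation back even with constant length and capacity:

var sink []int64

func BenchmarkPreallocateConstEscape(b *testing.B) {
    for i := 0; i < b.N; i++ {
        init := make([]int64, 0, 1)
        init = append(init, 5)
        sink = init // forces the backing array to escape to the heap
    }
}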
So I have the quicksort algorithm implemented with concurrency (and a version without it as well). Now I wanted to compare the times. I wrote this:
func benchmarkConcurrentQuickSort(size int, b *testing.B) {
A := RandomArray(size)
var wg sync.WaitGroup
b.ResetTimer()
ConcurrentQuicksort(A, 0, len(A)-1, &wg)
wg.Wait()
}
func BenchmarkConcurrentQuickSort500(b *testing.B) {
benchmarkConcurrentQuickSort(500, b)
}
func BenchmarkConcurrentQuickSort1000(b *testing.B) {
benchmarkConcurrentQuickSort(1000, b)
}
func BenchmarkConcurrentQuickSort5000(b *testing.B) {
benchmarkConcurrentQuickSort(5000, b)
}
func BenchmarkConcurrentQuickSort10000(b *testing.B) {
benchmarkConcurrentQuickSort(10000, b)
}
func BenchmarkConcurrentQuickSort20000(b *testing.B) {
benchmarkConcurrentQuickSort(20000, b)
}
func BenchmarkConcurrentQuickSort1000000(b *testing.B) {
benchmarkConcurrentQuickSort(1000000, b)
}
The results are like this:
C:\projects\go\src\github.com\frynio\mysort>go test -bench=.
BenchmarkConcurrentQuickSort500-4 2000000000 0.00 ns/op
BenchmarkConcurrentQuickSort1000-4 2000000000 0.00 ns/op
BenchmarkConcurrentQuickSort5000-4 2000000000 0.00 ns/op
BenchmarkConcurrentQuickSort10000-4 2000000000 0.00 ns/op
BenchmarkConcurrentQuickSort20000-4 2000000000 0.00 ns/op
BenchmarkConcurrentQuickSort1000000-4 30 49635266 ns/op
PASS
ok github.com/frynio/mysort 8.342s
I can believe the last one, but I definitely think that sorting a 500-element array takes longer than 1 ns. What am I doing wrong? I am pretty sure that RandomArray returns an array of the wanted size, as we can see in the last benchmark. Why does it print 0.00 ns?
func RandomArray(n int) []int {
a := []int{}
for i := 0; i < n; i++ {
a = append(a, rand.Intn(500))
}
return a
}
// ConcurrentPartition - ConcurrentQuicksort function for partitioning the array (randomized choice of a pivot)
func ConcurrentPartition(A []int, p int, r int) int {
index := rand.Intn(r-p) + p
pivot := A[index]
A[index] = A[r]
A[r] = pivot
x := A[r]
j := p - 1
i := p
for i < r {
if A[i] <= x {
j++
tmp := A[j]
A[j] = A[i]
A[i] = tmp
}
i++
}
temp := A[j+1]
A[j+1] = A[r]
A[r] = temp
return j + 1
}
// ConcurrentQuicksort - a concurrent version of a quicksort algorithm
func ConcurrentQuicksort(A []int, p int, r int, wg *sync.WaitGroup) {
if p < r {
q := ConcurrentPartition(A, p, r)
wg.Add(2)
go func() {
ConcurrentQuicksort(A, p, q-1, wg)
wg.Done()
}()
go func() {
ConcurrentQuicksort(A, q+1, r, wg)
wg.Done()
}()
}
}
Package testing
A sample benchmark function looks like this:
func BenchmarkHello(b *testing.B) {
for i := 0; i < b.N; i++ {
fmt.Sprintf("hello")
}
}
The benchmark function must run the target code b.N times. During
benchmark execution, b.N is adjusted until the benchmark function
lasts long enough to be timed reliably.
I don't see a benchmark loop in your code. Try
func benchmarkConcurrentQuickSort(size int, b *testing.B) {
A := RandomArray(size)
var wg sync.WaitGroup
b.ResetTimer()
for i := 0; i < b.N; i++ {
ConcurrentQuicksort(A, 0, len(A)-1, &wg)
wg.Wait()
}
}
Output:
BenchmarkConcurrentQuickSort500-4 10000 122291 ns/op
BenchmarkConcurrentQuickSort1000-4 5000 221154 ns/op
BenchmarkConcurrentQuickSort5000-4 1000 1225230 ns/op
BenchmarkConcurrentQuickSort10000-4 500 2568024 ns/op
BenchmarkConcurrentQuickSort20000-4 300 5808130 ns/op
BenchmarkConcurrentQuickSort1000000-4 1 1371751710 ns/op
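One caveat with the fixed benchmark: after the first iteration, A is already sorted, so later iterations time quicksort on sorted input (with the random pivot that is not a pathological case, but the amount of swapping still differs). If that matters for your comparison, regenerate the input outside the timed region; a sketch, noting that StopTimer/StartTimer add their own overhead in tight loops:

func benchmarkConcurrentQuickSort(size int, b *testing.B) {
    var wg sync.WaitGroup
    for i := 0; i < b.N; i++ {
        b.StopTimer()
        A := RandomArray(size) // fresh, unsorted input for every iteration
        b.StartTimer()
        ConcurrentQuicksort(A, 0, len(A)-1, &wg)
        wg.Wait()
    }
}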
I have a tweaking question. I had some repetitive pieces of code in my web-app request handlers where simple things like fmt.Sprintf("%d%d", 1293123, 234993923) happen.
Now I did some benchmarks and tested a bytes writer, which was quite slow (~400 ns/op). The Sprintf itself was about 240 ns/op. Then I tried joining strings:
var concatBuffStr []byte
func BenchmarkStringsJoin(b *testing.B) {
for n := 0; n < b.N; n++ {
concatBuffStr = []byte(strings.Join([]string{
strconv.Itoa(2),
strconv.Itoa(3),
}, " "))
}
}
152 ns/op
A big improvement already. But then I tried gluing 2 ints, converted to bytes, together:
var concatBuffStr []byte
func BenchmarkConcatIntsAsBytes(b *testing.B) {
for n := 0; n < b.N; n++ {
aBuf := make([]byte, 8)
bBuf := make([]byte, 8)
binary.BigEndian.PutUint64(aBuf, uint64(2))
binary.BigEndian.PutUint64(bBuf, uint64(3))
concatBuffStr = append(aBuf, bBuf...)
}
}
57.8 ns/op
Amazing! But I could avoid gluing them together by reserving the full 16 bytes up front, enough for 2 maxed-out 64-bit uints:
var concatBuffStr []byte
func BenchmarkCopyIntsAsBytes(b *testing.B) {
for n := 0; n < b.N; n++ {
concatBuffStr = make([]byte, 16)
bBuf := make([]byte, 8)
binary.BigEndian.PutUint64(concatBuffStr, uint64(123123))
binary.BigEndian.PutUint64(bBuf, uint64(3453455))
copy(concatBuffStr[8:], bBuf)
}
}
30.4 ns/op
By now we're 8 times faster than with the fmt.Sprintf() method.
I was wondering if there is an even faster way than this. I want to avoid unsafe, though.
I was also thinking of checking the values of both ints and, if they fit in a uint32 or uint16, customizing the logic. Benchmarking the uint16 downgrade by hand gives around 23 ns/op, but that's not realistic, as the web request itself would still need to check the size of the ints and do extra logic, which will likely cost more than the 7 ns/op gain.
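For what it's worth, if the idea is only to spend fewer bytes on small values, encoding/binary already ships a variable-width encoding: PutUvarint picks the width based on the value, at the cost of a variable-length result that the reader must decode with Uvarint. A sketch with the same example values (the benchmark name is mine):

var concatBuffStr []byte

func BenchmarkConcatIntsAsVarints(b *testing.B) {
    for n := 0; n < b.N; n++ {
        buf := make([]byte, 2*binary.MaxVarintLen64)
        i := binary.PutUvarint(buf, 123123)
        i += binary.PutUvarint(buf[i:], 3453455)
        concatBuffStr = buf[:i] // keep only the bytes actually written
    }
}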
Update
I managed to simplify it a little bit more:
var concatBuffStr []byte
func BenchmarkCopyIntsAsBytesShort(b *testing.B) {
for n := 0; n < b.N; n++ {
concatBuffStr = make([]byte, 16)
binary.BigEndian.PutUint64(concatBuffStr, uint64(123123))
binary.BigEndian.PutUint64(concatBuffStr[8:], uint64(3453455))
}
}
28.6 ns/op
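If each 16-byte result only needs to live until it is consumed, the remaining make can be hoisted out of the loop as well, which is the same buffer-reuse idea as in the earlier answers. This is only valid when every result is used before the next call overwrites it; the benchmark name is mine:

var concatBuffStr = make([]byte, 16)

func BenchmarkReuseIntsAsBytes(b *testing.B) {
    for n := 0; n < b.N; n++ {
        // Overwrite the reused buffer in place, no allocation per iteration.
        binary.BigEndian.PutUint64(concatBuffStr, uint64(123123))
        binary.BigEndian.PutUint64(concatBuffStr[8:], uint64(3453455))
    }
}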