Is my golang func run time analysis correct? - algorithm

This code snippet takes two slices of binary digits, a1 and a2, and returns their sum as the slice r1. I want to figure out how long this code takes to compute the result, and my analysis ends up with a factorial term. Is my analysis right?
My estimate of the time complexity is:
cn + (n*n!) + c
The code is:
func BinaryPlus(a1 []int, a2 []int) []int {
    var r1 = make([]int, len(a1), 2*(len(a1)))
    for i := 0; i < len(a1); i++ {
        r1[i] = a1[i] + a2[i]
    }
    // reverse the slice
    ReverseSlice(r1)
    r1 = append(r1, 0)
    final := 0
    for i := 0; final != 1; i++ {
        isOver := 1
        for j := 0; j < len(r1); j++ {
            if r1[j] > 1 {
                r1[j] = r1[j] % 2
                r1[j+1] += 1
                if r1[j+1] > 1 {
                    isOver = 0
                }
            }
        }
        if isOver == 1 {
            final = 1
        }
    }
    // reverse the slice
    ReverseSlice(r1)
    return r1
}

func ReverseSlice(s interface{}) {
    n := reflect.ValueOf(s).Len()
    swap := reflect.Swapper(s)
    for i, j := 0, n-1; i < j; i, j = i+1, j-1 {
        swap(i, j)
    }
}

It is not entirely clear that your code, as written, is correct. The capacity you reserve for the result slice can be too small: consider the case len(a2) > 2*len(a1), where the make(...) call will not reserve enough. Further, the initial for loop will miss adding in the more significant bits of a2.
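For instance, reusing your BinaryPlus and ReverseSlice exactly as written (this snippet assumes package main plus imports of fmt and reflect), a call where a2 is longer than a1 drops the high bits:
func main() {
    fmt.Println(BinaryPlus([]int{1, 0, 1}, []int{0, 1, 1})) // equal lengths: [1 0 0 0], correct (101 + 011 = 1000)
    fmt.Println(BinaryPlus([]int{1}, []int{1, 0, 1}))       // len(a2) > len(a1): prints [1 0], but 1 + 101 should be 110
}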
Binary addition should have no more than O(n) complexity; you can do it with a single for loop, where n = 1 + max(len(a1), len(a2)):
package main

import (
    "fmt"
    "reflect"
)

func BinaryPlus(a1 []int, a2 []int) []int {
    reserve := len(a1) + 1
    if x := len(a2) + 1; x > reserve {
        reserve = x
    }

    hold := 0   // carry plus the two input bits for the current column
    maxBit := 1 // position (from the right) of the most significant non-zero bit seen
    ans := make([]int, reserve)
    for i := 1; i <= reserve; i++ {
        hold = hold / 2 // carry from the previous column
        if i <= len(a1) {
            hold += a1[len(a1)-i]
        }
        if i <= len(a2) {
            hold += a2[len(a2)-i]
        }
        ans[reserve-i] = hold & 1
        if hold != 0 && i > maxBit {
            maxBit = i
        }
    }
    return ans[reserve-maxBit:]
}

func main() {
    tests := []struct {
        a, b, want []int
    }{
        {
            a:    []int{1},
            b:    []int{0},
            want: []int{1},
        },
        {
            a:    []int{1, 0},
            b:    []int{0, 0, 1},
            want: []int{1, 1},
        },
        {
            a:    []int{1, 0, 0, 1},
            b:    []int{1, 1, 1, 1, 0},
            want: []int{1, 0, 0, 1, 1, 1},
        },
        {
            a:    []int{0, 0},
            b:    []int{0, 0, 0, 0, 0},
            want: []int{0},
        },
    }
    bad := false
    for i := 0; i < len(tests); i++ {
        t := tests[i]
        c := BinaryPlus(t.a, t.b)
        if !reflect.DeepEqual(c, t.want) {
            fmt.Println(t.a, "+", t.b, "=", c, "; wanted:", t.want)
            bad = true
        }
    }
    if bad {
        fmt.Println("FAILED")
    } else {
        fmt.Println("PASSED")
    }
}

Related

Sorting rows of a sparse CSC matrix Golang

I'm trying to analyze sparse matrices. The task is to sort the rows of the original matrix in ascending order of the sum of their elements.
But I don't understand how to do this without damaging the empty (zero) elements.
I tried to bind the row sums to the rows and move them around somehow, but some elements ended up removed from the CSC structure.
It may be necessary to change the li/lj arrays themselves, but I don't have enough mathematical background for this. More precisely, I don't understand how to track when elements should be rearranged unless the additional (zero) elements are explicitly stored in the structure.
package main

import (
    "fmt"
)

type CSC struct {
    a, lj, li []int
}

func getEl(i, j int, el *CSC) int {
    for k := el.lj[j]; k < el.lj[j+1]; k++ {
        if el.li[k] == i {
            return el.a[k]
        }
    }
    return 0
}

func maxSliceEl(lj []int) int {
    max := 0
    for _, v := range lj {
        if v > max {
            max = v
        }
    }
    return max
}

func main() {
    ma := CSC{
        a:  []int{8, 2, 5, 7, 1, 9, 2},
        li: []int{0, 0, 1, 4, 4, 6, 4},
        lj: []int{0, 1, 1, 4, 6, 7},
    }
    n := len(ma.lj) + 1
    m := maxSliceEl(ma.li) - 1
    fmt.Printf("Col: %v, Row: %v\n", n, m)
    maxStr := []int{}
    fmt.Println("Initial matrix:")
    for i := 0; i < n; i++ {
        sumStrEl := 0
        for j := 0; j < m; j++ {
            fmt.Print(getEl(i, j, &ma), " ")
            sumStrEl += getEl(i, j, &ma)
        }
        maxStr = append(maxStr, sumStrEl)
        fmt.Println("|sumStrEl: ", sumStrEl)
    }
}
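For reference, here is a minimal sketch (with made-up values, not the data from the question) of how this CSC layout maps to a dense matrix: a holds the nonzero values column by column, li[k] is the row index of a[k], and lj[j]..lj[j+1] delimits column j's entries in a, which is exactly what getEl walks.
// Hypothetical 3x3 matrix, for illustration only:
//   | 5 0 0 |
//   | 0 0 2 |
//   | 0 7 0 |
ex := CSC{
    a:  []int{5, 7, 2},    // nonzero values, stored column by column
    li: []int{0, 2, 1},    // row index of each value in a
    lj: []int{0, 1, 2, 3}, // column j occupies a[lj[j]:lj[j+1]]
}
fmt.Println(getEl(2, 1, &ex)) // 7 (row 2, column 1)
fmt.Println(getEl(0, 1, &ex)) // 0 (not stored, so getEl returns 0)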
I found a solution by using a struct holding the sum of each row's elements together with the row index. The solution turned out to be simpler than expected; I was only lacking practice with sparse matrices. The stored position stringsInfo[i].pos is then passed to getEl as its first parameter when printing the sorted matrix.
package main

import (
    "fmt"
    "sort"
)

// CSC (CCS) matrix structure
type CSC struct {
    // Array of values, column pointers, row indices
    a, lj, li []int
}

// Getting access to an element
func getEl(i, j int, el *CSC) int {
    for k := el.lj[j]; k < el.lj[j+1]; k++ {
        // If the stored row index equals the row we are looking for, the element is found
        if el.li[k] == i {
            return el.a[k]
        }
    }
    // Otherwise return 0; it will appear as the zero element of the matrix
    return 0
}

func maxSliceEl(lj []int) int {
    max := 0
    for _, v := range lj {
        if v > max {
            max = v
        }
    }
    return max
}

type strInfo struct {
    summa int
    pos   int
}

func main() {
    // Set the CSC matrix
    ma := CSC{
        a:  []int{8, 2, 5, 7, 1, 9, 2},
        li: []int{0, 0, 1, 4, 4, 6, 4},
        lj: []int{0, 1, 1, 4, 6, 7},
    }
    // Define the number of columns
    n := len(ma.lj) + 1
    // Define the number of rows
    m := maxSliceEl(ma.li) - 1
    fmt.Printf("Cols: %v, Rows: %v\n", m, n)
    // A slice of structs holding the sum of each row
    // and the index of that row
    var stringsInfo []strInfo
    fmt.Println("Initial matrix:")
    for i := 0; i < n; i++ {
        sumStrEl := 0
        for j := 0; j < m; j++ {
            sumStrEl += getEl(i, j, &ma)
            fmt.Print(getEl(i, j, &ma), " ")
        }
        fmt.Println("|", sumStrEl)
        // Add a cell with the sum and the row index to the slice
        var strI strInfo
        strI.summa = sumStrEl
        strI.pos = i
        stringsInfo = append(stringsInfo, strI)
    }
    fmt.Println("stringsInfo: ", stringsInfo)
    // Sort the stringsInfo slice in ascending order of the sums
    sort.Slice(stringsInfo, func(i, j int) (less bool) {
        return stringsInfo[i].summa < stringsInfo[j].summa
    })
    fmt.Println("stringsInfo: ", stringsInfo)
    fmt.Println("Sorted matrix:")
    for i := range stringsInfo {
        for j := 0; j < m; j++ {
            // Output the matrix rows by index stringsInfo[i].pos
            fmt.Print(getEl(stringsInfo[i].pos, j, &ma), " ")
        }
        fmt.Println("|", stringsInfo[i].summa)
    }
}

Pass and modify 2d slice by reference

I'm trying to figure out how to change a multidimensional slice by reference.
func main() {
    matrix := [][]int{
        {1, 0, 0},
        {1, 0, 0},
        {0, 1, 1},
    }
    fmt.Println("Before")
    printMatrix(matrix)
    changeMatrixByReference(&matrix)
    fmt.Println("After")
    printMatrix(matrix)
}

func changeMatrixByReference(matrix *[][]int) {
    //&matrix[0][0] = 3
}

func printMatrix(matrix [][]int) {
    for i := 0; i < len(matrix); i++ {
        for j := 0; j < len(matrix[0]); j++ {
            fmt.Printf("%d", matrix[i][j])
        }
        fmt.Println("")
    }
}
How can I change the matrix 2d slice inside the function changeMatrixByReference? I expect matrix[0][0] to become 3 when printMatrix runs the second time.
To set matrix[0][0] to 3, use pointer dereferencing:
(*matrix)[0][0] = 3
Try this:
package main

import "fmt"

func main() {
    matrix := [][]int{
        {1, 0, 0},
        {1, 0, 0},
        {0, 1, 1},
    }
    fmt.Println("Before")
    printMatrix(matrix)
    changeMatrixByReference(&matrix)
    fmt.Println("After")
    printMatrix(matrix)
}

func changeMatrixByReference(matrix *[][]int) {
    (*matrix)[0][0] = 3
}

func printMatrix(matrix [][]int) {
    for i := 0; i < len(matrix); i++ {
        for j := 0; j < len(matrix[0]); j++ {
            fmt.Printf("%d", matrix[i][j])
        }
        fmt.Println("")
    }
}
As long as you don't modify the slice header (for example by appending elements), you don't need a pointer: elements accessed by their index are stored in a backing array, and the slice header already holds a pointer to that array for you.
Try this:
package main

import "fmt"

func main() {
    matrix := [][]int{
        {1, 0, 0},
        {1, 0, 0},
        {0, 1, 1},
    }
    fmt.Println("Before")
    printMatrix(matrix)
    changeMatrixByReference(matrix)
    fmt.Println("After")
    printMatrix(matrix)
}

func changeMatrixByReference(matrix [][]int) {
    matrix[0][0] = 3
}

func printMatrix(matrix [][]int) {
    for i := 0; i < len(matrix); i++ {
        for j := 0; j < len(matrix[0]); j++ {
            fmt.Printf("%d", matrix[i][j])
        }
        fmt.Println("")
    }
}
Output:
Before
100
100
011
After
300
100
011
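If, on the other hand, the function does need to change the slice header itself, for example to add a row, then a pointer (or returning the new slice) is required. A minimal sketch of that case, assuming the same matrix and printMatrix as above:
// addRow appends a row to the matrix. append changes the slice header
// (length, and possibly the backing array), so the caller's variable has
// to be updated through the pointer.
func addRow(matrix *[][]int, row []int) {
    *matrix = append(*matrix, row)
}

// Usage:
//   addRow(&matrix, []int{1, 1, 1})
//   printMatrix(matrix) // now shows the extra row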

Most efficient method of finding the number of common factors of two numbers

I have two numbers, for example 12 and 16.
The factors of 12 are 1, 2, 3, 4, 6, 12.
The factors of 16 are 1, 2, 4, 8, 16.
The common factors of these two numbers are 1, 2, and 4.
So the number of common factors is 3. I need to write a Go program that finds the number of common factors of two numbers. The program should be efficient, with a minimum number of loops or no loops at all.
I will provide my code; you can also contribute and suggest other, better methods.
package main

import "fmt"

var (
    fs    []int64
    fd    []int64
    count int
)

func main() {
    commonFactor(16, 12)
    commonFactor(5, 10)
}

func commonFactor(num ...int64) {
    count = 0
    if num[0] < 1 || num[1] < 1 {
        fmt.Println("\nFactors not computed")
        return
    }
    for _, val := range num {
        fs = make([]int64, 1)
        fmt.Printf("\nFactors of %d: ", val)
        fs[0] = 1
        apf := func(p int64, e int) {
            n := len(fs)
            for i, pp := 0, p; i < e; i, pp = i+1, pp*p {
                for j := 0; j < n; j++ {
                    fs = append(fs, fs[j]*pp)
                }
            }
        }
        e := 0
        for ; val&1 == 0; e++ {
            val >>= 1
        }
        apf(2, e)
        for d := int64(3); val > 1; d += 2 {
            if d*d > val {
                d = val
            }
            for e = 0; val%d == 0; e++ {
                val /= d
            }
            if e > 0 {
                apf(d, e)
            }
        }
        if fd == nil {
            fd = fs
        }
        fmt.Println(fs)
    }
    for _, i := range fs {
        for _, j := range fd {
            if i == j {
                count++
            }
        }
    }
    fmt.Println("Number of common factors =", count)
}
The output is:
Factors of 16: [1 2 4 8 16]
Factors of 12: [1 2 4 3 6 12]
Number of common factors = 3
Factors of 5: [1 5]
Factors of 10: [1 2 5 10]
Number of common factors = 2
Goplayground
Answer 1, with no loops, just recursion:
package main

import (
    "fmt"
    "os"
    "strconv"
)

func factors(n int, t int, res *[]int) *[]int {
    if t != 0 {
        if (n/t)*t == n {
            temp := append(*res, t)
            res = &temp
        }
        res = factors(n, t-1, res)
    }
    return res
}

func cf(l1 []int, l2 []int, res *[]int) *[]int {
    if len(l1) > 0 && len(l2) > 0 {
        v1 := l1[0]
        v2 := l2[0]
        if v1 == v2 {
            temp := append(*res, v1)
            res = &temp
            l2 = l2[1:]
        }
        if v2 > v1 {
            l2 = l2[1:]
        } else {
            l1 = l1[1:]
        }
        res = cf(l1, l2, res)
    }
    return res
}

func main() {
    n, err := strconv.Atoi(os.Args[1])
    n2, err := strconv.Atoi(os.Args[2])
    if err != nil {
        fmt.Println("give a number")
        panic(err)
    }
    factorlist1 := factors(n, n, &[]int{})
    factorlist2 := factors(n2, n2, &[]int{})
    fmt.Printf("factors of %d %v\n", n, factorlist1)
    fmt.Printf("factors of %d %v\n", n2, factorlist2)
    common := cf(*factorlist1, *factorlist2, &[]int{})
    fmt.Printf("number of common factors = %d\n", len(*common))
}
However, this blows up with larger numbers such as 42512703.
Replacing the functions that do the work with iterative versions copes with bigger numbers:
func factors(n int) []int {
    res := []int{}
    for t := n; t > 0; t-- {
        if (n/t)*t == n {
            res = append(res, t)
        }
    }
    return res
}

func cf(l1 []int, l2 []int) []int {
    res := []int{}
    for len(l1) > 0 && len(l2) > 0 {
        v1 := l1[0]
        v2 := l2[0]
        if v1 == v2 {
            res = append(res, v1)
            l2 = l2[1:]
        }
        if v2 > v1 {
            l2 = l2[1:]
        } else {
            l1 = l1[1:]
        }
    }
    return res
}

func Divisors(n int) int {
    prev := []int{}
    for i := n; i > 0; i-- {
        prev = append(prev, i)
    }
    var res int
    for j := prev[0]; j > 0; j-- {
        if n%j == 0 {
            res++
        }
    }
    return res
}
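Another way to look at the problem, sketched here rather than taken from the answers above: the common factors of two positive integers are exactly the divisors of their greatest common divisor, so it is enough to compute gcd(a, b) and count its divisors up to its square root.
// countCommonFactors returns how many positive integers divide both a and b.
// It assumes a and b are positive. The common factors of a and b are exactly
// the divisors of gcd(a, b), so counting divisors of the gcd is enough.
func countCommonFactors(a, b int64) int {
    // Euclid's algorithm for the gcd.
    g := a
    for n := b; n != 0; {
        g, n = n, g%n
    }
    // Count divisors of g in O(sqrt(g)) time.
    count := 0
    for d := int64(1); d*d <= g; d++ {
        if g%d == 0 {
            count += 2 // d and g/d
            if d*d == g {
                count-- // perfect square: d == g/d, count it once
            }
        }
    }
    return count
}

// countCommonFactors(16, 12) == 3, countCommonFactors(5, 10) == 2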

Change slice content and capacity inside a function in-place

I am trying to learn Go, so here is my very simple function for removing adjacent duplicates from a slice, an exercise from the book by Donovan & Kernighan.
Here is the code: https://play.golang.org/p/avHc1ixfck
package main

import "fmt"

func main() {
    a := []int{0, 1, 1, 3, 3, 3}
    removeDup(a)
    fmt.Println(a)
}

func removeDup(s []int) {
    n := len(s)
    tmp := make([]int, 0, n)
    tmp = append(tmp, s[0])
    j := 1
    for i := 1; i < n; i++ {
        if s[i] != s[i-1] {
            tmp = append(tmp, s[i])
            j++
        }
    }
    s = s[:len(tmp)]
    copy(s, tmp)
}
It should print out [0 1 3], and I checked that tmp actually has the desired form at the end of the function. However, the result is [0 1 3 3 3 3]. I guess the issue is with the copy function.
Can I somehow replace the input slice s with tmp, or trim it to the desired length?
Option 1
Return a new slice as suggested by #zerkms.
https://play.golang.org/p/uGJiD3WApS
package main

import "fmt"

func main() {
    a := []int{0, 1, 1, 3, 3, 3}
    a = removeDup(a)
    fmt.Println(a)
}

func removeDup(s []int) []int {
    n := len(s)
    tmp := make([]int, 0, n)
    tmp = append(tmp, s[0])
    for i := 1; i < n; i++ {
        if s[i] != s[i-1] {
            tmp = append(tmp, s[i])
        }
    }
    return tmp
}
Option 2
Use pointers for pass-by-reference.
In effect, the same thing as option 1.
https://play.golang.org/p/80bE5Qkuuj
package main

import "fmt"

func main() {
    a := []int{0, 1, 1, 3, 3, 3}
    removeDup(&a)
    fmt.Println(a)
}

func removeDup(sp *[]int) {
    s := *sp
    n := len(s)
    tmp := make([]int, 0, n)
    tmp = append(tmp, s[0])
    for i := 1; i < n; i++ {
        if s[i] != s[i-1] {
            tmp = append(tmp, s[i])
        }
    }
    *sp = tmp
}
Also, refer to following SO thread:
Does Go have no real way to shrink a slice? Is that an issue?
Here are two more slightly different ways to achieve what you want, using sets and named types. The nice thing about named types is that you can create interfaces around them, which can help with the readability of a lot of code.
package main

import "fmt"

func main() {
    // returning a list
    a := []int{0, 1, 1, 3, 3, 3}
    clean := removeDup(a)
    fmt.Println(clean)

    // creating and using a named type
    nA := &newArrType{0, 1, 1, 3, 3, 3}
    nA.removeDup2()
    fmt.Println(nA)

    // or... casting your original array to the named type
    nB := newArrType(a)
    nB.removeDup2()
    fmt.Println(nB)
}

// using a set
// order is not kept, but a set is returned
func removeDup(s []int) (newArr []int) {
    set := make(map[int]struct{})
    for _, n := range s {
        set[n] = struct{}{}
    }
    newArr = make([]int, 0, len(set))
    for k := range set {
        newArr = append(newArr, k)
    }
    return
}

// using a named type
type newArrType []int

func (a *newArrType) removeDup2() {
    x := *a
    for i := range x {
        f := i + 1
        if f < len(x) {
            if x[i] == x[f] {
                x = x[:f+copy(x[f:], x[f+1:])]
            }
        }
    }
    // check the last 2 indexes
    if x[len(x)-2] == x[len(x)-1] {
        x = x[:len(x)-1+copy(x[len(x)-1:], x[len(x)-1+1:])]
    }
    *a = x
}
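One more variant, sketched here for comparison rather than taken from the answers above, is the usual in-place filtering idiom: overwrite duplicates inside the original backing array and return the shortened slice. No temporary slice is allocated, but the caller still has to use the returned value, because the function cannot shrink the caller's slice header.
func removeDupInPlace(s []int) []int {
    if len(s) == 0 {
        return s
    }
    j := 0 // index of the last unique element written so far
    for i := 1; i < len(s); i++ {
        if s[i] != s[j] {
            j++
            s[j] = s[i]
        }
    }
    return s[:j+1]
}

// Usage: a = removeDupInPlace(a) // [0 1 1 3 3 3] -> [0 1 3]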

How to solve Spiral Matrix in go

https://leetcode.com/problems/spiral-matrix/
My Go implementation fails with the following result:
Run Code Status: Runtime Error
Run Code Result: ×
Your input: []
Your answer:
Expected answer: []
Why is [] among the test cases? It's just a one-dimensional slice.
My code is:
func sprial(begin_r, begin_c, row, col int, matrix [][]int) []int {
    s := make([]int, col*row, col*row+10)
    k := 0
    if row == 1 && col == 1 {
        s[k] = matrix[begin_r][begin_c]
        return s
    } else if row == 1 {
        return matrix[begin_r][begin_c : col-1]
    } else if col == 1 {
        return matrix[begin_r : row-1][begin_c]
    } else {
        for i := begin_c; i < col; i++ {
            s[k] = matrix[begin_r][i]
            k++
        }
        for i := begin_r + 1; i < row; i++ {
            s[k] = matrix[i][col-1]
            k++
        }
        for i := col - 2; i >= begin_c; i-- {
            s[k] = matrix[row-1][i]
            k++
        }
        for i := row - 2; i >= begin_r+1; i-- {
            s[k] = matrix[i][begin_c]
            k++
        }
        return s[:k-1]
    }
}

func spiralOrder(matrix [][]int) []int {
    m := len(matrix)
    n := len(matrix[0])
    i := 0
    j := 0
    // var rS []int
    k := 0
    // s1 := make([]int, m*n, m*n)
    var s1 = []int{}
    for {
        if m <= 0 || n <= 0 {
            break
        }
        s := sprial(i, j, m, n, matrix)
        if k == 0 {
            s1 = s
        } else {
            s1 = append(s1, s...)
        }
        i++
        j++
        m -= 2
        n -= 2
        k++
    }
    return s1
}
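The runtime error comes from that empty test case: LeetCode also feeds spiralOrder the empty matrix [], and len(matrix[0]) then panics with an index out of range. The solution below guards against that case first and walks the spiral with a closure that switches direction at the boundaries: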
func spiralOrder(matrix [][]int) []int {
    if len(matrix) == 0 || len(matrix[0]) == 0 {
        return nil
    }
    m, n := len(matrix), len(matrix[0])
    next := nextFunc(m, n)
    res := make([]int, m*n)
    for i := range res {
        x, y := next()
        res[i] = matrix[x][y]
    }
    return res
}

func nextFunc(m, n int) func() (int, int) {
    top, down := 0, m-1
    left, right := 0, n-1
    x, y := 0, -1
    dx, dy := 0, 1
    return func() (int, int) {
        x += dx
        y += dy
        switch {
        case y+dy > right:
            top++
            dx, dy = 1, 0
        case x+dx > down:
            right--
            dx, dy = 0, -1
        case y+dy < left:
            down--
            dx, dy = -1, 0
        case x+dx < top:
            left++
            dx, dy = 0, 1
        }
        return x, y
    }
}
Source: https://github.com/aQuaYi/LeetCode-in-Go/blob/master/Algorithms/0054.spiral-matrix/spiral-matrix.go
This repository has most of the solutions to LeetCode problems in a very optimal manner. Please do take a look. Hope it helps.
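A quick check of the function above (assuming package main, import "fmt", and the spiralOrder/nextFunc definitions) on the classic 3x3 example and on the empty input from the failing test case:
func main() {
    matrix := [][]int{
        {1, 2, 3},
        {4, 5, 6},
        {7, 8, 9},
    }
    fmt.Println(spiralOrder(matrix))    // [1 2 3 6 9 8 7 4 5]
    fmt.Println(spiralOrder([][]int{})) // the empty LeetCode case: prints []
}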
