I'm trying to figure out which random algorithm Array#sample uses, but I'm getting a bit lost hunting around in the Ruby C code.
I find it helpful to dig into the actual source from pry (provided you've done gem install pry-doc). You can also find it on Ruby Docs: hover over a method name to reveal the "click to toggle source" link with the magnifying glass, which shows the same source code you can see here in pry.
arr = []
cd arr
show-method sample
From: array.c (C Method):
Owner: Array
Visibility: public
Number of lines: 103
static VALUE
rb_ary_sample(int argc, VALUE *argv, VALUE ary)
{
VALUE nv, result;
VALUE opts, randgen = rb_cRandom;
long n, len, i, j, k, idx[10];
long rnds[numberof(idx)];
if (OPTHASH_GIVEN_P(opts)) {
VALUE rnd;
ID keyword_ids[1];
keyword_ids[0] = id_random;
rb_get_kwargs(opts, keyword_ids, 0, 1, &rnd);
if (rnd != Qundef) {
randgen = rnd;
}
}
len = RARRAY_LEN(ary);
if (argc == 0) {
if (len < 2)
i = 0;
else
i = RAND_UPTO(len);
return rb_ary_elt(ary, i);
}
rb_scan_args(argc, argv, "1", &nv);
n = NUM2LONG(nv);
if (n < 0) rb_raise(rb_eArgError, "negative sample number");
if (n > len) n = len;
if (n <= numberof(idx)) {
for (i = 0; i < n; ++i) {
rnds[i] = RAND_UPTO(len - i);
}
}
k = len;
len = RARRAY_LEN(ary);
if (len < k && n <= numberof(idx)) {
for (i = 0; i < n; ++i) {
if (rnds[i] >= len) return rb_ary_new_capa(0);
}
}
if (n > len) n = len;
switch (n) {
case 0:
return rb_ary_new_capa(0);
case 1:
i = rnds[0];
return rb_ary_new_from_values(1, &RARRAY_AREF(ary, i));
case 2:
i = rnds[0];
j = rnds[1];
if (j >= i) j++;
return rb_ary_new_from_args(2, RARRAY_AREF(ary, i), RARRAY_AREF(ary, j));
case 3:
i = rnds[0];
j = rnds[1];
k = rnds[2];
{
long l = j, g = i;
if (j >= i) l = i, g = ++j;
if (k >= l && (++k >= g)) ++k;
}
return rb_ary_new_from_args(3, RARRAY_AREF(ary, i), RARRAY_AREF(ary, j), RARRAY_AREF(ary, k));
}
if (n <= numberof(idx)) {
long sorted[numberof(idx)];
sorted[0] = idx[0] = rnds[0];
for (i=1; i<n; i++) {
k = rnds[i];
for (j = 0; j < i; ++j) {
if (k < sorted[j]) break;
++k;
}
memmove(&sorted[j+1], &sorted[j], sizeof(sorted[0])*(i-j));
sorted[j] = idx[i] = k;
}
result = rb_ary_new_capa(n);
RARRAY_PTR_USE(result, ptr_result, {
for (i=0; i<n; i++) {
ptr_result[i] = RARRAY_AREF(ary, idx[i]);
}
});
}
else {
result = rb_ary_dup(ary);
RBASIC_CLEAR_CLASS(result);
RB_GC_GUARD(ary);
RARRAY_PTR_USE(result, ptr_result, {
for (i=0; i<n; i++) {
j = RAND_UPTO(len-i) + i;
nv = ptr_result[j];
ptr_result[j] = ptr_result[i];
ptr_result[i] = nv;
}
});
RBASIC_SET_CLASS_RAW(result, rb_cArray);
}
ARY_SET_LEN(result, n);
return result;
}
So we can see:
VALUE opts, randgen = rb_cRandom;
This indicates that Ruby's Random class (implemented in C) is used as the source of randomness. The code alone doesn't tell us much about the algorithm, though.
So a lookup of https://ruby-doc.org/core-2.5.0/Random.html tells us:
PRNGs are currently implemented as a modified Mersenne Twister with a period of 2**19937-1.
And what the heck is a Mersenne Twister? I don't know, but it sounds cool 😎, so https://en.wikipedia.org/wiki/Mersenne_Twister tells us about it and that it is extensively used:
The Mersenne Twister is the default PRNG for the following software systems: Microsoft Excel,[3] GAUSS,[4] GLib,[5] GNU Multiple Precision Arithmetic Library,[6] GNU Octave,[7] GNU Scientific Library,[8] gretl,[9] IDL,[10] Julia,[11] CMU Common Lisp,[12] Embeddable Common Lisp,[13] Steel Bank Common Lisp,[14] Maple,[15] MATLAB,[16] Free Pascal,[17] PHP,[18] Python,[19][20] R,[21] Ruby,[22] SageMath,[23] Scilab,[24] Stata.[25]
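If you want to see MT19937 in action, C++'s <random> header ships the same Mersenne Twister as std::mt19937. A minimal sketch (my own illustration, not Ruby's actual implementation) of drawing a uniform index, roughly the way RAND_UPTO(len) does:

#include <iostream>
#include <random>
#include <vector>

int main() {
    std::mt19937 gen(42);  // MT19937, period 2**19937-1, here with a fixed seed
    std::vector<int> arr = {10, 20, 30, 40, 50};

    // Pick a uniform index in [0, arr.size() - 1], analogous to RAND_UPTO(len).
    std::uniform_int_distribution<std::size_t> dist(0, arr.size() - 1);
    std::cout << "sample: " << arr[dist(gen)] << "\n";
}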
Related
Given an array and some value X, find the number of pairs (i, j) such that i < j, a[i] = a[j] and (i * j) % X == 0.
Array size <= 10^5
I have been thinking about this problem for a while but could only come up with the brute-force solution (checking all pairs), which will obviously time out [O(N^2) time complexity].
Any better approach?
First of all, store separate search structures for each distinct A[i] as we iterate.
i * j = k * X
i = k * X / j
For i to be an integer, k * X must be divisible by j, i.e. k * X must be a multiple of lcm(X, j); so k is of the form m * lcm(X, j) / X, where m is a natural number. Hence i = m * lcm(X, j) / j = m * (X / gcd(X, j)) = m * q.
Example 1: j = 20, X = 60:
lcm(60, 20) = 60
matching `i`s would be of the form:
(m * 60 / 60) * 60 / 20
=> m * q, where q = 3
Example 2: j = 6, X = 2:
lcm(2, 6) = 6
matching `i`s would be of the form:
(m * 6 / 2) * 2 / 6
=> m * q, where q = 1
Next, consider how to efficiently query the number of multiples of q among the indices added so far. One way is to hash the frequency of the divisors of each index i as we add it to the search structure for A[i]. When we reach an index, we first treat it as j: add to the result the count stored for the divisor q in the hash map, and only then add the index's divisors to the map.
JavaScript code with brute force testing at the end:
function gcd(a, b){
return b ? gcd(b, a % b) : a;
}
function getQ(X, j){
return X / gcd(X, j);
}
function addDivisors(n, map){
let m = 1;
while (m*m <= n){
if (n % m == 0){
map[m] = -~map[m];
const l = n / m;
if (l != m)
map[l] = -~map[l];
}
m += 1;
}
}
function f(A, X){
const Ais = {};
let result = 0;
for (let j=1; j<A.length; j++){
if (A[j] == A[0])
result += 1;
// Search
if (Ais.hasOwnProperty(A[j])){
const q = getQ(X, j);
result += Ais[A[j]][q] || 0;
// Initialise this value's
// search structure
} else {
Ais[A[j]] = {};
}
// Add divisors for j
addDivisors(j, Ais[A[j]]);
}
return result;
}
function bruteForce(A, X){
let result = 0;
for (let j=1; j<A.length; j++){
for (let i=0; i<j; i++){
if (A[i] == A[j] && (i*j % X) == 0)
result += 1;
}
}
return result;
}
var numTests = 1000;
var n = 100;
var m = 50;
var x = 100;
for (let i=0; i<numTests; i++){
const A = [];
for (let j=0; j<n; j++)
A.push(Math.ceil(Math.random() * m));
const X = Math.ceil(Math.random() * x);
const _brute = bruteForce(A, X);
const _f = f(A, X);
if (_brute != _f){
console.log("Mismatch!");
console.log(X, JSON.stringify(A));
console.log(_brute, _f);
break;
}
}
console.log("Done testing.")
Just in case someone needs the Java version of this answer: https://stackoverflow.com/a/69690416/19325755. The explanation has been provided in that answer.
I spent a lot of time understanding the JavaScript code, so I thought people who are comfortable with Java could refer to this for a better understanding.
import java.util.HashMap;
public class ThisProblem {
public static void main(String[] args) {
int t = 1000;
int n = 100;
int m = 50;
int x = 100;
for(int i = 0; i<t; i++) {
int[] A = new int[n];
for(int j = 0; j<n; j++) {
A[j] = (int)(Math.random()*m)+1;
}
int X = (int)(Math.random()*x)+1;
int optR = createMaps(A, X);
int brute = bruteForce(A, X);
if(optR != brute) {
System.out.println("Wrong Answer");
break;
}
}
System.out.println("Test Completed");
}
public static int bruteForce(int[] A, int X) {
int result = 0;
int n = A.length;
for(int i = 1; i<n; i++) {
for(int j = 0; j<i; j++) {
if(A[i] == A[j] && (i*j)%X == 0)
result++;
}
}
return result;
}
public static int gcd(int a, int b) {
return b==0 ? a : gcd(b, a%b);
}
public static int getQ(int X, int j) {
return X/gcd(X, j);
}
public static void addDivisors(int n, HashMap<Integer, Integer> map) {
int m = 1;
while(m*m <= n) {
if(n%m == 0) {
map.put(m, map.getOrDefault(m, 0)+1);
int l = n/m;
if(l != m) {
map.put(l, map.getOrDefault(l, 0)+1);
}
}
m++;
}
}
public static int createMaps(int[] A, int X) {
int result = 0;
HashMap<Integer, HashMap<Integer, Integer>> contentsOfA = new HashMap<>();
int n = A.length;
for(int i = 1; i<n; i++) {
if(A[i] == A[0])
result++;
if(contentsOfA.containsKey(A[i])) {
int q = getQ(X, i);
result += contentsOfA.get(A[i]).getOrDefault(q, 0);
} else {
contentsOfA.put(A[i], new HashMap<>());
}
addDivisors(i, contentsOfA.get(A[i]));
}
return result;
}
}
Given an array of integers, we have to find the length of the longest subsequence of integers such that the gcd of any two consecutive elements in the sequence is greater than 1.
for ex: if array = [12, 8, 2, 3, 6, 9]
then one such subsequence can be = {12, 8, 2, 6, 9}
other one can be= {12, 3, 6, 9}
I tried to solve this problem by dynamic programming. Assume that maxCount is the array such that maxCount[i] will have the length of such longest subsequence
ending at index i.
maxCount[0]=1 ;
for(i=1; i<N; i++)
{
max = 1 ;
for(j=i-1; j>=0; j--)
{
if(gcd(arr[i], arr[j]) > 1)
{
temp = maxCount[j] + 1 ;
if(temp > max)
max = temp ;
}
}
maxCount[i]=max;
}
max = 0;
for(i=0; i<N; i++)
{
if(maxCount[i] > max)
max = maxCount[i] ;
}
cout<<max<<endl ;
But this approach is getting a timeout, as its time complexity is O(N^2). Can we improve the time complexity?
The condition "gcd is greater than 1" means that numbers have at least one common divisor. So, let dp[i] equals to the length of longest sequence finishing on a number divisible by i.
int n;
cin >> n;
const int MAX_NUM = 100 * 1000;
static int dp[MAX_NUM];
for(int i = 0; i < n; ++i)
{
int x;
cin >> x;
int cur = 1;
vector<int> d;
for(int i = 2; i * i <= x; ++i)
{
if(x % i == 0)
{
cur = max(cur, dp[i] + 1);
cur = max(cur, dp[x / i] + 1);
d.push_back(i);
d.push_back(x / i);
}
}
if(x > 1)
{
cur = max(cur, dp[x] + 1);
d.push_back(x);
}
for(int j : d)
{
dp[j] = cur;
}
}
cout << *max_element(dp, dp + MAX_NUM) << endl;
This solution has O(N * sqrt(MAX_NUM)) complexity. Actually, you can calculate dp values only for prime numbers. To implement this you should be able to get the prime factorization in less than O(N^0.5) time (this method, for example). That optimization should bring the complexity down to O(N * factorization + N log N). As a memory optimization, you can replace the dp array with a map or unordered_map.
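As an aside, here is a minimal sketch (my own, not part of the answer above) of the smallest-prime-factor sieve idea: precompute spf[x] once, after which any x up to MAX_NUM splits into its distinct prime factors in O(log x), so dp only needs entries for primes.

#include <iostream>
#include <vector>
using namespace std;

const int MAX_NUM = 100 * 1000;
vector<int> spf(MAX_NUM + 1, 0);   // spf[x] = smallest prime factor of x

void buildSieve() {                // run once, O(MAX_NUM log log MAX_NUM)
    for (int i = 2; i <= MAX_NUM; ++i)
        if (spf[i] == 0)           // i is prime
            for (int j = i; j <= MAX_NUM; j += i)
                if (spf[j] == 0) spf[j] = i;
}

vector<int> distinctPrimeFactors(int x) {  // O(log x) per query after the sieve
    vector<int> res;
    while (x > 1) {
        int p = spf[x];
        res.push_back(p);
        while (x % p == 0) x /= p;
    }
    return res;
}

int main() {
    buildSieve();
    for (int p : distinctPrimeFactors(12)) cout << p << ' ';  // prints: 2 3
    cout << '\n';
}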
GCD takes log m time, where m is the maximum number in the array. Therefore, using a Segment Tree and binary search, one can reduce the time complexity to O(n log (m² * n)) (with O(n log m) preprocessing). This list details other data structures that can be used for RMQ-type queries and to reduce the complexity further.
Here is one possible implementation of this:
#include <bits/stdc++.h>
using namespace std;
struct SegTree {
using ftype = function<int(int, int)>;
vector<int> vec;
int l, og, dummy;
ftype f;
template<typename T> SegTree(const vector<T> &v, const T &x, const ftype &func) : og(v.size()), f(func), l(1), dummy(x) {
assert(og >= 1);
while (l < og) l *= 2;
vec = vector<int>(l*2);
for (int i = l; i < l+og; i++) vec[i] = v[i-l];
for (int i = l+og; i < 2*l; i++) vec[i] = dummy;
for (int i = l-1; i >= 1; i--) {
if (vec[2*i] == dummy && vec[2*i+1] == dummy) vec[i] = dummy;
else if (vec[2*i] == dummy) vec[i] = vec[2*i+1];
else if (vec[2*i+1] == dummy) vec[i] = vec[2*i];
else vec[i] = f(vec[2*i], vec[2*i+1]);
}
}
SegTree() {}
void valid(int x) {assert(x >= 0 && x < og);}
int get(int a, int b) {
valid(a); valid(b); assert(b >= a);
a += l; b += l;
int s = vec[a];
a++;
while (a <= b) {
if (a % 2 == 1) {
if (vec[a] != dummy) s = f(s, vec[a]);
a++;
}
if (b % 2 == 0) {
if (vec[b] != dummy) s = f(s, vec[b]);
b--;
}
a /= 2; b /= 2;
}
return s;
}
void add(int x, int c) {
valid(x);
x += l;
vec[x] += c;
for (x /= 2; x >= 1; x /= 2) {
if (vec[2*x] == dummy && vec[2*x+1] == dummy) vec[x] = dummy;
else if (vec[2*x] == dummy) vec[x] = vec[2*x+1];
else if (vec[2*x+1] == dummy) vec[x] = vec[2*x];
else vec[x] = f(vec[2*x], vec[2*x+1]);
}
}
void update(int x, int c) {add(x, c-vec[x+l]);}
};
// Constructor (where val is something that an element in the array is
// guaranteed to never reach):
// SegTree st(vec, val, func);
// finds longest subsequence where GCD is greater than 1
int longest(const vector<int> &vec) {
int l = vec.size();
SegTree st(vec, -1, [](int a, int b){return __gcd(a, b);});
// checks if a certain length is valid in O(n log (m² * n)) time
auto valid = [&](int n) -> bool {
for (int i = 0; i <= l-n; i++) {
if (st.get(i, i+n-1) != 1) {
return true;
}
}
return false;
};
int length = 0;
// do a "binary search" on the best possible length
for (int i = l; i >= 1; i /= 2) {
while (length+i <= l && valid(length+i)) {
length += i;
}
}
return length;
}
In the draft of Section 7.2.1.3 of The Art of Computer Programming ("Generating All Combinations"), Knuth introduced Algorithm C for generating Chase's sequence.
He also mentioned a similar algorithm (based on the following equation) working with an index list, without source code (exercise 45 of the draft).
I finally worked out a C++ version which I think is quite ugly. To generate all C_n^m combinations, the memory complexity is about 3(m + 1) and the time complexity is bounded by O(m n^m).
class chase_generator_t{
public:
using size_type = ptrdiff_t;
enum class GET : char{ VALUE, INDEX };
chase_generator_t(size_type _n) : n(_n){}
void choose(size_type _m){
m = _m;
++_m;
index.resize(_m);
threshold.resize(_m + 1);
tag.resize(_m);
for (size_type i = 0, j = n - m; i != _m; ++i){
index[i] = j + i;
tag[i] = tag_t::DECREASE;
using std::max;
threshold[i] = max(i - 1, (index[i] - 3) | 1);
}
threshold[_m] = n;
}
bool get(size_type &x, size_type &y, GET const which){
if (which == GET::VALUE) return __get<false>(x, y);
return __get<true>(x, y);
}
size_type get_n() const{
return n;
}
size_type get_m() const{
return m;
}
size_type operator[](size_t const i) const{
return index[i];
}
private:
enum class tag_t : char{ DECREASE, INCREASE };
size_type n, m;
std::vector<size_type> index, threshold;
std::vector<tag_t> tag;
template<bool GetIndex>
bool __get(size_type &x, size_type &y){
using std::max;
size_type p = 0, i, q;
find:
q = p + 1;
if (index[p] == threshold[q]){
if (q >= m) return false;
p = q;
goto find;
}
x = GetIndex ? p : index[p];
if (tag[p] == tag_t::INCREASE){
using std::min;
increase:
index[p] = min(index[p] + 2, threshold[q]);
threshold[p] = index[p] - 1;
}
else if (index[p] && (i = (index[p] - 1) & ~1) >= p){
index[p] = i;
threshold[p] = max(p - 1, (index[p] - 3) | 1);
}
else{
tag[p] = tag_t::INCREASE;
i = p | 1;
if (index[p] == i) goto increase;
index[p] = i;
threshold[p] = index[p] - 1;
}
y = index[p];
for (q = 0; q != p; ++q){
tag[q] = tag_t::DECREASE;
threshold[q] = max(q - 1, (index[q] - 3) | 1);
}
return true;
}
};
Does anyone have a better implementation, i.e. one that runs faster with the same memory or uses less memory at the same speed?
I think that the C code below is closer to what Knuth had in mind. Undoubtedly there are ways to make it more elegant (in particular, I'm leaving some scaffolding in case it helps with experimentation), though I'm skeptical that the array w can be disposed of. If storage is really important for some reason, then steal the sign bit from the a array.
#include <stdbool.h>
#include <stdio.h>
enum {
N = 10,
T = 5
};
static void next(int a[], bool w[], int *r) {
bool found_r = false;
int j;
for (j = *r; !w[j]; j++) {
int b = a[j] + 1;
int n = a[j + 1];
if (b < (w[j + 1] ? n - (2 - (n & 1)) : n)) {
if ((b & 1) == 0 && b + 1 < n) b++;
a[j] = b;
if (!found_r) *r = j > 1 ? j - 1 : 0;
return;
}
w[j] = a[j] - 1 >= j;
if (w[j] && !found_r) {
*r = j;
found_r = true;
}
}
int b = a[j] - 1;
if ((b & 1) != 0 && b - 1 >= j) b--;
a[j] = b;
w[j] = b - 1 >= j;
if (!found_r) *r = j;
}
int main(void) {
typedef char t_less_than_n[T < N ? 1 : -1];
int a[T + 1];
bool w[T + 1];
for (int j = 0; j < T + 1; j++) {
a[j] = N - (T - j);
w[j] = true;
}
int r = 0;
do {
for (int j = T - 1; j > -1; j--) printf("%x", a[j]);
putchar('\n');
if (false) {
for (int j = T - 1; j > -1; j--) printf("%d", w[j]);
putchar('\n');
}
next(a, w, &r);
} while (a[T] == N);
}
I'm trying to construct an algorithm that runs in O(nb) time for the following input/question:
input: an array A[1..n] of n different integers and an integer b (I am assuming that the numbers in A are sequential, starting at 1 and ending at n, i.e. for n=4, A = [1,2,3,4]).
question: in how many ways can b be written as the sum of elements of the array when elements in A[] can only be used once?
I've kind of hit a wall on this one. I'm looking for some kind of recursive solution, but I don't see how to avoid using repeated numbers. For instance, if we start at 1 and store all the ways to make 1 (just 1), then 2 (just 2), then 3 (3, or 2+1), etc., it shouldn't be hard to see how many ways we can make larger numbers. But if, for instance, we take 5, we will see that it can be broken into 4+1, and 4 can be further broken down into 3+1, so then we would see 2 solutions (4+1 and 3+1+1), but one of those has a repeated number. Am I missing something obvious? Thanks so much!
Recursive and dynamic solutions in C:
#include <stddef.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
typedef unsigned char uchar;
typedef unsigned int uint;
typedef struct tAddend
{
struct tAddend* pPrev;
uint Value;
} tAddend;
void findRecursiveSolution(uint n, uint maxAddend, tAddend* pPrevAddend)
{
uint i;
for (i = maxAddend; ; i--)
{
if (n == 0)
{
while (pPrevAddend != NULL)
{
printf("+%u", pPrevAddend->Value);
pPrevAddend = pPrevAddend->pPrev;
}
printf("\n");
return;
}
if (n >= i && i > 0)
{
tAddend a;
a.pPrev = pPrevAddend;
a.Value = i;
findRecursiveSolution(n - i, i - 1, &a);
}
if (i <= 1)
{
break;
}
}
}
void printDynamicSolution(uchar** pTable, uint n, uint idx, uint sum, tAddend* pPrevAddend)
{
uchar el = pTable[idx][sum];
assert((el != 0) && (el != 5) && (el != 7));
if (el & 2) // 2,3,6 - other(s)
{
printDynamicSolution(pTable,
n,
idx - 1,
sum,
pPrevAddend);
}
if (el & 4) // self + other(s)
{
tAddend a;
a.pPrev = pPrevAddend;
a.Value = idx + 1;
printDynamicSolution(pTable,
n,
idx - 1,
sum - (idx + 1),
&a);
}
if (el & 1) // self, found a solution
{
tAddend a;
a.pPrev = pPrevAddend;
a.Value = idx + 1;
pPrevAddend = &a;
while (pPrevAddend != NULL)
{
printf("+%u", pPrevAddend->Value);
pPrevAddend = pPrevAddend->pPrev;
}
printf("\n");
}
}
void findDynamicSolution(uint n)
{
uchar** table;
uint i, j;
if (n == 0)
{
return;
}
// Allocate the DP table
table = malloc(sizeof(uchar*) * n);
if (table == NULL)
{
printf("not enough memory\n");
return;
}
for (i = 0; i < n; i++)
{
table[i] = malloc(n + 1);
if (table[i] == NULL)
{
while (i > 0)
{
free(table[--i]);
}
free(table);
printf("not enough memory\n");
return;
}
}
// Fill in the DP table
for (i = 0; i < n; i++)
{
for (j = 0; j <= n; j++)
{
if (i == 0)
{
table[i][j] = (i + 1 == j); // self
}
else
{
table[i][j] = (i + 1 == j) + // self
2 * (table[i - 1][j] != 0) + // other(s)
4 * ((j >= i + 1) && (table[i - 1][j - (i + 1)] != 0)); // self + other(s)
}
}
}
printDynamicSolution(table, n, n - 1, n, NULL);
for (i = 0; i < n; i++)
{
free(table[i]);
}
free(table);
}
int main(int argc, char** argv)
{
uint n;
if (argc != 2 || sscanf(argv[1], "%u", &n) != 1)
{
n = 10;
}
printf("Recursive Solution:\n");
findRecursiveSolution(n, n, NULL);
printf("\nDynamic Solution:\n");
findDynamicSolution(n);
return 0;
}
Output:
for 10:
Recursive Solution:
+10
+1+9
+2+8
+3+7
+1+2+7
+4+6
+1+3+6
+1+4+5
+2+3+5
+1+2+3+4
Dynamic Solution:
+1+2+3+4
+2+3+5
+1+4+5
+1+3+6
+4+6
+1+2+7
+3+7
+2+8
+1+9
+10
See also on ideone.
Let F(x,i) be the number of ways elements of A[1:i] can be summed to get x.
F(x,i+1) = F(x-A[i+1],i) + F(x,i)
That is it!
Computed bottom-up over i, this becomes a non-recursive dynamic programming solution.
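To make the recurrence concrete, here is a minimal bottom-up sketch of it (my own, in C++ since the thread already mixes languages); iterating x downwards ensures each element of {1, ..., n} is used at most once:

#include <iostream>
#include <vector>

// Number of ways to write b as a sum of distinct elements of {1, ..., n}.
long long countWays(int n, int b) {
    std::vector<long long> ways(b + 1, 0);
    ways[0] = 1;                                  // the empty sum
    for (int value = 1; value <= n; ++value)      // consider each element once
        for (int x = b; x >= value; --x)          // go downwards so value isn't reused
            ways[x] += ways[x - value];           // F(x, i+1) = F(x - A[i+1], i) + F(x, i)
    return ways[b];
}

int main() {
    std::cout << countWays(10, 10) << "\n";       // prints 10, matching the listing above
}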
Assuming arr is sorted in your case, i.e. for indices [i....j] we have a[i] <= a[j], that's easy enough:
void summer(int[] arr, int n , int b)
{
int lowerbound = 0;
int upperbound = n-1;
while (lowerbound < upperbound)
{
if(arr[lowerbound]+arr[upperbound] == b)
{
// print arr[lowerbound] and arr[upperbound]
lowerbound++; upperbound--;
}
else if(arr[lowerbound]+arr[upperbound] < b)
lowerbound++;
else
upperbound--;
}
}
The above program is easily modifiable into a recursive one: you only need to change the function definition so that lowerbound and upperbound are passed as parameters.
The condition for continuing is still lowerbound < upperbound.
The base case is arr[lowerbound] + arr[upperbound] == b.
Edited based on comments
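For what it's worth, here is a minimal sketch of that recursive variant (my own, with the hypothetical name summerRec), passing lowerbound and upperbound as described above:

#include <iostream>
#include <vector>

// Recursive version of the two-pointer pair search sketched above.
void summerRec(const std::vector<int>& arr, int lowerbound, int upperbound, int b) {
    if (lowerbound >= upperbound) return;           // recursion terminates here
    int sum = arr[lowerbound] + arr[upperbound];
    if (sum == b) {                                 // base case from the description
        std::cout << arr[lowerbound] << " + " << arr[upperbound] << " = " << b << "\n";
        summerRec(arr, lowerbound + 1, upperbound - 1, b);
    } else if (sum < b) {
        summerRec(arr, lowerbound + 1, upperbound, b);
    } else {
        summerRec(arr, lowerbound, upperbound - 1, b);
    }
}

int main() {
    std::vector<int> arr = {1, 2, 3, 4, 5, 6};      // sorted, as assumed above
    summerRec(arr, 0, (int)arr.size() - 1, 7);      // prints the pairs summing to 7
}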
You will need to use a modified version of the integer knapsack problem. The values of i and j both need to be modified accordingly. You are most probably having the problem because you are not modifying your i carefully; increase your i accordingly and then there will not be repetition like the one you are having.
Can someone give an example of an algorithm for finding the greatest common divisor of more than two numbers?
I believe the programming language doesn't matter.
Start with the first pair and get their GCD, then take the GCD of that result and the next number. The obvious optimization is you can stop if the running GCD ever reaches 1. I'm watching this one to see if there are any other optimizations. :)
Oh, and this can be easily parallelized since the operations are commutative/associative.
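A minimal sketch of that fold-with-early-exit in C++ (my own illustration, using std::gcd from <numeric>, C++17):

#include <iostream>
#include <numeric>   // std::gcd
#include <vector>

int gcdOfList(const std::vector<int>& nums) {
    int g = 0;                        // gcd(0, x) == x, so 0 is a neutral starting value
    for (int x : nums) {
        g = std::gcd(g, x);
        if (g == 1) break;            // the running GCD can't shrink further, stop early
    }
    return g;
}

int main() {
    std::cout << gcdOfList({48, 16, 24, 96}) << "\n";   // prints 8
}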
The GCD of 3 numbers can be computed as gcd(a, b, c) = gcd(gcd(a, b), c). You can apply the Euclidean algorithm, the extended Euclidean algorithm or the binary GCD algorithm iteratively and get your answer. I'm not aware of any other (smarter?) ways to find a GCD, unfortunately.
A little late to the party I know, but a simple JavaScript implementation, utilising Sam Harwell's description of the algorithm:
function euclideanAlgorithm(a, b) {
if(b === 0) {
return a;
}
const remainder = a % b;
return euclideanAlgorithm(b, remainder)
}
function gcdMultipleNumbers(...args) { //ES6 used here, change as appropriate
const gcd = args.reduce((memo, next) => {
return euclideanAlgorithm(memo, next)}
);
return gcd;
}
gcdMultipleNumbers(48,16,24,96) //8
I just updated a Wiki page on this.
[https://en.wikipedia.org/wiki/Binary_GCD_algorithm#C.2B.2B_template_class]
This takes an arbitrary number of terms.
use GCD<int>(6, 5, 2, 30, 25, 90, 12); // the first argument is the number of terms
template<typename AType> AType GCD(int nargs, ...)
{
va_list arglist;
va_start(arglist, nargs);
AType *terms = new AType[nargs];
// put values into an array
for (int i = 0; i < nargs; i++)
{
terms[i] = va_arg(arglist, AType);
if (terms[i] < 0)
{
va_end(arglist);
return (AType)0;
}
}
va_end(arglist);
int shift = 0;
int numEven = 0;
int numOdd = 0;
int smallindex = -1;
do
{
numEven = 0;
numOdd = 0;
smallindex = -1;
// count number of even and odd
for (int i = 0; i < nargs; i++)
{
if (terms[i] == 0)
continue;
if (terms[i] & 1)
numOdd++;
else
numEven++;
if ((smallindex < 0) || terms[i] < terms[smallindex])
{
smallindex = i;
}
}
// check for exit
if (numEven + numOdd == 1)
continue;
// If everything in S is even, divide everything in S by 2, and then multiply the final answer by 2 at the end.
if (numOdd == 0)
{
shift++;
for (int i = 0; i < nargs; i++)
{
if (terms[i] == 0)
continue;
terms[i] >>= 1;
}
}
// If some numbers in S are even and some are odd, divide all the even numbers by 2.
if (numEven > 0 && numOdd > 0)
{
for (int i = 0; i < nargs; i++)
{
if (terms[i] == 0)
continue;
if ((terms[i] & 1) == 0)
terms[i] >>= 1;
}
}
//If every number in S is odd, then choose an arbitrary element of S and call it k.
//Replace every other element, say n, with | n−k | / 2.
if (numEven == 0)
{
for (int i = 0; i < nargs; i++)
{
if (i == smallindex || terms[i] == 0)
continue;
terms[i] = abs(terms[i] - terms[smallindex]) >> 1;
}
}
} while (numEven + numOdd > 1);
// only one remaining element multiply the final answer by 2s at the end.
for (int i = 0; i < nargs; i++)
{
if (terms[i] == 0)
continue;
return terms[i] << shift;
}
return 0;
};
For Go, using the remainder (Euclidean algorithm):
func GetGCD(a, b int) int {
for b != 0 {
a, b = b, a%b
}
return a
}
func GetGCDFromList(numbers []int) int {
var gdc = numbers[0]
for i := 1; i < len(numbers); i++ {
number := numbers[i]
gdc = GetGCD(gdc, number)
}
return gdc
}
In Java (not optimal):
public static int GCD(int[] a){
int j = 0;
boolean b=true;
for (int i = 1; i < a.length; i++) {
if(a[i]!=a[i-1]){
b=false;
break;
}
}
if(b)return a[0];
j=LeastNonZero(a);
System.out.println(j);
for (int i = 0; i < a.length; i++) {
if(a[i]!=j)a[i]=a[i]-j;
}
System.out.println(Arrays.toString(a));
return GCD(a);
}
public static int LeastNonZero(int[] a){
int b = 0;
for (int i : a) {
if(i!=0){
if(b==0||i<b)b=i;
}
}
return b;
}