Efficient algorithm for swapping elements to meet a specific condition

You are given two positive numbers N and K. Using the smallest number of swaps of any two digits of N, form two new numbers A and B such that the difference (A - B) equals K. If there is more than one solution, use the solution with the biggest A.
For example:
We have N = 9834216 and K = 8826. We swap 16 to form the new number 9168342, so A = 9168 and B = 342, and A - B = 9168 - 342 = 8826.

let N = 9834216, K = 8826;
let count = 0;

function fn(nArr, k, n) {
  if (n == 0) {
    let temp = -k;
    if (temp <= 0) {
      return;
    }
    let tArr = [...nArr];
    while (temp > 0) {
      let temp1 = temp % 10;
      let tId = tArr.indexOf(temp1);
      if (tId < 0) {
        return;
      }
      tArr.splice(tId, 1);
      temp = Math.floor(temp / 10);
    }
    if (tArr.length == 0) {
      console.log((K - k) + "-" + (-k) + "=" + K);
      count++;
    }
  } else {
    for (let i in nArr) {
      let tArr = [...nArr];
      tArr.splice(i, 1);
      fn(tArr, k - Math.pow(10, n - 1) * nArr[i], n - 1);
    }
  }
}

function getAB(N, K) {
  let nArr = [],
      n = N;
  while (n > 0) {
    nArr.push(n % 10);
    n = Math.floor(n / 10);
  }
  nArr.sort();
  nArr.reverse();
  for (let i = nArr.length; i > nArr.length / 2; i--) {
    fn(nArr, K, i);
  }
}

getAB(N, K);
console.log(count + " solutions");

Related

Express a given number as a sum of four squares

I am looking for an algorithm that expresses a given number as a sum of (up to) four squares.
Examples
       120 = 8² + 6² + 4² + 2²
       6 = 0² + 1² + 1² + 2²
       20 = 4² + 2² + 0² + 0²
My approach
Take the integer square root, subtract its square, and repeat this on the remainder:
while (count != 4) {
    root = (int) Math.sqrt(N)
    N -= root * root
    count++
}
But this fails when N is 23, even though there is a solution:
       3² + 3² + 2² + 1²
Question
Is there any other algorithm to do that?
Is it always possible?
### Always possible?
Yes, Lagrange's four-square theorem states that:
every natural number can be represented as the sum of four integer squares.
It has been proved in several ways.
### Algorithm
There are some smarter algorithms, but I would suggest the following algorithm:
Factorise the number into prime factors. They don't have to be prime, but the smaller they are, the better: so primes are best. Then solve the task for each of these factors as below, and combine any resulting 4 squares with the previously found 4 squares using Euler's four-square identity.
         (a² + b² + c² + d²)(A² + B² + C² + D²) =
                (aA + bB + cC + dD)² +
                (aB − bA + cD − dC)² +
                (aC − bD − cA + dB)² +
                (aD + bC − cB − dA)²
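As a quick sanity check of this identity, here is a small Python snippet (my own, not part of this answer's code; the function name euler_combine is mine) that verifies it on random integers:

import random

def euler_combine(a, b, c, d, A, B, C, D):
    # Returns the four numbers whose squares sum to
    # (a^2 + b^2 + c^2 + d^2) * (A^2 + B^2 + C^2 + D^2).
    return (a*A + b*B + c*C + d*D,
            a*B - b*A + c*D - d*C,
            a*C - b*D - c*A + d*B,
            a*D + b*C - c*B - d*A)

for _ in range(1000):
    a, b, c, d, A, B, C, D = (random.randint(-9, 9) for _ in range(8))
    lhs = (a*a + b*b + c*c + d*d) * (A*A + B*B + C*C + D*D)
    rhs = sum(x*x for x in euler_combine(a, b, c, d, A, B, C, D))
    assert lhs == rhs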
Given a number n (one of the factors mentioned above), get the greatest square that is not greater than n, and see if n minus this square can be written as the sum of three squares using Legendre's three-square theorem: it is possible if and only if this number is NOT of the form:
        4^a(8b+7)
If this square is not found suitable, try the next smaller one, ... until you find one. It is guaranteed there will be one, and most are found within a few retries.
Try to find an actual second square term in the same way as in step 1, but now test its viability using Fermat's theorem on sums of two squares, which in essence means that:
if all the prime factors of n congruent to 3 modulo 4 occur to an even exponent, then n is expressible as a sum of two squares. The converse also holds.
If this square is not found suitable, try the next smaller one, ... until you find one. It's guaranteed there will be one.
Now we have a remainder after subtracting two squares. Try subtracting a third square until that yields another square, which means we have a solution. This step can be improved by first factoring out the largest square divisor. Then when the two square terms are identified, each can then be multiplied again by the square root of that square divisor.
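For reference, here is a minimal Python sketch of the two feasibility tests used in steps 1 and 2 (my own illustration, not the answer's JavaScript below; the function names are mine):

def is_sum_of_three_squares(n):
    # Legendre: n is a sum of three squares iff n is NOT of the form 4^a (8b + 7).
    while n > 0 and n % 4 == 0:
        n //= 4
    return n % 8 != 7

def is_sum_of_two_squares(n):
    # Fermat: every prime factor p == 3 (mod 4) must occur to an even exponent.
    if n == 0:
        return True
    p = 2
    while p * p <= n:
        if n % p == 0:
            count = 0
            while n % p == 0:
                n //= p
                count += 1
            if p % 4 == 3 and count % 2 == 1:
                return False
        p += 1
    return n % 4 != 3  # any leftover prime factor appears with exponent 1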
This is roughly the idea. For finding prime factors there are several solutions. Below I will just use the Sieve of Eratosthenes.
This is JavaScript code, so you can run it immediately -- it will produce a random number as input and display it as the sum of four squares:
function divisor(n, factor) {
var divisor = 1;
while (n % factor == 0) {
n = n / factor;
divisor = divisor * factor;
}
return divisor;
}
function getPrimesUntil(n) {
// Prime sieve algorithm
var range = Math.floor(Math.sqrt(n)) + 1;
var isPrime = Array(n).fill(1);
var primes = [2];
for (var m = 3; m < range; m += 2) {
if (isPrime[m]) {
primes.push(m);
for (var k = m * m; k <= n; k += m) {
isPrime[k] = 0;
}
}
}
for (var m = range + 1 - (range % 2); m <= n; m += 2) {
if (isPrime[m]) primes.push(m);
}
return {
primes: primes,
factorize: function (n) {
var p, count, primeFactors;
// Trial division algorithm
if (n < 2) return [];
primeFactors = [];
for (p of this.primes) {
count = 0;
while (n % p == 0) {
count++;
n /= p;
}
if (count) primeFactors.push({value: p, count: count});
}
if (n > 1) {
primeFactors.push({value: n, count: 1});
}
return primeFactors;
}
}
}
function squareTerms4(n) {
var n1, n2, n3, n4, sq, sq1, sq2, sq3, sq4, primes, factors, f, f3, factors3, ok,
res1, res2, res3, res4;
primes = getPrimesUntil(n);
factors = primes.factorize(n);
res1 = n > 0 ? 1 : 0;
res2 = res3 = res4 = 0;
for (f of factors) { // For each of the factors:
n1 = f.value;
// 1. Find a suitable first square
for (sq1 = Math.floor(Math.sqrt(n1)); sq1>0; sq1--) {
n2 = n1 - sq1*sq1;
// A number can be written as a sum of three squares
// <==> it is NOT of the form 4^a(8b+7)
if ( (n2 / divisor(n2, 4)) % 8 !== 7 ) break; // found a possibility
}
// 2. Find a suitable second square
for (sq2 = Math.floor(Math.sqrt(n2)); sq2>0; sq2--) {
n3 = n2 - sq2*sq2;
// A number can be written as a sum of two squares
// <==> all its prime factors of the form 4a+3 have an even exponent
factors3 = primes.factorize(n3);
ok = true;
for (f3 of factors3) {
ok = (f3.value % 4 != 3) || (f3.count % 2 == 0);
if (!ok) break;
}
if (ok) break;
}
// To save time: extract the largest square divisor from the previous factorisation:
sq = 1;
for (f3 of factors3) {
sq *= Math.pow(f3.value, (f3.count - f3.count % 2) / 2);
f3.count = f3.count % 2;
}
n3 /= sq*sq;
// 3. Find a suitable third square
sq4 = 0;
// b. Find square for the remaining value:
for (sq3 = Math.floor(Math.sqrt(n3)); sq3>0; sq3--) {
n4 = n3 - sq3*sq3;
// See if this yields a sum of two squares:
sq4 = Math.floor(Math.sqrt(n4));
if (n4 == sq4*sq4) break; // YES!
}
// Incorporate the square divisor back into the step-3 result:
sq3 *= sq;
sq4 *= sq;
// 4. Merge this quadruple of squares with any previous
// quadruple we had, using the Euler square identity:
while (f.count--) {
[res1, res2, res3, res4] = [
Math.abs(res1*sq1 + res2*sq2 + res3*sq3 + res4*sq4),
Math.abs(res1*sq2 - res2*sq1 + res3*sq4 - res4*sq3),
Math.abs(res1*sq3 - res2*sq4 - res3*sq1 + res4*sq2),
Math.abs(res1*sq4 + res2*sq3 - res3*sq2 - res4*sq1)
];
}
}
// Return the 4 squares in descending order (for convenience):
return [res1, res2, res3, res4].sort( (a,b) => b-a );
}
// Produce the result for some random input number
var n = Math.floor(Math.random() * 1000000);
var solution = squareTerms4(n);
// Perform the sum of squares to see it is correct:
var check = solution.reduce( (a,b) => a+b*b, 0 );
if (check !== n) throw "FAILURE: difference " + n + " - " + check;
// Print the result
console.log(n + ' = ' + solution.map( x => x+'²' ).join(' + '));
The article by Michael Barr on the subject probably represents a more time-efficient method, but the text is intended more as a proof than as an algorithm. However, if you need more time-efficiency you could consider that, together with a more efficient factorisation algorithm.
It's always possible -- it's a theorem in number theory called "Lagrange's four square theorem."
To solve it efficiently: the paper Randomized algorithms in number theory (Rabin, Shallit) gives a method that runs in expected O((log n)^2) time.
There is interesting discussion about the implementation here: https://math.stackexchange.com/questions/483101/rabin-and-shallit-algorithm
Found via Wikipedia: Lagrange's four-square theorem.
Here is a solution, simple 4 nested loops:
max = square_root(N)
for(int i=0;i<=max;i++)
    for(int j=0;j<=max;j++)
        for(int k=0;k<=max;k++)
            for(int l=0;l<=max;l++)
                if(i*i+j*j+k*k+l*l==N){
                    found
                    break;
                }
So you can test any number. You can also add a break condition inside the loops: as soon as the partial sum exceeds N, break out early.
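A possible Python version of that pruned brute force (the function name is mine, only meant as an illustration) could look like this; each loop breaks as soon as its partial sum already exceeds N:

import math

def four_squares_bruteforce(N):
    # Returns one (i, j, k, l) with i^2 + j^2 + k^2 + l^2 == N,
    # pruning each loop once the partial sum exceeds N.
    max_root = math.isqrt(N)
    for i in range(max_root + 1):
        if i * i > N:
            break
        for j in range(max_root + 1):
            if i * i + j * j > N:
                break
            for k in range(max_root + 1):
                if i * i + j * j + k * k > N:
                    break
                for l in range(max_root + 1):
                    s = i * i + j * j + k * k + l * l
                    if s == N:
                        return (i, j, k, l)
                    if s > N:
                        break
    return None  # unreachable for N >= 0, by Lagrange's theorem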
const fourSquares = (n) => {
const result = [];
for (let i = 0; i <= n; i++) {
for (let j = 0; j <= n; j++) {
for (let k = 0; k <= n; k++) {
for (let l = 0; l <= n; l++) {
if (i * i + j * j + k * k + l * l === n) {
result.push(i, j, k, l);
return result;
}
}
}
}
}
return result;
}
It's running too long
const fourSquares = (n) => {
const result = [];
for (let i = 0; i <= n; i++) {
for (let j = 0; j <= (n - i * i); j++) {
for (let k = 0; k <= (n - i * i - j * j); k++) {
for (let l = 0; l <= (n - i * i - j * j - k * k); l++) {
if (i * i + j * j + k * k + l * l === n) {
result.push(i, j, k, l);
return result;
}
}
}
}
}
return result;
}
const fourSquares = (n) => {
const result = [];
for (let i = 0; i * i <= n; i++) {
for (let j = 0; j * j <= n; j++) {
for (let k = 0; k * k <= n; k++) {
for (let l = 0; l * l <= n; l++) {
if (i * i + j * j + k * k + l * l === n) {
result.push(i, j, k, l);
return result;
}
}
}
}
}
return result;
}
const fourSquares = (n) => {
let a = Math.sqrt(n);
let b = Math.sqrt(n - a * a);
let c = Math.sqrt(n - a * a - b * b);
let d = Math.sqrt(n - a * a - b * b - c * c);
if (n === a * a + b * b + c * c + d * d) {
return [a, b, c, d];
}
}

Find order statistic in union of 2 sorted lists on logarithmic time [duplicate]

This is a homework question; binary search has already been introduced:
Given two arrays with N and M elements respectively, each in ascending order and not necessarily unique:
What is a time-efficient algorithm to find the kth smallest element in the union of both arrays?
They say it takes O(log N + log M), where N and M are the array lengths.
Let's name the arrays a and b. Obviously we can ignore all a[i] and b[i] where i > k.
First let's compare a[k/2] and b[k/2]. Let b[k/2] > a[k/2]. Therefore we can discard also all b[i], where i > k/2.
Now we have all a[i], where i < k and all b[i], where i < k/2 to find the answer.
What is the next step?
I hope I am not answering your homework, as it has been over a year since this question was asked. Here is a tail recursive solution that will take log(len(a)+len(b)) time.
Assumption: The inputs are correct, i.e., k is in the range [0, len(a)+len(b)].
Base cases:
If length of one of the arrays is 0, the answer is kth element of the second array.
Reduction steps:
If mid index of a + mid index of b is less than k:
If mid element of a is greater than mid element of b, we can ignore the first half of b, adjust k.
Otherwise, ignore the first half of a, adjust k.
If k is less than sum of mid indices of a and b:
If mid element of a is greater than mid element of b, we can safely ignore second half of a.
Otherwise, we can ignore second half of b.
Code:
def kthlargest(arr1, arr2, k):
    if len(arr1) == 0:
        return arr2[k]
    elif len(arr2) == 0:
        return arr1[k]
    mida1 = len(arr1) // 2  # integer division
    mida2 = len(arr2) // 2
    if mida1 + mida2 < k:
        if arr1[mida1] > arr2[mida2]:
            return kthlargest(arr1, arr2[mida2+1:], k - mida2 - 1)
        else:
            return kthlargest(arr1[mida1+1:], arr2, k - mida1 - 1)
    else:
        if arr1[mida1] > arr2[mida2]:
            return kthlargest(arr1[:mida1], arr2, k)
        else:
            return kthlargest(arr1, arr2[:mida2], k)
Please note that my solution creates new copies of smaller arrays in every call; this can easily be eliminated by passing only start and end indices on the original arrays.
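For illustration, an index-passing variant of the same recursion might look like the sketch below (the window parameters lo1/hi1/lo2/hi2 and the 0-based k are my own conventions, not the answer's):

def kth_by_index(arr1, arr2, k, lo1=0, hi1=None, lo2=0, hi2=None):
    # Same recursion as above, but passing index windows instead of slices.
    # k is 0-based and counted within arr1[lo1:hi1] + arr2[lo2:hi2].
    if hi1 is None:
        hi1 = len(arr1)
    if hi2 is None:
        hi2 = len(arr2)
    if lo1 >= hi1:
        return arr2[lo2 + k]
    if lo2 >= hi2:
        return arr1[lo1 + k]
    mid1 = lo1 + (hi1 - lo1) // 2
    mid2 = lo2 + (hi2 - lo2) // 2
    if (mid1 - lo1) + (mid2 - lo2) < k:
        if arr1[mid1] > arr2[mid2]:
            # Discard arr2[lo2 .. mid2]
            return kth_by_index(arr1, arr2, k - (mid2 - lo2) - 1,
                                lo1, hi1, mid2 + 1, hi2)
        else:
            # Discard arr1[lo1 .. mid1]
            return kth_by_index(arr1, arr2, k - (mid1 - lo1) - 1,
                                mid1 + 1, hi1, lo2, hi2)
    else:
        if arr1[mid1] > arr2[mid2]:
            # Discard arr1[mid1 .. hi1)
            return kth_by_index(arr1, arr2, k, lo1, mid1, lo2, hi2)
        else:
            # Discard arr2[mid2 .. hi2)
            return kth_by_index(arr1, arr2, k, lo1, hi1, lo2, mid2)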
You've got it, just keep going! And be careful with the indexes...
To simplify a bit I'll assume that N and M are > k, so the complexity here is O(log k), which is O(log N + log M).
Pseudo-code:
i = k/2
j = k - i
step = k/4
while step > 0
if a[i-1] > b[j-1]
i -= step
j += step
else
i += step
j -= step
step /= 2
if a[i-1] > b[j-1]
return a[i-1]
else
return b[j-1]
For the demonstration you can use the loop invariant i + j = k, but I won't do all your homework :)
Many people answered this "kth smallest element from two sorted arrays" question, but usually with only general ideas, not clear working code or boundary-condition analysis.
Here I'd like to elaborate on it carefully, the way I went through it, to help novices understand, with my correct working Java code. A1 and A2 are two sorted ascending arrays, with size1 and size2 as their lengths respectively. We need to find the k-th smallest element from the union of those two arrays. Here we reasonably assume that (k > 0 && k <= size1 + size2), which implies that A1 and A2 can't both be empty.
First, let's approach this question with a slow O(k) algorithm. The method is to compare the first elements of both arrays, A1[0] and A2[0]. Take the smaller one, say A1[0], away into our pocket. Then compare A1[1] with A2[0], and so on. Repeat this action until our pocket has k elements. Very important: in the first step, we can only commit A1[0] to our pocket. We can NOT include or exclude A2[0]!!!
The following O(k) code gives you one element before the correct answer. Here I use it to show my idea and analyse the boundary conditions. I have the correct code after this one:
private E kthSmallestSlowWithFault(int k) {
int size1 = A1.length, size2 = A2.length;
int index1 = 0, index2 = 0;
// base case, k == 1
if (k == 1) {
if (size1 == 0) {
return A2[index2];
} else if (size2 == 0) {
return A1[index1];
} else if (A1[index1].compareTo(A2[index2]) < 0) {
return A1[index1];
} else {
return A2[index2];
}
}
/* in the next loop, we always assume there is one next element to compare with, so we can
* commit to the smaller one. What if the last element is the kth one?
*/
if (k == size1 + size2) {
if (size1 == 0) {
return A2[size2 - 1];
} else if (size2 == 0) {
return A1[size1 - 1];
} else if (A1[size1 - 1].compareTo(A2[size2 - 1]) < 0) {
return A1[size1 - 1];
} else {
return A2[size2 - 1];
}
}
/*
* only when k > 1, below loop will execute. In each loop, we commit to one element, till we
* reach (index1 + index2 == k - 1) case. But the answer is not correct, always one element
* ahead, because we didn't merge base case function into this loop yet.
*/
int lastElementFromArray = 0;
while (index1 + index2 < k - 1) {
if (A1[index1].compareTo(A2[index2]) < 0) {
index1++;
lastElementFromArray = 1;
// commit to one element from array A1, but that element is at (index1 - 1)!!!
} else {
index2++;
lastElementFromArray = 2;
}
}
if (lastElementFromArray == 1) {
return A1[index1 - 1];
} else {
return A2[index2 - 1];
}
}
The most powerful idea is that in each loop, we always use the base-case approach. After committing to the current smallest element, we get one step closer to the target: the k-th smallest element. Never jump into the middle and make yourself confused and lost!
By observing the base cases k == 1 and k == size1 + size2 in the above code, and combining them with the fact that A1 and A2 can't both be empty, we can turn the logic into the more concise style below.
Here is a slow but correct working code:
private E kthSmallestSlow(int k) {
// System.out.println("this is an O(k) speed algorithm, very concise");
int size1 = A1.length, size2 = A2.length;
int index1 = 0, index2 = 0;
while (index1 + index2 < k - 1) {
if (size1 > index1 && (size2 <= index2 || A1[index1].compareTo(A2[index2]) < 0)) {
index1++; // here we commit to original index1 element, not the increment one!!!
} else {
index2++;
}
}
// below is the (index1 + index2 == k - 1) base case
// also eliminate the risk of referring to an element outside of index boundary
if (size1 > index1 && (size2 <= index2 || A1[index1].compareTo(A2[index2]) < 0)) {
return A1[index1];
} else {
return A2[index2];
}
}
Now we can try a faster algorithm that runs in O(log k). Similarly, compare A1[k/2] with A2[k/2]; if A1[k/2] is smaller, then all the elements from A1[0] to A1[k/2] should be in our pocket. The idea is to not just commit to one element in each loop; the first step contains k/2 elements. Again, we can NOT include or exclude A2[0] to A2[k/2] anyway. So in the first step, we can't go more than k/2 elements. For the second step, we can't go more than k/4 elements...
After each step, we get much closer to the k-th element. At the same time each step gets smaller and smaller, until we reach (step == 1), which is (k-1 == index1+index2). Then we can refer to the simple and powerful base case again.
Here is the working correct code:
private E kthSmallestFast(int k) {
// System.out.println("this is an O(log k) speed algorithm with meaningful variables name");
int size1 = A1.length, size2 = A2.length;
int index1 = 0, index2 = 0, step = 0;
while (index1 + index2 < k - 1) {
step = (k - index1 - index2) / 2;
int step1 = index1 + step;
int step2 = index2 + step;
if (size1 > step1 - 1
&& (size2 <= step2 - 1 || A1[step1 - 1].compareTo(A2[step2 - 1]) < 0)) {
index1 = step1; // commit to element at index = step1 - 1
} else {
index2 = step2;
}
}
// the base case of (index1 + index2 == k - 1)
if (size1 > index1 && (size2 <= index2 || A1[index1].compareTo(A2[index2]) < 0)) {
return A1[index1];
} else {
return A2[index2];
}
}
Some people may worry: what if (index1 + index2) jumps over k-1? Could we miss the base case (k-1 == index1 + index2)? That's impossible: you can add up 0.5 + 0.25 + 0.125 + ..., and you will never go beyond 1.
Of course, it is very easy to turn the above code into a recursive algorithm:
private E kthSmallestFastRecur(int k, int index1, int index2, int size1, int size2) {
// System.out.println("this is an O(log k) speed algorithm with meaningful variables name");
// the base case of (index1 + index2 == k - 1)
if (index1 + index2 == k - 1) {
if (size1 > index1 && (size2 <= index2 || A1[index1].compareTo(A2[index2]) < 0)) {
return A1[index1];
} else {
return A2[index2];
}
}
int step = (k - index1 - index2) / 2;
int step1 = index1 + step;
int step2 = index2 + step;
if (size1 > step1 - 1 && (size2 <= step2 - 1 || A1[step1 - 1].compareTo(A2[step2 - 1]) < 0)) {
index1 = step1;
} else {
index2 = step2;
}
return kthSmallestFastRecur(k, index1, index2, size1, size2);
}
Hope the above analysis and Java code could help you to understand. But never copy my code as your homework! Cheers ;)
Here's a C++ iterative version of @lambdapilgrim's solution (see the explanation of the algorithm there):
#include <cassert>
#include <iterator>
template<class RandomAccessIterator, class Compare>
typename std::iterator_traits<RandomAccessIterator>::value_type
nsmallest_iter(RandomAccessIterator firsta, RandomAccessIterator lasta,
RandomAccessIterator firstb, RandomAccessIterator lastb,
size_t n,
Compare less) {
assert(std::is_sorted(firsta, lasta, less) && std::is_sorted(firstb, lastb, less));
for ( ; ; ) {
assert(n < static_cast<size_t>((lasta - firsta) + (lastb - firstb)));
if (firsta == lasta) return *(firstb + n);
if (firstb == lastb) return *(firsta + n);
size_t mida = (lasta - firsta) / 2;
size_t midb = (lastb - firstb) / 2;
if ((mida + midb) < n) {
if (less(*(firstb + midb), *(firsta + mida))) {
firstb += (midb + 1);
n -= (midb + 1);
}
else {
firsta += (mida + 1);
n -= (mida + 1);
}
}
else {
if (less(*(firstb + midb), *(firsta + mida)))
lasta = (firsta + mida);
else
lastb = (firstb + midb);
}
}
}
It works for all 0 <= n < (size(a) + size(b)) indexes and has O(log(size(a)) + log(size(b))) complexity.
Example
#include <functional> // greater<>
#include <iostream>
#define SIZE(a) (sizeof(a) / sizeof(*a))
int main() {
int a[] = {5,4,3};
int b[] = {2,1,0};
int k = 1; // find minimum value, the 1st smallest value in a,b
int i = k - 1; // convert to zero-based indexing
int v = nsmallest_iter(a, a + SIZE(a), b, b + SIZE(b),
SIZE(a)+SIZE(b)-1-i, std::greater<int>());
std::cout << v << std::endl; // -> 0
return v;
}
My attempt for first k numbers, kth number in 2 sorted arrays, and in n sorted arrays:
// require() is recognizable by node.js but not by browser;
// for running/debugging in browser, put utils.js and this file in <script> elements,
if (typeof require === "function") require("./utils.js");
// Find K largest numbers in two sorted arrays.
function k_largest(a, b, c, k) {
var sa = a.length;
var sb = b.length;
if (sa + sb < k) return -1;
var i = 0;
var j = sa - 1;
var m = sb - 1;
while (i < k && j >= 0 && m >= 0) {
if (a[j] > b[m]) {
c[i] = a[j];
i++;
j--;
} else {
c[i] = b[m];
i++;
m--;
}
}
debug.log(2, "i: "+ i + ", j: " + j + ", m: " + m);
if (i === k) {
return 0;
} else if (j < 0) {
while (i < k) {
c[i++] = b[m--];
}
} else {
while (i < k) c[i++] = a[j--];
}
return 0;
}
// find k-th largest or smallest number in 2 sorted arrays.
function kth(a, b, kd, dir){
sa = a.length; sb = b.length;
if (kd<1 || sa+sb < kd){
throw "Mission Impossible! I quit!";
}
var k;
//finding the kd_th largest == finding the smallest k_th;
if (dir === 1){ k = kd;
} else if (dir === -1){ k = sa + sb - kd + 1;}
else throw "Direction has to be 1 (smallest) or -1 (largest).";
return find_kth(a, b, k, sa-1, 0, sb-1, 0);
}
// find k-th smallest number in 2 sorted arrays;
function find_kth(c, d, k, cmax, cmin, dmax, dmin){
sc = cmax-cmin+1; sd = dmax-dmin+1; k0 = k; cmin0 = cmin; dmin0 = dmin;
debug.log(2, "=k: " + k +", sc: " + sc + ", cmax: " + cmax +", cmin: " + cmin + ", sd: " + sd +", dmax: " + dmax + ", dmin: " + dmin);
c_comp = k0-sc;
if (c_comp <= 0){
cmax = cmin0 + k0-1;
} else {
dmin = dmin0 + c_comp-1;
k -= c_comp-1;
}
d_comp = k0-sd;
if (d_comp <= 0){
dmax = dmin0 + k0-1;
} else {
cmin = cmin0 + d_comp-1;
k -= d_comp-1;
}
sc = cmax-cmin+1; sd = dmax-dmin+1;
debug.log(2, "#k: " + k +", sc: " + sc + ", cmax: " + cmax +", cmin: " + cmin + ", sd: " + sd +", dmax: " + dmax + ", dmin: " + dmin + ", c_comp: " + c_comp + ", d_comp: " + d_comp);
if (k===1) return (c[cmin]<d[dmin] ? c[cmin] : d[dmin]);
if (k === sc+sd) return (c[cmax]>d[dmax] ? c[cmax] : d[dmax]);
m = Math.floor((cmax+cmin)/2);
n = Math.floor((dmax+dmin)/2);
debug.log(2, "m: " + m + ", n: "+n+", c[m]: "+c[m]+", d[n]: "+d[n]);
if (c[m]<d[n]){
if (m === cmax){ // only 1 element in c;
return d[dmin+k-1];
}
k_next = k-(m-cmin+1);
return find_kth(c, d, k_next, cmax, m+1, dmax, dmin);
} else {
if (n === dmax){
return c[cmin+k-1];
}
k_next = k-(n-dmin+1);
return find_kth(c, d, k_next, cmax, cmin, dmax, n+1);
}
}
function traverse_at(a, ae, h, l, k, at, worker, wp){
var n = ae ? ae.length : 0;
var get_node;
switch (at){
case "k": get_node = function(idx){
var node = {};
var pos = l[idx] + Math.floor(k/n) - 1;
if (pos<l[idx]){ node.pos = l[idx]; }
else if (pos > h[idx]){ node.pos = h[idx];}
else{ node.pos = pos; }
node.idx = idx;
node.val = a[idx][node.pos];
debug.log(6, "pos: "+pos+"\nnode =");
debug.log(6, node);
return node;
};
break;
case "l": get_node = function(idx){
debug.log(6, "a["+idx+"][l["+idx+"]]: "+a[idx][l[idx]]);
return a[idx][l[idx]];
};
break;
case "h": get_node = function(idx){
debug.log(6, "a["+idx+"][h["+idx+"]]: "+a[idx][h[idx]]);
return a[idx][h[idx]];
};
break;
case "s": get_node = function(idx){
debug.log(6, "h["+idx+"]-l["+idx+"]+1: "+(h[idx] - l[idx] + 1));
return h[idx] - l[idx] + 1;
};
break;
default: get_node = function(){
debug.log(1, "!!! Exception: get_node() returns null.");
return null;
};
break;
}
worker.init();
debug.log(6, "--* traverse_at() *--");
var i;
if (!wp){
for (i=0; i<n; i++){
worker.work(get_node(ae[i]));
}
} else {
for (i=0; i<n; i++){
worker.work(get_node(ae[i]), wp);
}
}
return worker.getResult();
}
sumKeeper = function(){
var res = 0;
return {
init : function(){ res = 0;},
getResult: function(){
debug.log(5, "## sumKeeper.getResult: returning: "+res);
return res;
},
work : function(node){ if (node!==null) res += node;}
};
}();
maxPicker = function(){
var res = null;
return {
init : function(){ res = null;},
getResult: function(){
debug.log(5, "## maxPicker.getResult: returning: "+res);
return res;
},
work : function(node){
if (res === null){ res = node;}
else if (node!==null && node > res){ res = node;}
}
};
}();
minPicker = function(){
var res = null;
return {
init : function(){ res = null;},
getResult: function(){
debug.log(5, "## minPicker.getResult: returning: ");
debug.log(5, res);
return res;
},
work : function(node){
if (res === null && node !== null){ res = node;}
else if (node!==null &&
node.val !==undefined &&
node.val < res.val){ res = node; }
else if (node!==null && node < res){ res = node;}
}
};
}();
// find k-th smallest number in n sorted arrays;
// need to consider the case where some of the subarrays are taken out of the selection;
function kth_n(a, ae, k, h, l){
var n = ae.length;
debug.log(2, "------** kth_n() **-------");
debug.log(2, "n: " +n+", k: " + k);
debug.log(2, "ae: ["+ae+"], len: "+ae.length);
debug.log(2, "h: [" + h + "]");
debug.log(2, "l: [" + l + "]");
for (var i=0; i<n; i++){
if (h[ae[i]]-l[ae[i]]+1>k) h[ae[i]]=l[ae[i]]+k-1;
}
debug.log(3, "--after reduction --");
debug.log(3, "h: [" + h + "]");
debug.log(3, "l: [" + l + "]");
if (n === 1)
return a[ae[0]][k-1];
if (k === 1)
return traverse_at(a, ae, h, l, k, "l", minPicker);
if (k === traverse_at(a, ae, h, l, k, "s", sumKeeper))
return traverse_at(a, ae, h, l, k, "h", maxPicker);
var kn = traverse_at(a, ae, h, l, k, "k", minPicker);
debug.log(3, "kn: ");
debug.log(3, kn);
var idx = kn.idx;
debug.log(3, "last: k: "+k+", l["+kn.idx+"]: "+l[idx]);
k -= kn.pos - l[idx] + 1;
l[idx] = kn.pos + 1;
debug.log(3, "next: "+"k: "+k+", l["+kn.idx+"]: "+l[idx]);
if (h[idx]<l[idx]){ // all elements in a[idx] selected;
//remove a[idx] from the arrays.
debug.log(4, "All elements selected in a["+idx+"].");
debug.log(5, "last ae: ["+ae+"]");
ae.splice(ae.indexOf(idx), 1);
h[idx] = l[idx] = "_"; // For display purpose only.
debug.log(5, "next ae: ["+ae+"]");
}
return kth_n(a, ae, k, h, l);
}
function find_kth_in_arrays(a, k){
if (!a || a.length<1 || k<1) throw "Mission Impossible!";
var ae=[], h=[], l=[], n=0, s, ts=0;
for (var i=0; i<a.length; i++){
s = a[i] && a[i].length;
if (s>0){
ae.push(i); h.push(s-1); l.push(0);
ts+=s;
}
}
if (k>ts) throw "Too few elements to choose from!";
return kth_n(a, ae, k, h, l);
}
/////////////////////////////////////////////////////
// tests
// To show everything: use 6.
debug.setLevel(1);
var a = [2, 3, 5, 7, 89, 223, 225, 667];
var b = [323, 555, 655, 673];
//var b = [99];
var c = [];
debug.log(1, "a = (len: " + a.length + ")");
debug.log(1, a);
debug.log(1, "b = (len: " + b.length + ")");
debug.log(1, b);
for (var k=1; k<a.length+b.length+1; k++){
debug.log(1, "================== k: " + k + "=====================");
if (k_largest(a, b, c, k) === 0 ){
debug.log(1, "c = (len: "+c.length+")");
debug.log(1, c);
}
try{
result = kth(a, b, k, -1);
debug.log(1, "===== The " + k + "-th largest number: " + result);
} catch (e) {
debug.log(0, "Error message from kth(): " + e);
}
debug.log("==================================================");
}
debug.log(1, "################# Now for the n sorted arrays ######################");
debug.log(1, "####################################################################");
x = [[1, 3, 5, 7, 9],
[-2, 4, 6, 8, 10, 12],
[8, 20, 33, 212, 310, 311, 623],
[8],
[0, 100, 700],
[300],
[],
null];
debug.log(1, "x = (len: "+x.length+")");
debug.log(1, x);
for (var i=0, num=0; i<x.length; i++){
if (x[i]!== null) num += x[i].length;
}
debug.log(1, "totoal number of elements: "+num);
// to test k in specific ranges:
var start = 0, end = 25;
for (k=start; k<end; k++){
debug.log(1, "=========================== k: " + k + "===========================");
try{
result = find_kth_in_arrays(x, k);
debug.log(1, "====== The " + k + "-th smallest number: " + result);
} catch (e) {
debug.log(1, "Error message from find_kth_in_arrays: " + e);
}
debug.log(1, "=================================================================");
}
debug.log(1, "x = (len: "+x.length+")");
debug.log(1, x);
debug.log(1, "totoal number of elements: "+num);
The complete code with debug utils can be found at: https://github.com/brainclone/teasers/tree/master/kth
Most of the answers I found here focus on both arrays. While that works, it is harder to implement, since there are a lot of edge cases to take care of. Besides that, most of the implementations are recursive, which adds the space complexity of the recursion stack. So instead of focusing on both arrays, I decided to focus only on the smaller array: do the binary search on just the smaller array, and adjust the pointer for the second array based on the value of the pointer in the first array. With the following implementation, we have O(log(min(n,m))) time complexity and O(1) space complexity.
public static int kth_two_sorted(int []a, int b[],int k){
if(a.length > b.length){
return kth_two_sorted(b,a,k);
}
if(a.length + b.length < k){
throw new RuntimeException("wrong argument");
}
int low = 0;
int high = k;
if(a.length <= k){
high = a.length-1;
}
while(low <= high){
int sizeA = low+(high - low)/2;
int sizeB = k - sizeA;
boolean shrinkLeft = false;
boolean extendRight = false;
if(sizeA != 0){
if(sizeB !=b.length){
if(a[sizeA-1] > b[sizeB]){
shrinkLeft = true;
high = sizeA-1;
}
}
}
if(sizeA!=a.length){
if(sizeB!=0){
if(a[sizeA] < b[sizeB-1]){
extendRight = true;
low = sizeA;
}
}
}
if(!shrinkLeft && !extendRight){
return Math.max(a[sizeA-1],b[sizeB-1]) ;
}
}
throw new IllegalArgumentException("we can't be here");
}
We have a range of [low, high] for array a, and we narrow this range as we go further through the algorithm. sizeA shows how many of the k items are taken from array a, and it is derived from the values of low and high. sizeB is defined the same way, except we calculate its value such that sizeA + sizeB = k. Based on the values at those two borders, we conclude that we have to extend to the right side in array a or shrink to the left side. If we are stuck in the same position, it means that we have found the solution, and we return the max of the values at position sizeA-1 in a and sizeB-1 in b.
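To make that idea concrete, here is a small Python sketch of the same "binary search only on the smaller array" partition approach (my own code, not a translation of the Java above; k is 1-based):

def kth_smallest(a, b, k):
    # Binary search on the smaller array for a split point i such that
    # the first i elements of a and the first k - i elements of b
    # together are exactly the k smallest elements of the union.
    if len(a) > len(b):
        a, b = b, a
    n, m = len(a), len(b)
    assert 1 <= k <= n + m
    lo, hi = max(0, k - m), min(k, n)
    while lo <= hi:
        i = (lo + hi) // 2           # take i elements from a
        j = k - i                    # and k - i elements from b
        a_left  = a[i - 1] if i > 0 else float('-inf')
        a_right = a[i]     if i < n else float('inf')
        b_left  = b[j - 1] if j > 0 else float('-inf')
        b_right = b[j]     if j < m else float('inf')
        if a_left <= b_right and b_left <= a_right:
            return max(a_left, b_left)   # valid partition found
        if a_left > b_right:
            hi = i - 1                   # took too many from a
        else:
            lo = i + 1                   # took too few from a
    # unreachable for valid input: a valid partition always exists

# Example: kth_smallest([2, 4, 9], [1, 3, 6, 8], 4) == 4

Since the search range for i is [max(0, k - m), min(k, n)], only the smaller array is searched, which is where the O(log(min(n, m))) bound comes from.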
Here's my code based on Jules Olleon's solution:
int getNth(vector<int>& v1, vector<int>& v2, int n)
{
int step = n / 4;
int i1 = n / 2;
int i2 = n - i1;
while(!(v2[i2] >= v1[i1 - 1] && v1[i1] > v2[i2 - 1]))
{
if (v1[i1 - 1] >= v2[i2 - 1])
{
i1 -= step;
i2 += step;
}
else
{
i1 += step;
i2 -= step;
}
step /= 2;
if (!step) step = 1;
}
if (v1[i1 - 1] >= v2[i2 - 1])
return v1[i1 - 1];
else
return v2[i2 - 1];
}
int main()
{
int a1[] = {1,2,3,4,5,6,7,8,9};
int a2[] = {4,6,8,10,12};
//int a1[] = {1,2,3,4,5,6,7,8,9};
//int a2[] = {4,6,8,10,12};
//int a1[] = {1,7,9,10,30};
//int a2[] = {3,5,8,11};
vector<int> v1(a1, a1+9);
vector<int> v2(a2, a2+5);
cout << getNth(v1, v2, 5);
return 0;
}
Here is my implementation in C; you can refer to Jules Olléon's explanation of the algorithm: the idea is that we maintain i + j = k and find i and j such that a[i-1] < b[j-1] < a[i] (or the other way round). Now, since there are i elements in 'a' smaller than b[j-1], and j-1 elements in 'b' smaller than b[j-1], b[j-1] is the i + (j-1) + 1 = kth smallest element. To find such i, j the algorithm does a dichotomic search on the arrays.
int find_k(int A[], int m, int B[], int n, int k) {
if (m <= 0 )return B[k-1];
else if (n <= 0) return A[k-1];
int i = ( m/double (m + n)) * (k-1);
if (i < m-1 && i<k-1) ++i;
int j = k - 1 - i;
int Ai_1 = (i > 0) ? A[i-1] : INT_MIN, Ai = (i<m)?A[i]:INT_MAX;
int Bj_1 = (j > 0) ? B[j-1] : INT_MIN, Bj = (j<n)?B[j]:INT_MAX;
if (Ai >= Bj_1 && Ai <= Bj) {
return Ai;
} else if (Bj >= Ai_1 && Bj <= Ai) {
return Bj;
}
if (Ai < Bj_1) { // the answer can't be within A[0,...,i]
return find_k(A+i+1, m-i-1, B, n, j);
} else { // the answer can't be within A[0,...,i]
return find_k(A, m, B+j+1, n-j-1, i);
}
}
Here's my solution. The C++ code prints the kth smallest value, as well as the number of loop iterations needed to get it, which in my opinion is on the order of log(k). The code, however, requires k to be smaller than the length of the first array, which is a limitation.
#include <iostream>
#include <vector>
#include<math.h>
using namespace std;
template<typename comparable>
comparable kthSmallest(vector<comparable> & a, vector<comparable> & b, int k){
int idx1; // Index in the first array a
int idx2; // Index in the second array b
comparable maxVal, minValPlus;
float iter = k;
int numIterations = 0;
if(k > a.size()){ // Checks if k is larger than the size of first array
cout << " k is larger than the first array" << endl;
return -1;
}
else{ // If all conditions are satisfied, initialize the indexes
idx1 = k - 1;
idx2 = -1;
}
for ( ; ; ){
numIterations ++;
if(idx2 == -1 || b[idx2] <= a[idx1] ){
maxVal = a[idx1];
minValPlus = b[idx2 + 1];
idx1 = idx1 - ceil(iter/2); // Binary search
idx2 = k - idx1 - 2; // Ensures sum of indices = k - 2
}
else{
maxVal = b[idx2];
minValPlus = a[idx1 + 1];
idx2 = idx2 - ceil(iter/2); // Binary search
idx1 = k - idx2 - 2; // Ensures sum of indices = k - 2
}
if(minValPlus >= maxVal){ // Check if kth smallest value has been found
cout << "The number of iterations to find the " << k << "(th) smallest value is " << numIterations << endl;
return maxVal;
}
else
iter/=2; // Reduce search space of binary search
}
}
int main(){
//Test Cases
vector<int> a = {2, 4, 9, 15, 22, 34, 45, 55, 62, 67, 78, 85};
vector<int> b = {1, 3, 6, 8, 11, 13, 15, 20, 56, 67, 89};
// Input k < a.size()
int kthSmallestVal;
for (int k = 1; k <= a.size() ; k++){
kthSmallestVal = kthSmallest<int>( a ,b ,k );
cout << k <<" (th) smallest Value is " << kthSmallestVal << endl << endl << endl;
}
}
Basically, with this approach you can discard k/2 elements at each step.
k will recursively change from k => k/2 => k/4 => ... till it reaches 1, so the time complexity is O(log k).
At k = 1, we get the lowest of the two arrays.
The following code is in Java. Please note that we subtract 1 from the indices in the code because Java array indices start from 0 and not 1, e.g. k = 3 is represented by the element at index 2 of an array.
private int kthElement(int[] arr1, int[] arr2, int k) {
if (k < 1 || k > (arr1.length + arr2.length))
return -1;
return helper(arr1, 0, arr1.length - 1, arr2, 0, arr2.length - 1, k);
}
private int helper(int[] arr1, int low1, int high1, int[] arr2, int low2, int high2, int k) {
if (low1 > high1) {
return arr2[low2 + k - 1];
} else if (low2 > high2) {
return arr1[low1 + k - 1];
}
if (k == 1) {
return Math.min(arr1[low1], arr2[low2]);
}
int i = Math.min(low1 + k / 2, high1 + 1);
int j = Math.min(low2 + k / 2, high2 + 1);
if (arr1[i - 1] > arr2[j - 1]) {
return helper(arr1, low1, high1, arr2, j, high2, k - (j - low2));
} else {
return helper(arr1, i, high1, arr2, low2, high2, k - (i - low1));
}
}
The first pseudo-code provided above does not work for many values. For example,
here are two arrays:
int[] a = { 1, 5, 6, 8, 9, 11, 15, 17, 19 };
int[] b = { 4, 7, 8, 13, 15, 18, 20, 24, 26 };
It did not work for k=3 or k=9. I have another solution; it is given below.
private static void traverse(int pt, int len) {
int temp = 0;
if (len == 1) {
int val = 0;
while (k - (pt + 1) - 1 > -1 && M[pt] < N[k - (pt + 1) - 1]) {
if (val == 0)
val = M[pt] < N[k - (pt + 1) - 1] ? N[k - (pt + 1) - 1]
: M[pt];
else {
int t = M[pt] < N[k - (pt + 1) - 1] ? N[k - (pt + 1) - 1]
: M[pt];
val = val < t ? val : t;
}
++pt;
}
if (val == 0)
val = M[pt] < N[k - (pt + 1) - 1] ? N[k - (pt + 1) - 1] : M[pt];
System.out.println(val);
return;
}
temp = len / 2;
if (M[pt + temp - 1] < N[k - (pt + temp) - 1]) {
traverse(pt + temp, temp);
} else {
traverse(pt, temp);
}
}
But... it is also not working for k=5. There is an even/odd catch with k which keeps it from being simple.
public class KthSmallestInSortedArray {
public static void main(String[] args) {
int a1[] = {2, 3, 10, 11, 43, 56},
a2[] = {120, 13, 14, 24, 34, 36},
k = 4;
System.out.println(findKthElement(a1, a2, k));
}
private static int findKthElement(int a1[], int a2[], int k) {
/** Checking k must less than sum of length of both array **/
if (a1.length + a2.length < k) {
throw new IllegalArgumentException();
}
/** K must be greater than zero **/
if (k <= 0) {
throw new IllegalArgumentException();
}
/**
* Finding begin, l and end such that
* begin <= l < end
* a1[0].....a1[l-1] and
* a2[0]....a2[k-l-1] are the smallest k numbers
*/
int begin = Math.max(0, k - a2.length);
int end = Math.min(a1.length, k);
while (begin < end) {
int l = begin + (end - begin) / 2;
/** Can we include a1[l] in the k smallest numbers */
if ((l < a1.length) &&
(k - l > 0) &&
(a1[l] < a2[k - l - 1])) {
begin = l + 1;
} else if ((l > 0) &&
(k - l < a2.length) &&
(a1[l - 1] > a2[k - 1])) {
/**
* This is the case where we can discard
* a[l-1] from the set of k smallest numbers
*/
end = l;
} else {
/**
* We found our answer since both inequalities were
* false
*/
begin = l;
break;
}
}
if (begin == 0) {
return a2[k - 1];
} else if (begin == k) {
return a1[k - 1];
} else {
return Math.max(a1[begin - 1], a2[k - begin - 1]);
}
}
}
Here is my solution in Java. I will try to optimize it further.
public class FindKLargestTwoSortedArray {
public static void main(String[] args) {
int[] arr1 = { 10, 20, 40, 80 };
int[] arr2 = { 15, 35, 50, 75 };
FindKLargestTwoSortedArray(arr1, 0, arr1.length - 1, arr2, 0,
arr2.length - 1, 6);
}
public static void FindKLargestTwoSortedArray(int[] arr1, int start1,
int end1, int[] arr2, int start2, int end2, int k) {
if ((start1 <= end1 && start1 >= 0 && end1 < arr1.length)
&& (start2 <= end2 && start2 >= 0 && end2 < arr2.length)) {
int midIndex1 = (start1 + (k - 1) / 2);
midIndex1 = midIndex1 >= arr1.length ? arr1.length - 1 : midIndex1;
int midIndex2 = (start2 + (k - 1) / 2);
midIndex2 = midIndex2 >= arr2.length ? arr2.length - 1 : midIndex2;
if (arr1[midIndex1] == arr2[midIndex2]) {
System.out.println("element is " + arr1[midIndex1]);
} else if (arr1[midIndex1] < arr2[midIndex2]) {
if (k == 1) {
System.out.println("element is " + arr1[midIndex1]);
return;
} else if (k == 2) {
System.out.println("element is " + arr2[midIndex2]);
return;
}else if (midIndex1 == arr1.length-1 || midIndex2 == arr2.length-1 ) {
if(k==(arr1.length+arr2.length)){
System.out.println("element is " + arr2[midIndex2]);
return;
}else if(k==(arr1.length+arr2.length)-1){
System.out.println("element is " + arr1[midIndex1]);
return;
}
}
int remainingElementToSearch = k - (midIndex1-start1);
FindKLargestTwoSortedArray(
arr1,
midIndex1,
(midIndex1 + remainingElementToSearch) >= arr1.length ? arr1.length-1
: (midIndex1 + remainingElementToSearch), arr2,
start2, midIndex2, remainingElementToSearch);
} else if (arr1[midIndex1] > arr2[midIndex2]) {
FindKLargestTwoSortedArray(arr2, start2, end2, arr1, start1,
end1, k);
}
} else {
return;
}
}
}
This is inspired by the algorithm in a wonderful YouTube video.
Link to code of complexity (log(n)+log(m))
Link to code of complexity (log(n)*log(m))
Implementation of the (log(n)+log(m)) solution:
I would like to add my explanation of the problem.
This is a classic problem where we have to use the fact that the two arrays are sorted.
We have been given two sorted arrays, arr1 of size sz1 and arr2 of size sz2.
a) Checking if k is valid
If k > (sz1 + sz2), then we cannot find the kth smallest element in the union of both sorted arrays, so return "invalid data".
b) Managing edge cases
If the above condition is false and we have a valid and feasible value of k, we pad both arrays with -infinity values at the front and +infinity values at the end, to cover the edge cases of k = 1, 2 and k = (sz1+sz2-1), (sz1+sz2), etc.
Now both arrays have size (sz1+2) and (sz2+2) respectively.
Main algorithm
Now we do binary search on arr1, looking for an index i, startIndex <= i <= endIndex, such that if we find the corresponding index j in arr2 using the constraint {(i+j) = k}, then:
if (arr2[j-1] < arr1[i] < arr2[j]), then arr1[i] is the kth smallest (Case 1)
else if (arr1[i-1] < arr2[j] < arr1[i]), then arr2[j] is the kth smallest (Case 2)
otherwise, either arr1[i] < arr2[j-1] < arr2[j] (Case 3)
or arr2[j-1] < arr2[j] < arr1[i] (Case 4)
We know that the kth smallest element has (k-1) elements smaller than it in the union of both arrays. So:
In Case 1, we have ensured that there are a total of (k-1) elements smaller than arr1[i]: the elements smaller than arr1[i] in arr1 are i-1 in number, we know that (arr2[j-1] < arr1[i] < arr2[j]), and the number of elements smaller than arr1[i] in arr2 is j-1, because j is found using (i-1)+(j-1) = (k-1). So the kth smallest element is arr1[i].
But the answer may not always come from the first array arr1, so we check Case 2, which is satisfied in the same way as Case 1 because (i-1)+(j-1) = (k-1). If we have (arr1[i-1] < arr2[j] < arr1[i]), there are a total of k-1 elements smaller than arr2[j] in the union of both arrays, so it is the kth smallest element.
In Case 3, to turn it into Case 1 or Case 2, we need to increment i (j is then found using the constraint {(i+j) = k}), i.e. in the binary search move to the right part, i.e. make startIndex = middleIndex.
In Case 4, to turn it into Case 1 or Case 2, we need to decrement i (j is then found using the constraint {(i+j) = k}), i.e. in the binary search move to the left part, i.e. make endIndex = middleIndex.
Now, how do we decide startIndex and endIndex at the beginning of the binary search over arr1?
startIndex = 1, and for endIndex: if k > sz1, endIndex = (sz1+1), else endIndex = k.
This is because if k is greater than the size of the first array we may have to do binary search over the entire array arr1; otherwise we only need its first k elements, because the last sz1-k elements can never contribute to the kth smallest.
Code shown below
// Complexity O(log(n)+log(m))
#include<bits/stdc++.h>
using namespace std;
#define f(i,x,y) for(int i = (x);i < (y);++i)
#define F(i,x,y) for(int i = (x);i > (y);--i)
int max(int a,int b){return (a > b?a:b);}
int min(int a,int b){return (a < b?a:b);}
int mod(int a){return (a > 0?a:((-1)*(a)));}
#define INF 1000000
int func(int *arr1,int *arr2,int sz1,int sz2,int k)
{
if((k <= (sz1+sz2))&&(k > 0))
{
int s = 1,e,i,j;
if(k > sz1)e = sz1+1;
else e = k;
while((e-s)>1)
{
i = (e+s)/2;
j = ((k-1)-(i-1));
j++;
if(j > (sz2+1)){s = i;}
else if((arr1[i] >= arr2[j-1])&&(arr1[i] <= arr2[j]))return arr1[i];
else if((arr2[j] >= arr1[i-1])&&(arr2[j] <= arr1[i]))return arr2[j];
else if(arr1[i] < arr2[j-1]){s = i;}
else if(arr1[i] > arr2[j]){e = i;}
else {;}
}
i = e,j = ((k-1)-(i-1));j++;
if((arr1[i] >= arr2[j-1])&&(arr1[i] <= arr2[j]))return arr1[i];
else if((arr2[j] >= arr1[i-1])&&(arr2[j] <= arr1[i]))return arr2[j];
else
{
i = s,j = ((k-1)-(i-1));j++;
if((arr1[i] >= arr2[j-1])&&(arr1[i] <= arr2[j]))return arr1[i];
else return arr2[j];
}
}
else
{
cout << "Data Invalid" << endl;
return -INF;
}
}
int main()
{
int n,m,k;
cin >> n >> m >> k;
int arr1[n+2];
int arr2[m+2];
f(i,1,n+1)
cin >> arr1[i];
f(i,1,m+1)
cin >> arr2[i];
arr1[0] = -INF;
arr2[0] = -INF;
arr1[n+1] = +INF;
arr2[m+1] = +INF;
int val = func(arr1,arr2,n,m,k);
if(val != -INF)cout << val << endl;
return 0;
}
For the solution of complexity (log(n)*log(m)):
There I just missed using the fact that for each i, j can be found directly using the constraint {(i-1)+(j-1)=(k-1)}. So for each i, I was applying a further binary search on the second array to find j such that arr2[j] <= arr1[i], which is why this solution can be optimized further.
#include <bits/stdc++.h>
using namespace std;
int findKthElement(int a[],int start1,int end1,int b[],int start2,int end2,int k){
if(start1 >= end1)return b[start2+k-1];
if(start2 >= end2)return a[start1+k-1];
if(k==1)return min(a[start1],b[start2]);
int aMax = INT_MAX;
int bMax = INT_MAX;
if(start1+k/2-1 < end1) aMax = a[start1 + k/2 - 1];
if(start2+k/2-1 < end2) bMax = b[start2 + k/2 - 1];
if(aMax > bMax){
return findKthElement(a,start1,end1,b,start2+k/2,end2,k-k/2);
}
else{
return findKthElement(a,start1 + k/2,end1,b,start2,end2,k-k/2);
}
}
int main(void){
int t;
scanf("%d",&t);
while(t--){
int n,m,k;
cout<<"Enter the size of 1st Array"<<endl;
cin>>n;
int arr[n];
cout<<"Enter the Element of 1st Array"<<endl;
for(int i = 0;i<n;i++){
cin>>arr[i];
}
cout<<"Enter the size of 2nd Array"<<endl;
cin>>m;
int arr1[m];
cout<<"Enter the Element of 2nd Array"<<endl;
for(int i = 0;i<m;i++){
cin>>arr1[i];
}
cout<<"Enter The Value of K";
cin>>k;
sort(arr,arr+n);
sort(arr1,arr1+m);
cout<<findKthElement(arr,0,n,arr1,0,m,k)<<endl;
}
return 0;
}
Time complexity is O(log(min(n,m))).
Below is C# code to find the k-th smallest element in the union of two sorted arrays. Time complexity: O(log k)
public static int findKthSmallestElement1(int[] A, int startA, int endA, int[] B, int startB, int endB, int k)
{
int n = endA - startA;
int m = endB - startB;
if (n <= 0)
return B[startB + k - 1];
if (m <= 0)
return A[startA + k - 1];
if (k == 1)
return A[startA] < B[startB] ? A[startA] : B[startB];
int midA = (startA + endA) / 2;
int midB = (startB + endB) / 2;
if (A[midA] <= B[midB])
{
if (n / 2 + m / 2 + 1 >= k)
return findKthSmallestElement1(A, startA, endA, B, startB, midB, k);
else
return findKthSmallestElement1(A, midA + 1, endA, B, startB, endB, k - n / 2 - 1);
}
else
{
if (n / 2 + m / 2 + 1 >= k)
return findKthSmallestElement1(A, startA, midA, B, startB, endB, k);
else
return findKthSmallestElement1(A, startA, endA, B, midB + 1, endB, k - m / 2 - 1);
}
}
Check this code.
import math

def findkthsmallest():
    A=[1,5,10,22,30,35,75,125,150,175,200]
    B=[15,16,20,22,25,30,100,155,160,170]
    lM=0
    lN=0
    hM=len(A)-1
    hN=len(B)-1
    k=17
    while True:
        if k==1:
            return min(A[lM],B[lN])
        cM=hM-lM+1
        cN=hN-lN+1
        tmp = cM/float(cM+cN)
        iM=int(math.ceil(tmp*k))
        iN=k-iM
        iM=lM+iM-1
        iN=lN+iN-1
        if A[iM] >= B[iN]:
            if iN == hN or A[iM] < B[iN+1]:
                return A[iM]
            else:
                k = k - (iN-lN+1)
                lN=iN+1
                hM=iM-1
        if B[iN] >= A[iM]:
            if iM == hM or B[iN] < A[iM+1]:
                return B[iN]
            else:
                k = k - (iM-lM+1)
                lM=iM+1
                hN=iN-1
        if hM < lM:
            return B[lN+k-1]
        if hN < lN:
            return A[lM+k-1]

if __name__ == '__main__':
    print findkthsmallest();

Fast code to determine if any two subsets of columns have the same sum

For a given n and m I iterate over all n-by-m partial circulant matrices with entries that are either 0 or 1. I want to find whether there is a matrix such that no two subsets of the columns give the same sum. Here, when we add columns we just do it elementwise. My current code uses constraint programming via or-tools. However, it is not as fast as I would like. For n = 7 and m = 12 it takes over 3 minutes, and for n = 10, m = 18 it doesn't terminate, even though there are only 2^18 = 262144 different matrices to consider. Here is my code.
from scipy.linalg import circulant
import numpy as np
import itertools
from ortools.constraint_solver import pywrapcp as cs

n = 7
m = 12

def isdetecting(matrix):
    X = np.array([solver.IntVar(values) for i in range(matrix.shape[1])])
    X1 = X.tolist()
    for row in matrix:
        x = X[row].tolist()
        solver.Add(solver.Sum(x) == 0)
    db = solver.Phase(X1, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
    solver.NewSearch(db)
    count = 0
    while (solver.NextSolution() and count < 2):
        solution = [x.Value() for x in X1]
        count += 1
    solver.EndSearch()
    if (count < 2):
        return True

values = [-1,0,1]
solver = cs.Solver("scip")
for row in itertools.product([0,1],repeat = m):
    M = np.array(circulant(row)[0:n], dtype=bool)
    if isdetecting(M):
        print M.astype(int)
        break
Can this problem be solved fast enough so that n = 10, m = 18 can be solved?
One problem is that you are declaring the "solver" variable globally, and it seems to confuse or-tools to reuse it many times. When it is moved inside "isdetecting", the (7,12) problem is solved much faster, in about 7 seconds (compared to 2:51 minutes for the original model). I haven't checked it for the larger problem, though.
Also, it might be a good idea to test different labelings (instead of solver.INT_VAR_DEFAULT and solver.INT_VALUE_DEFAULT), though binary variables tend not to be very sensitive to the choice of labeling. See the code for another labeling.
def isdetecting(matrix):
    solver = cs.Solver("scip") # <----
    X = np.array([solver.IntVar(values) for i in range(matrix.shape[1])])
    X1 = X.tolist()
    for row in matrix:
        x = X[row].tolist()
        solver.Add(solver.Sum(x) == 0)
    # db = solver.Phase(X1, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
    db = solver.Phase(X1, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_CENTER_VALUE)
    solver.NewSearch(db)
    count = 0
    while (solver.NextSolution() and count < 2):
        solution = [x.Value() for x in X1]
        count += 1
    solver.EndSearch()
    if (count < 2):
        print "FOUND"
        return True
Edit: Here are the constraints to remove the all-0 solutions, as mentioned in the comments. As far as I know, this requires a separate list. It now takes a little longer (10.4s vs 7s).
X1Abs = [solver.IntVar(values, 'X1Abs[%i]' % i) for i in range(X1_len)]
for i in range(X1_len):
    solver.Add(X1Abs[i] == abs(X1[i]))
solver.Add(solver.Sum(X1Abs) > 0)
Something like this is what I had in mind. I'd estimate the running time for command line parameters 10 18 at less than 8 hours on my machine.
public class Search {
public static void main(String[] args) {
int n = Integer.parseInt(args[0]);
int m = Integer.parseInt(args[1]);
int row = search(n, m);
if (row >= 0) {
printRow(m, row);
}
}
private static int search(int n, int m) {
if (n < 0 || m < n || m >= 31 || powOverflows(m + 1, n)) {
throw new IllegalArgumentException();
}
long[] column = new long[m];
long[] sums = new long[1 << m];
int row = 1 << m;
while (row-- > 0) {
System.err.println(row);
for (int j = 0; j < m; j++) {
column[j] = 0;
for (int i = 0; i < n; i++) {
column[j] = (column[j] * (m + 1)) + ((row >> ((i + j) % m)) & 1);
}
}
for (int subset = 0; subset < (1 << m); subset++) {
long sum = 0;
for (int j = 0; j < m; j++) {
if (((subset >> j) & 1) == 1) {
sum += column[j];
}
}
sums[subset] = sum;
}
java.util.Arrays.sort(sums);
boolean duplicate = false;
for (int k = 1; k < (1 << m); k++) {
if (sums[k - 1] == sums[k]) {
duplicate = true;
break;
}
}
if (!duplicate) {
break;
}
}
return row;
}
private static boolean powOverflows(long b, int e) {
if (b <= 0 || e < 0) {
throw new IllegalArgumentException();
}
if (e == 0) {
return false;
}
long max = Long.MAX_VALUE;
while (e > 1) {
if (b > Integer.MAX_VALUE) {
return true;
}
if ((e & 1) == 1) {
max /= b;
}
b *= b;
e >>= 1;
}
return b > max;
}
private static void printRow(int m, int row) {
for (int j = 0; j < m; j++) {
System.out.print((row >> j) & 1);
}
System.out.println();
}
}
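For comparison, the same enumeration idea as in the Java code above (encode each 0/1 column as an integer in base m+1, so that subset sums of columns collide exactly when the encoded integer sums collide) can be sketched compactly in Python. This is my own illustration, with my own function name, and in pure Python it is far too slow for n = 10, m = 18:

def has_distinct_subset_sums(row_bits, n, m):
    # Columns of the n-by-m partial circulant matrix defined by row_bits,
    # encoded as integers in base (m + 1); with at most m columns per subset
    # there are no carries, so column-vector sums collide iff encoded sums do.
    cols = []
    for j in range(m):
        v = 0
        for i in range(n):
            v = v * (m + 1) + ((row_bits >> ((i + j) % m)) & 1)
        cols.append(v)
    seen = set()
    for mask in range(1 << m):
        s = 0
        for j in range(m):
            if (mask >> j) & 1:
                s += cols[j]
        if s in seen:
            return False
        seen.add(s)
    return True

# Example call for one candidate first row:
# has_distinct_subset_sums(0b000101101011, n=7, m=12)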

Discover long patterns

Given a sorted list of numbers, I would like to find the longest subsequence where the differences between successive elements are geometrically increasing. So if the list is
1, 2, 3, 4, 7, 15, 27, 30, 31, 81
then the subsequence is 1, 3, 7, 15, 31. Alternatively consider 1, 2, 5, 6, 11, 15, 23, 41, 47 which has subsequence 5, 11, 23, 47 with a = 3 and k = 2.
Can this be solved in O(n²) time? Where n is the length of the list.
I am interested both in the general case where the progression of differences is ak, ak², ak³, etc., where both a and k are integers, and in the special case where a = 1, so the progression of differences is k, k², k³, etc.
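To make the problem statement concrete, here is a small (and deliberately slow) Python brute force that I use only as a reference; it assumes integers a >= 1 and k >= 2, relies on a set for O(1) membership tests, and the names are mine:

def longest_geo_diff_subseq(nums, require_a_equal_1=False):
    # nums: sorted list of distinct integers.
    # Finds the longest subsequence whose successive differences are
    # a*k, a*k^2, a*k^3, ... with integers a >= 1 and k >= 2.
    present = set(nums)
    best = list(nums[:2])
    for i, first in enumerate(nums):
        for second in nums[i + 1:]:
            d = second - first                 # candidate first difference a*k
            for k in range(2, d + 1):
                if d % k:
                    continue
                a = d // k
                if require_a_equal_1 and a != 1:
                    continue
                seq = [first, second]
                step = d
                while True:
                    step *= k                  # next difference is a*k^(t+1)
                    nxt = seq[-1] + step
                    if nxt not in present:
                        break
                    seq.append(nxt)
                if len(seq) > len(best):
                    best = seq
    return best

# Example: longest_geo_diff_subseq([1, 2, 3, 4, 7, 15, 27, 30, 31, 81])
# returns [1, 3, 7, 15, 31]  (a = 1, k = 2)

This is only practical for small inputs, since the inner loop over k can be as large as the difference d; it is just a baseline against which faster answers can be checked.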
Update
I have improved the algorithm so that it takes an average of O(M + N^2) time with memory needs of O(M + N). It is essentially the same protocol described below, but to calculate the possible factors A, K for each difference D, I preload a table. This table takes less than a second to construct for M = 10^7.
I have made a C implementation that takes less than 10 minutes to solve N = 10^5 different random integer elements.
Here is the source code in C. To compile it, just do: gcc -O3 -o findgeo findgeo.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <memory.h>
#include <time.h>
struct Factor {
int a;
int k;
struct Factor *next;
};
struct Factor *factors = 0;
int factorsL=0;
void ConstructFactors(int R) {
int a,k,C;
int R2;
struct Factor *f;
float seconds;
clock_t end;
clock_t start = clock();
if (factors) free(factors);
factors = malloc (sizeof(struct Factor) *((R>>1) + 1));
R2 = R>>1 ;
for (a=0;a<=R2;a++) {
factors[a].a= a;
factors[a].k=1;
factors[a].next=NULL;
}
factorsL=R2+1;
R2 = floor(sqrt(R));
for (k=2; k<=R2; k++) {
a=1;
C=a*k*(k+1);
while (C<R) {
C >>= 1;
f=malloc(sizeof(struct Factor));
*f=factors[C];
factors[C].a=a;
factors[C].k=k;
factors[C].next=f;
a++;
C=a*k*(k+1);
}
}
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Construct Table: %f\n",seconds);
}
void DestructFactors() {
int i;
struct Factor *f;
for (i=0;i<factorsL;i++) {
while (factors[i].next) {
f=factors[i].next->next;
free(factors[i].next);
factors[i].next=f;
}
}
free(factors);
factors=NULL;
factorsL=0;
}
int ipow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp & 1)
result *= base;
exp >>= 1;
base *= base;
}
return result;
}
void findGeo(int **bestSolution, int *bestSolutionL,int *Arr, int L) {
int i,j,D;
int mustExistToBeBetter;
int R=Arr[L-1]-Arr[0];
int *possibleSolution;
int possibleSolutionL=0;
int exp;
int NextVal;
int idx;
int kMax,aMax;
float seconds;
clock_t end;
clock_t start = clock();
kMax = floor(sqrt(R));
aMax = floor(R/2);
ConstructFactors(R);
*bestSolutionL=2;
*bestSolution=malloc(0);
possibleSolution = malloc(sizeof(int)*(R+1));
struct Factor *f;
int *H=malloc(sizeof(int)*(R+1));
memset(H,0, sizeof(int)*(R+1));
for (i=0;i<L;i++) {
H[ Arr[i]-Arr[0] ]=1;
}
for (i=0; i<L-2;i++) {
for (j=i+2; j<L; j++) {
D=Arr[j]-Arr[i];
if (D & 1) continue;
f = factors + (D >>1);
while (f) {
idx=Arr[i] + f->a * f->k - Arr[0];
if ((f->k <= kMax)&& (f->a<aMax)&&(idx<=R)&&H[idx]) {
if (f->k ==1) {
mustExistToBeBetter = Arr[i] + f->a * (*bestSolutionL);
} else {
mustExistToBeBetter = Arr[i] + f->a * f->k * (ipow(f->k,*bestSolutionL) - 1)/(f->k-1);
}
if (mustExistToBeBetter< Arr[L-1]+1) {
idx= floor(mustExistToBeBetter - Arr[0]);
} else {
idx = R+1;
}
if ((idx<=R)&&H[idx]) {
possibleSolution[0]=Arr[i];
possibleSolution[1]=Arr[i] + f->a*f->k;
possibleSolution[2]=Arr[j];
possibleSolutionL=3;
exp = f->k * f->k * f->k;
NextVal = Arr[j] + f->a * exp;
idx=NextVal - Arr[0];
while ( (idx<=R) && H[idx]) {
possibleSolution[possibleSolutionL]=NextVal;
possibleSolutionL++;
exp = exp * f->k;
NextVal = NextVal + f->a * exp;
idx=NextVal - Arr[0];
}
if (possibleSolutionL > *bestSolutionL) {
free(*bestSolution);
*bestSolution = possibleSolution;
possibleSolution = malloc(sizeof(int)*(R+1));
*bestSolutionL=possibleSolutionL;
kMax= floor( pow (R, 1/ (*bestSolutionL) ));
aMax= floor(R / (*bestSolutionL));
}
}
}
f=f->next;
}
}
}
if (*bestSolutionL == 2) {
free(*bestSolution);
possibleSolutionL=0;
for (i=0; (i<2)&&(i<L); i++ ) {
possibleSolution[possibleSolutionL]=Arr[i];
possibleSolutionL++;
}
*bestSolution = possibleSolution;
*bestSolutionL=possibleSolutionL;
} else {
free(possibleSolution);
}
DestructFactors();
free(H);
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("findGeo: %f\n",seconds);
}
int compareInt (const void * a, const void * b)
{
return *(int *)a - *(int *)b;
}
int main(void) {
int N=100000;
int R=10000000;
int *A = malloc(sizeof(int)*N);
int *Sol;
int SolL;
int i;
int *S=malloc(sizeof(int)*R);
for (i=0;i<R;i++) S[i]=i+1;
for (i=0;i<N;i++) {
int r = rand() % (R-i);
A[i]=S[r];
S[r]=S[R-i-1];
}
free(S);
qsort(A,N,sizeof(int),compareInt);
/*
int step = floor(R/N);
A[0]=1;
for (i=1;i<N;i++) {
A[i]=A[i-1]+step;
}
*/
findGeo(&Sol,&SolL,A,N);
printf("[");
for (i=0;i<SolL;i++) {
if (i>0) printf(",");
printf("%d",Sol[i]);
}
printf("]\n");
printf("Size: %d\n",SolL);
free(Sol);
free(A);
return EXIT_SUCCESS;
}
Demonstration
I will try to demonstrate that the algorithm I proposed achieves the average bound claimed above for an equally distributed random sequence. I'm not a mathematician and I am not used to doing this kind of demonstration, so please feel free to correct any error that you see.
There are 4 nested loops; the first two are the N^2 factor. (The M is for the calculation of the possible-factors table.)
The third loop is executed only once on average for each pair. You can see this by checking the size of the pre-calculated factors table: its size is M when N -> inf, so the average number of steps for each pair is M/M = 1.
So the proof comes down to checking that the fourth loop (the one that traverses the candidate sequences) is executed at most O(N^2) times over all pairs.
To demonstrate that, I will consider two cases: one where M >> N and another where M ~= N, where M is the maximum difference of the initial array: M = S(n) - S(1).
For the first case, (M>>N) the probability to find a coincidence is p=N/M. To start a sequence, it must coincide the second and the b+1 element where b is the length of the best sequence until now. So the loop will enter times. And the average length of this series (supposing an infinite series) is . So the total number of times that the loop will be executed is . And this is close to 0 when M>>N. The problem here is when M~=N.
Now let's consider the case where M ~= N. Let b be the length of the best sequence so far. For the case A = k = 1, the sequence must start before position N - b, so the number of sequences will be at most N - b, and the number of passes through the loop will be at most (N-b)*b.
For A > 1 and k = 1 we can extrapolate, where d is M/N (the average distance between numbers). If we add up over all A's from 1 to dN/b, then we get an upper limit of:
For the cases where k >= 2, the sequence must start earlier still, so the loop is entered correspondingly fewer times on average; adding over all A's from 1 to dN/k^b gives a limit of:
Here, the worst case is when b is minimal. Because we are considering minimal series, let's take the very worst case of b = 2, so the number of passes through the 4th loop for a given k will be less than:
And if we add over all k's from 2 to infinity, the total will be:
So, adding all the passes for k = 1 and k >= 2, we have a maximum of:
Note that d=M/N=1/p.
So we have two limits: one that goes to infinity as d = 1/p = M/N goes to 1, and another that goes to infinity as d goes to infinity. Our limit is therefore the minimum of the two, and the worst case is where the two expressions cross. So, solving the equation:
we see that the maximum is when d=1.353
So it is demonstrated that the fourth loop will be executed less than 1.55*N^2 times in total.
Of course, this is for the average case. For the worst case, I have not been able to find a way to generate series whose fourth-loop count exceeds O(N^2), and I strongly believe such series do not exist, but I am not mathematician enough to prove it.
Old Answer
Here is a solution with average complexity O(n^2 * cube_root(M)), where M is the difference between the first and last elements of the array, and with memory requirements of O(M+N).
1.- Construct an array H of length M so that H[i - S[0]] = true if i exists in the initial array and false if it does not.
2.- For each pair in the array S[j], S[i] do:
2.1 Check if they can be the first and third elements of a possible solution. To do so, calculate all possible A,K pairs that meet the equation S(i) = S(j) + AK + AK^2 (check this SO question to see how to solve this problem; a small sketch also follows this list), and check that the second element exists: S[j] + A*K.
2.2 Check also that the element one position beyond the best solution found so far exists. For example, if the best solution so far is 4 elements long, then check that the element S[j] + AK + AK^2 + AK^3 + AK^4 exists.
2.3 If 2.1 and 2.2 hold, then iterate to see how long this series is, and set it as the bestSolution if it is longer than the best one so far.
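For illustration, here is a small brute-force sketch (not part of the original answer) that enumerates the integer (A, K) pairs satisfying D = A*K + A*K^2 for a given difference D; the getAKs function in the code below finds the same pairs more cleverly, in roughly cube_root(D) iterations:
def ak_pairs(D):
    # Enumerate integer pairs (A, K) with D = A*K*(K + 1).
    # K*(K + 1) is always even, so an odd D has no solutions.
    pairs = []
    if D % 2 == 1:
        return pairs
    k = 1
    while k * (k + 1) <= D:
        if D % (k * (k + 1)) == 0:
            pairs.append((D // (k * (k + 1)), k))
        k += 1
    return pairs

print(ak_pairs(12))  # [(6, 1), (2, 2), (1, 3)]: 12 = 6*1*2 = 2*2*3 = 1*3*4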
Here is the code in javascript:
function getAKs(A) {
if (A / 2 != Math.floor(A / 2)) return [];
var solution = [];
var i;
var SR3 = Math.pow(A, 1 / 3);
for (i = 1; i <= SR3; i++) {
var B, C;
C = i;
B = A / (C * (C + 1));
if (B == Math.floor(B)) {
solution.push([B, C]);
}
B = i;
C = (-1 + Math.sqrt(1 + 4 * A / B)) / 2;
if (C == Math.floor(C)) {
solution.push([B, C]);
}
}
return solution;
}
function getBestGeometricSequence(S) {
var i, j, k;
var bestSolution = [];
var H = Array(S[S.length-1]-S[0]);
for (i = 0; i < S.length; i++) H[S[i] - S[0]] = true;
for (i = 0; i < S.length; i++) {
for (j = 0; j < i; j++) {
var PossibleAKs = getAKs(S[i] - S[j]);
for (k = 0; k < PossibleAKs.length; k++) {
var A = PossibleAKs[k][0];
var K = PossibleAKs[k][1];
var mustExistToBeBetter;
if (K==1) {
mustExistToBeBetter = S[j] + A * bestSolution.length;
} else {
mustExistToBeBetter = S[j] + A * K * (Math.pow(K,bestSolution.length) - 1)/(K-1);
}
if ((H[S[j] + A * K - S[0]]) && (H[mustExistToBeBetter - S[0]])) {
var possibleSolution=[S[j],S[j] + A * K,S[i]];
var exp = K * K * K;
var NextVal = S[i] + A * exp;
while (H[NextVal - S[0]] === true) {
possibleSolution.push(NextVal);
exp = exp * K;
NextVal = NextVal + A * exp;
}
if (possibleSolution.length > bestSolution.length) {
bestSolution = possibleSolution;
}
}
}
}
}
return bestSolution;
}
//var A= [ 1, 2, 3,5,7, 15, 27, 30,31, 81];
var A=[];
for (i=1;i<=3000;i++) {
A.push(i);
}
var sol=getBestGeometricSequence(A);
$("#result").html(JSON.stringify(sol));
You can check the code here: http://jsfiddle.net/6yHyR/1/
I maintain the other solution because I believe that it is still better when M is very big compared to N.
Just to start with something, here is a simple solution in JavaScript:
var input = [0.7, 1, 2, 3, 4, 7, 15, 27, 30, 31, 81],
output = [], indexes, values, i, index, value, i_max_length,
i1, i2, i3, j1, j2, j3, difference12a, difference23a, difference12b, difference23b,
scale_factor, common_ratio_1a, common_ratio_2a, common_ratio_1b, common_ratio_2b,
error, EPSILON = 1e-9, common_ratio_is_integer,
resultDiv = $("#result");
for (i1 = 0; i1 < input.length - 2; ++i1) {
for (i2 = i1 + 1; i2 < input.length - 1; ++i2) {
scale_factor = difference12a = input[i2] - input[i1];
for (i3 = i2 + 1; i3 < input.length; ++i3) {
difference23a = input[i3] - input[i2];
common_ratio_1a = difference23a / difference12a;
common_ratio_2a = Math.round(common_ratio_1a);
error = Math.abs((common_ratio_2a - common_ratio_1a) / common_ratio_1a);
common_ratio_is_integer = error < EPSILON;
if (common_ratio_2a > 1 && common_ratio_is_integer) {
indexes = [i1, i2, i3];
j1 = i2;
j2 = i3
difference12b = difference23a;
for (j3 = j2 + 1; j3 < input.length; ++j3) {
difference23b = input[j3] - input[j2];
common_ratio_1b = difference23b / difference12b;
common_ratio_2b = Math.round(common_ratio_1b);
error = Math.abs((common_ratio_2b - common_ratio_1b) / common_ratio_1b);
common_ratio_is_integer = error < EPSILON;
if (common_ratio_is_integer && common_ratio_2a === common_ratio_2b) {
indexes.push(j3);
j1 = j2;
j2 = j3
difference12b = difference23b;
}
}
values = [];
for (i = 0; i < indexes.length; ++i) {
index = indexes[i];
value = input[index];
values.push(value);
}
output.push(values);
}
}
}
}
if (output.length > 0) {
i_max_length = 0;
for (i = 1; i < output.length; ++i) {
if (output[i_max_length].length < output[i].length)
i_max_length = i;
}
for (i = 0; i < output.length; ++i) {
if (output[i_max_length].length == output[i].length)
resultDiv.append("<p>[" + output[i] + "]</p>");
}
}
Output:
[1, 3, 7, 15, 31]
I find the first three items of every subsequence candidate, calculate the scale factor and the common ratio from them, and if the common ratio is an integer, then I iterate over the remaining elements after the third one and add to the subsequence those which fit into the geometric progression defined by the first three items. As a last step, I select the subsequence(s) with the largest length.
In fact it is exactly the same question as Longest equally-spaced subsequence; you just have to consider the logarithm of your data. If the sequence is a, ak, ak^2, ak^3, the logarithmic values are ln(a), ln(a) + ln(k), ln(a) + 2ln(k), ln(a) + 3ln(k), so it is equally spaced. The converse is of course also true. There is a lot of different code in that question.
I don't think the special case a = 1 can be solved more efficiently than by adapting an algorithm from the question above.
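As a minimal sketch of the log-transform reduction (the sample numbers are just an illustration):
import math

seq = [2, 6, 18, 54]                      # a = 2, k = 3
logs = [math.log(x) for x in seq]
gaps = [b - a for a, b in zip(logs, logs[1:])]
print(gaps)                               # every gap is ln(3), about 1.0986
# Any longest-equally-spaced-subsequence routine can therefore be reused on the
# logs, with a small epsilon when comparing gaps because of floating-point error.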
Here is my solution in Javascript. It should be close to O(n^2), except maybe in some pathological cases.
function bsearch(Arr,Val, left,right) {
if (left == right) return left;
var m=Math.floor((left + right) /2);
if (Val <= Arr[m]) {
return bsearch(Arr,Val,left,m);
} else {
return bsearch(Arr,Val,m+1,right);
}
}
function findLongestGeometricSequence(S) {
var bestSolution=[];
var i,j,k;
var H={};
for (i=0;i<S.length;i++) H[S[i]]=true;
for (i=0;i<S.length;i++) {
for (j=0;j<i;j++) {
for (k=j+1;k<i;) {
var possibleSolution=[S[j],S[k],S[i]];
var K = (S[i] - S[k]) / (S[k] - S[j]);
var A = (S[k] - S[j]) * (S[k] - S[j]) / (S[i] - S[k]);
if ((Math.floor(K) == K) && (Math.floor(A)==A)) {
var exp = K * K * K;
var NextVal= S[i] + A * exp;
while (H[NextVal] === true) {
possibleSolution.push(NextVal);
exp = exp * K;
NextVal= NextVal + A * exp;
}
if (possibleSolution.length > bestSolution.length)
bestSolution=possibleSolution;
K--;
} else {
K=Math.floor(K);
}
if (K>0) {
var NextPossibleMidValue= (S[i] + K*S[j]) / (K +1);
k++;
if (S[k]<NextPossibleMidValue) {
k=bsearch(S,NextPossibleMidValue, k+1, i);
}
} else {
k=i;
}
}
}
}
return bestSolution;
}
function Run() {
var MyS= [0.7, 1, 2, 3, 4, 5,6,7, 15, 27, 30,31, 81];
var sol = findLongestGeometricSequence(MyS);
alert(JSON.stringify(sol));
}
Small Explanation
If we take three numbers of the array with S(j) < S(k) < S(i), then we can calculate a and k so that: S(k) = S(j) + a*k and S(i) = S(k) + a*k^2 (two equations and two unknowns). With that in mind, we can check whether there exists a number in the array such that S(next) = S(i) + a*k^3. If so, we continue checking for S(next2) = S(next) + a*k^4, and so on.
This would be an O(n^3) solution, but we can take advantage of the fact that k must be an integer in order to limit the S(k) points selected.
If a is known, then we can calculate S(k) directly, and we only need to check one number in the third loop, so this case is clearly O(n^2).
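As a quick sanity check of those two equations (the numeric triple is a hypothetical example):
# S(k) = S(j) + a*k and S(i) = S(k) + a*k^2 imply
#   k = (S(i) - S(k)) / (S(k) - S(j))   and   a = (S(k) - S(j)) / k
Sj, Sk, Si = 1, 3, 7           # sample triple with a = 1, k = 2
k = (Si - Sk) // (Sk - Sj)     # integer division; a valid triple divides evenly
a = (Sk - Sj) // k
assert Sk == Sj + a * k and Si == Sk + a * k * k
print(a, k)                    # -> 1 2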
I think this task is related to the recently posted Longest equally-spaced subsequence question. I've just modified my Python algorithm a little bit:
from math import sqrt
def add_precalc(precalc, end, ak, count, res, N):
    # Register the next expected element of a chain with step (a, k)
    # that currently ends at `end` and has `count` elements so far.
    a, k = ak
    # Prune using the current best length: the chain cannot grow enough
    # within the value range.
    if end + a * k ** res[1]["count"] > N:
        return
    x = end + a * k ** count
    if x > N or x < 0:
        return
    if precalc[x] is None:
        return
    if (a, k) not in precalc[x]:
        precalc[x][(a, k)] = count

def factors(n):
    # All ordered pairs (a, k) with a * k == n.
    res = []
    for x in range(1, int(sqrt(n)) + 1):
        if n % x == 0:
            y = n // x
            res.append((x, y))
            res.append((y, x))
    return res

def work(input):
    precalc = [None] * (max(input) + 1)
    for x in input:
        precalc[x] = {}
    N = max(input)
    res = ((0, 0), {"end": 0, "count": 0})
    for i, x in enumerate(input):
        for y in input[i::-1]:
            for a, k in factors(x - y):
                if (a, k) in precalc[x]:
                    continue
                add_precalc(precalc, x, (a, k), 2, res, N)
        for step, count in precalc[x].items():
            count += 1
            if count > res[1]["count"]:
                res = (step, {"end": x, "count": count})
            add_precalc(precalc, x, step, count, res, N)
        precalc[x] = None
    # Rebuild the best sequence from its last element, step and length.
    d = [res[1]["end"]]
    for x in range(res[1]["count"] - 1, 0, -1):
        d.append(d[-1] - res[0][0] * res[0][1] ** x)
    d.reverse()
    return d
Explanation
Traverse the array.
For each previous element of the array, calculate the factors of the difference between the current element and that previous element, then precalculate the next possible element of the sequence and save it into the precalc array.
So when arriving at element i, all possible sequences ending at element i are already in the precalc array; we only have to calculate the next possible element and save it to precalc.
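As a tiny illustration of this bookkeeping (hypothetical values, assuming 3 and 7 are both in the input; the first gap is x - y = a*k and the expected next element is x + a*k^2):
y, x = 3, 7
for a, k in [(1, 4), (4, 1), (2, 2)]:            # the factor pairs of x - y = 4
    print((a, k), "-> expect", x + a * k ** 2)   # (2, 2) -> expect 15, etc.
# When 15 is later reached, its stored count of 2 becomes 3 and the chain
# continues by expecting 15 + 2 * 2**3 = 31 (the sequence 3, 7, 15, 31).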
Currently there's one place in the algorithm that could be slow: the factorization of each difference. I think it could be made faster with two optimizations:
a more efficient factorization algorithm
a way to avoid looking at every previous element, using the fact that the array is sorted and sequences have already been precalculated
Python:
def subseq(a):
seq = []
aset = set(a)
for i, x in enumerate(a):
# elements after x
for j, x2 in enumerate(a[i+1:]):
j += i + 1 # enumerate starts j at 0, we want a[j] = x2
bk = x2 - x # b*k (assuming k and k's exponent start at 1)
# given b*k, bruteforce values of k
for k in range(1, bk + 1):
items = [x, x2] # our subsequence so far
nextdist = bk * k # what x3 - x2 should look like
while items[-1] + nextdist in aset:
items.append(items[-1] + nextdist)
nextdist *= k
if len(items) > len(seq):
seq = items
return seq
Running time is O(dn^3), where d is the (average?) distance between two elements,
and n is of course len(a).
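A quick usage check of subseq above, on arbitrary sample data (the hidden progression 1, 3, 7, 15, 31 has gaps 2, 4, 8, 16, i.e. b = 1 and k = 2):
print(subseq([1, 2, 3, 4, 7, 15, 31]))   # -> [1, 3, 7, 15, 31]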

Check if an array element is a sum of two earlier elements using recursion

I was solving practice questions from a book when I stumbled upon this one:
Describe a recursive algorithm that will check if an array A of
integers contains an integer A[i] that is the sum of two integers
that appear earlier in A, that is, such that
A[i] = A[j] + A[k] for j, k < i.
I have been thinking about this for a few hours but haven't been able to come up with a good recursive algorithm.
A recursive solution without any loops (pseudocode):
bool check (A, i, j, k)
if (A[j] + A[k] == A[i])
return true
else
if (k + 1 < j) return check (A, i, j, k + 1)
else if (j + 1 < i) return check (A, i, j + 1, 0)
else if (i + 1 < A.size) return check (A, i + 1, 1, 0)
else return false
The recursive function is called with check(A, 2, 1, 0). To keep the main part of the algorithm clear, it does not check whether the array initially has more than two elements.
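For completeness, here is a direct Python transcription of the pseudocode (a sketch; the recursion depth grows with the number of checked triples, so it only suits small arrays):
def check(A, i=2, j=1, k=0):
    # Mirrors the pseudocode above; assumes len(A) >= 3.
    if A[j] + A[k] == A[i]:
        return True
    if k + 1 < j:
        return check(A, i, j, k + 1)
    if j + 1 < i:
        return check(A, i, j + 1, 0)
    if i + 1 < len(A):
        return check(A, i + 1, 1, 0)
    return False

print(check([1, 2, 5, 7]))   # True: 7 == 2 + 5
print(check([1, 2, 5, 9]))   # False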
Not very efficient but..
search(A, j, k) {
for (int i = Math.max(j, k) + 1; i < A.length; i++) { // A[i] must appear after both A[j] and A[k]
if (A[i] == A[j] + A[k]) {
return i;
}
}
if (k + 1 == A.length) {
if (j + 1 < A.length) {
return search(A, j + 1, 0);
}
return -1; // not found
}
return search (A, j, k + 1);
}
Start the search with
search(A, 0, 0);
In Python. The first function, search, is less efficient (O(n^3)) but also gives j and k; the second one, search2, is more efficient (O(n^2)) but only returns i.
def search(A, i):
    # O(n^3): try all pairs (j, k) with j, k < i; recurse on a smaller i.
    for j in range(i):
        for k in range(i):
            if A[i] == (A[j] + A[k]):
                return i, j, k
    if i > 0:
        return search(A, i - 1)

def search2(A, i, sums):
    # O(n^2): `sums` holds the sums of all pairs of elements seen so far
    # (an element may pair with itself).
    if A[i] in sums:
        return i
    if i == len(A) - 1:
        return None
    for j in range(i + 1):
        sums.add(A[i] + A[j])
    return search2(A, i + 1, sums)

if __name__ == '__main__':
    print(search([1, 4, 3], 2))
    print(search([1, 3, 4], 2))
    print(search2([1, 4, 3], 0, set()))
    print(search2([1, 3, 4], 0, set()))
It will print:
None
(2, 0, 1)
None
2
/**
* Describe a recursive algorithm that will check if an array A of integers contains
* an integer A[i] that is the sum of two integers that appear earlier in A,
* that is, such that A[i] = A[j]+A[k] for j,k < i.
* @param A - array
* @param i - initial starting index (0)
* @param j - initial value for j (0)
* @param k - initial value for k (0)
* @param n - length of A - 1
* @return - true if A[i] is the sum of two earlier elements, false otherwise
*/
public boolean checkIfPreviousTwo(int[] A, int i, int j, int k, int n){
if(i >= n) return false;
if(j < i && k < i){
if(A[j] + A[k] == A[i]) return true;
return(
checkIfPreviousTwo(A, i, j + 1, k, n) ||
checkIfPreviousTwo(A, i, j, k + 1, n)
);
}
return checkIfPreviousTwo(A, i + 1, j, k, n);
}
This algorithm should be fairly efficient (well, O(n^2)):
import Data.Set (Set, empty, fromList, member, union)
-- Helper function (which does all the work)
hassum' :: (Ord a, Num a) => Set a -> [a] -> [a] -> Bool
-- Parameters:
-- 1. All known sums up to the current element
-- 2. The already handled elements
-- 3. The not yet checked elements
-- If there are no elements left to check, there is no sum
hassum' _ _ [] = False
-- Otherwise...
hassum' sums done (x:xs)
-- Check if the next element is a known sum
| x `member` sums = True
-- Otherwise calculate new possible sums and check the remaining elements
| otherwise = hassum' sums' done' xs
where sums' = sums `union` fromList [x+d | d <- done]
done' = x:done
-- Main function
hassum :: (Ord a, Num a) => [a] -> Bool
hassum as = hassum' empty [] as
I hope you can make sense of it even if you might not know Haskell.
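For readers who don't know Haskell, here is a rough Python equivalent of the same idea, keeping a set of all pairwise sums of the elements seen so far (a sketch, not the original answer's code):
def has_sum(a):
    sums, done = set(), []
    for x in a:
        if x in sums:          # x equals the sum of two earlier elements
            return True
        sums.update(x + d for d in done)
        done.append(x)
    return False

print(has_sum([1, 4, 3]))   # False
print(has_sum([1, 3, 4]))   # True: 4 == 1 + 3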
Here is the Java version; it also returns the indexes i, j, k.
The worst-case running time is O(N^2).
=1= using recursion
private static void findSum(Object[] nums, long k, int[] ids/* indexes*/) {
// walk from both sides towards center
int l = ids[0];
int r = ids[1];
if (l == r) {
ids[0] = -1;
ids[1] = -1;
return;
}
int sum = (Integer) nums[l] + (Integer) nums[r];
if (sum == k) {
return;
}
if (sum < k) {
ids[0]++;
} else {
ids[1]--;
}
findSum(nums, k, ids);
}
private static int binarySearchPositionIndexOf(List<Integer> list, int l, int r, int k) {
int m = (l + r) / 2;
if (m == l) { // end recursion
return r;
}
int mv = list.get(m);
if (mv == k) {
return m;
}
if (mv < k) {
return binarySearchPositionIndexOf(list, m, r, k);
}
return binarySearchPositionIndexOf(list, l, m, k);
}
private static void check(List<Integer> data, List<Integer> shadow, int i, int[] ids) {
if (i == data.size()) {
ids[0] = -1;
ids[1] = -1;
return;
}
// sort it in
int indexAfterSort = -1;
int v = data.get(i);
if (v >= data.get(i - 1)) {
indexAfterSort = i;
} else if (v <= data.get(0)) {
indexAfterSort = 0;
} else if (data.size() == 3) {
indexAfterSort = i - 1;
} else {
indexAfterSort = binarySearchPositionIndexOf(data, 0, i - 1, data.get(i));
}
if (indexAfterSort != i) {
data.add(indexAfterSort, data.remove(i));
shadow.add(indexAfterSort, shadow.remove(i));
}
// find sum
if (indexAfterSort >= 2) {
List<Integer> next = data.subList(0, indexAfterSort); //[)
ids[0] = 0;
ids[1] = next.size() - 1;
findSum(next.toArray(), data.get(indexAfterSort), ids);
}
// recursion
if (ids[0] == -1 && ids[1] == -1) {
check(data, shadow, i + 1, ids);
return;
}
ids[0] = shadow.get(ids[0]);
ids[1] = shadow.get(ids[1]);
ids[2] = i;
}
public static int[] check(final int[] array) {
List shadow = new LinkedList() {{
for (int i = 0; i < array.length; i++) {
add(i);
}
}};
if (array[0] > array[1]) {
array[0] ^= array[1];
array[1] ^= array[0];
array[0] ^= array[1];
shadow.add(0, shadow.remove(1));
}
int[] resultIndex = new int[3];
resultIndex[0] = -1;
resultIndex[1] = -1;
check(new LinkedList<Integer>() {{
for (int i = 0; i < array.length; i++) {
add(array[i]);
}
}}, shadow, 2, resultIndex);
return resultIndex;
}
Test
@Test(timeout = 10L, expected = Test.None.class)
public void test() {
int[] array = new int[]{4, 10, 15, 2, 7, 1, 20, 25};
int[] backup = array.clone();
int[] result = check(array);
Assert.assertEquals(backup[result[2]], 25);
Assert.assertEquals(result[2], 7);
Assert.assertEquals(backup[result[0]], 10);
Assert.assertEquals(result[0], 1);
Assert.assertEquals(backup[result[1]], 15);
Assert.assertEquals(result[1], 2);
array = new int[]{4, 10, 15, 2, 7, 1, 10, 125};
backup = array.clone();
result = check(array);
Assert.assertEquals(result[0], -1);
Assert.assertEquals(result[1], -1);
}
=2= simple one without recursion:
// running time n + n^2
// O(n^2)
public static int[] check2(final int[] array) {
int[] r = new int[3];
r[0] = -1;
r[1] = -1;
r[2] = -1;
Map<Integer, List<Integer>> map = new HashMap(array.length);
for (int i = 0; i < array.length; i++) {
int v = array[i];
List<Integer> ids = map.get(v);
if (ids == null) {
ids = new LinkedList();
}
ids.add(i);
map.put(v, ids);
}
for (int k = 0; k < array.length; k++) {
int K = array[k];
for (int j = 0; j < array.length; j++) {
int I = K - array[j];
if (map.keySet().contains(I)) {
List<Integer> ids = map.get(I);
for (int i : ids) {
if (i != j) {
r[0] = j;
r[1] = i;
r[2] = k;
return r;
}
}
}
}
}
return r;
}
Test:
int[] array = new int[]{0,8,8};
int[] result = check2(array);
Assert.assertEquals(array[result[2]], 8);
Assert.assertEquals(result[2], 1);
Assert.assertEquals(array[result[0]], 0);
Assert.assertEquals(result[0], 0);
Assert.assertEquals(array[result[1]], 8);
Assert.assertEquals(result[1], 1);

Resources