Given a sorted list of numbers, I would like to find the longest subsequence where the differences between successive elements are geometrically increasing. So if the list is
1, 2, 3, 4, 7, 15, 27, 30, 31, 81
then the subsequence is 1, 3, 7, 15, 31. Alternatively consider 1, 2, 5, 6, 11, 15, 23, 41, 47 which has subsequence 5, 11, 23, 47 with a = 3 and k = 2.
Can this be solved in O(n²) time, where n is the length of the list?
I am interested both in the general case where the progression of differences is ak, ak², ak³, etc., where both a and k are integers, and in the special case where a = 1, so the progression of differences is k, k², k³, etc.
Update
I have improved the algorithm so that it takes an average of O(M + N^2) time and O(M + N) memory. It is essentially the same procedure as described below, but to calculate the possible factors A, K for each difference D, I preload a table. This table takes less than a second to construct for M = 10^7.
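For illustration only (this is my own sketch, not the C code below; the function name and layout are hypothetical), the table can be preloaded by listing, for every even difference D up to M, all pairs (a, k) with D = a*k + a*k^2 = a*k*(k+1):

# Sketch of the factor-table preload, assuming differences up to M and
# the relation D = a*k + a*k^2 = a*k*(k+1); k = 1 covers every even D.
from collections import defaultdict
from math import isqrt

def build_factor_table(M):
    table = defaultdict(list)
    for D in range(2, M + 1, 2):
        table[D].append((D // 2, 1))      # a = D/2, k = 1
    for k in range(2, isqrt(M) + 1):
        a = 1
        D = a * k * (k + 1)
        while D <= M:
            table[D].append((a, k))
            a += 1
            D = a * k * (k + 1)
    return table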
I have made a C implementation that takes less than 10 minutes to solve N = 10^5 different random integer elements.
Here is the source code in C. To compile it, just run: gcc -O3 -o findgeo findgeo.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <memory.h>
#include <time.h>
struct Factor {
int a;
int k;
struct Factor *next;
};
struct Factor *factors = 0;
int factorsL=0;
void ConstructFactors(int R) {
int a,k,C;
int R2;
struct Factor *f;
float seconds;
clock_t end;
clock_t start = clock();
if (factors) free(factors);
factors = malloc (sizeof(struct Factor) *((R>>1) + 1));
R2 = R>>1 ;
for (a=0;a<=R2;a++) {
factors[a].a= a;
factors[a].k=1;
factors[a].next=NULL;
}
factorsL=R2+1;
R2 = floor(sqrt(R));
for (k=2; k<=R2; k++) {
a=1;
C=a*k*(k+1);
while (C<R) {
C >>= 1;
f=malloc(sizeof(struct Factor));
*f=factors[C];
factors[C].a=a;
factors[C].k=k;
factors[C].next=f;
a++;
C=a*k*(k+1);
}
}
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Construct Table: %f\n",seconds);
}
void DestructFactors() {
int i;
struct Factor *f;
for (i=0;i<factorsL;i++) {
while (factors[i].next) {
f=factors[i].next->next;
free(factors[i].next);
factors[i].next=f;
}
}
free(factors);
factors=NULL;
factorsL=0;
}
int ipow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp & 1)
result *= base;
exp >>= 1;
base *= base;
}
return result;
}
void findGeo(int **bestSolution, int *bestSolutionL,int *Arr, int L) {
int i,j,D;
int mustExistToBeBetter;
int R=Arr[L-1]-Arr[0];
int *possibleSolution;
int possibleSolutionL=0;
int exp;
int NextVal;
int idx;
int kMax,aMax;
float seconds;
clock_t end;
clock_t start = clock();
kMax = floor(sqrt(R));
aMax = floor(R/2);
ConstructFactors(R);
*bestSolutionL=2;
*bestSolution=malloc(0);
possibleSolution = malloc(sizeof(int)*(R+1));
struct Factor *f;
int *H=malloc(sizeof(int)*(R+1));
memset(H,0, sizeof(int)*(R+1));
for (i=0;i<L;i++) {
H[ Arr[i]-Arr[0] ]=1;
}
for (i=0; i<L-2;i++) {
for (j=i+2; j<L; j++) {
D=Arr[j]-Arr[i];
if (D & 1) continue;
f = factors + (D >>1);
while (f) {
idx=Arr[i] + f->a * f->k - Arr[0];
if ((f->k <= kMax)&& (f->a<aMax)&&(idx<=R)&&H[idx]) {
if (f->k ==1) {
mustExistToBeBetter = Arr[i] + f->a * (*bestSolutionL);
} else {
mustExistToBeBetter = Arr[i] + f->a * f->k * (ipow(f->k,*bestSolutionL) - 1)/(f->k-1);
}
if (mustExistToBeBetter< Arr[L-1]+1) {
idx= floor(mustExistToBeBetter - Arr[0]);
} else {
idx = R+1;
}
if ((idx<=R)&&H[idx]) {
possibleSolution[0]=Arr[i];
possibleSolution[1]=Arr[i] + f->a*f->k;
possibleSolution[2]=Arr[j];
possibleSolutionL=3;
exp = f->k * f->k * f->k;
NextVal = Arr[j] + f->a * exp;
idx=NextVal - Arr[0];
while ( (idx<=R) && H[idx]) {
possibleSolution[possibleSolutionL]=NextVal;
possibleSolutionL++;
exp = exp * f->k;
NextVal = NextVal + f->a * exp;
idx=NextVal - Arr[0];
}
if (possibleSolutionL > *bestSolutionL) {
free(*bestSolution);
*bestSolution = possibleSolution;
possibleSolution = malloc(sizeof(int)*(R+1));
*bestSolutionL=possibleSolutionL;
kMax= floor( pow (R, 1.0 / (*bestSolutionL) ));
aMax= floor(R / (*bestSolutionL));
}
}
}
f=f->next;
}
}
}
if (*bestSolutionL == 2) {
free(*bestSolution);
possibleSolutionL=0;
for (i=0; (i<2)&&(i<L); i++ ) {
possibleSolution[possibleSolutionL]=Arr[i];
possibleSolutionL++;
}
*bestSolution = possibleSolution;
*bestSolutionL=possibleSolutionL;
} else {
free(possibleSolution);
}
DestructFactors();
free(H);
end = clock();
seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("findGeo: %f\n",seconds);
}
int compareInt (const void * a, const void * b)
{
return *(int *)a - *(int *)b;
}
int main(void) {
int N=100000;
int R=10000000;
int *A = malloc(sizeof(int)*N);
int *Sol;
int SolL;
int i;
int *S=malloc(sizeof(int)*R);
for (i=0;i<R;i++) S[i]=i+1;
for (i=0;i<N;i++) {
int r = rand() % (R-i);
A[i]=S[r];
S[r]=S[R-i-1];
}
free(S);
qsort(A,N,sizeof(int),compareInt);
/*
int step = floor(R/N);
A[0]=1;
for (i=1;i<N;i++) {
A[i]=A[i-1]+step;
}
*/
findGeo(&Sol,&SolL,A,N);
printf("[");
for (i=0;i<SolL;i++) {
if (i>0) printf(",");
printf("%d",Sol[i]);
}
printf("]\n");
printf("Size: %d\n",SolL);
free(Sol);
free(A);
return EXIT_SUCCESS;
}
Demonstration
I will try to demonstrate that the algorithm I proposed is, on average, O(M + N^2) for a uniformly distributed random sequence. I am not a mathematician and I am not used to doing this kind of proof, so please feel free to correct any error you see.
There are 4 nested loops; the first two give the N^2 factor, and the M term comes from the construction of the possible-factors table.
The third loop is executed only once on average for each pair. You can see this by checking the size of the pre-calculated factors table: its total size is about M, so the average number of (A, K) entries per difference is M/M = 1.
So the proof comes down to checking that the fourth loop (the one that traverses the well-formed sequences) is executed at most O(N^2) times over all the pairs.
To demonstrate that, I will consider two cases: one where M >> N and another where M ~= N, where M is the maximum difference of the initial array: M = S(n) - S(1).
For the first case (M >> N), the probability of finding a coincidence is p = N/M. To start a sequence, both the second and the (b+1)-th elements must be present, where b is the length of the best sequence so far, so the loop is entered about N^2 * p^2 times. The average length of such a series (supposing an infinite series) is 1/(1-p), so the total number of executions of the loop is about N^2 * p^2 / (1-p), which is negligible compared to N^2 when M >> N. The problem arises when M ~= N.
Now let's consider the case where M ~= N. Let b be the length of the best sequence so far. For the case A = k = 1, the sequence must start before N - b, so the number of sequences will be N - b, and the number of passes through the loop will be at most (N - b) * b.
For A > 1 and k = 1 we can extrapolate this, where d is M/N (the average distance between numbers). Summing over all A's from 1 to d*N/b gives an upper limit.
For the cases where k >= 2, the sequence must start correspondingly earlier, so the loop is entered a correspondingly smaller number of times on average; summing over all A's from 1 to d*N/k^b gives a limit.
Here, the worst case is when b is minimal. Because we are considering minimal series, let's take the very worst case of b = 2, so the number of passes through the 4th loop for a given k is bounded accordingly.
Adding this up for all k's from 2 to infinity gives a further bound.
So, adding all the passes for k = 1 and k >= 2, we have an overall maximum.
Note that d=M/N=1/p.
So we have two limits: one that goes to infinity when d = 1/p = M/N goes to 1, and another that goes to infinity when d goes to infinity. Our bound is the minimum of both, and the worst case is where the two curves cross. Solving that equation,
we find that the maximum is at d = 1.353.
So it is shown that the fourth loop is executed fewer than 1.55*N^2 times in total.
Of course, this is for the average case. For the worst case I have not been able to find a way to generate series whose fourth loop runs more than O(N^2) times, and I strongly believe such series do not exist, but I am not mathematician enough to prove it.
Old Answer
Here is a solution with an average running time of O(n^2 * cube_root(M)), where M is the difference between the first and last elements of the array, and memory requirements of O(M + N).
1.- Construct an array H of length M so that H[i - S[0]] = true if i exists in the initial array and false if it does not.
2.- For each pair S[j], S[i] in the array do:
2.1 Check if they can be the first and third elements of a possible solution. To do so, calculate all possible A, K pairs that meet the equation S(i) = S(j) + AK + AK^2. Check this SO question to see how to solve this problem. Then check that the second element exists: S[j] + A*K.
2.2 Also check that the element one position beyond our current best solution exists. For example, if the best solution so far is 4 elements long, then check that the element S[j] + AK + AK^2 + AK^3 + AK^4 exists.
2.3 If 2.1 and 2.2 are true, then iterate to see how long this series is, and set it as the best solution so far if it is longer than the current best.
Here is the code in javascript:
function getAKs(A) {
if (A / 2 != Math.floor(A / 2)) return [];
var solution = [];
var i;
var SR3 = Math.pow(A, 1 / 3);
for (i = 1; i <= SR3; i++) {
var B, C;
C = i;
B = A / (C * (C + 1));
if (B == Math.floor(B)) {
solution.push([B, C]);
}
B = i;
C = (-1 + Math.sqrt(1 + 4 * A / B)) / 2;
if (C == Math.floor(C)) {
solution.push([B, C]);
}
}
return solution;
}
function getBestGeometricSequence(S) {
var i, j, k;
var bestSolution = [];
var H = Array(S[S.length-1]-S[0]);
for (i = 0; i < S.length; i++) H[S[i] - S[0]] = true;
for (i = 0; i < S.length; i++) {
for (j = 0; j < i; j++) {
var PossibleAKs = getAKs(S[i] - S[j]);
for (k = 0; k < PossibleAKs.length; k++) {
var A = PossibleAKs[k][0];
var K = PossibleAKs[k][1];
var mustExistToBeBetter;
if (K==1) {
mustExistToBeBetter = S[j] + A * bestSolution.length;
} else {
mustExistToBeBetter = S[j] + A * K * (Math.pow(K,bestSolution.length) - 1)/(K-1);
}
if ((H[S[j] + A * K - S[0]]) && (H[mustExistToBeBetter - S[0]])) {
var possibleSolution=[S[j],S[j] + A * K,S[i]];
exp = K * K * K;
var NextVal = S[i] + A * exp;
while (H[NextVal - S[0]] === true) {
possibleSolution.push(NextVal);
exp = exp * K;
NextVal = NextVal + A * exp;
}
if (possibleSolution.length > bestSolution.length) {
bestSolution = possibleSolution;
}
}
}
}
}
return bestSolution;
}
//var A= [ 1, 2, 3,5,7, 15, 27, 30,31, 81];
var A=[];
for (i=1;i<=3000;i++) {
A.push(i);
}
var sol=getBestGeometricSequence(A);
$("#result").html(JSON.stringify(sol));
You can check the code here: http://jsfiddle.net/6yHyR/1/
I maintain the other solution because I believe that it is still better when M is very big compared to N.
Just to start with something, here is a simple solution in JavaScript:
var input = [0.7, 1, 2, 3, 4, 7, 15, 27, 30, 31, 81],
output = [], indexes, values, i, index, value, i_max_length,
i1, i2, i3, j1, j2, j3, difference12a, difference23a, difference12b, difference23b,
scale_factor, common_ratio_1a, common_ratio_2a, common_ratio_1b, common_ratio_2b,
error, EPSILON = 1e-9, common_ratio_is_integer,
resultDiv = $("#result");
for (i1 = 0; i1 < input.length - 2; ++i1) {
for (i2 = i1 + 1; i2 < input.length - 1; ++i2) {
scale_factor = difference12a = input[i2] - input[i1];
for (i3 = i2 + 1; i3 < input.length; ++i3) {
difference23a = input[i3] - input[i2];
common_ratio_1a = difference23a / difference12a;
common_ratio_2a = Math.round(common_ratio_1a);
error = Math.abs((common_ratio_2a - common_ratio_1a) / common_ratio_1a);
common_ratio_is_integer = error < EPSILON;
if (common_ratio_2a > 1 && common_ratio_is_integer) {
indexes = [i1, i2, i3];
j1 = i2;
j2 = i3
difference12b = difference23a;
for (j3 = j2 + 1; j3 < input.length; ++j3) {
difference23b = input[j3] - input[j2];
common_ratio_1b = difference23b / difference12b;
common_ratio_2b = Math.round(common_ratio_1b);
error = Math.abs((common_ratio_2b - common_ratio_1b) / common_ratio_1b);
common_ratio_is_integer = error < EPSILON;
if (common_ratio_is_integer && common_ratio_2a === common_ratio_2b) {
indexes.push(j3);
j1 = j2;
j2 = j3
difference12b = difference23b;
}
}
values = [];
for (i = 0; i < indexes.length; ++i) {
index = indexes[i];
value = input[index];
values.push(value);
}
output.push(values);
}
}
}
}
if (output.length > 0) {
i_max_length = 0;
for (i = 1; i < output.length; ++i) {
if (output[i_max_length].length < output[i].length)
i_max_length = i;
}
for (i = 0; i < output.length; ++i) {
if (output[i_max_length].length == output[i].length)
resultDiv.append("<p>[" + output[i] + "]</p>");
}
}
Output:
[1, 3, 7, 15, 31]
I find the first three items of every subsequence candidate, calculate the scale factor and the common ratio from them, and if the common ratio is an integer, then I iterate over the remaining elements after the third one and add to the subsequence those which fit into the geometric progression defined by the first three items. As a last step, I select the subsequence(s) with the largest length.
In fact it is exactly the same question as Longest equally-spaced subsequence; you just have to consider the logarithm of your data. If the sequence is a, ak, ak², ak³, its logarithmic values are ln(a), ln(a) + ln(k), ln(a) + 2ln(k), ln(a) + 3ln(k), so it is equally spaced. The opposite is of course true too. There is a lot of different code in the question linked above.
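To make the mapping concrete, here is a small sketch of mine (not from the answer) showing that a geometric sequence of values becomes equally spaced after taking logarithms, so a longest-equally-spaced-subsequence routine could be applied to the transformed values; it assumes strictly positive inputs and uses a floating-point tolerance:

import math

def is_geometric(seq, eps=1e-9):
    # After the log transform, a geometric progression has constant gaps.
    logs = [math.log(x) for x in seq]
    gaps = [b - a for a, b in zip(logs, logs[1:])]
    return all(abs(g - gaps[0]) < eps for g in gaps)

print(is_geometric([2, 6, 18, 54]))  # True: ratio k = 3
print(is_geometric([1, 2, 5, 6]))    # False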
I don't think the special case a = 1 can be solved more efficiently than by adapting an algorithm from the question above.
Here is my solution in JavaScript. It should be close to O(n²), except maybe in some pathological cases.
function bsearch(Arr,Val, left,right) {
if (left == right) return left;
var m=Math.floor((left + right) /2);
if (Val <= Arr[m]) {
return bsearch(Arr,Val,left,m);
} else {
return bsearch(Arr,Val,m+1,right);
}
}
function findLongestGeometricSequence(S) {
var bestSolution=[];
var i,j,k;
var H={};
for (i=0;i<S.length;i++) H[S[i]]=true;
for (i=0;i<S.length;i++) {
for (j=0;j<i;j++) {
for (k=j+1;k<i;) {
var possibleSolution=[S[j],S[k],S[i]];
var K = (S[i] - S[k]) / (S[k] - S[j]);
var A = (S[k] - S[j]) * (S[k] - S[j]) / (S[i] - S[k]);
if ((Math.floor(K) == K) && (Math.floor(A)==A)) {
exp= K*K*K;
var NextVal= S[i] + A * exp;
while (H[NextVal] === true) {
possibleSolution.push(NextVal);
exp = exp * K;
NextVal= NextVal + A * exp;
}
if (possibleSolution.length > bestSolution.length)
bestSolution=possibleSolution;
K--;
} else {
K=Math.floor(K);
}
if (K>0) {
var NextPossibleMidValue= (S[i] + K*S[j]) / (K +1);
k++;
if (S[k]<NextPossibleMidValue) {
k=bsearch(S,NextPossibleMidValue, k+1, i);
}
} else {
k=i;
}
}
}
}
return bestSolution;
}
function Run() {
var MyS= [0.7, 1, 2, 3, 4, 5,6,7, 15, 27, 30,31, 81];
var sol = findLongestGeometricSequence(MyS);
alert(JSON.stringify(sol));
}
Small Explanation
If we take 3 numbers of the array S(j) < S(k) < S(i), then you can calculate a and k so that S(k) = S(j) + a*k and S(i) = S(k) + a*k^2 (2 equations and 2 unknowns), giving k = (S(i) - S(k)) / (S(k) - S(j)) and a = (S(k) - S(j)) / k. With that in mind, you can check whether a number S(next) = S(i) + a*k^3 exists in the array. If it does, continue checking for S(next2) = S(next) + a*k^4 and so on.
This would be an O(n³) solution, but you can take advantage of the fact that k must be an integer in order to limit the S(k) points selected.
In case a is known, you can calculate k directly and you only need to check one number in the third loop, so this case is clearly O(n²).
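As a hedged sketch of that special case (my own code, not the answer's): with a = 1 the difference between the first and third elements forces k through S(i) - S(j) = k + k^2, so each pair needs only one membership test before extending, giving O(n²) pairs with constant extra work on average.

# Sketch for a = 1 (differences k, k^2, k^3, ...); ratios k >= 2 only.
from math import isqrt

def longest_a1(S):
    present = set(S)
    best = S[:2]
    for j in range(len(S)):
        for i in range(j + 2, len(S)):
            D = S[i] - S[j]
            k = (isqrt(1 + 4 * D) - 1) // 2   # solve k + k^2 = D
            if k < 2 or k * (k + 1) != D or S[j] + k not in present:
                continue
            cur, nxt, step = [S[j], S[j] + k, S[i]], S[i], k * k * k
            while nxt + step in present:
                nxt += step
                cur.append(nxt)
                step *= k
            if len(cur) > len(best):
                best = cur
    return best

print(longest_a1([1, 2, 3, 4, 7, 15, 27, 30, 31, 81]))  # [1, 3, 7, 15, 31]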
I think this task is related to the not-so-long-ago posted Longest equally-spaced subsequence. I've just modified my Python algorithm a little bit:
from math import sqrt
def add_precalc(precalc, end, (a, k), count, res, N):
if end + a * k ** res[1]["count"] > N: return
x = end + a * k ** count
if x > N or x < 0: return
if precalc[x] is None: return
if (a, k) not in precalc[x]:
precalc[x][(a, k)] = count
return
def factors(n):
res = []
for x in range(1, int(sqrt(n)) + 1):
if n % x == 0:
y = n / x
res.append((x, y))
res.append((y, x))
return res
def work(input):
precalc = [None] * (max(input) + 1)
for x in input: precalc[x] = {}
N = max(input)
res = ((0, 0), {"end":0, "count":0})
for i, x in enumerate(input):
for y in input[i::-1]:
for a, k in factors(x - y):
if (a, k) in precalc[x]: continue
add_precalc(precalc, x, (a, k), 2, res, N)
for step, count in precalc[x].iteritems():
count += 1
if count > res[1]["count"]: res = (step, {"end":x, "count":count})
add_precalc(precalc, x, step, count, res, N)
precalc[x] = None
d = [res[1]["end"]]
for x in range(res[1]["count"] - 1, 0, -1):
d.append(d[-1] - res[0][0] * res[0][1] ** x)
d.reverse()
return d
Explanation
Traversing the array
For each previous element of the array, calculate the factors of the difference between the current element and that previous element, then precalculate the next possible element of the sequence and save it to the precalc array.
So when arriving at element i, all possible sequences ending at element i are already in the precalc array, so we only have to calculate the next possible element and save it to precalc.
Currently there's one place in the algorithm that could be slow: the factorization of each previous number. I think it could be made faster with two optimizations:
a more efficient factorization algorithm
a way to avoid looking at every element of the array, using the fact that the array is sorted and that there are already precalculated sequences
Python:
def subseq(a):
seq = []
aset = set(a)
for i, x in enumerate(a):
# elements after x
for j, x2 in enumerate(a[i+1:]):
j += i + 1 # enumerate starts j at 0, we want a[j] = x2
bk = x2 - x # b*k (assuming k and k's exponent start at 1)
# given b*k, bruteforce values of k
for k in range(1, bk + 1):
items = [x, x2] # our subsequence so far
nextdist = bk * k # what x3 - x2 should look like
while items[-1] + nextdist in aset:
items.append(items[-1] + nextdist)
nextdist *= k
if len(items) > len(seq):
seq = items
return seq
Running time is O(dn^3), where d is the (average?) distance between two elements,
and n is of course len(a).
Related
I am looking for an algorithm that expresses a given number as a sum of (up to) four squares.
Examples
120 = 8² + 6² + 4² + 2²
6 = 0² + 1² + 1² + 2²
20 = 4² + 2² + 0² + 0²
My approach
Take the integer square root, subtract its square, and repeat for the remainder:
while (count != 4) {
root = (int) Math.sqrt(N)
N -= root * root
count++
}
But this fails when N is 23, even though there is a solution:
3² + 3² + 2² + 1²
Question
Is there any other algorithm to do that?
Is it always possible?
Always possible?
Yes, Lagrange's four-square theorem states that:
every natural number can be represented as the sum of four integer squares.
It has been proved in several ways.
Algorithm
There are some smarter algorithms, but I would suggest the following algorithm:
Factorise the number into prime factors. They don't have to be prime, but the smaller they are, the better: so primes are best. Then solve the task for each of these factors as below, and combine any resulting 4 squares with the previously found 4 squares using Euler's four-square identity (a quick numeric check of the identity is sketched after these steps).
(a² + b² + c² + d²)(A² + B² + C² + D²) =
(aA + bB + cC + dD)² +
(aB − bA + cD − dC)² +
(aC − bD − cA + dB)² +
(aD + bC − cB − dA)²
Given a number n (one of the factors mentioned above), get the greatest square that is not greater than n, and see if n minus this square can be written as the sum of three squares using Legendre's three-square theorem: this is possible if and only if the number is NOT of the following form:
4^a(8b + 7)
If this square is not found suitable, try the next smaller one, ... until you find one. It's guaranteed there will be one, and most are found within a few retries.
Try to find an actual second square term in the same way as in step 1, but now test its viability using Fermat's theorem on sums of two squares, which in essence means that:
if all the prime factors of n congruent to 3 modulo 4 occur to an even exponent, then n is expressible as a sum of two squares. The converse also holds.
If this square is not found suitable, try the next smaller one, ... until you find one. It's guaranteed there will be one.
Now we have a remainder after subtracting two squares. Try subtracting a third square until that yields another square, which means we have a solution. This step can be improved by first factoring out the largest square divisor. Then when the two square terms are identified, each can then be multiplied again by the square root of that square divisor.
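As a quick sanity check (my own addition, not part of the original answer), the Euler four-square identity quoted above can be verified numerically:

import random

def euler_four_square(a, b, c, d, A, B, C, D):
    # Combine representations of two numbers into one for their product.
    return (a*A + b*B + c*C + d*D,
            a*B - b*A + c*D - d*C,
            a*C - b*D - c*A + d*B,
            a*D + b*C - c*B - d*A)

for _ in range(1000):
    a, b, c, d, A, B, C, D = (random.randint(-9, 9) for _ in range(8))
    e, f, g, h = euler_four_square(a, b, c, d, A, B, C, D)
    assert (a*a + b*b + c*c + d*d) * (A*A + B*B + C*C + D*D) == e*e + f*f + g*g + h*h
print("Euler's four-square identity holds on 1000 random samples")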
This is roughly the idea. For finding prime factors there are several solutions. Below I will just use the Sieve of Eratosthenes.
This is JavaScript code, so you can run it immediately -- it will produce a random number as input and display it as the sum of four squares:
function divisor(n, factor) {
var divisor = 1;
while (n % factor == 0) {
n = n / factor;
divisor = divisor * factor;
}
return divisor;
}
function getPrimesUntil(n) {
// Prime sieve algorithm
var range = Math.floor(Math.sqrt(n)) + 1;
var isPrime = Array(n).fill(1);
var primes = [2];
for (var m = 3; m < range; m += 2) {
if (isPrime[m]) {
primes.push(m);
for (var k = m * m; k <= n; k += m) {
isPrime[k] = 0;
}
}
}
for (var m = range + 1 - (range % 2); m <= n; m += 2) {
if (isPrime[m]) primes.push(m);
}
return {
primes: primes,
factorize: function (n) {
var p, count, primeFactors;
// Trial division algorithm
if (n < 2) return [];
primeFactors = [];
for (p of this.primes) {
count = 0;
while (n % p == 0) {
count++;
n /= p;
}
if (count) primeFactors.push({value: p, count: count});
}
if (n > 1) {
primeFactors.push({value: n, count: 1});
}
return primeFactors;
}
}
}
function squareTerms4(n) {
var n1, n2, n3, n4, sq, sq1, sq2, sq3, sq4, primes, factors, f, f3, factors3, ok,
res1, res2, res3, res4;
primes = getPrimesUntil(n);
factors = primes.factorize(n);
res1 = n > 0 ? 1 : 0;
res2 = res3 = res4 = 0;
for (f of factors) { // For each of the factors:
n1 = f.value;
// 1. Find a suitable first square
for (sq1 = Math.floor(Math.sqrt(n1)); sq1>0; sq1--) {
n2 = n1 - sq1*sq1;
// A number can be written as a sum of three squares
// <==> it is NOT of the form 4^a(8b+7)
if ( (n2 / divisor(n2, 4)) % 8 !== 7 ) break; // found a possibility
}
// 2. Find a suitable second square
for (sq2 = Math.floor(Math.sqrt(n2)); sq2>0; sq2--) {
n3 = n2 - sq2*sq2;
// A number can be written as a sum of two squares
// <==> all its prime factors of the form 4a+3 have an even exponent
factors3 = primes.factorize(n3);
ok = true;
for (f3 of factors3) {
ok = (f3.value % 4 != 3) || (f3.count % 2 == 0);
if (!ok) break;
}
if (ok) break;
}
// To save time: extract the largest square divisor from the previous factorisation:
sq = 1;
for (f3 of factors3) {
sq *= Math.pow(f3.value, (f3.count - f3.count % 2) / 2);
f3.count = f3.count % 2;
}
n3 /= sq*sq;
// 3. Find a suitable third square
sq4 = 0;
// b. Find square for the remaining value:
for (sq3 = Math.floor(Math.sqrt(n3)); sq3>0; sq3--) {
n4 = n3 - sq3*sq3;
// See if this yields a sum of two squares:
sq4 = Math.floor(Math.sqrt(n4));
if (n4 == sq4*sq4) break; // YES!
}
// Incorporate the square divisor back into the step-3 result:
sq3 *= sq;
sq4 *= sq;
// 4. Merge this quadruple of squares with any previous
// quadruple we had, using the Euler square identity:
while (f.count--) {
[res1, res2, res3, res4] = [
Math.abs(res1*sq1 + res2*sq2 + res3*sq3 + res4*sq4),
Math.abs(res1*sq2 - res2*sq1 + res3*sq4 - res4*sq3),
Math.abs(res1*sq3 - res2*sq4 - res3*sq1 + res4*sq2),
Math.abs(res1*sq4 + res2*sq3 - res3*sq2 - res4*sq1)
];
}
}
// Return the 4 squares in descending order (for convenience):
return [res1, res2, res3, res4].sort( (a,b) => b-a );
}
// Produce the result for some random input number
var n = Math.floor(Math.random() * 1000000);
var solution = squareTerms4(n);
// Perform the sum of squares to see it is correct:
var check = solution.reduce( (a,b) => a+b*b, 0 );
if (check !== n) throw "FAILURE: difference " + n + " - " + check;
// Print the result
console.log(n + ' = ' + solution.map( x => x+'²' ).join(' + '));
The article by Michael Barr on the subject probably represents a more time-efficient method, but the text is more intended as a proof than an algorithm. However, if you need more time-efficiency you could consider that, together with a more efficient factorisation algorithm.
It's always possible -- it's a theorem in number theory called "Lagrange's four square theorem."
To solve it efficiently: the paper Randomized algorithms in number theory (Rabin, Shallit) gives a method that runs in expected O((log n)^2) time.
There is interesting discussion about the implementation here: https://math.stackexchange.com/questions/483101/rabin-and-shallit-algorithm
Found via Wikipedia: Lagrange's four-square theorem.
Here is a solution with four simple nested loops:
int max = (int)sqrt(N);
for (int i = 0; i <= max; i++)
    for (int j = 0; j <= max; j++)
        for (int k = 0; k <= max; k++)
            for (int l = 0; l <= max; l++)
                if (i*i + j*j + k*k + l*l == N) {
                    /* found: N = i*i + j*j + k*k + l*l */
                    break;  /* note: this only exits the innermost loop */
                }
So you can test any number. You can also add a break condition after the inner loops: if the partial sum already exceeds N, break out early.
const fourSquares = (n) => {
const result = [];
for (let i = 0; i <= n; i++) {
for (let j = 0; j <= n; j++) {
for (let k = 0; k <= n; k++) {
for (let l = 0; l <= n; l++) {
if (i * i + j * j + k * k + l * l === n) {
result.push(i, j, k, l);
return result;
}
}
}
}
}
return result;
}
It's running too long
const fourSquares = (n) => {
const result = [];
for (let i = 0; i <= n; i++) {
for (let j = 0; j <= (n - i * i); j++) {
for (let k = 0; k <= (n - i * i - j * j); k++) {
for (let l = 0; l <= (n - i * i - j * j - k * k); l++) {
if (i * i + j * j + k * k + l * l === n) {
result.push(i, j, k, l);
return result;
}
}
}
}
}
return result;
}
const fourSquares = (n) => {
const result = [];
for (let i = 0; i * i <= n; i++) {
for (let j = 0; j * j <= n; j++) {
for (let k = 0; k * k <= n; k++) {
for (let l = 0; l * l <= n; l++) {
if (i * i + j * j + k * k + l * l === n) {
result.push(i, j, k, l);
return result;
}
}
}
}
}
return result;
}
const fourSquares = (n) => {
let a = Math.sqrt(n);
let b = Math.sqrt(n - a * a);
let c = Math.sqrt(n - a * a - b * b);
let d = Math.sqrt(n - a * a - b * b - c * c);
if (n === a * a + b * b + c * c + d * d) {
return [a, b, c, d];
}
}
I'm looking for an algorithm which computes all permutations of a bitstring of given length (n) and number of bits set (k). For example, with n=4 and k=2 the algorithm should output:
1100
1010
1001
0011
0101
0110
I'm aware of Gosper's Hack which generates the needed permutations in lexicographic order. But I need them to be generated in such a manner that two consecutive permutations differ in only two (or at least a constant number of) bit positions (as in the example above).
Another bit hack to do that would be awesome, but an algorithmic description would also help me a lot.
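For reference, here is a common formulation of Gosper's Hack in Python (my transcription; it produces the plain lexicographic order that the question wants to move away from):

def gosper_next(x):
    # Next larger integer with the same number of set bits.
    c = x & -x
    r = x + c
    return (((r ^ x) >> 2) // c) | r

x = 0b0011                      # n = 4, k = 2, smallest pattern
for _ in range(6):              # C(4, 2) = 6 patterns
    print(format(x, '04b'))
    x = gosper_next(x)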
Walking bit algorithm
To generate permutations of a binary sequence by swapping exactly one set bit with an unset bit in each step (i.e. the Hamming distance between consecutive permutations equals two), you can use this "walking bit" algorithm; the way it works is similar to creating the (reverse) lexicographical order, but the set bits walk right and left alternately, and as a result some parts of the sequence are mirrored. This is probably best explained with an example.
Recursive implementation
A recursive algorithm would receive a sequence of n bits, with k bits set, either all on the left or all on the right. It would then keep a 1 at the end, recurse with the rest of the sequence, move the set bit and keep 01 at the end, recurse with the rest of the bits, move the set bit and keep 001 at the end, etc... until the last recursion with only set bits. As you can see, this creates alternating left-to-right and right-to-left recursions.
When the algorithm is called with a sequence with only one bit set, this is the deepest recursion level, and the set bit walks from one end to the other.
Code example 1
Here's a simple recursive JavaScript implementation:
function walkingBits(n, k) {
var seq = [];
for (var i = 0; i < n; i++) seq[i] = 0;
walk (n, k, 1, 0);
function walk(n, k, dir, pos) {
for (var i = 1; i <= n - k + 1; i++, pos += dir) {
seq[pos] = 1;
if (k > 1) walk(n - i, k - 1, i%2 ? dir : -dir, pos + dir * (i%2 ? 1 : n - i))
else document.write(seq + "<BR>");
seq[pos] = 0;
}
}
}
walkingBits(7,3);
Translated into C++ that could be something like this:
#include <iostream>
#include <string>
void walkingBits(int n, int k, int dir = 1, int pos = 0, bool top = true) {
static std::string seq;
if (top) seq.resize(n, '0');
for (int i = 1; i <= n - k + 1; i++, pos += dir) {
seq[pos] = '1';
if (k > 1) walkingBits(n - i, k - 1, i % 2 ? dir : -dir, pos + dir * (i % 2 ? 1 : n - i), false);
else std::cout << seq << '\n';
seq[pos] = '0';
}
if (top) seq.clear();
}
int main() {
walkingBits(7, 3);
}
(See also the C++11 version below, written by VolkerK in response to a question about the above code.)
(Rextester seems to have been hacked, so I've pasted Volker's code below.)
#include <iostream>
#include <vector>
#include <functional>
void walkingBits(size_t n, size_t k) {
std::vector<bool> seq(n, false);
std::function<void(const size_t, const size_t, const int, size_t)> walk = [&](const size_t n, const size_t k, const int dir, size_t pos){
for (size_t i = 1; i <= n - k + 1; i++, pos += dir) {
seq[pos] = true;
if (k > 1) {
walk(n - i, k - 1, i % 2 ? dir : -dir, pos + dir * (i % 2 ? 1 : n - i));
}
else {
for (bool v : seq) {
std::cout << v;
}
std::cout << std::endl;;
}
seq[pos] = false;
}
};
walk(n, k, 1, 0);
}
int main() {
walkingBits(7, 3);
return 0;
}
Code example 2
Or, if you prefer code where elements of an array are actually being swapped:
function walkingBits(n, k) {
var seq = [];
for (var i = 0; i < n; i++) seq[i] = i < k ? 1 : 0;
document.write(seq + "<BR>");
walkRight(n, k, 0);
function walkRight(n, k, pos) {
if (k == 1) for (var p = pos + 1; p < pos + n; p++) swap(p - 1, p)
else for (var i = 1; i <= n - k; i++) {
[walkLeft, walkRight][i % 2](n - i, k - 1, pos + i);
swap(pos + i - 1, pos + i + (i % 2 ? 0 : k - 1));
}
}
function walkLeft(n, k, pos) {
if (k == 1) for (var p = pos + n - 1; p > pos; p--) swap(p - 1, p)
else for (var i = 1; i <= n - k; i++) {
[walkRight, walkLeft][i % 2](n - i, k - 1, pos);
swap(pos + n - i - (i % 2 ? 1 : k), pos + n - i);
}
}
function swap(a, b) {
var c = seq[a]; seq[a] = seq[b]; seq[b] = c;
document.write(seq + "<BR>");
}
}
walkingBits(7,3);
Code example 3
Here the recursion is rolled out into an iterative implementation, with each of the set bits (i.e. each of the recursion levels) represented by an object {o,d,n,p} which holds the offset from the leftmost position, the direction the set bit is moving in, the number of bits (i.e. the length of this part of the sequence), and the current position of the set bit within this part.
function walkingBits(n, k) {
var b = 0, seq = [], bit = [{o: 0, d: 1, n: n, p: 0}];
for (var i = 0; i < n; i++) seq.push(0);
while (bit[0].p <= n - k) {
seq[bit[b].o + bit[b].p * bit[b].d] = 1;
while (++b < k) {
bit[b] = {
o: bit[b-1].o + bit[b-1].d * (bit[b-1].p %2 ? bit[b-1].n-1 : bit[b-1].p+1),
d: bit[b-1].d * (bit[b-1].p %2 ? -1 : 1),
n: bit[b-1].n - bit[b-1].p - 1,
p: 0
}
seq[bit[b].o + bit[b].p * bit[b].d] = 1;
}
document.write(seq + "<BR>");
b = k - 1;
do seq[bit[b].o + bit[b].p * bit[b].d] = 0;
while (++bit[b].p > bit[b].n + b - k && b--);
}
}
walkingBits(7, 3); // n >= k > 0
Transforming lexicographical order into walking bit
Because the walking bit algorithm is a variation of the algorithm to generate the permutations in (reverse) lexicographical order, each permutation in the lexicographical order can be transformed into its corresponding permutation in the walking bit order, by mirroring the appropriate parts of the binary sequence.
So you can use any algorithm (e.g. Gosper's Hack) to create the permutations in lexicographical or reverse lexicographical order, and then transform each one to get the walking bit order.
Practically, this means iterating over the binary sequence from left to right, and if you find a set bit after an odd number of zeros, reversing the rest of the sequence and iterating over it from right to left, and so on...
Code example 4
In the code below the permutations for n,k = 7,3 are generated in reverse lexicographical order, and then transformed one-by-one:
function lexi2walk(lex) {
var seq = [], ofs = 0, pos = 0, dir = 1;
for (var i = 0; i < lex.length; ++i) {
if (seq[ofs + pos * dir] = lex[i]) {
if (pos % 2) ofs -= (dir *= -1) * (pos + lex.length - 1 - i)
else ofs += dir * (pos + 1);
pos = 0;
} else ++pos;
}
return seq;
}
function revLexi(seq) {
var max = true, pos = seq.length, set = 1;
while (pos-- && (max || !seq[pos])) if (seq[pos]) ++set; else max = false;
if (pos < 0) return false;
seq[pos] = 0;
while (++pos < seq.length) seq[pos] = set-- > 0 ? 1 : 0;
return true;
}
var s = [1,1,1,0,0,0,0];
document.write(s + " → " + lexi2walk(s) + "<br>");
while (revLexi(s)) document.write(s + " → " + lexi2walk(s) + "<br>");
Homogeneous Gray path
The permutation order created by this algorithm is similar, but not identical, to the one created by the "homogeneous Gray path for combinations" algorithm described by D. Knuth in The Art of Computer Programming vol. 4a, sect. 7.2.1.3, formula (31) & fig. 26c.
This is easy to achieve with recursion:
public static void nextPerm(List<Integer> list, int num, int index, int n, int k) {
if(k == 0) {
list.add(num);
return;
}
if(index == n) return;
int mask = 1<<index;
nextPerm(list, num^mask, index+1, n, k-1);
nextPerm(list, num, index+1, n, k);
}
Running this with the client:
public static void main(String[] args) {
ArrayList<Integer> list = new ArrayList<Integer>();
nextPerm(list, 0, 0, 4, 2);
}
Output:
0011
0101
1001
0110
1010
1100
The idea is to start with the initial number, and consider changing a bit, one index at a time, and to keep track of how many times you changed the bits. Once you changed the bits k times (when k == 0), store the number and terminate the branch.
I've just done the following Codility Peaks problem. The problem is as follows:
A non-empty zero-indexed array A consisting of N integers is given.
A peak is an array element which is larger than its neighbors. More precisely, it is an index P such that 0 < P < N − 1, A[P − 1] < A[P] and A[P] > A[P + 1].
For example, the following array A:
A[0] = 1
A[1] = 2
A[2] = 3
A[3] = 4
A[4] = 3
A[5] = 4
A[6] = 1
A[7] = 2
A[8] = 3
A[9] = 4
A[10] = 6
A[11] = 2
has exactly three peaks: 3, 5, 10.
We want to divide this array into blocks containing the same number of elements. More precisely, we want to choose a number K that will yield the following blocks:
A[0], A[1], ..., A[K − 1],
A[K], A[K + 1], ..., A[2K − 1],
...
A[N − K], A[N − K + 1], ..., A[N − 1].
What's more, every block should contain at least one peak. Notice that extreme elements of the blocks (for example A[K − 1] or A[K]) can also be peaks, but only if they have both neighbors (including one in an adjacent block).
The goal is to find the maximum number of blocks into which the array A can be divided.
Array A can be divided into blocks as follows:
one block (1, 2, 3, 4, 3, 4, 1, 2, 3, 4, 6, 2). This block contains three peaks.
two blocks (1, 2, 3, 4, 3, 4) and (1, 2, 3, 4, 6, 2). Every block has a peak.
three blocks (1, 2, 3, 4), (3, 4, 1, 2), (3, 4, 6, 2). Every block has a peak.
Notice in particular that the first block (1, 2, 3, 4) has a peak at A[3], because A[2] < A[3] > A[4], even though A[4] is in the adjacent block.
However, array A cannot be divided into four blocks, (1, 2, 3), (4, 3, 4), (1, 2, 3) and (4, 6, 2), because the (1, 2, 3) blocks do not contain a peak. Notice in particular that the (4, 3, 4) block contains two peaks: A[3] and A[5].
The maximum number of blocks that array A can be divided into is three.
Write a function:
class Solution { public int solution(int[] A); }
that, given a non-empty zero-indexed array A consisting of N integers, returns the maximum number of blocks into which A can be divided.
If A cannot be divided into some number of blocks, the function should return 0.
For example, given:
A[0] = 1
A[1] = 2
A[2] = 3
A[3] = 4
A[4] = 3
A[5] = 4
A[6] = 1
A[7] = 2
A[8] = 3
A[9] = 4
A[10] = 6
A[11] = 2
the function should return 3, as explained above.
Assume that:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [0..1,000,000,000].
Complexity:
expected worst-case time complexity is O(N*log(log(N)))
expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
My Question
So I solved this with what appears to me to be the brute-force solution: go through every group size from 1..N, and check whether every group has at least one peak. For the first 15 minutes I was trying to figure out a more optimal way, since the required complexity is O(N*log(log(N))).
This is my "brute-force" code that passes all the tests, including the large ones, for a score of 100/100:
public int solution(int[] A) {
int N = A.length;
ArrayList<Integer> peaks = new ArrayList<Integer>();
for(int i = 1; i < N-1; i++){
if(A[i] > A[i-1] && A[i] > A[i+1]) peaks.add(i);
}
for(int size = 1; size <= N; size++){
if(N % size != 0) continue;
int find = 0;
int groups = N/size;
boolean ok = true;
for(int peakIdx : peaks){
if(peakIdx/size > find){
ok = false;
break;
}
if(peakIdx/size == find) find++;
}
if(find != groups) ok = false;
if(ok) return groups;
}
return 0;
}
My question is how do I deduce that this is in fact O(N*log(log(N))), as it's not at all obvious to me, and I was surprised I pass the test cases. I'm looking for even the simplest complexity proof sketch that would convince me of this runtime. I would assume that a log(log(N)) factor means some kind of reduction of a problem by a square root on each iteration, but I have no idea how this applies to my problem. Thanks a lot for any help
You're completely right: to get the log log performance the problem needs to be reduced.
An O(n*log(log(n))) solution in Python is below. Codility no longer tests 'performance' on this problem (!) but the Python solution scores 100% for accuracy.
As you've already surmised:
Outer loop will be O(n) since it is testing whether each size of block is a clean divisor
Inner loop must be O(log(log(n))) to give O(n log(log(n))) overall.
We can get good inner-loop performance because we only need to perform d(n) iterations, where d(n) is the number of divisors of n. We can store a prefix sum of peaks-so-far, which uses the O(n) space allowed by the problem specification. Checking whether a peak has occurred in each 'group' is then an O(1) lookup operation using the group start and end indices.
Following this logic, when the candidate block size is 3 the loop needs to perform n / 3 peak checks. The complexity becomes a sum: n/a + n/b + ... + n/n where the denominators (a, b, ...) are the factors of n.
Short story: the total cost of these checks over all d(n) divisors is O(n*log(log(n))).
Longer version:
If you've been doing the Codility Lessons you'll remember from Lesson 8: Prime and composite numbers that the sum of the harmonic series gives O(log(n)) complexity. Here we have a reduced set, because we're only looking at factor denominators. Lesson 9: Sieve of Eratosthenes shows how the sum of reciprocals of primes is O(log(log(n))) and claims that 'the proof is non-trivial'. In this case Wikipedia tells us that the sum of divisors, sigma(n), has such an upper bound (see Robin's inequality, about half way down the page).
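As a hedged illustration of that bound (my own check, not part of the answer): the inner-loop work summed over all candidate block sizes is sigma(n), the sum of n/d over the divisors d of n, and the ratio sigma(n) / (n*log(log(n))) stays small (its limsup is e^gamma ~ 1.78 by Grönwall's theorem).

from math import log, isqrt

def sigma(n):
    # Sum of n/d over all divisors d of n, i.e. the sum of divisors of n.
    total = 0
    for d in range(1, isqrt(n) + 1):
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d
    return total

for n in (360, 10**4, 10**5):
    print(n, sigma(n), round(sigma(n) / (n * log(log(n))), 3))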
Does that completely answer your question? Suggestions on how to improve my python code are also very welcome!
def solution(data):
length = len(data)
# array ends can't be peaks, length < 3 must return 0
if length < 3:
return 0
peaks = [0] * length
# compute a list of 'peaks to the left' in O(n) time
for index in range(2, length):
peaks[index] = peaks[index - 1]
# check if there was a peak to the left, add it to the count
if data[index - 1] > data[index - 2] and data[index - 1] > data[index]:
peaks[index] += 1
# candidate is the block size we're going to test
for candidate in range(3, length + 1):
# skip if not a factor
if length % candidate != 0:
continue
# test at each point n / block
valid = True
index = candidate
while index != length:
# if no peak in this block, break
if peaks[index] == peaks[index - candidate]:
valid = False
break
index += candidate
# one additional check since peaks[length] is outside of array
if index == length and peaks[index - 1] == peaks[index - candidate]:
valid = False
if valid:
return length / candidate
return 0
Credits:
Major kudos to @tmyklebu for his SO answer which helped me a lot.
I don't think that the time complexity of your algorithm is O(N*log(log(N))).
However, it is certainly much less than O(N^2). This is because your inner loop is entered only k times, where k is the number of factors of N. The number of factors of an integer can be seen at this link: http://www.cut-the-knot.org/blue/NumberOfFactors.shtml
I may be inaccurate but from the link it seems,
k ~ logN * logN * logN ...
Also, the inner loop has a complexity of O(N) since the number of peaks can be N/2 in the worst case.
Hence, in my opinion, the complexity of your algorithm is closer to O(N*logN), but that is evidently sufficient to clear all the test cases.
@radicality
There's at least one point where you can optimize the number of passes in the second loop to O(sqrt(N)) -- collect divisors of N and iterate through them only.
That will make your algo a little less "brute force".
Problem definition allows for O(N) space complexity. You can store divisors without violating this condition.
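A minimal sketch of that optimization (my own illustration, not part of the answer): collect all divisors of N in O(sqrt(N)) and iterate over those block counts only.

from math import isqrt

def divisors(n):
    # All divisors of n in O(sqrt(n)) time and O(d(n)) extra space.
    small, large = [], []
    for d in range(1, isqrt(n) + 1):
        if n % d == 0:
            small.append(d)
            if d != n // d:
                large.append(n // d)
    return small + large[::-1]

print(divisors(12))  # [1, 2, 3, 4, 6, 12]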
This is my solution based on prefix sums. Hope it helps:
class Solution {
public int solution(int[] A) {
int n = A.length;
int result = 1;
if (n < 3)
return 0;
int[] prefixSums = new int[n];
for (int i = 1; i < n-1; i++)
if (A[i] > A[i-1] && A[i] > A[i+1])
prefixSums[i] = prefixSums[i-1] + 1;
else
prefixSums[i] = prefixSums[i-1];
prefixSums[n-1] = prefixSums[n-2];
if (prefixSums[n-1] <= 1)
return prefixSums[n-1];
for (int i = 2; i <= prefixSums[n-2]; i++) {
if (n % i != 0)
continue;
int prev = 0;
boolean containsPeak = true;
for (int j = n/i - 1; j < n; j += n/i) {
if (prefixSums[j] == prev) {
containsPeak = false;
break;
}
prev = prefixSums[j];
}
if (containsPeak)
result = i;
}
return result;
}
}
def solution(A):
length = len(A)
if length <= 2:
return 0
peek_indexes = []
for index in range(1, length-1):
if A[index] > A[index - 1] and A[index] > A[index + 1]:
peek_indexes.append(index)
for block in range(3, int((length/2)+1)):
if length % block == 0:
index_to_check = 0
temp_blocks = 0
for peek_index in peek_indexes:
if peek_index >= index_to_check and peek_index < index_to_check + block:
temp_blocks += 1
index_to_check = index_to_check + block
if length/block == temp_blocks:
return temp_blocks
if len(peek_indexes) > 0:
return 1
else:
return 0
print(solution([1, 2, 3, 4, 3, 4, 1, 2, 3, 4, 6, 2, 1, 2, 5, 2]))
I just found the factors first,
then iterated over A and tested each possible number of blocks to see which gives the greatest block division.
This is the code that got 100 (in Java):
https://app.codility.com/demo/results/training9593YB-39H/
A javascript solution with complexity of O(N * log(log(N))).
function solution(A) {
let N = A.length;
if (N < 3) return 0;
let peaks = 0;
let peaksTillNow = [ 0 ];
let dividers = [];
for (let i = 1; i < N - 1; i++) {
if (A[i - 1] < A[i] && A[i] > A[i + 1]) peaks++;
peaksTillNow.push(peaks);
if (N % i === 0) dividers.push(i);
}
peaksTillNow.push(peaks);
if (peaks === 0) return 0;
let blocks;
let result = 1;
for (blocks of dividers) {
let K = N / blocks;
let prevPeaks = 0;
let OK = true;
for (let i = 1; i <= blocks; i++) {
if (peaksTillNow[i * K - 1] > prevPeaks) {
prevPeaks = peaksTillNow[i * K - 1];
} else {
OK = false;
break;
}
}
if (OK) result = blocks;
}
return result;
}
Solution with C# code
public int GetPeaks(int[] InputArray)
{
List<int> lstPeaks = new List<int>();
lstPeaks.Add(0);
for (int Index = 1; Index < (InputArray.Length - 1); Index++)
{
if (InputArray[Index - 1] < InputArray[Index] && InputArray[Index] > InputArray[Index + 1])
{
lstPeaks.Add(1);
}
else
{
lstPeaks.Add(0);
}
}
lstPeaks.Add(0);
int totalEqBlocksWithPeaks = 0;
for (int factor = 1; factor <= InputArray.Length; factor++)
{
if (InputArray.Length % factor == 0)
{
int BlockLength = InputArray.Length / factor;
int BlockCount = factor;
bool isAllBlocksHasPeak = true;
for (int CountIndex = 1; CountIndex <= BlockCount; CountIndex++)
{
int BlockStartIndex = CountIndex == 1 ? 0 : (CountIndex - 1) * BlockLength;
int BlockEndIndex = (CountIndex * BlockLength) - 1;
if (!(lstPeaks.GetRange(BlockStartIndex, BlockLength).Sum() > 0))
{
isAllBlocksHasPeak = false;
}
}
if (isAllBlocksHasPeak)
totalEqBlocksWithPeaks++;
}
}
return totalEqBlocksWithPeaks;
}
There is actually an O(n) runtime complexity solution for this task, so this is a humble attempt to share that.
The trick to go from the proposed O(n * loglogn) solutions to O(n) is to calculate the maximum gap between any two peaks (or a leading or trailing peak to the corresponding endpoint).
This can be done while building the peak hash in the first O(n) loop.
Then, if the gap between two consecutive peaks is 'g', the minimum group size must be 'g/2'. (It is simply 'g' between the start and the first peak, or between the last peak and the end.) Also, any group of size at least 'g' contains a peak, so the range of block sizes to check is: g/2, 1 + g/2, 2 + g/2, ..., g.
Therefore, the runtime is the sum of n/d over d = g/2, g/2 + 1, ..., g, where 'd' is the divisor (candidate block size):
(sum over d = g/2, 1 + g/2, ..., g of n/d) = n/(g/2) + n/(1 + g/2) + ... + n/g
If g = 10, this is n/5 + n/6 + n/7 + n/8 + n/9 + n/10 = n(1/5 + 1/6 + 1/7 + 1/8 + 1/9 + 1/10).
If you replace each term with the largest one, you get sum <= n * (1/5 + 1/5 + 1/5 + 1/5 + 1/5 + 1/5) = 6n/5.
Now, generalising this, every term is replaced with n/(g/2).
The number of terms from g/2 to g is 1 + g/2, since there are (g - g/2 + 1) of them.
So the whole sum is: n/(g/2) * (g/2 + 1) = n + 2n/g < 3n.
Therefore, the bound on the total number of operations is O(n).
The code, implementing this in C++, is here:
int solution(vector<int> &A)
{
int sizeA = A.size();
vector<bool> hash(sizeA, false);
int min_group_size = 2;
int pi = 0;
for (int i = 1; i < sizeA - 1; ++i) {
const int e = A[i];
if (e > A[i - 1] && e > A[i + 1]) {
hash[i] = true;
int diff = i - pi;
if (pi) diff /= 2;
if (diff > min_group_size) min_group_size = diff;
pi = i;
}
}
min_group_size = min(min_group_size, sizeA - pi);
vector<int> hash_next(sizeA, 0);
for (int i = sizeA - 2; i >= 0; --i) {
hash_next[i] = hash[i] ? i : hash_next[i + 1];
}
for (int group_size = min_group_size; group_size <= sizeA; ++group_size) {
if (sizeA % group_size != 0) continue;
int number_of_groups = sizeA / group_size;
int group_index = 0;
for (int peak_index = 0; peak_index < sizeA; peak_index = group_index * group_size) {
peak_index = hash_next[peak_index];
if (!peak_index) break;
int lower_range = group_index * group_size;
int upper_range = lower_range + group_size - 1;
if (peak_index > upper_range) {
break;
}
++group_index;
}
if (number_of_groups == group_index) return number_of_groups;
}
return 0;
}
var prev = 0, curr = 0, total = 0;
for (var i=1; i<A.length; i++) {
if (curr == 0) {
curr = A[i];
} else {
if(A[i] != curr) {
if (prev != 0) {
if ((prev < curr && A[i] < curr) || (prev > curr && A[i] > curr)) {
total += 1;
}
} else {
prev = curr;
total += 1;
}
prev = curr;
curr = A[i];
}
}
}
if(prev != curr) {
total += 1;
}
return total;
I agree with GnomeDePlume's answer... the part that looks for the divisors in the proposed solution is O(N), and that could be decreased to O(sqrt(N)) by using the algorithm provided in the lesson text.
So, just to add to it, here is my Java solution that solves the problem with the required complexity.
Be aware, it has way more code than yours - some cleanup (debug sysouts and comments) would always be possible :-)
public int solution(int[] A) {
int result = 0;
int N = A.length;
// mark accumulated peaks
int[] peaks = new int[N];
int count = 0;
for (int i = 1; i < N -1; i++) {
if (A[i-1] < A[i] && A[i+1] < A[i])
count++;
peaks[i] = count;
}
// set peaks count on last elem as it will be needed during div checks
peaks[N-1] = count;
// check count
if (count > 0) {
// if only one peak, will need the whole array
if (count == 1)
result = 1;
else {
// at this point (peaks > 1) we know at least the single group will satisfy the criteria
// so set result to 1, then check for bigger numbers of groups
result = 1;
// for each divisor of N, check if that number of groups work
Integer[] divisors = getDivisors(N);
// result will be at least 1 at this point
boolean candidate;
int divisor, startIdx, endIdx;
// check from top value to bottom - stop when one is found
// for div 1 we know num groups is 1, and we already know that is the minimum. No need to check.
// for div = N we know it's impossible, as all elements would have to be peaks (impossible by definition)
for (int i = divisors.length-2; i > 0; i--) {
candidate = true;
divisor = divisors[i];
for (int j = 0; j < N; j+= N/divisor) {
startIdx = (j == 0 ? j : j-1);
endIdx = j + N/divisor-1;
if (peaks[startIdx] == peaks[endIdx]) {
candidate = false;
break;
}
}
// if all groups had at least 1 peak, this is the result!
if (candidate) {
result = divisor;
break;
}
}
}
}
return result;
}
// returns ordered array of all divisors of N
private Integer[] getDivisors(int N) {
Set<Integer> set = new TreeSet<Integer>();
double sqrt = Math.sqrt(N);
int i = 1;
for (; i < sqrt; i++) {
if (N % i == 0) {
set.add(i);
set.add(N/i);
}
}
if (i * i == N)
set.add(i);
return set.toArray(new Integer[]{});
}
Thanks,
Davi
I was asked this question in a job interview, and I'd like to know how others would solve it. I'm most comfortable with Java, but solutions in other languages are welcome.
Given an array of numbers, nums, return an array of numbers products, where products[i] is the product of all nums[j], j != i.
Input : [1, 2, 3, 4, 5]
Output: [(2*3*4*5), (1*3*4*5), (1*2*4*5), (1*2*3*5), (1*2*3*4)]
= [120, 60, 40, 30, 24]
You must do this in O(N) without using division.
An explanation of polygenelubricants' method:
The trick is to construct the arrays (in the case for 4 elements):
{ 1, a[0], a[0]*a[1], a[0]*a[1]*a[2], }
{ a[1]*a[2]*a[3], a[2]*a[3], a[3], 1, }
Both of which can be done in O(n) by starting at the left and right edges respectively.
Then, multiplying the two arrays element-by-element gives the required result.
My code would look something like this:
int a[N] // This is the input
int products_below[N];
int p = 1;
for (int i = 0; i < N; ++i) {
products_below[i] = p;
p *= a[i];
}
int products_above[N];
p = 1;
for (int i = N - 1; i >= 0; --i) {
products_above[i] = p;
p *= a[i];
}
int products[N]; // This is the result
for (int i = 0; i < N; ++i) {
products[i] = products_below[i] * products_above[i];
}
If you need the solution be O(1) in space as well, you can do this (which is less clear in my opinion):
int a[N] // This is the input
int products[N];
// Get the products below the current index
int p = 1;
for (int i = 0; i < N; ++i) {
products[i] = p;
p *= a[i];
}
// Get the products above the current index
p = 1;
for (int i = N - 1; i >= 0; --i) {
products[i] *= p;
p *= a[i];
}
Here is a small recursive function (in C++) to do the modification in-place. It requires O(n) extra space (on stack) though. Assuming the array is in a and N holds the array length, we have:
int multiply(int *a, int fwdProduct, int indx) {
int revProduct = 1;
if (indx < N) {
revProduct = multiply(a, fwdProduct*a[indx], indx+1);
int cur = a[indx];
a[indx] = fwdProduct * revProduct;
revProduct *= cur;
}
return revProduct;
}
Here's my attempt to solve it in Java. Apologies for the non-standard formatting, but the code has a lot of duplication, and this is the best I can do to make it readable.
import java.util.Arrays;
public class Products {
static int[] products(int... nums) {
final int N = nums.length;
int[] prods = new int[N];
Arrays.fill(prods, 1);
for (int
i = 0, pi = 1 , j = N-1, pj = 1 ;
(i < N) && (j >= 0) ;
pi *= nums[i++] , pj *= nums[j--] )
{
prods[i] *= pi ; prods[j] *= pj ;
}
return prods;
}
public static void main(String[] args) {
System.out.println(
Arrays.toString(products(1, 2, 3, 4, 5))
); // prints "[120, 60, 40, 30, 24]"
}
}
The loop invariants are pi = nums[0] * nums[1] * ... * nums[i-1] and pj = nums[N-1] * nums[N-2] * ... * nums[j+1]. The i part on the left is the "prefix" logic, and the j part on the right is the "suffix" logic.
Recursive one-liner
Jasmeet gave a (beautiful!) recursive solution; I've turned it into this (hideous!) Java one-liner. It does in-place modification, with O(N) temporary space in the stack.
static int multiply(int[] nums, int p, int n) {
return (n == nums.length) ? 1
: nums[n] * (p = multiply(nums, nums[n] * (nums[n] = p), n + 1))
+ 0*(nums[n] *= p);
}
int[] arr = {1,2,3,4,5};
multiply(arr, 1, 0);
System.out.println(Arrays.toString(arr));
// prints "[120, 60, 40, 30, 24]"
Translating Michael Anderson's solution into Haskell:
otherProducts xs = zipWith (*) below above
where below = scanl (*) 1 $ init xs
above = tail $ scanr (*) 1 xs
Sneakily circumventing the "no divisions" rule:
from math import exp, log

# assumes all values are strictly positive
sum_logs = 0.0
for i in range(len(a)):
    sum_logs += log(a[i])
output = [0.0] * len(a)
for i in range(len(a)):
    output[i] = exp(sum_logs - log(a[i]))
Here you go, simple and clean solution with O(N) complexity:
int[] a = {1,2,3,4,5};
int[] r = new int[a.length];
int x = 1;
r[0] = 1;
for (int i=1;i<a.length;i++){
r[i]=r[i-1]*a[i-1];
}
for (int i=a.length-1;i>0;i--){
x=x*a[i];
r[i-1]=x*r[i-1];
}
for (int i=0;i<r.length;i++){
System.out.println(r[i]);
}
Travel left -> right and keep saving the product. Call it Past. -> O(n)
Travel right -> left and keep saving the product. Call it Future. -> O(n)
Result[i] = Past[i-1] * Future[i+1] -> O(n)
with Past[-1] = 1 and Future[n+1] = 1.
O(n) overall; a small sketch of this is below.
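A minimal Python sketch of this Past/Future idea (the function and variable names are mine):

def products_except_self(nums):
    n = len(nums)
    past = [1] * n       # past[i]   = nums[0] * ... * nums[i]
    future = [1] * n     # future[i] = nums[i] * ... * nums[n-1]
    past[0] = nums[0]
    for i in range(1, n):
        past[i] = past[i - 1] * nums[i]
    future[n - 1] = nums[n - 1]
    for i in range(n - 2, -1, -1):
        future[i] = future[i + 1] * nums[i]
    # Result[i] = Past[i-1] * Future[i+1], with Past[-1] = Future[n] = 1.
    return [(past[i - 1] if i > 0 else 1) * (future[i + 1] if i < n - 1 else 1)
            for i in range(n)]

print(products_except_self([1, 2, 3, 4, 5]))  # [120, 60, 40, 30, 24]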
C++, O(n):
long long prod = accumulate(in.begin(), in.end(), 1LL, multiplies<int>());
transform(in.begin(), in.end(), back_inserter(res),
bind1st(divides<long long>(), prod));
Here is my solution in modern C++. It makes use of std::transform and is pretty easy to remember.
Online code (wandbox).
#include<algorithm>
#include<iostream>
#include<vector>
using namespace std;
vector<int>& multiply_up(vector<int>& v){
v.insert(v.begin(),1);
transform(v.begin()+1, v.end()
,v.begin()
,v.begin()+1
,[](auto const& a, auto const& b) { return b*a; }
);
v.pop_back();
return v;
}
int main() {
vector<int> v = {1,2,3,4,5};
auto vr = v;
reverse(vr.begin(),vr.end());
multiply_up(v);
multiply_up(vr);
reverse(vr.begin(),vr.end());
transform(v.begin(),v.end()
,vr.begin()
,v.begin()
,[](auto const& a, auto const& b) { return b*a; }
);
for(auto& i: v) cout << i << " ";
}
Precalculate the product of the numbers to the left and to the right of each element.
For every element, the desired value is the product of its neighbours' products.
#include <stdio.h>
unsigned array[5] = { 1,2,3,4,5};
int main(void)
{
unsigned idx;
unsigned left[5]
, right[5];
left[0] = 1;
right[4] = 1;
/* calculate products of numbers to the left of [idx] */
for (idx=1; idx < 5; idx++) {
left[idx] = left[idx-1] * array[idx-1];
}
/* calculate products of numbers to the right of [idx] */
for (idx=4; idx-- > 0; ) {
right[idx] = right[idx+1] * array[idx+1];
}
for (idx=0; idx <5 ; idx++) {
printf("[%u] Product(%u*%u) = %u\n"
, idx, left[idx] , right[idx] , left[idx] * right[idx] );
}
return 0;
}
Result:
$ ./a.out
[0] Product(1*120) = 120
[1] Product(1*60) = 60
[2] Product(2*20) = 40
[3] Product(6*5) = 30
[4] Product(24*1) = 24
(UPDATE: now I look closer, this uses the same method as Michael Anderson, Daniel Migowski and polygenelubricants above)
Tricky:
Use the following:
public int[] calc(int[] params) {
    int n = params.length;
    int[] left = new int[n];   // left[i]  = product of params[0..i-1]
    int[] right = new int[n];  // right[i] = product of params[i+1..n-1]
    int fac1 = 1;
    int fac2 = 1;
    for (int i = 0; i < n; i++) {
        left[i] = fac1;
        right[n - 1 - i] = fac2;
        fac1 = fac1 * params[i];
        fac2 = fac2 * params[n - 1 - i];
    }
    int[] results = new int[n];
    for (int i = 0; i < n; i++) {
        results[i] = left[i] * right[i];
    }
    return results;
}
The off-by-one details (i versus i-1 in the indices) are the fiddly part, but that's the way to solve it.
This is O(n^2) but f# is soooo beautiful:
List.fold (fun seed i -> List.mapi (fun j x -> if i=j+1 then x else x*i) seed)
[1;1;1;1;1]
[1..5]
There is also an O(N^(3/2)) non-optimal solution. It is quite interesting, though.
First, preprocess the partial products of blocks of size N^0.5 (this is done in O(N) time). Then the product of all other values for each number can be computed in 2*O(N^0.5) time (why? because you only need to multiply the products of the other (N^0.5 - 1) blocks, and then multiply the result by the other (N^0.5 - 1) numbers that belong to the current number's block). Doing this for each number, one gets O(N^(3/2)) time.
Example:
4 6 7 2 3 1 9 5 8
partial results:
4*6*7 = 168
2*3*1 = 6
9*5*8 = 360
To calculate the result for the value 3, one multiplies the other groups' products, 168*360, and then by 2*1.
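Here is a hedged Python sketch of that idea (structure and names are mine): block products are precomputed, and each element multiplies the other blocks' products with the other members of its own block.

from math import isqrt

def products_except_self_sqrt(nums):
    n = len(nums)
    b = max(1, isqrt(n))                          # block size ~ sqrt(n)
    blocks = [nums[s:s + b] for s in range(0, n, b)]
    block_prod = []
    for blk in blocks:
        p = 1
        for x in blk:
            p *= x
        block_prod.append(p)
    res = []
    for bi, blk in enumerate(blocks):
        others = 1
        for bj, bp in enumerate(block_prod):      # product of the other blocks
            if bj != bi:
                others *= bp
        for i in range(len(blk)):
            p = others
            for j in range(len(blk)):             # other elements of this block
                if j != i:
                    p *= blk[j]
            res.append(p)
    return res

print(products_except_self_sqrt([4, 6, 7, 2, 3, 1, 9, 5, 8]))
# the entry for 3 is 168*360*2*1 = 120960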
public static void main(String[] args) {
int[] arr = { 1, 2, 3, 4, 5 };
int[] result = { 1, 1, 1, 1, 1 };
for (int i = 0; i < arr.length; i++) {
for (int j = 0; j < i; j++) {
result[i] *= arr[j];
}
for (int k = arr.length - 1; k > i; k--) {
result[i] *= arr[k];
}
}
for (int i : result) {
System.out.println(i);
}
}
This is the solution I came up with; I found it very clear. What do you think?
Based on Billz answer -- sorry I can't comment, but here is a Scala version that correctly handles duplicate items in the list. Note that rebuilding the product for every index makes it O(n^2) rather than O(n):
val list1 = List(1, 7, 3, 3, 4, 4)
val view = list1.view.zipWithIndex map { x => list1.view.patch(x._2, Nil, 1).reduceLeft(_*_)}
view.force
returns:
List(1008, 144, 336, 336, 252, 252)
Adding my JavaScript solution here, as I didn't see anyone else suggesting this approach.
What is division, if not counting how many times you can take one number out of another? I calculate the product of the whole array, then iterate over each element, repeatedly subtracting the current element from that product until nothing is left:
//No division operation allowed
// keep subtracting the divisor from the dividend until the dividend is less than the divisor
function calculateProducsExceptCurrent_NoDivision(input){
var res = [];
var totalProduct = 1;
//calculate the total product
for(var i = 0; i < input.length; i++){
totalProduct = totalProduct * input[i];
}
//populate the result array by "dividing" each value
for(var i = 0; i < input.length; i++){
var timesSubstracted = 0;
var divisor = input[i];
var dividend = totalProduct;
while(divisor <= dividend){
dividend = dividend - divisor;
timesSubstracted++;
}
res.push(timesSubstracted);
}
return res;
}
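Worth noting: each simulated division here takes totalProduct / input[i] subtraction steps, so the running time grows with the size of the product rather than with the length of the array; it works as a no-division puzzle answer but becomes impractical once the product is large.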
Just two passes, up and down, and the job is done in O(N):
private static int[] multiply(int[] numbers) {
int[] multiplied = new int[numbers.length];
int total = 1;
multiplied[0] = 1;
for (int i = 1; i < numbers.length; i++) {
multiplied[i] = numbers[i - 1] * multiplied[i - 1];
}
for (int j = numbers.length - 2; j >= 0; j--) {
total *= numbers[j + 1];
multiplied[j] = total * multiplied[j];
}
return multiplied;
}
def productify(arr, prod, i):
    # prod is filled with the left products on the way down; the return value
    # carries the product of arr[i:] back up so the right products get multiplied in.
    if i < len(arr):
        prod.append(arr[i - 1] * prod[i - 1]) if i > 0 else prod.append(1)
        retval = productify(arr, prod, i + 1)  # product of arr[i+1:]
        prod[i] *= retval
        return retval * arr[i]
    return 1
if __name__ == "__main__":
arr = [1, 2, 3, 4, 5]
prod = []
productify(arr, prod, 0)
print(prod)
Well, this solution can be considered C/C++-style.
Let's say we have an array "a" containing n elements, declared as a[n]; then the pseudocode would be as below.
for (j = 0; j < n; j++)
{
    prod[j] = 1;
    for (i = 0; i < n; i++)
    {
        if (i == j)
            continue;
        else
            prod[j] = prod[j] * a[i];
    }
}
One more solution, using division, with two traversals.
Multiply all the elements together, then divide that product by each element in turn.
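A quick Java sketch of that two-pass division idea (my own illustration; it assumes the array contains no zeros, since a zero would break the division approach):
public class DivisionTwoPass {
    static int[] products(int[] a) {
        int total = 1;
        for (int x : a) total *= x;         // first traversal: product of everything
        int[] result = new int[a.length];
        for (int i = 0; i < a.length; i++)  // second traversal: divide out each element
            result[i] = total / a[i];
        return result;
    }

    public static void main(String[] args) {
        for (int v : products(new int[] {1, 2, 3, 4, 5})) System.out.print(v + " ");  // 120 60 40 30 24
    }
}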
{-
   Recursive solution using sqrt(n) subsets.
   Recursively computes the solution on ceiling(sqrt(n)) subsets of size
   about sqrt(n). Then it recurses on the list of subset products.
   Then, for each element in each subset, it multiplies that element's
   within-subset result by the product of all the other subsets.
   Then it flattens all subsets.

   The run time satisfies T(n) = sqrt(n)*T(sqrt(n)) + T(sqrt(n)) + n.
   Each level of the recursion does O(n) work and the subproblem size drops
   from n to sqrt(n), so there are O(log log n) levels, for a total of
   O(n log log n) -- very close to linear in practice.

   Note that ceiling(sqrt(n)) can be computed with a binary search in
   O(log n) iterations if a sqrt instruction is not permitted.
-}
import Data.List (foldl')

otherProducts [] = []
otherProducts [x] = [1]
otherProducts [x,y] = [y,x]
otherProducts a = foldl' (++) [] $ zipWith (\s p -> map (*p) s) solvedSubsets subsetOtherProducts
where
n = length a
-- Subset size. Require that 1 < s < n.
s = ceiling $ sqrt $ fromIntegral n
solvedSubsets = map otherProducts subsets
subsetOtherProducts = otherProducts $ map product subsets
subsets = reverse $ loop a []
where loop [] acc = acc
loop a acc = loop (drop s a) ((take s a):acc)
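For example, otherProducts [1,2,3,4,5] should evaluate to [120,60,40,30,24].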
Here is my code:
#include <stdio.h>
int multiply(int a[],int n,int nextproduct,int i)
{
int prevproduct=1;
if(i>=n)
return prevproduct;
prevproduct=multiply(a,n,nextproduct*a[i],i+1);
printf(" i=%d > %d\n",i,prevproduct*nextproduct);
return prevproduct*a[i];
}
int main()
{
int a[]={2,4,1,3,5};
multiply(a,5,1,0);
return 0;
}
Here's a slightly functional example, using C#:
Func<long>[] backwards = new Func<long>[input.Length];
Func<long>[] forwards = new Func<long>[input.Length];
for (int i = 0; i < input.Length; ++i)
{
var localIndex = i;
backwards[i] = () => (localIndex > 0 ? backwards[localIndex - 1]() : 1) * input[localIndex];
forwards[i] = () => (localIndex < input.Length - 1 ? forwards[localIndex + 1]() : 1) * input[localIndex];
}
var output = new long[input.Length];
for (int i = 0; i < input.Length; ++i)
{
if (0 == i)
{
output[i] = forwards[i + 1]();
}
else if (input.Length - 1 == i)
{
output[i] = backwards[i - 1]();
}
else
{
output[i] = forwards[i + 1]() * backwards[i - 1]();
}
}
Note that because the created Funcs call each other in a chain and nothing is memoized, evaluating forwards[i] or backwards[i] walks all the way to the end of the array, so despite the two simple loops this is really O(n^2) in time rather than O(n).
To be complete here is the code in Scala:
val list1 = List(1, 2, 3, 4, 5)
for (elem <- list1) println(list1.filter(_ != elem) reduceLeft(_*_))
This will print out the following:
120
60
40
30
24
The program filters out the current elem (_ != elem) and multiplies the remaining list with the reduceLeft method. Be aware that this is O(n^2) (a full pass per element), and that filter removes every occurrence of a repeated value, so it only gives correct results when the elements are distinct.
// This is the recursive solution in Java
// Called as follows from main: product(a, 1, 0);
public static double product(double[] a, double fwdprod, int index){
double revprod = 1;
if (index < a.length){
revprod = product(a, fwdprod*a[index], index+1);
double cur = a[index];
a[index] = fwdprod * revprod;
revprod *= cur;
}
return revprod;
}
A neat solution with O(n) runtime:
For each element, calculate the product of all the elements that occur before it and store it in an array "pre".
For each element, calculate the product of all the elements that occur after it and store it in an array "post".
Create a final array "result"; for an element i,
result[i] = pre[i-1]*post[i+1];
Here is the Python version:
# This solution use O(n) time and O(n) space
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
N = len(nums)
if N == 0: return
# Initialize lists of 1s, size N
l_prods, r_prods = [1]*N, [1]*N
for i in range(1, N):
l_prods[i] = l_prods[i-1] * nums[i-1]
for i in reversed(range(N-1)):
r_prods[i] = r_prods[i+1] * nums[i+1]
result = [x*y for x,y in zip(l_prods,r_prods)]
return result
# This solution use O(n) time and O(1) space
def productExceptSelfSpaceOptimized(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
N = len(nums)
if N == 0: return
# Initialize list of 1s, size N
result = [1]*N
for i in range(1, N):
result[i] = result[i-1] * nums[i-1]
r_prod = 1
for i in reversed(range(N)):
result[i] *= r_prod
r_prod *= nums[i]
return result
I'm used to C#:
public int[] ProductExceptSelf(int[] nums)
{
int[] returnArray = new int[nums.Length];
List<int> auxList = new List<int>();
int multTotal = 0;
// If no zeros are contained in the array you only have to calculate it once
if(!nums.Contains(0))
{
multTotal = nums.ToList().Aggregate((a, b) => a * b);
for (int i = 0; i < nums.Length; i++)
{
returnArray[i] = multTotal / nums[i];
}
}
else
{
for (int i = 0; i < nums.Length; i++)
{
auxList = nums.ToList();
auxList.RemoveAt(i);
if (!auxList.Contains(0))
{
returnArray[i] = auxList.Aggregate((a, b) => a * b);
}
else
{
returnArray[i] = 0;
}
}
}
return returnArray;
}
Here is a simple Scala version in linear O(n) time:
def getProductEff(in:Seq[Int]):Seq[Int] = {
//create a list which has product of every element to the left of this element
val fromLeft = in.foldLeft((1, Seq.empty[Int]))((ac, i) => (i * ac._1, ac._2 :+ ac._1))._2
//create a list which has product of every element to the right of this element, which is the same as the previous step but in reverse
val fromRight = in.reverse.foldLeft((1,Seq.empty[Int]))((ac,i) => (i * ac._1,ac._2 :+ ac._1))._2.reverse
//merge the two list by product at index
in.indices.map(i => fromLeft(i) * fromRight(i))
}
This works because essentially the answer is an array in which each entry is the product of all elements to its left times the product of all elements to its right.
import java.util.Arrays;
public class Pratik
{
public static void main(String[] args)
{
int[] array = {2, 3, 4, 5, 6}; // OUTPUT: 360 240 180 144 120
int[] products = new int[array.length];
arrayProduct(array, products);
System.out.println(Arrays.toString(products));
}
public static void arrayProduct(int array[], int products[])
{
double sum = 0, EPSILON = 1e-9;
for(int i = 0; i < array.length; i++)
sum += Math.log(array[i]);
for(int i = 0; i < array.length; i++)
products[i] = (int) (EPSILON + Math.exp(sum - Math.log(array[i])));
}
}
OUTPUT:
[360, 240, 180, 144, 120]
Time complexity : O(n)
Space complexity: O(1)
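One caveat with the log/exp trick: it works in double precision, so once the products approach the limit of exactly representable doubles (around 10^15) the EPSILON rounding can no longer guarantee the exact integer result; for small inputs like the example it is fine.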
I am given an input "N" and I have to find the number of lists of length N that start with 1, such that each number appended is at most 1 more than the maximum number added so far. For example:
N = 3: the possible lists are (111, 112, 121, 122, 123). [113 or 131 is not possible, because when adding the '3' the maximum number already in the list is '1', so only 1 or 2 may be added.]
N = 4: the list 1213 is possible, because when the 3 is added the maximum number in the list is '2', so 3 can be added.
The problem is to count the number of such lists for a given input "N".
My code is:
public static void Main(string[] args)
{
var noOfTestCases = Convert.ToInt32(Console.ReadLine());
var listOfOutput = new List<long>();
for (int i = 0; i < noOfTestCases; i++)
{
var requiredSize = Convert.ToInt64(Console.ReadLine());
long result;
const long listCount = 1;
const long listMaxTillNow = 1;
if (requiredSize < 3)
result = requiredSize;
else
{
SeqCount.Add(requiredSize, 0);
AddElementToList(requiredSize, listCount, listMaxTillNow);
result = SeqCount[requiredSize];
}
listOfOutput.Add(result);
}
foreach (var i in listOfOutput)
{
Console.WriteLine(i);
}
}
private static Dictionary<long, long> SeqCount = new Dictionary<long, long>();
private static void AddElementToList(long requiredSize, long listCount, long listMaxTillNow)
{
if (listCount == requiredSize)
{
SeqCount[requiredSize] = SeqCount[requiredSize] + 1;
return;
}
var listMaxTillNowNew = listMaxTillNow + 1;
for(var i = listMaxTillNowNew; i > 0; i--)
{
AddElementToList(requiredSize, listCount + 1,
i == listMaxTillNowNew ? listMaxTillNowNew : listMaxTillNow);
}
return;
}
This is the brute force method. I would like to know what the best algorithm for this problem might be.
PS: I only need the number of such lists, so I am sure actually generating all of them (the way I am doing it in the code) should not be necessary.
I am not at all good at algorithms, so please excuse the long question.
This problem is a classic example of a dynamic programming problem:
If you define a function dp(k, m) to be the number of lists of length k for which the maximum number is m, then you have a recurrence relation:
dp(1, 1) = 1
dp(1, m) = 0, for m > 1
dp(k, m) = dp(k-1, m) * m + dp(k-1, m-1)
Indeed, there is only one list of length 1 and its maximum element is 1.
When you are building a list of length k with max element m, you can take any of the (k-1)-lists with max = m and append 1 or 2 or .... or m. Or you can take a (k-1)-list with max element m-1 and append m. If you take a (k-1)-list with max element less than m-1 then by your rule you can't get a max of m by appending just one element.
You can compute dp(k,m) for all k = 1,...,N and m = 1,...,N+1 using dynamic programming in O(N^2) and then the answer to your question would be
dp(N,1) + dp(N,2) + ... + dp(N,N+1)
Thus the algorithm is O(N^2). As a sanity check, for N = 3 this gives dp(3,1) + dp(3,2) + dp(3,3) = 1 + 3 + 1 = 5, matching the five lists in your example.
See below for the implementation of dp calculation in C#:
int[] arr = new int[N + 2];
for (int m = 1; m < N + 2; m++)
arr[m] = 0;
arr[1] = 1;
int[] newArr = new int[N + 2];
int[] tmp;
for (int k = 1; k < N; k++)
{
for (int m = 1; m < N + 2; m++)
newArr[m] = arr[m] * m + arr[m - 1];
tmp = arr;
arr = newArr;
newArr = tmp;
}
int answer = 0;
for (int m = 1; m < N + 2; m++)
answer += arr[m];
Console.WriteLine("The answer for " + N + " is " + answer);
Well, I got interrupted by a fire this afternoon (really!) but FWIW, here's my contribution:
/*
 * Counts the number of possible integer lists of length N, with the
 * property that no integer in a list (starting with one) may be more
 * than one greater than the greatest integer preceding it in the list.
*
* I am calling this "Semi-Factorial" since it is somewhat similar to
* the factorial function and its constituent integer combinations.
*/
public int SemiFactorial(int N)
{
int sumCounts = 0;
// get a list of the counts of all valid lists of length N,
//whose maximum integer is listCounts[maxInt].
List<int> listCounts = SemiFactorialCounts(N);
for (int maxInt = 1; maxInt <= N; maxInt++)
{
// Get the number of lists of length N whose maximum integer
// is (maxInt):
int maxIntCnt = listCounts[maxInt];
// just sum them up
sumCounts += maxIntCnt;
}
return sumCounts;
}
// Returns a list of the counts of all valid lists of length N, and
//whose maximum integer is [i], where [i] is also its index in this
//returned list. (0 is not used).
public List<int> SemiFactorialCounts(int N)
{
List<int> cnts;
if (N == 0)
{
// no valid lists,
cnts = new List<int>();
// (zero isn't used)
cnts.Add(0);
}
else if (N == 1)
{
// the only valid list is {1},
cnts = new List<int>();
// (zero isn't used)
cnts.Add(0);
//so that's one list of length 1
cnts.Add(1);
}
else
{
// start with the maxInt counts of lists whose length is N-1:
cnts = SemiFactorialCounts(N - 1);
// add an entry for (N)
cnts.Add(0);
// (reverse order because we overwrite the list using values
// from the next lower index.)
for (int K = N; K > 0; K--)
{
// The number of lists of length N and maxInt K { SF(N,K) }
// Equals K times # of lists one shorter, but same maxInt,
// Plus, the number of lists one shorter with maxInt-1.
cnts[K] = K * cnts[K] + cnts[K - 1];
}
}
return cnts;
}
Pretty similar to the others, though I wouldn't call this "classic dynamic programming" so much as just "classic recursion".