Is there a way to print out the order of removal in the Josephus problem in O(n log n)?
Example
With n = 7 people and a skip count of k = 3, the order of elimination would be:
3, 6, 2, 7, 5, 1, 4
There's an approach that uses an ordered set
(https://www.geeksforgeeks.org/ordered-set-gnu-c-pbds/):
Initialize an ordered set V, and insert the elements in the range [1, N] into V.
Initialize a variable, say pos as 0, to store the index of the removed element.
Iterate until the size of V is greater than 1, and perform the following steps:
Store the size of the set in a variable, say X
Update the value of pos to (pos + K) % X
Print the element pointed by pos in V and then erase it
Update pos to pos%V.size()
Print the last element stored at the beginning of set V
Here's the code:
#include <iostream>
using namespace std;
// Header files, namespaces to use
// ordered set
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace __gnu_pbds;
#define ordered_set \
tree<int, null_type, less<int>, rb_tree_tag, \
tree_order_statistics_node_update>
// Function to find the person who
// will get killed in the i'th step
void orderOfExecution(int N, int K)
{
// Create an ordered set
ordered_set V;
// Push elements in the range
// [1, N] in the set
for (int i = 1; i <= N; ++i)
V.insert(i);
// Stores the position to be removed
int pos = 0;
// Iterate until the size of the set
// is greater than 1
while (V.size() > 1) {
// Update the position
pos = (pos + K) % (int)V.size();
// Print the removed element
cout << *(V.find_by_order(pos)) << ' ';
// Erase it from the ordered set
V.erase(*(V.find_by_order(pos)));
// Update position
pos %= (int)V.size();
}
// Print the first element of the set
cout << *(V.find_by_order(0));
}
int main()
{
int N = 5, K = 2;
// Function Call
orderOfExecution(N, K);
return 0;
}
Time Complexity: O(N * log(N))
Note that in this code K is the number of people skipped before each removal, so the example above (n = 7, eliminating every 3rd person) corresponds to calling orderOfExecution(7, 2).
For better understanding, I recommend checking out this video:
https://youtu.be/KnsDFCcBJbY
You can build a segment tree that supports these operations:
M(p, v): Modify a[p] := v
S(l, r): Calculate a[l] + a[l+1] + ... + a[r]
F(p, k): Find the minimum x such that x >= p and S(1, x) >= k
All of the above operations can be done in O(log n) time.
Using these operations you can achieve the same result in O(n log n).
/*
#Author: SPyofgame
#License: Free to use
*/
#include <iostream>
#include <vector>
using namespace std;
/// Segment Tree Data Structure
struct Segtree
{
int n;
vector<int> t;
/// O(n)
/// Construct Segment Tree
void init(int lim)
{
for (n = 1; n < lim; n <<= 1);
t.assign(n << 1, 0);
}
/// O(log n)
/// Modify: a[pos] := v
void modify(int pos, int v, int ct, int lt, int rt)
{
if (rt - lt == 1)
{
t[ct] = v;
return ;
}
int mt = (lt + rt) >> 1;
if (mt > pos)
modify(pos, v, ct * 2 + 1, lt, mt);
else
modify(pos, v, ct * 2 + 2, mt, rt);
t[ct] = t[ct * 2 + 1] + t[ct * 2 + 2];
}
/// O(log n)
void modify(int pos, int v)
{
return modify(pos, v, 0, 0, n);
}
/// O(log n)
/// Query: Sigma(a[i] | l <= i <= r)
int query(int l, int r, int ct, int lt, int rt)
{
if (lt >= r || l >= rt) return 0;
if (lt >= l && r >= rt) return t[ct];
int mt = (lt + rt) >> 1;
int lv = query(l, r, ct * 2 + 1, lt, mt);
int rv = query(l, r, ct * 2 + 2, mt, rt);
return lv + rv;
}
/// O(log n)
int query(int l, int r)
{
return query(l, r + 1, 0, 0, n);
}
/// O(log n)
/// Search: Min(x | Query(1, x) >= k)
int search_for(int k, int ct, int lt, int rt)
{
if (k > t[ct]) return -1;
if (rt - lt == 1) return lt;
int mt = (lt + rt) >> 1;
int v = t[ct * 2 + 1];
int res = search_for(k - 0, ct * 2 + 1, lt, mt);
if (res == -1)
res = search_for(k - v, ct * 2 + 2, mt, rt);
return res;
}
/// O(log n)
int search_for(int k)
{
return search_for(k, 0, 0, n);
}
/// O(log n)
/// Search: Min(x | x >= pos & Query(1, x) >= k)
int search_for_at_least(int pos, int k)
{
return search_for(k + query(1, pos - 1), 0, 0, n);
}
};
int main()
{
// file("Test");
ios::sync_with_stdio(false);
cin.tie(NULL);
/// Input: Number of element and steps
int n, k;
cin >> n >> k;
Segtree T;
T.init(n + 1);
for (int x = 1; x <= n; ++x) /// O(n log n)
T.modify(x, 1);
int pos = 1;
for (int remain = n; remain >= 1; --remain) /// O(n log n)
{
/// Number of steps to advance from the current position
int step = k + 1;
/// Moving (remain) steps lands back on the same position, so reduce modulo remain
step %= remain;
if (step == 0) step = remain; /// A multiple of (remain) means a full cycle: move exactly (remain) steps
/// Count the survivors from the current position to the end
int right = T.query(pos, n);
/// If the target lies beyond them, wrap around to the front
if (step > right)
{
pos = 1; /// Restart from the first position
step -= right; /// Steps already spent on the right part
}
/// Locate the step-th survivor at or after pos
pos = T.search_for_at_least(pos, step);
T.modify(pos, 0);
cout << pos << " ";
}
return 0;
}
You can also use an iterative segment tree:
/*
#Author: SPyofgame
#License: Free to use
*/
#include <iostream>
using namespace std;
const int N = 1 << 18;
int T[N+N];
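// init: every leaf i (stored at i + N) is set to 1 (person i+1 is alive);
// internal nodes hold the number of alive people in their subtree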
void init(int n)
{
for (int i = 0; i < n; ++i) T[i + N] = 1;
for (int i = N - 1; i > 0; --i) T[i] = T[i << 1] + T[i << 1 | 1];
}
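// lower_bound(x): index (0-based) of the x-th alive person (1-based x),
// found by walking down from the root in O(log N)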
int lower_bound(int x, int p = 1)
{
while (p < N) if (T[p <<= 1] < x) x -= T[p++];
return p - N;
}
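// update(p, v): set leaf p (0-based) to v and refresh the sums on the path to the root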
void update(int p, int v)
{
for (T[p += N] = v; p > 1; p >>= 1) T[p >> 1] = T[p] + T[p ^ 1];
}
int main()
{
int n, k;
cin >> n >> k;
init(n);
for (int remain = n, pos = 0; remain > 0; --remain)
{
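// advance k positions among the `remain` survivors (0-based rank),
// then locate and remove the survivor holding that rank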
pos += remain + k;
pos %= remain;
int p = lower_bound(pos + 1);
cout << p + 1 << " ";
update(p, 0);
}
return 0;
}
Related
I have provided the code for sorting an array using the merge sort algorithm. I'm unable to find the error: this code is not giving the correctly sorted array as its output. The function mergesort is called recursively to divide the array until its size is reduced to 1. Then the subarrays are merged using the merge function.
#include <bits/stdc++.h>
using namespace std;
void merge(int a[], int m, int l, int h) {
int n1 = m - l + 1, n2 = h - m;
int t1[n1], t2[n2];
for (int i = 0; i < n1; i++) {
t1[i] = a[i + l];
}
for (int i = 0; i < n2; i++) {
t2[i] = a[i + m + 1];
}
int k = 0, p = 0, r = 0;
while (k < n1 && p < n2) {
if (t1[k] <= t2[p]) {
a[r] = t1[k];
k++;
r++;
} else {
a[r] = t2[p];
p++;
r++;
}
}
while (k < n1) {
a[r] = t1[k];
k++;
r++;
}
while (p < n2) {
a[r] = t2[p];
p++;
r++;
}
}
void mergesort(int a[], int l, int h) {
if (l < h) {
int m = l + (h - l) / 2;
mergesort(a, l, m);
mergesort(a, m + 1, h);
merge(a, m, l, h);
}
}
int main() {
int a[5] = { 1, 2, 3, 4, 5 };
mergesort(a, 0, 4);
for (int i = 0; i < 5; i++) {
cout << a[i] << " ";
}
return 0;
}
The bug in the merge function is r should be initialized to l, not 0. You are not merging the slices into the original position.
Also note that the last loop while (p < n2) in this function is redundant: the remaining elements in the right slice are already in the proper place in the original array.
Here is a modified version:
void merge(int a[], int m, int l, int h) {
int n1 = m - l + 1, n2 = h - m;
int t1[n1], t2[n2];
for (int i = 0; i < n1; i++) {
t1[i] = a[i + l];
}
for (int i = 0; i < n2; i++) {
t2[i] = a[i + m + 1];
}
int k = 0, p = 0, r = l;
while (k < n1 && p < n2) {
if (t1[k] <= t2[p]) {
a[r] = t1[k];
k++;
r++;
} else {
a[r] = t2[p];
p++;
r++;
}
}
while (k < n1) {
a[r] = t1[k];
k++;
r++;
}
}
To further simplify the code, here are some more remarks:
it is less confusing to use the convention that h is the first index beyond the end of the slice. This way the initial call uses the array length and mergesort can compute the slice length as h - l.
variable name l looks confusingly close to number 1.
the arguments to merge are usually in the order l, m, h, and m is the index of the start of the right slice.
the right slice does not need saving.
using variable length arrays with automatic storage t1[n2] may cause a stack overflow for large arrays.
Here is a modified version:
#include <bits/stdc++.h>
using namespace std;
void merge(int a[], int lo, int m, int hi) {
int i, j, k;
int n1 = m - lo;
int t1[n1];
for (i = 0; i < n1; i++) {
t1[i] = a[lo + i];
}
i = 0;
j = m;
k = lo;
while (i < n1 && j < hi) {
if (t1[i] <= a[j]) {
a[k++] = t1[i++];
} else {
a[k++] = a[j++];
}
}
while (i < n1) {
a[k++] = t1[i++];
}
}
void mergesort(int a[], int lo, int hi) {
if (hi - lo >= 2) {
int m = lo + (hi - lo) / 2;
mergesort(a, lo, m);
mergesort(a, m, hi);
merge(a, lo, m, hi);
}
}
int main() {
int a[5] = { 1, 5, 2, 4, 3 };
mergesort(a, 0, 5);
for (int i = 0; i < 5; i++) {
cout << a[i] << " ";
}
cout << "\n";
return 0;
}
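If you also want to address the last remark (the variable length array), one option is to keep the temporary left slice in a std::vector; below is a sketch of that variant of merge, not part of the original answer.
#include <vector>
// Sketch: the same merge as above, but the temporary copy of the left slice
// lives in a std::vector (heap storage) instead of a variable length array.
void merge(int a[], int lo, int m, int hi) {
    std::vector<int> t1(a + lo, a + m);  // copy of the left slice a[lo..m-1]
    int n1 = (int)t1.size();
    int i = 0, j = m, k = lo;
    while (i < n1 && j < hi) {
        if (t1[i] <= a[j]) a[k++] = t1[i++];
        else               a[k++] = a[j++];
    }
    while (i < n1) {  // flush whatever remains of the left slice
        a[k++] = t1[i++];
    }
}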
I am trying to find the number of distinct vectors with the following properties:
the elements are integers from 1 to k
d is the number of elements selected (the length of the vector)
v is the required sum of the elements
Examples
k=3, d=3, v=6, the result is 7;
<1, 2, 3>, <1, 3, 2>, <2, 1, 3>, <2, 2, 2>, <2, 3, 1>, <3, 1, 2>, <3, 2, 1>
k=4, d=2, v=7, the result is 2;
<3, 4>, <4, 3>
In this case, <2, 5> is not valid because 5 exceeds the value of k.
I want to find out whether there is a mathematical formula to calculate the result. If there isn't a formula, how efficiently can this be computed? I have found a rather mysterious implementation, but I wonder if it can be improved upon.
public static int NumberOfDistinctVectors(int k, int d ,int v) {
if((v > k * d) || (v < d)) return 0;
if(d == 1 || v == d) return 1;
if(v == d + 1) return d;
int alpha = 1, beta = 0;
if(1 < v + k - k * d)
alpha = v + k - k * d;
if(k < v - d + 1)
beta = k;
else
beta = v - d + 1;
int sum = 0;
for(int i = alpha; i <= beta; i++) {
sum += NumberOfDistinctVectors(k, d-1, v-i);
}
return sum;
}
The problem is closely related to the following:
What is the number of ways to distribute b identical objects into c groups
where no group contains more than n objects?
which is discussed here
Just think of your numbers as being built from (+1) objects. So in your case
c = d, because each group corresponds to one of your numbers
b = v-d, since you need to put at least one (+1) object into each of the d groups
n = k-1, since we assume a (+1) already in each group and don't want to get larger than k
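For reference, the closed form this corresponds to (standard inclusion-exclusion over the groups that receive more than n objects, and exactly what the code below evaluates) is:
sum over i >= 0 with i*(n+1) <= b of (-1)^i * C(c, i) * C(b + c - 1 - i*(n+1), c - 1)
For the second example (k = 4, d = 2, v = 7, so b = 5, c = 2, n = 3) this gives C(6, 1) - 2 * C(2, 1) = 6 - 4 = 2, matching the expected result.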
Find the code below (using apache-commons for C(N, K)):
public static int NumberOfDistinctVectors(int k, int d ,int v) {
return combinations(v-d, d, k-1);
}
//combinations to distribute b identical objects to c groups
//where no group has more than n objects
public static int combinations(int b, int c, int n)
{
int sum = 0;
for(int i = 0; i <= c; i++)
{
if(b+c-1-i*(n+1) >= c-1)
sum += Math.pow(-1, i) * CombinatoricsUtils.binomialCoefficient(c, i)
* CombinatoricsUtils.binomialCoefficient(b+c-1-i*(n+1), c-1);
}
return sum;
}
Let me also quote from the original answer:
"whether this is actually any more useful than the recurrence is
another question"
Here is another way of counting that may be more efficient. It is based on the formula for permutations with repetition. I have added comments to the code hoping they make it a bit easier to follow.
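For reference, the formula for permutations with repetition: if the d picked numbers consist of r_1 copies of one value, r_2 of another, and so on (with r_1 + r_2 + ... = d), the number of distinct orderings is d! / (r_1! * r_2! * ...). The num and den parameters below accumulate exactly this numerator and denominator as numbers are picked.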
public static int NumberOfDistinctVectors2(int k, int d, int v)
{
return NumberOfDistinctVectors2_rec(1, 0, k, d, v, 1, 1);
}
public static int NumberOfDistinctVectors2_rec(
int i, /* Current number being added */
int j, /* Amount of already picked numbers */
int k, /* Maximum number that can be picked */
int d, /* Total amount of numbers to pick */
int v, /* Remaining value */
long num, /* Numerator in "permutations with repetition" formula */
long den) /* Denominator in "permutations with repetition" formula */
{
// Amount of remaining numbers to pick
int rem = d - j;
// Remaining value is too big or too small
if (v < i * rem || v > k * rem) return 0;
// If no numbers to add then we are done
if (rem == 0) return Math.toIntExact(num / den);
// If only one number to add this can be used as a "shortcut"
if (rem == 1) return d * Math.toIntExact(num / den);
// Counted permutations
int count = 0;
// Maximum amount of repetitions for the current number
int maxRep = Math.min(v / i, rem);
// Factor to multiply the numerator
int numFactor = 1;
// Factor to multiply the denominator
int denFactor = 1;
// Consider adding repetitions of the current number
for (int r = 1; r <= maxRep; r++)
{
// The numerator is the factorial of the total amount of numbers
numFactor *= (j + r);
// The denominator is the product of the factorials of the number of repetitions of each number
denFactor *= r;
// We add "r" repetitions of the current number and count all possible permutations from there
count += NumberOfDistinctVectors2_rec(i + 1, j + r, k, d, v - i * r, num * numFactor, den * denFactor);
}
// Consider permutations that do not include the current number
count += NumberOfDistinctVectors2_rec(i + 1, j, k, d, v, num, den);
return count;
}
Here is a small class testing it where this method appears to be significantly faster (see it in Rextester).
class NumberOfDistinctVectorsTest
{
// Original method
public static int NumberOfDistinctVectors(int k, int d ,int v)
{
if((v > k * d) || (v < d)) return 0;
if(d == 1 || v == d) return 1;
if(v == d + 1) return d;
int alpha = 1, beta = 0;
if(1 < v + k - k * d)
alpha = v + k - k * d;
if(k < v - d + 1)
beta = k;
else
beta = v - d + 1;
int sum = 0;
for(int i = alpha; i <= beta; i++)
{
sum += NumberOfDistinctVectors(k, d-1, v-i);
}
return sum;
}
// New method
public static int NumberOfDistinctVectors2(int k, int d, int v)
{
return NumberOfDistinctVectors2_rec(1, 0, k, d, v, 1, 1);
}
public static int NumberOfDistinctVectors2_rec(int i, int j, int k, int d, int v, long num, long den)
{
int rem = d - j;
if (v < i * rem || v > k * rem) return 0;
if (rem == 0) return Math.toIntExact(num / den);
if (rem == 1) return d * Math.toIntExact(num / den);
int count = 0;
int maxRep = Math.min(v / i, rem);
int numFactor = 1;
int denFactor = 1;
for (int r = 1; r <= maxRep; r++)
{
numFactor *= (j + r);
denFactor *= r;
count += NumberOfDistinctVectors2_rec(i + 1, j + r, k, d, v - i * r, num * numFactor, den * denFactor);
}
count += NumberOfDistinctVectors2_rec(i + 1, j, k, d, v, num, den);
return count;
}
public static void main(final String[] args)
{
// Test 1
System.out.println(NumberOfDistinctVectors(3, 3, 6));
System.out.println(NumberOfDistinctVectors2(3, 3, 6));
// Test 2
System.out.println(NumberOfDistinctVectors(4, 2, 7));
System.out.println(NumberOfDistinctVectors2(4, 2, 7));
// Test 3
System.out.println(NumberOfDistinctVectors(12, 5, 20));
System.out.println(NumberOfDistinctVectors2(12, 5, 20));
// Test runtime
long startTime, endTime;
int reps = 100;
startTime = System.nanoTime();
for (int i = 0; i < reps; i++)
{
NumberOfDistinctVectors(12, 5, 20);
}
endTime = System.nanoTime();
double t1 = ((endTime - startTime) / (reps * 1000.));
startTime = System.nanoTime();
for (int i = 0; i < reps; i++)
{
NumberOfDistinctVectors2(12, 5, 20);
}
endTime = System.nanoTime();
double t2 = ((endTime - startTime) / (reps * 1000.));
System.out.println("Original method: " + t1 + "ms");
System.out.println("New method: " + t2 + "ms");
}
}
Output:
7
7
2
2
3701
3701
Original method: 45.64331ms
New method: 5.89364ms
EDIT: New test (run on JDoodle with Apache Commons 3.6.1) including SaiBot's answer:
import org.apache.commons.math3.util.CombinatoricsUtils;
public class NumberOfDistinctVectorsTest
{
// Original method
public static int NumberOfDistinctVectors(int k, int d ,int v)
{
if((v > k * d) || (v < d)) return 0;
if(d == 1 || v == d) return 1;
if(v == d + 1) return d;
int alpha = 1, beta = 0;
if(1 < v + k - k * d)
alpha = v + k - k * d;
if(k < v - d + 1)
beta = k;
else
beta = v - d + 1;
int sum = 0;
for(int i = alpha; i <= beta; i++)
{
sum += NumberOfDistinctVectors(k, d-1, v-i);
}
return sum;
}
// jdehesa method
public static int NumberOfDistinctVectors2(int k, int d, int v)
{
return NumberOfDistinctVectors2_rec(1, 0, k, d, v, 1, 1);
}
public static int NumberOfDistinctVectors2_rec(int i, int j, int k, int d, int v, long num, long den)
{
int rem = d - j;
if (v < i * rem || v > k * rem) return 0;
if (rem == 0) return Math.toIntExact(num / den);
if (rem == 1) return d * Math.toIntExact(num / den);
int count = 0;
int maxRep = Math.min(v / i, rem);
int numFactor = 1;
int denFactor = 1;
for (int r = 1; r <= maxRep; r++)
{
numFactor *= (j + r);
denFactor *= r;
count += NumberOfDistinctVectors2_rec(i + 1, j + r, k, d, v - i * r, num * numFactor, den * denFactor);
}
count += NumberOfDistinctVectors2_rec(i + 1, j, k, d, v, num, den);
return count;
}
// SaiBot method
public static int NumberOfDistinctVectors3(int k, int d ,int v)
{
return combinations(v-d, d, k-1);
}
//combinations to distribute b identical objects to c groups
//where no group has more than n objects
public static int combinations(int b, int c, int n)
{
int sum = 0;
for(int i = 0; i <= c; i++)
{
if(b+c-1-i*(n+1) >= c-1)
sum += Math.pow(-1, i) * CombinatoricsUtils.binomialCoefficient(c, i)
* CombinatoricsUtils.binomialCoefficient(b+c-1-i*(n+1), c-1);
}
return sum;
}
public static void main(final String[] args)
{
// Test 1
System.out.println(NumberOfDistinctVectors(3, 3, 6));
System.out.println(NumberOfDistinctVectors2(3, 3, 6));
System.out.println(NumberOfDistinctVectors3(3, 3, 6));
// Test 2
System.out.println(NumberOfDistinctVectors(4, 2, 7));
System.out.println(NumberOfDistinctVectors2(4, 2, 7));
System.out.println(NumberOfDistinctVectors3(4, 2, 7));
// Test 3
System.out.println(NumberOfDistinctVectors(12, 5, 20));
System.out.println(NumberOfDistinctVectors2(12, 5, 20));
System.out.println(NumberOfDistinctVectors3(12, 5, 20));
// Test runtime
long startTime, endTime;
int reps = 100;
startTime = System.nanoTime();
for (int i = 0; i < reps; i++)
{
NumberOfDistinctVectors(12, 5, 20);
}
endTime = System.nanoTime();
double t1 = ((endTime - startTime) / (reps * 1000.));
startTime = System.nanoTime();
for (int i = 0; i < reps; i++)
{
NumberOfDistinctVectors2(12, 5, 20);
}
endTime = System.nanoTime();
double t2 = ((endTime - startTime) / (reps * 1000.));
startTime = System.nanoTime();
for (int i = 0; i < reps; i++)
{
NumberOfDistinctVectors3(12, 5, 20);
}
endTime = System.nanoTime();
double t3 = ((endTime - startTime) / (reps * 1000.));
System.out.println("Original method: " + t1 + "ms");
System.out.println("jdehesa method: " + t2 + "ms");
System.out.println("SaiBot method: " + t3 + "ms");
}
}
Output:
7
7
7
2
2
2
3701
3701
3701
Original method: 97.81325ms
jdehesa method: 7.2753ms
SaiBot method: 2.70861ms
The timings are not very stable in JDoodle (I used it because it allows for Maven dependencies), but in general SaiBot's method is the fastest by far.
I am solving the problem of finding the number of max heaps that can be formed using n distinct integers (say 1..n). I have solved it using the following
recurrence, with some help from this: https://www.quora.com/How-many-Binary-heaps-can-be-made-from-N-distinct-elements :
T(N) = C(N-1, L) * T(L) * T(R), where L is the number of nodes in the left subtree and R = N-1-L is the number of nodes in the right subtree (for example, T(4) = C(3, 2) * T(2) * T(1) = 3). I have also implemented it in C++ using dynamic programming, but I am stuck on finding its time complexity. Can someone help me with this?
#include <iostream>
using namespace std;
#define MAXN 105 //maximum value of n here
int dp[MAXN]; //dp[i] = number of max heaps for i distinct integers
int nck[MAXN][MAXN]; //nck[i][j] = number of ways to choose j elements from i elements, order does not matter
int log2[MAXN]; //log2[i] = floor of logarithm of base 2 of i
//to calculate nCk
int choose(int n, int k)
{
if (k > n)
return 0;
if (n <= 1)
return 1;
if (k == 0)
return 1;
if (nck[n][k] != -1)
return nck[n][k];
int answer = choose(n-1, k-1) + choose(n-1, k);
nck[n][k] = answer;
return answer;
}
//calculate l for a given value of n
int getLeft(int n)
{
if (n == 1)
return 0;
int h = log2[n];
//max number of elements that can be present in the hth level of any heap
int numh = (1 << h); //(2 ^ h)
//number of elements that are actually present in last level(hth level)
//(2^h - 1)
int last = n - ((1 << h) - 1);
//if more than half-filled
if (last >= (numh / 2))
return (1 << h) - 1; // (2^h) - 1
else
return (1 << h) - 1 - ((numh / 2) - last);
}
//find maximum number of heaps for n
int numberOfHeaps(int n)
{
if (n <= 1)
return 1;
if (dp[n] != -1)
return dp[n];
int left = getLeft(n);
int ans = (choose(n-1, left) * numberOfHeaps(left)) * (numberOfHeaps(n-1-left));
dp[n] = ans;
return ans;
}
//function to initialize arrays
int solve(int n)
{
for (int i = 0; i <= n; i++)
dp[i] = -1;
for (int i = 0; i <= n; i++)
for (int j = 0; j <=n; j++)
nck[i][j] = -1;
int currLog2 = -1;
int currPower2 = 1;
//for each power of two find logarithm
for (int i = 1; i <= n; i++)
{
if (currPower2 == i)
{
currLog2++;
currPower2 *= 2;
}
log2[i] = currLog2;
}
return numberOfHeaps(n);
}
//driver function
int main()
{
int n=10;
cout << solve(n) << endl;
return 0;
}
Given an array of integers, we have to find the length of the longest subsequence such that the gcd of any two consecutive elements in the subsequence is greater than 1.
For example, if array = [12, 8, 2, 3, 6, 9]
then one such subsequence is {12, 8, 2, 6, 9}
and another is {12, 3, 6, 9}
I tried to solve this problem with dynamic programming. Assume that maxCount is an array such that maxCount[i] holds the length of the longest such subsequence ending at index i.
maxCount[0] = 1;
for(i=1; i<N; i++)
{
max = 1 ;
for(j=i-1; j>=0; j--)
{
if(gcd(arr[i], arr[j]) > 1)
{
temp = maxCount[j] + 1 ;
if(temp > max)
max = temp ;
}
}
maxCount[i]=max;
}
max = 0;
for(i=0; i<N; i++)
{
if(maxCount[i] > max)
max = maxCount[i] ;
}
cout<<max<<endl ;
But this approach is getting a timeout, as its time complexity is O(N^2). Can we improve the time complexity?
The condition "gcd is greater than 1" means that numbers have at least one common divisor. So, let dp[i] equals to the length of longest sequence finishing on a number divisible by i.
int n;
cin >> n;
const int MAX_NUM = 100 * 1000;
static int dp[MAX_NUM];
for(int i = 0; i < n; ++i)
{
int x;
cin >> x;
int cur = 1;
vector<int> d;
for(int i = 2; i * i <= x; ++i)
{
if(x % i == 0)
{
cur = max(cur, dp[i] + 1);
cur = max(cur, dp[x / i] + 1);
d.push_back(i);
d.push_back(x / i);
}
}
if(x > 1)
{
cur = max(cur, dp[x] + 1);
d.push_back(x);
}
for(int j : d)
{
dp[j] = cur;
}
}
cout << *max_element(dp, dp + MAX_NUM) << endl;
This solution has O(N * sqrt(MAX_NUM)) complexity. Actually, you only need dp values for prime divisors. To implement this you should be able to get the prime factorization in less than O(sqrt(x)) time (this method, for example). That optimization reduces the complexity to O(N * factorization + N log N). As a memory optimization, you can replace the dp array with a map or unordered_map.
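A minimal sketch of that optimization (my own illustration with assumed variable names, not code from the original answer): precompute the smallest prime factor of every value up to MAX_NUM with a sieve, factor each x into its distinct primes in O(log x), and keep dp entries only for primes.
#include <bits/stdc++.h>
using namespace std;
const int MAX_NUM = 100 * 1000;
int spf[MAX_NUM + 1]; // spf[x] = smallest prime factor of x (0 for x < 2)
int main()
{
    // sieve of smallest prime factors
    for (int i = 2; i <= MAX_NUM; ++i)
        if (spf[i] == 0)
            for (int j = i; j <= MAX_NUM; j += i)
                if (spf[j] == 0) spf[j] = i;
    int n;
    cin >> n;
    unordered_map<int, int> dp; // dp[p] = longest subsequence ending in a multiple of prime p
    int best = 0;
    for (int i = 0; i < n; ++i)
    {
        int x;
        cin >> x;
        // collect the distinct prime factors of x using the sieve
        vector<int> primes;
        while (x > 1)
        {
            int p = spf[x];
            primes.push_back(p);
            while (x % p == 0) x /= p;
        }
        int cur = 1;
        for (int p : primes) cur = max(cur, dp[p] + 1);
        for (int p : primes) dp[p] = cur;
        best = max(best, cur);
    }
    cout << best << endl;
    return 0;
}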
GCD takes log m time, where m is the maximum number in the array. Therefore, using a Segment Tree and binary search, one can reduce the time complexity to O(n log (m² * n)) (with O(n log m) preprocessing). This list details other data structures that can be used for RMQ-type queries and to reduce the complexity further.
Here is one possible implementation of this:
#include <bits/stdc++.h>
using namespace std;
struct SegTree {
using ftype = function<int(int, int)>;
vector<int> vec;
int l, og, dummy;
ftype f;
template<typename T> SegTree(const vector<T> &v, const T &x, const ftype &func) : og(v.size()), f(func), l(1), dummy(x) {
assert(og >= 1);
while (l < og) l *= 2;
vec = vector<int>(l*2);
for (int i = l; i < l+og; i++) vec[i] = v[i-l];
for (int i = l+og; i < 2*l; i++) vec[i] = dummy;
for (int i = l-1; i >= 1; i--) {
if (vec[2*i] == dummy && vec[2*i+1] == dummy) vec[i] = dummy;
else if (vec[2*i] == dummy) vec[i] = vec[2*i+1];
else if (vec[2*i+1] == dummy) vec[i] = vec[2*i];
else vec[i] = f(vec[2*i], vec[2*i+1]);
}
}
SegTree() {}
void valid(int x) {assert(x >= 0 && x < og);}
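// get(a, b): combine the values over the inclusive 0-based index range [a, b] with f, skipping dummy slots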
int get(int a, int b) {
valid(a); valid(b); assert(b >= a);
a += l; b += l;
int s = vec[a];
a++;
while (a <= b) {
if (a % 2 == 1) {
if (vec[a] != dummy) s = f(s, vec[a]);
a++;
}
if (b % 2 == 0) {
if (vec[b] != dummy) s = f(s, vec[b]);
b--;
}
a /= 2; b /= 2;
}
return s;
}
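// add(x, c): add c to element x, then recompute every ancestor on the way to the root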
void add(int x, int c) {
valid(x);
x += l;
vec[x] += c;
for (x /= 2; x >= 1; x /= 2) {
if (vec[2*x] == dummy && vec[2*x+1] == dummy) vec[x] = dummy;
else if (vec[2*x] == dummy) vec[x] = vec[2*x+1];
else if (vec[2*x+1] == dummy) vec[x] = vec[2*x];
else vec[x] = f(vec[2*x], vec[2*x+1]);
}
}
void update(int x, int c) {add(x, c-vec[x+l]);}
};
// Constructor (where val is something that an element in the array is
// guaranteed to never reach):
// SegTree st(vec, val, func);
// finds longest subsequence where GCD is greater than 1
int longest(const vector<int> &vec) {
int l = vec.size();
SegTree st(vec, -1, [](int a, int b){return __gcd(a, b);});
// checks if a certain length is valid in O(n log (m² * n)) time
auto valid = [&](int n) -> bool {
for (int i = 0; i <= l-n; i++) {
if (st.get(i, i+n-1) != 1) {
return true;
}
}
return false;
};
int length = 0;
// do a "binary search" on the best possible length
for (int i = l; i >= 1; i /= 2) {
while (length+i <= l && valid(length+i)) {
length += i;
}
}
return length;
}
I am trying to solve the Hacker Rank Similar Pairs problem (https://www.hackerrank.com/contests/101hack/challenges/similarpair). I can't figure out why it fails for large test cases. I am using segment trees to solve this problem in O(n log n) time. You can find my code below.
#include<iostream>
#include<vector>
using namespace std;
vector<int> graph[110001];
int T, ST[100001*4] = {0}, N, deg[100001] = {0};
void update(int node, int b, int e, int idx, int val) {
if(b > node || e < node) return;
if(b == e) {
ST[idx] += val;
return;
}
update(node, b, (b + e)/2, 2 * idx, val);
update(node, (b + e)/2 + 1, e, 2 * idx + 1, val);
ST[idx] = ST[2 * idx] + ST[2 * idx + 1];
}
long Query(int l, int r, int b, int e, int idx) {
if( l > e || r < b) return 0;
if(l <= b && r >= e) return ST[idx];
return Query(l, r, b, (b + e)/2, 2 * idx) + Query(l, r, (b + e)/2 + 1, e, 2 * idx + 1);
}
long long SimilarPairs(int node) {
int l = max(1, node - T), r = min(N, node + T);
long res = 0;
res = Query(l, r, 1, N, 1);
update(node, 1, N, 1, 1);
for(int i = 0; i < graph[node].size(); i++) {
res += SimilarPairs(graph[node][i]);
}
update(node, 1, N, 1, -1);
return res;
}
int main() {
long x, y, root, result, start;
cin >> N >> T;
for(int i = 0; i < N - 1; i++) {
cin >> x >> y;
graph[x].push_back(y);
deg[y]++;
}
for(int i = 1; i <= N; i++) if(!deg[i]) root = i;
result = SimilarPairs(root);
cout << result << endl;
cin.get();
return 0;
}
I get what you were doing. The problem is that you are missing some long longs: on many platforms long is the same size as int (32 bits), so you must use long long everywhere, since the result does not necessarily fit in a 32-bit integer.
This gets AC:
#include<iostream>
#include<vector>
using namespace std;
vector<int> graph[110001];
int T, N, deg[100001] = {0};
long long ST[100001*4] = {0};
void update(int node, int b, int e, int idx, int val) {
if(b > node || e < node) return;
if(b == e) {
ST[idx] += val;
return;
}
int m = (b + e) >> 1;
int q = idx << 1;
update(node, b, m, q, val);
update(node, m + 1, e, q + 1, val);
ST[idx] = ST[q] + ST[q+1];
}
long long Query(int l, int r, int b, int e, int idx) {
if( l > e || r < b) return 0;
if(l <= b && r >= e) return ST[idx];
int m = (b + e) >> 1;
int q = idx << 1;
return Query(l, r, b, m, q) + Query(l, r, m + 1, e, q + 1);
}
long long SimilarPairs(int node) {
int l = max(1, node - T), r = min(N, node + T);
long long res = 0;
res = Query(l, r, 1, N, 1);
update(node, 1, N, 1, 1);
for(int i = 0; i < graph[node].size(); i++) {
res += SimilarPairs(graph[node][i]);
}
update(node, 1, N, 1, -1);
return res;
}
int main() {
long x, y, root, start;
cin >> N >> T;
for(int i = 0; i < N - 1; i++) {
cin >> x >> y;
graph[x].push_back(y);
deg[y]++;
}
for(int i = 1; i <= N; i++) if(!deg[i]) root = i;
long long result = SimilarPairs(root);
cout << result << endl;
cin.get();
return 0;
}