Number of heaps using n distinct integers - time complexity - algorithm

I am solving the problem of finding the number of max heaps that can be formed using n distinct integers (say 1..n). I have solved it using the following
recurrence, with some help from this: https://www.quora.com/How-many-Binary-heaps-can-be-made-from-N-distinct-elements :
T(N) = C(N-1, L) * T(L) * T(R), where L is the number of nodes in the left subtree and R is the number of nodes in the right subtree. For example, for N = 4 the left subtree has L = 2 nodes and the right subtree has R = 1, so T(4) = C(3, 2) * T(2) * T(1) = 3. I have also implemented it in C++ using dynamic programming, but I am stuck finding its time complexity. Can someone help me with this?
#include <iostream>
using namespace std;
#define MAXN 105 //maximum value of n here
int dp[MAXN]; //dp[i] = number of max heaps for i distinct integers
int nck[MAXN][MAXN]; //nck[i][j] = number of ways to choose j elements from i elements, order does not matter
int log2arr[MAXN]; //log2arr[i] = floor of the base-2 logarithm of i (log2arr avoids a clash with std::log2)
//to calculate nCk
int choose(int n, int k)
{
if (k > n)
return 0;
if (n <= 1)
return 1;
if (k == 0)
return 1;
if (nck[n][k] != -1)
return nck[n][k];
int answer = choose(n-1, k-1) + choose(n-1, k);
nck[n][k] = answer;
return answer;
}
//calculate L (size of the left subtree) for a given value of n
int getLeft(int n)
{
if (n == 1)
return 0;
int h = log2arr[n];
//max number of elements that can be present in the hth level of any heap
int numh = (1 << h); //(2 ^ h)
//number of elements actually present in the last (hth) level
//(the first h levels are full and hold 2^h - 1 nodes)
int last = n - ((1 << h) - 1);
//if the last level is at least half-filled
if (last >= (numh / 2))
return (1 << h) - 1; // (2^h) - 1
else
return (1 << h) - 1 - ((numh / 2) - last);
}
//find the number of max heaps for n
int numberOfHeaps(int n)
{
if (n <= 1)
return 1;
if (dp[n] != -1)
return dp[n];
int left = getLeft(n);
int ans = (choose(n-1, left) * numberOfHeaps(left)) * (numberOfHeaps(n-1-left));
dp[n] = ans;
return ans;
}
//function to initialize the arrays and solve
int solve(int n)
{
for (int i = 0; i <= n; i++)
dp[i] = -1;
for (int i = 0; i <= n; i++)
for (int j = 0; j <=n; j++)
nck[i][j] = -1;
int currLog2 = -1;
int currPower2 = 1;
//for each power of two find logarithm
for (int i = 1; i <= n; i++)
{
if (currPower2 == i)
{
currLog2++;
currPower2 *= 2;
}
log2arr[i] = currLog2;
}
return numberOfHeaps(n);
}
//driver function
int main()
{
int n=10;
cout << solve(n) << endl;
return 0;
}

Related

Convert a number m to n using minimum number of given operations

Question:
Given two integers N and M, convert N to M using the minimum number of the given operations.
The operations are:
Square N (N = N^2)
Divide N by a prime integer P if N is divisible by P (N = N / P and N % P == 0)
Constraints:
N, M <= 10^9
Example:
N = 12, M = 18
The minimum operations are:
N /= 2 -> N = 6
N = N^2 -> N = 36
N /= 2 -> N = 18
My take:
I'm trying to use BFS to solve this problem. For each number, the available edges to other numbers are the operations. But it gets Time Limit Exceeded. Is there any better way to solve this?
Here is my BFS code (ll/pii are typedefs, MAXN is a constant, and pr holds the precomputed primes):
queue<pair<int,int> > q;
vector<long long> pr;
ll m,n;
bool prime[MAXN+1];
void solve()
{
while (!q.empty())
{
pii x=q.front();
q.pop();
if (x.first==m)
{
cout << x.second;
return;
}
if (x.first==1) continue;
for(ll k:pr)
{
if (k>x.first) break;
if (x.first%k==0) q.push({x.first/k,x.second+1});
}
q.push({x.first*x.first,x.second+1});
}
}
The algorithm uses the prime factorizations of N and M, keeping track of the corresponding exponents.
If M has a prime factor that N does not have, there is no solution (the code returns -1).
If N has some prime factors that M doesn't have, then the first step is to divide N by these primes.
The corresponding number of operations is the sum of the corresponding exponents.
At this stage, we get two arrays A and B corresponding to the exponents of the common prime factors, for N and M.
It is worth noting that at this stage, the values of the primes involved are not relevant anymore; only the exponents matter.
Then one must determine the minimum number of squares (= multiplications by 2 of the exponents).
This is the smallest k such that 2^k * A[i] >= B[i] for all indices i.
The number of multiplications is added to the number of operations only once, as all exponents are multiplied by 2 at the same time.
Last step is to determine, for each pair (a, b) = (A[i], B[i]), the number of subtractions needed to go from a to b, while performing exactly k multiplications by 2. This is done with the following rules:
- if k == 0: f(a, b, k) = a - b
- else:
- if (a-1)*2^k >= b: f(a, b, k) = 1 + f(a-1, b, k)
- else: f(a, b, k) = f(2*a, b, k-1)
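As a check, these steps applied to the example from the question (N = 12, M = 18): 12 = 2^2 * 3 and 18 = 2 * 3^2, so the common-prime exponent arrays are A = [2, 1] and B = [1, 2], and no primes have to be removed first. The smallest k with 2^k * A[i] >= B[i] for both exponents is k = 1 (one squaring). Then f(2, 1, 1) = 2 subtractions for the exponent of 2 and f(1, 2, 1) = 0 for the exponent of 3, so the total is 1 + 2 + 0 = 3 operations, matching the sequence N /= 2, N = N^2, N /= 2 given in the question.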
The complexity is dominated by the decomposition into prime factors: O(sqrt(N)).
Code:
This code is rather long, but a large part of it consists of helper routines needed for debugging and analysis.
#include <iostream>
#include <vector>
#include <cmath>
#include <algorithm>
void print (const std::vector<int> &v, const std::string s = "") {
std::cout << s;
for (auto &x: v) {
std::cout << x << " ";
}
std::cout << std::endl;
}
void print_decomp (int n, const std::vector<int> &primes, const std::vector<int> &mult) {
std::cout << n << " = ";
int k = primes.size();
for (int i = 0; i < k; ++i) {
std::cout << primes[i];
if (mult[i] > 1) std::cout << "^" << mult[i];
std::cout << " ";
}
std::cout << "\n";
}
void prime_decomp (int nn, std::vector<int> &primes, std::vector<int> &mult) {
int n = nn;
if (n <= 1) return;
if (n % 2 == 0) {
primes.push_back(2);
int cpt = 1;
n/= 2;
while (n%2 == 0) {n /= 2; cpt++;}
mult.push_back (cpt);
}
int max_prime = sqrt(n);
int p = 3;
while (p <= max_prime) {
if (n % p == 0) {
primes.push_back(p);
int cpt = 1;
n/= p;
while (n%p == 0) {n /= p; cpt++;}
mult.push_back (cpt);
max_prime = sqrt(n);
}
p += 2;
}
if (n != 1) {
primes.push_back(n);
mult.push_back (1);
}
print_decomp (nn, primes, mult);
}
// Determine the number of subtractions to go from a to b, with exactly k multiplications by 2
int n_sub (int a, int b, int k, int power2) {
if (k == 0){
if (b > a) exit(1);
return a - b;
}
//if (a == 1) return n_sub (2*a, b, k-1, power2/2);
if ((a-1)*power2 >= b) {
return 1 + n_sub(a-1, b, k, power2);
} else {
return n_sub (2*a, b, k-1, power2/2);
}
return 0;
}
// A return of -1 means no possibility
int n_operations (int N, int M) {
int count = 0;
if (N == M) return 0;
if (N == 1) return -1;
std::vector<int> primes_N, primes_M, expon_N, expon_M;
// Prime decomposition
prime_decomp(N, primes_N, expon_N);
prime_decomp (M, primes_M, expon_M);
// Compare decomposition, check if a solution can exist, set up two exponent arrays
std::vector<int> A, B;
int index_A = 0, index_B = 0;
int nA = primes_N.size();
int nB = primes_M.size();
while (true) {
if ((index_A == nA) && (index_B == nB)) {
break;
}
if ((index_A < nA) && (index_B < nB)) {
if (primes_N[index_A] == primes_M[index_B]) {
A.push_back(expon_N[index_A]);
B.push_back(expon_M[index_B]);
index_A++; index_B++;
continue;
}
if (primes_N[index_A] < primes_M[index_B]) {
count += expon_N[index_A];
index_A++;
continue;
}
return -1; // M has a prime that N doesn't have: impossibility to go to M
}
if (index_B != nB) { // impossibility
return -1;
}
for (int i = index_A; i < nA; ++i) {
count += expon_N[i]; // suppression of primes in N not in M
}
break;
}
std::cout << "1st step, count = " << count << "\n";
print (A, "exponents of N: ");
print (B, "exponents of M: ");
// Determination of the number of multiplications by two of the exponents (= number of squares)
int n = A.size();
int n_mult2 = 0;
int power2 = 1;
for (int i = 0; i < n; ++i) {
while (power2*A[i] < B[i]) {
power2 *= 2;
n_mult2++;
}
}
count += n_mult2;
std::cout << "number of squares = " << n_mult2 << " -> " << power2 << "\n";
// For each pair of exponent, determine the number of subtractions,
// with a fixed number of multiplication by 2
for (int i = 0; i < n; ++i) {
count += n_sub (A[i], B[i], n_mult2, power2);
}
return count;
}
int main() {
int N, M;
std::cin >> N >> M;
auto ans = n_operations (N, M);
std::cout << ans << "\n";
return 0;
}

Largest sum of all increasing subsequences of length k

I am currently stuck with the classic longest increasing subsequence problem, but there is a slight twist to it. Instead of just finding the longest increasing subsequence, I need to find the largest sum of all increasing subsequences that are of length k.
I have the following pseudo code implemented:
input = [4,13,5,14] k = 2
n = size of input
opt = array of size n which stores the highest increasing subsequence sum up to this index
counts = array of size n which stores the amount of values in the subsequence up to this index
highestSum = -1
FOR i in range(0, n)
    high = new data object(value = 0, sum = 0, count = 0)
    FOR j in range(i-1, 0, -1)
        IF high.sum < opt[j] AND opt[j] < opt[i] AND counts[j] < k
            high.value = input[j]
            high.sum = opt[j]
            high.count = counts[j]
    opt[i] = high.sum + input[i]
    counts[i] = high.count + 1
    IF counts[i] == k
        highestSum = higher value between (highestSum, opt[i])
return highestSum
This dynamic programming approach works in most cases, but for the list I outlined above it does not return the optimal subsequence sum. The optimal subsequence sum with length 2 should be 27 (13-14), but 18 is returned (4-14). This is due to the opt and counts array looking like this:
k = 2
input: 0 4 13 5 14
opt: 0 4 17 9 18
counts: 0 1 2 2 2
Because 13 already ends the subsequence 4-13, its count value (2) is no longer less than k, so 14 is unable to use 13 as its predecessor.
Are there any suggestions as to what I can change?
You'll need k+1 sorted data structures, one for each possible length of subsequence currently found.
Each structure maps the last entry of an optimal subsequence to the best sum ending with that entry. That is, we only keep subsequences that can still lead to the best possible solution. (Technical note: of those that can lead to the best solution, pick the one whose positions are lexicographically first.) The structure is kept sorted by increasing last entry, and pairs that are dominated (a larger last entry with no larger sum) are pruned.
In pseudocode it works like this.
initialize optimal[0..k]
optimal[0][min(sequence) - 1] = 0   # empty set
for entry in sequence:
    for i in k..1:
        entry_prev = biggest < entry in optimal[i-1]
        if entry_prev is not None:
            this_sum = optimal[i-1][entry_prev] + entry
            entry_smaller = biggest <= entry in optimal[i]
            if entry_smaller is None or optimal[i][entry_smaller] < this_sum:
                delete (e, v) from optimal[i] where entry <= e and v <= this_sum
                insert (entry, this_sum) into optimal[i]
return optimal[k][largest entry in optimal[k]]
But you need this kind of 2-d structure to keep track of what might happen from here.
The total memory needed is O(k n) and running time will be O(k n log(n)).
It is possible to also reconstruct the optimal subsequence, but that requires a more complex data structure.
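For concreteness, here is a minimal C++ sketch of this scheme that uses one std::map per length as the sorted structure; it is an illustration of the pseudocode above (with hypothetical names), not the answer's own code, and it returns -1 when no increasing subsequence of length k exists:
#include <bits/stdc++.h>
using namespace std;
// optimal[len] maps "last entry of a length-len increasing subsequence" to the best
// sum ending with that entry; dominated pairs (larger last entry, smaller or equal
// sum) are pruned, so keys and values both increase along each map.
long long largestSumOfLengthK(const vector<int> &a, int k) {
    vector<map<int, long long>> optimal(k + 1);
    for (int x : a) {
        for (int len = k; len >= 1; --len) { // longest first, so x is not reused within one step
            long long base = 0;
            if (len > 1) {
                // largest last entry strictly smaller than x among length len-1
                auto it = optimal[len - 1].lower_bound(x);
                if (it == optimal[len - 1].begin()) continue; // nothing to extend
                base = prev(it)->second;
            }
            long long cand = base + x;
            auto &cur = optimal[len];
            // skip if a pair with last entry <= x already has sum >= cand
            auto ge = cur.upper_bound(x);
            if (ge != cur.begin() && prev(ge)->second >= cand) continue;
            // erase pairs dominated by (x, cand): last entry >= x and sum <= cand
            auto it2 = cur.lower_bound(x);
            while (it2 != cur.end() && it2->second <= cand) it2 = cur.erase(it2);
            cur[x] = cand;
        }
    }
    if (optimal[k].empty()) return -1;   // no increasing subsequence of length k
    return optimal[k].rbegin()->second;  // sums increase with keys, so the last one is the max
}
int main() {
    vector<int> input = {4, 13, 5, 14};
    cout << largestSumOfLengthK(input, 2) << endl; // prints 27 (13 + 14)
    return 0;
}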
Here is a working solution in C++ that runs in O(n * k * log n) time with O(n * k) space. I think you cannot make it faster, but let me know if you find a faster solution. This is a modification of the solution from https://stackoverflow.com/questions/16402854/number-of-increasing-subsequences-of-length-k. The key difference here is that we keep track of the maximum sum for subsequences of each length instead of accumulating the number of subsequences, and we iterate from the back of the array (since for an increasing subsequence longer than k, the best k-length part is at its end).
Another trick is that we use the array sums to map index + length combinations to maximum sums.
maxSumIncreasingKLenSeqDP is the simple dynamic programming solution with O(n * n * k) time complexity.
#include <iostream>
#include <algorithm>
#include <unordered_map>
#include <limits.h>
using namespace std;
#include <random>
int maxSumIncreasingKLenSeq(int arr[], size_t n, int k){
// inverse compression: assign N-1, N-2, ... , 1 to smallest, ..., largest
size_t N = 1;
size_t compArr[n];
{
for(size_t i = 0; i<n; ++i)
compArr[i] = arr[i];
// descending order
sort(compArr, compArr + n, greater<int>());
unordered_map<int, size_t> compMap;
for(int val : compArr){
if(compMap.find(val) == compMap.end()){
compMap[val] = N;
++N;
}
}
for(size_t i = 0; i<n; ++i)
compArr[i] = compMap[arr[i]];
}
int sums[n * (k - 1) + n]; // key is combined from index and length by n * (length - 1) + index
for(size_t i = 0; i < n * (k - 1) + n; ++i)
sums[i] = -1;
for(size_t i = 0; i < n; ++i)
sums[i] = arr[i]; // i, 1
int BIT[N];
for(size_t len = 2; len <= k; ++len){
for(size_t i = 0; i<N; ++i)
BIT[i] = INT_MIN;
for(size_t i = 0; i < len - 1; ++i)
sums[n * (len - 1) + i] = INT_MIN;
for(int i = n - len; i >= 0; --i){
int val = sums[n * (len - 2) + i + 1]; // i + 1, len - 1
int idx = compArr[i + 1];
while(idx < N){ // valid Fenwick positions are 1..N-1
BIT[idx] = max(val, BIT[idx]);
idx += (idx & (-idx));
}
// it does this:
//BIT[compArr[i + 1]] = sums[n * (len - 2) + i + 1];
idx = compArr[i] - 1;
int maxSum = INT_MIN;
while(idx > 0){
maxSum = max(BIT[idx], maxSum);
idx -= (idx & (-idx));
}
sums[n * (len - 1) + i] = maxSum;
// it does this:
//for(int j = 0; j < compArr[i]; ++j)
// sums[n * (len - 1) + i] = max(sums[n * (len - 1) + i], BIT[j]);
if(sums[n * (len - 1) + i] > INT_MIN)
sums[n * (len - 1) + i] += arr[i];
}
}
int maxSum = INT_MIN;
for(int i = n - k; i >= 0; --i)
maxSum = max(maxSum, sums[n * (k - 1) + i]); // i, k
return maxSum;
}
int maxSumIncreasingKLenSeqDP(int arr[], int n, int k){
int sums[n * (k - 1) + n]; // key is combined from index and length by n * (length - 1) + index
for(size_t i = 0; i < n; ++i)
sums[i] = arr[i]; // i, 1
for(int i = 2; i <= k; ++i)
sums[n * (i - 1) + n - 1] = INT_MIN; // n - 1, i
// moving backward since for increasing subsequences it will be the last k items
for(int i = n - 2; i >= 0; --i){
for(size_t len = 2; len <= k; ++len){
int idx = n * (len - 1) + i; // i, length
sums[idx] = INT_MIN;
for(int j = n - 1; j > i; --j){
if(arr[i] < arr[j])
sums[idx] = max(sums[idx], sums[n * (len - 2) + j]); // j, length - 1
}
if(sums[idx] > INT_MIN)
sums[idx] += arr[i];
}
}
int maxSum = INT_MIN;
for(int i = n - k; i >= 0; --i)
maxSum = max(maxSum, sums[n * (k - 1) + i]); // i, k
return maxSum;
}
int main(){
std::random_device dev;
std::mt19937 rng(dev());
std::uniform_int_distribution<std::mt19937::result_type> dist(1,10);
for(int len = 3; len < 10; ++len){
for(int i = 0; i < 10000; ++i){
int arr[100];
for(int n = 0; n < 100; ++n)
arr[n] = dist(rng);
int res = maxSumIncreasingKLenSeqDP(arr, 100, len);
int fastRes = maxSumIncreasingKLenSeq(arr, 100, len);
if(res != fastRes)
cout << "failed" << endl;
else
cout << "passed" << endl;
}
}
return 0;
}

Maximal Square with 0 inside

The question Maximal Square in https://leetcode.com/problems/maximal-square/description/ is easy to solve with DP. But how to solve the following follow-up question:
Similar to the Maximal Square question, but 0's are allowed inside the square; "inside" means the border of the square must still be all 1's.
For example, given the following matrix:
1 0 1 0 0
1 0 1 1 1
1 1 1 0 1
1 0 1 1 1
Return 9.
Update: the 3*3 square in the bottom-right corner matches the requirement: its border is all 1's, and there can be 0's inside the square.
I thought up an O(n^3) algorithm: take maze[i][j] as the bottom-right corner of the square if maze[i][j] == 1 and enumerate the edge length of the square. For example, if the edge length is 3, check whether maze[i - 2][j - 2], maze[i][j - 2], maze[i - 2][j], maze[i][j] form a square whose edges are all 1's.
Is there any better algorithm?
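A minimal sketch of this O(n^3)-style border check, using left/up run-length arrays (hypothetical names) so each candidate square is verified in O(1):
#include <bits/stdc++.h>
using namespace std;
// Area of the largest square whose border is all 1's (the interior may be anything).
// runLeft[i][j] / runUp[i][j] = number of consecutive 1's ending at (i, j), looking left / up.
int largestBorderSquare(const vector<vector<int>> &g) {
    int n = g.size(), m = g[0].size();
    vector<vector<int>> runLeft(n, vector<int>(m, 0)), runUp(n, vector<int>(m, 0));
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < m; ++j)
            if (g[i][j] == 1) {
                runLeft[i][j] = (j ? runLeft[i][j - 1] : 0) + 1;
                runUp[i][j] = (i ? runUp[i - 1][j] : 0) + 1;
            }
    int best = 0;
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < m; ++j)
            // (i, j) as the bottom-right corner; try the largest feasible side first
            for (int s = min(runLeft[i][j], runUp[i][j]); s > best; --s)
                if (runLeft[i - s + 1][j] >= s && runUp[i][j - s + 1] >= s) { // top and left edges
                    best = s;
                    break;
                }
    return best * best;
}
int main() {
    vector<vector<int>> grid = {{1, 0, 1, 0, 0},
                                {1, 0, 1, 1, 1},
                                {1, 1, 1, 0, 1},
                                {1, 0, 1, 1, 1}};
    cout << largestBorderSquare(grid) << endl; // prints 9 for the matrix above
    return 0;
}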
Your problem can be solved in O(n * m) time and space complexity, where n is the number of rows and m the number of columns of the matrix. You may look at the code below, which I have commented to make it understandable.
Please let me know if you have any doubt.
#include <bits/stdc++.h>
using namespace std;
void precalRowSum(vector< vector<int> >& grid, vector< vector<int> >&rowSum, int n, int m) {
// contiguous sum upto jth position in ith row
for (int i = 0; i < n; ++i) {
int sum = 0;
for (int j = 0; j < m; ++j) {
if (grid[i][j] == 1) {
sum++;
} else {
sum = 0;
}
rowSum[i][j] = sum;
}
}
}
void precalColSum(vector< vector<int> >& grid, vector< vector<int> >&colSum, int n, int m) {
// contiguous sum upto ith position in jth column
for (int j = 0; j < m; ++j) {
int sum = 0;
for (int i = 0; i < n; ++i) {
if (grid[i][j] == 1) {
sum++;
} else {
sum = 0;
}
colSum[i][j] = sum;
}
}
}
int solve(vector< vector<int> >& grid, int n, int m) {
vector< vector<int> >rowSum(n, vector<int>(m, 0));
vector< vector<int> >colSum(n, vector<int>(m, 0));
// calculate rowwise sum for 1
precalRowSum(grid, rowSum, n, m);
// calculate colwise sum for 1
precalColSum(grid, colSum, n, m);
vector< vector<int> >zerosHeight(n, vector<int>(m, 0));
int ans = 0;
for (int i = 0; i < (n - 1); ++i) {
for (int j = 0; j < m; ++j) {
zerosHeight[i][j] = ( grid[i][j] == 0 );
if (grid[i][j] == 0 && i > 0) {
zerosHeight[i][j] += zerosHeight[i - 1][j];
}
}
if (i == 0) continue;
// perform calculation on ith row
for (int j = 1; j < m; ) {
int height = zerosHeight[i][j];
if (!height) {
j++;
continue;
}
int cnt = 0;
while (j < m && height == zerosHeight[i][j]) {
j++;
cnt++;
}
if ( j == m) break;
if (cnt == height && (i - cnt) >= 0 ) {
// zeros are valid, now check validity of the boundaries
// check validity of the upper boundary, lower boundary, left boundary, right boundary respectively
if (rowSum[i - cnt][j] >= (cnt + 2) && rowSum[i + 1][j] >= (cnt + 2) &&
colSum[i + 1][j - cnt - 1] >= (cnt + 2) && colSum[i + 1][j] >= (cnt + 2) ){
ans = max(ans, (cnt + 2) * (cnt + 2) );
}
}
}
}
return ans;
}
int main() {
int n, m;
cin>>n>>m;
vector< vector<int> >grid;
for (int i = 0; i < n; ++i) {
vector<int>tmp;
for (int j = 0; j < m; ++j) {
int x;
cin>>x;
tmp.push_back(x);
}
grid.push_back(tmp);
}
cout<<endl;
cout<< solve(grid, n, m) <<endl;
return 0;
}

Dynamic Programming Coin Change Limited Coins

Dynamic Programming Change Problem (Limited Coins).
I'm trying to create a program that takes as INPUT:
int coinValues[]; //e.g [coin1,coin2,coin3]
int coinLimit[]; //e.g [2 coin1 available,1 coin2 available,...]
int amount; //the amount we want change for.
OUTPUT:
int DynProg[]; //of size amount+1.
And the output should be an array of size amount+1 in which each cell represents the optimal (minimum) number of coins needed to give change for the amount equal to that cell's index.
EXAMPLE: let's say the cell of the array at index 5 contains 2.
This means that in order to give change for the amount of 5 (the index), you need 2 (the cell's content) coins (optimal solution).
Basically I need exactly the output of the first array (C[p]) of this video. It's exactly the same problem with the big DIFFERENCE of LIMITED COINS.
Link to Video.
Note: see the video to understand; ignore the 2nd array of the video, and keep in mind that I don't need the combinations, but the DP array, so that I can then find which coins to give as change.
Thank you.
Consider the following pseudocode:
for every coin denomination v = coinValues[i]:
    loop coinLimit[i] times:
        starting with the k=0 entry, check each non-zero C[k]:
            if C[k]+1 < C[k+v] then
                replace C[k+v] with C[k]+1 and set S[k+v] = v
Is it clear?
O(nk) solution from an editorial I wrote a while ago:
We start with the basic DP solution that runs in O(k*sum(c)). We have our dp array, where dp[i][j] stores the least possible number of coins from the first i denominations that sum to j. We have the following transition: dp[i][j] = min(dp[i - 1][j - cnt * value[i]] + cnt) for cnt from 0 to j / value[i].
To optimize this to an O(nk) solution, we can use a deque to remember the minimum values from the previous iteration and make the transitions O(1). The basic idea is that if we want to find the minimum of the last m values in some array, we can maintain an increasing deque that stores possible candidates for the minimum. At each step, we pop off values at the end of the deque greater than the current value before pushing the current value onto the back of the deque. Since the current value is both further to the right and less than the values we popped off, we can be sure they will never be the minimum. Then, we pop off the first element in the deque if it is more than m elements away. The minimum value at each step is now simply the first element of the deque.
We can apply a similar optimization trick to this problem. For each coin type i, we compute the elements of the dp array in this order: For each possible value of j % value[i] in increasing order, we process the values of j which when divided by value[i] produces that remainder in increasing order. Now we can apply the deque optimization trick to find min(dp[i - 1][j - cnt * value[i]] + cnt) for cnt from 0 to j / value[i] in constant time.
Pseudocode:
let n = number of coin denominations
let k = amount of change needed
let v[i] = value of the ith denomination, 1 indexed
let c[i] = maximum number of coins of the ith denomination, 1 indexed
let dp[i][j] = the fewest number of coins needed to sum to j using the first i coin denominations
for i from 1 to k:
dp[0][i] = INF
for i from 1 to n:
for rem from 0 to v[i] - 1:
let d = empty double-ended-queue
for j from 0 to (k - rem) / v[i]:
let currval = rem + v[i] * j
if dp[i - 1][currval] is not INF:
while d is not empty and dp[i - 1][d.back() * v[i] + rem] + j - d.back() >= dp[i - 1][currval]:
d.pop_back()
d.push_back(j)
if d is not empty and j - d.front() > c[i]:
d.pop_front()
if d is empty:
dp[i][currval] = INF
else:
dp[i][currval] = dp[i - 1][d.front() * v[i] + rem] + j - d.front()
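A minimal C++ translation of this pseudocode, assuming 0-indexed denomination vectors and a rolling dp row (a sketch, not the editorial's original code):
#include <bits/stdc++.h>
using namespace std;
const int INF = 1e9;
// v[i], c[i]: value and maximum count of the ith denomination (0-indexed here).
// Returns the fewest coins summing to k, or -1 if k cannot be reached.
int minCoins(const vector<int> &v, const vector<int> &c, int k) {
    int n = v.size();
    vector<int> prev(k + 1, INF), cur(k + 1, INF);
    prev[0] = 0;
    for (int i = 0; i < n; ++i) {
        fill(cur.begin(), cur.end(), INF);
        for (int rem = 0; rem < v[i]; ++rem) {
            deque<int> d; // candidate multipliers j; the best one sits at the front
            for (int j = 0; rem + (long long)v[i] * j <= k; ++j) {
                int amount = rem + v[i] * j;
                if (prev[amount] != INF) {
                    // pop candidates that can never beat the current one
                    while (!d.empty() &&
                           prev[rem + v[i] * d.back()] - d.back() >= prev[amount] - j)
                        d.pop_back();
                    d.push_back(j);
                }
                // drop a candidate that would need more than c[i] coins of this type
                if (!d.empty() && j - d.front() > c[i])
                    d.pop_front();
                cur[amount] = d.empty() ? INF
                                        : prev[rem + v[i] * d.front()] + (j - d.front());
            }
        }
        swap(prev, cur);
    }
    return prev[k] == INF ? -1 : prev[k];
}
int main() {
    // coins {5, 3, 2, 1} with limits {2, 1, 2, 1}, as in the Java example below
    cout << minCoins({5, 3, 2, 1}, {2, 1, 2, 1}, 9) << endl; // prints 3 (e.g. 5 + 3 + 1)
    return 0;
}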
This is what you are looking for.
Assumption made: coin values are sorted in descending order
public class CoinChangeLimitedCoins {
public static void main(String[] args) {
int[] coins = { 5, 3, 2, 1 };
int[] counts = { 2, 1, 2, 1 };
int target = 9;
int[] nums = combine(coins, counts);
System.out.println(minCount(nums, target, 0, 0, 0));
}
private static int minCount(int[] nums, int target, int sum, int current, int count){
if(sum == target) return count;
if(current >= nums.length) return -1; // ran past the last coin without reaching the target
if(sum + nums[current] <= target){
return minCount(nums, target, sum+nums[current], current+1, count+1);
} else {
return minCount(nums, target, sum, current+1, count);
}
}
private static int[] combine(int[] coins, int[] counts) {
int sum = 0;
for (int count : counts) {
sum += count;
}
int[] returnArray = new int[sum];
int returnArrayIndex = 0;
for (int i = 0; i < coins.length; i++) {
int count = counts[i];
while (count != 0) {
returnArray[returnArrayIndex] = coins[i];
returnArrayIndex++;
count--;
}
}
return returnArray;
}
}
You can check this question: Minimum coin change problem with limited amount of coins.
BTW, I created a C++ program based on the above link's algorithm:
#include <iostream>
#include <map>
#include <vector>
#include <algorithm>
#include <limits>
using namespace std;
void copyVec(vector<int> from, vector<int> &to){
for(vector<int>::size_type i = 0; i < from.size(); i++)
to[i] = from[i];
}
vector<int> makeChangeWithLimited(int amount, vector<int> coins, vector<int> limits)
{
vector<int> change;
vector<vector<int>> coinsUsed( amount + 1 , vector<int>(coins.size()));
vector<int> minCoins(amount+1,numeric_limits<int>::max() - 1);
minCoins[0] = 0;
vector<int> limitsCopy(limits.size());
copy(limits.begin(), limits.end(), limitsCopy.begin());
for (vector<int>::size_type i = 0; i < coins.size(); ++i)
{
while (limitsCopy[i] > 0)
{
for (int j = amount; j >= 0; --j)
{
int currAmount = j + coins[i];
if (currAmount <= amount)
{
if (minCoins[currAmount] > minCoins[j] + 1)
{
minCoins[currAmount] = minCoins[j] + 1;
copyVec(coinsUsed[j], coinsUsed[currAmount]);
coinsUsed[currAmount][i] += 1;
}
}
}
limitsCopy[i] -= 1;
}
}
if (minCoins[amount] == numeric_limits<int>::max() - 1)
{
return change;
}
copy(coinsUsed[amount].begin(),coinsUsed[amount].end(), back_inserter(change) );
return change;
}
int main()
{
vector<int> coins;
coins.push_back(20);
coins.push_back(50);
coins.push_back(100);
coins.push_back(200);
vector<int> limits;
limits.push_back(100);
limits.push_back(100);
limits.push_back(50);
limits.push_back(20);
int amount = 0;
cin >> amount;
while(amount){
vector<int> change = makeChangeWithLimited(amount,coins,limits);
for(vector<int>::size_type i = 0; i < change.size(); i++){
cout << change[i] << "x" << coins[i] << endl;
}
if(change.empty()){
cout << "IMPOSSIBE\n";
}
cin >> amount;
}
system("pause");
return 0;
}
Code in C#:
private static int MinCoinsChangeWithLimitedCoins(int[] coins, int[] counts, int sum)
{
var dp = new int[sum + 1];
Array.Fill(dp, int.MaxValue);
dp[0] = 0;
for (int i = 0; i < coins.Length; i++) // n
{
int coin = coins[i];
for (int j = 0; j < counts[i]; j++) // counts[i]
{
for (int s = sum; s >= coin ; s--) // sum
{
int remainder = s - coin;
if (remainder >= 0 && dp[remainder] != int.MaxValue)
{
dp[s] = Math.Min(1 + dp[remainder], dp[s]);
}
}
}
}
return dp[sum] == int.MaxValue ? -1 : dp[sum];
}

Algorithm. How to find the longest subsequence of integers in an array such that the gcd of any two consecutive numbers in the sequence is greater than 1?

Given an array of integers, we have to find the length of the longest subsequence such that the gcd of any two consecutive elements in the subsequence is greater than 1.
For example, if array = [12, 8, 2, 3, 6, 9]
then one such subsequence can be {12, 8, 2, 6, 9}
another one can be {12, 3, 6, 9}
I tried to solve this problem by dynamic programming. Assume that maxCount is the array such that maxCount[i] will have the length of such longest subsequence
ending at index i.
maxCount[0] = 1;
for(i=1; i<N; i++)
{
    max = 1;
    for(j=i-1; j>=0; j--)
    {
        if(gcd(arr[i], arr[j]) > 1)
        {
            temp = maxCount[j] + 1;
            if(temp > max)
                max = temp;
        }
    }
    maxCount[i] = max;
}
max = 0;
for(i=0; i<N; i++)
{
    if(maxCount[i] > max)
        max = maxCount[i];
}
cout << max << endl;
But this approach is timing out, as its time complexity is O(N^2). Can we improve the time complexity?
The condition "gcd is greater than 1" means that numbers have at least one common divisor. So, let dp[i] equals to the length of longest sequence finishing on a number divisible by i.
int n;
cin >> n;
const int MAX_NUM = 100 * 1000;
static int dp[MAX_NUM];
for(int i = 0; i < n; ++i)
{
int x;
cin >> x;
int cur = 1;
vector<int> d;
for(int i = 2; i * i <= x; ++i)
{
if(x % i == 0)
{
cur = max(cur, dp[i] + 1);
cur = max(cur, dp[x / i] + 1);
d.push_back(i);
d.push_back(x / i);
}
}
if(x > 1)
{
cur = max(cur, dp[x] + 1);
d.push_back(x);
}
for(int j : d)
{
dp[j] = cur;
}
}
cout << *max_element(dp, dp + MAX_NUM) << endl;
This solution has O(N * sqrt(MAX_NUM)) complexity. Actually you can calculate dp values only for prime numbers. To implement this you should be able to get the prime factorization in less than O(N^0.5) time (this method, for example). That optimization should reduce the complexity to O(N * factorization + N log N). As a memory optimization, you can replace the dp array with a map or unordered_map.
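A minimal sketch of that prime-only variant, assuming the array values are bounded by MAX_NUM and using a smallest-prime-factor sieve for the factorization (illustrative, not the answerer's code):
#include <bits/stdc++.h>
using namespace std;
int main() {
    const int MAX_NUM = 100 * 1000;
    // spf[x] = smallest prime factor of x
    vector<int> spf(MAX_NUM + 1, 0);
    for (int i = 2; i <= MAX_NUM; ++i)
        if (spf[i] == 0)
            for (int j = i; j <= MAX_NUM; j += i)
                if (spf[j] == 0) spf[j] = i;
    int n;
    cin >> n;
    vector<int> dp(MAX_NUM + 1, 0); // dp[p] = longest valid subsequence ending in a multiple of prime p
    int best = 0;
    for (int i = 0; i < n; ++i) {
        int x;
        cin >> x;
        vector<int> primes; // distinct prime factors of x, read off the sieve
        while (x > 1) {
            int p = spf[x];
            primes.push_back(p);
            while (x % p == 0) x /= p;
        }
        int cur = 1;
        for (int p : primes) cur = max(cur, dp[p] + 1);
        for (int p : primes) dp[p] = cur;
        best = max(best, cur);
    }
    cout << best << endl;
    return 0;
}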
GCD takes log m time, where m is the maximum number in the array. Therefore, using a Segment Tree and binary search, one can reduce the time complexity to O(n log (m² * n)) (with O(n log m) preprocessing). This list details other data structures that can be used for RMQ-type queries and to reduce the complexity further.
Here is one possible implementation of this:
#include <bits/stdc++.h>
using namespace std;
struct SegTree {
using ftype = function<int(int, int)>;
vector<int> vec;
int l, og, dummy;
ftype f;
template<typename T> SegTree(const vector<T> &v, const T &x, const ftype &func) : og(v.size()), f(func), l(1), dummy(x) {
assert(og >= 1);
while (l < og) l *= 2;
vec = vector<int>(l*2);
for (int i = l; i < l+og; i++) vec[i] = v[i-l];
for (int i = l+og; i < 2*l; i++) vec[i] = dummy;
for (int i = l-1; i >= 1; i--) {
if (vec[2*i] == dummy && vec[2*i+1] == dummy) vec[i] = dummy;
else if (vec[2*i] == dummy) vec[i] = vec[2*i+1];
else if (vec[2*i+1] == dummy) vec[i] = vec[2*i];
else vec[i] = f(vec[2*i], vec[2*i+1]);
}
}
SegTree() {}
void valid(int x) {assert(x >= 0 && x < og);}
int get(int a, int b) {
valid(a); valid(b); assert(b >= a);
a += l; b += l;
int s = vec[a];
a++;
while (a <= b) {
if (a % 2 == 1) {
if (vec[a] != dummy) s = f(s, vec[a]);
a++;
}
if (b % 2 == 0) {
if (vec[b] != dummy) s = f(s, vec[b]);
b--;
}
a /= 2; b /= 2;
}
return s;
}
void add(int x, int c) {
valid(x);
x += l;
vec[x] += c;
for (x /= 2; x >= 1; x /= 2) {
if (vec[2*x] == dummy && vec[2*x+1] == dummy) vec[x] = dummy;
else if (vec[2*x] == dummy) vec[x] = vec[2*x+1];
else if (vec[2*x+1] == dummy) vec[x] = vec[2*x];
else vec[x] = f(vec[2*x], vec[2*x+1]);
}
}
void update(int x, int c) {add(x, c-vec[x+l]);}
};
// Constructor (where val is something that an element in the array is
// guaranteed to never reach):
// SegTree st(vec, val, func);
// finds longest subsequence where GCD is greater than 1
int longest(const vector<int> &vec) {
int l = vec.size();
SegTree st(vec, -1, [](int a, int b){return __gcd(a, b);});
// checks if a certain length is valid in O(n log (m² * n)) time
auto valid = [&](int n) -> bool {
for (int i = 0; i <= l-n; i++) {
if (st.get(i, i+n-1) != 1) {
return true;
}
}
return false;
};
int length = 0;
// do a "binary search" on the best possible length
for (int i = l; i >= 1; i /= 2) {
while (length+i <= l && valid(length+i)) {
length += i;
}
}
return length;
}
