I'm trying to find all the possible variations of a number of the form
'1_2_3_4', where each _ is a digit between 0 and 9.
I was wondering what the best approach to this problem is.
This seems like the simplest method:
static void printPerms()
{
int n = 1020304;
for (int i = 0; i <= 9; i++, n += 90000)
for (int j = 0; j <= 9; j++, n += 900)
for (int k = 0; k <= 9; k++, n += 10)
System.out.println(n);
}
Or even this, which has a lovely symmetry:
static void printPerms()
{
int n = 1020304;
for (int ni = n + 900000; n <= ni; n += 90000)
for (int nj = n + 9000; n <= nj; n += 900)
for (int nk = n + 90; n <= nk; n += 10)
System.out.println(n);
}
import java.util.*;
public class Solution {
public static void main(String[] args){
int[] fillable = {1,-1,2,-1,3,-1,4};
for(int i=0;i<=9;++i){
for(int j=0;j<=9;++j){
for(int k=0;k<=9;++k){
fillable[1] = i;
fillable[3] = j;
fillable[5] = k;
System.out.println(Arrays.toString(fillable));
}
}
}
}
}
OUTPUT:
[1, 0, 2, 0, 3, 0, 4]
[1, 0, 2, 0, 3, 1, 4]
[1, 0, 2, 0, 3, 2, 4]
[1, 0, 2, 0, 3, 3, 4]
[1, 0, 2, 0, 3, 4, 4]
[1, 0, 2, 0, 3, 5, 4]
[1, 0, 2, 0, 3, 6, 4]
.
.
.
.
Time Complexity: O(10^n), where n is the number of places to fill in. If the number of empty places is fixed at 3, then it is O(1).
Space Complexity: O(1)
Note: There is no better way to do this. You have to go through each and every combination.
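For comparison, here is a minimal Python sketch of the same enumeration using itertools.product (the template string and the helper name are my own, not from the question):
from itertools import product

def variations(template="1_2_3_4"):
    # substitute every combination of digits for the blanks, last blank varying fastest
    blanks = template.count("_")
    for digits in product("0123456789", repeat=blanks):
        it = iter(digits)
        yield "".join(next(it) if ch == "_" else ch for ch in template)

for v in variations():
    print(v)   # 1020304, 1020314, ..., 1929394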
Python style, assuming an ASCII character representation (using a list of characters, since Python strings are immutable):
n = list("1020304")
while True:
    print("".join(n))
    n[5] = chr(ord(n[5]) + 1)
    if n[5] == ':':              # ':' follows '9' in ASCII
        n[5] = '0'
        n[3] = chr(ord(n[3]) + 1)
        if n[3] == ':':
            n[3] = '0'
            n[1] = chr(ord(n[1]) + 1)
            if n[1] == ':':
                break
Say you are given a set of coins, such as four 10¢, four 5¢, and four 1¢.
You are asked to place these coins on a 12-hour analog clock face, where the next coin you place must be placed at X hours after the previous coin, where X is the value of the previous coin.
So if you place a 1¢ on 12, the next coin you place goes at 1. If you place a 5¢ on 1, the next coin you place goes at 6. And so on.
How can you maximize the number of coins that can be placed on the clock before the next coin would have to be placed in a slot that is already taken?
This is a problem I came across which I have been unable to solve except via exhaustive search. If the inputs are made arbitrary, exhaustive search fails quickly: say it's an arbitrary number of coins of various known denominations, with an arbitrary number of hours on the clock. Then you can't do exhaustive search anymore, because it becomes factorial time and the computational cost is excessive.
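For concreteness, here is a minimal Python sketch of the kind of exhaustive search I mean (names and parameters are my own; it is only practical for tiny instances, which is exactly the problem):
from itertools import permutations

def best_by_brute_force(coins, hours):
    # try every ordering of the coins and simulate the placements
    best = 0
    for order in permutations(coins):
        occupied, pos = set(), 0
        for c in order:
            if pos in occupied:
                break
            occupied.add(pos)
            pos = (pos + c) % hours
        best = max(best, len(occupied))
    return best

print(best_by_brute_force([1, 1, 5, 5, 10, 10], 6))   # 6: all six coins fit on a 6-hour clock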
As maraca mentioned, there probably isn't a much better solution than backtracking without more restrictions. Maybe with a larger number of coins of given denominations the space can be covered with 'patterns'. For example, the coins [5, 10, 10, 5, 10, 10, 5, x] cover the first 8 places and the next coin is placed at a position similar to the first one, so the process can be repeated if there are enough coins.
The number of possible coin combinations in this case is not large at all: 12! / (4! * 4! * 4!) = 34650. For sure the number explodes with larger parameters. Here is simple Python code that solves a problem 3 times larger, which has about 3*10^15 possible coin combinations.
max_positions = []
max_order = None
def add_coin(coins, position, coin_order, occupied_positions, num_hours):
global max_positions, max_order
if position in occupied_positions or not coins:
# Can't place on that position or there is nothing more to place
if len(occupied_positions) > len(max_positions):
max_positions = occupied_positions
max_order = coin_order
return not coins # if all is covered return true to stop search
#
for c, num_coins in coins: # Try each coin
# Copy coins to new list and remove one used
c_coins = [x for x in coins if x[0] != c]
if num_coins > 1:
c_coins.append((c, num_coins-1))
# Next iteration
if add_coin(c_coins,
(position + c) % num_hours,
coin_order + [c],
occupied_positions + [position],
num_hours):
return True
def solve_coins(coins, num_hours):
global max_positions, max_order
max_positions = []
max_order = None
add_coin(coins, 0, [], [], num_hours)
    print(len(max_positions), max_positions, max_order)
solve_coins([(1, 4), (5, 4), (10, 4)], 12)
solve_coins([(1, 8), (5, 8), (10, 8)], 24)
solve_coins([(1, 12), (5, 12), (10, 12)], 36)
output:
12 [0, 1, 6, 4, 2, 3, 8, 9, 7, 5, 10, 11] [1, 5, 10, 10, 1, 5, 1, 10, 10, 5, 1, 5]
24 [0, 1, 6, 16, 17, 3, 4, 14, 19, 5, 15, 20, 21, 2, 7, 8, 13, 18, 23, 9, 10, 11, 12, 22] [1, 5, 10, 1, 10, 1, 10, 5, 10, 10, 5, 1, 5, 5, 1, 5, 5, 5, 10, 1, 1, 1, 10, 10]
36 [0, 1, 6, 16, 17, 22, 23, 28, 2, 12, 13, 18, 19, 29, 34, 3, 8, 9, 10, 11, 21, 31, 5, 15, 20, 30, 35, 4, 14, 24, 25, 26, 27, 32, 33, 7] [1, 5, 10, 1, 5, 1, 5, 10, 10, 1, 5, 1, 10, 5, 5, 5, 1, 1, 1, 10, 10, 10, 10, 5, 10, 5, 5, 10, 10, 1, 1, 1, 5, 1, 10, 5]
// Expressing the coins as a list of buckets grouped by the same modulo lets you
// efficiently find the next coin to test: you don't calculate with the first penny
// and then do the same again starting with the second penny (or with a 13-coin on
// a 12-hour clock), since it is basically the same.
// Additionally it allows removing and re-inserting items at the current position in O(1).
// Also, reverting is much cheaper than copying the whole state on each recursive call.
private class Bucket {
public int number;
public LinkedList<Integer> numbers = new LinkedList<>();
public Bucket(int number, int hours) {
this.number = number % hours;
numbers.add(number);
}
}
private LinkedList<Bucket> coins; // not using interface List as you are supposed to
private LinkedList<Integer> best, current; // because of removeLast()
private boolean[] occupied;
private int hours, limit;
public List<Integer> findBest(int[] coins, int hours) {
this.hours = hours;
// create buckets of coins with the same modulo
Integer[] c = Arrays.stream(coins).boxed().toArray( Integer[]::new );
// sort descending because a lot of small coins in a row are more likely to create
// an impassable area on the next pass around the clock
Arrays.sort(c, new Comparator<Integer>(){
public int compare(Integer a, Integer b) {
return Integer.compare(b.intValue() % hours, a.intValue() % hours);
}
});
this.coins = new LinkedList<>();
Bucket b = new Bucket(c[0].intValue(), hours);
this.coins.add(b);
int mod = c[0].intValue() % hours, coinCount = 1;
for (int i = 1; i < c.length; i++) {
int m = c[i].intValue() % hours;
if (m == mod) { // same bucket
b.numbers.add(c[i]);
} else { // new bucket
b = new Bucket(c[i].intValue(), hours);
this.coins.add(b);
mod = m;
}
coinCount++;
if (mod == 0) // don't need more than one 0 value
break;
}
best = new LinkedList<>();
current = new LinkedList<>();
occupied = new boolean[hours];
limit = coinCount < hours ? coinCount : hours; // max coins that can be placed
findBest(0);
return best;
}
private void findBest(int pos) {
if (best.size() == limit) // already found optimal solution
return;
if (occupied[pos] || current.size() == limit) {
if (current.size() > best.size())
best = (LinkedList<Integer>)current.clone();
return;
}
occupied[pos] = true;
for (int i = 0; i < coins.size(); i++) {
Bucket b = coins.get(i);
current.add(b.numbers.removeLast());
boolean removed = false;
if (b.numbers.size() == 0) { // bucket empty
coins.remove(i);
removed = true;
}
findBest((pos + b.number) % hours);
if (removed)
coins.add(i, b);
b.numbers.add(current.removeLast());
}
occupied[pos] = false;
}
Output for the given example: 10 10 5 1 1 1 5 10 10 1 5 5
Here is a slightly more optimized version in JavaScript where the list is implemented manually, so that you can really see why removing and adding the current bucket is O(1). Because the list is always read in order, it is superior to an array in this case: with an array you need to shift many elements or skip a lot of empty ones, depending on how you implement it, but not with a list of buckets. It should be a little easier to understand than the Java code.
var head, occupied, current, best, h, limit;
document.body.innerHTML = solve([1,1,1,1,5,5,5,5,10,10,10,10], 12);
function solve(coins, hours) {
h = hours;
coins.sort(function(a, b) {
let x = a % hours, y = b % hours;
if (x > y)
return -1;
if (x < y)
return 1;
return 0;
});
let mod = coins[0] % hours;
head = {num: mod, vals: [coins[0]], next: null};
let b = head, coinCount = 1;
for (let i = 1; i < coins.length && mod != 0; i++) {
let m = coins[i] % hours;
if (m == mod) {
b.vals.push(coins[i]);
} else {
b.next = {num: m, vals: [coins[i]], next: null};
b = b.next;
mod = m;
}
coinCount++;
}
limit = coinCount < hours ? coinCount : hours;
occupied = [];
for (let i = 0; i < hours; i++)
occupied.push(false);
best = [];
current = [];
solveRec(0);
return JSON.stringify(best);
}
function solveRec(pos) {
occupied[pos] = true;
let b = head, prev = null;
while (b !== null) {
let m = (pos + b.num) % h;
if (!occupied[m]) {
current.push(b.vals.pop());
let rem = false;
if (b.vals.length == 0) {
if (prev == null)
head = b.next;
else
prev.next = b.next;
rem = true;
}
solveRec(m);
if (current.length > best.length)
best = current.slice();
if (best.length == limit)
return;
if (rem) {
if (prev == null)
head = b;
else
prev.next = b;
}
b.vals.push(current.pop());
} else if (current.length + 1 > best.length) {
best = current.slice();
best.push(b.vals[b.vals.length - 1]);
}
prev = b;
b = b.next;
}
occupied[pos] = false;
}
I am still working on this, but it is already much better than O(n!). I will try to put a new O() bound on it soon.
The concept is pretty simple: you create the smallest combos of numbers and then link them together into longer and longer strings of numbers until the next step is not possible.
A key to this working is that you don't track the front or end of a list of numbers, only the sum (and the inner sums, since they were checked at earlier steps). So long as that sum is never divisible by the clock size, it remains a clean solution.
Each step attempts to "splice" the smaller combos into the next size bigger:
(1,3), (3,1) -> (1,3,1), (3,1,3)
Here is a brief example (simplified) of what the algo is doing:
clock: 4
coin count: 4
coin values: 1, 2, 3, 3
pairs (passing ones are kept; the others are skipped for one of three reasons: not enough coins in the population to build the combo, a sum divisible by the clock size, or, in the actual algo, duplicate prevention): (1,1), (1,2), (1,3), (2,1), (2,2), (2,3), (3,1), (3,2), (3,3)
triples: (1,2,1), (2,1,2), (2,3,2), (3,2,3), (1,2,3), (2,3,3), (3,2,1), (3,3,2)
final combos: (1,2,3,x), (3,2,1,x)
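Before the full C++ below, here is a minimal Python sketch of the splice step just described, run on the simplified example above (function and variable names are my own, not from the C++):
from collections import Counter

def splice(combos, counts, clock):
    # merge two combos that overlap on everything except their first/last element;
    # keep the result only if the coin counts allow it and the running sum does not
    # land back on the starting position (i.e. is not divisible by the clock size)
    out = []
    for a in combos:
        for b in combos:
            if a[1:] == b[:-1]:
                cand = a + (b[-1],)
                if all(cand.count(v) <= counts[v] for v in set(cand)) and sum(cand) % clock != 0:
                    out.append(cand)
    return out

coins, clock = [1, 2, 3, 3], 4
counts = Counter(coins)
pairs = [(a, b) for a in sorted(set(coins)) for b in sorted(set(coins))
         if (a + b) % clock != 0 and (a != b or counts[a] > 1)]
print(splice(pairs, counts, clock))   # [(1, 2, 3), (3, 2, 1)] -> the (1,2,3,x)/(3,2,1,x) combos above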
This code is runnable standalone in C++ (placeCoins is the algo):
I assume you will adapt the algo to your purposes, but for anyone who wishes to run this .cpp file: it asks for the clock size and the coin count, then accepts that many coin values followed by enter. The output shows the best count, all orders at that count, and, while the algo runs, the number of combos at the step currently being processed (from which you can estimate the complexity / number of combos checked, to see how much faster this is than any kind of exhaustive search).
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <map>
using namespace std;
//min clock size 3
vector<vector<int>> placeCoins(int _clockSize, vector<int> _coins)
{
int totalCheckedCombos = 0;
vector<vector<int>> coinGroups;
vector<int> coinSet = _coins;
sort(coinSet.begin(), coinSet.end());
coinSet.erase(unique(coinSet.begin(), coinSet.end()), coinSet.end());
map<int, int> coinCounts;
for (int i = 0; i < coinSet.size(); i++)
{
coinCounts[coinSet.at(i)] = count(_coins.begin(), _coins.end(), coinSet.at(i));
}
cout << "pairs" << endl;
//generate fair pairs of coins
for (int i = 0; i < coinSet.size(); i++)
{
for (int ii = 0; ii < coinSet.size(); ii++)
{
if ((coinSet.at(i) + coinSet.at(ii)) % _clockSize != 0)
{
if (i == ii)
{
if (coinCounts[coinSet.at(i)] > 1)
{
coinGroups.push_back({ coinSet.at(i),coinSet.at(ii) });
}
}
else
{
coinGroups.push_back({ coinSet.at(i),coinSet.at(ii) });
}
}
}
}
cout << "combine" << endl;
//iteratively combine groups of coins
for (int comboSize = 3; comboSize < _clockSize; comboSize++)
{
totalCheckedCombos += coinGroups.size();
vector<vector<int>> nextSizeCombos;
for (int i = 0; i < coinGroups.size(); i++)
{
for (int ii = 0; ii < coinGroups.size(); ii++)
{
//check combo to match
bool match = true;
for (int a = 0; a < comboSize - 2; a++)
{
if (coinGroups.at(i).at(a+1) != coinGroups.at(ii).at(a))
{
match = false;
break;
}
}
//check sum
if (match)
{
vector<int> tempCombo = coinGroups.at(i);
int newVal = coinGroups.at(ii).at(coinGroups.at(ii).size()-1);
tempCombo.push_back(newVal);
if (coinCounts[newVal] >= count(tempCombo.begin(), tempCombo.end(), newVal))
{
if (accumulate(tempCombo.begin(), tempCombo.end(), 0) % _clockSize != 0)
{
nextSizeCombos.push_back(tempCombo);
}
}
}
}
}
if (nextSizeCombos.size() == 0)
{
//finished, no next size combos found
break;
}
else
{
cout << nextSizeCombos.size() << endl;
coinGroups = nextSizeCombos;
}
}
cout << "total combos checked: " << totalCheckedCombos << endl;
return coinGroups;
}
int main(int argc, char** argv) {
int clockSize;
int coinCount;
vector<int> coins = {};
cout << "enter clock size: " << endl;
cin >> clockSize;
cout << "count number: " << endl;
cin >> coinCount;
for (int i = 0; i < coinCount; i++)
{
int tempCoin;
cin >> tempCoin;
coins.push_back(tempCoin);
}
cout << "press enter to compute combos: " << endl;
cin.get();
cin.get();
vector<vector<int>> resultOrders = placeCoins(clockSize, coins);
for (int i = 0; i < resultOrders.size(); i++)
{
cout << resultOrders.at(0).size()+1 << endl;
for (int ii = 0; ii < resultOrders.at(i).size(); ii++)
{
cout << resultOrders.at(i).at(ii) << " , ";
}
cout <<"x"<< endl;
}
cin.get();
cin.get();
}
PS: Although I debugged this to a stable state, it could definitely use fine tuning and optimization, but that varies between languages, so I just got the algo working and called it good enough. If you see something glaringly wrong or in poor form, feel free to comment and I'll fix it (or edit it directly if you want).
Instead of a greedy approach, try the maximum result of choosing a coin vs. not choosing a coin.
def valueOfClock(capacity, coins, n, hours):
    if n == 0 or capacity == 0 or hours > 12:
        return 0
    # Skip the next coin if its value is greater than the remaining capacity
    if coins[n-1] > capacity:
        return valueOfClock(capacity, coins, n-1, hours)
    # Otherwise take the max of either skipping the current coin or choosing it
    return max(valueOfClock(capacity, coins, n-1, hours),
               valueOfClock(capacity - coins[n-1], coins, n-1, hours + coins[n-1]))
This is a problem from an Introduction to Algorithms course:
You have an array with n random positive integers (the array doesn't
need to be sorted or the elements unique). Suggest an O(n) algorithm
to find the largest sum of elements, that is divisible by n.
It's relatively easy to find it in O(n^2) using dynamic programming, storing the largest sum with remainder 0, 1, 2, ..., n - 1. Here is JavaScript code:
function sum_mod_n(a)
{
var n = a.length;
var b = new Array(n);
b.fill(-1);
for (var i = 0; i < n; i++)
{
var u = a[i] % n;
var c = b.slice();
for (var j = 0; j < n; j++) if (b[j] > -1)
{
var v = (u + j) % n;
if (b[j] + a[i] > b[v]) c[v] = b[j] + a[i];
}
if (c[u] == -1) c[u] = a[i];
b = c;
}
return b[0];
}
It's also easy to find it in O(n) for contiguous elements, storing partial sums MOD n. Another sample:
function cont_mod_n(a)
{
var n = a.length;
var b = new Array(n);
b.fill(-1);
b[0] = 0;
var m = 0, s = 0;
for (var i = 0; i < n; i++)
{
s += a[i];
var u = s % n;
if (b[u] == -1) b[u] = s;
else if (s - b[u] > m) m = s - b[u];
}
return m;
}
But what about O(n) in the general case? Any suggestions will be appreciated! I suspect this has something to do with linear algebra, but I'm not sure what exactly.
EDIT: Can this actually be done in O(n log n)?
Since you don't specify what random means (uniform? if so, in what interval?), the only general solution is the one for arbitrary arrays, and I don't think you can do better than O(n^2). This is the dynamic programming algorithm in Python:
def sum_div(positive_integers):
n = len(positive_integers)
# initialise the dynamic programming state
    # the index runs over all possible remainders mod n
    # the DP values keep track of the maximum sum you can have for that remainder
DP = [0] * n
for positive_integer in positive_integers:
for remainder, max_sum in list(enumerate(DP)):
max_sum_next = max_sum + positive_integer
remainder_next = max_sum_next % n
if max_sum_next > DP[remainder_next]:
DP[remainder_next] = max_sum_next
return DP[0]
You can probably work out a faster solution if you have an upper limit for the values in the array, e.g. n.
Very interesting question!
This is my JS code. I don't think the O(n^2) bound can be lowered, so I suppose the way forward is to find an algorithm that is more efficient in terms of benchmarking.
My (corrected) approach boils down to exploring paths of sums until the next matching one (i.e. one divisible by _n) is found. The source array progressively shrinks as matching sums are found.
(I provided different examples at the top)
var _a = [1000, 1000, 1000, 1000, 1000, 1000, 99, 10, 9] ;
//var _a = [1000, 1000, 1000, 1000, 1000, 1000, 99, 10, 9, 11] ;
//var _a = [1, 6, 6, 6, 6, 6, 49] ;
//var _a = [ -1, 1, 2, 4 ] ;
//var _a = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] ;
//var _a = [1,1,1,1,1,1] ;
var _n = _a.length, _del_indexes = [] ;
var _rec = 0, _sum = 0, _start = 0, _test = 0 ;
console.log( "input array : ", _a );
console.log( "cardinality : ", _a.length );
while( _start < _a.length )
{
_test = 0 ;
for( var _i = _start ; _i < _a.length ; _i++ )
{
_sum += _a[_i%_n] ;
_del_indexes.push( _a[_i%_n] );
if ( ( _sum % _n ) == 0 )
{
_rec = _sum ;
_test = 1 ;
break ;
}
}
if ( _test )
{
for( var _d = 0 ; _d < _del_indexes.length ; _d++ ) _a.splice( _a.indexOf( _del_indexes[_d] ), 1 ) ;
_start = 0 ;
}
else _start++ ;
_del_indexes = [] ;
_sum = _rec ;
}
console.log( "Largest sum % " + _n + " is : ", _rec == 0 ? "none" : _rec );
I have an algorithm design puzzle that I could not solve.
The puzzle is formulated like this: there are N persons standing on a number line, each of them standing on some integer on that line. Multiple persons may stand on the same number. For two persons to be able to communicate with each other, the distance between them must be less than K. The goal is to move them so that every pair of persons can communicate with each other (possibly via other people). In other words, we need to move them so that the distance between any two neighboring persons is smaller than K.
Question: What is the minimum total number of moves? It feels like this falls into the greedy algorithm family or dynamic programming. Any hints are appreciated!
We can do the following in O(n):
Calculate the cost of moving all people to the right of person i towards person i at an acceptable distance:
costRight(A[i]) = costRight(A[i+1]) + (A[i+1] - A[i] - k + 1) * count of people to the right
K = 3; A = { 0, 3, 11, 17, 21}
costRight = {32, 28, 10, 2, 0}
Calculate the cost of moving all people to the left of person i towards person i at an acceptable distance:
costLeft(A[i]) = costLeft(A[i-1]) + (A[i] - A[i-1] - k + 1) * count of people to the left
K = 3; A = { 0, 3, 11, 17, 21}
costLeft = { 0, 1, 13, 25, 33}
costRight = {32, 28, 10, 2, 0}
Now that we have cost from both directions we can do this in O(n):
minCost = min(costRight + costLeft) for all A[i]
minCost = min(32 + 0, 28 + 1, 10 + 13, 2 + 25, 0 + 33) = 23
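Here is a minimal Python sketch of that two-sided cost computation (the function name is mine, and I added a max(0, ...) guard for neighbours that are already within range, which the formulas above assume implicitly):
def min_cost_two_sided(A, K):
    # A must be sorted; cost of pulling everyone towards person i from both sides,
    # leaving gaps of at most K - 1 between neighbours
    n = len(A)
    costLeft, costRight = [0] * n, [0] * n
    for i in range(1, n):
        costLeft[i] = costLeft[i-1] + max(A[i] - A[i-1] - K + 1, 0) * i
    for i in range(n - 2, -1, -1):
        costRight[i] = costRight[i+1] + max(A[i+1] - A[i] - K + 1, 0) * (n - 1 - i)
    return min(l + r for l, r in zip(costLeft, costRight))

print(min_cost_two_sided([0, 3, 11, 17, 21], 3))   # 23, matching the example above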
But sometimes that's not enough:
K = 3; A = { 0, 0, 1, 8, 8}
carry: -2 -4 3
costLeft = { 0, 0, 0, 11, 11}
carry: -3 5 -2
costRight = { 8, 8, 8, 0, 0}
The optimum is neither 11 nor 8. Test the current best by moving towards the greatest saving:
move 1 to 2, cost = 1
K = 3; A = { 0, 0, 2, 8, 8}
carry: -2 -2 -10
costLeft = { 0, 0, 0, 10, 10}
carry: -2 -2
costRight = { 6, 6, 6, 0, 0}
minCost = 1 + min(0 + 6, 0 + 6, 0 + 6, 10 + 0, 10 + 0) = 1 + 6 = 7
Not quite sure how to formalize this efficiently.
Here is a greedy algorithm written in Java, but I don't know if it gives the optimal solution in every case. Also, it is more of a proof of concept; there is some room for optimization.
It is based on the fact that two neighbouring persons must not be more than K apart, the next neighbour must not be more than 2K away and so on. In each step we move the person that "violates these constraints most". The details of this calculation are in method calcForce.
package so;
import java.util.Arrays;
public class Main {
public static void main(String args[]) {
int[] position = new int[] {0, 0, 5, 11, 17, 23};
int k = 5;
solve(position, k);
}
private static void solve(int[] position, int k) {
if (!sorted(position)) {
throw new IllegalArgumentException("positions must be sorted");
}
int[] force = new int[position.length];
int steps = 0;
while (calcForce(position, k, force)) {
int mp = -1;
int mv = -1;
for (int i = 0; i < force.length; i++) {
if (mv < Math.abs(force[i])) {
mv = Math.abs(force[i]);
mp = i;
}
}
System.out.printf("move %d to the %s%n", mp, force[mp] > 0 ? "right" : "left");
if (force[mp] > 0) {
position[mp]++;
} else {
position[mp]--;
}
steps++;
}
System.out.printf("total: %d steps%n", steps);
}
private static boolean calcForce(int[] position, int k, int[] force) {
boolean commProblem = false;
Arrays.fill(force, 0);
for (int i = 0; i < position.length - 1; i++) {
for (int j = i + 1; j < position.length; j++) {
int f = position[j] - position[i] - (j - i) * k;
if (f > 0) {
force[i] += f;
force[j] -= f;
commProblem = true;
}
}
}
return commProblem;
}
private static boolean sorted(int[] position) {
for (int i = 0; i < position.length - 1; i++) {
if (position[i] > position[i+1]) {
return false;
}
}
return true;
}
}
You are given a sequence of numbers and you need to find a longest increasing subsequence of the given input (not necessarily contiguous).
I found the link to this (Longest increasing subsequence on Wikipedia) but need more explanation.
If anyone could help me understand the O(n log n) implementation, that would be really helpful. If you could explain the algo with an example, that would be really appreciated.
I saw the other posts as well and what I did not understand is:
L = 0
for i = 1, 2, ... n:
binary search for the largest positive j ≤ L such that X[M[j]] < X[i] (or set j = 0 if no such value exists)
In the statement above, where do I start the binary search? How do I initialize M[] and X[]?
A simpler problem is to find the length of the longest increasing subsequence. You can focus on understanding that problem first. The only difference in the algorithm is that it doesn't use the P array.
x is the input of a sequence, so it can be initialized as:
x = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
m keeps track of the best subsequence of each length found so far. The best is the one with the smallest ending value (allowing a wider range of values to be added after it). The length and ending value are the only data that need to be stored for each subsequence.
Each element of m represents a subsequence. For m[j],
j is the length of the subsequence.
m[j] is the index (in x) of the last element of the subsequence.
so, x[m[j]] is the value of the last element of the subsequence.
L is the length of the longest subsequence found so far. The first L values of m are valid, the rest are uninitialized. m can start with the first element being 0, the rest uninitialized. L increases as the algorithm runs, and so does the number of initialized values of m.
Here's an example run. x[i], and m at the end of each iteration is given (but values of the sequence are used instead of indexes).
The search in each iteration is looking for where to place x[i]. It should be as far to the right as possible (to get the longest sequence), and be greater than the value to its left (so it's an increasing sequence).
0: m = [0, 0] - ([0] is a subsequence of length 1.)
8: m = [0, 0, 8] - (8 can be added after [0] to get a sequence of length 2.)
4: m = [0, 0, 4] - (4 is better than 8. This can be added after [0] instead.)
12: m = [0, 0, 4, 12] - (12 can be added after [...4])
2: m = [0, 0, 2, 12] - (2 can be added after [0] instead of 4.)
10: m = [0, 0, 2, 10]
6: m = [0, 0, 2, 6]
14: m = [0, 0, 2, 6, 14]
1: m = [0, 0, 1, 6, 14]
9: m = [0, 0, 1, 6, 9]
5: m = [0, 0, 1, 5, 9]
13: m = [0, 0, 1, 5, 9, 13]
3: m = [0, 0, 1, 3, 9, 13]
11: m = [0, 0, 1, 3, 9, 11]
7: m = [0, 0, 1, 3, 7, 11]
15: m = [0, 0, 1, 3, 7, 11, 15]
Now we know there is a subsequence of length 6, ending in 15. The actual values in the subsequence can be found by storing them in the P array during the loop.
Retrieving the best sub-sequence:
P stores the previous element in the longest subsequence (as an index of x), for each number, and is updated as the algorithm advances. For example, when we process 8, we know it comes after 0, so store the fact that 8 is after 0 in P. You can work backwards from the last number like a linked-list to get the whole sequence.
So for each number we know the number that came before it. To find the subsequence ending in 7, we look at P and see that:
7 is after 3
3 is after 1
1 is after 0
So we have the subsequence [0, 1, 3, 7].
The subsequences ending in 7 or 15 share some numbers:
15 is after 11
11 is after 9
9 is after 6
6 is after 2
2 is after 0
So we have the subsequences [0, 2, 6, 9, 11], and [0, 2, 6, 9, 11, 15] (the longest increasing subsequence)
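To make the bookkeeping above concrete, here is a minimal Python sketch of the same idea (the names are mine: tail_vals/tail_idx play the role of M, prev plays the role of P, and bisect does the binary search):
import bisect

def longest_increasing_subsequence(x):
    if not x:
        return []
    tail_vals = []            # smallest ending value of an increasing subsequence of each length
    tail_idx = []             # index in x of that ending value (the role of M)
    prev = [-1] * len(x)      # index of the element that comes before x[i] (the role of P)
    for i, v in enumerate(x):
        pos = bisect.bisect_left(tail_vals, v)        # the binary search
        prev[i] = tail_idx[pos - 1] if pos > 0 else -1
        if pos == len(tail_vals):
            tail_vals.append(v)
            tail_idx.append(i)
        else:
            tail_vals[pos] = v
            tail_idx[pos] = i
    # walk the prev links back from the tail of the longest subsequence
    seq, i = [], tail_idx[-1]
    while i != -1:
        seq.append(x[i])
        i = prev[i]
    return seq[::-1]

print(longest_increasing_subsequence([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]))
# [0, 2, 6, 9, 11, 15]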
One of the best explanations of this problem is given on the MIT site:
http://people.csail.mit.edu/bdean/6.046/dp/
I hope it will clear all your doubts.
Based on FJB's answer, here is a Java implementation:
public class Lis {
private static int[] findLis(int[] arr) {
int[] is = new int[arr.length];
int index = 0;
is[0] = index;
for (int i = 1; i < arr.length; i++) {
if (arr[i] < arr[is[index]]) {
for (int j = 0; j <= index; j++) {
if (arr[i] < arr[is[j]]) {
is[j] = i;
break;
}
}
} else if (arr[i] == arr[is[index]]) {
} else {
is[++index] = i;
}
}
int[] lis = new int[index + 1];
lis[index] = arr[is[index]];
for (int i = index - 1; i >= 0; i--) {
if (is[i] < is[i + 1]) {
lis[i] = arr[is[i]];
} else {
for (int j = is[i + 1] - 1; j >= 0; j--) {
if (arr[j] > arr[is[i]] && arr[j] < arr[is[i + 1]]) {
lis[i] = arr[j];
is[i] = j;
break;
}
}
}
}
return lis;
}
public static void main(String[] args) {
int[] arr = new int[] { 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11,
7, 15 };
for (int i : findLis(arr)) {
System.out.print(i + "-");
}
System.out.println();
arr = new int[] { 1, 9, 3, 8, 11, 4, 5, 6, 4, 19, 7, 1, 7 };
for (int i : findLis(arr)) {
System.out.print(i + "-");
}
System.out.println();
}
}
Below is an O(N log N) longest increasing subsequence implementation:
// Search for the index that can be replaced by x. The index can't be 0 or the end
// of the "result" array (if x is smaller than result[0] we replace it in findLIS(),
// and if x is greater than the current maximum we just append), so most of the
// boundary conditions are not required.
public static int search(int[] result, int p, int r, int x)
{
if(p > r) return -1;
int q = (p+r)/2;
if(result[q] < x && result[q+1]>x)
{
return q+1;
}
else if(result[q] > x)
{
return search(result, p, q, x);
}
else
{
return search(result, q+1, r, x);
}
}
public static int findLIS(int[] a)
{
int[] result = new int[a.length];
result[0] = a[0];
int index = 0;
for(int i=1; i<a.length; i++)
{
int no = a[i];
if(no < result[0]) // replacing the min number
{
result[0] = no;
}
        else if(no > result[index]) // if the number is bigger than the current max then append
{
result[++index] = no;
}
else
{
int c = search(result, 0, index, no);
result[c] = no;
}
}
return index+1;
}
Late to the party, but here's a JavaScript implementation to go along with the others.. :)
var findLongestSubsequence = function(array) {
var longestPartialSubsequences = [];
var longestSubsequenceOverAll = [];
for (var i = 0; i < array.length; i++) {
var valueAtI = array[i];
var subsequenceEndingAtI = [];
for (var j = 0; j < i; j++) {
var subsequenceEndingAtJ = longestPartialSubsequences[j];
var valueAtJ = array[j];
if (valueAtJ < valueAtI && subsequenceEndingAtJ.length > subsequenceEndingAtI.length) {
subsequenceEndingAtI = subsequenceEndingAtJ;
}
}
longestPartialSubsequences[i] = subsequenceEndingAtI.concat();
longestPartialSubsequences[i].push(valueAtI);
if (longestPartialSubsequences[i].length > longestSubsequenceOverAll.length) {
longestSubsequenceOverAll = longestPartialSubsequences[i];
}
}
return longestSubsequenceOverAll;
};
Based on #fgb's answer, I implemented the algorithm in C++ to find the longest strictly increasing subsequence. Hope this will be somewhat helpful.
M[i] is the index of the last element of the sequence whose length is i, P[i] is the index of the previous element of i in the sequence, which is used to print the whole sequence.
main() is used to run the simple test case: {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}.
#include <cstdio>
#include <vector>
using std::vector;
int LIS(const vector<int> &v) {
int size = v.size(), max_len = 1;
// M[i] is the index of the last element of the sequence whose length is i
int *M = new int[size];
// P[i] is the index of the previous element of i in the sequence, which is used to print the whole sequence
int *P = new int[size];
M[0] = 0; P[0] = -1;
for (int i = 1; i < size; ++i) {
if (v[i] > v[M[max_len - 1]]) {
M[max_len] = i;
P[i] = M[max_len - 1];
++max_len;
continue;
}
// Find the position to insert i using binary search
int lo = 0, hi = max_len - 1;
while (lo <= hi) {
int mid = lo + ((hi - lo) >> 1);
if (v[i] < v[M[mid]]) {
hi = mid - 1;
} else if (v[i] > v[M[mid]]) {
lo = mid + 1;
} else {
lo = mid;
break;
}
}
P[i] = P[M[lo]]; // Modify the previous pointer
M[lo] = i;
}
// Print the whole subsequence
int i = M[max_len - 1];
while (i >= 0) {
printf("%d ", v[i]);
i = P[i];
}
printf("\n");
delete[] M, delete[] P;
return max_len;
}
int main(int argc, char* argv[]) {
int data[] = {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15};
vector<int> v;
v.insert(v.end(), data, data + sizeof(data) / sizeof(int));
LIS(v);
return 0;
}
The O(N lg N) solution comes from patience sorting of playing cards. I found this in my code comments and am sharing it here; I believe it makes it much easier for everyone to understand how it works. You can also find all possible longest increasing subsequences if you understand it well.
https://www.cs.princeton.edu/courses/archive/spring13/cos423/lectures/LongestIncreasingSubsequence.pdf
Code:
#include <algorithm>
#include <climits>
#include <vector>
using namespace std;
vector<int> lisNlgN(vector<int> v) {
int n = v.size();
vector<int> piles = vector<int>(n, INT_MAX);
int maxLen = 0;
for(int i = 0; i < n; i++) {
int pos = lower_bound(piles.begin(), piles.end(), v[i]) - piles.begin();
piles[pos] = v[i];
maxLen = max(maxLen, pos+1); // Plus 1 because of 0-based index.
}
// // Print piles for debug purpose
// for (auto x : piles) cout << x << " ";
// cout << endl;
//
// // Print position for debug purpose
// for (auto x : position) cout << x << " ";
// cout << endl;
vector<int> ret = vector<int>(piles.begin(), piles.begin() + maxLen);
return ret;
}
Code:
vector<vector<int>> allPossibleLIS(vector<int> v) {
struct Card {
int val;
Card* parent = NULL;
Card(int val) {
this->val = val;
}
};
auto comp = [](Card* a, Card* b) {
return a->val < b->val;
};
int n = v.size();
// Convert integers into card node
vector<Card*> cards = vector<Card*>(n);
for (int i = 0; i < n; i++) cards[i] = new Card(v[i]);
vector<Card*> piles = vector<Card*>(n, new Card(INT_MAX));
vector<Card*> lastPileCards;
int maxLen = 0;
for(int i = 0; i < n; i++) {
int pos = lower_bound(piles.begin(), piles.end(), new Card(v[i]), comp) - piles.begin();
piles[pos] = cards[i];
// Link to top card of left pile
if (pos == 0) cards[i]->parent = NULL;
else cards[i]->parent = piles[pos-1];
// Plus 1 because of 0-based index.
if (pos+1 == maxLen) {
lastPileCards.push_back(cards[i]);
} else if (pos+1 > maxLen) {
lastPileCards.clear();
lastPileCards.push_back(cards[i]);
maxLen = pos + 1;
}
}
// Print for debug purpose
// printf("maxLen = %d\n", maxLen);
// printf("Total unique lis list = %d\n", lastPileCards.size());
vector<vector<int>> ret;
for (auto card : lastPileCards) {
vector<int> lis;
Card* c = card;
while (c != NULL) {
lis.push_back(c->val);
c = c->parent;
}
reverse(lis.begin(), lis.end());
ret.push_back(lis);
}
return ret;
}