Unclear understanding of the Lowest Common Ancestor (LCA) algorithm

I am trying to learn the LCA algorithm with O(n log n) preprocessing and O(log n) per query. I am reading it from a Russian site with the help of Google Translate, but the translation is not good and I am having a tough time understanding it. Can anybody help me with this?
This is the pseudocode I have taken from that website:
int n, l;
vector<vector<int>> g;
vector<int> tin, tout;
int timer;
vector<vector<int>> up;
void dfs(int v, int p = 0)
{
    tin[v] = ++timer;
    up[v][0] = p;
    for (int i = 1; i <= l; ++i)   // 3) What is this doing?
        up[v][i] = up[up[v][i-1]][i-1];
    for (size_t i = 0; i < g[v].size(); ++i)
    {
        int to = g[v][i];
        if (to != p)
            dfs(to, v);
    }
    tout[v] = ++timer;
}
bool upper(int a, int b)
{
    return tin[a] <= tin[b] && tout[a] >= tout[b];
}
int lca(int a, int b)
{
    if (upper(a, b)) return a;
    if (upper(b, a)) return b;
    for (int i = l; i >= 0; --i)   // 2) What is this doing?
        if (!upper(up[a][i], b))
            a = up[a][i];
    return up[a][0];
}
int main() {
    ... Read n and g ...
    tin.resize(n), tout.resize(n), up.resize(n);
    l = 1;   // 0) What is 'l' used for?
    while ((1 << l) <= n) ++l;   // 1) What is this doing?
    for (int i = 0; i < n; ++i)
        up[i].resize(l + 1);
    dfs(0);
    for (;;)   // query loop
    {
        int a, b;   // the current query
        int res = lca(a, b);   // answer to the query
    }
}
What I have understood:
I know we are traversing the graph and storing the in-time and out-time of every vertex.
I understand what up[i][j] is: it is the 2^j-th ancestor of vertex i.
I understand why up[v][0] = p (because the 2^0-th, i.e. first, ancestor of vertex v is just its father).
I understand what the upper function does: it decides which vertex occurred first, A or B.
I understand that if upper(a, b) comes out to be true then the LCA is A, and similarly for the second check.
What I don't understand is marked in the pseudocode. Please help me, and please confirm whether I have understood everything correctly.
P.S. Sorry for my English; I am not very comfortable with it.
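For anyone following along, here is a minimal self-contained sketch of the binary-lifting idea the commented lines implement, on a small hard-coded tree (my own illustration, not code from the Russian site):
// Minimal binary-lifting sketch (illustration only). l is the number of levels
// needed so that 2^l > n, so no ancestor jump is ever longer than the tree.
// up[v][i] is the 2^i-th ancestor of v, built as up[v][i] = up[up[v][i-1]][i-1]
// (jump 2^(i-1) twice). The descending loop in lca() jumps a upward by the
// largest power of two that does NOT land on an ancestor of b; afterwards
// up[a][0] is the answer.
#include <bits/stdc++.h>
using namespace std;
int main() {
    int n = 7;                                    // example tree rooted at 0
    vector<vector<int>> g = {{1,2},{0,3,4},{0,5,6},{1},{1},{2},{2}};
    int l = 1;
    while ((1 << l) <= n) ++l;                    // smallest l with 2^l > n
    vector<int> tin(n), tout(n);
    vector<vector<int>> up(n, vector<int>(l + 1));
    int timer = 0;
    function<void(int,int)> dfs = [&](int v, int p) {   // recursive DFS
        tin[v] = ++timer;
        up[v][0] = p;
        for (int i = 1; i <= l; ++i)
            up[v][i] = up[up[v][i-1]][i-1];
        for (int to : g[v])
            if (to != p) dfs(to, v);
        tout[v] = ++timer;
    };
    dfs(0, 0);
    auto upper = [&](int a, int b) {              // is a an ancestor of b (or b itself)?
        return tin[a] <= tin[b] && tout[a] >= tout[b];
    };
    auto lca = [&](int a, int b) {
        if (upper(a, b)) return a;
        if (upper(b, a)) return b;
        for (int i = l; i >= 0; --i)
            if (!upper(up[a][i], b)) a = up[a][i];
        return up[a][0];
    };
    cout << lca(3, 4) << ' ' << lca(3, 5) << ' ' << lca(5, 6) << '\n';  // prints: 1 0 2
}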

Related

Find the number of intersections of n line segments with endpoints on two parallel lines

Let there be two sets of n points:
A={p1,p2,…,pn} on y=0
B={q1,q2,…,qn} on y=1
Each point pi is connected to its corresponding point qi to form a line segment.
I need to write code using a divide-and-conquer algorithm that returns the number of intersection points among all n line segments.
For example:
input:
3
1 101
-234 234
567 765
output:
1
I coded it as below, but I get wrong answers.
Can anyone help me with this code or give me another solution for this question?
#include<iostream>
#include <vector>
#include<algorithm>
using namespace std;
void merge1(vector< pair <int, int> > vect, int l, int m, int r)
{
int n1 = m - l + 1;
int n2 = r - m;
vector< pair <int, int> > vect_c_l(n1);
vector< pair <int, int> > vect_c_r(n2);
for (int i = 0; i < n1; i++)
vect_c_l[i] = vect[l + i];
for (int j = 0; j < n2; j++)
vect_c_r[j] = vect[m + 1 + j];
int i = 0;
int j = 0;
int k = l;
while (i < n1 && j < n2) {
if (vect_c_l[i].first <= vect_c_r[j].first) {
vect[k] = vect_c_l[i];
i++;
}
else {
vect[k] = vect_c_r[j];
j++;
}
k++;
}
while (i < n1) {
vect[k] = vect_c_l[i];
i++;
k++;
}
while (j < n2) {
vect[k] = vect_c_r[j];
j++;
k++;
}
}
int merge2(vector< pair <int, int> > vect, int l, int m, int r)
{
int n1 = m - l + 1;
int n2 = r - m;
int inv_count = 0;
vector< pair <int, int> > vect_c_l(n1);
vector< pair <int, int> > vect_c_r(n2);
for (int i = 0; i < n1; i++)
vect_c_l[i] = vect[l + i];
for (int j = 0; j < n2; j++)
vect_c_r[j] = vect[m + 1 + j];
int i = 0;
int j = 0;
int k = l;
while (i < n1 && j < n2) {
if (vect_c_l[i].second < vect_c_r[j].second) {
vect[k] = vect_c_l[i];
i++;
}
else {
vect[k] = vect_c_r[j];
j++;
inv_count = inv_count + (m - i);
}
k++;
}
while (i < n1) {
vect[k] = vect_c_l[i];
i++;
k++;
}
while (j < n2) {
vect[k] = vect_c_r[j];
j++;
k++;
}
return inv_count;
}
void mergeSort1(vector< pair <int, int> > vect, int l, int r) {
if (l >= r) {
return;
}
int m = l + (r - l) / 2;
mergeSort1(vect, l, m);
mergeSort1(vect, m + 1, r);
merge1(vect, l, m, r);
}
int mergeSort2(vector< pair <int, int> > vect, int l, int r) {
int inv_count = 0;
if (r > l) {
int m = l + (r - l) / 2;
inv_count += mergeSort2(vect, l, m);
inv_count += mergeSort2(vect, m+ 1, r);
/*Merge the two parts*/
inv_count += merge2(vect, l, m + 1, r);
}
return inv_count;
}
int main() {
int n,c=0;
cin >> n;
int a, b;
vector< pair <int, int> > vect;
for (int i = 0;i < n;i++) {
cin >> a >> b;
vect.push_back(make_pair(a, b));
}
mergeSort1(vect,0,n-1);
cout << mergeSort2(vect,0, n - 1);
}
I'd take advantage of the idea that computing whether the segments intersect is much simpler than computing where they intersect. Two segments intersect if their x values are on different sides of one another on y=1 and y=0. (i.e. if both x values on one segment are both smaller than the others, or both larger).
Objects make this easy to state. Build a segment object whose main job is to determine whether it intersects another instance.
class Segment {
constructor(x) {
this.x0 = x[0];
this.x1 = x[1];
}
// answer whether the reciever intersects the passed segment
intersects(segment) {
// this is ambiguous in the problem, but assume touching endpoints
// count as intersections
if (this.x0 === segment.x0 || this.x1 === segment.x1) return true;
let sort0 = this.x0 < segment.x0
let sort1 = this.x1 < segment.x1
return sort0 !== sort1
}
}
let input = [
[1, 101],
[-234, 234],
[567, 765]
];
let segments = input.map(x => new Segment(x))
// check segments with one another in pairs
let pairs = segments.map((v, i) => segments.slice(i + 1).map(w => [v, w])).flat();
let intersections = pairs.reduce((acc, p) => p[0].intersects(p[1]) ? acc + 1 : acc, 0)
console.log(intersections)
You can also see the problem by abstracting away the lines themselves.
If there were no intersections, the order of the indexes on both parallel lines would be the same.
So the number of intersections is equal to the number of swaps of neighbouring points you need to perform to get the same order of indexes on both sides.
In your example you have the two sequences of indexes
1,3,4,2 on the upper line
2,1,4,3 on the lower line
to convert the lower sequence by swapping neighbours, you need 4 swaps:
2,1,4,3 start
-> 1,2,4,3
-> 1,4,2,3
-> 1,4,3,2
-> 1,3,4,2 = upper sequence
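For reference, a minimal self-contained sketch of this inversion-counting approach in C++ (my own illustration, not the original poster's fixed code): sort the segments by their endpoint on y=0, then every inversion among the endpoints on y=1 corresponds to exactly one crossing pair.
#include <bits/stdc++.h>
using namespace std;
// count inversions in a[l..r) with a classic merge sort, O(n log n)
long long countInv(vector<int>& a, int l, int r) {
    if (r - l <= 1) return 0;
    int m = (l + r) / 2;
    long long inv = countInv(a, l, m) + countInv(a, m, r);
    vector<int> merged;
    int i = l, j = m;
    while (i < m && j < r) {
        if (a[i] <= a[j]) merged.push_back(a[i++]);
        else { merged.push_back(a[j++]); inv += m - i; }   // a[i..m) all exceed a[j]
    }
    while (i < m) merged.push_back(a[i++]);
    while (j < r) merged.push_back(a[j++]);
    copy(merged.begin(), merged.end(), a.begin() + l);
    return inv;
}
int main() {
    vector<pair<int,int>> seg = {{1, 101}, {-234, 234}, {567, 765}};
    sort(seg.begin(), seg.end());                  // order by the endpoint on y=0
    vector<int> upper;                             // corresponding endpoints on y=1
    for (auto& s : seg) upper.push_back(s.second);
    cout << countInv(upper, 0, (int)upper.size()) << '\n';   // prints 1 for the sample
}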

Find the number of players who cannot win the game

We are given n players, each player has 3 values assigned A, B and C.
A player i cannot win if there exists another player j with all 3 values greater: A[j] > A[i], B[j] > B[i] and C[j] > C[i]. We are asked to find the number of players who cannot win.
I tried this problem using brute force, which is a linear search over the players array for each player, but it's showing TLE.
For each player i, I traverse the complete array to find whether there exists any other player j for which the above condition holds true.
Code :
int count_players_cannot_win(vector<vector<int>> values) {
int c = 0;
int n = values.size();
for(int i = 0; i < n; i++) {
for(int j = 0; j!= i && j < n; j++) {
if(values[i][0] < values[j][0] && values[i][1] < values[j][1] && values[i][2] < values[j][2]) {
c += 1;
break;
}
}
}
return c;
}
This approach is O(n^2), as for every player we traverse the complete array, so it gives TLE.
Sample testcase :
Sample Input
3(number of players)
A B C
1 4 2
4 3 2
2 5 3
Sample Output :
1
Explanation :
Only player 1 cannot win, as there exists player 3 whose three values (A, B and C) are all greater than those of player 1.
Constraints:
n (number of players) <= 10^5
What would be an optimal way to solve this problem?
Solution:
int n;
const int N = 4e5 + 1;
int tree[N];
int get_max(int i, int l, int r, int L) { // range query of max in range v[B+1: n]
if(r < L || n <= l)
return numeric_limits<int>::min();
else if(L <= l)
return tree[i];
int m = (l + r)/2;
return max(get_max(2*i+1, l, m, L), get_max(2*i+2, m+1, r, L));
}
void update(int i, int l, int r, int on, int v) { // point update in tree[on]
if(r < on || on < l)
return;
else if(l == r) {
tree[i] = max(tree[i], v);
return;
}
int m = (l + r)/2;
update(2*i+1, l, m, on, v);
update(2*i+2, m + 1, r, on, v);
tree[i] = max(tree[2*i+1], tree[2*i+2]);
}
bool comp(vector<int> a, vector<int> b) {
return a[0] != b[0] ? a[0] > b[0] : a[1] < b[1];
}
int solve(vector<vector<int>> &v) {
n = v.size();
vector<int> b(n, 0); // reduce the scale of range from [0,10^9] to [0,10^5]
for(int i = 0; i < n; i++) {
b[i] = v[i][1];
}
for(int i = 0; i < n; i++) {
cin >> v[i][2];
}
// sort on 0th col in reverse order
sort(v.begin(), v.end(), comp);
sort(b.begin(), b.end());
int ans = 0;
for(int i = 0; i < n;) {
int j = i;
while(j < n && v[j][0] == v[i][0]) {
int B = v[j][1];
int pos = lower_bound(b.begin(), b.end(), B) - b.begin(); // position of B in b[]
int mx = get_max(0, 0, n - 1, pos + 1);
if(mx > v[j][2])
ans += 1;
j++;
}
while(i < j) {
int B = v[i][1];
int C = v[i][2];
int pos = lower_bound(b.begin(), b.end(), B) - b.begin(); // position of B in b[]
update(0, 0, n - 1, pos, C);
i++;
}
}
return ans;
}
This solution uses a segment tree, and thus solves the problem in O(n log n) time and O(n) space.
The approach is explained in the accepted answer by @Primusa.
First lets assume that our input comes in the form of a list of tuples T = [(A[0], B[0], C[0]), (A[1], B[1], C[1]) ... (A[N - 1], B[N - 1], C[N - 1])]
The first observation we can make is that we can sort on T[0] (in reverse order). Then for each tuple (a, b, c), to determine if it cannot win, we ask if we've already seen a tuple (d, e, f) such that e > b && f > c. We don't need to check the first element because we are given that d > a* since T is sorted in reverse.
Okay, so now how do we check this second criteria?
We can reframe it like so: out of all tuples (d, e, f), that we've already seen with e > b, what is the maximum value of f? If the max value is greater than c, then we know that this tuple cannot win.
To handle this part we can use a segment tree with max updates and max range queries. When we encounter a tuple (d, e, f), we can set tree[e] = max(tree[e], f). tree[i] will represent the third element with i being the second element.
To answer a query like "what is the maximum value of f such that e > b", we do max(tree[b+1...]), to get the largest third element over a range of possible second elements.
Since we are only doing suffix queries, you can get away with using a modified fenwick tree, but it is easier to explain with a segment tree.
This will give us an O(N log N) solution, for sorting T and doing O(log N) work with our segment tree for every tuple.
*Note: this should actually be d >= a. However it is easier to explain the algorithm when we pretend everything is unique. The only modification you need to make to accommodate duplicate values of the first element is to process your queries and updates in buckets of tuples of the same value. This means that we will perform our check for all tuples with the same first element, and only then do we update tree[e] = max(tree[e], f) for all of those tuples we performed the check on. This ensures that no tuple with the same first value has updated the tree already when another tuple is querying the tree.
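The "modified Fenwick tree" mentioned above can be realized by mirroring indices, so that a standard prefix-max BIT answers the suffix-max queries the algorithm needs. Here is a minimal self-contained sketch (my own illustration; the names are not from the original solution):
#include <bits/stdc++.h>
using namespace std;
// Fenwick tree (BIT) for "point max-update, suffix max-query", 1-based indices.
// Index i is mirrored to n + 1 - i, which turns a suffix [i..n] into a prefix.
struct SuffixMaxBIT {
    int n; vector<int> t;
    SuffixMaxBIT(int n) : n(n), t(n + 1, INT_MIN) {}
    void update(int i, int v) {                   // tree[i] = max(tree[i], v)
        for (i = n + 1 - i; i <= n; i += i & -i) t[i] = max(t[i], v);
    }
    int suffixMax(int i) {                        // max over positions i..n
        int res = INT_MIN;
        for (i = n + 1 - i; i > 0; i -= i & -i) res = max(res, t[i]);
        return res;
    }
};
int main() {
    SuffixMaxBIT bit(5);
    bit.update(2, 7);                             // e.g. tree[e] = max(tree[e], f)
    bit.update(4, 9);
    cout << bit.suffixMax(3) << ' ' << bit.suffixMax(1) << '\n';   // prints: 9 9
}
Note that a max-BIT only supports updates of the form tree[i] = max(tree[i], v), which is exactly the kind of update this algorithm performs.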

Clarification of Answer... find the max possible two equal sum in a SET

I need a clarification of the answer to this question, but I cannot comment (not enough rep), so I am asking a new question. I hope that is OK.
The problem is this:
Given an array, you have to find the max possible two equal sums; you can exclude elements.
I.e. if 1, 2, 3, 4, 6 is the given array, we can have the max two equal sums as 6+2 = 4+3+1.
I.e. for 4, 10, 18, 22, we can get two equal sums as 18+4 = 22.
What would be your approach to solve this problem, apart from brute-forcing all combinations and checking for two possible equal sums?
Edit 1: the max number of array elements is N <= 50, and each element can be at most 1 <= K <= 1000.
Edit 2: the total sum of the elements cannot be greater than 1000.
The approved answer says:
I suggest solving this using DP where instead of tracking A, B (the size of the two sets), you instead track A+B, A-B (the sum and difference of the two sets).
Then for each element in the array, try adding it to A, or B, or neither.
The advantage of tracking the sum/difference is that you only need to keep track of a single value for each difference, namely the largest value of the sum you have seen for this difference.
What I do not understand is:
If this were the subset sum problem, I could solve it with DP, having a memoization matrix of (N x P), where N is the size of the set and P is the target sum...
But I cannot figure out how I should keep track of A+B, A-B (as said by the author of the approved answer). What should the dimensions of the memoization matrix be, and how does that help solve the problem?
The author of the answer was kind enough to provide a code example, but it is hard for me to understand since I do not know Python (I know Java).
I think that thinking about how this solution relates to the single-subset problem might be misleading for you. Here we are concerned with a maximum achievable sum and, what's more, we need to distinguish between two disjoint sets of numbers as we traverse. Clearly, tracking specific combinations would be too expensive.
Looking at the difference between sets A and B, we can say:
A - B = d
A = d + B
Clearly, we want the highest sum A + B when d = 0. How do we know the answer then? Each set's sum is (A + B) / 2!
For the transition in the dynamic program, we'd like to know if it's better to place the current element in A, B or neither. This is achieved like this:
e <- current element
d <- difference between A and B
(1) add e to A -> d + e
why?
A = d + B
(A + e) = d + e + B
(2) add e to B -> d - e
why?
A = d + B
A = d - e + (B + e)
(3) don't use e -> that's simply
what we already have stored for d
Let's look at Peter de Rivas' code for the transition:
# update a copy of our map, so
# we can reference previous values,
# while assigning new values
D2=D.copy()
# d is A - B
# s is A + B
for d,s in D.items():
# a new sum that includes element a
# we haven't decided if a
# will be in A or B
s2 = s + a
# d2 will take on each value here
# in turn, once d - a (adding a to B),
# and once d + a (adding a to A)
for d2 in [d-a, d+a]:
# The main transition:
# the two new differences,
# (d-a) and (d+a) as keys in
# our map get the highest sum
# seen so far, either (1) the
# new sum, s2, or (2) what we
# already stored (meaning `a`
# will be excluded here)
# so all three possibilities
# are covered.
D2[abs(d2)] = max(D2[abs(d2)], s2)
In the end we have stored the highest A + B seen for d = 0, where the elements in A and B form disjoint sets. Return (A + B) / 2.
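To make the dimensions concrete, here is a compact sketch of the same sum/difference DP ported to C++ (my own port, not Peter de Rivas' original code): instead of an N x P matrix there is just a map from each difference d = |A - B| to the largest A + B seen for that difference, and the answer is best[0] / 2.
#include <bits/stdc++.h>
using namespace std;
int maxEqualSum(const vector<int>& arr) {
    unordered_map<int, int> best = {{0, 0}};       // d -> largest A + B seen for that d
    for (int a : arr) {
        unordered_map<int, int> next = best;       // copy, so updates read the old values
        for (auto [d, s] : best) {
            for (int d2 : {d + a, d - a}) {        // put a into A, or into B
                int key = abs(d2);
                int cand = s + a;
                auto it = next.find(key);
                if (it == next.end() || it->second < cand) next[key] = cand;
            }
        }
        best = move(next);
    }
    return best[0] / 2;                            // A == B, so each set sums to (A+B)/2
}
int main() {
    cout << maxEqualSum({1, 2, 3, 4, 6}) << '\n';  // 8  (6+2 == 4+3+1)
    cout << maxEqualSum({4, 10, 18, 22}) << '\n';  // 22 (18+4 == 22)
}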
Try this DP approach; it works fine.
/*
*
i/p ::
1
5
1 2 3 4 6
o/p : 8
1
4
4 10 18 22
o/p : 22
1
4
4 118 22 3
o/p : 0
*/
import java.util.Scanner;
public class TwoPipesOfMaxEqualLength {
public static void main(String[] args) {
Scanner sc = new Scanner(System.in);
int t = sc.nextInt();
while (t-- > 0) {
int n = sc.nextInt();
int[] arr = new int[n + 1];
for (int i = 1; i <= n; i++) {
arr[i] = sc.nextInt();
}
MaxLength(arr, n);
}
}
private static void MaxLength(int[] arr, int n) {
int dp[][] = new int[1005][1005];
int dp1[][] = new int[1005][1005];
// initialize dp with values as 0.
for (int i = 0; i <= 1000; i++) {
for (int j = 0; j <= 1000; j++)
dp[i][j] = 0;
}
// make (0,0) as 1.
dp[0][0] = 1;
for (int i = 1; i <= n; i++) {
for (int j = 0; j <= 1000; j++) {
for (int k = 0; k <= 1000; k++) {
if (j >= arr[i]) {
if (dp[j - arr[i]][k] == 1) {
dp1[j][k] = 1;
}
}
if (k >= arr[i]) {
if (dp[j][k - arr[i]] == 1) {
dp1[j][k] = 1;
}
}
if (dp[j][k] == 1) {
dp1[j][k] = 1;
}
}
}
for (int j = 0; j <= 1000; j++) {
for (int k = 0; k <= 1000; k++) {
dp[j][k] = dp1[j][k];
dp1[j][k] = 0;
}
}
}
int ans = 0;
for (int i = 1; i <= 1000; i++) {
if (dp[i][i] == 1) {
ans = i;
}
}
System.out.println(ans);
}
}
#include <bits/stdc++.h>
using namespace std;
/*
Brute force recursive solve.
*/
void solve(vector<int>&arr, int &ans, int p1, int p2, int idx, int mx_p){
// if p1 == p2, we have a potential answer
if(p1 == p2){
ans = max(ans, p1);
}
//base case 1:
if((p1>mx_p) || (p2>mx_p) || (idx >= arr.size())){
return;
}
// leave the current element
solve(arr, ans, p1, p2, idx+1, mx_p);
// add the current element to p1
solve(arr, ans, p1+arr[idx], p2, idx+1, mx_p);
// add the current element to p2
solve(arr, ans, p1, p2+arr[idx], idx+1, mx_p);
}
/*
Recursive solve with memoization.
*/
int solve(vector<vector<vector<int>>>&memo, vector<int>&arr,
int p1, int p2, int idx, int mx_p){
//base case 1:
if((p1>mx_p) || (p2>mx_p) || (idx>arr.size())){
return -1;
}
// memo'ed answer
if(memo[p1][p2][idx]>-1){
return memo[p1][p2][idx];
}
// if p1 == p2, we have a potential answer
if(p1 == p2){
memo[p1][p2][idx] = max(memo[p1][p2][idx], p1);
}
// no elements left to place beyond this point; stop before reading arr[idx]
if(idx == (int)arr.size()){
return memo[p1][p2][idx];
}
// leave the current element
memo[p1][p2][idx] = max(memo[p1][p2][idx], solve(memo, arr, p1, p2,
idx+1, mx_p));
// add the current element to p1
memo[p1][p2][idx] = max(memo[p1][p2][idx],
solve(memo, arr, p1+arr[idx], p2, idx+1, mx_p));
// add the current element to p2
memo[p1][p2][idx] = max(memo[p1][p2][idx],
solve(memo, arr, p1, p2+arr[idx], idx+1, mx_p));
return memo[p1][p2][idx];
}
int main(){
vector<int>arr = {1, 2, 3, 4, 7};
int ans = 0;
int mx_p = 0;
for(auto i:arr){
mx_p += i;
}
mx_p /= 2;
vector<vector<vector<int>>>memo(mx_p+1, vector<vector<int>>(mx_p+1,
vector<int>(arr.size()+1,-1)));
ans = solve(memo, arr, 0, 0, 0, mx_p);
ans = (ans>=0)?ans:0;
// solve(arr, ans, 0, 0, 0, mx_p);
cout << ans << endl;
return 0;
}

What is the time complexity of the right to left method in modular exponentiation?

http://en.wikipedia.org/wiki/Modular_exponentiation#Right-to-left_binary_method
Is it O(1), because we are going through the binary representation of e, which is constant time?
int mod(int b, int e, int m) {
b = b % m;
int result = 1;
while (e) {
if (e % 2 == 1) {
result = (result * b) % m;
}
b = (b * b) % m;
e >>= 1;
}
return result;
}
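For what it's worth, the loop body runs once per bit of e (about floor(log2 e) + 1 times), so the cost is O(log e) modular multiplications rather than O(1); halving e each iteration is exactly what walks through its binary representation. Below is a 64-bit-safe variant of the same right-to-left method (my own sketch, correct as long as the modulus fits in 32 bits so the products cannot overflow):
#include <cstdint>
#include <iostream>
// right-to-left binary exponentiation: O(log e) multiplications
uint64_t mod_pow(uint64_t b, uint64_t e, uint64_t m) {
    uint64_t result = 1;
    b %= m;
    while (e > 0) {
        if (e & 1)                        // lowest remaining bit of e is set
            result = (result * b) % m;
        b = (b * b) % m;
        e >>= 1;                          // drop that bit
    }
    return result;
}
int main() {
    std::cout << mod_pow(2, 10, 1000) << '\n';   // prints 24 (1024 mod 1000)
}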

How do I write merge in place? [duplicate]

I know the question is not too specific.
All I want is someone to tell me how to convert a normal merge sort into an in-place merge sort (or a merge sort with constant extra space overhead).
All I can find (on the net) is pages saying "it is too complex" or "out of scope of this text".
The only known ways to merge in-place (without any extra space) are too complex to be reduced to practical program. (taken from here)
Even if it is too complex, what is the basic concept of how to make the merge sort in-place?
Knuth left this as an exercise (Vol 3, 5.2.5). There do exist in-place merge sorts. They must be implemented carefully.
First, a naive in-place merge such as the one described here isn't the right solution; it degrades the performance to O(N²).
The idea is to sort part of the array while using the rest as working area for merging.
For example like the following merge function.
void wmerge(Key* xs, int i, int m, int j, int n, int w) {
while (i < m && j < n)
swap(xs, w++, xs[i] < xs[j] ? i++ : j++);
while (i < m)
swap(xs, w++, i++);
while (j < n)
swap(xs, w++, j++);
}
It takes the array xs; the two sorted sub-arrays are represented as the ranges [i, m) and [j, n) respectively, and the working area starts at w. Compared with the standard merge algorithm given in most textbooks, this one exchanges the contents of the sorted sub-arrays with the working area. As a result, the working area ends up holding the merged, sorted elements, while the elements previously stored in the working area are moved into the two sub-arrays.
However, there are two constraints that must be satisfied:
The work area should be within the bounds of the array. In other words, it should be big enough to hold elements exchanged in without causing any out-of-bound error.
The work area can be overlapped with either of the two sorted arrays; however, it must ensure that none of the unmerged elements are overwritten.
With this merging algorithm defined, it's easy to imagine a solution that sorts half of the array. The next question is how to deal with the rest, the unsorted part left in the work area, as shown below:
... unsorted 1/2 array ... | ... sorted 1/2 array ...
One intuitive idea is to recursively sort the other half of the working area, so that only 1/4 of the elements remain unsorted:
... unsorted 1/4 array ... | sorted 1/4 array B | sorted 1/2 array A ...
The key point at this stage is that we must, sooner or later, merge the sorted 1/4 of elements (B) with the sorted 1/2 of elements (A). Is the remaining working area, which holds only 1/4 of the elements, big enough to merge A and B? Unfortunately, it isn't.
However, the second constraint mentioned above gives us a hint: we can exploit it by arranging the working area to overlap with either sub-array, as long as we order the merging so that the unmerged elements are never overwritten.
Actually, instead of sorting the second half of the working area, we can sort the first half, and put the working area between the two sorted arrays like this:
... sorted 1/4 array B | unsorted work area | ... sorted 1/2 array A ...
This setup effectively arranges the work area to overlap with sub-array A. This idea is proposed in [Jyrki Katajainen, Tomi Pasanen, Jukka Teuhola. "Practical in-place mergesort". Nordic Journal of Computing, 1996].
So the only thing left is to repeat the above step, which shrinks the working area from 1/2 to 1/4, 1/8, and so on. When the working area becomes small enough (for example, only two elements are left), we can switch to a trivial insertion sort to finish the algorithm.
Here is the implementation in ANSI C based on this paper.
void imsort(Key* xs, int l, int u);
void swap(Key* xs, int i, int j) {
Key tmp = xs[i]; xs[i] = xs[j]; xs[j] = tmp;
}
/*
* sort xs[l, u), and put result to working area w.
* constraint, len(w) == u - l
*/
void wsort(Key* xs, int l, int u, int w) {
int m;
if (u - l > 1) {
m = l + (u - l) / 2;
imsort(xs, l, m);
imsort(xs, m, u);
wmerge(xs, l, m, m, u, w);
}
else
while (l < u)
swap(xs, l++, w++);
}
void imsort(Key* xs, int l, int u) {
int m, n, w;
if (u - l > 1) {
m = l + (u - l) / 2;
w = l + u - m;
wsort(xs, l, m, w); /* the last half contains sorted elements */
while (w - l > 2) {
n = w;
w = l + (n - l + 1) / 2;
wsort(xs, w, n, l); /* the first half of the previous working area contains sorted elements */
wmerge(xs, l, l + n - w, n, u, w);
}
for (n = w; n > l; --n) /*switch to insertion sort*/
for (m = n; m < u && xs[m] < xs[m-1]; ++m)
swap(xs, m, m - 1);
}
}
Where wmerge is defined previously.
The full source code can be found here and the detailed explanation can be found here
By the way, this version isn't the fastest merge sort because it needs more swap operations. According to my test, it's faster than the standard version, which allocates extra spaces in every recursion. But it's slower than the optimized version, which doubles the original array in advance and uses it for further merging.
Including its "big result", this paper describes a couple of variants of in-place merge sort (PDF):
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.22.5514&rep=rep1&type=pdf
In-place sorting with fewer moves
Jyrki Katajainen, Tomi A. Pasanen
It is shown that an array of n elements can be sorted using O(1) extra space, O(n log n / log log n) element moves, and n log₂ n + O(n log log n) comparisons. This is the first in-place sorting algorithm requiring o(n log n) moves in the worst case while guaranteeing O(n log n) comparisons, but due to the constant factors involved the algorithm is predominantly of theoretical interest.
I think this is relevant too. I have a printout of it lying around, passed on to me by a colleague, but I haven't read it. It seems to cover basic theory, but I'm not familiar enough with the topic to judge how comprehensively:
http://comjnl.oxfordjournals.org/cgi/content/abstract/38/8/681
Optimal Stable Merging
Antonios Symvonis
This paper shows how to stably merge two sequences A and B of sizes m and n, m ≤ n, respectively, with O(m+n) assignments, O(m log(n/m + 1)) comparisons and using only a constant amount of additional space. This result matches all known lower bounds...
It really isn't easy or efficient, and I suggest you don't do it unless you really have to (and you probably don't have to unless this is homework since the applications of inplace merging are mostly theoretical). Can't you use quicksort instead? Quicksort will be faster anyway with a few simpler optimizations and its extra memory is O(log N).
Anyway, if you must do it then you must. Here's what I found: one and two. I'm not familiar with the inplace merge sort, but it seems like the basic idea is to use rotations to facilitate merging two arrays without using extra memory.
Note that this is slower even than the classic merge sort that's not inplace.
The critical step is getting the merge itself to be in-place. It's not as difficult as those sources make out, but you lose something when you try.
Looking at one step of the merge:
[...list-sorted...|x...list-A...|y...list-B...]
We know that the sorted sequence is less than everything else, that x is less than everything else in A, and that y is less than everything else in B. In the case where x is less than or equal to y, you just advance the pointer at the start of A by one. In the case where y is less than x, you have to shuffle y past the whole of A into the sorted region. That last step is what makes this expensive (except in degenerate cases).
It's generally cheaper (especially when the arrays only actually contain single words per element, e.g., a pointer to a string or structure) to trade off some space for time and have a separate temporary array that you sort back and forth between.
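To make the expensive step concrete, here is a minimal sketch of that naive in-place merge (my own illustration): the inner shift is the "shuffle y past the whole of A" step, and doing it element by element is what drags the overall sort toward O(N²).
#include <bits/stdc++.h>
using namespace std;
// naive in-place merge of the sorted ranges [lo, mid) and [mid, hi)
void naive_inplace_merge(vector<int>& a, int lo, int mid, int hi) {
    int i = lo, j = mid;
    while (i < j && j < hi) {
        if (a[i] <= a[j]) {
            ++i;                          // front of A is already in place
        } else {
            int y = a[j];
            for (int k = j; k > i; --k)   // shift all of remaining A right by one
                a[k] = a[k - 1];
            a[i] = y;                     // ... so y can drop into the sorted part
            ++i; ++j;
        }
    }
}
int main() {
    vector<int> v = {1, 4, 7, 2, 3, 9};
    naive_inplace_merge(v, 0, 3, 6);
    for (int x : v) cout << x << ' ';     // prints: 1 2 3 4 7 9
    cout << '\n';
}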
An example of bufferless mergesort in C.
#define SWAP(type, a, b) \
do { type t=(a);(a)=(b);(b)=t; } while (0)
static void reverse_(int* a, int* b)
{
for ( --b; a < b; a++, b-- )
SWAP(int, *a, *b);
}
static int* rotate_(int* a, int* b, int* c)
/* swap the sequence [a,b) with [b,c). */
{
if (a != b && b != c)
{
reverse_(a, b);
reverse_(b, c);
reverse_(a, c);
}
return a + (c - b);
}
static int* lower_bound_(int* a, int* b, const int key)
/* find first element not less than #p key in sorted sequence or end of
* sequence (#p b) if not found. */
{
int i;
for ( i = b-a; i != 0; i /= 2 )
{
int* mid = a + i/2;
if (*mid < key)
a = mid + 1, i--;
}
return a;
}
static int* upper_bound_(int* a, int* b, const int key)
/* find first element greater than #p key in sorted sequence or end of
* sequence (#p b) if not found. */
{
int i;
for ( i = b-a; i != 0; i /= 2 )
{
int* mid = a + i/2;
if (*mid <= key)
a = mid + 1, i--;
}
return a;
}
static void ip_merge_(int* a, int* b, int* c)
/* inplace merge. */
{
int n1 = b - a;
int n2 = c - b;
if (n1 == 0 || n2 == 0)
return;
if (n1 == 1 && n2 == 1)
{
if (*b < *a)
SWAP(int, *a, *b);
}
else
{
int* p, * q;
if (n1 <= n2)
p = upper_bound_(a, b, *(q = b+n2/2));
else
q = lower_bound_(b, c, *(p = a+n1/2));
b = rotate_(p, b, q);
ip_merge_(a, p, b);
ip_merge_(b, q, c);
}
}
void mergesort(int* v, int n)
{
if (n > 1)
{
int h = n/2;
mergesort(v, h); mergesort(v+h, n-h);
ip_merge_(v, v+h, v+n);
}
}
An example of adaptive mergesort (optimized).
Adds support code and modifications to accelerate the merge when an auxiliary buffer of any size is available (still works without additional memory). Uses forward and backward merging, ring rotation, small sequence merging and sorting, and iterative mergesort.
#include <stdlib.h>
#include <string.h>
static int* copy_(const int* a, const int* b, int* out)
{
int count = b - a;
if (a != out)
memcpy(out, a, count*sizeof(int));
return out + count;
}
static int* copy_backward_(const int* a, const int* b, int* out)
{
int count = b - a;
if (b != out)
memmove(out - count, a, count*sizeof(int));
return out - count;
}
static int* merge_(const int* a1, const int* b1, const int* a2,
const int* b2, int* out)
{
while ( a1 != b1 && a2 != b2 )
*out++ = (*a1 <= *a2) ? *a1++ : *a2++;
return copy_(a2, b2, copy_(a1, b1, out));
}
static int* merge_backward_(const int* a1, const int* b1,
const int* a2, const int* b2, int* out)
{
while ( a1 != b1 && a2 != b2 )
*--out = (*(b1-1) > *(b2-1)) ? *--b1 : *--b2;
return copy_backward_(a1, b1, copy_backward_(a2, b2, out));
}
static unsigned int gcd_(unsigned int m, unsigned int n)
{
while ( n != 0 )
{
unsigned int t = m % n;
m = n;
n = t;
}
return m;
}
static void rotate_inner_(const int length, const int stride,
int* first, int* last)
{
int* p, * next = first, x = *first;
while ( 1 )
{
p = next;
if ((next += stride) >= last)
next -= length;
if (next == first)
break;
*p = *next;
}
*p = x;
}
static int* rotate_(int* a, int* b, int* c)
/* swap the sequence [a,b) with [b,c). */
{
if (a != b && b != c)
{
int n1 = c - a;
int n2 = b - a;
int* i = a;
int* j = a + gcd_(n1, n2);
for ( ; i != j; i++ )
rotate_inner_(n1, n2, i, c);
}
return a + (c - b);
}
static void ip_merge_small_(int* a, int* b, int* c)
/* inplace merge.
* #note faster for small sequences. */
{
while ( a != b && b != c )
if (*a <= *b)
a++;
else
{
int* p = b+1;
while ( p != c && *p < *a )
p++;
rotate_(a, b, p);
b = p;
}
}
static void ip_merge_(int* a, int* b, int* c, int* t, const int ts)
/* inplace merge.
* #note works with or without additional memory. */
{
int n1 = b - a;
int n2 = c - b;
if (n1 <= n2 && n1 <= ts)
{
merge_(t, copy_(a, b, t), b, c, a);
}
else if (n2 <= ts)
{
merge_backward_(a, b, t, copy_(b, c, t), c);
}
/* merge without buffer. */
else if (n1 + n2 < 48)
{
ip_merge_small_(a, b, c);
}
else
{
int* p, * q;
if (n1 <= n2)
p = upper_bound_(a, b, *(q = b+n2/2));
else
q = lower_bound_(b, c, *(p = a+n1/2));
b = rotate_(p, b, q);
ip_merge_(a, p, b, t, ts);
ip_merge_(b, q, c, t, ts);
}
}
static void ip_merge_chunk_(const int cs, int* a, int* b, int* t,
const int ts)
{
int* p = a + cs*2;
for ( ; p <= b; a = p, p += cs*2 )
ip_merge_(a, a+cs, p, t, ts);
if (a+cs < b)
ip_merge_(a, a+cs, b, t, ts);
}
static void smallsort_(int* a, int* b)
/* insertion sort.
* #note any stable sort with low setup cost will do. */
{
int* p, * q;
for ( p = a+1; p < b; p++ )
{
int x = *p;
for ( q = p; a < q && x < *(q-1); q-- )
*q = *(q-1);
*q = x;
}
}
static void smallsort_chunk_(const int cs, int* a, int* b)
{
int* p = a + cs;
for ( ; p <= b; a = p, p += cs )
smallsort_(a, p);
smallsort_(a, b);
}
static void mergesort_lower_(int* v, int n, int* t, const int ts)
{
int cs = 16;
smallsort_chunk_(cs, v, v+n);
for ( ; cs < n; cs *= 2 )
ip_merge_chunk_(cs, v, v+n, t, ts);
}
static void* get_buffer_(int size, int* final)
{
void* p = NULL;
while ( size != 0 && (p = malloc(size)) == NULL )
size /= 2;
*final = size;
return p;
}
void mergesort(int* v, int n)
{
/* #note buffer size may be in the range [0,(n+1)/2]. */
int request = (n+1)/2 * sizeof(int);
int actual;
int* t = (int*) get_buffer_(request, &actual);
/* #note allocation failure okay. */
int tsize = actual / sizeof(int);
mergesort_lower_(v, n, t, tsize);
free(t);
}
This answer has a code example, which implements the algorithm described in the paper Practical In-Place Merging by Bing-Chao Huang and Michael A. Langston. I have to admit that I do not understand the details, but the given complexity of the merge step is O(n).
From a practical perspective, there is evidence that pure in-place implementations do not perform better in real-world scenarios. For example, the C++ standard defines std::inplace_merge, which is, as the name implies, an in-place merge operation.
Assuming that C++ libraries are typically very well optimized, it is interesting to see how it is implemented:
1) libstdc++ (part of the GCC code base): std::inplace_merge
The implementation delegates to __inplace_merge, which dodges the problem by trying to allocate a temporary buffer:
typedef _Temporary_buffer<_BidirectionalIterator, _ValueType> _TmpBuf;
_TmpBuf __buf(__first, __len1 + __len2);
if (__buf.begin() == 0)
std::__merge_without_buffer
(__first, __middle, __last, __len1, __len2, __comp);
else
std::__merge_adaptive
(__first, __middle, __last, __len1, __len2, __buf.begin(),
_DistanceType(__buf.size()), __comp);
Otherwise, it falls back to an implementation (__merge_without_buffer), which requires no extra memory, but no longer runs in O(n) time.
2) libc++ (part of the Clang code base): std::inplace_merge
Looks similar. It delegates to a function, which also tries to allocate a buffer. Depending on whether it got enough elements, it will choose the implementation. The constant-memory fallback function is called __buffered_inplace_merge.
Maybe the fallback is even still O(n) time, but the point is that they do not use the in-place implementation if temporary memory is available.
Note that the C++ standard explicitly gives implementations the freedom to choose this approach by lowering the required complexity from O(n) to O(N log N):
Complexity:
Exactly N-1 comparisons if enough additional memory is available. If the memory is insufficient, O(N log N) comparisons.
Of course, this cannot be taken as a proof that constant space in-place merges in O(n) time should never be used. On the other hand, if it would be faster, the optimized C++ libraries would probably switch to that type of implementation.
This is my C version:
void mergesort(int *a, int len) {
int temp, listsize, xsize;
for (listsize = 1; listsize <= len; listsize*=2) {
for (int i = 0, j = listsize; (j+listsize) <= len; i += (listsize*2), j += (listsize*2)) {
merge(& a[i], listsize, listsize);
}
}
listsize /= 2;
xsize = len % listsize;
if (xsize > 1)
mergesort(& a[len-xsize], xsize);
merge(a, listsize, xsize);
}
void merge(int *a, int sizei, int sizej) {
int temp;
int ii = 0;
int ji = sizei;
int flength = sizei+sizej;
for (int f = 0; f < (flength-1); f++) {
if (sizei == 0 || sizej == 0)
break;
if (a[ii] < a[ji]) {
ii++;
sizei--;
}
else {
temp = a[ji];
for (int z = (ji-1); z >= ii; z--)
a[z+1] = a[z];
ii++;
a[f] = temp;
ji++;
sizej--;
}
}
}
I know I'm late to the game, but here's a solution I wrote yesterday. I also posted this elsewhere, but this appears to be the most popular merge-in-place thread on SO. I've also not seen this algorithm posted anywhere else, so hopefully this helps some people.
This algorithm is in its most simple form so that it can be understood. It can be significantly tweaked for extra speed. Average time complexity is: O(n.log₂n) for the stable in-place array merge, and O(n.(log₂n)²) for the overall sort.
// Stable Merge In Place Sort
//
//
// The following code is written to illustrate the base algorithm. A good
// number of optimizations can be applied to boost its overall speed
// For all its simplicity, it does still perform somewhat decently.
// Average case time complexity appears to be: O(n.(log₂n)²)
#include <stddef.h>
#include <stdio.h>
#define swap(x, y) (t=(x), (x)=(y), (y)=t)
// Both sorted sub-arrays must be adjacent in 'a'
// Assumes that both 'an' and 'bn' are always non-zero
// 'an' is the length of the first sorted section in 'a', referred to as A
// 'bn' is the length of the second sorted section in 'a', referred to as B
static void
merge_inplace(int A[], size_t an, size_t bn)
{
int t, *B = &A[an];
size_t pa, pb; // Swap partition pointers within A and B
// Find the portion to swap. We're looking for how much from the
// start of B can swap with the end of A, such that every element
// in A is less than or equal to any element in B. This is quite
// simple when both sub-arrays come at us pre-sorted
for(pa = an, pb = 0; pa>0 && pb<bn && B[pb] < A[pa-1]; pa--, pb++);
// Now swap last part of A with first part of B according to the
// indices we found
for (size_t index=pa; index < an; index++)
swap(A[index], B[index-pa]);
// Now merge the two sub-array pairings. We need to check that either array
// didn't wholly swap out the other and cause the remaining portion to be zero
if (pa>0 && (an-pa)>0)
merge_inplace(A, pa, an-pa);
if (pb>0 && (bn-pb)>0)
merge_inplace(B, pb, bn-pb);
} // merge_inplace
// Implements a recursive merge-sort algorithm with an optional
// insertion sort for when the splits get too small. 'n' must
// ALWAYS be 2 or more. It enforces this when calling itself
static void
merge_sort(int a[], size_t n)
{
size_t m = n/2;
// Sort first and second halves only if the target 'n' will be > 1
if (m > 1)
merge_sort(a, m);
if ((n-m)>1)
merge_sort(a+m, n-m);
// Now merge the two sorted sub-arrays together. We know that since
// n > 1, then both m and n-m MUST be non-zero, and so we will never
// violate the condition of not passing in zero length sub-arrays
merge_inplace(a, m, n-m);
} // merge_sort
// Print an array
static void
print_array(int a[], size_t size)
{
if (size > 0) {
printf("%d", a[0]);
for (size_t i = 1; i < size; i++)
printf(" %d", a[i]);
}
printf("\n");
} // print_array
// Test driver
int
main()
{
int a[] = { 17, 3, 16, 5, 14, 8, 10, 7, 15, 1, 13, 4, 9, 12, 11, 6, 2 };
size_t n = sizeof(a) / sizeof(a[0]);
merge_sort(a, n);
print_array(a, n);
return 0;
} // main
Leveraging C++ std::inplace_merge, in-place merge sort can be implemented as follows:
template< class _Type >
inline void merge_sort_inplace(_Type* src, size_t l, size_t r)
{
if (r <= l) return;
size_t m = l + ( r - l ) / 2; // computes the average without overflow
merge_sort_inplace(src, l, m);
merge_sort_inplace(src, m + 1, r);
std::inplace_merge(src + l, src + m + 1, src + r + 1);
}
More sorting algorithms, including parallel implementations, are available in https://github.com/DragonSpit/ParallelAlgorithms repo, which is open source and free.
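For completeness, here is a self-contained example of what std::inplace_merge itself does (my own sketch, not part of the answer above): it merges two adjacent sorted ranges in place, which is exactly what the recursion above relies on.
#include <algorithm>
#include <iostream>
#include <vector>
int main() {
    std::vector<int> v = {2, 5, 8, 1, 6, 9};            // [2,5,8] and [1,6,9] are each sorted
    std::inplace_merge(v.begin(), v.begin() + 3, v.end());
    for (int x : v) std::cout << x << ' ';              // prints: 1 2 5 6 8 9
    std::cout << '\n';
}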
I just tried an in-place merge algorithm for merge sort in Java using the insertion sort algorithm, with the following steps.
1) Two sorted arrays are available.
2) Compare the first values of each array, and place the smaller value into the first array.
3) Place the larger value into the second array by using insertion sort (traverse from left to right).
4) Then again compare the second value of the first array and the first value of the second array, and do the same. When a swap happens, there is a clue that lets us skip comparing further items and just swap.
I have made some optimization here to keep the number of comparisons in the insertion sort low. The only drawback I found with this solution is that it needs a large amount of shifting of array elements in the second array.
e.g.
First Array: 3, 7, 8, 9
Second Array: 1, 2, 4, 5
Then 7, 8, 9 each make the second array shift (move left by one) all of its elements, each time, to place themselves at the end.
So the assumption here is that swapping items is negligible compared to comparing two items.
https://github.com/skanagavelu/algorithams/blob/master/src/sorting/MergeSort.java
package sorting;
import java.util.Arrays;
public class MergeSort {
public static void main(String[] args) {
int[] array = { 5, 6, 10, 3, 9, 2, 12, 1, 8, 7 };
mergeSort(array, 0, array.length -1);
System.out.println(Arrays.toString(array));
int[] array1 = {4, 7, 2};
System.out.println(Arrays.toString(array1));
mergeSort(array1, 0, array1.length -1);
System.out.println(Arrays.toString(array1));
System.out.println("\n\n");
int[] array2 = {4, 7, 9};
System.out.println(Arrays.toString(array2));
mergeSort(array2, 0, array2.length -1);
System.out.println(Arrays.toString(array2));
System.out.println("\n\n");
int[] array3 = {4, 7, 5};
System.out.println(Arrays.toString(array3));
mergeSort(array3, 0, array3.length -1);
System.out.println(Arrays.toString(array3));
System.out.println("\n\n");
int[] array4 = {7, 4, 2};
System.out.println(Arrays.toString(array4));
mergeSort(array4, 0, array4.length -1);
System.out.println(Arrays.toString(array4));
System.out.println("\n\n");
int[] array5 = {7, 4, 9};
System.out.println(Arrays.toString(array5));
mergeSort(array5, 0, array5.length -1);
System.out.println(Arrays.toString(array5));
System.out.println("\n\n");
int[] array6 = {7, 4, 5};
System.out.println(Arrays.toString(array6));
mergeSort(array6, 0, array6.length -1);
System.out.println(Arrays.toString(array6));
System.out.println("\n\n");
//Handling array of size two
int[] array7 = {7, 4};
System.out.println(Arrays.toString(array7));
mergeSort(array7, 0, array7.length -1);
System.out.println(Arrays.toString(array7));
System.out.println("\n\n");
int input1[] = {1};
int input2[] = {4,2};
int input3[] = {6,2,9};
int input4[] = {6,-1,10,4,11,14,19,12,18};
System.out.println(Arrays.toString(input1));
mergeSort(input1, 0, input1.length-1);
System.out.println(Arrays.toString(input1));
System.out.println("\n\n");
System.out.println(Arrays.toString(input2));
mergeSort(input2, 0, input2.length-1);
System.out.println(Arrays.toString(input2));
System.out.println("\n\n");
System.out.println(Arrays.toString(input3));
mergeSort(input3, 0, input3.length-1);
System.out.println(Arrays.toString(input3));
System.out.println("\n\n");
System.out.println(Arrays.toString(input4));
mergeSort(input4, 0, input4.length-1);
System.out.println(Arrays.toString(input4));
System.out.println("\n\n");
}
private static void mergeSort(int[] array, int p, int r) {
//Both below mid finding is fine.
int mid = (r - p)/2 + p;
int mid1 = (r + p)/2;
if(mid != mid1) {
System.out.println(" Mid is mismatching:" + mid + "/" + mid1+ " for p:"+p+" r:"+r);
}
if(p < r) {
mergeSort(array, p, mid);
mergeSort(array, mid+1, r);
// merge(array, p, mid, r);
inPlaceMerge(array, p, mid, r);
}
}
//Regular merge
private static void merge(int[] array, int p, int mid, int r) {
int lengthOfLeftArray = mid - p + 1; // This is important to add +1.
int lengthOfRightArray = r - mid;
int[] left = new int[lengthOfLeftArray];
int[] right = new int[lengthOfRightArray];
for(int i = p, j = 0; i <= mid; ){
left[j++] = array[i++];
}
for(int i = mid + 1, j = 0; i <= r; ){
right[j++] = array[i++];
}
int i = 0, j = 0;
for(; i < left.length && j < right.length; ) {
if(left[i] < right[j]){
array[p++] = left[i++];
} else {
array[p++] = right[j++];
}
}
while(j < right.length){
array[p++] = right[j++];
}
while(i < left.length){
array[p++] = left[i++];
}
}
//InPlaceMerge no extra array
private static void inPlaceMerge(int[] array, int p, int mid, int r) {
int secondArrayStart = mid+1;
int prevPlaced = mid+1;
int q = mid+1;
while(p < mid+1 && q <= r){
boolean swapped = false;
if(array[p] > array[q]) {
swap(array, p, q);
swapped = true;
}
if(q != secondArrayStart && array[p] > array[secondArrayStart]) {
swap(array, p, secondArrayStart);
swapped = true;
}
//Check swapped value is in right place of second sorted array
if(swapped && secondArrayStart+1 <= r && array[secondArrayStart+1] < array[secondArrayStart]) {
prevPlaced = placeInOrder(array, secondArrayStart, prevPlaced);
}
p++;
if(q < r) { //q+1 <= r) {
q++;
}
}
}
private static int placeInOrder(int[] array, int secondArrayStart, int prevPlaced) {
int i = secondArrayStart;
for(; i < array.length; i++) {
//Simply swap till the prevPlaced position
if(secondArrayStart < prevPlaced) {
swap(array, secondArrayStart, secondArrayStart+1);
secondArrayStart++;
continue;
}
if(array[i] < array[secondArrayStart]) {
swap(array, i, secondArrayStart);
secondArrayStart++;
} else if(i != secondArrayStart && array[i] > array[secondArrayStart]){
break;
}
}
return secondArrayStart;
}
private static void swap(int[] array, int m, int n){
int temp = array[m];
array[m] = array[n];
array[n] = temp;
}
}
