I implemented the quicksort algorithm from the pseudocode given in Introduction to Algorithms (Cormen, 3rd edition), Section 7.1.
When I tried the algorithm with small arrays, the result was correct. But when I tried it with N = 50000 and an array that is already sorted, like this:
N = {1, 2, 3, ..., 50000};
it throws a StackOverflowError. I think this happens because the function recurses 50000 times:
QuickSort(A, 0, 49999) => QuickSort(A, 0, 49998) => QuickSort(A, 0, 49997) ... and so on.
Can I solve this problem? Or should I use a different pivot position?
Here is my code:
public void sort(int[] arr){
    QuickSort(arr, 0, arr.length - 1);
}

private void QuickSort(int[] A, int left, int right){
    if(left < right){
        int index = Partition(A, left, right);
        QuickSort(A, left, index - 1);
        QuickSort(A, index + 1, right);
    }
}

private int Partition(int[] A, int left, int right){
    int pivot = A[right];
    int wall = left - 1;
    for(int i = left; i < right; i++){
        if(A[i] <= pivot){
            Swap(A, ++wall, i);
        }
    }
    Swap(A, wall + 1, right);
    return wall + 1;
}

private void Swap(int[] A, int x, int y){
    int keeper = A[x];
    A[x] = A[y];
    A[y] = keeper;
}
Yes, this pivot scheme is not the right choice for an already sorted array. It produces very unbalanced partitions, which leads to O(N^2) complexity and very deep recursion, as you noticed.
There are several approaches to improve this behavior.
For example, you can use a random index for the pivot, like pivotIdx = start + rand() % (end-start+1);, or use the median-of-three method (the median of the first, last and middle elements of the index range).
P.S. One option to avoid the stack overflow itself: recurse into the shorter segment first and handle the longer one iteratively, so the recursion depth stays at O(log N) (see the sketch after the link below).
https://en.wikipedia.org/wiki/Quicksort#Choice_of_pivot
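Here is a minimal sketch of that idea in C++ (the same restructuring applies directly to the Java QuickSort/Partition methods above): recurse only into the smaller half and loop over the larger half. The stack depth is then bounded by O(log N) even for already sorted input, although the running time of this pivot scheme on sorted input is still O(N^2).

#include <algorithm>
#include <vector>

// Lomuto partition, same scheme as in the question: the last element is the pivot.
int partition(std::vector<int>& a, int left, int right) {
    int pivot = a[right];
    int wall = left - 1;
    for (int i = left; i < right; ++i) {
        if (a[i] <= pivot) std::swap(a[++wall], a[i]);
    }
    std::swap(a[wall + 1], a[right]);
    return wall + 1;
}

void quickSort(std::vector<int>& a, int left, int right) {
    while (left < right) {
        int index = partition(a, left, right);
        if (index - left < right - index) {
            quickSort(a, left, index - 1);   // recurse into the smaller (left) half
            left = index + 1;                // continue the loop on the larger (right) half
        } else {
            quickSort(a, index + 1, right);  // recurse into the smaller (right) half
            right = index - 1;               // continue the loop on the larger (left) half
        }
    }
}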
I've seen solutions for finding the Kth smallest element in a sorted matrix, and I've also seen solutions for finding the Kth smallest sum of two arrays.
But I recently found a question that asks for the Kth smallest sum in a sorted MxN matrix, where the sum must be made up of one element from each row. I'm really struggling to develop anything close to a working solution, let alone a brute-force one. Any help would be greatly appreciated!
I thought this would be some kind of heap problem... but perhaps it is a graph problem? I'm not that great with graphs.
I assume that by "sorted MxN matrix" you mean each row of the matrix is sorted. If you already know how to merge two rows and keep only the first K elements, you can repeat that same procedure to merge every row of the matrix. Ignoring the Java conversion between int[] and List, the following code should work.
import java.util.*;
import java.util.stream.*;

class Solution {
    /**
     * Runtime O(m * k * logk)
     */
    public int kthSmallest(int[][] mat, int k) {
        List<Integer> row = IntStream.of(mat[0]).boxed().collect(Collectors.toList());
        for (int i = 1; i < mat.length; i++) {
            row = kthSmallestPairs(row, mat[i], k);
        }
        return row.get(k - 1);
    }

    /**
     * A pair is formed from one number of n1 and one number of n2. Find the k smallest sums of these pairs.
     * The queue size is capped at k, hence this method runs in O(k log k).
     */
    List<Integer> kthSmallestPairs(List<Integer> n1, int[] n2, int k) {
        // entry[0] is n1's number, entry[1] is n2's number, entry[2] is n2's index
        Queue<int[]> que = new PriorityQueue<>((a, b) -> a[0] + a[1] - b[0] - b[1]);
        // First pair each number in n1 with the 0-th number of n2. We never need more than k entries,
        // because the greater ones can never be among the k smallest sums.
        for (int i = 0; i < n1.size() && i < k; i++) {
            que.add(new int[] {n1.get(i), n2[0], 0});
        }
        List<Integer> res = new ArrayList<>();
        while (!que.isEmpty() && k-- > 0) {
            int[] top = que.remove();
            res.add(top[0] + top[1]);
            // top[2] is the index into n2
            if (top[2] < n2.length - 1) {
                int nextN2Idx = top[2] + 1;
                que.add(new int[] {top[0], n2[nextN2Idx], nextN2Idx});
            }
        }
        return res;
    }
}
You can make a min-heap priority queue and store the sums together with the corresponding row indices in it. Then, each time you pop the smallest sum so far, you can generate the next candidates for the smallest sum by incrementing the index of each row by one.
Here are the data structures that you would need:
typedef pair<int,vector<int>> pi;
priority_queue<pi,vector<pi>,greater<pi>> pq;
You can try the question yourself now; for reference, I have also added the code that I wrote for this problem.
typedef pair<int,vector<int>> pi;

int kthSmallest(vector<vector<int>>& mat, int k) {
    int m = mat.size();
    int n = mat[0].size();
    // min-heap of {sum, vector of chosen column indices, one per row}
    priority_queue<pi, vector<pi>, greater<pi>> pq;

    // Start with the smallest possible sum: the first element of every row.
    int sum = 0;
    for (int i = 0; i < m; i++)
        sum += mat[i][0];
    vector<int> v(m, 0);
    pq.push({sum, v});

    int count = 1;
    int ans = sum;

    // Remember which index combinations have already been pushed.
    // A separator is needed in the key, otherwise indices such as {1,11} and {11,1} would collide.
    unordered_map<string,int> visited;
    string s;
    for (int i = 0; i < m; i++)
        s += "0,";
    visited[s] = 1;

    while (count <= k) {
        ans = pq.top().first;
        v = pq.top().second;
        pq.pop();

        // Generate the next candidates: advance the chosen column of one row at a time.
        for (int i = 0; i < m; i++) {
            vector<int> temp;
            sum = 0;
            int flag = 0;
            string key;
            for (int j = 0; j < m; j++) {
                if (i == j && v[j] < n - 1) {
                    sum += mat[j][v[j] + 1];
                    temp.push_back(v[j] + 1);
                    key += to_string(v[j] + 1) + ",";
                } else if (i == j && v[j] == n - 1) {
                    flag = 1;   // row i is already at its last column, no candidate from this row
                    break;
                } else {
                    sum += mat[j][v[j]];
                    temp.push_back(v[j]);
                    key += to_string(v[j]) + ",";
                }
            }
            if (!flag) {
                if (visited[key] == 0)
                    pq.push({sum, temp});
                visited[key] = 1;
            }
        }
        count++;
    }
    return ans;
}
For every row we calculate all possible sums but keep only the k smallest. We can use quickselect (std::nth_element) to do so in linear time.
The complexity below should be O(n * m * k).
class Solution {
public:
    int kthSmallest(vector<vector<int>>& mat, int k) {
        vector<int> sums = { 0 }, cur = {};
        for (const auto& row : mat) {
            for (const int cel : row) {
                for (const int sum : sums) {
                    cur.push_back(cel + sum);
                }
            }
            int nth = min((int) cur.size(), k);
            nth_element(cur.begin(), cur.begin() + nth, cur.end());
            sums.clear();
            copy(cur.begin(), cur.begin() + nth, back_inserter(sums));
            cur.clear();
        }
        return *max_element(sums.begin(), sums.end());
    }
};
So the algorithm goes like this:
We know that the elements of each row are sorted, so the minimum sum is obtained by selecting the first element of each row.
We maintain a set storing {sum, vector of positions of the currently chosen elements}, ordered by the sum.
To find the kth smallest sum, we repeat the following steps k-1 times: i) take the element at the beginning of the set and erase it; ii) generate the next possible combinations from that combination and insert them into the set.
After exiting the loop, return the sum of the combination at the beginning of the set. (A minimal sketch of this approach is given below.)
The algorithm (using a set) is explained in detail, with dry runs of a test case covering all the corner cases, in this YouTube video by alGOds: https://youtu.be/ZYlVCy_vRp8
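For illustration, here is a minimal sketch of that set-based idea (the function name kthSmallestSum is just for this example; it assumes 1 <= k <= the total number of combinations). The std::set orders entries by (sum, positions), and because it also deduplicates, no separate visited map is needed.

#include <set>
#include <vector>
using namespace std;

int kthSmallestSum(vector<vector<int>>& mat, int k) {
    int m = mat.size(), n = mat[0].size();
    // {sum, column index chosen in each row}, ordered by sum (ties broken by the index vector)
    set<pair<int, vector<int>>> s;
    int first = 0;
    for (int i = 0; i < m; ++i) first += mat[i][0];
    s.insert({first, vector<int>(m, 0)});

    for (int step = 1; step < k; ++step) {
        auto [sum, pos] = *s.begin();   // smallest remaining combination
        s.erase(s.begin());
        // Next combinations: advance the chosen column of one row at a time.
        for (int i = 0; i < m; ++i) {
            if (pos[i] + 1 < n) {
                vector<int> next = pos;
                ++next[i];
                s.insert({sum - mat[i][pos[i]] + mat[i][next[i]], next});
            }
        }
    }
    return s.begin()->first;
}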
What signals the program to say, "Ok the first recursive quickSort call is done; proceed to the second recursive call"?
int partition(int arr[], int low, int high)
{
    int pivot = arr[high];   // pivot
    int i = (low - 1);       // index of the smaller element
    for (int j = low; j <= high - 1; j++)
    {
        if (arr[j] <= pivot)
        {
            i++;             // increment index of the smaller element
            swap(&arr[i], &arr[j]);
        }
    }
    swap(&arr[i + 1], &arr[high]);
    return (i + 1);
}

void quickSort(int arr[], int low, int high)
{
    if (low < high)
    {
        int pi = partition(arr, low, high);
        quickSort(arr, low, pi - 1);
        quickSort(arr, pi + 1, high);
    }
}
Your actual question comes down to the recursion (call) stack. When the first recursive quickSort call reaches its base cases and returns, control comes back to the quickSort invocation that made it, and execution simply continues with the next statement, which is the second recursive call.
Let's first understand recursion: a method keeps calling itself on increasingly smaller cases, repeating the same non-recursive procedure each time, until it reaches a base case, at which point it stops.
In the case of quicksort, the base cases of the recursion are subarrays of size zero or one, which never need to be sorted. If that is not the case, the subarray still needs sorting, and that is why we call the quickSort method again, twice, on smaller subarrays.
We recurse on the part of the array containing the elements A[low] to A[pi - 1], and on the part containing the elements A[pi + 1] to A[high].
Why do we leave out A[pi]? Simple: it is already in its correct place.
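To make the control flow concrete, here is the call sequence for the small array {3, 1, 2} (just a trace of the functions above): the second recursive call starts only after the first one, and everything it called, has returned.

quickSort(arr, 0, 2);      // arr = {3, 1, 2}
    partition(arr, 0, 2);  // rearranges arr into {1, 2, 3} and returns pi = 1
    quickSort(arr, 0, 0);  // left part: low < high is false, so this call returns immediately
    quickSort(arr, 2, 2);  // runs only after the call above has returned; it also returns immediately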
I am referring to THIS problem and solution.
Firstly, I did not get why the sum of frequencies is added in the recursive equation.
Can someone please help me understand that, with an example maybe?
In the author's words:
We add the sum of frequencies from i to j (see the first term in the above
formula); this is added because every search will go through the root, and
one comparison will be done for every search.
In the code, the sum of frequencies (the purpose of which I do not understand) corresponds to fsum.
int optCost(int freq[], int i, int j)
{
    // Base cases
    if (j < i)      // If there are no elements in this subarray
        return 0;
    if (j == i)     // If there is one element in this subarray
        return freq[i];

    // Get sum of freq[i], freq[i+1], ... freq[j]
    int fsum = sum(freq, i, j);

    // Initialize minimum value
    int min = INT_MAX;

    // One by one consider all elements as root and recursively find cost
    // of the BST, compare the cost with min and update min if needed
    for (int r = i; r <= j; ++r)
    {
        int cost = optCost(freq, i, r - 1) + optCost(freq, r + 1, j);
        if (cost < min)
            min = cost;
    }

    // Return minimum value
    return min + fsum;
}
Secondly, this solution just returns the optimal cost. Any suggestions on how to obtain the actual BST?
Why we need the sum of frequencies
The idea behind the sum of frequencies is to correctly calculate the cost of a particular tree. It behaves like an accumulator that stores the weight of the tree.
Imagine that on the first level of recursion we start with all keys on the first level of the tree (we haven't picked a root element yet). Remember the weight function: it sums, over all nodes, the node weight multiplied by the node level. At this point the weight of our tree equals the sum of the weights of all keys, because each key can end up on any level (starting from the first), and in any case each key will contribute its weight at least once to the result.
1) Suppose we find the optimal root key, say key r. Next we move all keys except r one level down, because each remaining element can be located at best on the second level (the first level is already occupied). Because of that, we add the weight of each remaining key to our sum once more, since each of them will contribute at least twice its weight. The remaining keys are split into two subarrays according to the chosen element r (those to the left of r and those to the right).
2) The next step is to select the optimal keys for the second level, one from each of the two subarrays left over from the first step. After doing that, we again move all remaining keys one level down and add their weights to the sum, because they will be located at least on the third level, so each of them will contribute at least three times its weight.
3) And so on.
I hope this explanation gives you some understanding of why we need this sum of frequencies.
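A small worked example, using the keys {10, 12, 20} and frequencies {34, 8, 50} that appear in the main function further below: the first level contributes 34 + 8 + 50 = 92 no matter which root is chosen. If 20 is chosen as the root, keys 10 and 12 move down one level and contribute 34 + 8 = 42 once more; choosing 10 as the root of that left subtree moves 12 down again, adding another 8. The total 92 + 42 + 8 = 142 is exactly the cost of the tree with 20 at the root, 10 as its left child and 12 as 10's right child: 50·1 + 34·2 + 8·3 = 142.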
Finding the optimal BST
As the author mentions at the end of the article:
2) In the above solutions, we have computed optimal cost only. The
solutions can be easily modified to store the structure of BSTs also.
We can create another auxiliary array of size n to store the structure
of tree. All we need to do is, store the chosen ‘r’ in the innermost
loop.
We can do just that. Below you will find my implementation.
Some notes about it:
1) I was forced to replace int[n][n] with the utility class Matrix, because I used Visual C++, which does not support non-compile-time constant expressions as array sizes.
2) I used the second implementation of the algorithm from the article you provided (the dynamic-programming one), because it is much easier to add the functionality for storing the optimal BST to it.
3) The author has a mistake in his code: the second loop, for (int i=0; i<=n-L+1; i++), should have n-L as its upper bound, not n-L+1.
4) The way we store the optimal BST is as follows: for each pair i, j we store the optimal key index, in the same way as the optimal cost, except that instead of the cost we store the index of the chosen key. For example, for 0, n-1 we will have the index of the root key r of the resulting tree. Next we split the array in two according to the root element index r and look up their optimal key indexes by accessing the matrix cells 0, r-1 and r+1, n-1, and so forth. The utility function PrintResultTree uses this approach and prints the result tree in-order (left subtree, node, right subtree), so you basically get an ordered list, because it is a binary search tree.
5) Please don't flame me for my code - I'm not really a C++ programmer. :)
int optimalSearchTree(int keys[], int freq[], int n, Matrix& optimalKeyIndexes)
{
    /* Create an auxiliary 2D matrix to store results of subproblems */
    Matrix cost(n, n);
    optimalKeyIndexes = Matrix(n, n);

    /* cost[i][j] = Optimal cost of binary search tree that can be
       formed from keys[i] to keys[j].
       cost[0][n-1] will store the resultant cost */

    // For a single key, cost is equal to frequency of the key
    for (int i = 0; i < n; i++)
        cost.SetCell(i, i, freq[i]);

    // Now we need to consider chains of length 2, 3, ... .
    // L is chain length.
    for (int L = 2; L <= n; L++)
    {
        // i is row number in cost[][]
        for (int i = 0; i <= n - L; i++)
        {
            // Get column number j from row number i and chain length L
            int j = i + L - 1;
            cost.SetCell(i, j, INT_MAX);

            // Try making all keys in interval keys[i..j] the root
            for (int r = i; r <= j; r++)
            {
                // c = cost when keys[r] becomes root of this subtree
                int c = ((r > i) ? cost.GetCell(i, r - 1) : 0) +
                        ((r < j) ? cost.GetCell(r + 1, j) : 0) +
                        sum(freq, i, j);
                if (c < cost.GetCell(i, j))
                {
                    cost.SetCell(i, j, c);
                    optimalKeyIndexes.SetCell(i, j, r);
                }
            }
        }
    }
    return cost.GetCell(0, n - 1);
}
Below is the utility class Matrix:
class Matrix
{
private:
    int rowCount;
    int columnCount;
    std::vector<int> cells;

public:
    Matrix()
    {
    }

    Matrix(int rows, int columns)
    {
        rowCount = rows;
        columnCount = columns;
        cells = std::vector<int>(rows * columns);
    }

    int GetCell(int rowNum, int columnNum)
    {
        return cells[columnNum + rowNum * columnCount];
    }

    void SetCell(int rowNum, int columnNum, int value)
    {
        cells[columnNum + rowNum * columnCount] = value;
    }
};
And the main function, with a utility function to print the result tree in-order:
// Print result tree in-order
void PrintResultTree(
    Matrix& optimalKeyIndexes,
    int startIndex,
    int endIndex,
    int* keys)
{
    if (startIndex == endIndex)
    {
        printf("%d\n", keys[startIndex]);
        return;
    }
    else if (startIndex > endIndex)
    {
        return;
    }

    int currentOptimalKeyIndex = optimalKeyIndexes.GetCell(startIndex, endIndex);
    PrintResultTree(optimalKeyIndexes, startIndex, currentOptimalKeyIndex - 1, keys);
    printf("%d\n", keys[currentOptimalKeyIndex]);
    PrintResultTree(optimalKeyIndexes, currentOptimalKeyIndex + 1, endIndex, keys);
}

int main(int argc, char* argv[])
{
    int keys[] = { 10, 12, 20 };
    int freq[] = { 34, 8, 50 };
    int n = sizeof(keys) / sizeof(keys[0]);

    Matrix optimalKeyIndexes;
    printf("Cost of Optimal BST is %d \n", optimalSearchTree(keys, freq, n, optimalKeyIndexes));
    PrintResultTree(optimalKeyIndexes, 0, n - 1, keys);

    return 0;
}
EDIT:
Below you can find code that builds a simple tree-like structure.
Here is the utility TreeNode struct:
struct TreeNode
{
public:
    int Key;
    TreeNode* Left;
    TreeNode* Right;
};
And the updated main function together with the BuildResultTree function:
void BuildResultTree(Matrix& optimalKeyIndexes,
    int startIndex,
    int endIndex,
    int* keys,
    TreeNode*& tree)
{
    if (startIndex > endIndex)
    {
        return;
    }

    tree = new TreeNode();
    tree->Left = NULL;
    tree->Right = NULL;

    if (startIndex == endIndex)
    {
        tree->Key = keys[startIndex];
        return;
    }

    int currentOptimalKeyIndex = optimalKeyIndexes.GetCell(startIndex, endIndex);
    tree->Key = keys[currentOptimalKeyIndex];
    BuildResultTree(optimalKeyIndexes, startIndex, currentOptimalKeyIndex - 1, keys, tree->Left);
    BuildResultTree(optimalKeyIndexes, currentOptimalKeyIndex + 1, endIndex, keys, tree->Right);
}

int main(int argc, char* argv[])
{
    int keys[] = { 10, 12, 20 };
    int freq[] = { 34, 8, 50 };
    int n = sizeof(keys) / sizeof(keys[0]);

    Matrix optimalKeyIndexes;
    printf("Cost of Optimal BST is %d \n", optimalSearchTree(keys, freq, n, optimalKeyIndexes));
    PrintResultTree(optimalKeyIndexes, 0, n - 1, keys);

    TreeNode* tree = NULL; // BuildResultTree allocates the root and all child nodes
    BuildResultTree(optimalKeyIndexes, 0, n - 1, keys, tree);

    return 0;
}
I'm trying to implement the quickselect algorithm. Though I have understood the theory behind it very well, I'm finding it difficult to convert it into a well-functioning program.
Here is how I'm going about implementing it, step by step, and where I am facing a problem:
Problem: Find the 4th smallest element in A[] = {2,1,3,7,5,4,6}
k = 4.
Indexes: 0|1|2|3|4|5|6
Corresponding values: 2|1|3|7|5|4|6
Initially, l = 0 and r = 6.
Step 1) Take the pivot as the leftmost element (the pivot will always be the leftmost in this problem):
pivot_index = 0
pivot_value = 2
Step 2) Apply the partition algorithm, putting the pivot in its right place ([<p][p][>p]):
We get the following array: 1|2|3|7|5|4|6
where pivot_index = i-1 = 1
and therefore pivot_value = 2
Step 3) Compare pivot_index with k:
k = 4, pivot_index = 1; k > pivot_index
Hence, our k-th smallest number lies in the right part of the array.
The right part runs from i to r, and we do not bother with the left part (l to i-1) anymore.
Step 4) We modify the value of k as k - pivot_index => 4 - 1 = 3; k = 3.
Here is the problem: shouldn't the value of k be 2, because we have two values in the left part of the array (1|2)? Should we calculate k as k - (pivot_index + 1)?
Let's assume k = 3 is correct.
Step 5) The "new" array to work on is 3|7|5|4|6, with corresponding indexes 2|3|4|5|6.
Now, pivot_index = 2 and pivot_value = 3.
Step 6) Applying the partition algorithm on the above array:
3|7|5|4|6 (the array remains unchanged, as the pivot itself is the lowest value).
i = 3
pivot_index = i-1 = 2
pivot_value = 3
Step 7) Compare pivot_index with k:
k = 3 and pivot_index = 2
k > pivot_index
and so on...
Is this approach correct?
Here is my code, which is not working. I used a random number generator to select a random pivot; the pivot is then swapped with the first element of the array.
#include<stdio.h>
#include<stdlib.h>

void print_array(int arr[], int array_length){
    int i;
    for(i = 0; i < array_length; ++i) {
        printf("%d ", arr[i]);
    }
}

int random_no(min, max){
    int diff = max - min;
    return (int) (((double)(diff + 1) / RAND_MAX) * rand() + min);
}

void swap(int *a, int *b){
    int temp;
    temp = *a;
    *a = *b;
    *b = temp;
}

int get_kth_small(int arr[], int k, int l, int r){
    if((r - l) >= 1){
        k = k + (l - 1);
        int pivot_index = random_no(l, r);
        int i, j;
        swap(&arr[pivot_index], &arr[l]); // Switch the pivot with the first element in the array. Now the pivot is in arr[l]
        i = l + 1;
        for(j = l + 1; j <= r; ++j){
            if(arr[j] < arr[l]){
                swap(&arr[j], &arr[i]);
                ++i;
            }
        }
        swap(&arr[l], &arr[i - 1]); // Switch the pivot to the correct place; <p, p, >p
        printf("value of i-1: %d\n", i - 1);
        printf("Value of k: %d\n", k);
        if(k == (i - 1)){
            printf("Found: %d\n", arr[i]);
            return 0;
        }
        if(k > (i - 1)){
            k = k - (i - 1);
            get_kth_small(arr, k, i, r);
        } else {
            get_kth_small(arr, k, l, r - 1);
        }
        //get_kth_small(arr, k, i, r);
        //get_kth_small(arr, k, l, i-1);
    }
}

void main(){
    srand(time(NULL));
    int arr[] = {2, 1, 3, 7, 5, 4, 6};
    int arr_size = sizeof(arr) / sizeof(arr[0]);
    int k = 3, l = 0;
    int r = arr_size - 1;
    //printf("Enter the value of k: ");
    //scanf("%d", &k);
    get_kth_small(arr, k, l, r);
    print_array(arr, arr_size);
    printf("\n");
}
What you describe is a valid way to implement quickselect. There are numerous other approaches to choosing the pivot, and most of them will give a better expected complexity, but in essence the algorithm is the same.
"Step 2: putting the pivot at the right place": don't do that. In fact, you can't put the pivot at the right place, as you don't know what that place is. The partitioning rule is to put all elements smaller than or equal to the pivot before those that are larger. Just leave the pivot where it is!
Quickselect goes as follows: to find the Kth among N elements, 1) choose a pivot value, 2) move all elements smaller than or equal to the pivot before the others, forming two zones of lengths Nle and Ngt, 3) recurse on the relevant zone with (K, Nle) or (K - Nle, Ngt), until N = 1.
Actually, any value can be taken for the pivot, even one not present in the array; but the partition must be such that Nle and Ngt are both nonzero.
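Here is a minimal sketch of that scheme (not the questioner's in-place version: it copies the zones into separate vectors for clarity, and it uses a three-way <, =, > split so the recursion always makes progress; k is 1-based and assumed to satisfy 1 <= k <= a.size()):

#include <cstdlib>
#include <vector>

int quickselect(std::vector<int> a, int k) {        // k = 1 returns the minimum
    while (true) {
        if (a.size() == 1) return a[0];
        int pivot = a[std::rand() % a.size()];      // any element can serve as the pivot value
        std::vector<int> less, equal, greater;
        for (int x : a) {
            if (x < pivot) less.push_back(x);
            else if (x == pivot) equal.push_back(x);
            else greater.push_back(x);
        }
        if (k <= (int)less.size()) {
            a = less;                               // the kth smallest lies in the "<" zone
        } else if (k <= (int)(less.size() + equal.size())) {
            return pivot;                           // the kth smallest equals the pivot
        } else {
            k -= less.size() + equal.size();        // skip the "<" and "=" zones
            a = greater;
        }
    }
}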
I have the following Quicksort that always chooses the first element of the subsequence as its pivot:
void qqsort(int array[], int start, int end) {
    int i = start;                    // index of left-to-right scan
    int k = end;                      // index of right-to-left scan

    if (end - start >= 1) {           // check that there are at least two elements to sort
        int pivot = array[start];     // set the pivot as the first element in the partition

        while (k > i) {               // while the scan indices from left and right have not met,
            while (array[i] <= pivot && i <= end && k > i)   // from the left, look for the first element greater than the pivot
                i++;
            while (array[k] > pivot && k >= start && k >= i) // from the right, look for the first element not greater than the pivot
                k--;
            if (k > i)                // if the left index is still smaller than the right index, swap the corresponding elements
                swap(array, i, k);
        }

        swap(array, start, k);        // after the indices have crossed, swap the last element in the left partition with the pivot

        qqsort(array, start, k - 1);  // quicksort the left partition
        qqsort(array, k + 1, end);    // quicksort the right partition
    } else {                          // if there is only one element in the partition, do not do any sorting
        return;
    }
}
Now, as you can see, this algorithm always takes the first element as the pivot: int pivot = array[start];
I want to modify this algorithm to always use the last element of the subsequence instead of the first, because I want to compare the physical running times of both implementations.
I tried changing the line int pivot = array[start]; to int pivot = array[end];, but the algorithm then output an unsorted sequence:
//Changes: int pivot = array[end];
unsorted: {5 4 3 2 1}
*sorted*: {1 2 5 4 3}
To test another pivot, I also tried using the center element of the subsequence, but the algorithm still failed:
//Changes: int pivot = array[(start + end) / 2];
unsorted: {5 3 4 2 1}
*sorted*: {3 2 4 1 5}
Can someone please help me understand this algorithm correctly and tell me what changes I need to make so that this implementation always chooses the last element of the subsequence as the pivot?
The Cause of the Problem
The problem is that you use int k = end;. It was fine to use int i = start; when the pivot was the first element of the array, because the checks in the loop (array[i] <= pivot) skim past it. However, when you use the last element as the pivot, k stops on the end index and the final swap moves the pivot to a position in the left half of the partition. At that point you're already in trouble, because the pivot will most likely end up somewhere inside the left partition rather than at the border.
The Solution
To fix this, you need to set int k = end - 1; when you use the rightmost element as the pivot. You'll also need to change the lines that swap the pivot to the border between the left and right partitions:
swap(array, i, end);
qqsort(array, start, i - 1);
qqsort(array, i + 1, end);
You have to use i for this because i ends up at the leftmost element of the right partition (which can then be swapped with the pivot sitting in the rightmost element, and this preserves the ordering). Lastly, you'll want to change k >= i to k > i in the while loop that decrements k, or else there is a small chance of an array[-1] indexing error. That couldn't happen before, because by that point i was always at least start + 1.
That should do it.
Sidenote:
This is a poorly written quicksort that I wouldn't recommend learning from. It has some extraneous, unnecessary comparisons, along with some other faults that I won't waste time listing. I would recommend the quicksorts in this presentation by Sedgewick and Bentley.
I didn't test it, but check it anyway:
this
// after the indices have crossed,
// swap the last element in the left partition with the pivot
swap(array, start, k);
probably should be
swap(array, end, i);
or something similar, if we choose end as the pivot.
Edit: That's an interesting partitioning algorithm, but it's not the standard one.
Well, the pivot is fixed in the logic of the partitioning.
The algorithm treats the first element as the head and the remaining elements as the body to be partitioned.
After the partitioning is done, as a final step, the head (the pivot) is swapped with the last element of the left partitioned part, to keep the ordering.
The only way I figured out to use a different pivot, without changing the algorithm, is this:
...
if (end - start >= 1) {
    // Swap the 1st element (the head) with the chosen pivot
    swap(array, start, pivot_index);
    int pivot = array[start];
    ...
First hint: if the data are random, it does not matter, on average, which value you choose as the pivot. The only way to actually improve the "quality" of the pivot is to look at more (e.g. 3) indices and use the one holding the median of those values.
Second hint: if you change the pivot value, you also need to change the pivot index. This is not stated explicitly, but array[start] is swapped into the "middle" of the sorted subsequence at one point; you need to modify that line accordingly. If you take an index that is not at the edge of the subsequence, you need to swap it to the edge first, before the iteration.
Third hint: the code you provided is excessively commented, so you should be able to actually understand this implementation.
Put a single

swap(array, start, end);

before initializing the pivot:

int pivot = array[start];
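In context, that single extra swap is the only change needed to the qqsort from the question; a sketch (the name qqsortLastPivot and the swap helper are just for this example, everything else is the question's code unchanged):

// Swap two array elements by index, as used by the question's code.
static void swap(int array[], int x, int y) {
    int tmp = array[x];
    array[x] = array[y];
    array[y] = tmp;
}

void qqsortLastPivot(int array[], int start, int end) {
    int i = start;                        // index of left-to-right scan
    int k = end;                          // index of right-to-left scan
    if (end - start >= 1) {
        swap(array, start, end);          // move the last element to the front so that it becomes the pivot
        int pivot = array[start];         // from here on the original algorithm runs unchanged
        while (k > i) {
            while (array[i] <= pivot && i <= end && k > i)
                i++;
            while (array[k] > pivot && k >= start && k >= i)
                k--;
            if (k > i)
                swap(array, i, k);
        }
        swap(array, start, k);
        qqsortLastPivot(array, start, k - 1);
        qqsortLastPivot(array, k + 1, end);
    }
}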
#include <time.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
using namespace std;

int counter = 0;

void disp(int *a, int n)
{
    for (int i = 0; i < n; i++)
        cout << a[i] << " ";
    cout << endl;
}

void swap(int a[], int p, int q)
{
    int temp;
    temp = a[p];
    a[p] = a[q];
    a[q] = temp;
}

int partition(int a[], int p, int start, int end)
{
    swap(a, p, start);       // swap the pivot with the first element of the partition
    counter += end - start;  // instead of (end - start + 1)
    int i = start + 1;
    for (int j = start + 1; j <= end; j++)
    {
        if (a[j] < a[start])
        {
            swap(a, j, i);
            i++;
        }
    }
    swap(a, start, i - 1);   // not swap(a, p, i-1), because p and start were already swapped; this was the earlier mistake committed
    return i - 1;            // return the index of the pivot
}

void quicksort(int a[], int start, int end)
{
    if (start >= end)
        return;
    int p = end;             // here we choose the last element of the subarray as the pivot;
                             // p is the index of the element chosen as the pivot
    int index = partition(a, p, start, end);
    quicksort(a, start, index - 1);
    quicksort(a, index + 1, end);
}

int main()
{
    ifstream fin("data.txt");
    int count = 0;
    int array[100000];
    while (fin >> array[count])
    {
        count++;
    }
    quicksort(array, 0, count - 1);
    /*
    int a[] = {32, 56, 34, 45, 23, 54, 78};
    int n = sizeof(a) / sizeof(int);
    disp(a, n);
    quicksort(a, 0, n - 1);
    disp(a, n);
    */
    cout << endl << counter;
    return 0;
}
If you scan each element from the first element of the subarray to the last - 1, keeping the last element as the pivot at every recursion, you get an implementation like the following, which runs in O(n log n) time on average.
#include<stdio.h>

void quicksort(int [], int, int);

int main()
{
    int n, i = 0, a[20];
    scanf("%d", &n);
    while (i < n)
        scanf("%d", &a[i++]);
    quicksort(a, 0, n - 1);
    i = 0;
    while (i < n)
        printf("%d ", a[i++]);
}

void quicksort(int a[], int p, int r)
{
    int i, j, x, temp;
    if (p < r)
    {
        i = p;
        x = a[r];                 // the last element is the pivot
        for (j = p; j < r; j++)
        {
            if (a[j] <= x)
            {
                if (a[j] < a[i])
                {
                    temp = a[j];
                    a[j] = a[i];
                    a[i] = temp;
                }
                i++;
            }
            else
            {
                temp = a[i];
                a[i] = a[j];
                a[j] = temp;
            }
        }
        if (i != r)               // move the pivot from a[r] into its final position a[i] (skip the self-swap when i == r)
        {
            temp = a[r];
            a[r] = a[i];
            a[i] = temp;
        }
        quicksort(a, p, i - 1);
        quicksort(a, i + 1, r);
    }
}