We are given a permutation of the numbers {1, ..., N}.
We are also given an integer k, followed by k queries of this type:
q(x, y, l, r) - count the numbers between positions x and y in the permutation that are >= l and <= r.
For example:
N = 7: (1 6 3 5 7 4 2)
q(1,4,2,7) -> 3 numbers (6, 3 and 5, since 2<=6<=7, 2<=3<=7 and 2<=5<=7)
My attempt was to store the permutation and a position array (to have fast access to the position of each number).
Then I check which interval is smaller, [x,y] or [l,r], and iterate through the smaller one.
The answers I get are correct, but I get 0 points because my solution is too slow.
Any tips on how to make these queries as fast as possible for big N?
#include <iostream>
using namespace std;

int main()
{
    int n;
    cin >> n;
    int q;
    cin >> q;
    int* perm = new int[n+1];
    int* pos = new int[n+1];
    for (int i = 1; i <= n; i++)
    {
        int num;
        cin >> num;
        perm[i] = num;   // value at position i
        pos[num] = i;    // position of value num
    }
    for (int i = 0; i < q; i++)
    {
        int x, y, l, r;
        cin >> x >> y >> l >> r;
        int count = 0;
        if (y - x < r - l)
        {
            // iterate over the positions [x, y]
            for (int i = x; i <= y; i++)
            {
                if (perm[i] >= l && perm[i] <= r)
                    count++;
            }
            cout << count << endl;
        }
        else
        {
            // iterate over the values [l, r]
            for (int i = l; i <= r; i++)
            {
                if (pos[i] >= x && pos[i] <= y)
                    count++;
            }
            cout << count << endl;
        }
    }
}
Problem link: https://cses.fi/problemset/task/1649
I'm solving this problem using a segment tree, and the solution I've written is:
#include <bits/stdc++.h>
#define MAX 1000000001
using namespace std;

int n;
vector<int> tree;

int sum(int a, int b)
{
    a += n;
    b += n;
    int s = INT_MAX;
    while (a <= b) {
        if (a % 2 == 1) s = min(s, tree[a++]);
        if (b % 2 == 0) s = min(s, tree[b--]);
        a >>= 1;
        b >>= 1;
    }
    return s;
}

void update(int k, int change)
{
    k += n;
    tree[k] = change;
    for (int i = k>>1; i >= 1; i >>= 1) {
        tree[i] = min(tree[2*i], tree[2*i+1]);
    }
    return;
}

int main()
{
    int q;
    cin >> n >> q;
    n = pow(2, ceil(log2(n)));
    tree.resize(2*n, INT_MAX);
    for (int i = 0; i < n; i++) {
        cin >> tree[i+n];
    }
    for (int i = n-1; i >= 1; i--) {
        tree[i] = min(tree[2*i], tree[2*i+1]);
    }
    int type, a, b;
    for (int i = 0; i < q; i++) {
        cin >> type >> a >> b;
        if (type == 1) {
            update(a-1, b);
        } else {
            cout << sum(a-1, b-1) << endl;
        }
    }
    return 0;
}
It works with the first test case, but not with the second one. I've looked at other solutions online and they all look similar to mine. Please help me spot the mistake.
This is a very simple program where the user inputs (x, y) coordinates and a distance d, and the program has to find the number of unrepeated coordinates from (x, y) to (x+d, y).
For example: if the input for one test case is 4, 9, 2 (x=4, y=9, d=2), then the unrepeated coordinates are (4,9), (5,9) and (6,9). I have used a sorting algorithm as mentioned in the question (to keep track of multiple occurrences), but the program shows a runtime error for test cases beyond 30. Is there a mistake in the code, or is it an issue with my compiler?
For a detailed explanation of the question: https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/missing-soldiers-december-easy-easy/
#include <stdio.h>
#include <stdlib.h>

int partition(int *arr, int p, int r) {
    int x;
    x = arr[r];
    int tmp;
    int i = p - 1;
    for (int j = p; j <= r - 1; ++j) {
        if (arr[j] <= x) {
            i = i + 1;
            tmp = arr[i];
            arr[i] = arr[j];
            arr[j] = tmp;
        }
    }
    tmp = arr[i + 1];
    arr[i + 1] = arr[r];
    arr[r] = tmp;
    return (i + 1);
}

void quicksort(int *arr, int p, int r) {
    int q;
    if (p < r) {
        q = partition(arr, p, r);
        quicksort(arr, p, q - 1);
        quicksort(arr, q + 1, r);
    }
}

int count(int A[], int ct) {
    int cnt = 0;
    for (int i = 0; i < ct; ++i) {
        if (A[i] != A[i + 1]) {
            cnt++;
        }
    }
    return cnt;
}

int main() {
    int t;
    scanf("%d", &t);
    long int tmp, y, d;
    int ct = 0;
    int i = 0;
    int x[1000];
    int j = 0;
    for (int l = 0; l < t; ++l) {
        scanf("%d%d%d", &tmp, &y, &d);
        ct = ct + d + 1; // this counts the total no of coordinates for each (x,y,d)
        for (int i = 0; i <= d; ++i) {
            x[j] = tmp + i; // storing all possible x to x+d coordinates
            j++;
        }
    }
    int cnt;
    int p = ct - 1;
    quicksort(x, 0, p); // quicksort sorting
    for (int l = 0; l < ct; ++l) {
        printf("%d ", x[l]); // prints sorted array, not necessary to the question
    }
    cnt = count(x, ct); // counts the number of non-repeated vertices
    printf("%d\n", cnt);
}
The problem was that the bounds of the array int x[1000] are not enough for the given test data.
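As a sketch of that fix (my illustration, not the original poster's final code), the fixed-size buffer can be replaced with a std::vector that grows as needed; the scanf format is also matched to the long variables here, and the output mirrors the structure of the original program:

#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    scanf("%d", &t);
    vector<int> x;                        // replaces the fixed int x[1000]
    for (int l = 0; l < t; ++l) {
        long tmp, y, d;
        scanf("%ld %ld %ld", &tmp, &y, &d);
        for (long i = 0; i <= d; ++i)
            x.push_back((int)(tmp + i)); // store every covered x-coordinate
    }
    sort(x.begin(), x.end());
    // count the distinct x-coordinates (what the original count() is aiming for)
    int cnt = (int)(unique(x.begin(), x.end()) - x.begin());
    printf("%d\n", cnt);
    return 0;
}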
Why am I getting a time limit exceeded error when sorting an array with this merge sort implementation? What is wrong with my code? I have taken an input of 9 elements.
Input: 4 2 1 8 5 9 6 7 0
Output: Time limit exceeded
#include <bits/stdc++.h>
using namespace std;

int a[100];

void merge(int a[], int l, int r, int m) {
    int t[r - l + 1];
    int i = l, j = m + 1, k = 0;
    while (i <= m && j <= r) {
        if (a[i] < a[j])
            t[k++] = a[i++];
        else
            t[k++] = a[j++];
    }
    while (i <= m)
        t[k++] = a[i++];
    while (j <= r)
        t[k++] = a[j++];
    for (int i = l; i <= r; i++)
        a[i] = t[i - l];
}

void msort(int a[], int l, int r) {
    if (l > r)
        return;
    int m = (r + l) / 2;
    msort(a, l, m);
    msort(a, m + 1, r);
    merge(a, l, r, m);
}

int main() {
    int n;
    cin >> n;
    for (int i = 0; i < n; i++)
        cin >> a[i];
    msort(a, 0, n - 1);
    for (int i = 0; i < n; i++)
        cout << a[i] << " ";
    cout << endl;
    return 0;
}
There are some problems in your code:
The test for termination in msort() is incorrect: you should stop when the slice has a single element or fewer. As written, the function recurses forever on slices of 1 element.
if (l >= r) return;
You should also check in main() that the number n of elements read from the user is no greater than 100, the size of the global array a into which you read the elements to be sorted. Better still, use a local array of the proper size or allocate the array from the heap. The temporary array t in merge() might also be too large for automatic (stack) allocation; it is more efficient to allocate the temporary space once and pass it down the recursion.
Note also that it is idiomatic in C and C++ to specify array slices with the index of the first element and the index of the element after the last one. This simplifies the code, allows for empty slices, and avoids special cases for unsigned index types.
Here is a modified version with this approach:
#include <bits/stdc++.h>
using namespace std;

void merge(int a[], int l, int r, int m, int t[]) {
    int i = l, j = m, k = 0;
    while (i < m && j < r) {
        if (a[i] < a[j])
            t[k++] = a[i++];
        else
            t[k++] = a[j++];
    }
    while (i < m)
        t[k++] = a[i++];
    while (j < r)
        t[k++] = a[j++];
    for (int i = l; i < r; i++)
        a[i] = t[i - l];
}

void msort(int a[], int l, int r, int t[]) {
    if (r - l > 1) {
        int m = l + (r - l) / 2;
        msort(a, l, m, t);
        msort(a, m, r, t);
        merge(a, l, r, m, t);
    }
}

void msort(int a[], int n) {
    if (n > 1) {
        int *t = new int[n];
        msort(a, 0, n, t);
        delete[] t;
    }
}

int main() {
    int n;
    cin >> n;
    if (n <= 0)
        return 1;
    int *a = new int[n];
    for (int i = 0; i < n; i++)
        cin >> a[i];
    msort(a, n);
    for (int i = 0; i < n; i++)
        cout << a[i] << " ";
    cout << endl;
    delete[] a;
    return 0;
}
Given an array of integers, we have to find the length of the longest subsequence such that the gcd of any two consecutive elements in the subsequence is greater than 1.
For example, if the array = [12, 8, 2, 3, 6, 9],
then one such subsequence is {12, 8, 2, 6, 9}
and another is {12, 3, 6, 9}.
I tried to solve this problem with dynamic programming. Let maxCount be the array such that maxCount[i] holds the length of the longest such subsequence ending at index i.
maxCount[0] = 1;
for (i = 1; i < N; i++)
{
    max = 1;
    for (j = i - 1; j >= 0; j--)
    {
        if (gcd(arr[i], arr[j]) > 1)
        {
            temp = maxCount[j] + 1;
            if (temp > max)
                max = temp;
        }
    }
    maxCount[i] = max;
}

max = 0;
for (i = 0; i < N; i++)
{
    if (maxCount[i] > max)
        max = maxCount[i];
}
cout << max << endl;
But this approach gets a timeout, since its time complexity is O(N^2). Can we improve the time complexity?
The condition "gcd is greater than 1" means that the two numbers share at least one common divisor greater than 1. So let dp[i] be the length of the longest subsequence ending in a number divisible by i.
int n;
cin >> n;
const int MAX_NUM = 100 * 1000;
static int dp[MAX_NUM];
for (int i = 0; i < n; ++i)
{
    int x;
    cin >> x;
    int cur = 1;
    vector<int> d;
    // collect all divisors > 1 of x and take the best dp value among them
    for (int i = 2; i * i <= x; ++i)
    {
        if (x % i == 0)
        {
            cur = max(cur, dp[i] + 1);
            cur = max(cur, dp[x / i] + 1);
            d.push_back(i);
            d.push_back(x / i);
        }
    }
    if (x > 1)
    {
        cur = max(cur, dp[x] + 1);
        d.push_back(x);
    }
    // record the new best length for every divisor of x
    for (int j : d)
    {
        dp[j] = cur;
    }
}
cout << *max_element(dp, dp + MAX_NUM) << endl;
This solution has O(N * sqrt(MAX_NUM)) complexity. Actually, you only need to keep dp values for prime numbers. To implement this you should be able to get the prime factorization in less than O(sqrt(x)) time (this method, for example). That optimization reduces the complexity to O(N * factorization + N log N). As a memory optimization, you can replace the dp array with a map or unordered_map.
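As a rough sketch of that optimization (my addition, not from the original answer): precompute the smallest prime factor of every value up to MAX_NUM once, split each number into its distinct prime factors using that table, and keep dp only for primes.

#include <bits/stdc++.h>
using namespace std;

int main() {
    const int MAX_NUM = 100 * 1000;
    // smallest-prime-factor sieve: spf[x] = smallest prime dividing x
    vector<int> spf(MAX_NUM + 1, 0);
    for (int i = 2; i <= MAX_NUM; ++i)
        if (spf[i] == 0)
            for (int j = i; j <= MAX_NUM; j += i)
                if (spf[j] == 0) spf[j] = i;

    int n;
    cin >> n;
    unordered_map<int, int> dp; // dp[p] = longest subsequence ending in a multiple of prime p
    int best = 0;
    for (int k = 0; k < n; ++k) {
        int x;
        cin >> x;
        // distinct prime factors of x via the sieve
        vector<int> primes;
        while (x > 1) {
            int p = spf[x];
            primes.push_back(p);
            while (x % p == 0) x /= p;
        }
        int cur = 1;
        for (int p : primes) cur = max(cur, dp[p] + 1);
        for (int p : primes) dp[p] = cur;
        best = max(best, cur);
    }
    cout << best << endl;
    return 0;
}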
Computing a GCD takes O(log m) time, where m is the maximum number in the array. Therefore, using a segment tree and binary search, one can reduce the time complexity to O(n log (m² * n)) (with O(n log m) preprocessing). This list details other data structures that can be used for RMQ-type queries and to reduce the complexity further.
Here is one possible implementation of this:
#include <bits/stdc++.h>
using namespace std;

struct SegTree {
    using ftype = function<int(int, int)>;
    vector<int> vec;
    int l, og, dummy;
    ftype f;
    template<typename T> SegTree(const vector<T> &v, const T &x, const ftype &func) : og(v.size()), f(func), l(1), dummy(x) {
        assert(og >= 1);
        while (l < og) l *= 2;
        vec = vector<int>(l*2);
        for (int i = l; i < l+og; i++) vec[i] = v[i-l];
        for (int i = l+og; i < 2*l; i++) vec[i] = dummy;
        for (int i = l-1; i >= 1; i--) {
            if (vec[2*i] == dummy && vec[2*i+1] == dummy) vec[i] = dummy;
            else if (vec[2*i] == dummy) vec[i] = vec[2*i+1];
            else if (vec[2*i+1] == dummy) vec[i] = vec[2*i];
            else vec[i] = f(vec[2*i], vec[2*i+1]);
        }
    }
    SegTree() {}
    void valid(int x) {assert(x >= 0 && x < og);}
    int get(int a, int b) {
        valid(a); valid(b); assert(b >= a);
        a += l; b += l;
        int s = vec[a];
        a++;
        while (a <= b) {
            if (a % 2 == 1) {
                if (vec[a] != dummy) s = f(s, vec[a]);
                a++;
            }
            if (b % 2 == 0) {
                if (vec[b] != dummy) s = f(s, vec[b]);
                b--;
            }
            a /= 2; b /= 2;
        }
        return s;
    }
    void add(int x, int c) {
        valid(x);
        x += l;
        vec[x] += c;
        for (x /= 2; x >= 1; x /= 2) {
            if (vec[2*x] == dummy && vec[2*x+1] == dummy) vec[x] = dummy;
            else if (vec[2*x] == dummy) vec[x] = vec[2*x+1];
            else if (vec[2*x+1] == dummy) vec[x] = vec[2*x];
            else vec[x] = f(vec[2*x], vec[2*x+1]);
        }
    }
    void update(int x, int c) {add(x, c-vec[x+l]);}
};

// Constructor (where val is something that an element in the array is
// guaranteed to never reach):
// SegTree st(vec, val, func);

// finds longest subsequence where GCD is greater than 1
int longest(const vector<int> &vec) {
    int l = vec.size();
    SegTree st(vec, -1, [](int a, int b){return __gcd(a, b);});
    // checks if a certain length is valid in O(n log (m² * n)) time
    auto valid = [&](int n) -> bool {
        for (int i = 0; i <= l-n; i++) {
            if (st.get(i, i+n-1) != 1) {
                return true;
            }
        }
        return false;
    };
    int length = 0;
    // do a "binary search" on the best possible length
    for (int i = l; i >= 1; i /= 2) {
        while (length+i <= l && valid(length+i)) {
            length += i;
        }
    }
    return length;
}
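For completeness, a small driver along these lines (my addition, assuming an input format of n followed by n numbers) could be used to exercise longest():

int main() {
    int n;
    cin >> n;
    vector<int> vec(n);
    for (auto &x : vec) cin >> x;   // read the array
    cout << longest(vec) << endl;   // print the best length found
    return 0;
}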
I am quite new to segment trees and would like to keep myself busy by doing some more exercises on them.
The problem is more ACM-like and has the following conditions:
There are n numbers and m operations, n, m <= 10,000; each operation can be one of the following:
1. Update an interval by subtracting a number x from every element; x can be different each time
2. Query an interval to find how many numbers in the interval are <= 0
Building the segment tree and updating can obviously be done in O(n log n) / O(log n).
But I cannot figure out how to answer a query in O(log n). Can anyone give me some suggestions / hints?
Any suggestions would be helpful! Thanks!
TL;DR:
Given n numbers, and 2 types of operations:
add x to all elements in [a,b]; x can be different each time
query the number of elements in [a,b] that are < C, where C is a given constant
How can operations 1 & 2 both be done in O(log n)?
Nice problem :)
I thought about it for a while but still couldn't work it out with a segment tree, so I tried using the "bucket method" to solve this problem.
We can divide the initial n numbers into B buckets, sort the numbers in each bucket, and maintain the total added value of each bucket. Then for each operation:
"Add": update interval [a, b] with c
we only need to rebuild at most two partially covered buckets, and add c to the roughly (b - a) / BUCKET_SIZE fully covered buckets
"Query": count values <= c in interval [a, b]
we only need to scan at most two partially covered buckets value by value, and the roughly (b - a) / BUCKET_SIZE fully covered buckets can each be handled quickly with a binary search
Each query runs in roughly O((N / BUCKET_SIZE) * log2(BUCKET_SIZE)) (plus O(BUCKET_SIZE) for the two partial buckets), which is smaller than the brute-force O(N). Though it's bigger than O(log N), it may be sufficient in most cases.
Here is the test code:
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <cstring>
#include <cmath>
#include <algorithm>
#include <vector>
#include <set>
#include <map>
#include <ctime>
#include <cassert>
using namespace std;

struct Query {
    // A a b c: add c in [a, b] of arr
    // Q a b c: query number of i in [a, b] for which arr[i] <= c
    char ty;
    int a, b, c;
    Query(char _ty, int _a, int _b, int _c) : ty(_ty), a(_a), b(_b), c(_c) {}
};

int n, m;
vector<int> arr;
vector<Query> queries;

vector<int> bruteforce() {
    vector<int> ret;
    vector<int> numbers = arr;
    for (int i = 0; i < m; i++) {
        Query q = queries[i];
        if (q.ty == 'A') {
            for (int i = q.a; i <= q.b; i++) {
                numbers[i] += q.c;
            }
            ret.push_back(-1);
        } else {
            int tmp = 0;
            for (int i = q.a; i <= q.b; i++) {
                tmp += numbers[i] <= q.c;
            }
            ret.push_back(tmp);
        }
    }
    return ret;
}

struct Bucket {
    vector<int> numbers;
    vector<int> numbers_sorted;
    int add;
    Bucket() {
        add = 0;
        numbers_sorted.clear();
        numbers.clear();
    }
    int query(int pos) {
        return numbers[pos] + add;
    }
    void add_pos(int pos, int val) {
        numbers[pos] += val;
    }
    void build() {
        numbers_sorted = numbers;
        sort(numbers_sorted.begin(), numbers_sorted.end());
    }
};

vector<int> bucket_count(int bucket_size) {
    vector<int> ret;
    vector<Bucket> buckets;
    buckets.resize(int(n / bucket_size) + 5);
    for (int i = 0; i < n; i++) {
        buckets[i / bucket_size].numbers.push_back(arr[i]);
    }
    for (int i = 0; i <= n / bucket_size; i++) {
        buckets[i].build();
    }
    for (int i = 0; i < m; i++) {
        Query q = queries[i];
        char ty = q.ty;
        int a, b, c;
        a = q.a, b = q.b, c = q.c;
        if (ty == 'A') {
            set<int> affect_buckets;
            while (a < b && a % bucket_size != 0) buckets[a / bucket_size].add_pos(a % bucket_size, c), affect_buckets.insert(a / bucket_size), a++;
            while (a < b && b % bucket_size != 0) buckets[b / bucket_size].add_pos(b % bucket_size, c), affect_buckets.insert(b / bucket_size), b--;
            while (a < b) {
                buckets[a / bucket_size].add += c;
                a += bucket_size;
            }
            buckets[a / bucket_size].add_pos(a % bucket_size, c), affect_buckets.insert(a / bucket_size);
            for (set<int>::iterator it = affect_buckets.begin(); it != affect_buckets.end(); it++) {
                int id = *it;
                buckets[id].build();
            }
            ret.push_back(-1);
        } else {
            int tmp = 0;
            while (a < b && a % bucket_size != 0) tmp += (buckets[a / bucket_size].query(a % bucket_size) <= c), a++;
            while (a < b && b % bucket_size != 0) tmp += (buckets[b / bucket_size].query(b % bucket_size) <= c), b--;
            while (a < b) {
                int pos = a / bucket_size;
                tmp += upper_bound(buckets[pos].numbers_sorted.begin(), buckets[pos].numbers_sorted.end(), c - buckets[pos].add) - buckets[pos].numbers_sorted.begin();
                a += bucket_size;
            }
            tmp += (buckets[a / bucket_size].query(a % bucket_size) <= c);
            ret.push_back(tmp);
        }
    }
    return ret;
}

void process(int cas) {
    clock_t begin_t = clock();
    vector<int> bf_ans = bruteforce();
    clock_t bf_end_t = clock();
    double bf_sec = ((1.0 * bf_end_t - begin_t)) / CLOCKS_PER_SEC;
    // bucket_size is important
    int bucket_size = 200;
    vector<int> ans = bucket_count(bucket_size);
    clock_t bucket_end_t = clock();
    double bucket_sec = ((1.0 * bucket_end_t - bf_end_t)) / CLOCKS_PER_SEC;
    bool correct = true;
    for (int i = 0; i < ans.size(); i++) {
        if (ans[i] != bf_ans[i]) {
            cout << "query " << i + 1 << " bf = " << bf_ans[i] << " bucket = " << ans[i] << " bucket size = " << bucket_size << " " << n << " " << m << endl;
            correct = false;
        }
    }
    printf("Case #%d:%s bf_sec = %.9lf, bucket_sec = %.9lf\n", cas, correct ? "YES" : "NO", bf_sec, bucket_sec);
}

void read() {
    cin >> n >> m;
    arr.clear();
    for (int i = 0; i < n; i++) {
        int val;
        cin >> val;
        arr.push_back(val);
    }
    queries.clear();
    for (int i = 0; i < m; i++) {
        char ty;
        int a, b, c;
        // a, b, c in [0, n - 1], a <= b
        cin >> ty >> a >> b >> c;
        queries.push_back(Query(ty, a, b, c));
    }
}

void run(int cas) {
    read();
    process(cas);
}

int main() {
    freopen("bucket.in", "r", stdin);
    //freopen("bucket.out", "w", stdout);
    int T;
    scanf("%d", &T);
    for (int cas = 1; cas <= T; cas++) {
        run(cas);
    }
    return 0;
}
and here is the data-generation code:
#coding=utf8
import random
import math

def gen_buckets(f):
    t = random.randint(10, 20)
    print >> f, t
    nlimit = 100000
    mlimit = 10000
    limit = 100000
    for i in xrange(t):
        n = random.randint(1, nlimit)
        m = random.randint(1, mlimit)
        print >> f, n, m
        for i in xrange(n):
            val = random.randint(1, limit)
            print >> f, val,
        print >> f
        for i in xrange(m):
            ty = random.randint(1, 2)
            a = random.randint(0, n - 1)
            b = random.randint(a, n - 1)
            #a = 0
            #b = n - 1
            c = random.randint(-limit, limit)
            print >> f, 'A' if ty == 1 else 'Q', a, b, c

f = open("bucket.in", "w")
gen_buckets(f)
Try applying a Binary Indexed Tree (BIT) instead of a segment tree. Here's the link to the tutorial.
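For reference, here is a minimal Fenwick tree (BIT) sketch supporting point update and prefix sum in O(log n). This is my generic illustration of the data structure being suggested, not a complete solution to the range-add / count-below-C problem above.

#include <bits/stdc++.h>
using namespace std;

struct BIT {
    int n;
    vector<long long> t;
    BIT(int n) : n(n), t(n + 1, 0) {}
    void add(int i, long long x) {          // a[i] += x, 1-based index
        for (; i <= n; i += i & -i) t[i] += x;
    }
    long long prefix(int i) const {         // sum of a[1..i]
        long long s = 0;
        for (; i > 0; i -= i & -i) s += t[i];
        return s;
    }
    long long range(int l, int r) const {   // sum of a[l..r]
        return prefix(r) - prefix(l - 1);
    }
};

int main() {
    BIT bit(8);
    bit.add(3, 5);
    bit.add(7, 2);
    cout << bit.range(1, 7) << endl;        // prints 7
    return 0;
}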