How Should I Implement Cilk Parallelism with a Recursive Scan Algorithm? - performance

I implemented a recursive scan (prefix sum) algorithm, which I've included below. Here I simply generate random arrays with sizes that are powers of two up to 2^27 and check the results against a simple sequential scan for accuracy. It works.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <mkl.h>

int *pscan(int *x, int n, int z, int chunk_size);
int reduce(int *x, int n);

int main(int argc, char **argv)
{
    int n;
    int i, j, k;
    int *x, *seq, *r;
    double begin, end;

    srand48(time(0));

    /* Randomly generate array of size n. */
    for (k = 2; k < 28; k++) {
        n = (int) pow(2, k);
        seq = (int *) malloc(sizeof(int) * n);
        x = (int *) malloc(sizeof(int) * n);
        for (i = 0; i < n; i++) {
            x[i] = lrand48() % 100 - 50;
            seq[i] = x[i];
        }

        /* Parallel scan. */
        begin = dsecnd();
        r = pscan(x, n, 0, 2);
        end = dsecnd();
        printf("%d %lf\n", n, end - begin);

        /* Sequential check. */
        for (i = 1; i < n; i++) {
            seq[i] = seq[i - 1] + seq[i];
        }
        for (i = 0; i < n; i++) {
            if (r[i] != seq[i]) {
                fprintf(stderr, "AGGGHHH!!! ERROR. Found with vector: \n");
                for (j = 0; j < n; j++) {
                    printf("%d ", x[j]);
                }
                printf("\n");
                exit(1);
            }
        }

        free(r);
        free(x);
        free(seq);
    }
    return 0;
}
/* Perform parallel scan. */
int *pscan(int *x, int n, int z, int chunk_size)
{
    int i, j;
    int *sums, *sumscan, *scan, **fsum, *rv;

    /* Base case, serially scan a chunk. */
    if (n <= chunk_size) {
        scan = (int *) malloc(sizeof(int) * n);
        scan[0] = x[0] + z;
        for (i = 1; i < n; i++) {
            scan[i] = x[i] + scan[i - 1];
        }
        return scan;
    }

    sums = (int *) malloc(sizeof(int) * (n / chunk_size));

    /* Reduce each chunk of the array. */
    for (i = 0; i < n / chunk_size; i++) {
        sums[i] = reduce(&x[i * chunk_size], chunk_size);
    }

    /* Perform a scan on the sums. */
    sumscan = pscan(sums, n / chunk_size, 0, chunk_size);
    free(sums);

    fsum = (int **) malloc(sizeof(int *) * (n / chunk_size));

    /* Perform a recursive scan on each chunk, using
       the appropriate offset from the sums scan. */
    for (i = 0; i < n / chunk_size; i++) {
        if (i > 0) {
            fsum[i] = pscan(&x[i * chunk_size], chunk_size, sumscan[i - 1], chunk_size);
        } else {
            fsum[i] = pscan(&x[i * chunk_size], chunk_size, 0, chunk_size);
        }
    }
    free(sumscan);

    rv = (int *) malloc(sizeof(int) * n);

    /* Join the arrays. */
    for (i = 0; i < n / chunk_size; i++) {
        for (j = 0; j < chunk_size; j++) {
            rv[i * chunk_size + j] = fsum[i][j];
        }
    }

    for (i = 0; i < n / chunk_size; i++) {
        free(fsum[i]);
    }
    free(fsum);

    return rv;
}

/* Serial reduction. */
int reduce(int *x, int n)
{
    int i;
    int sum;

    sum = 0;
    for (i = 0; i < n; i++) {
        sum += x[i];
    }
    return sum;
}
Now I'd like to parallelize it. Because I'm feeling a little hipster-ish, I've hacked up a Cilk implementation. I just replace the two main for loops with cilk_for to parallelize 1) the reduction and 2) the recursive scan of each chunk, using the appropriate entry of the scanned chunk sums as an offset. It looks like so.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <cilk/cilk.h>
#include <mkl.h>

int *pscan(int *x, int n, int z, int chunk_size);
int reduce(int *x, int n);

int main(int argc, char **argv)
{
    int n;
    int i, j, k;
    int *x, *seq, *r;
    double begin, end;

    srand48(time(0));

    /* Randomly generate array of size n. */
    for (k = 2; k < 28; k++) {
        n = (int) pow(2, k);
        seq = (int *) malloc(sizeof(int) * n);
        x = (int *) malloc(sizeof(int) * n);
        for (i = 0; i < n; i++) {
            x[i] = lrand48() % 100 - 50;
            seq[i] = x[i];
        }

        /* Parallel scan. */
        begin = dsecnd();
        r = pscan(x, n, 0, 2);
        end = dsecnd();
        printf("%d %lf\n", n, end - begin);

        /* Sequential check. */
        for (i = 1; i < n; i++) {
            seq[i] = seq[i - 1] + seq[i];
        }
        for (i = 0; i < n; i++) {
            if (r[i] != seq[i]) {
                fprintf(stderr, "AGGGHHH!!! ERROR. Found with vector: \n");
                for (j = 0; j < n; j++) {
                    printf("%d ", x[j]);
                }
                printf("\n");
                exit(1);
            }
        }

        free(r);
        free(x);
        free(seq);
    }
    return 0;
}
/* Perform parallel scan. */
int *pscan(int *x, int n, int z, int chunk_size)
{
    int i, j;
    int *sums, *sumscan, *scan, **fsum, *rv;

    /* Base case, serially scan a chunk. */
    if (n <= chunk_size) {
        scan = (int *) malloc(sizeof(int) * n);
        scan[0] = x[0] + z;
        for (i = 1; i < n; i++) {
            scan[i] = x[i] + scan[i - 1];
        }
        return scan;
    }

    sums = (int *) malloc(sizeof(int) * (n / chunk_size));

    /* Reduce each chunk of the array. */
    cilk_for (i = 0; i < n / chunk_size; i++) {
        sums[i] = reduce(&x[i * chunk_size], chunk_size);
    }

    /* Perform a scan on the sums. */
    sumscan = pscan(sums, n / chunk_size, 0, chunk_size);
    free(sums);

    fsum = (int **) malloc(sizeof(int *) * (n / chunk_size));

    /* Perform a recursive scan on each chunk, using
       the appropriate offset from the sums scan. */
    cilk_for (i = 0; i < n / chunk_size; i++) {
        if (i > 0) {
            fsum[i] = pscan(&x[i * chunk_size], chunk_size, sumscan[i - 1], chunk_size);
        } else {
            fsum[i] = pscan(&x[i * chunk_size], chunk_size, 0, chunk_size);
        }
    }
    free(sumscan);

    rv = (int *) malloc(sizeof(int) * n);

    /* Join the arrays. */
    for (i = 0; i < n / chunk_size; i++) {
        for (j = 0; j < chunk_size; j++) {
            rv[i * chunk_size + j] = fsum[i][j];
        }
    }

    for (i = 0; i < n / chunk_size; i++) {
        free(fsum[i]);
    }
    free(fsum);

    return rv;
}

/* Serial reduction. */
int reduce(int *x, int n)
{
    int i;
    int sum;

    sum = 0;
    for (i = 0; i < n; i++) {
        sum += x[i];
    }
    return sum;
}
And it works! Well, it returns correct results. It just doesn't achieve the performance I had hoped for. The original performance was
4 0.000004
8 0.000001
16 0.000002
32 0.000003
64 0.000005
128 0.000011
256 0.000019
512 0.000035
1024 0.000068
2048 0.000130
4096 0.000257
8192 0.000512
16384 0.001129
32768 0.002262
65536 0.004519
131072 0.009065
262144 0.018297
524288 0.037416
1048576 0.078307
2097152 0.157448
4194304 0.313855
8388608 0.625689
16777216 1.251949
33554432 2.589439
67108864 5.084731
134217728 10.402186
for the single-threaded application, but the Cilk version performed worse, with the following runtimes
4 0.005383
8 0.000011
16 0.000009
32 0.000111
64 0.000055
128 0.000579
256 0.000339
512 0.000544
1024 0.000701
2048 0.001086
4096 0.001265
8192 0.001742
16384 0.002283
32768 0.003891
65536 0.005398
131072 0.009255
262144 0.020736
524288 0.058156
1048576 0.103893
2097152 0.215460
4194304 0.419988
8388608 0.749368
16777216 1.650938
33554432 2.960451
67108864 5.799836
134217728 11.294398
I have a 24-core machine, so we're obviously not seeing the speed-up we would hope for here. My first thought was that Cilk is mishandling the recursion, causing oversubscription, but Cilk is specifically supposed to handle recursion well. Any tips on how to implement this properly? I tried adding cilk_for to the bottom for loop (freeing everything) and the inner for-loop of the penultimate set of loops (joining the array), but that slowed performance down even more.
Any advice is much appreciated.
However, please don't tell me to switch to Blelloch's parallel scan algorithm discussed here. I already implemented that in Cilk, and it worked quite well. I'd like to see if I can match its performance with this recursive solution.

I fixed my performance problems by finding the optimal chunk size for each problem. At that chunk size, the (same) parallel version performs better than the sequential version.
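To illustrate, the tuning amounts to a sweep like the following sketch (it assumes the pscan() and dsecnd() from the listings above), which tries power-of-two chunk sizes for a given n and keeps the fastest:
#include <stdlib.h>

int *pscan(int *x, int n, int z, int chunk_size);
double dsecnd(void);

/* Sweep power-of-two chunk sizes; return the fastest for this n. */
int best_chunk_size(int *x, int n)
{
    int best = 2;
    double best_time = 1e30;
    for (int cs = 2; cs <= n; cs *= 2) {
        double t0 = dsecnd();
        int *r = pscan(x, n, 0, cs);
        double t1 = dsecnd();
        free(r);
        if (t1 - t0 < best_time) {
            best_time = t1 - t0;
            best = cs;
        }
    }
    return best;
}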
In summary, there were a few things wrong with both my general approach and particularly the chunk size of two:
My benchmarking approach. In a code with a tuning parameter, it doesn't make much sense to plot runtime vs. problem size using the same value for the tuning parameter because the optimal value is dependent on problem size.
A chunk size of two was likely problematic because, while it maximizes parallelism, it also maximizes the number of levels of recursion and, likewise, the overhead that comes along with it.
A chunk size of two prevents vectorization.
As Leeor suggested, a chunk size of two probably also leads to false sharing in the cache.
Props to Leeor for leading me in the right direction.

Related

CUDA - Parallel Reduction Sum of Even and Odd Number Separately

I am trying to implement a parallel reduction sum of even and odd number Separately in CUDA.
I'm new in CUDA programming and I'm trying so hard but I can't find a solution.
I have for example the array : [5, 8, 0, -6, 2]. And the result need to be [4, 5] (Even : 8+0-6+2=4, Odd : 5=5).
But the result of my following code is [8, 5].
I think that my problem is in the notion of "shared" but I do not understand why.
__global__ void sumEvenOdd(int *a, int *b, int N){
    int column = blockIdx.x * blockIdx.x + threadIdx.x;
    __shared__ int s_data[2];
    if (column < N){
        if (a[column] % 2 == 0){
            s_data[0] += a[column];
        }
        else{
            s_data[1] += a[column];
        }
        __syncthreads();
        b[0] = s_data[0];
        b[1] = s_data[1];
    }
}

void initArray(int *a, int N){
    for (unsigned int i = 0; i < N; i++){
        a[i] = rand() % 100;
    }
}

void verify_result(int *a, int *b, int N){
    int *verify_b;
    verify_b = (int*)malloc(2 * sizeof(int));
    verify_b[0] = 0;
    verify_b[1] = 0;
    for (unsigned int i = 0; i < N; i++){
        if (a[i] % 2 == 0){
            verify_b[0] += a[i];
        }
        else{
            verify_b[1] += a[i];
        }
    }
    for (unsigned int i = 0; i < 2; i++){
        assert(verify_b[i] == b[i]);
    }
}

void printResult(int *a, int *b, int N){
    printf("\n");
    for (unsigned int i = 0; i < N; i++){
        printf("%d, ", a[i]);
    }
    printf("\n");
    for (unsigned int i = 0; i < 2; i++){
        printf("%d, ", b[i]);
    }
}

int main(){
    //Array sizes
    int N = 5;
    //Size (in bytes) of matrix
    size_t bytes = N * sizeof(int);
    //Host pointers
    int *a, *b;
    // Allocate host memory
    a = (int*)malloc(bytes);
    b = (int*)malloc(2 * sizeof(int));
    // Initialize array
    initArray(a, N);
    // Device pointers
    int *d_a, *d_b;
    // Allocate device memory
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, 2 * sizeof(int));
    // Copy data to the device
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    //Number of threads
    int THREADS = 128;
    //Number of blocks
    int BLOCKS = (N + THREADS - 1) / THREADS;
    // Launch kernel
    sumEvenOdd<<<BLOCKS, THREADS>>>(d_a, d_b, N);
    cudaDeviceSynchronize();
    // Copy back to the host
    cudaMemcpy(b, d_b, 2 * sizeof(int), cudaMemcpyDeviceToHost);
    // Check result
    verify_result(a, b, N);
    printResult(a, b, N);
    return 0;
}
You cannot just use
    s_data[1] += a[column];
Remember that all threads are going to execute this line at the same time and store to the same position, so every thread is updating s_data simultaneously: that's a data race.
Instead you should use an atomic add:
    atomicAdd(&s_data[1], a[column]);
and you should also be initializing s_data to zeros.
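Putting it together, a minimal sketch of a corrected kernel (my addition; it assumes d_b is zeroed before launch, e.g. with cudaMemset(d_b, 0, 2 * sizeof(int)), and it also fixes the global index, which should be blockIdx.x * blockDim.x + threadIdx.x rather than blockIdx.x * blockIdx.x):
__global__ void sumEvenOdd(int *a, int *b, int N){
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ int s_data[2];
    // one thread per block zeroes the shared accumulators
    if (threadIdx.x == 0){
        s_data[0] = 0;
        s_data[1] = 0;
    }
    __syncthreads();
    if (column < N){
        if (a[column] % 2 == 0){
            atomicAdd(&s_data[0], a[column]);
        } else {
            atomicAdd(&s_data[1], a[column]);
        }
    }
    __syncthreads();
    // one thread per block publishes the block's partial sums;
    // atomicAdd on b keeps this correct even with multiple blocks
    if (threadIdx.x == 0){
        atomicAdd(&b[0], s_data[0]);
        atomicAdd(&b[1], s_data[1]);
    }
}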

Error correction on small message (8-Bit) with high resilience, what is the best method?

I need to implement an ECC algorithm on an 8-bit message with 32 bits to work with (a (32, 8) code). Being new to ECC, I started to google and learn a bit about it, and ended up coming across two ECC methods: Hamming codes and Reed-Solomon. Given that I need my message to be resilient to 4-8 random bit flips on average, I disregarded Hamming and looked into Reed-Solomon. However, after applying it to my problem I realized it is also not suitable for my use case: Reed-Solomon can tolerate a whole flipped symbol (8 bits), but because my errors tend to spread out (on average), it can usually only fix a single error...
Therefore, in the end I just settled on my first instinct, which is to simply copy the data over like so:
00111010 --> 0000 0000 1111 1111 1111 0000 1111 0000
This way every bit is resilient to up to 1 error (8 across all bits) by taking a majority vote among the copies of each actual bit in the encoded message, and every bit can be subject to two bit flips while still detecting that there was an error (which is also usable for my use case, e.g. input 45: returning [45, 173] is still useful).
My question, then, is whether there is any better method. While I am pretty sure there is, I am not sure where to go from here.
By "better method" I mean resilient to even more errors given the (32, 8) ratio.
You can get a distance-11 code pretty easily using randomization. Since any two codewords then differ in at least 11 bit positions, nearest-neighbor decoding corrects up to floor((11 - 1) / 2) = 5 bit flips.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
    uint32_t codes[256];
    for (int i = 0; i < 256; i++) {
        printf("%d\n", i);
retry:
        codes[i] = arc4random();
        for (int j = 0; j < i; j++) {
            if (__builtin_popcount(codes[i] ^ codes[j]) < 11) goto retry;
        }
    }
}
I made a test program for David Eisenstat's example, to show it works for 1 to 5 bits in error. Code is for Visual Studio.
#include <intrin.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int uint32_t;

/*----------------------------------------------------------------------*/
/*      InitCombination - init combination                              */
/*----------------------------------------------------------------------*/
void InitCombination(int a[], int k, int n) {
    for (int i = 0; i < k; i++)
        a[i] = i;
    --a[k-1];
}

/*----------------------------------------------------------------------*/
/*      NextCombination - generate next combination                     */
/*----------------------------------------------------------------------*/
int NextCombination(int a[], int k, int n) {
    int pivot = k - 1;
    while (pivot >= 0 && a[pivot] == n - k + pivot)
        --pivot;
    if (pivot == -1)
        return 0;
    ++a[pivot];
    for (int i = pivot + 1; i < k; ++i)
        a[i] = a[pivot] + i - pivot;
    return 1;
}

/*----------------------------------------------------------------------*/
/*      Rnd32 - return pseudo random 32 bit number                      */
/*----------------------------------------------------------------------*/
uint32_t Rnd32()
{
    static uint32_t r = 0;
    r = r*1664525+1013904223;
    return r;
}

static uint32_t codes[256];

/*----------------------------------------------------------------------*/
/*      main - test random hamming distance 11 code                     */
/*----------------------------------------------------------------------*/
int main() {
    int ptn[5];                             /* error bit indexes */
    int i, j, n;
    uint32_t m;
    int o, p;
    for (i = 0; i < 256; i++) {             /* generate table */
retry:
        codes[i] = Rnd32();
        for (j = 0; j < i; j++) {
            if (__popcnt(codes[i] ^ codes[j]) < 11) goto retry;
        }
    }
    for (n = 1; n <= 5; n++) {              /* test 1 to 5 bit error patterns */
        InitCombination(ptn, n, 32);
        while (NextCombination(ptn, n, 32)) {
            for (i = 0; i < 256; i++) {
                o = m = codes[i];           /* o = m = coded msg */
                for (j = 0; j < n; j++) {   /* add errors to m */
                    m ^= 1 << ptn[j];
                }
                for (j = 0; j < 256; j++) { /* search for code */
                    if ((p = __popcnt(m ^ codes[j])) <= 5)
                        break;
                }
                if (i != j) {               /* check for match */
                    printf("fail %u %u\n", i, j);
                    goto exit0;
                }
            }
        }
    }
exit0:
    return 0;
}

Random memory write is slower than random memory read?

I'm trying to figure out memory access time of sequential/random memory read/write. Here's the code:
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>

#define PRINT_EXCECUTION_TIME(msg, code)               \
    do {                                               \
        struct timeval t1, t2;                         \
        double elapsed;                                \
        gettimeofday(&t1, NULL);                       \
        do {                                           \
            code;                                      \
        } while (0);                                   \
        gettimeofday(&t2, NULL);                       \
        elapsed = (t2.tv_sec - t1.tv_sec) * 1000.0;    \
        elapsed += (t2.tv_usec - t1.tv_usec) / 1000.0; \
        printf(msg " time: %f ms\n", elapsed);         \
    } while (0);

const int RUNS = 20;
const int N = (1 << 27) - 1;
int *data;

int seqR() {
    register int res = 0;
    register int *data_p = data;
    register int pos = 0;
    for (register int j = 0; j < RUNS; j++) {
        for (register int i = 0; i < N; i++) {
            pos = (pos + 1) & N;
            res = data_p[pos];
        }
    }
    return res;
}

int seqW() {
    register int res = 0;
    register int *data_p = data;
    register int pos = 0;
    for (register int j = 0; j < RUNS; j++) {
        for (register int i = 0; i < N; i++) {
            pos = (pos + 1) & N;
            data_p[pos] = res;
        }
    }
    return res;
}

int rndR() {
    register int res = 0;
    register int *data_p = data;
    register int pos = 0;
    for (register int j = 0; j < RUNS; j++) {
        for (register int i = 0; i < N; i++) {
            pos = (pos + i) & N;
            res = data_p[pos];
        }
    }
    return res;
}

int rndW() {
    register int res = 0;
    register int *data_p = data;
    register int pos = 0;
    for (register int j = 0; j < RUNS; j++) {
        for (register int i = 0; i < N; i++) {
            pos = (pos + i) & N;
            data_p[pos] = res;
        }
    }
    return res;
}

int main() {
    data = (int *)malloc(sizeof(int) * (N + 1));
    assert(data);
    for (int i = 0; i < N; i++) {
        data[i] = i;
    }
    for (int i = 0; i < 10; i++) {
        PRINT_EXCECUTION_TIME("seqR", seqR());
        PRINT_EXCECUTION_TIME("seqW", seqW());
        PRINT_EXCECUTION_TIME("rndR", rndR());
        PRINT_EXCECUTION_TIME("rndW", rndW());
    }
    return 0;
}
I used gcc 6.5.0 with -O0 to prevent optimization, but got results like this:
seqR time: 2538.010000 ms
seqW time: 2394.991000 ms
rndR time: 40625.169000 ms
rndW time: 46184.652000 ms
seqR time: 2411.038000 ms
seqW time: 2309.115000 ms
rndR time: 41575.063000 ms
rndW time: 46206.275000 ms
It's easy to understand that sequential access is way faster than random access. However, it doesn't make sense to me that random write is slower than random read while sequential write is faster than sequential read. What reason could cause this?
In addition, am I safe to say memory bandwidth for seqR is (20 * ((1 << 27) - 1) * 4 * 1024 * 1024 * 1024)GB / (2.538)s = 4.12GB/s?
Sounds normal. All x86-64 CPUs (and most other modern CPUs) use write-back / write-allocate caches so a write costs a read before it can commit to cache, and an eventual write-back.
with -O0 to prevent optimization
Since you used register on all your locals, this is one of the rare times when this didn't make your benchmark meaningless.
You could have just used volatile on your arrays, though, to make sure every one of those accesses happened in order, but leave it up to the optimizer how to make that happen.
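For instance, a sketch of seqR written that way; the idea is you would then compile with optimization enabled (say -O2) instead of -O0:
int seqR(void) {
    volatile int *data_p = data;    /* every load must actually happen */
    int res = 0, pos = 0;
    for (int j = 0; j < RUNS; j++) {
        for (int i = 0; i < N; i++) {
            pos = (pos + 1) & N;
            res = data_p[pos];
        }
    }
    return res;
}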
Am I safe to say memory bandwidth for seqR is (20 * ((1 << 27) - 1) * 4 * 1024 * 1024 * 1024)GB / (2.538)s = 4.12GB/s?
No, you have an extra factor of 2^30 and 10^9 in your numerator. But you did it wrong and got close to the right number anyway.
The correct calculation is RUNS * N * sizeof(int) / time bytes per second, or that divided by 10^9 GB/s. Or divided by 2^30 for base 2 GiB/s. Memory sizes are usually in GiB, but you can take your pick with bandwidth. (DRAM clock speeds are normally things like 1600 MHz, so base-10 GB = 10^9 is certainly normal for theoretical max bandwidths in GB/s.)
So 4.23 GB/s in base-10 GB.
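Spelled out with your numbers: RUNS * N * sizeof(int) = 20 * ((1 << 27) - 1) * 4 bytes, which is about 1.074e10 bytes; divided by 2.538 s that is about 4.23e9 B/s, i.e. 4.23 GB/s in base-10 GB, or about 3.94 GiB/s.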
Yes, you initialized the array first so neither timed run is triggering page-faults, but I might still have used the 2nd run after the CPU has warmed up to max turbo, if it hadn't already.
But keep in mind this is un-optimized code. That's how fast your un-optimized code ran, and doesn't tell you much about how fast your memory is. It's probably CPU bound, not memory.
Especially with a redundant & N in there to match the CPU work of the rndR/W functions. HW prefetching is probably able to keep up with 4GB/s, but it's still not even reading 1 int per clock cycle.

MPI_Scatterv submatrix with MPI_Type_struct

I'm currently working on an MPI program, and I'm trying to send blocks of a matrix with MPI_Scatterv to all processes.
Process description
The matrix is given as an array.
First I produce a datatype with MPI_Type_vector to create the necessary block out of the original array.
Second, I create an MPI_Type_struct that should hold rows of blocks.
#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define n 16

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm comm = MPI_COMM_WORLD;
    int p, r;
    MPI_Comm_size(comm, &p);
    MPI_Comm_rank(comm, &r);

    int *arr;
    arr = NULL;
    if (r == 0){
        arr = (int *) malloc(n * n * sizeof(int));
        for (int i = 0; i < n * n; i++) arr[i] = i;
        for (int i = 0; i < n; i++){
            printf("\n");
            for (int j = 0; j < n; j++)
                printf("%4d", arr[i * n + j]);
        }
    }
    printf("\n");

    int ps = sqrt(p);
    int ns = n / ps;
    if (r == 0) {
        printf("ps: %d ns: %d\n", ps, ns);
    }

    /* create datatype */
    MPI_Datatype block;
    MPI_Type_vector(ns, ns, n, MPI_INT, &block);

    int blocks[ps];
    MPI_Aint displs[ps];
    for (int i = 0; i < ps; i++) {
        blocks[i] = 1;
        displs[i] = i * sizeof(int);
    }
    MPI_Datatype types[ps];
    //for (int i = 0; i < ps - 1; i++) types[i] = block;
    //types[ps - 1] = MPI_UB;
    types[0] = block;
    for (int i = 1; i < ps; i++) types[i] = MPI_UB;
    //types[0] = block;
    //types[1] = MPI_UB;

    if (r == 0) {
        printf("displs:\n");
        for (int i = 0; i < ps; i++) printf("%3ld", displs[i]);
        printf("\n");
    }

    MPI_Datatype row;
    MPI_Type_struct(ps, blocks, displs, types, &row);
    MPI_Type_commit(&row);

    /* prepare scatter */
    int sdispl[p]; int sendcounts[p];
    for (int i = 0; i < p; i++) {
        sdispl[i] = (i % ps) + (i / ps) * (ns * ps);
        sendcounts[i] = 1;
    }
    if (r == 0) {
        printf("sdispl: \n");
        for (int i = 0; i < 4; i++) printf("%3d", sdispl[i]);
        printf("\n");
    }

    int rcv[ns * ns];
    MPI_Scatterv(arr, sendcounts, sdispl, row, rcv, ns * ns, MPI_INT, 0, comm);

    int result = 1;
    if (r == result) {
        printf("result for %d:\n", result);
        for (int i = 0; i < ns * ns; i++) {
            printf("%4d", rcv[i]);
            if ((i+1) % ns == 0) printf("\n");
        }
    }

    if (arr != NULL) free(arr);
    MPI_Finalize();
    return 0;
}
So far the structure of the blocks is correct.
The problem
The block that was sent to process r = 1 starts with 3 instead of 4. The block for process r = 2 also starts with 6, and the one for process r = 3 starts with 9.
For r == 4 it jumps to 48.
What it should do
r start
0 0
1 4
2 8
3 12
4 64
5 68
6 ...
15 204
The help I would need
I think that I'm making some mistake with displs and sdispl.
Compiling and Running the example
The code is compiled with the following command:
mpicc -o main main.c -lm
I run the code with:
mpirun -np 16 ./main
Thanks for any help in advance!
With Zulan's hint I was able to solve my problem.
The following code is based on the excellent answer about subarrays. The key is MPI_Type_create_resized, which shrinks the extent of the subarray type to ns * sizeof(int) so that the integer displacements passed to MPI_Scatterv step from one block start to the next.
#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define n 8

void print_arr(int *arr, int x) {
    printf("\n");
    for (int i = 0; i < x*x; i++){
        if (i % x == 0) printf("\n");
        printf("%4d", arr[i]);
    }
    printf("\n");
}

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm comm = MPI_COMM_WORLD;
    int p, r;
    MPI_Comm_size(comm, &p);
    MPI_Comm_rank(comm, &r);

    /* number of processes in dim x and dim y */
    int ps = sqrt(p);
    /* number of elements in dim x and dim y in sarr */
    int ns = n/ps;

    /* array of data - distributed by process 0 */
    int *arr = NULL;
    if (r==0) {
        arr = (int *) malloc(n * n * sizeof(int));
        for (int i = 0; i < n*n; i++) arr[i] = i;
        print_arr(arr, n);
    }

    MPI_Datatype type, resizedtype;
    int sizes[2]    = {n,n};
    int subsizes[2] = {ns,ns};
    int starts[2]   = {0,0};
    MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_INT, &type);
    MPI_Type_create_resized(type, 0, ns*sizeof(int), &resizedtype);
    MPI_Type_commit(&resizedtype);

    int counts[p];
    for (int i = 0; i < p; i++) counts[i] = 1;
    int displs[p];
    for (int i = 0; i < p; i++) displs[i] = i%ps + i/ps * ns * ps;

    /* subarray to store distributed data */
    int sarr[ns * ns];
    /* send submatrices to all processes */
    MPI_Scatterv(arr, counts, displs, resizedtype, sarr, ns*ns, MPI_INT, 0, comm);

    /* print received data for process pr */
    int pr = 3;
    if (r == pr)
        print_arr(sarr, ns);

    /* free arr */
    if (arr != NULL) free(arr);
    MPI_Finalize();
    return 0;
}
You can compile the example with
mpicc -o main main.c
and run it with
mpirun -np 4 ./main
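With 4 processes, ps = 2 and ns = 4, so rank pr = 3 gets displacement 3 % 2 + (3 / 2) * ns * ps = 9 extents of ns ints, i.e. the block starting at element 36. The run should therefore print something like:
  36  37  38  39
  44  45  46  47
  52  53  54  55
  60  61  62  63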

The most efficient way to remove all characters in the 1st string from the 2nd string?

I was asked about this question. I can only think of an O(nm) algorithm, where n is the length of the 1st string and m is the length of the 2nd string.
Well, you can do it in O(n + m). Just create a reference table showing whether a character exists in the first string. Something like this (pseudo-code in no particular language):
// fill the table
for (int i = 0; i < a.length; ++i) {
    characterExists[a[i]] = true;
}

// iterate over second string
for (int i = 0; i < b.length; ++i) {
    if (!characterExists[b[i]]) {
        // remove char (or do whatever else you want)
    }
}
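In C, that table approach might look like the following sketch (remove_chars is a name I made up; it compacts the second string in place):
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Remove from b every character that occurs in a: O(n + m) total. */
void remove_chars(const char *a, char *b) {
    bool exists[UCHAR_MAX + 1] = { false };
    for (size_t i = 0; a[i] != '\0'; i++)
        exists[(unsigned char) a[i]] = true;    /* fill the table */
    size_t out = 0;
    for (size_t i = 0; b[i] != '\0'; i++)
        if (!exists[(unsigned char) b[i]])
            b[out++] = b[i];                    /* keep survivors */
    b[out] = '\0';
}

int main(void) {
    char s[] = "hello world";
    remove_chars("lo", s);
    printf("%s\n", s);    /* prints "he wrd" */
    return 0;
}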
Have you checked out the Boyer-Moore String Search Algorithm?

The worst-case to find all occurrences in a text needs approximately 3*N comparisons, hence the complexity is O(n), regardless of whether the text contains a match or not. This proof took some years to determine. In the year the algorithm was devised, 1977, the maximum number of comparisons was shown to be no more than 6*N; in 1980 it was shown to be no more than 4*N, until Cole's result in Sep 1991.
C implementation:
#include <limits.h>
#include <string.h>

#define ALPHABET_SIZE (1 << CHAR_BIT)

static void compute_prefix(const char* str, size_t size, int result[size]) {
    size_t q;
    int k;

    result[0] = 0;
    k = 0;
    for (q = 1; q < size; q++) {
        while (k > 0 && str[k] != str[q])
            k = result[k-1];
        if (str[k] == str[q])
            k++;
        result[q] = k;
    }
}

static void prepare_badcharacter_heuristic(const char *str, size_t size,
        int result[ALPHABET_SIZE]) {
    size_t i;

    for (i = 0; i < ALPHABET_SIZE; i++)
        result[i] = -1;
    for (i = 0; i < size; i++)
        result[(size_t) str[i]] = i;
}

void prepare_goodsuffix_heuristic(const char *normal, size_t size,
        int result[size + 1]) {
    char *left = (char *) normal;
    char *right = left + size;
    char reversed[size+1];
    char *tmp = reversed + size;
    size_t i;

    /* reverse string */
    *tmp = 0;
    while (left < right)
        *(--tmp) = *(left++);

    int prefix_normal[size];
    int prefix_reversed[size];

    compute_prefix(normal, size, prefix_normal);
    compute_prefix(reversed, size, prefix_reversed);

    for (i = 0; i <= size; i++) {
        result[i] = size - prefix_normal[size-1];
    }

    for (i = 0; i < size; i++) {
        const int j = size - prefix_reversed[i];
        const int k = i - prefix_reversed[i]+1;

        if (result[j] > k)
            result[j] = k;
    }
}

/*
 * Boyer-Moore search algorithm
 */
const char *boyermoore_search(const char *haystack, const char *needle) {
    /*
     * Calc string sizes
     */
    size_t needle_len, haystack_len;
    needle_len = strlen(needle);
    haystack_len = strlen(haystack);

    /*
     * Simple checks
     */
    if (haystack_len == 0)
        return NULL;
    if (needle_len == 0)
        return haystack;

    /*
     * Initialize heuristics
     */
    int badcharacter[ALPHABET_SIZE];
    int goodsuffix[needle_len+1];

    prepare_badcharacter_heuristic(needle, needle_len, badcharacter);
    prepare_goodsuffix_heuristic(needle, needle_len, goodsuffix);

    /*
     * Boyer-Moore search
     */
    size_t s = 0;
    while (s <= (haystack_len - needle_len))
    {
        size_t j = needle_len;
        while (j > 0 && needle[j-1] == haystack[s+j-1])
            j--;

        if (j > 0)
        {
            int k = badcharacter[(size_t) haystack[s+j-1]];
            int m;
            if (k < (int)j && (m = j-k-1) > goodsuffix[j])
                s += m;
            else
                s += goodsuffix[j];
        }
        else
        {
            return haystack + s;
        }
    }

    /* not found */
    return NULL;
}
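For what it's worth, a tiny usage sketch for the search function above (my addition, not part of the original answer):
#include <stdio.h>

int main(void) {
    const char *hit = boyermoore_search("the quick brown fox", "brown");
    printf("%s\n", hit ? hit : "not found");    /* expected: "brown fox" */
    return 0;
}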
