I am trying to pack an array and send it from one process to another. I am running on only 2 processes. I have written the following code:
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
int main( int argc, char *argv[] )
{
    MPI_Init(&argc, &argv);
    int myrank, size; // size will hold the number of processes
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    // declaring the matrix
    double mat[4] = {1, 2, 3, 4};
    int r = 4;
    // Now we will send from one matrix to another using MPI_Pack.
    // For that we will need buffers with the same number of elements as the matrix.
    double snd_buf[r];
    double recv_buf[r];
    double buf[r];
    // total size in bytes of the data that is being sent (8 == sizeof(double))
    int outs = r * 8;
    int position = 0;
    MPI_Status status[r];
    // packing and sending the data
    if (myrank == 0)
    {
        for (int i = 0; i < r; i++)
        {
            MPI_Pack(&mat[i], 1, MPI_DOUBLE, snd_buf, outs, &position, MPI_COMM_WORLD);
        }
        MPI_Send(&snd_buf, r, MPI_PACKED, 1 /*dest*/, 100 /*tag*/, MPI_COMM_WORLD);
    }
    // receiving the data
    if (myrank == 1)
    {
        MPI_Recv(recv_buf, r, MPI_PACKED, 0 /*src*/, 100 /*tag*/, MPI_COMM_WORLD, &status[0]);
        position = 0;
        for (int i = 0; i < r; i++)
        {
            MPI_Unpack(recv_buf, outs, &position, &buf[i], 1, MPI_DOUBLE, MPI_COMM_WORLD);
        }
    }
    // checking whether the unpacked data in buf is correct
    if (myrank == 1)
    {
        for (int i = 0; i < r; i++)
        {
            printf("%lf ", buf[i]);
        }
        printf("\n");
    }
    MPI_Finalize();
    return 0;
}
I am expecting the output 1 2 3 4, but I am only getting 0 0 0 0.
I suspected a problem with snd_buf, but snd_buf seems to be fine: it holds all the elements 1 2 3 4 correctly.
I have also tried to send and receive like this:
//packing and sending the data
if (myrank == 0)
{
    MPI_Pack(&mat[0], 4, MPI_DOUBLE, snd_buf, outs, &position, MPI_COMM_WORLD);
    MPI_Send(snd_buf, r, MPI_PACKED, 1 /*dest*/, 100 /*tag*/, MPI_COMM_WORLD);
}
//receiving the data
if (myrank == 1)
{
    MPI_Recv(recv_buf, r, MPI_PACKED, 0 /*src*/, 100 /*tag*/, MPI_COMM_WORLD, &status[0]);
    position = 0;
    MPI_Unpack(recv_buf, outs, &position, &buf[0], 4, MPI_DOUBLE, MPI_COMM_WORLD);
}
Still, this was of no help and the output was all 0s.
I cannot figure out why I am facing this error. Any help will be appreciated. Thank you.
Answering my own question. The mistake I made was pointed out by Gilles in the comments.
This is the solution to the problem I faced:
Send and receive outs elements of type MPI_PACKED instead of r.
PS: Consider declaring snd_buf and recv_buf as char[] in order to avoid this kind of confusion. char[] won't solve the issue by itself, but it makes the code more readable (and makes it more obvious that sending/receiving r MPI_PACKED is not the right thing to do).
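For reference, here is a minimal sketch of the corrected send/receive, assuming the same variables as in the question. After packing, position holds the exact number of packed bytes (equal to outs here), so either count works:

if (myrank == 0)
{
    position = 0;
    MPI_Pack(&mat[0], 4, MPI_DOUBLE, snd_buf, outs, &position, MPI_COMM_WORLD);
    // the fix: the count is outs (or position) bytes of MPI_PACKED, not r
    MPI_Send(snd_buf, outs, MPI_PACKED, 1 /*dest*/, 100 /*tag*/, MPI_COMM_WORLD);
}
if (myrank == 1)
{
    // receive outs bytes of packed data, then unpack 4 doubles out of them
    MPI_Recv(recv_buf, outs, MPI_PACKED, 0 /*src*/, 100 /*tag*/, MPI_COMM_WORLD, &status[0]);
    position = 0;
    MPI_Unpack(recv_buf, outs, &position, &buf[0], 4, MPI_DOUBLE, MPI_COMM_WORLD);
}

With this change, rank 1 should print 1.000000 2.000000 3.000000 4.000000.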
I am trying to do the following with the help of MPI_Reduce and communicator groups. I have created 2 different groups, one with the odd-ranked processes and one with the even-ranked processes; within the new groups the processes are renumbered from 0 to N. I reduce all the values in each group using MPI_SUM. Say group 1 holds the values 1, 2, 3 and group 2 holds 4, 5, 6; after reducing, group 1 gives me 6 and group 2 gives me 15, which I store in recv_val1 and recv_val2 respectively. Now I want to add the two results together, but whenever I do so, I get garbage values. Why is this happening?
#include <stdio.h>
#include "mpi.h"
int main( int argc, char *argv[])
{
    int myrank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    int color = myrank % 2;
    int newrank, newsize;
    int sendval, maxval, recv_val1, recv_val2;
    MPI_Status status;
    MPI_Comm newcomm;
    MPI_Comm_split(MPI_COMM_WORLD, color, myrank, &newcomm);
    MPI_Comm_rank( newcomm, &newrank );
    MPI_Comm_size( newcomm, &newsize );
    //printf("color and newrank are %d and %d \n", color, newrank);
    sendval = newrank;
    if (color == 0)
        MPI_Reduce(&sendval, &recv_val1, 1, MPI_INT, MPI_SUM, 0, newcomm); // find sum
    else
        MPI_Reduce(&sendval, &recv_val2, 1, MPI_INT, MPI_SUM, 0, newcomm); // find sum
    if (newrank == 0 && color == 0)
        printf("%d\n", recv_val1);
    if (newrank == 0 && color == 1)
        printf("%d\n", recv_val2);
    if (myrank == 0)
        printf(" sum is %d\n", recv_val1 + recv_val2);
    MPI_Comm_free(&newcomm);
    MPI_Finalize();
    return 0;
}
I am following a piece of source code from my documents, but I encounter an error when I try to use MPI_Send() and MPI_Recv() from the Open MPI library.
I have googled and read some threads on this site, but I cannot find a solution to this error.
This is my error:
mca_oob_tcp_msg_recv: readv failed: Unknown error (108)
And this is the code that I'm following:
#include <stdio.h>
#include <string.h>
#include <conio.h>
#include <mpi.h>
int main(int argc, char **argv) {
    int rank, size, mesg, tag = 123;
    MPI_Status status;
    MPI_Init(&argv, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (size < 2) {
        printf("Need at least 2 processes!\n");
    } else if (rank == 0) {
        mesg = 11;
        MPI_Send(&mesg, 1, MPI_INT, 1, tag, MPI_COMM_WORLD);
        MPI_Recv(&mesg, 1, MPI_INT, 1, tag, MPI_COMM_WORLD, &status);
        printf("Rank 0 received %d from rank 1\n", mesg);
    } else if (rank == 1) {
        MPI_Recv(&mesg, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
        printf("Rank 1 received %d from rank 0/n", mesg);
        mesg = 42;
        MPI_Send(&mesg, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}
I commented out all of the MPI_Send() and MPI_Recv() calls, and my program worked. On the other hand, if I commented out either MPI_Send() or MPI_Recv() alone, I still got that error. So I think the problem is with the MPI_Send() and MPI_Recv() functions.
P.S.: I'm using Open MPI v1.6 on Windows 8.1 OS.
You pass in the wrong arguments to MPI_Init (two times argv, instead of argc and argv once each).
The sends and receives actually look fine, I think. But there is also one typo in one of your prints with a /n instead of \n.
Here is what works for me (on Mac OS X, though):
int main(int argc, char **argv) {
    int rank, size, mesg, tag = 123;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (size < 2) {
        printf("Need at least 2 processes!\n");
    } else if (rank == 0) {
        mesg = 11;
        MPI_Send(&mesg, 1, MPI_INT, 1, tag, MPI_COMM_WORLD);
        MPI_Recv(&mesg, 1, MPI_INT, 1, tag, MPI_COMM_WORLD, &status);
        printf("Rank 0 received %d from rank 1\n", mesg);
    } else if (rank == 1) {
        MPI_Recv(&mesg, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
        printf("Rank 1 received %d from rank 0\n", mesg);
        mesg = 42;
        MPI_Send(&mesg, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}
If this does not work, I'd guess your OS does not let the processes communicate with each other via the method chosen by OpenMPI.
Pass MPI_STATUS_IGNORE instead of &status in MPI_Recv in both places, if you do not need the status.
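Applied to the code above, the receive on rank 0 would become (a sketch; only the last argument changes):

MPI_Recv(&mesg, 1, MPI_INT, 1, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

This only applies if you never read the fields of status, which is the case in this program.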
I am trying to invert a PGM image using MPI. The grayscale (PGM) image should be loaded on the root processor and then sent to each of the s^2 processors. Each processor inverts a block of the given image, and the inverted blocks are gathered back on the root processor, which assembles them into the final image and writes it to a PGM file. I ran the following code, but did not get any output: the image was read after running the code, but there was no indication of the resulting image being written. Could you please let me know what could be wrong with it?
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <memory.h>
#define max(x, y) ((x>y) ? (x):(y))
#define min(x, y) ((x<y) ? (x):(y))
int xdim;
int ydim;
int maxraw;
unsigned char *image;
void ReadPGM(FILE*);
void WritePGM(FILE*);
#define s 2
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int p, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    const int NPROWS = s;              /* number of rows in _decomposition_ */
    const int NPCOLS = s;              /* number of cols in _decomposition_ */
    const int BLOCKROWS = xdim/NPROWS; /* number of rows in _block_ */
    const int BLOCKCOLS = ydim/NPCOLS; /* number of cols in _block_ */
    int i, j;
    FILE *fp;
    float BLimage[BLOCKROWS*BLOCKCOLS];
    for (int ii=0; ii<BLOCKROWS*BLOCKCOLS; ii++)
        BLimage[ii] = 0;
    float BLfilteredMat[BLOCKROWS*BLOCKCOLS];
    for (int ii=0; ii<BLOCKROWS*BLOCKCOLS; ii++)
        BLfilteredMat[ii] = 0;
    if (rank == 0) {
        /* begin reading PGM.... */
        ReadPGM(fp);
    }
    MPI_Datatype blocktype;
    MPI_Datatype blocktype2;
    MPI_Type_vector(BLOCKROWS, BLOCKCOLS, ydim, MPI_FLOAT, &blocktype2);
    MPI_Type_create_resized(blocktype2, 0, sizeof(float), &blocktype);
    MPI_Type_commit(&blocktype);
    int disps[NPROWS*NPCOLS];
    int counts[NPROWS*NPCOLS];
    for (int ii=0; ii<NPROWS; ii++) {
        for (int jj=0; jj<NPCOLS; jj++) {
            disps[ii*NPCOLS+jj] = ii*ydim*BLOCKROWS + jj*BLOCKCOLS;
            counts[ii*NPCOLS+jj] = 1;
        }
    }
    MPI_Scatterv(image, counts, disps, blocktype, BLimage, BLOCKROWS*BLOCKCOLS, MPI_FLOAT, 0, MPI_COMM_WORLD);
    //************** Invert the block **************//
    for (int proc=0; proc<p; proc++) {
        if (proc == rank) {
            for (int j = 0; j < BLOCKCOLS; j++) {
                for (int i = 0; i < BLOCKROWS; i++) {
                    BLfilteredMat[j*BLOCKROWS+i] = 255 - image[j*BLOCKROWS+i];
                }
            }
        } // close if (proc == rank)
        MPI_Barrier(MPI_COMM_WORLD);
    } // close for (int proc=0; proc<p; proc++)
    MPI_Gatherv(BLfilteredMat, BLOCKROWS*BLOCKCOLS, MPI_FLOAT, image, counts, disps, blocktype, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        /* Begin writing PGM.... */
        WritePGM(fp);
        free(image);
    }
    MPI_Finalize();
    return (1);
}
It is very likely that MPI is not the right tool for this job, because the job is inherently bandwidth-limited.
Think of it this way: you have a coloring book with images that you want to color in.
Method 1: you take your time and color them in one by one.
Method 2: you copy each page to a new sheet of paper and mail it to a friend who then colors it in for you. He mails it back to you and in the end you glue all the pages you received from all of your friends together to make one colored-in book.
Note that method two involves copying the whole book, which is arguably the same amount of work needed to color in the whole book. So method two is less time-efficient without even considering the overhead of shoving the pages into an envelope, licking the stamp, going to the post office and waiting for the letter to be delivered.
If you look at your code, every transmitted byte is only touched once throughout the whole program in this line:
BLfilteredMat[j*BLOCKROWS+i] = 255 - image[j*BLOCKROWS+i];
A single processor is much faster at subtracting two integers than it is at sending an integer over the wire, so MPI is ill-advised for your particular problem.
My suggestion for solving your problem: try to avoid unnecessary communication whenever possible. Do all processes have access to the file system on which the files are located? If so, you could try reading the image directly from the filesystem.
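For comparison, here is a sketch of the whole inversion done without MPI, assuming image, xdim, ydim and an 8-bit maxval of 255 as in the question's code. Each byte is touched exactly once, with no communication at all:

/* serial inversion: one pass over the image buffer */
for (int i = 0; i < xdim * ydim; i++)
    image[i] = 255 - image[i];

If this single loop is faster than shipping the image over the network and back (which it almost certainly is), the parallel version cannot win.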
I'm trying to write a simple program with MPI that finds all numbers less than 514 that are equal to a power of the sum of their digits (for example, 512 = (5+1+2)^3). The problem I have is with the main loop: it works just fine with a few iterations (c = 10), but when I try to increase the number of iterations (c = x), mpiexec.exe just hangs, seemingly in the middle of a printf call.
I'm pretty sure a deadlock is to blame, but I couldn't find one.
The source code:
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include "mpi.h"
int main(int argc, char* argv[])
{
    //our number
    int x = 514;
    //amount of iterations
    int c = 10;
    //tags for message identification
    int tag = 42;
    int tagnumber = 43;
    int np, me, y1, y2;
    MPI_Status status;
    /* Initialize MPI */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    /* Check that we run on at least two processes */
    if (np < 2)
    {
        printf("You have to use at least 2 processes to run this program\n");
        MPI_Finalize();
        exit(0);
    }
    //begin iterations
    while (c > 0)
    {
        //if main process, send messages to all worker processes
        if (me == 0)
        {
            printf("Amount of processes: %d\n", np);
            int b = 1;
            while (b < np)
            {
                int q = x - b;
                //send a number to a worker process
                MPI_Send(&q, 1, MPI_INT, b, tagnumber, MPI_COMM_WORLD);
                printf("Process %d sending to process %d, value: %d\n", me, b, q);
                //get a number back from the worker process
                MPI_Recv(&y2, 1, MPI_INT, b, tag, MPI_COMM_WORLD, &status);
                printf("Process %d received value %d\n", me, y2);
                //compare it with the sent one
                if (q == y2)
                {
                    //if they're equal, then print the result
                    printf("\nValue found: %d\n", q);
                }
                b++;
            }
            x = x - b + 1;
            b = 1;
        }
        else
        {
            //if not the main process, process the received number and send the result back
            MPI_Recv(&y1, 1, MPI_INT, 0, tagnumber, MPI_COMM_WORLD, &status);
            int sum = 0;
            int y2 = y1;
            while (y1 != 0)
            {
                //find the number's digit sum
                sum += y1 % 10;
                y1 /= 10;
            }
            int sum2 = sum;
            while (sum2 < y2)
            {
                //calculate the exponentiation
                sum2 = sum2 * sum;
            }
            MPI_Send(&sum2, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        }
        c--;
    }
    MPI_Finalize();
    exit(0);
}
I run the compiled exe file as "mpiexec.exe -n 4 lab2.exe". I am using the HPC Pack 2008 SDK, if that's of any use to you guys.
Is there any way to fix it? Or maybe some way to debug that situation properly?
Thanks a lot in advance!
Not sure if you already found where the problem is, but your infinite run happens in this loop:
while (sum2 < y2)
{
    //calculate the exponentiation
    sum2 = sum2 * sum;
}
You can confirm this by setting c to about 300 or above and then adding a printf call inside this while loop. I haven't completely pinpointed your logic error, but I have marked three comments below at the code locations that look strange to me:
while (c > 0)
{
    if (me == 0)
    {
        ...
        while (b < np)
        {
            int q = x - b; //<-- you subtract b from x here
            ...
            b++;
        }
        x = x - b + 1; //<-- you subtract b again. sure this is what you want?
        b = 1;         //<-- this is useless
    }
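One concrete failure mode I can add as a guess: once x has decreased far enough that a worker receives a number whose digit sum is 1 (for example q = 100, digits 1+0+0), sum2 = sum2*sum stays at 1 forever and while(sum2 < y2) never exits, which would match the hang you see at larger c. A minimal guard might look like this sketch:

int sum2 = sum;
if (sum > 1) //0 and 1 never grow under multiplication, so only loop for sum >= 2
{
    while (sum2 < y2)
    {
        sum2 = sum2 * sum; //keep multiplying until we reach or pass y2
    }
}

With this guard, q = 1 (digit sum 1) still reports a match, while q = 10 or q = 100 correctly do not, instead of looping forever.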
Hope this helps.
I have a structure xyz as given below:
struct xyz {
    char a;
    int32_t b;
    char c[50];
    uint32_t d;
    uchar e[10];
};
I need to broadcast it, so I used MPI_Bcast(), which requires an MPI datatype corresponding to struct xyz. To create that new MPI datatype I used MPI_Type_create_struct(), declaring
MPI_Datatype MPI_my_new_datatype, oldtypes[4];
and using the MPI datatypes corresponding to the structure members above:
oldtypes[4] = {MPI_CHAR, MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED_CHAR};
To create the new datatype I used the following calls:
MPI_Type_create_struct(4, blockcounts, offsets, oldtypes, &MPI_my_new_datatype);
MPI_Type_commit(&MPI_my_new_datatype);
Now it compiles, but gives the following runtime error:
An error occurred in MPI_Type_create_struct on communicator MPI_COMM_WORLD: MPI_ERR_ARG: invalid argument of some other kind. MPI_ERRORS_ARE_FATAL (goodbye).
Can anyone find where the problem is?
You can't "bundle up" similar types like that. Each field needs to be addressed separately, and there are 5 of them, not 4.
Also note that, in general, it's a good idea to actually "measure" the offsets rather than infer them.
The following works:
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#include <stdint.h>

struct xyz_t {
    char a; int32_t b; char c[50]; uint32_t d; unsigned char e[10];
};

int main(int argc, char **argv) {
    int rank, size;
    MPI_Datatype oldtypes[5] = {MPI_CHAR, MPI_INT, MPI_CHAR, MPI_UNSIGNED, MPI_UNSIGNED_CHAR};
    int blockcounts[5] = {1, 1, 50, 1, 10};
    MPI_Datatype my_mpi_struct;
    MPI_Aint offsets[5];
    struct xyz_t old, new;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    /* find offsets */
    offsets[0] = (char*)&(old.a) - (char*)&old;
    offsets[1] = (char*)&(old.b) - (char*)&old;
    offsets[2] = (char*)&(old.c) - (char*)&old;
    offsets[3] = (char*)&(old.d) - (char*)&old;
    offsets[4] = (char*)&(old.e) - (char*)&old;
    MPI_Type_create_struct(5, blockcounts, offsets, oldtypes, &my_mpi_struct);
    MPI_Type_commit(&my_mpi_struct);
    if (rank == 0) {
        old.a = 'a';
        old.b = (int)'b';
        strcpy(old.c, "This is field c");
        old.d = (unsigned int)'d';
        strcpy((char *)old.e, "Field e");
        MPI_Send(&old, 1, my_mpi_struct, 1, 1, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Status status;
        MPI_Recv(&new, 1, my_mpi_struct, 0, 1, MPI_COMM_WORLD, &status);
        printf("new.a = %c\n", new.a);
        printf("new.b = %d\n", new.b);
        printf("new.e = %s\n", new.e);
    }
    MPI_Type_free(&my_mpi_struct);
    MPI_Finalize();
    return 0;
}
Running:
$ mpirun -np 2 ./struct
new.a = a
new.b = 98
new.e = Field e
Updated: as Dave Goodell points out below, the offset calculations would be better done as
#include <stddef.h>
/* ... */
offsets[0] = offsetof(struct xyz_t,a);
offsets[1] = offsetof(struct xyz_t,b);
offsets[2] = offsetof(struct xyz_t,c);
offsets[3] = offsetof(struct xyz_t,d);
offsets[4] = offsetof(struct xyz_t,e);
and, if your MPI supports it (most should, though Open MPI was slow to add some of the MPI 2.2 types), the MPI_UNSIGNED should be replaced with MPI_UINT32_T, and likewise MPI_INT with MPI_INT32_T for the int32_t field.
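With those types available, the oldtypes array from the code above would become (a sketch, assuming an MPI 2.2 implementation):

/* fixed-width MPI types matching int32_t and uint32_t exactly */
MPI_Datatype oldtypes[5] = {MPI_CHAR, MPI_INT32_T, MPI_CHAR, MPI_UINT32_T, MPI_UNSIGNED_CHAR};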