Consider the following fragment of OpenMP code, which transfers private data between two threads using an intermediate shared variable:
#pragma omp parallel shared(x) private(a,b)
{
...
a = somefunction(b);
if (omp_get_thread_num() == 0) {
x = a;
}
}
#pragma omp parallel shared(x) private(a,b)
{
if (omp_get_thread_num() == 1) {
a = x;
}
b = anotherfunction(a);
...
}
I would need (in pseudocode) to transfer private data from one process to another using a one-sided message-passing library.
Any ideas?
This is possible, but there's a lot more "scaffolding" involved -- after all, you are communicating data between potentially completely different computers.
The coordination for this sort of thing is done through windows of data that are made accessible to other processes, and with lock/unlock operations that coordinate access to that data. The locks aren't really locks in the sense of mutexes; they are more like synchronization points bracketing access to the window.
I don't have time right now to explain this in the detail I'd like, but below is an example of using MPI-2 to do something like shared-memory flagging on a system that doesn't have shared memory:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "mpi.h"
int main(int argc, char** argv)
{
int rank, size, *a, geta;
int x;
int ierr;
MPI_Win win;
const int RCVR=0;
const int SENDER=1;
ierr = MPI_Init(&argc, &argv);
ierr |= MPI_Comm_rank(MPI_COMM_WORLD, &rank);
ierr |= MPI_Comm_size(MPI_COMM_WORLD, &size);
if (ierr) {
fprintf(stderr,"Error initializing MPI library; failing.\n");
exit(-1);
}
if (rank == RCVR) {
MPI_Alloc_mem(sizeof(int), MPI_INFO_NULL, &a);
*a = 0;
} else {
a = NULL;
}
MPI_Win_create(a, (rank == RCVR) ? sizeof(int) : 0, sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win); /* only the receiver exposes any memory */
if (rank == SENDER) {
/* Lock the receiver's window */
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, RCVR, 0, win);
x = 5;
/* put 1 int (from &x) to 1 int rank RCVR, at address 0 in window "win"*/
MPI_Put(&x, 1, MPI_INT, RCVR, 0, 1, MPI_INT, win);
/* Unlock */
MPI_Win_unlock(RCVR, win);
printf("%d: My job here is done.\n", rank);
}
if (rank == RCVR) {
for (;;) {
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, RCVR, 0, win);
MPI_Get(&geta, 1, MPI_INT, RCVR, 0, 1, MPI_INT, win);
MPI_Win_unlock(RCVR, win);
if (geta == 0) {
printf("%d: a still zero; sleeping.\n",rank);
sleep(2);
} else
break;
}
printf("%d: a now %d!\n",rank,geta);
printf("a = %d\n", *a);
MPI_Win_free(&win);
if (rank == RCVR) MPI_Free_mem(a);
MPI_Finalize();
return 0;
}
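For completeness, here is a minimal sketch of the same single-integer transfer done with MPI_Win_fence (active-target) synchronization instead of lock/unlock. Every rank calls the fences collectively, and the put is guaranteed to be visible after the second fence; run it with at least two ranks (e.g. mpirun -np 2). This is just an illustration of the alternative synchronization style, not part of the original program.

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, *a, x;
    MPI_Win win;
    const int RCVR = 0, SENDER = 1;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* every rank exposes one int through the window; only RCVR's is written */
    MPI_Alloc_mem(sizeof(int), MPI_INFO_NULL, &a);
    *a = 0;
    MPI_Win_create(a, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_fence(0, win);                 /* open the epoch (collective) */
    if (rank == SENDER) {
        x = 5;
        MPI_Put(&x, 1, MPI_INT, RCVR, 0, 1, MPI_INT, win);
    }
    MPI_Win_fence(0, win);                 /* close it; the put is now visible */

    if (rank == RCVR)
        printf("a = %d\n", *a);

    MPI_Win_free(&win);
    MPI_Free_mem(a);
    MPI_Finalize();
    return 0;
}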
Related
I've written an MPI code that currently parallelizes by sending an equal number of array elements to each process to do work (thus, for 6 workers, the array is broken into 6 equal parts). What I would like to do is send small chunks only when a worker is ready to receive, and receive completed chunks without blocking future sends; this way, if one chunk takes 10 seconds but the other chunks take 1 second, other data can be processed while waiting for the long chunk to complete.
Here's some skeleton code I've put together:
#include <mpi.h>
#include <iostream>
#include <vector>
#include <cmath>
struct crazytaxi
{
double a = 10.0;
double b = 25.2;
double c = 222.222;
};
int main(int argc, char** argv)
{
//Initial and temp kanno vectors
std::vector<crazytaxi> kanno;
std::vector<crazytaxi> kanno_tmp;
//init MPI
MPI_Init(NULL,NULL);
//allocate vector
int SZ = 4200;
kanno.resize(SZ);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD,&world_size);
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD,&world_rank);
if (world_rank == 0)
{
for (int i = 0; i < SZ; i++) {
kanno[i].a = 1.0*i;
kanno[i].b = 10.0/(i+1);
}
}
for (int j = 0; j < 10; j++) {
//Make sure all processes have same kanno vector;
if (world_rank == 0) {
for (int i = 1; i < world_size; i++)
MPI_Send(&kanno[0],sizeof(crazytaxi)*kanno.size(),MPI_BYTE,i,3,MPI_COMM_WORLD);
} else {
MPI_Recv(&kanno[0],sizeof(crazytaxi)*kanno.size(),MPI_BYTE,0,3,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}
//copy to tmp vector
kanno_tmp = kanno;
MPI_Barrier(MPI_COMM_WORLD);
//the sender
if (world_rank == 0) {
unsigned p1 = 0;
unsigned segment = 10;
unsigned p2 = segment;
while (p1 < SZ) {
for (int i = 0; i < world_size; i++) {
//if (process #i is ready to receive)
//Send data in chunks of 10 to i
//else
//continue
}
}
}
if (world_rank != 0) {
//Receive data to be processed
//do some math
for (unsigned i = p1; i < p2; i++)
kanno_tmp[i].a = std::sqrt(kanno[i].a)/((double)i+1.0);
//Send processed data to 0 and wait to receive new data.
}
//copy temp vector to kanno
kanno = kanno_tmp;
}
//print some of the results;
if (world_rank == 0)
{
for (int i = 0; i < SZ; i += 40)
printf("Line %d: %lg,%lg\n",i,kanno[i].a,kanno[i].b);
}
MPI_Finalize();
}
I can get this 90% of the way to what I want, except that my MPI_Send and MPI_Recv calls will block, or the 'master' process won't know that the 'slave' processes are ready to receive data.
Is there a way in MPI to do something like
unsigned Datapointer = [some_array_index];
while (Datapointer < array_size) {
if (world_rank == 0) {
for (int i = 1; i < world_size; i++)
{
if (<process i is ready to receive>) {
MPI_Send([...]);
Datapointer += 10;
}
if (<process i has sent data>)
MPI_Recv([...]);
if (Datapointer > array_size) {
MPI_Bcast([killswitch]);
break;
}
}
}
}
MPI_Barrier();
or is there a more efficient way to structure this for variable-complexity chunks or variable-speed nodes?
As @Gilles Gouaillardet pointed out, the keyword in such a scenario is MPI_ANY_SOURCE. Using it, a process can receive a message from any source. To know which process sent that message, you can use status.MPI_SOURCE from the status of the receive call.
MPI_Status status;
if(rank == 0) {
//send initial work to all processes
while(true) {
MPI_Recv(buf, 32, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
// do the distribution logic
MPI_Send(buf, 32, MPI_INT, status.MPI_SOURCE, tag, MPI_COMM_WORLD);
// break out of the loop once the work is over, and send all the processes
// a message to stop waiting for work
}
}
else {
while(true){
// receive work from rank 0
MPI_Recv(buf, 32, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
// Perform the computation and send back the result
MPI_Send(buf, 32, MPI_INT, 0, tag, MPI_COMM_WORLD);
// break out of this loop when rank 0 sends some kind of special "stop" message
}
}
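To make that concrete, below is a small, self-contained sketch of the pattern; the chunk size, the tags, and the "one int result per chunk" choice are all made up for the illustration. Rank 0 hands out chunk start indices, refills whichever worker answers first (identified via status.MPI_SOURCE), and uses a differently tagged message as the stop signal; workers loop until they see that tag.

#include <stdio.h>
#include <mpi.h>

#define CHUNK    10      /* hypothetical chunk size */
#define WORK_TAG  1
#define STOP_TAG  2

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0) {                              /* master */
        const int total = 100;                    /* hypothetical amount of work */
        int next = 0, outstanding = 0, result;
        MPI_Status status;

        /* prime every worker with one chunk (or a stop if there is nothing left) */
        for (int w = 1; w < size; w++) {
            if (next < total) {
                MPI_Send(&next, 1, MPI_INT, w, WORK_TAG, MPI_COMM_WORLD);
                next += CHUNK;
                outstanding++;
            } else {
                MPI_Send(&next, 1, MPI_INT, w, STOP_TAG, MPI_COMM_WORLD);
            }
        }

        /* refill whichever worker reports back first */
        while (outstanding > 0) {
            MPI_Recv(&result, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG,
                     MPI_COMM_WORLD, &status);
            outstanding--;
            printf("chunk result %d from rank %d\n", result, status.MPI_SOURCE);
            if (next < total) {
                MPI_Send(&next, 1, MPI_INT, status.MPI_SOURCE, WORK_TAG, MPI_COMM_WORLD);
                next += CHUNK;
                outstanding++;
            } else {
                MPI_Send(&next, 1, MPI_INT, status.MPI_SOURCE, STOP_TAG, MPI_COMM_WORLD);
            }
        }
    } else {                                      /* worker */
        int start, result;
        MPI_Status status;
        for (;;) {
            MPI_Recv(&start, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            if (status.MPI_TAG == STOP_TAG)
                break;
            result = 0;                           /* stand-in "computation" on the chunk */
            for (int i = start; i < start + CHUNK; i++)
                result += i;
            MPI_Send(&result, 1, MPI_INT, 0, WORK_TAG, MPI_COMM_WORLD);
        }
    }

    MPI_Finalize();
    return 0;
}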
I'm trying to write a simple program with MPI that finds all numbers less than 514 that are equal to a power of the sum of their digits (for example, 512 = (5+1+2)^3). The problem I have is with the main loop: it works just fine for a few iterations (c=10), but when I try to increase the number of iterations (c=x), mpiexec.exe just hangs, seemingly in the middle of a printf call.
I'm pretty sure that deadlocks are to blame, but I couldn't find any.
The source code:
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include "mpi.h"
int main(int argc, char* argv[])
{
//our number
int x=514;
//amount of iterations
int c = 10;
//tags for message identification
int tag = 42;
int tagnumber = 43;
int np, me, y1, y2;
MPI_Status status;
/* Initialize MPI */
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &np);
MPI_Comm_rank(MPI_COMM_WORLD, &me);
/* Check that we run on at least two processes */
if (np < 2)
{
printf("You have to use at least 2 processes to run this program\n");
MPI_Finalize();
exit(0);
}
//begin iterations
while(c>0)
{
//if this is the main process, send messages to all the other processes
if (me == 0)
{
printf("Amount of threads: %d\n", np);
int b = 1;
while(b<np)
{
int q = x-b;
//send a number to a secondary process
MPI_Send(&q, 1, MPI_INT, b, tagnumber, MPI_COMM_WORLD);
printf("Process %d sending to process %d, value: %d\n", me, b, q);
//get a number back from the secondary process
MPI_Recv(&y2, 1, MPI_INT, b, tag, MPI_COMM_WORLD, &status);
printf ("Process %d received value %d\n", me, y2);
//compare it with the sent one
if (q==y2)
{
//if they're equal, then print the result
printf("\nValue found: %d\n", q);
}
b++;
}
x = x-b+1;
b = 1;
}
else
{
//if not the main process, then process the received number and send the result back.
MPI_Recv (&y1, 1, MPI_INT, 0, tagnumber, MPI_COMM_WORLD, &status);
int sum = 0;
int y2 = y1;
while (y1!=0)
{
//find the number's sum of digits
sum += y1%10;
y1 /= 10;
}
int sum2 = sum;
while(sum2<y2)
{
//calculate the exponentiation
sum2 = sum2*sum;
}
MPI_Send (&sum2, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
}
c--;
}
MPI_Finalize();
exit(0);
}
And I run the compiled exe-file as "mpiexec.exe -n 4 lab2.exe". I use HPC Pack 2008 SDK, if that's of any use to you guys.
Is there any way to fix it? Or maybe some way to debug that situation properly?
Thanks a lot in advance!
Not sure if you already found where the problem is, but your infinite run happens in this loop:
while(sum2<y2)
{
//calculate the exponentiation
sum2 = sum2*sum;
}
You can confirm this by setting c to about 300 or above and adding a printf call inside this while loop. (For instance, whenever the digit sum comes out as 1, sum2 stays at 1 and the condition sum2 < y2 never becomes false.) I haven't completely pinpointed your logic error, but I marked three comments below at the places in your code that look strange to me:
while(c>0)
{
if (me == 0)
{
...
while(b<np)
{
int q = x-b; //<-- you subtract b from x here
...
b++;
}
x = x-b+1; //<-- you subtract b again; are you sure this is what you want?
b = 1; //<-- this is useless
}
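As an aside, here is a rough sketch (plain C, leaving the MPI structure out of it entirely) of a version of that check which always terminates: it bails out when the digit sum is 0 or 1, since repeated multiplication by 1 can never reach the target, and otherwise stops multiplying as soon as the product reaches or passes the target. The exact semantics ("is the number some power of its digit sum?") are my assumption about what the program is meant to test.

#include <stdio.h>

/* Does some power of n's digit sum equal n? (assumed to be the intended test) */
static int is_digit_power(int n)
{
    int sum = 0, m = n, p;
    while (m != 0) {
        sum += m % 10;
        m /= 10;
    }
    if (sum <= 1)              /* 0 and 1 never grow when multiplied */
        return n == sum;
    for (p = sum; p < n; p *= sum)
        ;                      /* stop as soon as p reaches or passes n */
    return p == n;
}

int main(void)
{
    for (int n = 1; n < 514; n++)
        if (is_digit_power(n))
            printf("%d\n", n);
    return 0;
}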
Hope this helps.
I have implemented a 2D-array MPI scatter, which works well: the master process can scatter 2D parts of the initial big array. The problem is that when I use a dynamically allocated 2D image file as input, it doesn't work. I suppose there must be something wrong with the memory layout. Is there any way of obtaining 2D parts of a big 2D array that is allocated dynamically?
I had a similar problem, but with a dynamically allocated one-dimensional vector.
I solved my problem as follows:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
int main(int argc, char** argv) {
/* .......Variables Initialisation ......*/
int Numprocs, MyRank, Root = 0;
int index;
int *InputBuffer, *RecvBuffer;
int Scatter_DataSize;
int DataSize;
MPI_Status status;
/* ........MPI Initialisation .......*/
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
MPI_Comm_size(MPI_COMM_WORLD, &Numprocs);
if (MyRank == Root) {
DataSize = 80000;
/* ...Allocate memory.....*/
InputBuffer = (int*) malloc(DataSize * sizeof(int));
for (index = 0; index < DataSize; index++)
InputBuffer[index] = index;
}
MPI_Bcast(&DataSize, 1, MPI_INT, Root, MPI_COMM_WORLD);
if (DataSize % Numprocs != 0) {
if (MyRank == Root)
printf("Input is not evenly divisible by Number of Processes\n");
MPI_Finalize();
exit(-1);
}
Scatter_DataSize = DataSize / Numprocs;
RecvBuffer = (int *) malloc(Scatter_DataSize * sizeof(int));
MPI_Scatter(InputBuffer, Scatter_DataSize, MPI_INT, RecvBuffer,
Scatter_DataSize, MPI_INT, Root, MPI_COMM_WORLD);
for (index = 0; index < Scatter_DataSize; ++index)
printf("MyRank = %d, RecvBuffer[%d] = %d \n", MyRank, index,
RecvBuffer[index]);
MPI_Finalize();
return 0;
}
This link has examples that have helped me:
http://www.cse.iitd.ernet.in/~dheerajb/MPI/Document/hos_cont.html
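On the 2D side of the original question: the usual culprit is that a 2D array allocated as separately malloc'd rows behind an array of row pointers is not one contiguous block, so MPI_Scatter cannot slice it. If you allocate the pixel data as a single block and point the rows into it, scattering groups of whole rows works just like the 1D case. Below is a minimal sketch along those lines (int pixels, made-up dimensions, and assuming the row count divides evenly among the processes).

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, nprocs;
    int rows = 8, cols = 4;                       /* hypothetical image size */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    int *data = NULL;                             /* contiguous pixel storage */
    int **image = NULL;                           /* row pointers into it */
    if (rank == 0) {
        data  = malloc(rows * cols * sizeof(int));
        image = malloc(rows * sizeof(int *));
        for (int r = 0; r < rows; r++) {
            image[r] = &data[r * cols];           /* image[r][c] works as usual */
            for (int c = 0; c < cols; c++)
                image[r][c] = r * cols + c;
        }
    }

    int myrows = rows / nprocs;                   /* assumes nprocs divides rows */
    int *part = malloc(myrows * cols * sizeof(int));

    /* scatter whole rows; valid because the pixels sit in one contiguous block */
    MPI_Scatter(data, myrows * cols, MPI_INT,
                part, myrows * cols, MPI_INT, 0, MPI_COMM_WORLD);

    printf("rank %d: first pixel of my block = %d\n", rank, part[0]);

    free(part);
    if (rank == 0) {
        free(image);
        free(data);
    }
    MPI_Finalize();
    return 0;
}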
Hope this helps.
I have one structure xyz as given below: struct xyz { char a; int32_t b; char c[50]; uint32_t d; uchar e[10]; };
I need to broadcast it, so I used MPI_Bcast(), which requires an MPI datatype corresponding to struct xyz. To create one I used the MPI_Type_create_struct() function, declaring MPI_Datatype MPI_my_new_datatype, oldtypes[4]; and using the MPI datatypes corresponding to the structure members as follows:
oldtypes[4] = {MPI_CHAR, MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED_CHAR}; and to create the new datatype I used the following arguments in the call:
MPI_Type_create_struct(4, blockcounts, offsets, oldtypes, &MPI_my_new_datatype); MPI_Type_commit(&MPI_my_new_datatype);
Now it compiles, but gives the runtime error below:
*** An error occurred in MPI_Type_create_struct on communicator MPI_COMM_WORLD: MPI_ERR_ARG: invalid argument of some other kind; MPI_ERRORS_ARE_FATAL (goodbye).
Can anyone see where the problem is?
You can't "bundle up" the similar types like that. Each field needs to be addressed seperately, and there are 5 of them, not 4.
Also note that, in general, it's a good idea to actually "measure" the offsets rather than infer them.
The following works:
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#include <stdint.h>
struct xyz_t {
char a; int32_t b; char c[50]; uint32_t d; unsigned char e[10];
};
int main(int argc, char **argv) {
int rank, size, ierr;
MPI_Datatype oldtypes[5] = {MPI_CHAR, MPI_INT, MPI_CHAR, MPI_UNSIGNED, MPI_UNSIGNED_CHAR};
int blockcounts[5] = {1, 1, 50, 1, 10};
MPI_Datatype my_mpi_struct;
MPI_Aint offsets[5];
struct xyz_t old, new;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
/* find offsets */
offsets[0] = (char*)&(old.a) - (char*)&old;
offsets[1] = (char*)&(old.b) - (char*)&old;
offsets[2] = (char*)&(old.c) - (char*)&old;
offsets[3] = (char*)&(old.d) - (char*)&old;
offsets[4] = (char*)&(old.e) - (char*)&old;
MPI_Type_create_struct(5, blockcounts, offsets, oldtypes, &my_mpi_struct);
MPI_Type_commit(&my_mpi_struct);
if (rank == 0) {
old.a = 'a';
old.b = (int)'b';
strcpy(old.c,"This is field c");
old.d = (unsigned int)'d';
strcpy(old.e,"Field e");
MPI_Send(&old, 1, my_mpi_struct, 1, 1, MPI_COMM_WORLD);
} else if (rank == 1) {
MPI_Status status;
MPI_Recv(&new, 1, my_mpi_struct, 0, 1, MPI_COMM_WORLD, &status);
printf("new.a = %c\n", new.a);
printf("new.b = %d\n", new.b);
printf("new.e = %s\n", new.e);
}
MPI_Type_free(&my_mpi_struct);
MPI_Finalize();
return 0;
}
Running:
$ mpirun -np 2 ./struct
new.a = a
new.b = 98
new.e = Field e
Updated: As Dave Goodell below points out, the offset calculations would be better done as
#include <stddef.h>
/* ... */
offsets[0] = offsetof(struct xyz_t,a);
offsets[1] = offsetof(struct xyz_t,b);
offsets[2] = offsetof(struct xyz_t,c);
offsets[3] = offsetof(struct xyz_t,d);
offsets[4] = offsetof(struct xyz_t,e);
and if your MPI supports it (most should, though Open MPI was slow to add some of the MPI 2.2 types), the MPI_UNSIGNED should be replaced with MPI_UINT32_T.
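For example, assuming an implementation that provides the MPI 2.2 fixed-width types (and mapping the int32_t field the same way), the type array above would become:

MPI_Datatype oldtypes[5] = {MPI_CHAR, MPI_INT32_T, MPI_CHAR, MPI_UINT32_T, MPI_UNSIGNED_CHAR};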
Is there any way to enumerate a process with a given PID on Windows and get a list of all its open handles (locked files, etc.)?
EDIT: I don't care about the language. If it is in .NET, I'd be glad; if in WinAPI (C), it won't hurt. If in something else, I think I can rewrite it :-)
I did some deep googling and found this article.
The article gave a link to download the source code:
I tried the method in NtSystemInfoTest.cpp (in the downloaded source code) and it worked superbly.
void ListHandles( DWORD processID, LPCTSTR lpFilter )
The code carries the following disclaimer:
// Written by Zoltan Csizmadia, zoltan_csizmadia#yahoo.com
// For companies(Austin,TX): If you would like to get my resume, send an email.
//
// The source is free, but if you want to use it, mention my name and e-mail address
//
//////////////////////////////////////////////////////////////////////////////////////
//
I hope this helps you.
The command-line 'Handle' tool from Sysinternals does this, if you just want a tool. This won't help you if you're looking for a code solution, though.
Here is an example using ZwQuerySystemInformation from the DDK. The DDK is now known as the "WDK" and is available with MSDN. If you don't have MSDN, apparently you can also get it from here.
I haven't tried it; I just googled your question.
#include "ntdll.h"
#include <stdlib.h>
#include <stdio.h>
#include "ntddk.h"
#define DUPLICATE_SAME_ATTRIBUTES 0x00000004
#pragma comment(lib,"ntdll.lib")
BOOL EnablePrivilege(PCSTR name)
{
TOKEN_PRIVILEGES priv = {1, {0, 0, SE_PRIVILEGE_ENABLED}};
LookupPrivilegeValue(0, name, &priv.Privileges[0].Luid);
HANDLE hToken;
OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &hToken);
AdjustTokenPrivileges(hToken, FALSE, &priv, sizeof priv, 0, 0);
BOOL rv = GetLastError() == ERROR_SUCCESS;
CloseHandle(hToken);
return rv;
}
int main(int argc, char *argv[])
{
if (argc == 1) return 0;
ULONG pid = strtoul(argv[1], 0, 0);
EnablePrivilege(SE_DEBUG_NAME);
HANDLE hProcess = OpenProcess(PROCESS_DUP_HANDLE, FALSE, pid);
ULONG n = 0x1000;
PULONG p = new ULONG[n];
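/* ask for the system handle table, doubling the buffer until it is big enough */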
while (NT::ZwQuerySystemInformation(NT::SystemHandleInformation, p, n * sizeof *p, 0)
== STATUS_INFO_LENGTH_MISMATCH)
delete [] p, p = new ULONG[n *= 2];
NT::PSYSTEM_HANDLE_INFORMATION h = NT::PSYSTEM_HANDLE_INFORMATION(p + 1);
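/* *p is the total number of handle entries in the system; keep only those owned by our PID */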
for (ULONG i = 0; i < *p; i++) {
if (h[i].ProcessId == pid) {
HANDLE hObject;
if (NT::ZwDuplicateObject(hProcess, HANDLE(h[i].Handle), NtCurrentProcess(), &hObject,
0, 0, DUPLICATE_SAME_ATTRIBUTES)
!= STATUS_SUCCESS) continue;
NT::OBJECT_BASIC_INFORMATION obi;
NT::ZwQueryObject(hObject, NT::ObjectBasicInformation, &obi, sizeof obi, &n);
printf("%p %04hx %6lx %2x %3lx %3ld %4ld ",
h[i].Object, h[i].Handle, h[i].GrantedAccess,
int(h[i].Flags), obi.Attributes,
obi.HandleCount - 1, obi.PointerCount - 2);
n = obi.TypeInformationLength + 2;
NT::POBJECT_TYPE_INFORMATION oti = NT::POBJECT_TYPE_INFORMATION(new CHAR[n]);
NT::ZwQueryObject(hObject, NT::ObjectTypeInformation, oti, n, &n);
printf("%-14.*ws ", oti[0].Name.Length / 2, oti[0].Name.Buffer);
n = obi.NameInformationLength == 0
? MAX_PATH * sizeof (WCHAR) : obi.NameInformationLength;
NT::POBJECT_NAME_INFORMATION oni = NT::POBJECT_NAME_INFORMATION(new CHAR[n]);
NTSTATUS rv = NT::ZwQueryObject(hObject, NT::ObjectNameInformation, oni, n, &n);
if (NT_SUCCESS(rv))
printf("%.*ws", oni[0].Name.Length / 2, oni[0].Name.Buffer);
printf("\n");
CloseHandle(hObject);
}
}
delete [] p;
CloseHandle(hProcess);
return 0;
}