Problem splitting an array with MPI_ISEND in Fortran

I wrote the following lines of code with the aim of dividing the 1D array Jp (which lives on the master process only) among the different processes (including the master).
Each process has to receive a block of non-contiguous, modified data (the values are changed in the inner loop), so I created a new datatype newF with MPI_TYPE_INDEXED to select the correct portion of data to send.
I tried both MPI_RECV and MPI_IRECV to receive the data.
The problem is that this part of the code works fine with any number of tasks (from 1 to 8) as long as the number of elements of Jp is small; when I increase that number (e.g. n = 5000), not all the processes receive the data, and the split array JpS shows the values I used to initialize it (i.e. -10000).
The commented lines show all the changes I tried in order to solve this problem. Does anyone have an idea?
program test_send
use mpi
implicit none
integer :: rank, nproc, mpi_stat
integer :: n, m, k, io, i, j
integer, allocatable :: Jp(:), JpS(:), JpAux(:)
integer :: count, n_distro,newF
integer, allocatable :: sendcounts(:), displ(:), &
blocklens(:), blockdisp(:), &
request(:)
integer :: ARRAY_OF_STATUS(MPI_STATUS_SIZE), error
data count /3/
call mpi_init(mpi_stat)
call mpi_comm_size(mpi_comm_world, nproc, mpi_stat)
call mpi_comm_rank(mpi_comm_world, rank, mpi_stat)
n = 400*count
allocate(sendcounts(nproc), displ(nproc), &
blocklens(count), blockdisp(count), request(nproc))
if (rank.eq.0) then
allocate(Jp(n+1),JpAux(n+1))
Jp = 0
do i = 1,n+1
Jp(i) = i
enddo
endif
call mpi_barrier(mpi_comm_world, mpi_stat)
m = n/count
n_distro = (m+1)/nproc
k = 0
do i = 1,nproc
if (i<nproc) then
sendcounts(i) = n_distro
else
sendcounts(i) = m - (nproc-1)*n_distro
endif
displ(i) = k
k = k + sendcounts(i)
enddo
call mpi_barrier(mpi_comm_world, mpi_stat)
allocate(JpS(count*sendcounts(rank+1)+1))
call mpi_barrier(mpi_comm_world, mpi_stat)
! call mpi_irecv(JpS, (sendcounts(rank+1))*count+1,mpi_int,0,0,mpi_comm_world, request(rank+1), mpi_stat)
! call mpi_recv(JpS, (sendcounts(rank+1))*count+1,mpi_int,0,0,mpi_comm_world, MPI_STATUS_IGNORE,mpi_stat)
!call mpi_waitall(1,request,ARRAY_OF_STATUS,error)
! call mpi_barrier(mpi_comm_world, mpi_stat)
if (rank.eq.0) then
do i = 0,nproc-1
JpAux = -100000
blocklens = spread(sendcounts(i+1),1,count)
blockdisp = spread(displ(i+1),1,count) + (/ (k*m, k=0,count-1) /)
blocklens(count) = blocklens(count)+1
do j = 1,count
if (j.eq.1) then
JpAux(blockdisp(j)+1:blockdisp(j)+blocklens(j)) = Jp(blockdisp(j)+1:blockdisp(j)+blocklens(j))&
-Jp(blockdisp(j)+1)
else
JpAux(blockdisp(j)+1:blockdisp(j)+blocklens(j)) = Jp( blockdisp(j) + 1 : blockdisp(j) + blocklens(j) )&
-Jp( blockdisp(j)+1 ) + JpAux( blockdisp(j-1) + blocklens(j-1))&
+(Jp( blockdisp(j-1)+blocklens(j-1)+1 )-Jp( blockdisp(j-1)+blocklens(j-1)))
endif
enddo
call mpi_type_indexed(count, blocklens, blockdisp, mpi_int, newF, mpi_stat)
call mpi_type_commit(newF, mpi_stat)
call mpi_isend(JpAux, 1, newF, i, i, mpi_comm_world, request(i+1), mpi_stat)
call mpi_type_free(newF, mpi_stat)
enddo
endif
! call mpi_wait(request(rank+1), ARRAY_OF_STATUS, mpi_stat)
call mpi_barrier(mpi_comm_world, mpi_stat)
!call mpi_waitall(1,request,ARRAY_OF_STATUS,error)
call mpi_recv(JpS, (sendcounts(rank+1))*count+1,mpi_int,0,MPI_ANY_TAG,mpi_comm_world, MPI_STATUS_IGNORE,mpi_stat)
! print*, request
print*, 'rank: ', rank, ', size: ', size(JpS), ', Jp: ', JpS
call mpi_barrier(mpi_comm_world, mpi_stat)
call mpi_finalize(mpi_stat)
end program test_send
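For reference, here is a minimal, self-contained sketch (not the program above, and deliberately stripped down) of the pattern the code appears to aim for: a committed MPI_TYPE_INDEXED type describing two non-contiguous blocks, an MPI_ISEND that is completed with MPI_WAIT before the type is freed, and a plain MPI_RECV on the other side. It assumes MPI_INTEGER for Fortran integers and at least two ranks.
program indexed_sketch
    use mpi
    implicit none
    integer :: rank, ierr, newF, req, i
    integer :: blocklens(2), blockdisp(2)
    integer :: buf(10), rbuf(6)
    call mpi_init(ierr)
    call mpi_comm_rank(mpi_comm_world, rank, ierr)
    blocklens = (/ 3, 3 /)                ! two blocks of 3 elements each
    blockdisp = (/ 0, 5 /)                ! starting at offsets 0 and 5
    call mpi_type_indexed(2, blocklens, blockdisp, mpi_integer, newF, ierr)
    call mpi_type_commit(newF, ierr)
    if (rank == 0) then
        buf = (/ (i, i = 1, 10) /)
        call mpi_isend(buf, 1, newF, 1, 0, mpi_comm_world, req, ierr)
        call mpi_wait(req, mpi_status_ignore, ierr)   ! complete the send before freeing the type
    else if (rank == 1) then
        call mpi_recv(rbuf, 6, mpi_integer, 0, 0, mpi_comm_world, mpi_status_ignore, ierr)
        print *, 'rank 1 received: ', rbuf
    end if
    call mpi_type_free(newF, ierr)
    call mpi_finalize(ierr)
end program indexed_sketch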

Related

OpenACC constant parameters

I am wondering what is the proper way to handle constants in OpenACC kernels.
For example, in the following code
module vecaddmod
implicit none
integer, parameter :: n = 100000
!$acc declare create(n)
contains
subroutine vecaddgpu(r, a, b)
real, dimension(:) :: r, a, b
integer :: i
!$acc update self(n)
!$acc data present(n)
!$acc kernels loop copyin(a(1:n),b(1:n)) copyout(r(1:n))
do i = 1, n
r(i) = a(i) + b(i)
enddo
!$acc end data
end subroutine vecaddgpu
end module vecaddmod
program main
use vecaddmod
implicit none
integer :: i, errs, argcount
real, dimension(:), allocatable :: a, b, r, e
character*10 :: arg1
allocate( a(n), b(n), r(n), e(n) )
do i = 1, n
a(i) = i
b(i) = 1000*i
enddo
! compute on the GPU
call vecaddgpu( r, a, b )
! compute on the host to compare
do i = 1, n
e(i) = a(i) + b(i)
enddo
! compare results
errs = 0
do i = 1, n
if( r(i) /= e(i) )then
errs = errs + 1
endif
enddo
print *, errs, ' errors found'
if( errs /= 0 ) call exit(errs)
end program main
n is declared as a parameter in a module on the CPU, and it is used as the loop range. nvfortran warns me about "Constant or Parameter used in data clause". Is the above example the proper way to handle this? Can I take advantage of the constant memory on the GPU, so that I don't need to copy n from the CPU to the GPU for each kernel launch?
Thanks.
The compiler will replace parameters with their literal values, so there is no need to put them in data regions.
module vecaddmod
implicit none
integer, parameter :: n = 100000
contains
subroutine vecaddgpu(r, a, b)
real, dimension(:) :: r, a, b
integer :: i
!$acc kernels loop copyin(a(1:n),b(1:n)) copyout(r(1:n))
do i = 1, n
r(i) = a(i) + b(i)
enddo
end subroutine vecaddgpu
end module vecaddmod
...
% nvfortran -acc -Minfo=accel test.f90
vecaddgpu:
11, Generating copyin(a(:100000)) << "n" is replaced with 100000
Generating copyout(r(:100000))
Generating copyin(b(:100000))
12, Loop is parallelizable
Generating Tesla code
12, !$acc loop gang, vector(128) ! blockidx%x threadidx%x

MPI program for the Poisson equation stuck after one iteration

I am trying to solve the Poisson equation on a square domain [(0,1) x (0,1)] using MPI and overlapping domains. Currently, my code takes as input the number of domain divisions in the X and Y directions, the length of the overlap between two domains as a fraction of the domain length, and the number of elemental divisions in the overlap.
Input file looks like
2,2
10,10
10,10
program main
!implicit none
include 'mpif.h'
integer cols, divfx, divfy, iter
integer xdiv, ydiv, info, max_iter, x_shift, y_shift
integer, allocatable:: ipiv(:)
double precision, allocatable :: A(:,:), Ainv(:,:)
real, allocatable:: edge(:,:)
double precision, allocatable :: u(:,:), f(:,:)
double precision, allocatable :: u_exact(:,:)
allocatable :: Left(:,:), Right(:,:)
allocatable :: Top(:,:), Bottom(:,:)
allocatable :: TempLeft(:,:), TempRight(:,:)
allocatable :: TempTop(:,:), TempBottom(:,:)
integer myid, master, numprocs, ierr, status(MPI_STATUS_SIZE)
integer i, j, numsent, sender, L, T, R, B
integer anstype, row, dovfx, dovfy, domx, domy, idx
real dom1,dom2,buff
double precision mesh(2), buffer(4), divx, divy, dx, dy, xd, yd
double precision error, derror, error_norm
character(len=100) :: domaindata
call MPI_INIT(ierr)
call MPI_COMM_RANK(MPI_COMM_WORLD, myid, ierr)
call MPI_COMM_SIZE(MPI_COMM_WORLD, numprocs, ierr)
master = 0
divx=0.d0
divy=0.d0
! Input the number of divisions for domain decomposition and calculate sub-domain dimensions.
open(1, file='Inputdata.dat', status='old')
! read(1,*) domx,domy
! read(1,*) dovfx,dovfy
! read(1,*) divfx,divfy
write(*,*)'Starting the Program'
write(*,*) "Enter the number of domain divisions in x-direction &
and y-direction ( Enter 4 if you want three sub-domains)"
read(1,*) domx,domy
write(*,*) domx,domy
write(*,*) "Total number of sub-domains for the problem"
write(*,*) domx*domy
write(*,*) "Enter the sub-domain overlap in x & y -direction as &
a fraction of sub-domain length (multiple of 10)"
read(1,*) dovfx,dovfy
write(*,*) dovfx,dovfy
write(*,*) "Enter the number of divisions in the overlap in &
x & yas a fraction of sub-domain(multiple of 5)"
read(1,*) divfx,divfy
write(*,*) divfx,divfy
divx=1.d0/(((1.d0/domx)/dovfx)/divfx)
divy=1.d0/(((1.d0/domy)/dovfy)/divfy)
write(*,*)"Total number of elemental divisions for the &
problem domain (0,1) in both dimensions"
write(*,*) divx, divy, divx*divy
write(*,*)"Total number of nodal divisions for the problem domain"
write(*,*) (divx+1)*(divy+1)
! time
! **************************
tic = MPI_Wtime();
! Maximum number of iterations.
max_iter=100
! Mesh Size
mesh(1)=1/divx
mesh(2)=1/divy
write(*,*) 'Element Size'
write(*,*) mesh(1), mesh(2)
if ( myid .eq. master ) then
! Send iteration number to subdomain and receive the error from each to
! calculate total error.
write(*,*) 'still1'
do 10 iter = 1,max_iter
do 20 i = 1,domx*domy
call MPI_SEND(iter, 1, MPI_INTEGER, i, i, MPI_COMM_WORLD, ierr)
20 continue
! Receive results obtained from sub-processor/sub-domain
!
error = 0.d0
do 30 i = 1,domx*domy
call MPI_RECV(d_error , 1, MPI_DOUBLE_PRECISION, i, iter, &
MPI_COMM_WORLD, status, ierr)
error = error + d_error
30 continue
write(*,*) 'In iteration ', iter, 'cumulative error is', error*1.d0/domx/domy
10 continue
! time:
! *************
toc = MPI_Wtime();
! Write results to output
! **************************
write(*,*)
write(*,*) 'Time taken for parallel computation is: ',(toc-tic)*1000, 'miliseconds'
else
!************************************ Slaves receive mesh size for discretization ******************************************************
write(*,*) 'iter', iter
write(*,*) 'myid', myid
! Slaves receive corners, then creates a Cartesian grid for finite
! difference until done message received, for one iteration.
! This is done for the first iteration
! Get Domain ID :
if (myid.gt.(domx*domy)) goto 200
write(*,*) 'still31'
1000 call MPI_RECV(iter, 1, MPI_INTEGER, master, MPI_ANY_TAG, MPI_COMM_WORLD, status, ierr)
if (status(MPI_TAG) .eq. 0) goto 200
write(*,*) 'still4'
if (iter.eq.1) then
write(*,*) 'still5'
dom1=domx
dom2=domy
allocate (edge(domx*domy,4))
! Determining the edge matrices for each subdomain - the bounding box
do j =1,domx
do k=1,domy
idx=(j-1)*(domx-1)+k+(j-1)
buff=REAL((mod(idx-1,domx)))/domx
IF (buff-((1.d0/domx)/dovfx) .gt. 0) THEN
buff=buff-((1.d0/domx)/dovfx)
ENDIF
edge(idx,1) = buff
IF ((mod(idx ,domx)) .eq. 0) THEN
buff=1
ELSE
buff=REAL(mod(idx ,domx))/domx
ENDIF
!write(*,*) buff
IF (buff + ((1.d0/domx)/dovfx) .lt. 1) THEN
buff=buff+((1.d0/domx)/dovfx)
ENDIF
edge(idx,2) = buff
!
buff=REAL(floor((idx-1)/dom1))/dom1
IF (buff -((1.d0/domy)/dovfy) .gt. 0) THEN
buff=buff-((1.d0/domy)/dovfy)
ENDIF
edge(idx,3) = buff
buff=REAL(ceiling(idx/dom1))/dom1
IF (buff+((1.d0/domy)/dovfy) .lt. 1) THEN
buff= buff+((1.d0/domy)/dovfy)
ENDIF
edge(idx,4) = buff
end do
end do
write(*,*) myid, edge(myid,:)
write(*,*) 'iter', iter
call Surround_dom(myid,domx,domy,LeftC, RightC, BottomC, TopC)
! Calculate data for the matrices: Divisions in each subdomain. :
xdiv=(edge(myid,2)-edge(myid,1))/mesh(1)
ydiv=(edge(myid,4)-edge(myid,3))/mesh(2)
dx=mesh(1)
dy=mesh(2)
allocate (A((xdiv-1)*(ydiv-1),(xdiv-1)*(ydiv-1)))
allocate (Ainv((xdiv-1)*(ydiv-1),(xdiv-1)*(ydiv-1)))
allocate (u((xdiv-1)*(ydiv-1),1),f((xdiv-1)*(ydiv-1),1))
allocate (u_exact((xdiv-1)*(ydiv-1),1))
allocate (ipiv((xdiv-1)*(ydiv-1)))
allocate (Left((ydiv-1),1),Right((ydiv-1),1))
allocate (Top((xdiv-1),1), Bottom(((xdiv-1)),1))
allocate (TempLeft((ydiv-1),1),TempRight((ydiv-1),1))
allocate (TempTop((xdiv-1),1), TempBottom(((xdiv-1)),1))
Left = 0.d0; Right = 0.d0; Bottom = 0.d0; Top = 0.d0;
TempLeft = 0.d0; TempRight = 0.d0; TempBottom = 0.d0; TempTop = 0.d0;
A=0;
endif
write(*,*) 'still6'
! ******************************************************************
! SendReceive data based on location
! ******************************************************************
if (LeftC.ne.0) then
call MPI_SENDRECV(Left, ydiv - 1, MPI_DOUBLE_PRECISION, LeftC, iter, &
TempLeft, ydiv - 1, MPI_DOUBLE_PRECISION, LeftC, iter, MPI_COMM_WORLD, status, ierr)
end if
if (RightC.ne.0) then
call MPI_SENDRECV(Right, ydiv - 1, MPI_DOUBLE_PRECISION, RightC, iter, &
TempRight, ydiv - 1, MPI_DOUBLE_PRECISION, RightC, iter, MPI_COMM_WORLD, status, ierr)
end if
if (BottomC.ne.0) then
call MPI_SENDRECV(Bottom, xdiv - 1, MPI_DOUBLE_PRECISION, BottomC, iter, &
TempBottom, xdiv - 1, MPI_DOUBLE_PRECISION, BottomC, iter, MPI_COMM_WORLD, status, ierr)
end if
if (TopC.ne.0) then
call MPI_SENDRECV(Top, xdiv - 1, MPI_DOUBLE_PRECISION, TopC, iter, &
TempTop, xdiv - 1, MPI_DOUBLE_PRECISION, TopC, iter, MPI_COMM_WORLD, status, ierr)
end if
Left = TempLeft ;
Right = TempRight;
Top = TempTop ;
Bottom = TempBottom;
write(*,*) 'still7'
! Form the coefficient matrices
do i =1,(xdiv-1)*(ydiv-1)
A(i,i)=-2.d0*(1.d0/(dx**2)+1.d0/(dy**2))
enddo
do i=1,(xdiv-2)
do j=1,(ydiv-1)
A(i+(j-1)*(xdiv-1),i+(j-1)*(xdiv-1)+1)=1.d0/(dx**2)
A(i+(j-1)*(xdiv-1)+1,i+(j-1)*(xdiv-1))=1.d0/(dx**2)
enddo
enddo
do i=1,(xdiv-1)
do j=1,(ydiv-2)
A(i+(j-1)*(xdiv-1),i+(j)*(xdiv-1))=1.d0/(dy**2)
A(i+(j)*(xdiv-1),i+(j-1)*(xdiv-1))=1.d0/(dy**2)
enddo
enddo
write(*,*) 'still9'
L=1
T=1
R=1
B=1
write(*,*) 'still10'
! Impose Boundary Conditions in F matrix
do i=1,(xdiv-1)*(ydiv-1)
xd = edge(myid,1) + (dx)*mod(i,(xdiv-1))
if (mod(i,xdiv-1).eq.0) xd = edge(myid,1) + (dx)*(xdiv-1)
yd = edge(myid,3) + (dy)*ceiling(i*1.d0/(xdiv-1))
!if (iter.eq.1 .and. myid.eq.2) write(*,*) xd,yd
u_exact(i,1) = sin(2.d0*3.1415*xd)*sin(2.d0*3.1415*yd)
f(i,1) = 8.d0*3.1415*3.1415*u_exact(i,1)
IF (mod(i,(xdiv-1)) .eq. 1) THEN
f(i,1)= f(i,1)+Left(L,1)/dx/dx
L=L+1
ENDIF
IF (mod(i,(xdiv-1)) .eq. 0) THEN
f(i,1)=f(i,1)+Right(R,1)/dx/dx
R=R+1
ENDIF
IF (i .le. (xdiv-1)) THEN
f(i,1)=f(i,1)+Bottom(B,1)/dy/dy
B=B+1
ENDIF
IF (i .gt. (xdiv-1)*(ydiv-2)) THEN
f(i,1)=f(i,1)+Top(T,1)/dy/dy
T=T+1
END IF
! enddo
enddo
!Solve AU=F by LU factorization!
write(*,*) 'still11'
do i=1,(xdiv-1)*(ydiv-1)
do j=1,(xdiv-1)*(ydiv-1)
Ainv(i,j)=A(i,j)
end do
end do
! do i=1,(xdiv-1)*(ydiv-1)
! write(*,*) myid,Ainv(i,i)
!end do
call DGESV((xdiv-1)*(ydiv-1), 1, A, &
(xdiv-1)*(ydiv-1), ipiv, f, (xdiv-1)*(ydiv-1), info)
write(*,*) 'still12'
call ErrorNorm(f,u_exact,(xdiv-1)*(ydiv-1),error_norm)
write(*,*) 'still13'
! ****************************************************
! Update boundary conditions based on new solution:
! ****************************************************
x_shift = divfx-1 ;
y_shift = divfy-1 ;
! write(*,*) 'LeftC', myid,LeftC,RightC,TopC,BottomC
if (LeftC.ne.0) then
do 50 i = 1,ydiv - 1
Left(i,1) = f((xdiv - 1)*(i - 1) + 1 + x_shift,1)
!if ((myid.eq.2).and.(iter.eq.1)) write(*,*) 'for left',i, &
!(xdiv - 1)*(i - 1) + 1 + x_shift
50 continue
end if
if (RightC.ne.0) then
do 60 i = 1,ydiv - 1
Right(i,1) = f((xdiv - 1)*i - x_shift,1)
!if ((myid.eq.1).and.(iter.eq.1)) write(*,*) 'for right',i, &
!(xdiv - 1)*i - x_shift
60 continue
end if
if (TopC.ne.0) then
do 70 i = 1,xdiv - 1
Top(i,1) = f((xdiv - 1)*((ydiv - 2) - y_shift) + i,1)
!if ((myid.eq.1).and.(iter.eq.1)) write(*,*) 'for top',i, &
!((xdiv - 1)*((ydiv - 2) - y_shift) + i)
70 continue
end if
if ( BottomC.ne.0) then
do 80 i = 1,xdiv - 1
Bottom(i,1) = f((xdiv - 1)*y_shift + i,1)
!if ((myid.eq.3).and.(iter.eq.1)) write(*,*) 'for bottom',i, &
!((xdiv - 1)*y_shift + i)
80 continue
end if
write(*,*) 'still14'
TempLeft =Left;
TempRight = Right;
TempTop = Top;
TempBottom = Bottom;
call MPI_SEND(error_norm, 1, MPI_DOUBLE_PRECISION, master, iter, &
MPI_COMM_WORLD, ierr)
write(*,*) 'still15'
if (iter.lt.iter_max) go to 1000
! *********************************************************************************
! Write solution to data file to view the results.
! *********************************************************************************
write (domaindata, "(A7,I2,A4)") "domain_",myid,".dat"
open (unit=myid*10, file = domaindata)
write (myid*10,*) ' VARIABLE= "X","Y","U_EXACT","U_CALC" '
do i=1,(xdiv-1)*(ydiv-1)
xd = edge(myid,1) + (dx)*mod(i,(xdiv-1))
if (mod(i,xdiv-1).eq.0) xd = edge(myid,1) + (dx)*(xdiv-1)
yd = edge(myid,3) + (dy)*ceiling(i*1.d0/(xdiv-1))
write (myid*10,*) xd, yd, u_exact(i,1), f(i,1)
enddo
write(*,*) 'still16'
if (iter.eq.max_iter) go to 200
200 continue
write(*,*) 'still45'
endif
call MPI_FINALIZE(ierr)
stop
end program main
subroutine Surround_dom(myid,domx,domy,LeftID, RightID, BottomID, TopID)
implicit none
integer myid, j, k, domy, domx, BottomID, TopID, LeftID, RightID
j = ceiling(1.d0*myid/domx)
k = mod(myid,domx)
if (k.eq.0) k = domx
! Domain on the left
if(k.eq.1) then
LeftID = 0
else
LeftID = ((j-1)*domx + k) - 1
end if
! Domain on the Right
if(k.eq.domx) then
RightID = 0
else
RightID = ((j-1)*domx + k) + 1
end if
! Domain on the Bottom
if(j.eq.1) then
BottomID = 0
else
BottomID = ((j-1)*domx + k) - domx
end if
! Domain on the Top
if(j.eq.domy) then
TopID = 0
else
TopID = ((j-1)*domx + k) + domx
end if
return
end
subroutine ErrorNorm(u,u_exact,N,error_norm)
implicit none
double precision u(N), u_exact(N), err, error_norm
integer i, N
error_norm = 0.d0
do 10 i = 1,N
err = (u(i) - u_exact(i))
error_norm = error_norm + err*err
10 continue
error_norm = sqrt(error_norm)/(N*1.d0)
return
end
I expect the code to run through all the iterations, giving me a respectable error on the order of 1e-3 to 1e-4.
Currently, no error shows up: the code successfully runs for one iteration and then doesn't produce any output at all, even after days. It would be really helpful if I could get some guidance. I am sorry that the structure of my code is a bit awkward; I am just a beginner. It also won't run if the number of domains is odd or if the number of domains is not equal to the number of processors. Any suggestions on how to remove these limitations are also welcome.
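For what it is worth, here is a minimal, self-contained sketch (not the solver above, only an assumed reading of its structure) of the master/worker handshake that the tag-0 test on the worker side seems to expect: the master sends the iteration number with a non-zero tag, collects one error value per worker, and finally sends a message with tag 0 so that every worker leaves its receive loop.
program handshake_sketch
    implicit none
    include 'mpif.h'
    integer :: myid, numprocs, ierr, status(MPI_STATUS_SIZE)
    integer :: iter, i, max_iter
    double precision :: err, total
    call MPI_INIT(ierr)
    call MPI_COMM_RANK(MPI_COMM_WORLD, myid, ierr)
    call MPI_COMM_SIZE(MPI_COMM_WORLD, numprocs, ierr)
    max_iter = 5
    if (myid .eq. 0) then
        do iter = 1, max_iter
            do i = 1, numprocs-1
                call MPI_SEND(iter, 1, MPI_INTEGER, i, iter, MPI_COMM_WORLD, ierr)
            end do
            total = 0.d0
            do i = 1, numprocs-1
                call MPI_RECV(err, 1, MPI_DOUBLE_PRECISION, i, iter, MPI_COMM_WORLD, status, ierr)
                total = total + err
            end do
            write(*,*) 'iteration', iter, 'cumulative error', total
        end do
        do i = 1, numprocs-1            ! tag 0 is the "done" signal the workers test for
            call MPI_SEND(0, 1, MPI_INTEGER, i, 0, MPI_COMM_WORLD, ierr)
        end do
    else
100     call MPI_RECV(iter, 1, MPI_INTEGER, 0, MPI_ANY_TAG, MPI_COMM_WORLD, status, ierr)
        if (status(MPI_TAG) .eq. 0) go to 200
        err = 1.d0/iter                 ! placeholder for the local subdomain solve
        call MPI_SEND(err, 1, MPI_DOUBLE_PRECISION, 0, iter, MPI_COMM_WORLD, ierr)
        go to 100
200     continue
    end if
    call MPI_FINALIZE(ierr)
end program handshake_sketch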

Writing a large matrix in a single file using MPI

I have a large N by N matrix containing real numbers, which has been decomposed into blocks using MPI. I am now trying to recompose this matrix and write it in a single file.
This topic (writing a matrix into a single txt file with mpi) covered a similar issue, but I got pretty confused by all the 'integer-to-string' conversion, etc (I am not an expert!). I am using Fortran for my code, but I guess that even a C explanation should help. I have been reading tutorials on MPI-IO, but there are still a few things I do not understand. Here is the code I have been working on:
use mpi
implicit none
! matrix dimensions
integer, parameter :: imax = 200
integer, parameter :: jmax = 100
! domain decomposition in each direction
integer, parameter :: iprocs = 3
integer, parameter :: jprocs = 3
! variables
integer :: i, j
integer, dimension(mpi_status_size) :: wstatus
integer :: ierr, proc_num, numprocs, fileno, localarray
integer :: loc_i, loc_j, ppp
integer :: istart, iend, jstart, jend
real, dimension(:,:), allocatable :: x
! initialize MPI
call mpi_init(ierr)
call mpi_comm_size(mpi_comm_world, numprocs, ierr)
call mpi_comm_rank(mpi_comm_world, proc_num, ierr)
! define the beginning and end of blocks
loc_j = proc_num/iprocs
loc_i = proc_num-loc_j*iprocs
ppp = (imax+iprocs-1)/iprocs
istart = loc_i*ppp + 1
iend = min((loc_i+1)*ppp, imax)
ppp = (jmax+jprocs-1)/jprocs
jstart = loc_j*ppp + 1
jend = min((loc_j+1)*ppp, jmax)
! write random data in each block
allocate(x(istart:iend,jstart:jend))
do j = jstart, jend
do i = istart, iend
x(i,j) = real(i + j)
enddo
enddo
! create subarrays
call mpi_type_create_subarray( 2, [imax,jmax], [iend-istart+1,jend-jstart+1], &
[istart,jstart], mpi_order_fortran, mpi_real, localarray, ierr )
call mpi_type_commit( localarray, ierr )
! write to file
call mpi_file_open( mpi_comm_world, 'test.dat', IOR(MPI_mode_create,MPI_mode_wronly), &
mpi_info_null, fileno, ierr )
call mpi_file_set_view( fileno, 0, mpi_real, localarray, "native", mpi_info_null, ierr )
call mpi_file_write_all( fileno, x, (jend-jstart+1)*(iend-istart+1), MPI_real, wstatus, ierr )
call mpi_file_close( fileno, ierr )
! deallocate data
deallocate(x)
! finalize MPI
call mpi_finalize(ierr)
I have been following this tutorial (PDF), but my compiler complains that there is no specific subroutine for the generic mpi_file_set_view. Did I do something wrong? Is the rest of the code ok?
Thank you very much for your help!!
Joachim
I would say that the easy way is to use a library designed to perform such operations efficiently : http://2decomp.org/mpiio.html
You can also look at their source code (files io.f90 and io_write_one.f90).
In the source code, you will see a call to MPI_FILE_SET_SIZE that may be relevant for your case.
EDIT: consider using "call MPI_File_Set_View(fhandle, 0_MPI_OFFSET_KIND, ...)". See the answer to MPI-IO: MPI_File_Set_View vs. MPI_File_Seek.
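To make that EDIT concrete, here is a minimal sketch of the change inside the program from the question (reusing its fileno, localarray and ierr variables): the displacement argument of mpi_file_set_view must be an integer of kind MPI_OFFSET_KIND, which is why the generic interface rejects a plain default-kind 0.
! declare a displacement of the right kind ...
integer(kind=mpi_offset_kind) :: offset
! ... and pass it (or the literal 0_mpi_offset_kind) as the second argument
offset = 0_mpi_offset_kind
call mpi_file_set_view( fileno, offset, mpi_real, localarray, "native", &
                        mpi_info_null, ierr )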

the use of MPI_Init()

I encountered a question about the use of MPI_Init().
I want to initialize the random number array "randv" only on the root process with the code below. To see if my goal is fulfilled, I have the program print out the array "randv" by placing a do loop immediately after the line "call RANDOM_NUMBER(randv)".
However, what is shown on the screen is the random number array repeated 8 times (given that the number of processes is 8). My question is: why do the processes other than the root one run this code before MPI_Init() is called? If all the processes are already running and have the same random number array before invoking MPI_Init, why bother calling MPI_Init() for initialization at all? Thanks.
Lee
Here is the example I use:
program main
include 'mpif.h'
integer :: i
integer :: ierr
integer :: irank
integer :: nrow, ncol
real, dimension(:,:), allocatable :: randv
nrow = 4
ncol = 2
allocate(randv(nrow,ncol))
call RANDOM_SEED
call RANDOM_NUMBER(randv)
do i = 1, nrow
write(*,'(2(f5.2,x))') randv(i,:)
enddo
call MPI_Init ( ierr )
allocate(row_list(ncol), col_list(nrow))
call MPI_Comm_rank ( MPI_COMM_WORLD, irank, ierr )
if( irank == 0 )then
do i = 1, nrow
write(*,'(2(f5.2,x))') randv(i,:)
enddo
endif
call MPI_Finalize ( ierr )
deallocate( randv )
end program
I think you misunderstand how MPI works. The program you wrote is executed by every process. MPI_Init initializes the MPI environment so that those processes can interact. After initialization, every process is uniquely identified by its rank. You have to make sure that, based on these ranks, each process works on a different portion of your data, or performs different tasks.
Typically, you should run MPI_Init before anything else in your program.
Using MPI_Comm_rank you can obtain the ID of the current process (its rank). The first process always has the rank 0. Therefore, if you want to run parts of the code on the "master" process only, you can test for irank == 0:
program main
include 'mpif.h'
integer :: i
integer :: ierr
integer :: irank
integer :: nrow, ncol
real, dimension(:,:), allocatable :: randv
real, dimension(:), allocatable :: row_list, col_list ! declared so the allocate below compiles
! Initialize MPI
call MPI_Init ( ierr )
! Get process ID
call MPI_Comm_rank ( MPI_COMM_WORLD, irank, ierr )
! Executed on all processes
nrow = 4
ncol = 2
allocate(randv(nrow,ncol))
! Only executed on the master process
if ( irank == 0 ) then
call RANDOM_SEED
call RANDOM_NUMBER(randv)
do i = 1, nrow
write(*,'(2(f5.2,x))') randv(i,:)
enddo
endif
! Executed on all processes
allocate(row_list(ncol), col_list(nrow))
! Only executed on the master process
if ( irank == 0 ) then
do i = 1, nrow
write(*,'(2(f5.2,x))') randv(i,:)
enddo
endif
deallocate( randv )
! Finalize MPI, should always be executed last
call MPI_Finalize ( ierr )
end program

partition a 2D array column-wise and use allgather

I have a Fortran MPI code in which a compute-intensive function is invoked on every element of a 2D array. I'm trying to split the tasks among the ranks. For example, if there are 30 columns and 10 ranks, then each rank gets 3 columns. The following code does this split and gathers the results using allgather, but the final array doesn't have the values from all ranks.
program allgather
include 'mpif.h'
!create a 2 x 30 myarray
integer :: x=2,y=30
integer :: numprocs,myid
integer :: i,j,k,myelements,mycolumns,jb,je
integer*4,dimension(:),allocatable :: displacement,recvcnt
real :: checksum
real,dimension(:,:),allocatable :: myarr,combinedarr
call MPI_INIT(IERR)
call MPI_COMM_SIZE(MPI_COMM_WORLD,NUMPROCS,IERR)
call MPI_COMM_RANK(MPI_COMM_WORLD,MYID,IERR)
mycolumns = y/numprocs
myelements = x * mycolumns
allocate(displacement(numprocs),recvcnt(numprocs))
jb = 1 + ( myid * mycolumns )
je = ( myid + 1 ) * mycolumns
allocate(myarr(x,mycolumns))
allocate(combinedarr(x,y))
myarr(:,:) =0
do j=jb,je
do i=1,x
myarr(i,j) = 1
enddo
enddo
!myarr(:,:)=1
if(mod(y,numprocs) > 0) then
if(myid==numprocs-1) then
jb=(myid + 1) * mycolumns + 1
do j=jb,y
do i=1,x
myarr(i,j) = 1
enddo
enddo
endif
endif
combinedarr(:,:) =0
recvcnt(:)=myelements
do k=1,numprocs
displacement(k) = (k-1) *myelements
enddo
call MPI_ALLGATHERV(myarr,myelements,MPI_REAL,combinedarr,recvcnt,displacement,MPI_REAL,MPI_COMM_WORLD,IERR)
if(mod(y,numprocs) > 0) then
recvcnt(:) = 0
recvcnt(numprocs) = (x*y) - myelements * (numprocs)
displacement(numprocs) = displacement(numprocs) + myelements
call MPI_ALLGATHERV(myarr,recvcnt(numprocs),MPI_REAL,combinedarr,recvcnt,displacement,MPI_REAL,MPI_COMM_WORLD,IERR)
endif
if (myid==0) then
checksum=0
write(6,*) "mycolumns:",mycolumns,"myelements:",myelements
do j=1,y
do i=1,x
checksum = checksum + combinedarr(i,j)
enddo
enddo
write(6,*) checksum
endif
end
First of all, you are using MPI_ALLGATHERV() just as MPI_ALLGATHER() and get no benefit from its ability to send a different number of elements from/to each process. But that's not the error in your program. The error lies in the way it fills myarr. You allocate it as myarr(x,mycolumns), but when filling it from column jb to column je you go past the end of the array in all processes except rank 0, since jb and je are greater than mycolumns there. Thus myarr contains ones only on rank 0 and zeroes on all other ranks. So, yes, the final array does not have the values that you expect, but that's because you filled it wrong, not because of the way the MPI subroutines are used.
Writing past the end of an allocatable array destroys the hidden structures that are used to manage heap allocation and usually crashes the program. In your case you are just lucky - I ran your code with Open MPI and it crashed with a core dump each time.
And you are also missing a call to MPI_FINALIZE() at the end of your code.
Hint: use the Fortran 90 interface if available - replace include 'mpif.h' with use mpi
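As an aside, here is a minimal sketch (an assumed variant, not the poster's code, reusing the declarations from the listings above) of how per-rank counts and displacements would let a single MPI_ALLGATHERV absorb the leftover columns, with each rank allocating only its own block and filling it from column 1:
mycolumns = y/numprocs
if (myid == numprocs-1) mycolumns = y - (numprocs-1)*(y/numprocs)   ! last rank takes the remainder
myelements = x*mycolumns
allocate(displacement(numprocs), recvcnt(numprocs))
allocate(myarr(x,mycolumns), combinedarr(x,y))
! ... fill myarr(:,1:mycolumns) with this rank's columns ...
do k = 1, numprocs
    recvcnt(k) = x*(y/numprocs)
    displacement(k) = (k-1)*x*(y/numprocs)
enddo
recvcnt(numprocs) = x*y - (numprocs-1)*x*(y/numprocs)
call MPI_ALLGATHERV(myarr, myelements, MPI_REAL, combinedarr, recvcnt, &
                    displacement, MPI_REAL, MPI_COMM_WORLD, IERR)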
Here is the final version of the code. I have implemented the fixes suggested by Hristo Iliev and also fixed the part where the number of ranks does not evenly divide the number of columns. Here the last rank does the computation on the leftover columns.
program allgather
include 'mpif.h'
!create a 4 x 6 myarray
integer :: x=4,y=6
integer :: numprocs,myid
integer :: i,j,k,myelements,mycolumns,jb,je,jbb
integer*4,dimension(:),allocatable :: displacement,recvcnt
real :: checksum
real,dimension(:,:),allocatable :: myarr,combinedarr
call MPI_INIT(IERR)
call MPI_COMM_SIZE(MPI_COMM_WORLD,NUMPROCS,IERR)
call MPI_COMM_RANK(MPI_COMM_WORLD,MYID,IERR)
mycolumns = y/numprocs
myelements = x * mycolumns
allocate(displacement(numprocs),recvcnt(numprocs))
jb = 1 + ( myid * mycolumns )
je = ( myid + 1 ) * mycolumns
allocate(myarr(x,y))
allocate(combinedarr(x,y))
myarr(:,:) =0
do j=jb,je
do i=1,x
myarr(i,j) = (j-1) * x + i
enddo
enddo
if(mod(y,numprocs) > 0) then
if(myid==numprocs-1) then
jbb=(myid + 1) * mycolumns + 1
do j=jbb,y
do i=1,x
myarr(i,j) = (j-1) * x + i
enddo
enddo
endif
endif
combinedarr(:,:) =0
recvcnt(:)=myelements
do k=1,numprocs
displacement(k) = (k-1) *myelements
enddo
call MPI_ALLGATHERV(myarr(1,jb),myelements,MPI_REAL,combinedarr,recvcnt,displacement,MPI_REAL,MPI_COMM_WORLD,IERR)
if(mod(y,numprocs) > 0) then
recvcnt(:) = 0
recvcnt(numprocs) = (x*y) - myelements * (numprocs)
displacement(numprocs) = displacement(numprocs) + myelements
call MPI_ALLGATHERV(myarr(1,jbb),recvcnt(numprocs),MPI_REAL,combinedarr,recvcnt,displacement,MPI_REAL,MPI_COMM_WORLD,IERR)
endif
if (myid==0) then
checksum=0
write(6,*) "mycolumns:",mycolumns,"myelements:",myelements
do j=1,y
do i=1,x
checksum = checksum + combinedarr(i,j)
enddo
enddo
write(6,*) checksum
endif
end
