I am trying to write an attribute in my HDF5 file. My code manages to write the proper value for the attribute if it is of type integer, but not for reals. This is the code:
! Number of processes is assumed to be 4
PROGRAM DATASET_BY_CHUNK
USE HDF5 ! This module contains all necessary modules
! USE MPI
IMPLICIT NONE
include 'mpif.h'
CHARACTER(LEN=7), PARAMETER :: filename = "test.h5" ! File name
CHARACTER(LEN=9), PARAMETER :: dsetname = "data_test" ! Dataset name
INTEGER(HSIZE_T), DIMENSION(1) :: data_dims
INTEGER(HID_T) :: file_id ! File identifier
INTEGER(HID_T) :: dset_id ! Dataset identifier
INTEGER(HID_T) :: filespace ! Dataspace identifier in file
INTEGER(HID_T) :: memspace ! Dataspace identifier in memory
INTEGER(HID_T) :: plist_id ! Property list identifier
INTEGER(HID_T) :: attr_id ! Attribute identifier
INTEGER(HID_T) :: aspace_id ! Attribute Dataspace identifier
INTEGER(HID_T) :: atype_id ! Attribute datatype identifier
INTEGER(HSIZE_T), DIMENSION(2) :: dimsf = (/4,8/) ! Dataset dimensions
! in the file.
! INTEGER, DIMENSION(7) :: dimsfi = (/4,8,0,0,0,0,0/)
INTEGER(HSIZE_T), DIMENSION (2) :: dimsfi = (/4,8/)
INTEGER(HSIZE_T), DIMENSION(2) :: chunk_dims = (/2,4/) ! Chunks dimensions
INTEGER(HSIZE_T), DIMENSION(2) :: count
INTEGER(HSSIZE_T), DIMENSION(2) :: offset
INTEGER(HSIZE_T), DIMENSION(2) :: stride
INTEGER(HSIZE_T), DIMENSION(2) :: block
INTEGER(HSIZE_T), DIMENSION(1) :: adims ! Attribute dimension
INTEGER :: arank = 1 ! Attribute rank
INTEGER(SIZE_T) :: attrlen ! Length of the attribute string
CHARACTER(LEN=80) :: attr_data ! Attribute data
INTEGER, ALLOCATABLE :: data (:,:) ! Data to write
INTEGER :: rank = 2 ! Dataset rank
real re
INTEGER :: error, error_n ! Error flags
!
! MPI definitions and calls.
!
INTEGER :: mpierror ! MPI error flag
INTEGER :: comm, info
INTEGER :: mpi_size, mpi_rank
comm = MPI_COMM_WORLD
info = MPI_INFO_NULL
CALL MPI_INIT(mpierror)
CALL MPI_COMM_SIZE(comm, mpi_size, mpierror)
CALL MPI_COMM_RANK(comm, mpi_rank, mpierror)
! Quit if mpi_size is not 4
if (mpi_size .NE. 4) then
write(*,*) 'This example is set up to use only 4 processes'
write(*,*) 'Quitting....'
goto 100
endif
attr_data = "Dataset character attribute"
!
! Initialize HDF5 library and Fortran interfaces.
!
CALL h5open_f(error)
!
! Setup file access property list with parallel I/O access.
!
CALL h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, error)
CALL h5pset_fapl_mpio_f(plist_id, comm, info, error)
!
! Create the file collectively.
!
CALL h5fcreate_f(trim(filename), H5F_ACC_TRUNC_F, file_id, error, access_prp = plist_id)
CALL h5pclose_f(plist_id, error)
!
! Create some attribute
!
re = 20.0
!
! Create scalar data space for the attribute.
!
call h5screate_f(H5S_SCALAR_F,aspace_id,error)
adims=80
! -----------------------------
! Reynolds number
CALL h5acreate_f(file_id,'Re',H5T_NATIVE_DOUBLE,aspace_id, &
attr_id, error)
CALL h5awrite_f(attr_id,H5T_NATIVE_DOUBLE,re,adims,error)
CALL h5aclose_f(attr_id, error)
!
! Terminate access to the data space.
!
CALL h5sclose_f(aspace_id, error)
!
! Create the data space for the dataset.
!
CALL h5screate_simple_f(rank, dimsf, filespace, error)
CALL h5screate_simple_f(rank, chunk_dims, memspace, error)
!
! Create chunked dataset.
!
CALL h5pcreate_f(H5P_DATASET_CREATE_F, plist_id, error)
CALL h5pset_chunk_f(plist_id, rank, chunk_dims, error)
CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, filespace, &
dset_id, error, plist_id)
CALL h5sclose_f(filespace, error)
!
! Each process defines dataset in memory and writes it to the hyperslab
! in the file.
!
stride(1) = 1
stride(2) = 1
count(1) = 1
count(2) = 1
block(1) = chunk_dims(1)
block(2) = chunk_dims(2)
if (mpi_rank .EQ. 0) then
offset(1) = 0
offset(2) = 0
endif
if (mpi_rank .EQ. 1) then
offset(1) = chunk_dims(1)
offset(2) = 0
endif
if (mpi_rank .EQ. 2) then
offset(1) = 0
offset(2) = chunk_dims(2)
endif
if (mpi_rank .EQ. 3) then
offset(1) = chunk_dims(1)
offset(2) = chunk_dims(2)
endif
!
! Select hyperslab in the file.
!
CALL h5dget_space_f(dset_id, filespace, error)
CALL h5sselect_hyperslab_f (filespace, H5S_SELECT_SET_F, offset, count, error, &
stride, block)
!
! Initialize data buffer with trivial data.
!
ALLOCATE (data(chunk_dims(1),chunk_dims(2)))
data = mpi_rank + 1
!
! Create property list for collective dataset write
!
CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error)
CALL h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, error)
!
! Write the dataset collectively.
!
CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, dimsfi, error, &
file_space_id = filespace, mem_space_id = memspace, xfer_prp = plist_id)
!
! Write the dataset independently.
!
! CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, dimsfi,error, &
! file_space_id = filespace, mem_space_id = memspace)
!
! Deallocate data buffer.
!
DEALLOCATE(data)
!
! Close dataspaces.
!
CALL h5sclose_f(filespace, error)
CALL h5sclose_f(memspace, error)
!
! Close the dataset.
!
CALL h5dclose_f(dset_id, error)
!
! Close the property list.
!
CALL h5pclose_f(plist_id, error)
!
! Close the file.
!
CALL h5fclose_f(file_id, error)
!
! Close FORTRAN interfaces and HDF5 library.
!
CALL h5close_f(error)
100 continue
CALL MPI_FINALIZE(mpierror)
END PROGRAM DATASET_BY_CHUNK
The program runs satisfactorily, but when checking in MATLAB with h5disp, I get this:
Attributes:
'Re': 0.000000
Any suggestions about how to fix it would be great! Thanks a lot.
A Minimal, Complete, and Verifiable example (MCVE) would help both you (to spot your mistake) and us (to figure out what you are really doing).
So in that spirit, here's one. Please note there is no error checking; this is bad, bad, bad.
program fa
use hdf5
implicit none
character(len=8), parameter :: filename = "test.h5"
integer(hid_t) :: file_id
integer(hid_t) :: attr_id
integer(hid_t) :: aspace_id
integer(size_t), dimension(1) :: adims = (/0/)
integer :: ierr
real :: re
re = 20.0
call h5open_f(ierr)
call h5fcreate_f(trim(filename), H5F_ACC_TRUNC_F, file_id, ierr)
call h5screate_f(H5S_SCALAR_F, aspace_id, ierr)
call h5acreate_f(file_id, 'Reynolds number', H5T_NATIVE_REAL, &
aspace_id, attr_id, ierr)
call h5awrite_f(attr_id, H5T_NATIVE_REAL, re, adims, ierr)
call h5aclose_f(attr_id, ierr)
call h5sclose_f(aspace_id, ierr)
call h5fclose_f(file_id, ierr)
call h5close_f(ierr)
end program fa
If you compile, run then dump the file:
$ h5fc -o fa fa.f90
$ ./fa
$ h5dump test.h5
HDF5 "test.h5" {
GROUP "/" {
ATTRIBUTE "Reynolds number" {
DATATYPE H5T_IEEE_F32LE
DATASPACE SCALAR
DATA {
(0): 20
}
}
}
}
Now, the question I have to ask about your code is: why is adims set to 80?
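For a scalar dataspace it should describe the one-element buffer, i.e. adims = 1. The other problem in your code is a type mismatch: re is a default (single-precision) real, but the attribute is created and written as H5T_NATIVE_DOUBLE, so HDF5 reads 8 bytes per element from a 4-byte buffer and you end up with garbage. A minimal sketch of the corrected attribute block, assuming re stays a default real:
adims = 1
CALL h5screate_f(H5S_SCALAR_F, aspace_id, error)
! The memory datatype must match the declared kind of 're'
CALL h5acreate_f(file_id, 'Re', H5T_NATIVE_REAL, aspace_id, attr_id, error)
CALL h5awrite_f(attr_id, H5T_NATIVE_REAL, re, adims, error)
CALL h5aclose_f(attr_id, error)
CALL h5sclose_f(aspace_id, error)
Alternatively, declare re as double precision and keep H5T_NATIVE_DOUBLE; what matters is that the memory type passed to h5awrite_f agrees with the actual kind of the buffer.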
I would highly suggest three things:
A MCVE.
Error checking.
Regarding MPI: you should always get a serial version working (within reason) and then parallelize it. Yes, I know this might be near impossible, but if you do go this route it will make your life easier (at least to begin with).
I am trying out various ways of IPC to do the following:
Master starts.
Master starts a slave.
Master passes an array to slave.
Slave processes the array.
Slave sends the array back to master.
I have tried using OpenMPI to solve this by having the parent process spawn a child which in turn does the aforementioned processing. However, I have also tried - what I thought would be the worst possible way to do this - letting master write the data to a file and have slave read and write back to that file. The result is stunning.
Below are the two ways in which I achieve this. The first is the "file" way; the second uses OpenMPI.
Master.f90
program master
implicit none
integer*4, dimension (10000) :: matrix
integer :: length, i, exitstatus, cmdstatus
logical :: waistatus
! put integers in matrix and output data into a file
open(1, file='matrixdata.dat', status='new')
length = 10000
do i=1,length
matrix(i) = i
write(1,*) matrix(i)
end do
close(1)
call execute_command_line("./slave.out", wait = .true., exitstat=exitstatus)
if(exitstatus .eq. 0) then
! open and read the file changed by subroutine slave
open(1, file= 'matrixdata.dat', status='old')
do i = 1, length
read(1,*) matrix(i)
end do
close(1)
endif
end program master
Slave.f90
program slave
implicit none
integer*4, dimension (10000) :: matrix
integer :: length, i
! Open and read the file made by master into a matrix
open (1, file= 'matrixdata.dat', status = 'old')
length = 10000
do i = 1, length
read(1,*) matrix(i)
end do
close(1)
! Square all numbers and write over the file with new data
open(1, file= 'matrixdata.dat', status = 'old')
do i=1,length
matrix(i) = matrix(i)**2
write(1,*) matrix(i)
end do
close(1)
end program slave
* OpenMPI *
Master.f90
program master
use mpi
implicit none
integer :: ierr, num_procs, my_id, intercomm, i, siz, array(10000000), s_tag, s_dest, siffra
CALL MPI_INIT(ierr)
CALL MPI_COMM_RANK(MPI_COMM_WORLD, my_id, ierr)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
siz = 10000
!print *, "S.Rank =", my_id
!print *, "S.Size =", num_procs
if (.not. (ierr .eq. 0)) then
print*, "S.Unable to initilaize bös!"
stop
endif
do i=1,size(array)
array(i) = 2
enddo
if (my_id .eq. 0) then
call MPI_Comm_spawn("./slave.out", MPI_ARGV_NULL, 1, MPI_INFO_NULL, my_id, &
& MPI_COMM_WORLD, intercomm, MPI_ERRCODES_IGNORE, ierr)
s_dest = 0 !rank of destination (integer)
s_tag = 1 !message tag (integer)
call MPI_Send(array(1), siz, MPI_INTEGER, s_dest, s_tag, intercomm, ierr)
call MPI_Recv(array(1), siz, MPI_INTEGER, s_dest, s_tag, intercomm, MPI_STATUS_IGNORE, ierr)
!do i=1,10
! print *, "S.Array(",i,"): ", array(i)
!enddo
endif
call MPI_Finalize(ierr)
end program master
Slave.f90
program name
use mpi
implicit none
! type declaration statements
integer :: ierr, parent, my_id, n_procs, i, siz, array(10000000), ctag, csource, intercomm, siffra
logical :: flag
siz = 10000
! executable statements
call MPI_Init(ierr)
call MPI_Initialized(flag, ierr)
call MPI_Comm_get_parent(parent, ierr)
call MPI_Comm_rank(MPI_COMM_WORLD, my_id, ierr)
call MPI_Comm_size(MPI_COMM_WORLD, n_procs, ierr)
csource = 0 !rank of source
ctag = 1 !message tag
call MPI_Recv(array(1), siz, MPI_INTEGER, csource, ctag, parent, MPI_STATUS_IGNORE, ierr)
!do i=1,10
! print *, "C.Array(",i,"): ", array(i)
!enddo
do i=1,size(array)
array(i) = array(i)**2
enddo
!do i=1,10
! print *, "C.Array(",i,"): ", array(i)
!enddo
call MPI_Send(array(1), siz, MPI_INTEGER, csource, ctag, parent, ierr)
call MPI_Finalize(ierr)
end program name
Now, the interesting part is that, using the time utility, I have measured that it takes 19.8 ms to execute the file version of the program, while the OpenMPI version takes 60 ms. Why? Is there really so much overhead in OpenMPI that it is faster to read and write a file when working with <400 KiB?
I tried increasing the array to 10^5 integers. The file version executes in 114 ms, OpenMPI in 53 ms. When increasing to 10^6 integers, the file version takes 1103 ms and OpenMPI takes 77 ms.
Is the overhead really that much?
Fundamentally, it doesn't make sense to use distributed processing for problem sizes that fit in cache (except in some trivially parallel cases). The typical usage scenario is data transfer much larger than the last-level cache (LLC). Even your biggest case (10^6 four-byte integers, i.e. about 4 MB) fits in modern caches.
Firstly, for the method of writing to disk, you have to be aware of the influence of the page cache in your operating system. If your MPI processes are on the same node, the operating system just hears 'do a write' then 'do a read'. If, in the interim, nothing pollutes the page cache, it will fetch the data from RAM as opposed to the disk. A better experiment would be to flush the page cache between the write and the read (this is possible, at least on Linux, via a shell command, shown below). In effect you are performing shared-memory processing if you're grabbing the data from the page cache.
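On Linux, for instance, the page cache can be dropped between the write and the read with (as root):
$ sync
$ echo 3 > /proc/sys/vm/drop_caches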
Secondly, you are using time on the command line, so you're incorporating the time it takes for MPI to initialize and establish its communication interfaces with a few function calls. This is not a good benchmark, because the interface used by the disk I/O method has already been initialized by the operating system. Also, for such a small problem size, the initialization of MPI is nontrivial compared to the runtime of the body of the program. The proper way to do this is to do the timing in the code itself, as sketched below.
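A minimal sketch, reusing the names from the Master.f90 above (MPI_Wtime returns wall-clock seconds as a double precision value):
double precision :: t0, t1
...
if (my_id .eq. 0) then
    t0 = MPI_Wtime()
    call MPI_Send(array(1), siz, MPI_INTEGER, s_dest, s_tag, intercomm, ierr)
    call MPI_Recv(array(1), siz, MPI_INTEGER, s_dest, s_tag, intercomm, MPI_STATUS_IGNORE, ierr)
    t1 = MPI_Wtime()
    print *, 'round trip (s):', t1 - t0
endif
This leaves MPI_Init and the spawn outside the measured region, which is exactly the overhead you want to exclude.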
For both methods, you should expect linear scaling biased by the overhead of the method. In fact, you should see a few distinct regimes as the data size surpasses the LLC and the page cache. The best way to check is to repeat your runs with ARRAY_SIZE = 2^n for n = 12, 13, ..., 24 and examine the curve.
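For example, assuming a hypothetical benchmark binary bench that takes the array size as a command-line argument:
$ for n in $(seq 12 24); do ./bench $((2**n)); done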
I want to test whether MPI_Isend and MPI_Irecv have completed successfully.
I have two request vectors: one for all the MPI_Isend calls, the other for all the MPI_Irecv calls.
The problem is that the program runs fine until it reaches the MPI_TEST loop. I have tried with as few as 2 iterations (do i=1,2), and I still get the same error:
Fatal error in PMPI_Test: Invalid MPI_Request, error stack:
PMPI_Test(166): MPI_Test(request=0x7fff93fd2220, flag=0x7fff93fd1ffc,
status=0x7fff93fd2890) failed PMPI_Test(121): Invalid MPI_Request
INTEGER :: ierr, myid, istatus(MPI_STATUS_SIZE), num, i, n
INTEGER,parameter :: seed = 86456, numbers=200
INTEGER :: req1(numbers), req2(numbers)
LOGICAL :: flag
CALL MPI_COMM_RANK(MPI_COMM_WORLD, myid, ierr)
IF (myid==0) THEN
DO n=1, numbers
req1(n)=0
req2(n)=0
num=IRAND()
CALL MPI_ISEND(num,1,MPI_INTEGER,1,1,MPI_COMM_WORLD,req1(n),ierr)
CALL MPI_IRECV(best_prime,1,MPI_INTEGER,1,0,MPI_COMM_WORLD,req2(n),ierr)
END DO
ELSE IF (myid==1) THEN
DO i=1, numbers
CALL MPI_TEST(req2(i),flag,istatus,ierr)
IF (flag .eqv. .false.) THEN
WRITE(*,*)'RECV',i,'non-blocking FAIL'
ELSE IF (flag .eqv. .true.) THEN
WRITE(*,*)'RECV',i,'non-blocking SUCCESS'
END IF
END DO
END IF
I have a bunch of errors coming up saying that my variables are not variables and that my statements are undefined. This subroutine is needed to compile my larger program, so I just copy-pasted it at the end of my program; compiling them as separate files didn't work either.
How would I go about fixing those "undefined" issues? Is it because of the way I copy-pasted my subroutine at the end of my program?
(I compiled with g77 and with gfortran, same thing happens)
geomalb.f:1088.6:
TEMP(J)= TLINAL(J)
1
Error: Unclassifiable statement at (1)
geomalb.f:1089.6:
DEN(J)= DLINAL(J)
1
Error: Unclassifiable statement at (1)
geomalb.f:1090.6:
PRESS(J)=PLINAL(J)
1
Error: Unclassifiable statement at (1)
geomalb.f:1110.6:
XMU(J)=28.0134*XN2(J)+2.158*H2(J)+16.0426*CH4(J)+39.948*AR(J)
1
Error: Unclassifiable statement at (1)
geomalb.f:1132.6:
1001 IF (IPRINT .LT. 0) RETURN
1
Error: Bad continuation line at (1)
geomalb.f:1152.72:
ENDDO
1
Error: Statement label in ENDDO at (1) doesn't match DO label
geomalb.f:1130.72:
IF (DT .LT. 0.001) GO TO 1001
1
Error: Label 1001 referenced at (1) is never defined
geomalb.f:72.27:
CALL ATMSETUP(NLEVEL,Z,RHCH4,FH2,FARGON,TEMP,PRESS,DEN,XMU,
1
Warning: Rank mismatch in argument 'z' at (1) (scalar and rank-1)
Here is the subroutine part of the program:
SUBROUTINE ATMSETUP(NLEVEL,Z,RHCH4,FH2,FARGON,TEMP,PRESS,DEN,XMU,
& CH4,H2,XN2,AR,IPRINT)
PARAMETER (NMAX=201)
DIMENSION CH4(1),H2(1),XN2(1),AR(1)
DIMENSION TLINAL(NMAX),DLINAL(NMAX),PLINAL(NMAX)
CALL LINDAL(NLEVEL,Z,TLINAL,DLINAL,PLINAL)
DO J=1,NLEVEL
TEMP(J)= TLINAL(J)
DEN(J)= DLINAL(J)
PRESS(J)=PLINAL(J)
ENDDO
DO 1000 ITS =1,20
CH4(NLEVEL)=PCH4(TEMP(NLEVEL))*RHCH4/PRESS(NLEVEL)
DO 134 J=NLEVEL-1,1,-1
CH4SAT=PCH4(TEMP(J))/PRESS(J)
CH4(J)=AMIN1(CH4SAT,CH4(NLEVEL),CH4(J+1))
134 CONTINUE
DO 20 J=1,NLEVEL
H2(J)=FH2
IF (FARGON .LT. 0.) THEN
AR(J)=(-FARGON-28.0134+25.8554*H2(J)+11.9708*CH4(J))/11.9346
ELSE
IF (FARGON .EQ. 0.) THEN
AR(J)=0.0
ELSE
AR(J)=FARGON
ENDIF
ENDIF
XN2(J)=1.0 - H2(J) - CH4(J) -AR(J)
XMU(J)=28.0134*XN2(J)+2.158*H2(J)+16.0426*CH4(J)+39.948*AR(J)
20 CONTINUE
SUMT=PLINAL(1)*6.02E23/10.
SUMB=SUMT
TLAST=TEMP(NLEVEL)
DO J=2,NLEVEL
DENF=294.1/(XN2(J)*294.1 + CH4(J)*410. + H2(J)*136. + AR(J)*277.8)
DEN(J) = DLINAL(J)*DENF
ADEN=(DEN(J)-DEN(J-1))/ALOG(DEN(J)/DEN(J-1))
SUMT=SUMT+(EFFG(Z(J))*ADEN)*( Z(J-1)-Z(J))*XMU(J)
ADEN=(DLINAL(J)-DLINAL(J-1))/ALOG(DLINAL(J)/DLINAL(J-1))
SUMB=SUMB+(EFFG(Z(J))*ADEN)*( Z(J-1)-Z(J))*28.01340
PRESS(J)=PLINAL(J)*SUMT/SUMB
TEMP(J) =TLINAL(J)*(SUMT/SUMB)*(1./DENF)
ENDDO
30 CONTINUE
DT= ABS(TEMP(NLEVEL)-TLAST)
IF (DT .LT. 0.001) GO TO 1001
1000 CONTINUE
1001 IF (IPRINT .LT. 0) RETURN
WRITE (6,139)RHCH4,FH2,FARGON,DT
DO 135 J=1,NLEVEL-1
WRITE(6,140)J,Z(J),PRESS(J),DEN(J),TEMP(J),
& CH4(J)*PRESS(J)/PCH4(TEMP(J))
& ,CH4(J)*100.,XN2(J)*100.,H2(J)*100.,AR(J)*100.,XMU(J)
& ,(TEMP(J+1)-TEMP(J))/(Z(J+1)-Z(J))
135 CONTINUE
J=NLEVEL
WRITE(6,140)J,Z(J),PRESS(J),DEN(J),TEMP(J),
& CH4(J)*PRESS(J)/PCH4(TEMP(J))
& ,CH4(J)*100.,XN2(J)*100.,H2(J)*100.,AR(J)*100.,XMU(J)
139 FORMAT(///' BACKGROUND ATMOSPHERE AT LEVELS'/
& ' SURFACE HUMIDITY OF CH4:',F5.3,' H2 MIXING RATIO:',F6.4,
& ' ARGON SETTING:',F8.4/' FINAL CONVERGENCE ON TEMP:',F10.5
& ' LINDAL ET AL SCALING'/
&' LVL ALTITUDE P(BARS) DEN(CM-3) TEMP RH-CH4'
& , ' %CH4 %N2 %H2 %AR MU DT/DZ' )
140 FORMAT(1X,I3,F8.3,1P2E10.3,0PF7.2,F5.2,2F6.2,2F5.2,4F6.2)
RETURN
ENDDO
END
You will need a
DIMENSION DEN(1), PRESS(1), TEMP(1)
statement in the subroutine. Otherwise the subroutine does not "know" that these variables are to be treated as arrays.
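Judging from the rest of your error listing, XMU (the flagged assignment at 1110.6) and Z (the rank-mismatch warning) need the same treatment. A sketch of how the top of the subroutine would then look:
SUBROUTINE ATMSETUP(NLEVEL,Z,RHCH4,FH2,FARGON,TEMP,PRESS,DEN,XMU,
& CH4,H2,XN2,AR,IPRINT)
PARAMETER (NMAX=201)
DIMENSION CH4(1),H2(1),XN2(1),AR(1)
DIMENSION TEMP(1),PRESS(1),DEN(1),XMU(1),Z(1)
DIMENSION TLINAL(NMAX),DLINAL(NMAX),PLINAL(NMAX)
The reason for the "Unclassifiable statement" messages is that, without an array declaration, an assignment like TEMP(J)=TLINAL(J) can only be parsed as a statement function definition, and those are not allowed after the first executable statement.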
All, I've been fighting these errors for hours, here's my code:
program hello
implicit none
integer :: k, n, iterator
integer, dimension(18) :: objectArray
call SetVariablesFromFile()
do iterator = 1, 18
write(*,*) objectArray(iterator)
end do
contains
subroutine SetVariablesFromFile()
IMPLICIT NONE
integer :: status, ierror, i, x
open(UNIT = 1, FILE = 'input.txt', &
ACTION = 'READ',STATUS = 'old', IOSTAT = ierror)
if(ierror /= 0) THEN
write(*, *) "Failed to open input.txt!"
stop
end if
do i = 1, 18
objectArray(i) = read(1, *, IOSTAT = status) x
if (status > 0) then
write(*,*) "Error reading input file"
exit
else if (status < 0) then
write(*,*) "EOF"
exit
end if
end do
close(1)
END subroutine SetVariablesFromFile
end program hello
I'm getting compile errors:
make: *** [hello.o] Error 1
Syntax error in argument list at (1)
I read online that the latter error could be due to a line of code exceeding 132 characters, which doesn't appear to be the problem. I have nowhere to begin on the first error... any help would be much appreciated!
This,
objectArray(i) = read(1, *, IOSTAT = status) x
is not valid Fortran. You need to write it as,
read(1,*,iostat=status) objectArray(i)
With it in this correct form, I received no compiler errors with ifort 12.1, nor with gfortran 4.4.3.
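In context, the read loop becomes (a sketch keeping your status handling; the temporary x is then unused):
do i = 1, 18
    read(1, *, IOSTAT = status) objectArray(i)
    if (status > 0) then
        write(*,*) "Error reading input file"
        exit
    else if (status < 0) then
        write(*,*) "EOF"
        exit
    end if
end do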