!------------------------------------------------------------------------------
! EPFL/Swiss Plasma Center
!------------------------------------------------------------------------------
!
! MODULE: mpihelper
!
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> Module responsible for setting up the MPI variables used in the communications.
!------------------------------------------------------------------------------
MODULE mpihelper
  USE constants
  USE mpi

  IMPLICIT NONE

  !> Structure containing the simulation parameters read from the input file
  TYPE BASICDATA
    LOGICAL :: nlres, nlsave, newres, nlxg, nlppform, nlPhis, nlclassical
    INTEGER :: nplasma, nz, it0d, it2d, itparts, &
    &          itgraph, distribtype, nblock, nrun
    REAL(kind=db) :: job_time, extra_time, tmax, dt, &
    &                potinn, potout, B0, n0, temp, &
    &                Rcurv, width, H0, P0
    INTEGER, DIMENSION(2) :: femorder, ngauss
    INTEGER, DIMENSION(3) :: nnr
    REAL(kind=db), DIMENSION(2) :: lz
    REAL(kind=db), DIMENSION(4) :: radii, plasmadim
    CHARACTER(len=64) :: resfile = "results.h5"
    LOGICAL :: partperiodic
    INTEGER :: samplefactor
  END TYPE BASICDATA

  !> Structure containing a single particle position and velocity used in MPI communications.
  TYPE particle
    INTEGER :: Rindex, Zindex, partindex
    REAL(kind=db) :: R, Z, THET, &
    &                UZ, UR, UTHET, &
    &                Gamma
  END TYPE particle

  INTEGER, SAVE :: basicdata_type = MPI_DATATYPE_NULL       !< Stores the MPI data type used for communicating basicdata
  INTEGER, SAVE :: particle_type = MPI_DATATYPE_NULL        !< Stores the MPI data type used for particle communication
  INTEGER, SAVE :: rhsoverlap_type = MPI_DATATYPE_NULL      !< Stores the MPI data type used for the communication of a rhs column
  INTEGER, SAVE :: db_type = MPI_DATATYPE_NULL              !< Stores the MPI data type used for the communication of a REAL(kind=db)
  INTEGER, SAVE :: momentsoverlap_type = MPI_DATATYPE_NULL  !< Stores the MPI data type used for the communication of a column
                                                            !< of a grid variable
  INTEGER, SAVE :: rcvrhsoverlap_type = MPI_DATATYPE_NULL   !< Stores the MPI data type used for the receive communication
                                                            !< of a rhs column
  INTEGER, SAVE :: rcvmomentsoverlap_type = MPI_DATATYPE_NULL !< Stores the MPI data type used for the receive communication
                                                              !< of a column of a grid variable
  INTEGER, SAVE :: db_sum_op                                !< Stores the MPI sum operation for db_type

  REAL(kind=db), ALLOCATABLE, SAVE :: rhsoverlap_buffer(:)     !< Buffer used for storing the rhs ghost cells
                                                               !< received from the left or right MPI process
  REAL(kind=db), ALLOCATABLE, SAVE :: momentsoverlap_buffer(:) !< Buffer used for storing the moments ghost cells
                                                               !< received from the left or right MPI process

  !INTEGER, SAVE:: momentsoverlap_requests(2) = MPI_REQUEST_NULL
  !INTEGER, SAVE:: rhsoverlap_requests(2) = MPI_REQUEST_NULL
  INTEGER :: rhsoverlap_tag = 20
  INTEGER :: momentsoverlap_tag = 30

CONTAINS

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Initialize the MPI types used for inter process communications
  !
  !---------------------------------------------------------------------------
  SUBROUTINE init_mpitypes
    IMPLICIT NONE
    INTEGER :: ierr

    ! Initialize db_type to represent REAL(kind=db) in MPI and create the matching sum operator for reductions
    CALL MPI_TYPE_CREATE_F90_REAL(dprequestedprec, MPI_UNDEFINED, db_type, ierr)
    CALL MPI_TYPE_COMMIT(db_type, ierr)
    CALL MPI_OP_CREATE(DB_sum, .true., db_sum_op, ierr)

    ! Create and commit the particle type
    CALL init_particlempi

  END SUBROUTINE init_mpitypes
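  !---------------------------------------------------------------------------
  !> Example of the intended initialization order (illustrative sketch only; the
  !> variables ierr, nrank, femorder, zlimleft and zlimright, as well as the value
  !> 10 for the number of moments, are assumptions of this example and are not
  !> defined in this module):
  !> @code
  !> CALL MPI_INIT(ierr)
  !> CALL init_mpitypes                                            ! creates db_type, db_sum_op and particle_type
  !> CALL init_overlaps(nrank, femorder, zlimleft, zlimright, 10)  ! allocates the buffers and creates the column types
  !> @endcode
  !---------------------------------------------------------------------------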
  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Computes the sum in MPI_Reduce operations involving REAL(kind=db)
  !
  !---------------------------------------------------------------------------
  SUBROUTINE DB_sum(INVEC, INOUTVEC, LEN, TYPE)
    INTEGER :: LEN, TYPE
    REAL(kind=db) :: INVEC(0:LEN-1), INOUTVEC(0:LEN-1)
    INTEGER :: i

    DO i=0,LEN-1
      INOUTVEC(i)=INVEC(i)+INOUTVEC(i)
    END DO

  END SUBROUTINE DB_sum

  !--------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Initialize the buffers and MPI column types used to exchange the overlapping
  !> grid cells between neighbouring processes
  !
  !> @param[in] nrank ranks of the FEM array in (1) z direction and (2) r direction
  !> @param[in] femorder finite element method order in z and r direction
  !> @param[in] zlimleft z index delimiting the mpi local left boundary
  !> @param[in] zlimright z index delimiting the mpi local right boundary
  !> @param[in] nbmoments number of moments calculated and stored.
  !
  !---------------------------------------------------------------------------
  SUBROUTINE init_overlaps(nrank, femorder, zlimleft, zlimright, nbmoments)
    INTEGER, INTENT(IN) :: nrank(:), femorder(:), zlimright, zlimleft, nbmoments

    IF(ALLOCATED(rhsoverlap_buffer)) DEALLOCATE(rhsoverlap_buffer)
    IF(ALLOCATED(momentsoverlap_buffer)) DEALLOCATE(momentsoverlap_buffer)
    ALLOCATE(rhsoverlap_buffer(nrank(2)*femorder(1)))
    ALLOCATE(momentsoverlap_buffer(nbmoments*nrank(2)*femorder(1)))

    WRITE(*,*) "nrank, femorder", nrank, femorder

    ! Initialize the MPI column overlap type for rhs
    CALL init_coltypempi(nrank(2), zlimright-zlimleft+femorder(1), 1, nbmoments, db_type, rhsoverlap_type)
    ! Initialize the MPI grid col type
    CALL init_coltypempi(nrank(2), zlimright-zlimleft+femorder(1), nbmoments, 1, db_type, momentsoverlap_type)
    ! Initialize the MPI receive column overlap type for rhs
    CALL init_coltypempi(nrank(2), nrank(1), 1, nbmoments, db_type, rcvrhsoverlap_type)
    ! Initialize the MPI receive grid col type
    CALL init_coltypempi(nrank(2), nrank(1), nbmoments, 1, db_type, rcvmomentsoverlap_type)

  END SUBROUTINE init_overlaps

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Starts the persistent send (1) and receive (2) requests and waits for their completion
  !
  !> @param[inout] requests persistent requests created beforehand
  !> @param[in] mpirank rank of the current process in MPI_COMM_WORLD
  !> @param[in] leftproc rank of the left (lower z) neighbouring process
  !> @param[in] rightproc rank of the right (higher z) neighbouring process
  !---------------------------------------------------------------------------
  SUBROUTINE start_persistentcomm(requests, mpirank, leftproc, rightproc)
    INTEGER, ASYNCHRONOUS, INTENT(INOUT) :: requests(:)
    INTEGER, INTENT(IN) :: mpirank, leftproc, rightproc
    INTEGER :: ierr
    INTEGER :: stats(MPI_STATUS_SIZE,2)
    LOGICAL :: completed=.false.

    IF(leftproc .lt. mpirank) THEN
      ! Start to receive
      CALL MPI_START(requests(2), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in recv_init"
    END IF

    IF(rightproc .gt. mpirank) THEN
      ! Start to send
      CALL MPI_START(requests(1), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in send_init"
    END IF

    IF(leftproc .lt. mpirank) THEN
      ! Wait for the receive to complete
      completed=.FALSE.
      DO WHILE(.not. completed)
        CALL MPI_TEST(requests(2), completed, stats(:,2), ierr)
      END DO
      WRITE(*,*) "status 2", completed, stats(:,2)
      !CALL MPI_WAIT(requests(2),stats(:,2),ierr)
      !WRITE(*,*)"status 2", stats(:,2)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in recv_init"
    END IF

    IF(rightproc .gt. mpirank) THEN
      ! Wait for the send to complete
      completed=.FALSE.
      DO WHILE(.not. completed)
        CALL MPI_TEST(requests(1), completed, stats(:,1), ierr)
      END DO
      !CALL MPI_WAIT(requests(1),stats(:,1),ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in send_init"
    END IF

  END SUBROUTINE start_persistentcomm
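  !---------------------------------------------------------------------------
  !> Example of how the persistent requests expected by start_persistentcomm
  !> could be created (sketch only; sendbuf, recvbuf, count, tag and the
  !> neighbour ranks are placeholders of this example, not entities defined in
  !> this module):
  !> @code
  !> INTEGER :: requests(2), ierr
  !> CALL MPI_SEND_INIT(sendbuf, count, db_type, rightproc, tag, MPI_COMM_WORLD, requests(1), ierr)
  !> CALL MPI_RECV_INIT(recvbuf, count, db_type, leftproc,  tag, MPI_COMM_WORLD, requests(2), ierr)
  !> CALL start_persistentcomm(requests, mpirank, leftproc, rightproc)
  !> @endcode
  !---------------------------------------------------------------------------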
  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Sends the rhs overlap region to the right neighbour and receives the one
  !> coming from the left neighbour into rhsoverlap_buffer
  !---------------------------------------------------------------------------
  SUBROUTINE start_rhscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, INTENT(IN) :: mpirank, leftproc, rightproc
    REAL(kind=db), DIMENSION(:,:), INTENT(INOUT) :: moments
    INTEGER, INTENT(IN) :: nrank(2), femorder(2), zlimright
    INTEGER, SAVE :: rhsoverlap_requests(2) = MPI_REQUEST_NULL
    INTEGER :: ierr
    INTEGER :: stats(MPI_STATUS_SIZE,2)

    rhsoverlap_requests=MPI_REQUEST_NULL
    rhsoverlap_buffer=0

    ! If the processor on the right has actually higher z positions, send it the overlap region
    IF(rightproc .gt. mpirank) THEN
      CALL MPI_ISEND(moments(1,zlimright+1), femorder(1), rhsoverlap_type, rightproc, rhsoverlap_tag, &
      &              MPI_COMM_WORLD, rhsoverlap_requests(1), ierr)
    END IF

    ! If the processor on the left has actually lower z positions
    IF(leftproc .lt. mpirank) THEN
      CALL MPI_IRECV(rhsoverlap_buffer, nrank(2)*(femorder(1)), db_type, leftproc, rhsoverlap_tag, &
      &              MPI_COMM_WORLD, rhsoverlap_requests(2), ierr)
    END IF

    CALL MPI_WAITALL(2, rhsoverlap_requests, stats, ierr)

  END SUBROUTINE start_rhscomm

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Sends the moments overlap region to the right neighbour and receives the one
  !> coming from the left neighbour into momentsoverlap_buffer
  !---------------------------------------------------------------------------
  SUBROUTINE start_momentscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, INTENT(IN) :: mpirank, leftproc, rightproc
    REAL(kind=db), DIMENSION(:,:), INTENT(INOUT) :: moments
    INTEGER, INTENT(IN) :: nrank(2), femorder(2), zlimright
    INTEGER, SAVE :: momentsoverlap_requests(2) = MPI_REQUEST_NULL
    INTEGER :: ierr
    INTEGER :: stats(MPI_STATUS_SIZE,2)

    momentsoverlap_requests=MPI_REQUEST_NULL
    momentsoverlap_buffer=0

    ! If the processor on the right has actually higher z positions, send it the overlap region
    IF(rightproc .gt. mpirank) THEN
      CALL MPI_ISEND(moments(1,zlimright+1), femorder(1), momentsoverlap_type, rightproc, momentsoverlap_tag, &
      &              MPI_COMM_WORLD, momentsoverlap_requests(1), ierr)
    END IF

    ! If the processor on the left has actually lower z positions
    ! NOTE: the hard-coded factor 10 assumes that 10 moments are stored, consistent
    !       with the nbmoments value used when the buffer was allocated in init_overlaps
    IF(leftproc .lt. mpirank) THEN
      CALL MPI_IRECV(momentsoverlap_buffer, 10*nrank(2)*(femorder(1)), db_type, leftproc, momentsoverlap_tag, &
      &              MPI_COMM_WORLD, momentsoverlap_requests(2), ierr)
    END IF

    CALL MPI_WAITALL(2, momentsoverlap_requests, stats, ierr)

  END SUBROUTINE start_momentscomm

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Initialize the particle MPI type used for inter process communications and publish it to
  !> the processes in the communicator
  !
  !---------------------------------------------------------------------------
  SUBROUTINE init_particlempi()
    INTEGER :: nblock = 10
    INTEGER :: blocklength(10)
    INTEGER(kind=MPI_ADDRESS_KIND) :: displs(10), displ0
    INTEGER :: types(10)
    TYPE(particle) :: part
    INTEGER :: ierr

    ! Addresses and types of the integer members
    CALL mpi_get_address(part%Rindex, displs(1), ierr)
    CALL mpi_get_address(part%Zindex, displs(2), ierr)
    CALL mpi_get_address(part%partindex, displs(3), ierr)
    types(1:3)=MPI_INTEGER

    ! Addresses and types of the real members
    CALL mpi_get_address(part%R, displs(4), ierr)
    CALL mpi_get_address(part%Z, displs(5), ierr)
    CALL mpi_get_address(part%THET, displs(6), ierr)
    CALL mpi_get_address(part%UZ, displs(7), ierr)
    CALL mpi_get_address(part%UR, displs(8), ierr)
    CALL mpi_get_address(part%UTHET, displs(9), ierr)
    CALL mpi_get_address(part%GAMMA, displs(10), ierr)
    types(4:10)=db_type
    blocklength(1:10) = 1

    ! Make the displacements relative to the start of the structure
    CALL mpi_get_address(part, displ0, ierr)
    displs=displs-displ0

    CALL MPI_TYPE_CREATE_STRUCT(nblock, blocklength, displs, types, particle_type, ierr)
    CALL MPI_TYPE_COMMIT(particle_type, ierr)

  END SUBROUTINE init_particlempi
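  !---------------------------------------------------------------------------
  !> Example of exchanging particles with particle_type once init_particlempi
  !> has been called (sketch only; nsend, destproc, tag and ierr are placeholders
  !> of this example, not entities defined in this module):
  !> @code
  !> TYPE(particle), ALLOCATABLE :: sendparts(:)
  !> ALLOCATE(sendparts(nsend))
  !> ! ... fill sendparts ...
  !> CALL MPI_SEND(sendparts, nsend, particle_type, destproc, tag, MPI_COMM_WORLD, ierr)
  !> @endcode
  !---------------------------------------------------------------------------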
  !---------------------------------------------------------------------------
  !> @author Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !> @brief Initialize the column MPI type used for inter process communications and publish it to
  !> the processes in the communicator (can be rhs or grid quantities)
  !
  !> @param[in] nr number of elements in the r direction
  !> @param[in] nz number of elements in the z direction
  !> @param[in] block_size number of contiguous elements of type init_type in each block of the column
  !> @param[in] stride interleaving factor: the element stride of the vector type is stride*block_size*nz
  !> @param[in] init_type MPI type of the initial data
  !> @param[inout] mpi_coltype final type usable in communications
  !---------------------------------------------------------------------------
  SUBROUTINE init_coltypempi(nr, nz, block_size, stride, init_type, mpi_coltype)
    INTEGER, INTENT(IN)    :: nr
    INTEGER, INTENT(IN)    :: nz
    INTEGER, INTENT(IN)    :: block_size
    INTEGER, INTENT(IN)    :: stride
    INTEGER, INTENT(IN)    :: init_type
    INTEGER, INTENT(INOUT) :: mpi_coltype
    INTEGER :: temp_mpi_coltype, ierr
    INTEGER(KIND=MPI_ADDRESS_KIND) :: init_type_lb, init_type_extent

    ! If mpi_coltype was already used, free it first
    IF(mpi_coltype .ne. MPI_DATATYPE_NULL) CALL MPI_TYPE_FREE(mpi_coltype, ierr)

    ! Create a vector type of nr blocks of block_size contiguous elements
    CALL MPI_TYPE_VECTOR(nr, block_size, stride*block_size*nz, init_type, temp_mpi_coltype, ierr)
    CALL MPI_TYPE_COMMIT(temp_mpi_coltype, ierr)

    ! Get the lower bound and extent of the initial type
    CALL MPI_TYPE_GET_EXTENT(init_type, init_type_lb, init_type_extent, ierr)

    ! Resize temp_mpi_coltype such that the next item to read is at j+1
    CALL MPI_TYPE_CREATE_RESIZED(temp_mpi_coltype, init_type_lb, stride*block_size*init_type_extent, &
    &                            mpi_coltype, ierr)
    CALL MPI_TYPE_COMMIT(mpi_coltype, ierr)

    CALL MPI_TYPE_FREE(temp_mpi_coltype, ierr)

  END SUBROUTINE init_coltypempi

END MODULE mpihelper