!------------------------------------------------------------------------------
! EPFL/Swiss Plasma Center
!------------------------------------------------------------------------------
!
! MODULE: mpihelper
!
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> Module responsible for setting up the MPI variables used in the communications.
!------------------------------------------------------------------------------
MODULE mpihelper
  USE constants
  USE mpi
  USE particletypes

  IMPLICIT NONE

  INTEGER, SAVE :: basicdata_type = MPI_DATATYPE_NULL          !< Stores the MPI data type used for communicating basicdata
  INTEGER, SAVE :: particle_type = MPI_DATATYPE_NULL           !< Stores the MPI data type used for particles communication between nodes
  !INTEGER, SAVE :: particles_type = MPI_DATATYPE_NULL         !< Stores the MPI data type used for particles gathering on node 0 and broadcast from node 0
  INTEGER, SAVE :: rhsoverlap_type = MPI_DATATYPE_NULL         !< Stores the MPI data type used for the communication of a rhs column
  INTEGER, SAVE :: db_type = MPI_DATATYPE_NULL                 !< Stores the MPI data type used for the communication of a REAL(kind=db)
  INTEGER, SAVE :: momentsoverlap_type = MPI_DATATYPE_NULL     !< Stores the MPI data type used for the communication of a column of a grid variable
  INTEGER, SAVE :: rcvrhsoverlap_type = MPI_DATATYPE_NULL      !< Stores the MPI data type used for the receive communication of a rhs column
  INTEGER, SAVE :: rcvmomentsoverlap_type = MPI_DATATYPE_NULL  !< Stores the MPI data type used for the receive communication of a column of a grid variable
  INTEGER, SAVE :: db_sum_op                                   !< Stores the MPI sum operation for db_type
  REAL(kind=db), ALLOCATABLE, SAVE :: rhsoverlap_buffer(:)     !< buffer used for storing the rhs ghost cells
                                                               !< received from the left or right MPI process
  REAL(kind=db), ALLOCATABLE, SAVE :: momentsoverlap_buffer(:) !< buffer used for storing the moments ghost cells
                                                               !< received from the left or right MPI process
  !INTEGER, SAVE :: momentsoverlap_requests(2) = MPI_REQUEST_NULL
  !INTEGER, SAVE :: rhsoverlap_requests(2) = MPI_REQUEST_NULL
  INTEGER :: rhsoverlap_tag     = 200
  INTEGER :: momentsoverlap_tag = 300
  INTEGER :: partsgather_tag    = 500
  INTEGER :: partsexchange_tag  = 600
  INTEGER :: nbpartsexchange_tag= 700

CONTAINS

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Initialize the MPI types used for inter process communications
!
!---------------------------------------------------------------------------
  SUBROUTINE mpitypes_init
    IMPLICIT NONE
    INTEGER :: ierr

    ! Initialize db_type to use real(kind=db) in MPI and the sum operator for reduce
    CALL MPI_TYPE_CREATE_F90_REAL(dprequestedprec, MPI_UNDEFINED, db_type, ierr)
    CALL MPI_TYPE_COMMIT(db_type, ierr)
    CALL MPI_OP_CREATE(DB_sum, .TRUE., db_sum_op, ierr)

    CALL init_particlempi
  END SUBROUTINE mpitypes_init
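
  !---------------------------------------------------------------------------
  ! Usage sketch (illustration only, not used by the module): once
  ! mpitypes_init has been called, db_type and db_sum_op can be combined in
  ! reductions on REAL(kind=db) data. The names locsum, totsum and nvals
  ! below are hypothetical.
  !
  !   INTEGER, PARAMETER :: nvals = 10
  !   REAL(kind=db) :: locsum(nvals), totsum(nvals)
  !   INTEGER :: ierr
  !   CALL MPI_ALLREDUCE(locsum, totsum, nvals, db_type, db_sum_op, &
  !        &             MPI_COMM_WORLD, ierr)
  !---------------------------------------------------------------------------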
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Computes the sum in MPI_Reduce operations involving REAL(kind=db)
!
!---------------------------------------------------------------------------
  SUBROUTINE DB_sum(INVEC, INOUTVEC, LEN, TYPE) BIND(C)
    !REAL(kind=db) :: INVEC(0:LEN-1), INOUTVEC(0:LEN-1)
    USE, INTRINSIC :: iso_c_binding, ONLY: c_ptr, c_f_pointer
    USE mpi
    IMPLICIT NONE
    TYPE(C_PTR), VALUE :: invec, inoutvec
    REAL(kind=db), POINTER :: ivec(:), iovec(:)
    INTEGER :: LEN
    INTEGER :: TYPE
    INTEGER :: i

    ! Map the C pointers received from MPI onto Fortran arrays of length LEN
    CALL c_f_pointer(INVEC, ivec, (/LEN/))
    CALL c_f_pointer(INOUTVEC, iovec, (/LEN/))
    ! Element-wise sum: inoutvec = invec + inoutvec
    DO i = 1, LEN
      iovec(i) = ivec(i) + iovec(i)
    END DO
  END SUBROUTINE DB_sum

!--------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Initialize the buffers and MPI derived types used for the overlap (ghost cell)
!> communications between neighbouring processes
!
!> @param[in] nrank ranks of the FEM array in (1) z direction and (2) r direction
!> @param[in] femorder finite element method order in z and r direction
!> @param[in] zlimleft z index delimiting the mpi local left boundary
!> @param[in] zlimright z index delimiting the mpi local right boundary
!> @param[in] nbmoments number of moments calculated and stored
!
!---------------------------------------------------------------------------
  SUBROUTINE init_overlaps(nrank, femorder, zlimleft, zlimright, nbmoments)
    INTEGER, INTENT(IN) :: nrank(:), femorder(:), zlimright, zlimleft, nbmoments

    IF (ALLOCATED(rhsoverlap_buffer)) DEALLOCATE(rhsoverlap_buffer)
    IF (ALLOCATED(momentsoverlap_buffer)) DEALLOCATE(momentsoverlap_buffer)
    ALLOCATE(rhsoverlap_buffer(nrank(2)*femorder(1)))
    ALLOCATE(momentsoverlap_buffer(nbmoments*nrank(2)*femorder(1)))

    ! Initialize the MPI column overlap type for rhs
    CALL init_coltypempi(nrank(2), zlimright-zlimleft+femorder(1), 1, 1, db_type, rhsoverlap_type)
    ! Initialize the MPI grid column type for the moments
    CALL init_coltypempi(nrank(2), zlimright-zlimleft+femorder(1), nbmoments, 1, db_type, momentsoverlap_type)
    ! Initialize the MPI receive column overlap type for rhs
    CALL init_coltypempi(nrank(2), nrank(1), 1, 1, db_type, rcvrhsoverlap_type)
    ! Initialize the MPI receive grid column type for the moments
    CALL init_coltypempi(nrank(2), nrank(1), nbmoments, 1, db_type, rcvmomentsoverlap_type)
  END SUBROUTINE init_overlaps
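
  !---------------------------------------------------------------------------
  ! Usage sketch (illustration only): a possible overlap exchange sequence,
  ! assuming the domain is split along z and rhs holds the locally assembled
  ! right-hand side. The names rhs, nrank, femorder, zlimleft, zlimright,
  ! nbmoments, mpirank, leftproc and rightproc are hypothetical here.
  !
  !   CALL init_overlaps(nrank, femorder, zlimleft, zlimright, nbmoments)
  !   ! ... assemble the local rhs ...
  !   CALL rhsoverlapcomm(mpirank, leftproc, rightproc, rhs, nrank, femorder, zlimright)
  !   ! rhsoverlap_buffer now holds the left neighbour's contribution to the
  !   ! first femorder(1) columns and can be accumulated into the local rhs.
  !---------------------------------------------------------------------------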
!---------------------------------------------------------------------------
!> @brief
!> Start the persistent send/receive requests towards the right and left
!> neighbours and wait for their completion
!
!> @param[inout] requests persistent MPI requests: (1) send, (2) receive
!> @param[in] mpirank rank of the current process
!> @param[in] leftproc rank of the left neighbour
!> @param[in] rightproc rank of the right neighbour
!---------------------------------------------------------------------------
  SUBROUTINE start_persistentcomm(requests, mpirank, leftproc, rightproc)
    INTEGER, INTENT(INOUT) :: requests(:)
    INTEGER, INTENT(IN) :: mpirank, leftproc, rightproc
    INTEGER :: ierr
    INTEGER :: stats(MPI_STATUS_SIZE,2)
    LOGICAL :: completed

    IF (leftproc .lt. mpirank) THEN
      ! Start the receive from the left neighbour
      CALL MPI_START(requests(2), ierr)
      IF (ierr .ne. MPI_SUCCESS) WRITE(*,*) "error in recv_init"
    END IF

    IF (rightproc .gt. mpirank) THEN
      ! Start the send to the right neighbour
      CALL MPI_START(requests(1), ierr)
      IF (ierr .ne. MPI_SUCCESS) WRITE(*,*) "error in send_init"
    END IF

    IF (leftproc .lt. mpirank) THEN
      ! Wait for the receive to complete
      completed = .FALSE.
      DO WHILE (.not. completed)
        CALL MPI_TEST(requests(2), completed, stats(:,2), ierr)
      END DO
      WRITE(*,*) "status 2", completed, stats(:,2)
      !CALL MPI_WAIT(requests(2), stats(:,2), ierr)
      !WRITE(*,*) "status 2", stats(:,2)
      IF (ierr .ne. MPI_SUCCESS) WRITE(*,*) "error in recv_init"
    END IF

    IF (rightproc .gt. mpirank) THEN
      ! Wait for the send to complete
      completed = .FALSE.
      DO WHILE (.not. completed)
        CALL MPI_TEST(requests(1), completed, stats(:,1), ierr)
      END DO
      !CALL MPI_WAIT(requests(1), stats(:,1), ierr)
      IF (ierr .ne. MPI_SUCCESS) WRITE(*,*) "error in send_init"
    END IF
  END SUBROUTINE start_persistentcomm

!---------------------------------------------------------------------------
!> @brief
!> Send the local rhs columns overlapping the right neighbour and receive the
!> corresponding overlap columns from the left neighbour into rhsoverlap_buffer
!---------------------------------------------------------------------------
  SUBROUTINE rhsoverlapcomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, INTENT(IN) :: mpirank, leftproc, rightproc
    REAL(kind=db), DIMENSION(:), INTENT(INOUT) :: moments
    INTEGER, INTENT(IN) :: nrank(2), femorder(2), zlimright
    INTEGER, SAVE :: rhsoverlap_requests(2) = MPI_REQUEST_NULL
    INTEGER :: ierr
    INTEGER :: stats(MPI_STATUS_SIZE,2)

    rhsoverlap_requests = MPI_REQUEST_NULL
    rhsoverlap_buffer = 0

    ! If the processor on the right has actually higher z positions
    IF (rightproc .gt. mpirank .and. rightproc .ge. 0) THEN
      CALL MPI_ISEND(moments(zlimright+1), femorder(1), rhsoverlap_type, rightproc, rhsoverlap_tag, &
      &              MPI_COMM_WORLD, rhsoverlap_requests(1), ierr)
    END IF

    ! If the processor on the left has actually lower z positions
    IF (leftproc .lt. mpirank .and. leftproc .ge. 0) THEN
      CALL MPI_IRECV(rhsoverlap_buffer, nrank(2)*femorder(1), db_type, leftproc, rhsoverlap_tag, &
      &              MPI_COMM_WORLD, rhsoverlap_requests(2), ierr)
    END IF

    CALL MPI_WAITALL(2, rhsoverlap_requests, stats, ierr)
  END SUBROUTINE rhsoverlapcomm

!---------------------------------------------------------------------------
!> @brief
!> Send the local moments columns overlapping the right neighbour and receive
!> the corresponding overlap columns from the left neighbour into momentsoverlap_buffer
!---------------------------------------------------------------------------
  SUBROUTINE momentsoverlapcomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, INTENT(IN) :: mpirank, leftproc, rightproc
    REAL(kind=db), DIMENSION(:,:), INTENT(INOUT) :: moments
    INTEGER, INTENT(IN) :: nrank(2), femorder(2), zlimright
    INTEGER, SAVE :: momentsoverlap_requests(2) = MPI_REQUEST_NULL
    INTEGER :: ierr
    INTEGER :: stats(MPI_STATUS_SIZE,2)

    momentsoverlap_requests = MPI_REQUEST_NULL
    momentsoverlap_buffer = 0

    ! If the processor on the right has actually higher z positions
    IF (rightproc .gt. mpirank .and. rightproc .ge. 0) THEN
      CALL MPI_ISEND(moments(1,zlimright+1), femorder(1), momentsoverlap_type, rightproc, momentsoverlap_tag, &
      &              MPI_COMM_WORLD, momentsoverlap_requests(1), ierr)
    END IF

    ! If the processor on the left has actually lower z positions
    IF (leftproc .lt. mpirank .and. leftproc .ge. 0) THEN
      ! Receive count matches the buffer allocated in init_overlaps
      ! (nbmoments*nrank(2)*femorder(1) elements)
      CALL MPI_IRECV(momentsoverlap_buffer, SIZE(momentsoverlap_buffer), db_type, leftproc, momentsoverlap_tag, &
      &              MPI_COMM_WORLD, momentsoverlap_requests(2), ierr)
    END IF

    CALL MPI_WAITALL(2, momentsoverlap_requests, stats, ierr)
  END SUBROUTINE momentsoverlapcomm

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Initialize the particle MPI type used for inter process communications and publish it to
!> the processes in the communicator
!
!---------------------------------------------------------------------------
  SUBROUTINE init_particlempi()
    INTEGER :: nblock = 5
    INTEGER :: blocklength(5)
    INTEGER(kind=MPI_ADDRESS_KIND) :: displs(5), displ0
    INTEGER :: types(5)
    TYPE(particle) :: part
    INTEGER :: ierr

    ! Compute the displacement of each member relative to the base address of the particle
    CALL mpi_get_address(part%partindex, displs(1), ierr)
    types(1) = MPI_INTEGER
    CALL mpi_get_address(part%pos, displs(2), ierr)
    CALL mpi_get_address(part%U, displs(3), ierr)
    CALL mpi_get_address(part%GAMMA, displs(4), ierr)
    CALL mpi_get_address(part%pot, displs(5), ierr)
    types(2:5) = db_type
    blocklength(1:5) = 1
    blocklength(2:3) = 3

    CALL mpi_get_address(part, displ0, ierr)
    displs = displs - displ0

    CALL MPI_TYPE_CREATE_STRUCT(nblock, blocklength, displs, types, particle_type, ierr)
    CALL MPI_TYPE_COMMIT(particle_type, ierr)
  END SUBROUTINE init_particlempi
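
  !---------------------------------------------------------------------------
  ! Usage sketch (illustration only): once particle_type has been committed, a
  ! single TYPE(particle) can be exchanged directly between two processes. The
  ! names part, destproc and srcproc are hypothetical.
  !
  !   TYPE(particle) :: part
  !   INTEGER :: ierr, stat(MPI_STATUS_SIZE)
  !   CALL MPI_SEND(part, 1, particle_type, destproc, partsexchange_tag, MPI_COMM_WORLD, ierr)
  !   CALL MPI_RECV(part, 1, particle_type, srcproc, partsexchange_tag, MPI_COMM_WORLD, stat, ierr)
  !---------------------------------------------------------------------------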
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Initialize the particles MPI type used for gathering the particles on the root
!> process and broadcasting them, and publish it to the processes in the communicator
!
!> @param[inout] p particles structure containing the data to gather
!> @param[in] idstart index of the first particle to send
!> @param[in] nsend number of particles to send
!> @param[inout] mpi_particles_type resulting MPI type usable in communications
!---------------------------------------------------------------------------
  SUBROUTINE init_particles_gather_mpi(p, idstart, nsend, mpi_particles_type)
    TYPE(particles), INTENT(INOUT) :: p
    INTEGER, INTENT(IN) :: idstart
    INTEGER, INTENT(IN) :: nsend
    INTEGER, INTENT(INOUT) :: mpi_particles_type
    INTEGER :: nblock = 9
    INTEGER :: blocklength(9)
    INTEGER(kind=MPI_ADDRESS_KIND) :: displs(9), displ0
    INTEGER :: types(9)
    INTEGER :: ierr
    INTEGER :: temptype

    IF (nsend .lt. 1) RETURN
    temptype = MPI_DATATYPE_NULL
    ! If mpi_particles_type was already used, free it first
    IF (mpi_particles_type .ne. MPI_DATATYPE_NULL) CALL MPI_TYPE_FREE(mpi_particles_type, ierr)

    ! Compute the displacement of each gathered array section relative to the base address of p
    CALL mpi_get_address(p%Rindex(idstart), displs(1), ierr)
    CALL mpi_get_address(p%Zindex(idstart), displs(2), ierr)
    CALL mpi_get_address(p%partindex(idstart), displs(3), ierr)
    types(1:3) = MPI_INTEGER
    CALL mpi_get_address(p%pos(1,idstart), displs(4), ierr)
    !CALL mpi_get_address(p%Z(idstart), displs(5), ierr)
    !CALL mpi_get_address(p%THET(idstart), displs(6), ierr)
    CALL mpi_get_address(p%pot(idstart), displs(5), ierr)
    CALL mpi_get_address(p%U(1,idstart), displs(6), ierr)
    CALL mpi_get_address(p%Uold(1,idstart), displs(7), ierr)
    !CALL mpi_get_address(p%UTHET(idstart), displs(10), ierr)
    !CALL mpi_get_address(p%UTHETold(idstart), displs(11), ierr)
    !CALL mpi_get_address(p%UZ(idstart), displs(12), ierr)
    !CALL mpi_get_address(p%UZold(idstart), displs(13), ierr)
    CALL mpi_get_address(p%GAMMA(idstart), displs(8), ierr)
    CALL mpi_get_address(p%GAMMAold(idstart), displs(9), ierr)
    types(4:9) = db_type
    blocklength = nsend
    blocklength(4) = 3*nsend
    blocklength(6:7) = 3*nsend

    CALL mpi_get_address(p, displ0, ierr)
    displs = displs - displ0

    CALL MPI_TYPE_CREATE_STRUCT(nblock, blocklength, displs, types, mpi_particles_type, ierr)
    CALL MPI_TYPE_COMMIT(mpi_particles_type, ierr)
  END SUBROUTINE init_particles_gather_mpi

!---------------------------------------------------------------------------
!> @author Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Initialize the column MPI type used for inter process communications and publish it to
!> the processes in the communicator (can be rhs or grid quantities)
!
!> @param[in] nr number of elements in the r direction
!> @param[in] nz number of elements in the z direction
!> @param[in] block_size number of consecutive elements stored for each grid node (e.g. the number of moments)
!> @param[in] stride stride multiplier between two consecutive blocks
!> @param[in] init_type MPI type of the initial data
!> @param[inout] mpi_coltype final type usable in communications
!---------------------------------------------------------------------------
  SUBROUTINE init_coltypempi(nr, nz, block_size, stride, init_type, mpi_coltype)
    INTEGER, INTENT(IN) :: nr
    INTEGER, INTENT(IN) :: nz
    INTEGER, INTENT(IN) :: block_size
    INTEGER, INTENT(IN) :: stride
    INTEGER, INTENT(IN) :: init_type
    INTEGER, INTENT(INOUT) :: mpi_coltype
    INTEGER :: temp_mpi_coltype
    INTEGER :: ierr
    INTEGER(KIND=MPI_ADDRESS_KIND) :: init_type_lb, init_type_extent
    !(nrank(2), nrank(1), 1, 10, db_type, rhsoverlap_type)

    ! If mpi_coltype was already used, free it first
    IF (mpi_coltype .ne. MPI_DATATYPE_NULL) CALL MPI_TYPE_FREE(mpi_coltype, ierr)

    ! Create a vector type of nr blocks of block_size elements, separated by
    ! stride*block_size*nz elements of init_type
    CALL MPI_TYPE_VECTOR(nr, block_size, stride*block_size*nz, init_type, temp_mpi_coltype, ierr)
    CALL MPI_TYPE_COMMIT(temp_mpi_coltype, ierr)

    ! Get the lower bound and extent of the initial type
    CALL MPI_TYPE_GET_EXTENT(init_type, init_type_lb, init_type_extent, ierr)
    ! Resize temp_mpi_coltype so that the element for the next z index (j+1)
    ! starts stride*block_size elements of init_type further in memory
    CALL MPI_TYPE_CREATE_RESIZED(temp_mpi_coltype, init_type_lb, stride*block_size*init_type_extent, &
    &                            mpi_coltype, ierr)
    CALL MPI_TYPE_COMMIT(mpi_coltype, ierr)
    CALL MPI_TYPE_FREE(temp_mpi_coltype, ierr)
  END SUBROUTINE init_coltypempi

END MODULE mpihelper
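
!------------------------------------------------------------------------------
! Usage sketch (illustration only, kept outside the module): init_coltypempi
! builds a datatype selecting one z column of a field stored with
! stride*block_size*nz elements of init_type per r index: nr blocks of
! block_size consecutive elements, resized so that consecutive columns are
! stride*block_size elements apart. The call below mirrors the rhs overlap
! type created in init_overlaps; the names nrank, femorder, zlimleft and
! zlimright are hypothetical here.
!
!   INTEGER :: coltype = MPI_DATATYPE_NULL
!   CALL init_coltypempi(nrank(2), zlimright-zlimleft+femorder(1), 1, 1, &
!        &               db_type, coltype)
!   ! femorder(1) such columns, starting at column zlimright+1, are what
!   ! rhsoverlapcomm sends to the right neighbour.
!------------------------------------------------------------------------------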