!------------------------------------------------------------------------------
! EPFL/Swiss Plasma Center
!------------------------------------------------------------------------------
!
! MODULE: mpihelper
!
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> Module responsible for setting up the MPI variables used in the communications.
!------------------------------------------------------------------------------
MODULE mpihelper
  USE constants
  USE mpi
  IMPLICIT NONE

  !> Structure containing the simulation parameters read from the input file
  TYPE BASICDATA
    LOGICAL :: nlres, nlsave, newres, nlxg, nlppform, nlPhis, nlclassical
    INTEGER :: nplasma, nz, it0d, it2d, itparts, &
      &        itgraph, distribtype, nblock, nrun
    DOUBLE PRECISION :: job_time, extra_time, tmax, dt, &
      &                 potinn, potout, B0, n0, temp,   &
      &                 Rcurv, width, H0, P0
    INTEGER, DIMENSION(2) :: femorder, ngauss
    INTEGER, DIMENSION(3) :: nnr
    DOUBLE PRECISION, DIMENSION(2):: lz
    DOUBLE PRECISION, DIMENSION(4):: radii, plasmadim
    CHARACTER(len=64) :: resfile = "results.h5"
    LOGICAL :: partperiodic
  END TYPE BASICDATA

  !> Structure containing a single particle position and velocity used in MPI communications.
  TYPE particle
    INTEGER :: Rindex, Zindex, partindex
    DOUBLE PRECISION :: R, Z, THET, &
      &                 UZ, UR, UTHET, &
      &                 Gamma
  END TYPE particle

  INTEGER, SAVE :: basicdata_type      !< Stores the MPI data type used for communicating basicdata
  INTEGER, SAVE :: particle_type       !< Stores the MPI data type used for particle communication
  INTEGER, SAVE :: rhsoverlap_type     !< Stores the MPI data type used for the communication of a rhs column
  INTEGER, SAVE :: momentsoverlap_type !< Stores the MPI data type used for the communication of a column of a grid variable
  DOUBLE PRECISION, ALLOCATABLE, SAVE:: rhsoverlap_buffer(:)     !< Buffer used for storing the rhs ghost cells
                                                                 !< received from the left or right MPI process
  DOUBLE PRECISION, ALLOCATABLE, SAVE:: momentsoverlap_buffer(:) !< Buffer used for storing the moments ghost cells
                                                                 !< received from the left or right MPI process
  INTEGER, SAVE:: momentsoverlap_requests(2) = MPI_REQUEST_NULL
  INTEGER, SAVE:: rhsoverlap_requests(2) = MPI_REQUEST_NULL
  INTEGER:: rhsoverlap_tag= 20
  INTEGER:: momentsoverlap_tag= 30

CONTAINS

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Initialize the MPI types used for inter process communications
  !
  !---------------------------------------------------------------------------
  SUBROUTINE init_mpitypes
    CALL init_particlempi
    CALL init_basicdatampi
  END SUBROUTINE init_mpitypes

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Initialize the persistent MPI communications used to exchange the grid
  !> overlap (ghost cells) between neighbouring processes
  !
  !> @param[in] nrank grid dimensions used to size the overlap buffers and column types
  !> @param[in] femorder finite element order in each direction; femorder(1) sets the number of overlapping columns
  !> @param[in] zlimright index of the last local z column; columns starting at zlimright+1 are sent to the right neighbour
  !> @param[inout] moments grid array holding the rhs and the moments
  !> @param[in] mpirank rank of the MPI process in MPI_COMM_WORLD
  !> @param[in] left_proc rank of the left (lower z) neighbour
  !> @param[in] right_proc rank of the right (higher z) neighbour
  !---------------------------------------------------------------------------
  SUBROUTINE init_overlaps(nrank, femorder, zlimright, moments, mpirank, left_proc, right_proc)
    DOUBLE PRECISION, ASYNCHRONOUS:: moments(:,:)
    INTEGER, INTENT(IN):: nrank(:), femorder(:), left_proc, right_proc, zlimright, mpirank
    INTEGER:: ierr

    ALLOCATE(rhsoverlap_buffer(nrank(2)*(femorder(1))))
    ALLOCATE(momentsoverlap_buffer(10*nrank(2)*(femorder(1))))

    WRITE(*,*) "nrank, femorder", nrank, femorder

    ! Initialize the MPI column overlap type for the rhs
    ! (one value out of the 10 stored moments per node)
    CALL init_coltypempi(nrank(2), nrank(1), 1, 10, MPI_DOUBLE_PRECISION, rhsoverlap_type)

    ! Initialize the MPI column overlap type for the grid moments
    ! (10 consecutive values per node)
    CALL init_coltypempi(nrank(2), nrank(1), 10, 1, MPI_DOUBLE_PRECISION, momentsoverlap_type)

    ! If the processor on the right has actually higher z positions
    IF(right_proc .gt. mpirank) THEN
      CALL MPI_SEND_INIT(moments(1,zlimright+1), femorder(1), rhsoverlap_type, right_proc, rhsoverlap_tag, &
        & MPI_COMM_WORLD, rhsoverlap_requests(1), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in rhs send_init"
      CALL MPI_SEND_INIT(moments(1,zlimright+1), femorder(1), momentsoverlap_type, right_proc, momentsoverlap_tag, &
        & MPI_COMM_WORLD, momentsoverlap_requests(1), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in moments send_init"
    END IF

    ! If the processor on the left has actually lower z positions
    IF(left_proc .lt. mpirank) THEN
      CALL MPI_RECV_INIT(rhsoverlap_buffer, nrank(2)*(femorder(1)), MPI_DOUBLE_PRECISION, left_proc, rhsoverlap_tag, &
        & MPI_COMM_WORLD, rhsoverlap_requests(2), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in rhs recv_init"
      CALL MPI_RECV_INIT(momentsoverlap_buffer, 10*nrank(2)*(femorder(1)), MPI_DOUBLE_PRECISION, left_proc, &
        & momentsoverlap_tag, MPI_COMM_WORLD, momentsoverlap_requests(2), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in moments recv_init"
    END IF
  END SUBROUTINE init_overlaps
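  !---------------------------------------------------------------------------
  !> @brief
  !> Illustrative sketch only (not referenced by the production code): shows the
  !> expected call order from a hypothetical driver once the domain decomposition
  !> is known. All arguments are assumed to be provided by the caller.
  !---------------------------------------------------------------------------
  SUBROUTINE example_setup_communications(nrank, femorder, zlimright, moments, mpirank, left_proc, right_proc)
    INTEGER, INTENT(IN):: nrank(:), femorder(:), zlimright, mpirank, left_proc, right_proc
    DOUBLE PRECISION, ASYNCHRONOUS:: moments(:,:)
    ! Create the derived MPI data types (particle and basicdata) ...
    CALL init_mpitypes
    ! ... then prepare the persistent ghost-cell exchanges with the neighbours.
    CALL init_overlaps(nrank, femorder, zlimright, moments, mpirank, left_proc, right_proc)
  END SUBROUTINE example_setup_communications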
  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Start and complete the persistent overlap communications prepared in init_overlaps
  !
  !> @param[inout] requests persistent send/receive requests created by init_overlaps
  !> @param[in] mpirank rank of the MPI process in MPI_COMM_WORLD
  !> @param[in] leftproc rank of the left (lower z) neighbour
  !> @param[in] rightproc rank of the right (higher z) neighbour
  !---------------------------------------------------------------------------
  SUBROUTINE start_persistentcomm(requests, mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, ASYNCHRONOUS, INTENT(INOUT):: requests(:)
    INTEGER, INTENT(IN):: mpirank, leftproc, rightproc
    DOUBLE PRECISION, DIMENSION(:,:), INTENT(INOUT):: moments
    INTEGER, INTENT(IN):: nrank(2), femorder(2), zlimright
    INTEGER:: ierr
    INTEGER:: stats(MPI_STATUS_SIZE,2)
    LOGICAL:: completed=.false.

    IF(leftproc .lt. mpirank) THEN
      ! Start the persistent receive from the left neighbour
      CALL MPI_START(requests(2), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in recv start"
    END IF

    IF(rightproc .gt. mpirank) THEN
      ! Start the persistent send to the right neighbour
      CALL MPI_START(requests(1), ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in send start"
    END IF

    IF(leftproc .lt. mpirank) THEN
      ! Wait for the receive to complete
      completed=.FALSE.
      DO WHILE(.not. completed)
        CALL MPI_TEST(requests(2), completed, stats(:,2), ierr)
      END DO
      WRITE(*,*) "status 2", completed, stats(:,2)
      !CALL MPI_WAIT(requests(2),stats(:,2),ierr)
      !WRITE(*,*)"status 2", stats(:,2)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in recv completion"
    END IF

    IF(rightproc .gt. mpirank) THEN
      ! Wait for the send to complete
      completed=.FALSE.
      DO WHILE(.not. completed)
        CALL MPI_TEST(requests(1), completed, stats(:,1), ierr)
      END DO
      !CALL MPI_WAIT(requests(1),stats(:,1),ierr)
      IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in send completion"
    END IF
  END SUBROUTINE start_persistentcomm

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Exchange the rhs overlap columns with the neighbouring processes: send the
  !> local overlap to the right neighbour and receive the contribution of the
  !> left neighbour into rhsoverlap_buffer
  !
  !> @param[in] mpirank rank of the MPI process in MPI_COMM_WORLD
  !> @param[in] leftproc rank of the left (lower z) neighbour
  !> @param[in] rightproc rank of the right (higher z) neighbour
  !> @param[inout] moments grid array holding the rhs and the moments
  !> @param[in] nrank grid dimensions used to size the messages
  !> @param[in] femorder finite element order in each direction
  !> @param[in] zlimright index of the last local z column
  !---------------------------------------------------------------------------
  SUBROUTINE start_rhscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, INTENT(IN):: mpirank, leftproc, rightproc
    DOUBLE PRECISION, DIMENSION(:,:), INTENT(INOUT):: moments
    INTEGER, INTENT(IN):: nrank(2), femorder(2), zlimright
    INTEGER:: ierr
    INTEGER:: stats(MPI_STATUS_SIZE,2)
    LOGICAL:: completed=.false.

    ! If the processor on the right has actually higher z positions
    IF(rightproc .gt. mpirank) THEN
      CALL MPI_ISEND(moments(1,zlimright+1), femorder(1), rhsoverlap_type, rightproc, rhsoverlap_tag, &
        & MPI_COMM_WORLD, rhsoverlap_requests(1), ierr)
    END IF

    ! If the processor on the left has actually lower z positions
    IF(leftproc .lt. mpirank) THEN
      CALL MPI_IRECV(rhsoverlap_buffer, nrank(2)*(femorder(1)), MPI_DOUBLE_PRECISION, leftproc, rhsoverlap_tag, &
        & MPI_COMM_WORLD, rhsoverlap_requests(2), ierr)
    END IF

    CALL MPI_WAITALL(2, rhsoverlap_requests, stats, ierr)
  END SUBROUTINE start_rhscomm

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Exchange the moments overlap columns with the neighbouring processes: send
  !> the local overlap to the right neighbour and receive the contribution of
  !> the left neighbour into momentsoverlap_buffer
  !
  !> @param[in] mpirank rank of the MPI process in MPI_COMM_WORLD
  !> @param[in] leftproc rank of the left (lower z) neighbour
  !> @param[in] rightproc rank of the right (higher z) neighbour
  !> @param[inout] moments grid array holding the rhs and the moments
  !> @param[in] nrank grid dimensions used to size the messages
  !> @param[in] femorder finite element order in each direction
  !> @param[in] zlimright index of the last local z column
  !---------------------------------------------------------------------------
  SUBROUTINE start_momentscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, INTENT(IN):: mpirank, leftproc, rightproc
    DOUBLE PRECISION, DIMENSION(:,:), INTENT(INOUT):: moments
    INTEGER, INTENT(IN):: nrank(2), femorder(2), zlimright
    INTEGER:: ierr
    INTEGER:: stats(MPI_STATUS_SIZE,2)

    ! If the processor on the right has actually higher z positions
    IF(rightproc .gt. mpirank) THEN
      CALL MPI_ISEND(moments(1,zlimright+1), femorder(1), momentsoverlap_type, rightproc, momentsoverlap_tag, &
        & MPI_COMM_WORLD, momentsoverlap_requests(1), ierr)
    END IF

    ! If the processor on the left has actually lower z positions
    IF(leftproc .lt. mpirank) THEN
      CALL MPI_IRECV(momentsoverlap_buffer, 10*nrank(2)*(femorder(1)), MPI_DOUBLE_PRECISION, leftproc, &
        & momentsoverlap_tag, MPI_COMM_WORLD, momentsoverlap_requests(2), ierr)
    END IF

    CALL MPI_WAITALL(2, momentsoverlap_requests, stats, ierr)
  END SUBROUTINE start_momentscomm
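  !---------------------------------------------------------------------------
  !> @brief
  !> Illustrative sketch only (assumed usage, not part of the production code):
  !> performs the rhs and moments overlap exchanges after the local assembly.
  !> How the received contributions stored in rhsoverlap_buffer and
  !> momentsoverlap_buffer are accumulated into the local arrays is left to the
  !> caller and is not shown here.
  !---------------------------------------------------------------------------
  SUBROUTINE example_exchange_overlaps(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    INTEGER, INTENT(IN):: mpirank, leftproc, rightproc
    DOUBLE PRECISION, DIMENSION(:,:), INTENT(INOUT):: moments
    INTEGER, INTENT(IN):: nrank(2), femorder(2), zlimright
    ! Send the overlap columns to the right neighbour and receive the
    ! contribution of the left neighbour (both calls block until completion).
    CALL start_rhscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
    CALL start_momentscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)
  END SUBROUTINE example_exchange_overlaps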
  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Initialize the particle MPI type used for inter process communications and publish it to
  !> the processes in the communicator
  !
  !---------------------------------------------------------------------------
  SUBROUTINE init_particlempi()
    INTEGER :: nblock = 10
    INTEGER:: blocklength(10)
    INTEGER(kind=MPI_ADDRESS_KIND):: displs(10), displ0
    INTEGER, DIMENSION(10) :: types
    TYPE(particle) :: part
    INTEGER:: ierr

    ! Integer members
    CALL mpi_get_address(part%Rindex, displs(1), ierr)
    CALL mpi_get_address(part%Zindex, displs(2), ierr)
    CALL mpi_get_address(part%partindex, displs(3), ierr)
    types(1:3)=MPI_INTEGER

    ! Double precision members
    CALL mpi_get_address(part%R, displs(4), ierr)
    CALL mpi_get_address(part%Z, displs(5), ierr)
    CALL mpi_get_address(part%THET, displs(6), ierr)
    CALL mpi_get_address(part%UZ, displs(7), ierr)
    CALL mpi_get_address(part%UR, displs(8), ierr)
    CALL mpi_get_address(part%UTHET, displs(9), ierr)
    CALL mpi_get_address(part%GAMMA, displs(10), ierr)
    types(4:10)=MPI_DOUBLE_PRECISION
    blocklength(1:10) = 1

    ! Compute the displacements relative to the start of the structure
    CALL mpi_get_address(part, displ0, ierr)
    displs=displs-displ0

    CALL MPI_Type_create_struct(nblock, blocklength, displs, types, particle_type, ierr)
    CALL MPI_Type_commit(particle_type,ierr)
  END SUBROUTINE init_particlempi
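  !---------------------------------------------------------------------------
  !> @brief
  !> Illustrative sketch only (assumed helper, not part of the production code):
  !> once init_particlempi has been called, a whole array of TYPE(particle) can
  !> be sent to a neighbouring process in a single message using particle_type.
  !---------------------------------------------------------------------------
  SUBROUTINE example_send_particles(parts, dest, tag)
    TYPE(particle), INTENT(IN):: parts(:)
    INTEGER, INTENT(IN):: dest, tag
    INTEGER:: ierr
    ! The count is the number of particles; the struct type describes the layout.
    CALL MPI_SEND(parts, SIZE(parts), particle_type, dest, tag, MPI_COMM_WORLD, ierr)
  END SUBROUTINE example_send_particles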
  !---------------------------------------------------------------------------
  !> @author Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !> @brief Initialize the column MPI type used for inter process communications and publish it to
  !> the processes in the communicator (can be rhs or grid quantities)
  !
  !> @param[in] nr number of elements in the r direction
  !> @param[in] nz number of elements in the z direction
  !> @param[in] block_size number of consecutive values picked at each node
  !> @param[in] stride spacing between two consecutive z columns, in multiples of block_size
  !> @param[in] init_type MPI type of the initial data
  !> @param[out] mpi_coltype final type usable in communications
  !---------------------------------------------------------------------------
  SUBROUTINE init_coltypempi(nr, nz, block_size, stride, init_type, mpi_coltype)
    INTEGER, INTENT(IN)  :: nr
    INTEGER, INTENT(IN)  :: nz
    INTEGER, INTENT(IN)  :: block_size
    INTEGER, INTENT(IN)  :: stride
    INTEGER, INTENT(IN)  :: init_type
    INTEGER, INTENT(OUT) :: mpi_coltype
    INTEGER :: temp_mpi_coltype, ierr
    INTEGER(KIND=MPI_ADDRESS_KIND):: init_type_lb, init_type_extent

    ! Create a vector type of nr blocks of block_size elements
    CALL MPI_TYPE_VECTOR(nr, block_size, stride*block_size*nz, init_type, temp_mpi_coltype, ierr)
    CALL MPI_TYPE_COMMIT(temp_mpi_coltype, ierr)

    ! Get the extent in bytes of the initial type
    CALL MPI_TYPE_GET_EXTENT(init_type, init_type_lb, init_type_extent, ierr)

    ! Resize temp_mpi_coltype such that the next item to read is at column j+1
    CALL MPI_TYPE_CREATE_RESIZED(temp_mpi_coltype, init_type_lb, &
      & INT(stride*block_size,kind=MPI_ADDRESS_KIND)*init_type_extent, mpi_coltype, ierr)
    CALL MPI_TYPE_COMMIT(mpi_coltype, ierr)
  END SUBROUTINE init_coltypempi

  !---------------------------------------------------------------------------
  !> @author
  !> Guillaume Le Bars EPFL/SPC
  !
  ! DESCRIPTION:
  !>
  !> @brief
  !> Initialize the basicdata MPI type used for inter process communications and publish it to
  !> the processes in the communicator
  !
  !---------------------------------------------------------------------------
  SUBROUTINE init_basicdatampi()
    INTEGER :: nblock = 37
    INTEGER:: blocklength(37)
    INTEGER(kind=MPI_ADDRESS_KIND):: displs(37), displ0
    INTEGER, DIMENSION(37) :: types
    TYPE(BASICDATA) :: bdata
    INTEGER:: ierr

    ! Logical scalars
    CALL mpi_get_address(bdata%nlres, displs(1), ierr)
    CALL mpi_get_address(bdata%nlsave, displs(2), ierr)
    CALL mpi_get_address(bdata%newres, displs(3), ierr)
    CALL mpi_get_address(bdata%nlxg, displs(4), ierr)
    CALL mpi_get_address(bdata%nlppform, displs(5), ierr)
    CALL mpi_get_address(bdata%nlPhis, displs(6), ierr)
    CALL mpi_get_address(bdata%nlclassical, displs(7), ierr)
    types(1:7)=MPI_LOGICAL

    ! Integer scalars
    CALL mpi_get_address(bdata%nplasma, displs(8), ierr)
    CALL mpi_get_address(bdata%nz, displs(9), ierr)
    CALL mpi_get_address(bdata%it0d, displs(10), ierr)
    CALL mpi_get_address(bdata%it2d, displs(11), ierr)
    CALL mpi_get_address(bdata%itparts, displs(12), ierr)
    CALL mpi_get_address(bdata%itgraph, displs(13), ierr)
    CALL mpi_get_address(bdata%distribtype, displs(14), ierr)
    CALL mpi_get_address(bdata%nblock, displs(15), ierr)
    CALL mpi_get_address(bdata%nrun, displs(16), ierr)
    types(8:16)=MPI_INTEGER

    ! Double precision scalars
    CALL mpi_get_address(bdata%job_time, displs(17), ierr)
    CALL mpi_get_address(bdata%extra_time, displs(18), ierr)
    CALL mpi_get_address(bdata%tmax, displs(19), ierr)
    CALL mpi_get_address(bdata%dt, displs(20), ierr)
    CALL mpi_get_address(bdata%potinn, displs(21), ierr)
    CALL mpi_get_address(bdata%potout, displs(22), ierr)
    CALL mpi_get_address(bdata%B0, displs(23), ierr)
    CALL mpi_get_address(bdata%n0, displs(24), ierr)
    CALL mpi_get_address(bdata%temp, displs(25), ierr)
    CALL mpi_get_address(bdata%Rcurv, displs(26), ierr)
    CALL mpi_get_address(bdata%width, displs(27), ierr)
    CALL mpi_get_address(bdata%H0, displs(28), ierr)
    CALL mpi_get_address(bdata%P0, displs(29), ierr)
    types(17:29)=MPI_DOUBLE_PRECISION
    blocklength(1:29) = 1

    ! Integer arrays
    CALL mpi_get_address(bdata%femorder, displs(30), ierr)
    CALL mpi_get_address(bdata%ngauss, displs(31), ierr)
    CALL mpi_get_address(bdata%nnr, displs(32), ierr)
    types(30:32)=MPI_INTEGER
    blocklength(30:31) = 2
    blocklength(32) = 3

    ! Double precision arrays
    CALL mpi_get_address(bdata%lz, displs(33), ierr)
    blocklength(33) = 2
    CALL mpi_get_address(bdata%radii, displs(34), ierr)
    CALL mpi_get_address(bdata%plasmadim, displs(35), ierr)
    types(33:35)=MPI_DOUBLE_PRECISION
    blocklength(34:35) = 4

    ! Result file name
    CALL mpi_get_address(bdata%resfile, displs(36), ierr)
    types(36)=MPI_CHARACTER
    blocklength(36) = 64

    CALL mpi_get_address(bdata%partperiodic, displs(37), ierr)
    types(37)=MPI_LOGICAL
    blocklength(37) = 1

    ! Compute the displacements relative to the start of the structure
    CALL mpi_get_address(bdata, displ0, ierr)
    displs=displs-displ0

    CALL MPI_Type_create_struct(nblock, blocklength, displs, types, basicdata_type, ierr)
    CALL MPI_Type_commit(basicdata_type,ierr)
  END SUBROUTINE init_basicdatampi
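  !---------------------------------------------------------------------------
  !> @brief
  !> Illustrative sketch only (assumed helper, not part of the production code):
  !> after init_basicdatampi, the input parameters read by the root process can
  !> be broadcast to all processes in a single call using basicdata_type.
  !---------------------------------------------------------------------------
  SUBROUTINE example_bcast_basicdata(bdata, root)
    TYPE(BASICDATA), INTENT(INOUT):: bdata
    INTEGER, INTENT(IN):: root
    INTEGER:: ierr
    ! One BASICDATA structure is broadcast; the derived type describes its layout.
    CALL MPI_BCAST(bdata, 1, basicdata_type, root, MPI_COMM_WORLD, ierr)
  END SUBROUTINE example_bcast_basicdata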
END MODULE mpihelper