diff --git a/src/geometry_mod.F90 b/src/geometry_mod.F90
index 83a972f..dd60f23 100644
--- a/src/geometry_mod.F90
+++ b/src/geometry_mod.F90
@@ -1,448 +1,516 @@
module geometry
! computes geometrical quantities
! Adapted from B.J.Frei MOLIX code (2021)
use prec_const
use model
use grid
use array
use fields
use basic
use calculus, ONLY: simpson_rule_z
use miller, ONLY: set_miller_parameters, get_miller
implicit none
PRIVATE
! Geometry input parameters
CHARACTER(len=16), &
   PUBLIC, PROTECTED :: geom
REAL(dp), PUBLIC, PROTECTED :: q0        = 1.4_dp  ! safety factor
REAL(dp), PUBLIC, PROTECTED :: shear     = 0._dp   ! magnetic field shear
REAL(dp), PUBLIC, PROTECTED :: eps       = 0.18_dp ! inverse aspect ratio
REAL(dp), PUBLIC, PROTECTED :: alpha_MHD = 0       ! Shafranov shift effect, alpha = -q^2 R dbeta/dr
! parameters for Miller geometry
-REAL(dp), PUBLIC, PROTECTED :: kappa   = 1._dp ! elongation
+REAL(dp), PUBLIC, PROTECTED :: kappa   = 1._dp ! elongation (1 for circular)
REAL(dp), PUBLIC, PROTECTED :: s_kappa = 0._dp ! r-normalized derivative, skappa = r/kappa dkappa/dr
REAL(dp), PUBLIC, PROTECTED :: delta   = 0._dp ! triangularity
REAL(dp), PUBLIC, PROTECTED :: s_delta = 0._dp ! '' sdelta = r/sqrt(1-delta^2) ddelta/dr
REAL(dp), PUBLIC, PROTECTED :: zeta    = 0._dp ! squareness
REAL(dp), PUBLIC, PROTECTED :: s_zeta  = 0._dp ! '' szeta = r dzeta/dr
+! to apply a shift in the parallel z-BC if shearless
+REAL(dp), PUBLIC, PROTECTED :: shift_y = 0._dp ! for Arno
+! Chooses the type of parallel BC used for the unconnected kx modes (active for non-zero shear only)
+!  'periodic'     : connect a disconnected kx to a mode in the other quadrant
+!  'dirichlet'    : connect a disconnected kx to 0
+!  'disconnected' : connect all kx to 0
+!  'shearless'    : connect all kx to themselves
+CHARACTER(len=256), &
+   PUBLIC, PROTECTED :: parallel_bc
! GENE unused additional parameters for miller_mod
REAL(dp), PUBLIC, PROTECTED :: edge_opt     = 0 ! meant to redistribute the points in z
REAL(dp), PUBLIC, PROTECTED :: major_R      = 1 ! major radius
REAL(dp), PUBLIC, PROTECTED :: major_Z      = 0 ! vertical elevation
REAL(dp), PUBLIC, PROTECTED :: dpdx_pm_geom = 0 ! amplitude of the mag. eq. pressure gradient
REAL(dp), PUBLIC, PROTECTED :: C_y          = 0 ! defines y coordinate : Cy (q theta - phi)
REAL(dp), PUBLIC, PROTECTED :: C_xy         = 0 ! defines x coordinate : B = Cxy Vx x Vy
! Geometrical auxiliary variables
LOGICAL, PUBLIC, PROTECTED :: SHEARED = .false. ! flag for sheared magnetic geometry or not
! Curvature
REAL(dp), PUBLIC, DIMENSION(:,:,:,:), ALLOCATABLE :: Ckxky ! dimensions: ky, kx, z, odd/even p
! Jacobian
REAL(dp), PUBLIC, DIMENSION(:,:), ALLOCATABLE :: Jacobian ! dimensions: z, odd/even p
COMPLEX(dp), PUBLIC, PROTECTED :: iInt_Jacobian ! inverse of the z-integrated Jacobian
! Metric
REAL(dp), PUBLIC, DIMENSION(:,:), ALLOCATABLE :: gxx, gxy, gxz, gyy, gyz, gzz ! dimensions: z, odd/even p
REAL(dp), PUBLIC, DIMENSION(:,:), ALLOCATABLE :: dxdR, dxdZ, Rc, phic, Zc
! derivatives of the magnetic field strength
REAL(dp), PUBLIC, DIMENSION(:,:), ALLOCATABLE :: gradxB, gradyB, gradzB
! Relative magnetic field strength
REAL(dp), PUBLIC, DIMENSION(:,:), ALLOCATABLE :: hatB, hatB_NL
! Relative strength of the major radius
REAL(dp), PUBLIC, DIMENSION(:,:), ALLOCATABLE :: hatR, hatZ
! Some geometrical coefficients
REAL(dp), PUBLIC, DIMENSION(:,:), ALLOCATABLE :: gradz_coeff ! 1 / [ J_{xyz} \hat{B} ]
! Array to map the index of mode (kx,ky,-pi) to (kx+2pi*s*ky,ky,pi) for the sheared periodic boundary condition
INTEGER, PUBLIC, DIMENSION(:,:), ALLOCATABLE :: ikx_zBC_L, ikx_zBC_R
+! pb_phase, for parallel boundary phase, contains the factor that occurs when taking into account
+! that q0 is defined in the middle of the fluxtube whereas the radial position spans [0,Lx).
+! This shift introduces a (-1)^(Nexc*iky) phase change, which is also included in GENE.
+COMPLEX(dp), PUBLIC, DIMENSION(:), ALLOCATABLE :: pb_phase_L, pb_phase_R
! Functions
PUBLIC :: geometry_readinputs, geometry_outputinputs,&
          eval_magnetic_geometry, set_ikx_zBC_map
CONTAINS

  SUBROUTINE geometry_readinputs
    ! Read the input parameters
    IMPLICIT NONE
    NAMELIST /GEOMETRY/ geom, q0, shear, eps,&
-                       kappa, s_kappa, delta, s_delta, zeta, s_zeta ! For Miller
+                       kappa, s_kappa, delta, s_delta, zeta, s_zeta,& ! For Miller
+                       parallel_bc, shift_y
    READ(lu_in,geometry)
    IF(shear .NE. 0._dp) SHEARED = .true.
+   SELECT CASE(parallel_bc)
+   CASE ('dirichlet')
+   CASE ('periodic')
+   CASE ('shearless')
+   CASE ('disconnected')
+   CASE DEFAULT
+     STOP 'Parallel BC not recognized'
+   END SELECT
+   IF(my_id .EQ. 0) print*, 'Parallel BC : ', parallel_bc
  END SUBROUTINE geometry_readinputs

  SUBROUTINE eval_magnetic_geometry
    ! evaluate the metric elements, Jacobian and gradients
    implicit none
    REAL(dp) :: kx, ky
    COMPLEX(dp), DIMENSION(izs:ize) :: integrant
    REAL(dp) :: G1, G2, G3, Cx, Cy
    ! Allocate arrays
    CALL geometry_allocate_mem
    IF( (Ny .EQ. 1) .AND. (Nz .EQ. 1)) THEN ! 1D perp linear run
      IF( my_id .eq. 0 ) WRITE(*,*) '1D perpendicular geometry'
      call eval_1D_geometry
    ELSE
      SELECT CASE(geom)
      CASE('s-alpha')
        IF( my_id .eq. 0 ) WRITE(*,*) 's-alpha-B geometry'
        call eval_salphaB_geometry
      CASE('Z-pinch')
        IF( my_id .eq. 0 ) WRITE(*,*) 'Z-pinch geometry'
        call eval_zpinch_geometry
        SHEARED = .FALSE.
        shear   = 0._dp
      CASE('miller')
        IF( my_id .eq. 0 ) WRITE(*,*) 'Miller geometry'
        call set_miller_parameters(kappa,s_kappa,delta,s_delta,zeta,s_zeta)
        call get_miller(eps,major_R,major_Z,q0,shear,alpha_MHD,edge_opt,&
                        C_y,C_xy,dpdx_pm_geom,gxx,gyy,gzz,gxy,gxz,gyz,&
                        gradxB,gradyB,hatB,jacobian,gradzB,hatR,hatZ,dxdR,dxdZ,&
                        Ckxky,gradz_coeff)
      CASE DEFAULT
        STOP 'geometry not recognized!!'
      END SELECT
    ENDIF
    ! Evaluate the perpendicular wavenumber
    !  k_\perp^2 = g^{xx} k_x^2 + 2 g^{xy} k_x k_y + g^{yy} k_y^2
    ! normalized to rhos_
    DO eo = 0,1
      DO iz = izgs,izge
        DO iky = ikys, ikye
          ky = kyarray(iky)
          DO ikx = ikxs, ikxe
            kx = kxarray(ikx)
            kparray(iky, ikx, iz, eo) = &
              SQRT( gxx(iz,eo)*kx**2 + 2._dp*gxy(iz,eo)*kx*ky + gyy(iz,eo)*ky**2)/hatB(iz,eo)
            ! there is a factor 1/B from the normalization; important to match GENE
          ENDDO
        ENDDO
      ENDDO
      ! Curvature operator (Frei et al. 2022, eq. 2.15)
      DO iz = izgs,izge
        G1 = gxy(iz,eo)*gxy(iz,eo)-gxx(iz,eo)*gyy(iz,eo)
        G2 = gxy(iz,eo)*gxz(iz,eo)-gxx(iz,eo)*gyz(iz,eo)
        G3 = gyy(iz,eo)*gxz(iz,eo)-gxy(iz,eo)*gyz(iz,eo)
        Cx = (G1*gradyB(iz,eo) + G2*gradzB(iz,eo))/hatB(iz,eo)
        Cy = (G3*gradzB(iz,eo) - G1*gradxB(iz,eo))/hatB(iz,eo)
        DO iky = ikys, ikye
          ky = kyarray(iky)
          DO ikx= ikxs, ikxe
            kx = kxarray(ikx)
            Ckxky(iky, ikx, iz,eo) = (Cx*kx + Cy*ky)
          ENDDO
        ENDDO
        ! coefficient in front of the parallel derivative
        gradz_coeff(iz,eo) = 1._dp / jacobian(iz,eo) / hatB(iz,eo)
      ENDDO
    ENDDO
    ! set the mapping for the parallel boundary conditions
    CALL set_ikx_zBC_map
    two_third_kpmax = 2._dp/3._dp * MAXVAL(kparray)
    ! Compute the inverse z-integrated Jacobian (useful for flux averaging)
    integrant = Jacobian(izs:ize,0) ! convert into a complex array
    CALL simpson_rule_z(integrant,iInt_Jacobian)
    iInt_Jacobian = 1._dp/iInt_Jacobian ! invert it
  END SUBROUTINE eval_magnetic_geometry
  !
  !--------------------------------------------------------------------------------
  !
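For reference, the new options are read from the GEOMETRY namelist above. A minimal input sketch (the values are illustrative only; parallel_bc must be one of 'periodic', 'dirichlet', 'disconnected' or 'shearless', and shift_y feeds the parallel boundary phase set in set_ikx_zBC_map below):

    &GEOMETRY
      geom        = 's-alpha'
      q0          = 1.4
      shear       = 0.8
      eps         = 0.18
      parallel_bc = 'dirichlet'
      shift_y     = 0.0
    /
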
  SUBROUTINE eval_salphaB_geometry
    ! evaluate the s-alpha geometry model
    implicit none
    REAL(dp) :: z, kx, ky, Gx, Gy
    alpha_MHD = 0._dp
    parity: DO eo = 0,1
      zloop: DO iz = izgs,izge
        z = zarray(iz,eo)
        ! metric
        gxx(iz,eo) = 1._dp
        gxy(iz,eo) = shear*z - alpha_MHD*SIN(z)
        gxz(iz,eo) = 0._dp
        gyy(iz,eo) = 1._dp + (shear*z - alpha_MHD*SIN(z))**2
        gyz(iz,eo) = 1._dp/eps
        gzz(iz,eo) = 0._dp
        dxdR(iz,eo)= COS(z)
        dxdZ(iz,eo)= SIN(z)
        ! Relative strength of the major radius
        hatR(iz,eo) = 1._dp + eps*COS(z)
        hatZ(iz,eo) = 1._dp + eps*SIN(z)
        ! toroidal coordinates
        Rc  (iz,eo) = hatR(iz,eo)
        phic(iz,eo) = z
        Zc  (iz,eo) = hatZ(iz,eo)
        ! Jacobian
        Jacobian(iz,eo) = q0*hatR(iz,eo)
        ! Relative strength of the modulus of B
        hatB(iz,eo) = 1._dp / hatR(iz,eo)
        ! Derivatives of the magnetic field strength
        gradxB(iz,eo) = -COS(z)                ! GENE puts a factor hatB^2 or 1/hatR^2 in this
        gradyB(iz,eo) = 0._dp
        gradzB(iz,eo) = eps*SIN(z)/hatR(iz,eo) ! GENE puts a factor hatB or 1/hatR in this
        ! Curvature operator
        Gx = (gxz(iz,eo) * gxy(iz,eo) - gxx(iz,eo) * gyz(iz,eo)) *eps*SIN(z)           ! Kx
        Gy = -COS(z) + (gxz(iz,eo) * gyy(iz,eo) - gxy(iz,eo) * gyz(iz,eo)) *eps*SIN(z) ! Ky
        DO iky = ikys, ikye
          ky = kyarray(iky)
          DO ikx= ikxs, ikxe
            kx = kxarray(ikx)
            Ckxky(iky, ikx, iz,eo) = (-SIN(z)*kx - COS(z)*ky -(shear*z-alpha_MHD*SIN(z))*SIN(z)*ky)/ hatB(iz,eo)
            ! Ckxky(iky, ikx, iz,eo) = (Gx*kx + Gy*ky) * hatB(iz,eo) ! .. multiply by hatB to cancel the 1/hatB factor in the moments_eqs_rhs.f90 routine
          ENDDO
        ENDDO
        ! coefficient in front of the parallel derivative
        gradz_coeff(iz,eo) = 1._dp / Jacobian(iz,eo) / hatB(iz,eo)
        ! Nonlinear term prefactor
        hatB_NL(iz,eo) = 1._dp !Jacobian(iz,eo)*(gxx(iz,eo)*gyy(iz,eo) - gxy(iz,eo)**2)/hatB(iz,eo)
      ENDDO zloop
    ENDDO parity
  END SUBROUTINE eval_salphaB_geometry
  !
  !--------------------------------------------------------------------------------
  !
  SUBROUTINE eval_zpinch_geometry
    implicit none
    REAL(dp) :: z, kx, ky, alpha_MHD
    alpha_MHD = 0._dp
    parity: DO eo = 0,1
      zloop: DO iz = izgs,izge
        z = zarray(iz,eo)
        ! metric
        gxx(iz,eo) = 1._dp
        gxy(iz,eo) = 0._dp
        gxz(iz,eo) = 0._dp
        gyy(iz,eo) = 1._dp
        gyz(iz,eo) = 0._dp
        gzz(iz,eo) = 1._dp
        dxdR(iz,eo)= COS(z)
        dxdZ(iz,eo)= SIN(z)
        ! Relative strength of the major radius
        hatR(iz,eo) = 1._dp
        hatZ(iz,eo) = 1._dp
        ! toroidal coordinates
        Rc  (iz,eo) = hatR(iz,eo)
        phic(iz,eo) = z
        Zc  (iz,eo) = hatZ(iz,eo)
        ! Jacobian
        Jacobian(iz,eo) = 1._dp
        ! Relative strength of the modulus of B
        hatB   (iz,eo) = 1._dp
        hatB_NL(iz,eo) = 1._dp
        ! Derivatives of the magnetic field strength
        gradxB(iz,eo) = -1._dp ! GENE puts a factor hatB^2 or 1/hatR^2 in this
        gradyB(iz,eo) = 0._dp
        gradzB(iz,eo) = 0._dp  ! GENE puts a factor hatB or 1/hatR in this
        ! Curvature operator
        DO iky = ikys, ikye
          ky = kyarray(iky)
          DO ikx= ikxs, ikxe
            kx = kxarray(ikx)
            Ckxky(iky, ikx, iz,eo) = -ky
          ENDDO
        ENDDO
        ! coefficient in front of the parallel derivative
        gradz_coeff(iz,eo) = 1._dp / Jacobian(iz,eo) / hatB(iz,eo)
      ENDDO zloop
    ENDDO parity
  END SUBROUTINE eval_zpinch_geometry
  !
  !--------------------------------------------------------------------------------
  !
  SUBROUTINE eval_1D_geometry
    ! evaluate the 1D perpendicular geometry model
    implicit none
    REAL(dp) :: z, kx, ky
    parity: DO eo = 0,1
      zloop: DO iz = izs,ize
        z = zarray(iz,eo)
        ! metric
        gxx(iz,eo) = 1._dp
        gxy(iz,eo) = 0._dp
        gyy(iz,eo) = 1._dp
        ! Relative strength of the major radius
        hatR(iz,eo) = 1._dp
        ! Jacobian
        Jacobian(iz,eo) = 1._dp
        ! Relative strength of the modulus of B
        hatB(iz,eo) = 1._dp
        ! Curvature operator
        DO iky = ikys, ikye
          ky = kyarray(iky)
          DO ikx= ikxs, ikxe
            kx = kxarray(ikx)
            Ckxky(iky, ikx, iz,eo) = -kx
          ENDDO
        ENDDO
        ! coefficient in front of the parallel derivative
        gradz_coeff(iz,eo) = 1._dp
      ENDDO zloop
    ENDDO parity
  END SUBROUTINE eval_1D_geometry
  !
  !--------------------------------------------------------------------------------
  !
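The evaluators above only fill the metric coefficients and field-strength arrays; eval_magnetic_geometry then assembles k_perp = sqrt(gxx*kx^2 + 2*gxy*kx*ky + gyy*ky^2)/hatB from them. A minimal standalone sketch of that step, using illustrative s-alpha-like values at z = 0 (gxy = shear*z = 0, hatB = 1/(1+eps)); the numbers are assumptions for the demo, not taken from a run:

    PROGRAM kperp_check
      IMPLICIT NONE
      INTEGER, PARAMETER :: dp = KIND(1.d0)
      ! illustrative metric entries at a single (z,eo) point
      REAL(dp) :: gxx = 1._dp, gxy = 0._dp, gyy = 1._dp
      REAL(dp) :: hatB = 1._dp/1.18_dp ! 1/(1+eps) with eps = 0.18
      REAL(dp) :: kx = 0.1_dp, ky = 0.3_dp, kperp
      ! same expression as in eval_magnetic_geometry (the 1/hatB is the GENE-matching normalization)
      kperp = SQRT(gxx*kx**2 + 2._dp*gxy*kx*ky + gyy*ky**2)/hatB
      PRINT '(A,F8.4)', 'kperp = ', kperp ! sqrt(0.1)*1.18 ~ 0.3731
    END PROGRAM kperp_check
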
  SUBROUTINE set_ikx_zBC_map
    IMPLICIT NONE
    REAL(dp) :: shift, kx_shift
-   ! For periodic CHI BC or 0 dirichlet
-   LOGICAL :: PERIODIC_CHI_BC = .FALSE.
-   ALLOCATE(ikx_zBC_R(ikys:ikye,ikxs:ikxe))
-   ALLOCATE(ikx_zBC_L(ikys:ikye,ikxs:ikxe))
+   INTEGER :: ikx_shift
-   !! No shear case (simple id mapping)
+   ALLOCATE(ikx_zBC_L(ikys:ikye,ikxs:ikxe))
+   ALLOCATE(ikx_zBC_R(ikys:ikye,ikxs:ikxe))
+   ALLOCATE(pb_phase_L(ikys:ikye))
+   ALLOCATE(pb_phase_R(ikys:ikye))
+   !! No shear case (simple id mapping) or not at the end of the z domain
    !3   |  1  2  3  4  5  6 | ky = 3 dky
    !2 ky|  1  2  3  4  5  6 | ky = 2 dky
    !1 A |  1  2  3  4  5  6 | ky = 1 dky
    !0   | -> kx |  1____2____3____4____5____6 | ky = 0 dky
    !(e.g.) kx =    0   0.1  0.2  0.3 -0.2 -0.1  (dkx=free)
    DO iky = ikys,ikye
      DO ikx = ikxs,ikxe
-       ikx_zBC_L(iky,ikx) = ikx
+       ikx_zBC_L(iky,ikx) = ikx ! connect to itself by default
        ikx_zBC_R(iky,ikx) = ikx
      ENDDO
+     pb_phase_L(iky) = 1._dp ! no phase change by default
+     pb_phase_R(iky) = 1._dp
    ENDDO
-   ! Modify connection map only at border of z
-   IF(SHEARED) THEN
-     ! connection map BC of the RIGHT boundary (z=pi*Npol-dz) (even NZ)
-     !3   |  4  x  x  x  2  3 | ky = 3 dky
-     !2 ky|  3  4  x  x  1  2 | ky = 2 dky
-     !1 A |  2  3  4  x  6  1 | ky = 1 dky
-     !0   | -> kx |  1____2____3____4____5____6 | ky = 0 dky
-     !kx  =    0   0.1  0.2  0.3 -0.2 -0.1  (dkx=2pi*shear*npol*dky)
-
-     ! connection map BC of the RIGHT boundary (z=pi*Npol-dz) (ODD NZ)
-     !3   |  x  x  x  2  3 | ky = 3 dky
-     !2 ky|  3  x  x  1  2 | ky = 2 dky
-     !1 A |  2  3  x  5  1 | ky = 1 dky
-     !0   | -> kx |  1____2____3____4____5 | ky = 0 dky
-     !kx  =    0   0.1  0.2 -0.2 -0.1  (dkx=2pi*shear*npol*dky)
-     IF(contains_zmax) THEN ! Check if the process is at the end of the FT
+   ! Parallel boundaries are not trivial in the sheared case, unless
+   ! the user asks explicitly for shearless BC
+   IF(SHEARED .AND. (parallel_bc .NE. 'shearless')) THEN
+     !!!!!!!!!! LEFT PARALLEL BOUNDARY
+     ! Modify the connection map only at the border of z (matters for MPI z-parallelization)
+     IF(contains_zmin) THEN ! Check if the process is at the start of the fluxtube
        DO iky = ikys,ikye
+         ! Formula for the shift due to shear after Npol turns
          shift = 2._dp*PI*shear*kyarray(iky)*Npol
          DO ikx = ikxs,ikxe
-           kx_shift = kxarray(ikx) + shift
-           ! We use EPSILON() to treat the perfect equality case
-           IF( ((kx_shift-EPSILON(kx_shift)) .GT. kx_max) .AND. (.NOT. PERIODIC_CHI_BC) ) THEN ! outside of the frequency domain
-             ikx_zBC_R(iky,ikx) = -99
-           ELSE
-             ikx_zBC_R(iky,ikx) = ikx+(iky-1)*Nexc
-             IF( ikx_zBC_R(iky,ikx) .GT. Nkx ) &
-               ikx_zBC_R(iky,ikx) = ikx_zBC_R(iky,ikx) - Nkx
+           ! Usual formula for shifting indices, using that dkx = 2pi*shear*dky/Nexc
+           ikx_zBC_L(iky,ikx) = ikx-(iky-1)*Nexc
+           ! Check if it points out of the kx domain
+           ! IF( (kxarray(ikx) - shift) .LT. kx_min ) THEN
+           IF( (ikx-(iky-1)*Nexc) .LT. 1 ) THEN ! outside of the frequency domain
+             SELECT CASE(parallel_bc)
+             CASE ('dirichlet') ! connected to 0
+               ikx_zBC_L(iky,ikx) = -99
+             CASE ('periodic') ! reroute it by cycling through the modes
+               ikx_zBC_L(iky,ikx) = MODULO(ikx_zBC_L(iky,ikx)-1,Nkx)+1
+             END SELECT
            ENDIF
          ENDDO
+         ! phase present in GENE from a shift of the x origin by Lx/2 (useless?)
+         ! We also add the user-defined shift in the y direction (see Volcokas et al. 2022)
+         pb_phase_L(iky) = (-1._dp)**(Nexc*(iky-1))*EXP(imagu*REAL(iky-1,dp)*2._dp*pi*shift_y)
        ENDDO
      ENDIF
-     ! connection map BC of the LEFT boundary (z=-pi*Npol)
-     !3   |  x  5  6  1  x  x | ky = 3 dky
-     !2 ky|  5  6  1  2  x  x | ky = 2 dky
-     !1 A |  6  1  2  3  x  5 | ky = 1 dky
-     !0   | -> kx |  1____2____3____4____5____6 | ky = 0 dky
-     !(e.g.) kx =    0   0.1  0.2  0.3 -0.2 -0.1  (dkx=2pi*shear*npol*dky)
-     IF(contains_zmin) THEN ! Check if the process is at the start of the FT
+     ! Option for disconnecting every mode, i.e. connecting all boundaries to 0
+     IF(parallel_bc .EQ. 'disconnected') ikx_zBC_L = -99
+     !!!!!!!!!! RIGHT PARALLEL BOUNDARY
+     IF(contains_zmax) THEN ! Check if the process is at the end of the fluxtube
        DO iky = ikys,ikye
+         ! Formula for the shift due to shear after Npol turns
          shift = 2._dp*PI*shear*kyarray(iky)*Npol
          DO ikx = ikxs,ikxe
-           kx_shift = kxarray(ikx) - shift
-           ! We use EPSILON() to treat the perfect equality case
-           IF( ((kx_shift+EPSILON(kx_shift)) .LT. kx_min) .AND. (.NOT. PERIODIC_CHI_BC) ) THEN ! outside of the frequency domain
-             ikx_zBC_L(iky,ikx) = -99
-           ELSE
-             ikx_zBC_L(iky,ikx) = ikx-(iky-1)*Nexc
-             IF( ikx_zBC_L(iky,ikx) .LT. 1 ) &
-               ikx_zBC_L(iky,ikx) = ikx_zBC_L(iky,ikx) + Nkx
+           ! Usual formula for shifting indices
+           ikx_zBC_R(iky,ikx) = ikx+(iky-1)*Nexc
+           ! Check if it points out of the kx domain
+           ! IF( (kxarray(ikx) + shift) .GT. kx_max ) THEN
+           IF( (ikx+(iky-1)*Nexc) .GT. Nkx ) THEN ! outside of the frequency domain
+             SELECT CASE(parallel_bc)
+             CASE ('dirichlet') ! connected to 0
+               ikx_zBC_R(iky,ikx) = -99
+             CASE ('periodic') ! reroute it by cycling through the modes
+               ikx_zBC_R(iky,ikx) = MODULO(ikx_zBC_R(iky,ikx)-1,Nkx)+1
+             END SELECT
           ENDIF
         ENDDO
+        ! phase present in GENE from a shift of the x origin by Lx/2 (useless?)
+        ! We also add the user-defined shift in the y direction (see Volcokas et al. 2022)
+        pb_phase_R(iky) = (-1._dp)**(Nexc*(iky-1))*EXP(-imagu*REAL(iky-1,dp)*2._dp*pi*shift_y)
       ENDDO
     ENDIF
-   ELSE
-   ENDIF
+   ! Option for disconnecting every mode, i.e. connecting all boundaries to 0
+   IF(parallel_bc .EQ. 'disconnected') ikx_zBC_R = -99
+   ENDIF
+   ! write(*,*) kxarray
+   ! write(*,*) kyarray
+   ! write(*,*) 'ikx_zBC_L :-----------'
+   ! DO iky = ikys,ikye
+   !   print*, ikx_zBC_L(iky,:)
+   ! enddo
+   ! write(*,*) 'ikx_zBC_R :-----------'
+   ! DO iky = ikys,ikye
+   !   print*, ikx_zBC_R(iky,:)
+   ! enddo
+   ! stop
+   !!!!!!! Example of maps ('x' means connected to the 0 value; in the map it is -99)
+   ! dirichlet connection map BC of the RIGHT boundary (z=pi*Npol-dz)
+   !3   |  4  x  x  x  2  3 | ky = 3 dky
+   !2 ky|  3  4  x  x  1  2 | ky = 2 dky
+   !1 A |  2  3  4  x  6  1 | ky = 1 dky
+   !0   | -> kx |  1____2____3____4____5____6 | ky = 0 dky
+   !kx  =    0   0.1  0.2  0.3 -0.2 -0.1  (dkx=2pi*shear*npol*dky)
+
+   ! periodic connection map BC of the LEFT boundary (z=-pi*Npol)
+   !3   |  4  5  6  1  2  3 | ky = 3 dky
+   !2 ky|  5  6  1  2  3  4 | ky = 2 dky
+   !1 A |  6  1  2  3  4  5 | ky = 1 dky
+   !0   | -> kx |  1____2____3____4____5____6 | ky = 0 dky
+   !(e.g.) kx =    0   0.1  0.2  0.3 -0.2 -0.1  (dkx=2pi*shear*npol*dky)
+
+   ! shearless connection map BC of the LEFT/RIGHT boundary (z=+/-pi*Npol)
+   !3   |  1  2  3  4  5  6 | ky = 3 dky
+   !2 ky|  1  2  3  4  5  6 | ky = 2 dky
+   !1 A |  1  2  3  4  5  6 | ky = 1 dky
+   !0   | -> kx |  1____2____3____4____5____6 | ky = 0 dky
+   !(e.g.) kx =    0   0.1  0.2  0.3 -0.2 -0.1  (dkx=2pi*shear*npol*dky)
+
+   ! disconnected connection map BC of the LEFT/RIGHT boundary (z=+/-pi*Npol)
+   !3   |  x  x  x  x  x  x | ky = 3 dky
+   !2 ky|  x  x  x  x  x  x | ky = 2 dky
+   !1 A |  x  x  x  x  x  x | ky = 1 dky
+   !0   | -> kx |  x____x____x____x____x____x | ky = 0 dky
+   !(e.g.) kx =    0   0.1  0.2  0.3 -0.2 -0.1  (dkx=2pi*shear*npol*dky)
  END SUBROUTINE set_ikx_zBC_map
  !
  !--------------------------------------------------------------------------------
  !
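To make the rerouting concrete, here is a minimal self-contained sketch (a hypothetical driver, not part of the patch) that rebuilds the 'periodic' LEFT-boundary map for the six-mode example above (Nkx = 6, Nexc = 1) together with the boundary phase for shift_y = 0, where pb_phase reduces to (-1)^(Nexc*(iky-1)):

    PROGRAM zbc_map_demo
      IMPLICIT NONE
      INTEGER, PARAMETER :: dp = KIND(1.d0)
      INTEGER, PARAMETER :: Nkx = 6, Nky = 4, Nexc = 1
      INTEGER     :: ikx_zBC_L(Nky,Nkx), iky, ikx
      COMPLEX(dp) :: pb_phase_L(Nky)
      DO iky = 1,Nky
        DO ikx = 1,Nkx
          ! shift the kx index by (iky-1)*Nexc and cycle out-of-domain modes back in (periodic BC)
          ikx_zBC_L(iky,ikx) = MODULO(ikx-(iky-1)*Nexc-1, Nkx)+1
        ENDDO
        ! parallel boundary phase for shift_y = 0: alternates +1/-1 with iky when Nexc is odd
        pb_phase_L(iky) = (-1._dp)**(Nexc*(iky-1))
        PRINT '(A,I0,A,6I3,A,F5.1)', 'iky=',iky,' map:',ikx_zBC_L(iky,:),'  phase:',REAL(pb_phase_L(iky))
      ENDDO
    END PROGRAM zbc_map_demo

Expected rows: iky=1 -> 1 2 3 4 5 6, iky=2 -> 6 1 2 3 4 5, iky=3 -> 5 6 1 2 3 4, iky=4 -> 4 5 6 1 2 3, matching the periodic LEFT-boundary table above.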
  SUBROUTINE geometry_allocate_mem
    ! Curvature and geometry
    CALL allocate_array(       Ckxky, ikys,ikye, ikxs,ikxe, izgs,izge, 0,1)
    CALL allocate_array(    Jacobian, izgs,izge, 0,1)
    CALL allocate_array(         gxx, izgs,izge, 0,1)
    CALL allocate_array(         gxy, izgs,izge, 0,1)
    CALL allocate_array(         gxz, izgs,izge, 0,1)
    CALL allocate_array(         gyy, izgs,izge, 0,1)
    CALL allocate_array(         gyz, izgs,izge, 0,1)
    CALL allocate_array(         gzz, izgs,izge, 0,1)
    CALL allocate_array(      gradxB, izgs,izge, 0,1)
    CALL allocate_array(      gradyB, izgs,izge, 0,1)
    CALL allocate_array(      gradzB, izgs,izge, 0,1)
    CALL allocate_array(        hatB, izgs,izge, 0,1)
    CALL allocate_array(     hatB_NL, izgs,izge, 0,1)
    CALL allocate_array(        hatR, izgs,izge, 0,1)
    CALL allocate_array(        hatZ, izgs,izge, 0,1)
    CALL allocate_array(          Rc, izgs,izge, 0,1)
    CALL allocate_array(        phic, izgs,izge, 0,1)
    CALL allocate_array(          Zc, izgs,izge, 0,1)
    CALL allocate_array(        dxdR, izgs,izge, 0,1)
    CALL allocate_array(        dxdZ, izgs,izge, 0,1)
    call allocate_array( gradz_coeff, izgs,izge, 0,1)
    CALL allocate_array(     kparray, ikys,ikye, ikxs,ikxe, izgs,izge, 0,1)
  END SUBROUTINE geometry_allocate_mem

  SUBROUTINE geometry_outputinputs(fidres, str)
    ! Write the input parameters to the results_xx.h5 file
    USE futils, ONLY: attach
    USE prec_const
    IMPLICIT NONE
    INTEGER, INTENT(in) :: fidres
    CHARACTER(len=256), INTENT(in) :: str
    CALL attach(fidres, TRIM(str), "geometry", geom)
    CALL attach(fidres, TRIM(str), "q0",       q0)
    CALL attach(fidres, TRIM(str), "shear",    shear)
    CALL attach(fidres, TRIM(str), "eps",      eps)
  END SUBROUTINE geometry_outputinputs
end module geometry
diff --git a/src/ghosts_mod.F90 b/src/ghosts_mod.F90
index 0f3136b..bd23ad9 100644
--- a/src/ghosts_mod.F90
+++ b/src/ghosts_mod.F90
@@ -1,367 +1,367 @@
module ghosts
USE basic
USE grid
USE time_integration
USE model, ONLY : KIN_E, beta
-USE geometry, ONLY : SHEARED, ikx_zBC_L, ikx_zBC_R
+USE geometry, ONLY : SHEARED, ikx_zBC_L, ikx_zBC_R, pb_phase_L, pb_phase_R
IMPLICIT NONE
INTEGER :: status(MPI_STATUS_SIZE), source, dest, count, ipg
PUBLIC :: update_ghosts_moments, update_ghosts_EM
CONTAINS

  SUBROUTINE update_ghosts_moments
    CALL cpu_time(t0_ghost)
    IF (num_procs_p .GT. 1) THEN ! Do it only if p is distributed over several processes
      IF(KIN_E)&
        CALL update_ghosts_p_e
      CALL update_ghosts_p_i
    ENDIF
    IF(Nz .GT. 1) THEN
      IF(KIN_E) &
        CALL update_ghosts_z_e
      CALL update_ghosts_z_i
    ENDIF
    CALL cpu_time(t1_ghost)
    tc_ghost = tc_ghost + (t1_ghost - t0_ghost)
  END SUBROUTINE update_ghosts_moments

  SUBROUTINE update_ghosts_EM
    CALL cpu_time(t0_ghost)
    IF(Nz .GT. 1) THEN
      CALL update_ghosts_z_phi
      IF(beta .GT. 0._dp) &
        CALL update_ghosts_z_psi
    ENDIF
    CALL cpu_time(t1_ghost)
    tc_ghost = tc_ghost + (t1_ghost - t0_ghost)
  END SUBROUTINE update_ghosts_EM

!Communicate p+1, p+2 moments to the left neighbour and p-1, p-2 moments to the right one
! [a b|C D|e f] : proc n has moments a to f where a,b,e,f are ghosts
!
!proc 0: [0 1 2 3 4|5 6]
!               V V ^ ^
!proc 1:     [3 4|5 6 7 8|9 10]
!                     V V ^ ^
!proc 2:           [7 8|9 10 11 12|13 14]
!                           V  V  ^  ^
!proc 3:                [11 12|13 14 15 16|17 18]
!                                          ^  ^
!Closure by zero truncation :              0  0
  SUBROUTINE update_ghosts_p_e
    USE fields, ONLY : moments_e
    IMPLICIT NONE
    count = (ijge_e-ijgs_e+1)*(ikye-ikys+1)*(ikxe-ikxs+1)*(izge-izgs+1)
    !!!!!!!!!!! Send ghost to the right neighbour !!!!!!!!!!!!!!!!!!!!!!
    ! Send the last local moment to fill the -1 neighbour ghost
    CALL mpi_sendrecv(moments_e(ipe_e  ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 10, & ! Send to right
                      moments_e(ips_e-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 10, & ! Receive from left
                      comm0, status, ierr)
    IF (deltape .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil
      CALL mpi_sendrecv(moments_e(ipe_e-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 11, & ! Send to right
                        moments_e(ips_e-2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 11, & ! Receive from left
                        comm0, status, ierr)
    !!!!!!!!!!! Send ghost to the left neighbour !!!!!!!!!!!!!!!!!!!!!!
    CALL mpi_sendrecv(moments_e(ips_e  ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 12, & ! Send to left
                      moments_e(ipe_e+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 12, & ! Receive from right
                      comm0, status, ierr)
    IF (deltape .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil
      CALL mpi_sendrecv(moments_e(ips_e+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 13, & ! Send to left
                        moments_e(ipe_e+2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 13, & ! Receive from right
                        comm0, status, ierr)
  END SUBROUTINE update_ghosts_p_e

!Communicate p+1, p+2 moments to the left neighbour and p-1, p-2 moments to the right one
  SUBROUTINE update_ghosts_p_i
    USE fields, ONLY : moments_i
    IMPLICIT NONE
    count = (ijge_i-ijgs_i+1)*(ikye-ikys+1)*(ikxe-ikxs+1)*(izge-izgs+1) ! Number of elements sent
    !!!!!!!!!!! Send ghost to the right neighbour !!!!!!!!!!!!!!!!!!!!!!
    CALL mpi_sendrecv(moments_i(ipe_i  ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 14, &
                      moments_i(ips_i-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 14, &
                      comm0, status, ierr)
    IF (deltapi .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil
      CALL mpi_sendrecv(moments_i(ipe_i-1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 15, &
                        moments_i(ips_i-2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 15, &
                        comm0, status, ierr)
    !!!!!!!!!!! Send ghost to the left neighbour !!!!!!!!!!!!!!!!!!!!!!
    CALL mpi_sendrecv(moments_i(ips_i  ,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 16, &
                      moments_i(ipe_i+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 16, &
                      comm0, status, ierr)
    IF (deltapi .EQ. 1) & ! If we have odd Hermite degrees we need a 2nd order stencil
      CALL mpi_sendrecv(moments_i(ips_i+1,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_L, 17, &
                        moments_i(ipe_i+2,:,:,:,:,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_R, 17, &
                        comm0, status, ierr)
  END SUBROUTINE update_ghosts_p_i

!Communicate z+1, z+2 moments to the left neighbour and z-1, z-2 moments to the right one
! [a b|C D|e f] : proc n has moments a to f where a,b,e,f are ghosts
!
!proc 0: [0 1 2 3 4|5 6]
!               V V ^ ^
!proc 1:     [3 4|5 6 7 8|9 10]
!                     V V ^ ^
!proc 2:           [7 8|9 10 11 12|13 14]
!                           V  V  ^  ^
!proc 3:                [11 12|13 14 15 16|17 18]
!                                          ^  ^
!Periodic:                                 0  1
  SUBROUTINE update_ghosts_z_e
    USE parallel, ONLY : buff_pjxy_zBC_e
    USE fields,   ONLY : moments_e
    IMPLICIT NONE
    INTEGER :: ikxBC_L, ikxBC_R
    IF(Nz .GT. 1) THEN
      IF (num_procs_z .GT. 1) THEN
        CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
        count = (ipge_e-ipgs_e+1)*(ijge_e-ijgs_e+1)*(ikye-ikys+1)*(ikxe-ikxs+1)
        !!!!!!!!!!! Send ghost to the up neighbour !!!!!!!!!!!!!!!!!!!!!!
        ! Send the last local moment to fill the -1 neighbour ghost
        CALL mpi_sendrecv(moments_e(:,:,:,:,ize  ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 20, & ! Send to Up the last
                          buff_pjxy_zBC_e(:,:,:,:,-1),           count, MPI_DOUBLE_COMPLEX, nbr_D, 20, & ! Receive from Down the first-1
                          comm0, status, ierr)
        CALL mpi_sendrecv(moments_e(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 21, & ! Send to Up the last-1
                          buff_pjxy_zBC_e(:,:,:,:,-2),           count, MPI_DOUBLE_COMPLEX, nbr_D, 21, & ! Receive from Down the first-2
                          comm0, status, ierr)
        !!!!!!!!!!! Send ghost to the down neighbour !!!!!!!!!!!!!!!!!!!!!!
        CALL mpi_sendrecv(moments_e(:,:,:,:,izs  ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 22, & ! Send to Down the first
                          buff_pjxy_zBC_e(:,:,:,:,+1),           count, MPI_DOUBLE_COMPLEX, nbr_U, 22, & ! Receive from Up the last+1
                          comm0, status, ierr)
        CALL mpi_sendrecv(moments_e(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 23, & ! Send to Down the first+1
                          buff_pjxy_zBC_e(:,:,:,:,+2),           count, MPI_DOUBLE_COMPLEX, nbr_U, 23, & ! Receive from Up the last+2
                          comm0, status, ierr)
      ELSE ! No z-parallelization (local copy)
        buff_pjxy_zBC_e(:,:,:,:,-1) = moments_e(:,:,:,:,ize  ,updatetlevel)
        buff_pjxy_zBC_e(:,:,:,:,-2) = moments_e(:,:,:,:,ize-1,updatetlevel)
        buff_pjxy_zBC_e(:,:,:,:,+1) = moments_e(:,:,:,:,izs  ,updatetlevel)
        buff_pjxy_zBC_e(:,:,:,:,+2) = moments_e(:,:,:,:,izs+1,updatetlevel)
      ENDIF
      DO iky = ikys,ikye
        DO ikx = ikxs,ikxe
          ikxBC_L = ikx_zBC_L(iky,ikx)
          IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! first-1 gets last
-           moments_e(:,:,iky,ikx,izs-1,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-1)
+           moments_e(:,:,iky,ikx,izs-1,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-1)
            ! first-2 gets last-1
-           moments_e(:,:,iky,ikx,izs-2,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-2)
+           moments_e(:,:,iky,ikx,izs-2,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_L,-2)
          ELSE
            moments_e(:,:,iky,ikx,izs-1,updatetlevel) = 0._dp
            moments_e(:,:,iky,ikx,izs-2,updatetlevel) = 0._dp
          ENDIF
          ikxBC_R = ikx_zBC_R(iky,ikx)
          IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! last+1 gets first
-           moments_e(:,:,iky,ikx,ize+1,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+1)
+           moments_e(:,:,iky,ikx,ize+1,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+1)
            ! last+2 gets first+1
-           moments_e(:,:,iky,ikx,ize+2,updatetlevel) = buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+2)
+           moments_e(:,:,iky,ikx,ize+2,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_e(:,:,iky,ikxBC_R,+2)
          ELSE
            moments_e(:,:,iky,ikx,ize+1,updatetlevel) = 0._dp
            moments_e(:,:,iky,ikx,ize+2,updatetlevel) = 0._dp
          ENDIF
        ENDDO
      ENDDO
    ENDIF
  END SUBROUTINE update_ghosts_z_e

  SUBROUTINE update_ghosts_z_i
    USE parallel, ONLY : buff_pjxy_zBC_i
    USE fields,   ONLY : moments_i
    IMPLICIT NONE
    INTEGER :: ikxBC_L, ikxBC_R
    IF(Nz .GT. 1) THEN
      IF (num_procs_z .GT. 1) THEN
        CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
        count = (ipge_i-ipgs_i+1)*(ijge_i-ijgs_i+1)*(ikye-ikys+1)*(ikxe-ikxs+1)
        !!!!!!!!!!! Send ghost to the up neighbour !!!!!!!!!!!!!!!!!!!!!!
        ! Send the last local moment to fill the -1 neighbour ghost
        CALL mpi_sendrecv(moments_i(:,:,:,:,ize  ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 24, & ! Send to Up the last
                          buff_pjxy_zBC_i(:,:,:,:,-1),           count, MPI_DOUBLE_COMPLEX, nbr_D, 24, & ! Receive from Down the first-1
                          comm0, status, ierr)
        CALL mpi_sendrecv(moments_i(:,:,:,:,ize-1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_U, 25, & ! Send to Up the last-1
                          buff_pjxy_zBC_i(:,:,:,:,-2),           count, MPI_DOUBLE_COMPLEX, nbr_D, 25, & ! Receive from Down the first-2
                          comm0, status, ierr)
        !!!!!!!!!!! Send ghost to the down neighbour !!!!!!!!!!!!!!!!!!!!!!
        CALL mpi_sendrecv(moments_i(:,:,:,:,izs  ,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 26, & ! Send to Down the first
                          buff_pjxy_zBC_i(:,:,:,:,+1),           count, MPI_DOUBLE_COMPLEX, nbr_U, 26, & ! Receive from Up the last+1
                          comm0, status, ierr)
        CALL mpi_sendrecv(moments_i(:,:,:,:,izs+1,updatetlevel), count, MPI_DOUBLE_COMPLEX, nbr_D, 27, & ! Send to Down the first+1
                          buff_pjxy_zBC_i(:,:,:,:,+2),           count, MPI_DOUBLE_COMPLEX, nbr_U, 27, & ! Receive from Up the last+2
                          comm0, status, ierr)
      ELSE ! No z-parallelization (local copy)
        buff_pjxy_zBC_i(:,:,:,:,-1) = moments_i(:,:,:,:,ize  ,updatetlevel)
        buff_pjxy_zBC_i(:,:,:,:,-2) = moments_i(:,:,:,:,ize-1,updatetlevel)
        buff_pjxy_zBC_i(:,:,:,:,+1) = moments_i(:,:,:,:,izs  ,updatetlevel)
        buff_pjxy_zBC_i(:,:,:,:,+2) = moments_i(:,:,:,:,izs+1,updatetlevel)
      ENDIF
      DO iky = ikys,ikye
        DO ikx = ikxs,ikxe
          ikxBC_L = ikx_zBC_L(iky,ikx)
          IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! first-1 gets last
-           moments_i(:,:,iky,ikx,izs-1,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-1)
+           moments_i(:,:,iky,ikx,izs-1,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-1)
            ! first-2 gets last-1
-           moments_i(:,:,iky,ikx,izs-2,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-2)
+           moments_i(:,:,iky,ikx,izs-2,updatetlevel) = pb_phase_L(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_L,-2)
          ELSE
            moments_i(:,:,iky,ikx,izs-1,updatetlevel) = 0._dp
            moments_i(:,:,iky,ikx,izs-2,updatetlevel) = 0._dp
          ENDIF
          ikxBC_R = ikx_zBC_R(iky,ikx)
          IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! last+1 gets first
-           moments_i(:,:,iky,ikx,ize+1,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+1)
+           moments_i(:,:,iky,ikx,ize+1,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+1)
            ! last+2 gets first+1
-           moments_i(:,:,iky,ikx,ize+2,updatetlevel) = buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+2)
+           moments_i(:,:,iky,ikx,ize+2,updatetlevel) = pb_phase_R(iky)*buff_pjxy_zBC_i(:,:,iky,ikxBC_R,+2)
          ELSE
            moments_i(:,:,iky,ikx,ize+1,updatetlevel) = 0._dp
            moments_i(:,:,iky,ikx,ize+2,updatetlevel) = 0._dp
          ENDIF
        ENDDO
      ENDDO
    ENDIF
  END SUBROUTINE update_ghosts_z_i

  SUBROUTINE update_ghosts_z_phi
    USE parallel, ONLY : buff_xy_zBC
    USE fields,   ONLY : phi
    IMPLICIT NONE
    INTEGER :: ikxBC_L, ikxBC_R
    CALL cpu_time(t0_ghost)
    IF(Nz .GT. 1) THEN
      IF (num_procs_z .GT. 1) THEN
        CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
        count = (ikye-ikys+1) * (ikxe-ikxs+1)
        !!!!!!!!!!! Send ghost to the up neighbour !!!!!!!!!!!!!!!!!!!!!!
        CALL mpi_sendrecv(     phi(:,:,ize  ), count, MPI_DOUBLE_COMPLEX, nbr_U, 30, & ! Send to Up the last
                          buff_xy_zBC(:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 30, & ! Receive from Down the first-1
                          comm0, status, ierr)
        CALL mpi_sendrecv(     phi(:,:,ize-1), count, MPI_DOUBLE_COMPLEX, nbr_U, 31, & ! Send to Up the last-1
                          buff_xy_zBC(:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 31, & ! Receive from Down the first-2
                          comm0, status, ierr)
        !!!!!!!!!!! Send ghost to the down neighbour !!!!!!!!!!!!!!!!!!!!!!
        CALL mpi_sendrecv(     phi(:,:,izs  ), count, MPI_DOUBLE_COMPLEX, nbr_D, 32, & ! Send to Down the first
                          buff_xy_zBC(:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 32, & ! Receive from Up the last+1
                          comm0, status, ierr)
        CALL mpi_sendrecv(     phi(:,:,izs+1), count, MPI_DOUBLE_COMPLEX, nbr_D, 33, & ! Send to Down the first+1
                          buff_xy_zBC(:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 33, & ! Receive from Up the last+2
                          comm0, status, ierr)
      ELSE
        buff_xy_zBC(:,:,-1) = phi(:,:,ize  )
        buff_xy_zBC(:,:,-2) = phi(:,:,ize-1)
        buff_xy_zBC(:,:,+1) = phi(:,:,izs  )
        buff_xy_zBC(:,:,+2) = phi(:,:,izs+1)
      ENDIF
      DO iky = ikys,ikye
        DO ikx = ikxs,ikxe
          ikxBC_L = ikx_zBC_L(iky,ikx)
          IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! first-1 gets last
-           phi(iky,ikx,izs-1) = buff_xy_zBC(iky,ikxBC_L,-1)
+           phi(iky,ikx,izs-1) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-1)
            ! first-2 gets last-1
-           phi(iky,ikx,izs-2) = buff_xy_zBC(iky,ikxBC_L,-2)
+           phi(iky,ikx,izs-2) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-2)
          ELSE
            phi(iky,ikx,izs-1) = 0._dp
            phi(iky,ikx,izs-2) = 0._dp
          ENDIF
          ikxBC_R = ikx_zBC_R(iky,ikx)
          IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! last+1 gets first
-           phi(iky,ikx,ize+1) = buff_xy_zBC(iky,ikxBC_R,+1)
+           phi(iky,ikx,ize+1) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+1)
            ! last+2 gets first+1
-           phi(iky,ikx,ize+2) = buff_xy_zBC(iky,ikxBC_R,+2)
+           phi(iky,ikx,ize+2) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+2)
          ELSE
            phi(iky,ikx,ize+1) = 0._dp
            phi(iky,ikx,ize+2) = 0._dp
          ENDIF
        ENDDO
      ENDDO
    ENDIF
    CALL cpu_time(t1_ghost)
    tc_ghost = tc_ghost + (t1_ghost - t0_ghost)
  END SUBROUTINE update_ghosts_z_phi

  SUBROUTINE update_ghosts_z_psi
    USE parallel, ONLY : buff_xy_zBC
    USE fields,   ONLY : psi
    IMPLICIT NONE
    INTEGER :: ikxBC_L, ikxBC_R
    CALL cpu_time(t0_ghost)
    IF(Nz .GT. 1) THEN
      IF (num_procs_z .GT. 1) THEN
        CALL MPI_BARRIER(MPI_COMM_WORLD,ierr)
        count = (ikye-ikys+1) * (ikxe-ikxs+1)
        !!!!!!!!!!! Send ghost to the up neighbour !!!!!!!!!!!!!!!!!!!!!!
        CALL mpi_sendrecv(     psi(:,:,ize  ), count, MPI_DOUBLE_COMPLEX, nbr_U, 40, & ! Send to Up the last
                          buff_xy_zBC(:,:,-1), count, MPI_DOUBLE_COMPLEX, nbr_D, 40, & ! Receive from Down the first-1
                          comm0, status, ierr)
        CALL mpi_sendrecv(     psi(:,:,ize-1), count, MPI_DOUBLE_COMPLEX, nbr_U, 41, & ! Send to Up the last-1
                          buff_xy_zBC(:,:,-2), count, MPI_DOUBLE_COMPLEX, nbr_D, 41, & ! Receive from Down the first-2
                          comm0, status, ierr)
        !!!!!!!!!!! Send ghost to the down neighbour !!!!!!!!!!!!!!!!!!!!!!
        CALL mpi_sendrecv(     psi(:,:,izs  ), count, MPI_DOUBLE_COMPLEX, nbr_D, 42, & ! Send to Down the first
                          buff_xy_zBC(:,:,+1), count, MPI_DOUBLE_COMPLEX, nbr_U, 42, & ! Receive from Up the last+1
                          comm0, status, ierr)
        CALL mpi_sendrecv(     psi(:,:,izs+1), count, MPI_DOUBLE_COMPLEX, nbr_D, 43, & ! Send to Down the first+1
                          buff_xy_zBC(:,:,+2), count, MPI_DOUBLE_COMPLEX, nbr_U, 43, & ! Receive from Up the last+2
                          comm0, status, ierr)
      ELSE
        buff_xy_zBC(:,:,-1) = psi(:,:,ize  )
        buff_xy_zBC(:,:,-2) = psi(:,:,ize-1)
        buff_xy_zBC(:,:,+1) = psi(:,:,izs  )
        buff_xy_zBC(:,:,+2) = psi(:,:,izs+1)
      ENDIF
      DO iky = ikys,ikye
        DO ikx = ikxs,ikxe
          ikxBC_L = ikx_zBC_L(iky,ikx)
          IF (ikxBC_L .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! first-1 gets last
-           psi(iky,ikx,izs-1) = buff_xy_zBC(iky,ikxBC_L,-1)
+           psi(iky,ikx,izs-1) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-1)
            ! first-2 gets last-1
-           psi(iky,ikx,izs-2) = buff_xy_zBC(iky,ikxBC_L,-2)
+           psi(iky,ikx,izs-2) = pb_phase_L(iky)*buff_xy_zBC(iky,ikxBC_L,-2)
          ELSE
            psi(iky,ikx,izs-1) = 0._dp
            psi(iky,ikx,izs-2) = 0._dp
          ENDIF
          ikxBC_R = ikx_zBC_R(iky,ikx)
          IF (ikxBC_R .NE. -99) THEN ! Exchanging the modes that have a periodic pair (a)
            ! last+1 gets first
-           psi(iky,ikx,ize+1) = buff_xy_zBC(iky,ikxBC_R,+1)
+           psi(iky,ikx,ize+1) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+1)
            ! last+2 gets first+1
-           psi(iky,ikx,ize+2) = buff_xy_zBC(iky,ikxBC_R,+2)
+           psi(iky,ikx,ize+2) = pb_phase_R(iky)*buff_xy_zBC(iky,ikxBC_R,+2)
          ELSE
            psi(iky,ikx,ize+1) = 0._dp
            psi(iky,ikx,ize+2) = 0._dp
          ENDIF
        ENDDO
      ENDDO
    ENDIF
    CALL cpu_time(t1_ghost)
    tc_ghost = tc_ghost + (t1_ghost - t0_ghost)
  END SUBROUTINE update_ghosts_z_psi

END MODULE ghosts
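All routines above repeat the same MPI_SENDRECV ghost pattern; stripped of the moment indices, the connection map and the phase factor, it reduces to a plain ghost-cell exchange on a periodic 1D communicator. A minimal self-contained sketch (a hypothetical demo, not part of the patch; the real module additionally reindexes the receive buffer through ikx_zBC_L/R and multiplies by pb_phase_L/R):

    PROGRAM ghost_demo
      USE mpi
      IMPLICIT NONE
      INTEGER, PARAMETER :: n = 4       ! local z points
      DOUBLE PRECISION   :: u(0:n+1)    ! one ghost cell on each side
      INTEGER :: comm1d, rank, nprocs, nbr_D, nbr_U, ierr
      INTEGER :: status(MPI_STATUS_SIZE)
      CALL MPI_Init(ierr)
      CALL MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
      ! periodic Cartesian communicator; the neighbours play the role of nbr_D/nbr_U above
      CALL MPI_Cart_create(MPI_COMM_WORLD, 1, [nprocs], [.TRUE.], .FALSE., comm1d, ierr)
      CALL MPI_Comm_rank(comm1d, rank, ierr)
      CALL MPI_Cart_shift(comm1d, 0, 1, nbr_D, nbr_U, ierr)
      u = rank                          ! fill the local slab with the rank id
      ! send the last interior point up, receive the lower ghost from below (tag 20, as above)
      CALL MPI_Sendrecv(u(n),   1, MPI_DOUBLE_PRECISION, nbr_U, 20, &
                        u(0),   1, MPI_DOUBLE_PRECISION, nbr_D, 20, comm1d, status, ierr)
      ! send the first interior point down, receive the upper ghost from above
      CALL MPI_Sendrecv(u(1),   1, MPI_DOUBLE_PRECISION, nbr_D, 22, &
                        u(n+1), 1, MPI_DOUBLE_PRECISION, nbr_U, 22, comm1d, status, ierr)
      PRINT '(A,I0,A,2F6.1)', 'rank ', rank, ' ghosts: ', u(0), u(n+1)
      CALL MPI_Finalize(ierr)
    END PROGRAM ghost_demo

Compiled with mpif90 and run on any number of ranks, each rank should report its down/up neighbours' ids in its two ghost cells.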