diff --git a/src/beam_mod.f90 b/src/beam_mod.f90
index 9e62834..e5b42b2 100644
--- a/src/beam_mod.f90
+++ b/src/beam_mod.f90
@@ -1,2085 +1,2085 @@
!------------------------------------------------------------------------------
! EPFL/Swiss Plasma Center
!------------------------------------------------------------------------------
!
! MODULE: beam
!
!> @author
!> Guillaume Le Bars EPFL/SPC
!> Patryk Kaminski EPFL/SPC
!> Trach Minh Tran EPFL/SPC
!
! DESCRIPTION:
!> Module responsible for loading, advancing and computing the necessary diagnostics for the simulated particles.
!------------------------------------------------------------------------------
MODULE beam
  !
  USE constants
  USE mpi
  USE mpihelper
  USE basic, ONLY: mpirank, mpisize
  USE distrib
  IMPLICIT NONE

  !> Stores the particles properties for the run.
  TYPE particles
    INTEGER :: Nploc            !< Local number of simulated particles
    INTEGER :: Nptot            !< Total number of simulated particles
    REAL(kind=db) :: m          !< Particle mass
    REAL(kind=db) :: q          !< Particle charge
    REAL(kind=db) :: weight     !< Number of physical particles represented by one macro-particle
    REAL(kind=db) :: qmRatio    !< Charge over mass ratio
    REAL(kind=db) :: H0
    REAL(kind=db) :: P0
    REAL(kind=db) :: temperature
    LOGICAL :: Davidson=.false.
    LOGICAL :: is_test=.false.
    INTEGER, DIMENSION(4) :: nblost !< Number of particles lost in (z_min, z_max, r_min, r_max) since the last gather
    INTEGER, DIMENSION(:), ALLOCATABLE :: Rindex    !< Index in the electric potential grid for the R direction
    INTEGER, DIMENSION(:), ALLOCATABLE :: Zindex    !< Index in the electric potential grid for the Z direction
    INTEGER, DIMENSION(:), ALLOCATABLE :: partindex !< Index of the particle, to be able to follow it when it goes from one MPI host to the other
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: R     !< Radial coordinates of the particles
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: Z     !< Longitudinal coordinates of the particles
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: THET  !< Azimuthal coordinates of the particles
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: BZ    !< Axial magnetic field at the particle position
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: BR    !< Radial magnetic field at the particle position
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: pot   !< Electric potential
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: potxt !< External electric potential
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: Er    !< Radial electric field
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: Ez    !< Axial electric field
    REAL(kind=db), DIMENSION(:), POINTER :: UR        !< Normalized radial velocity at the current time step
    REAL(kind=db), DIMENSION(:), POINTER :: URold     !< Normalized radial velocity at the previous time step
    REAL(kind=db), DIMENSION(:), POINTER :: UTHET     !< Normalized azimuthal velocity at the current time step
    REAL(kind=db), DIMENSION(:), POINTER :: UTHETold  !< Normalized azimuthal velocity at the previous time step
    REAL(kind=db), DIMENSION(:), POINTER :: UZ        !< Normalized axial velocity at the current time step
    REAL(kind=db), DIMENSION(:), POINTER :: UZold     !< Normalized axial velocity at the previous time step
    REAL(kind=db), DIMENSION(:), POINTER :: Gamma     !< Lorentz factor at the current time step
    REAL(kind=db), DIMENSION(:), POINTER :: Gammaold  !< Lorentz factor at the previous time step
    REAL(kind=db), DIMENSION(:,:), ALLOCATABLE :: geomweight !< Geometric weight at the particle position
    LOGICAL :: collected !< Stores if the particles data have been collected to the MPI root process during this time step
    INTEGER, DIMENSION(:), ALLOCATABLE :: addedlist
  END TYPE particles
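  ! Note: the particle quantities are stored as a structure of arrays (one array
  ! per quantity, indexed by the local particle number), which keeps the hot
  ! loops below contiguous in memory. A minimal usage sketch (comment only, not
  ! part of the compiled code):
  !   TYPE(particles) :: p
  !   CALL creat_parts(p, 1024)  ! reserve storage; sets p%Nploc = p%Nptot = 1024
  !   p%Z(1) = 0.5_db            ! fill the coordinate arrays directly
  ! The load_* routines at the end of this module follow this pattern.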
  !
  !TYPE(particles) :: parts !< Storage for all the particles
  !SAVE :: parts
  TYPE(particles), DIMENSION(:), ALLOCATABLE, SAVE :: partslist

  ! Diagnostics (scalars)
  REAL(kind=db) :: ekin=0      !< Total kinetic energy (J)
  REAL(kind=db) :: epot=0      !< Total potential energy (J)
  REAL(kind=db) :: etot=0      !< Current total energy (J)
  REAL(kind=db) :: etot0=0     !< Initial total energy (J)
  REAL(kind=db) :: loc_etot0=0 !< Theoretical local total energy (J)
  REAL(kind=db) :: Energies(4) !< (1) kinetic energy, (2) potential energy, (3) total energy and (4) energy gained or lost due to the gain or loss of particles (J)
  !
  INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: Nplocs_all !< Array containing the local numbers of particles in each MPI process
  !
CONTAINS

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Allocate the memory for the particles variable storing the particles quantities.
!
!> @param[inout] p the particles variable needing to be allocated.
!> @param[in] nparts the maximum number of particles that will be stored in this variable
!---------------------------------------------------------------------------
  SUBROUTINE creat_parts(p, nparts)
    TYPE(particles) :: p
    INTEGER, INTENT(in) :: nparts

    IF (.NOT. ALLOCATED(p%Z)) THEN
      p%Nploc = nparts
      p%Nptot = nparts
      ALLOCATE(p%Z(nparts))
      ALLOCATE(p%R(nparts))
      ALLOCATE(p%THET(nparts))
      ALLOCATE(p%BZ(nparts))
      ALLOCATE(p%BR(nparts))
      ALLOCATE(p%UR(nparts))
      ALLOCATE(p%UZ(nparts))
      ALLOCATE(p%UTHET(nparts))
      ALLOCATE(p%URold(nparts))
      ALLOCATE(p%UZold(nparts))
      ALLOCATE(p%UTHETold(nparts))
      ALLOCATE(p%Gamma(nparts))
      ALLOCATE(p%Rindex(nparts))
      ALLOCATE(p%Zindex(nparts))
      ALLOCATE(p%partindex(nparts))
      ALLOCATE(p%pot(nparts))
      ALLOCATE(p%potxt(nparts))
      ALLOCATE(p%Er(nparts))
      ALLOCATE(p%Ez(nparts))
      ALLOCATE(p%Gammaold(nparts))
      ALLOCATE(p%geomweight(nparts,0:2))
      p%nblost=0
      p%partindex=-1
      p%URold=0
      p%UZold=0
      p%UTHETold=0
      p%rindex=0
      p%zindex=0
      p%BR=0
      p%BZ=0
      p%UR=0
      p%UZ=0
      p%UTHET=0
      p%Z=0
      p%R=0
      p%THET=0
      p%Gamma=1
      p%Er=0
      p%Ez=0
      p%pot=0
      p%potxt=0
      p%gammaold=1
      p%collected=.false.
      p%Davidson=.false.
      p%is_test=.false.
      p%m=me
      p%q=-elchar
      p%qmRatio=p%q/p%m
      p%weight=1.0_db
      p%H0=0
      p%P0=0
      p%temperature=0
      p%geomweight=0
    END IF
  END SUBROUTINE creat_parts

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Loads the particles at the beginning of the simulation and creates the parts variable if necessary
!---------------------------------------------------------------------------
  SUBROUTINE load_parts
    USE basic, ONLY: nplasma, mpirank, ierr, distribtype, mpisize, nlclassical, nbspecies, Zbounds
    USE mpi
    INTEGER:: i, j
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: VZ, VR, VTHET

    ALLOCATE(VZ(nplasma), VR(nplasma), VTHET(nplasma))
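    ! Dispatch note (summary of the cases below): the Davidson cases (2-5) all
    ! go through loadDavidson and differ only in the radial sampling routine
    ! passed as a procedure argument (lodunir, lodinvr or lodgausr), e.g.
    !   CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodinvr)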
    ! Select the case to define the type of distribution
    SELECT CASE(distribtype)
    CASE(1)
      ! Gaussian distribution in V, uniform in Z and 1/R in R
      CALL loaduniformRZ(partslist(1), VR, VZ, VTHET)
    CASE(2)
      ! Stable distribution from Davidson 4.95 p.119
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodunir)
    CASE(3)
      ! Stable distribution from Davidson 4.95 p.119 but with a constant distribution in R
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodinvr)
    CASE(4)
      ! Stable distribution from Davidson 4.95 p.119 but with a Gaussian distribution in R
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodgausr)
    CASE(5)
      ! Stable distribution from Davidson 4.95 p.119 with a Gaussian in V computed from the v_th given by temp
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodunir)
    CASE(6)
      ! Uniform distribution in R and Z and Gaussian distribution in V with Vz

!---------------------------------------------------------------------------
!> @brief Checks for each particle if the z position is outside of the local/global simulation space.
!> Depending on the boundary conditions, the leaving particles are sent to the correct neighbouring MPI process
!> or deleted.
!
!> @param[in] p particles structure
!
!> @author Guillaume Le Bars EPFL/SPC
!---------------------------------------------------------------------------
  SUBROUTINE bound(p)
    USE basic, ONLY: zgrid, nz, Zbounds, mpirank, step, leftproc, rightproc
    USE IFPORT
    IMPLICIT NONE
    type(particles), INTENT(INOUT):: p
    INTEGER :: i, rsendnbparts, lsendnbparts, nblostparts
    INTEGER :: receivednbparts, partdiff, lostindex, sentindex
    INTEGER, DIMENSION(p%Nploc) :: sendhole
    INTEGER, DIMENSION(p%Nploc) :: losthole
    LOGICAL:: leftcomm, rightcomm, sent
    INTEGER, ALLOCATABLE:: partstoremove(:)

    rsendnbparts=0
    lsendnbparts=0
    nblostparts=0
    losthole=0
    sendhole=0
    receivednbparts=0

    ! We communicate with the left process
    leftcomm = leftproc .ne. -1
    ! We communicate with the right process
    rightcomm = rightproc .ne. -1

    IF (p%Nploc .gt. 0) THEN
      ! Boundary condition in the z direction
      !$OMP PARALLEL DO DEFAULT(SHARED)
      DO i=1,p%Nploc
        ! If the particle is to the right of the local simulation space, it is sent to the right MPI process
        IF (p%Z(i) .ge. zgrid(Zbounds(mpirank+1))) THEN
          !$OMP CRITICAL (nbparts)
          IF(rightcomm) THEN
            rsendnbparts=rsendnbparts+1
            sendhole(lsendnbparts+rsendnbparts)=i
            DO WHILE (p%Z(i) .GT. zgrid(nz))
              p%Z(i) = p%Z(i) - zgrid(nz) + zgrid(0)
            END DO
          ELSE
            nblostparts=nblostparts+1
            losthole(nblostparts)=i
            p%nblost(2)=p%nblost(2)+1
          END IF
          !$OMP END CRITICAL (nbparts)
        ! If the particle is to the left of the local simulation space, it is sent to the left MPI process
        ELSE IF (p%Z(i) .lt. zgrid(Zbounds(mpirank))) THEN
          !$OMP CRITICAL (nbparts)
          IF(leftcomm) THEN
            ! We send the particle to the left process
            lsendnbparts=lsendnbparts+1
            sendhole(lsendnbparts+rsendnbparts)=-i
            DO WHILE (p%Z(i) .LT. zgrid(0))
              p%Z(i) = p%Z(i) + zgrid(nz) - zgrid(0)
            END DO
          ELSE
            ! We destroy the particle
            nblostparts=nblostparts+1
            losthole(nblostparts)=i
            p%nblost(1)=p%nblost(1)+1
          END IF
          !$OMP END CRITICAL (nbparts)
        END IF
      END DO
      !$OMP END PARALLEL DO
    END IF

    IF(mpisize .gt. 1) THEN
      ! We send the particles leaving the local simulation space to the closest neighbour
      CALL particlescommunication(p, lsendnbparts, rsendnbparts, sendhole, receivednbparts, (/leftproc,rightproc/))
    END IF

    ! If the boundary conditions are not periodic, we delete the corresponding particles
    IF(nblostparts .gt. 0 .and. step .ne. 0) THEN
      DO i=nblostparts,1,-1
        CALL delete_part(p, losthole(i), .false.)
      END DO
      !WRITE(*,'(i8.2,a,i4.2)') nblostparts, " particles lost in z on process: ", mpirank
    END IF
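    ! Compaction sketch (matching the code below): losthole and sendhole record
    ! the indices of departed particles; each remaining hole h is filled by
    ! moving the last stored particle into it,
    !   CALL move_part(p, p%Nploc, h)
    !   p%Nploc = p%Nploc - 1
    ! which is why the hole indices are sorted before the loop.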
    ! Computes if we received fewer particles than we sent
    partdiff=max(lsendnbparts+rsendnbparts-receivednbparts,0)
    IF(nblostparts + partdiff .gt. 0) THEN
      ALLOCATE(partstoremove(nblostparts+partdiff))
      partstoremove(1:partdiff)=abs(sendhole(receivednbparts+1:receivednbparts+partdiff))
      partstoremove(partdiff+1:partdiff+nblostparts)=abs(losthole(1:nblostparts))
      call qsort(partstoremove,size(partstoremove),sizeof(partstoremove(1)),compare_int)
      ! If we received fewer particles than we sent, or lost particles, we fill the remaining holes with the particles from the end of the
      ! parts arrays
      DO i=1,nblostparts+partdiff
        CALL move_part(p, p%Nploc, partstoremove(i))
        p%partindex(p%Nploc)=-1
        p%Nploc = p%Nploc-1
      END DO
    END IF

  CONTAINS
    INTEGER(2) function compare_int(a,b)
      INTEGER :: a, b, c
      c=b-a
      if(c.ne.0) c=1
      compare_int=sign(c,b-a)
    end function
  END subroutine bound

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Compute the grid cell indices for each particle as well as the distance weights Wr, Wz.
!> @param[in] p particles structure
!---------------------------------------------------------------------------
  SUBROUTINE localisation(p)
    USE basic, ONLY: zgrid, rgrid, dz, nr, nnr, dr, mpirank
    Use geometry, ONLY: r_a, geom_weight
    USE IFPORT
    type(particles), INTENT(INOUT):: p
    INTEGER :: i, nblostparts
    INTEGER, DIMENSION(p%Nploc) :: losthole
    INTEGER, DIMENSION(4):: nblost

    losthole=0
    nblostparts=0
    nblost=p%nblost

    IF (p%Nploc .gt. 0) THEN
      !$OMP PARALLEL DO DEFAULT(SHARED), private(i) reduction(+:nblost)
      DO i=1,p%Nploc
        call geom_weight(p%Z(i), p%r(i), p%geomweight(i,:))
        if(p%geomweight(i,0).le.0 .or. p%R(i) .gt. rgrid(nr) .or. p%R(i) .lt. rgrid(0)) then
          ! If the particle is outside of the simulation space in the r direction, it is deleted.
          !$OMP CRITICAL (lostparts)
          nblostparts=nblostparts+1
          losthole(nblostparts)=i
          !$OMP END CRITICAL (lostparts)
          if(p%R(i) .le. r_a) then
            nblost(3)=nblost(3)+1
          else
            nblost(4)=nblost(4)+1
          end if
        else
          call p_calc_index(p,i)
        end if
      END DO
      !$OMP END PARALLEL DO
      IF(nblostparts.gt.0) THEN
        p%nblost=nblost
        call qsort(losthole(1:nblostparts),nblostparts,sizeof(losthole(1)),compare_int)
        DO i=1,nblostparts
          !WRITE(*,'(a,i8.8,a,i4.2,a,3(1pe12.3))') "Particle: ", losthole(i), " of process: ", mpirank, " is out of bound in r: ", p%R(losthole(i))*rnorm, rgrid(0)*rnorm, rgrid(nr)*rnorm
          CALL delete_part(p,losthole(i))
        END DO
        !WRITE(*,'(i6.2,a,i6.2)') nblostparts, " particles lost in r on process: ", mpirank
      END IF
    END IF

  CONTAINS
    INTEGER(2) function compare_int(a,b)
      INTEGER :: a, b, c
      c=b-a
      if(c.ne.0) c=1
      compare_int=sign(c,b-a)
    end function
  END SUBROUTINE localisation
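!---------------------------------------------------------------------------
!> @brief Computes the cell indices (rindex, zindex) of particle i, as
!> implemented below: the radial grid is split into three sub-regions of
!> uniform spacing (sizes nnr(1:3), inverse spacings invdr(1:3)), so that
!> \f$ r_{index} = (r - r_k)\,invdr_k + \sum_{j<k} nnr_j \f$ in the sub-region
!> k containing r, while the axial grid is uniform,
!> \f$ z_{index} = (z - z_0)\,invdz \f$.
!---------------------------------------------------------------------------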
  subroutine p_calc_index(p,i)
    use basic, only: rgrid,zgrid,invdz,invdr, nnr, nr
    integer::i
    type(particles)::p

    IF (p%R(i) .GT. rgrid(0) .AND. p%R(i) .LT. rgrid(nnr(1))) THEN
      p%rindex(i)=(p%R(i)-rgrid(0))*invdr(1)
    ELSE IF(p%R(i) .GE. rgrid(nnr(1)) .AND. p%R(i) .LT. rgrid(nnr(1)+nnr(2))) THEN
      p%rindex(i)=(p%R(i)-rgrid(nnr(1)))*invdr(2)+nnr(1)
    ELSE IF(p%R(i) .GE. rgrid(nnr(1)+nnr(2)) .AND. p%R(i) .LT. rgrid(nr)) THEN
      p%rindex(i)=(p%R(i)-rgrid(nnr(1)+nnr(2)))*invdr(3)+nnr(1)+nnr(2)
    End if
    p%zindex(i)=(p%Z(i)-zgrid(0))*invdz
  end subroutine p_calc_index

  SUBROUTINE comp_mag_p(p)
    USE basic, ONLY: zgrid, rgrid, dz, BZ, BR, nz, invdz
    type(particles), INTENT(INOUT):: p
    INTEGER :: i, nblostparts
    Real(kind=db):: WZ,WR
    INTEGER:: j1,j2,j3,j4

    !$OMP PARALLEL DO SIMD DEFAULT(SHARED) Private(J1,J2,J3,J4,WZ,WR)
    DO i=1,p%Nploc
      WZ=(p%Z(i)-zgrid(p%zindex(i)))*invdz
      WR=(p%R(i)-rgrid(p%rindex(i)))/(rgrid(p%rindex(i)+1)-rgrid(p%rindex(i)))
      J1=(p%rindex(i))*(nz+1) + p%zindex(i)+1
      J2=(p%rindex(i))*(nz+1) + p%zindex(i)+2
      J3=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+1
      J4=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+2
      ! Interpolation for magnetic field
      p%BZ(i)=(1-WZ)*(1-WR)*Bz(J4) &
           & +WZ*(1-WR)*Bz(J3) &
           & +(1-WZ)*WR*Bz(J2) &
           & +WZ*WR*Bz(J1)
      p%BR(i)=(1-WZ)*(1-WR)*Br(J4) &
           & +WZ*(1-WR)*Br(J3) &
           & +(1-WZ)*WR*Br(J2) &
           & +WZ*WR*Br(J1)
    END DO
    !$OMP END PARALLEL DO SIMD
  end subroutine comp_mag_p
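!---------------------------------------------------------------------------
! Sketch of the velocity update implemented in comp_velocity_fun below (a
! standard Boris-type scheme with \f$ \tau = \frac{q}{m}\frac{B_{norm}\,\Delta t}{2} \f$):
!   half electric kick: \f$ u^- = u^{old} + \tau E \f$
!   magnetic rotation:  \f$ t = \tau B/\gamma(u^-), \quad s = 2t/(1+t^2), \quad
!                           u^+ = u^- + s \times (u^- + t \times u^-) \f$
!   half electric kick: \f$ u^{new} = u^+ + \tau E \f$
! written out component-wise for (z, r, theta) in the particle loop.
!---------------------------------------------------------------------------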
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief General routine to compute the velocities at time t+1.
!> This routine allows to treat the classical and relativistic cases efficiently from a numerical standpoint,
!> by using a pointer to the routine computing gamma. This avoids the nlclassical flag check on each particle.
!
!> @param[in] p The particles structure being updated
!---------------------------------------------------------------------------
  SUBROUTINE comp_velocity(p)
    !
    ! Computes the new velocity of the particles due to the Lorentz force
    !
    USE basic, ONLY : nlclassical
    type(particles), INTENT(INOUT):: p

    ! Store old velocities
    CALL swappointer(p%UZold,    p%UZ)
    CALL swappointer(p%URold,    p%UR)
    CALL swappointer(p%UTHETold, p%UTHET)
    CALL swappointer(p%Gammaold, p%Gamma)

    IF (nlclassical) THEN
      CALL comp_velocity_fun(p, gamma_classical)
    ELSE
      CALL comp_velocity_fun(p, gamma_relativistic)
    END IF
  END SUBROUTINE comp_velocity

!---------------------------------------------------------------------------
!> @author
!> Patryk Kaminski EPFL/SPC
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Routine called by comp_velocity to compute the velocities at time t+1.
!> This routine allows to treat the classical and relativistic cases efficiently from a numerical standpoint,
!> by using the routine computing gamma as an input. This avoids the nlclassical flag check on each particle.
!
!> @param[in] gamma the function used to compute the value of the Lorentz factor \f$\gamma\f$
!> @param[in] p The particles structure being updated
!---------------------------------------------------------------------------
  SUBROUTINE comp_velocity_fun(p, gamma)
    !
    ! Computes the new velocity of the particles due to the Lorentz force
    !
    USE basic, ONLY : bnorm, dt, BZ, BR, nz
    interface
      REAL(kind=db) FUNCTION gamma(UZ, UR, UTHET)
        USE constants
        REAL(kind=db), INTENT(IN):: UR,UZ,UTHET
      end FUNCTION
    end interface
    type(particles), INTENT(INOUT):: p
    REAL(kind=db) :: tau
    REAL(kind=db):: BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR, ZBZ2, ZBR2
    INTEGER:: J1, J2, J3, J4
    INTEGER:: i

    ! Normalized time increment
    !tau=omegac/2/omegap*dt/tnorm
    tau=p%qmRatio*bnorm*0.5*dt
    IF (p%Nploc .NE. 0) THEN
      !$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(J1,J2,J3,J4,BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR, ZBZ2, ZBR2)
      DO i=1,p%Nploc
        !J1=(p%rindex(i))*(nz+1) + p%zindex(i)+1
        !J2=(p%rindex(i))*(nz+1) + p%zindex(i)+2
        !J3=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+1
        !J4=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+2
        ! Interpolation for magnetic field
        !BRZ=(1-p%WZ(i))*(1-p%WR(i))*Bz(J4) &
        !& +p%WZ(i)*(1-p%WR(i))*Bz(J3) &
        !& +(1-p%WZ(i))*p%WR(i)*Bz(J2) &
        !& +p%WZ(i)*p%WR(i)*Bz(J1)
        !BRR=(1-p%WZ(i))*(1-p%WR(i))*Br(J4) &
        !& +p%WZ(i)*(1-p%WR(i))*Br(J3) &
        !& +(1-p%WZ(i))*p%WR(i)*Br(J2) &
        !& +p%WZ(i)*p%WR(i)*Br(J1)

        ! First half of the electric acceleration
        p%UZ(i)=p%UZold(i)+p%Ez(i)*tau
        p%UR(i)=p%URold(i)+p%ER(i)*tau
        p%Gamma(i)=gamma(p%UZ(i), p%UR(i), p%UTHETold(i))

        ! Rotation along the magnetic field
        !ZBZ=tau*BRZ/p%Gamma(i)
        !ZBR=tau*BRR/p%Gamma(i)
        ZBZ=tau*p%BZ(i)/p%Gamma(i)
        ZBR=tau*p%BR(i)/p%Gamma(i)
        ZPZ=p%UZ(i)-ZBR*p%UTHETold(i)                  !u'_{z}
        ZPR=p%UR(i)+ZBZ*p%UTHETold(i)                  !u'_{r}
        ZPTHET=p%UTHETold(i)+(ZBR*p%UZ(i)-ZBZ*p%UR(i)) !u'_{theta}
        SQR=1+ZBZ*ZBZ+ZBR*ZBR
        ZBZ2=2*ZBZ/SQR
        ZBR2=2*ZBR/SQR
        p%UZ(i)=p%UZ(i)-ZBR2*ZPTHET                    !u+_{z}
        p%UR(i)=p%UR(i)+ZBZ2*ZPTHET                    !u+_{r}
        p%UTHET(i)=p%UTHETold(i)+(ZBR2*ZPZ-ZBZ2*ZPR)   !u+_{theta}

        ! Second half of the electric acceleration
        p%UZ(i)=p%UZ(i)+p%EZ(i)*tau
        p%UR(i)=p%UR(i)+p%ER(i)*tau

        ! Final computation of the Lorentz factor
        p%Gamma(i)=gamma(p%UZ(i), p%UR(i), p%UTHET(i))
      END DO
      !$OMP END PARALLEL DO SIMD
    END IF
    p%collected=.false.
  END SUBROUTINE comp_velocity_fun

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Routine used to compute the Lorentz factor \f$\gamma\f$ in the classical simulations.
!> This routine systematically returns 1.0 to treat the system according to classical dynamics.
!
!> @param[out] gamma the Lorentz factor \f$\gamma\f$
!> @param[in] UZ \f$\gamma\beta_z=\gamma v_z/c\f$ the normalized particle longitudinal velocity
!> @param[in] UR \f$\gamma\beta_r=\gamma v_r/c\f$ the normalized particle radial velocity
!> @param[in] UTHET \f$\gamma\beta_\theta=\gamma v_\theta/c\f$ the normalized particle azimuthal velocity
!---------------------------------------------------------------------------
  ELEMENTAL REAL(kind=db) FUNCTION gamma_classical(UZ, UR, UTHET)
#if __INTEL_COMPILER > 1700
    !$OMP declare simd(gamma_classical)
#endif
    REAL(kind=db), INTENT(IN):: UR,UZ,UTHET
    gamma_classical=1.0
  END FUNCTION gamma_classical

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Routine used to compute the Lorentz factor \f$\gamma\f$ in the relativistic simulations.
!> This routine computes the Lorentz factor \f$\gamma=\sqrt{1+\mathbf{\gamma\beta}^2}\f$
!
!> @param[out] gamma the Lorentz factor \f$\gamma\f$
!> @param[in] UZ \f$\gamma\beta_z=\gamma v_z/c\f$ the normalized particle longitudinal velocity
!> @param[in] UR \f$\gamma\beta_r=\gamma v_r/c\f$ the normalized particle radial velocity
!> @param[in] UTHET \f$\gamma\beta_\theta=\gamma v_\theta/c\f$ the normalized particle azimuthal velocity
!---------------------------------------------------------------------------
  ELEMENTAL REAL(kind=db) FUNCTION gamma_relativistic(UZ, UR, UTHET)
#if __INTEL_COMPILER > 1700
    !$OMP declare simd(gamma_relativistic)
#endif
    REAL(kind=db), INTENT(IN):: UR,UZ,UTHET
    gamma_relativistic=sqrt(1+UZ**2+UR**2+UTHET**2)
  END FUNCTION gamma_relativistic
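!---------------------------------------------------------------------------
! Consistency check (comment only): both versions return 1 for u = 0, and for
! \f$ |\gamma\beta| \ll 1 \f$ gamma_relativistic \f$ \approx 1 \f$, so the
! relativistic update smoothly reduces to the classical one at low velocity.
!---------------------------------------------------------------------------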
!---------------------------------------------------------------------------
!> @author
!> Patryk Kaminski EPFL/SPC
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Computes the particles position at time t+1
!> This routine computes the particles position at time t+1 according to the Buneman algorithm.
!
!> @param[in] p The particles structure being updated
!---------------------------------------------------------------------------
  SUBROUTINE push(p)
    Use basic, ONLY: dt, tnorm
    type(particles), INTENT(INOUT):: p
    REAL(kind=db):: XP, YP, COSA, SINA, U1, U2, ALPHA
    INTEGER :: i

    IF (p%Nploc .NE. 0) THEN
      !$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(XP, YP, COSA, SINA, U1, U2, ALPHA)
      DO i=1,p%Nploc
        ! Local Cartesian coordinates
        XP=p%R(i)+dt/tnorm*p%UR(i)/p%Gamma(i)
        YP=dt/tnorm*p%UTHET(i)/p%Gamma(i)

        ! Conversion to cylindrical coordinates
        p%Z(i)=p%Z(i)+dt/tnorm*p%UZ(i)/p%Gamma(i)
        p%R(i)=sqrt(XP**2+YP**2)

        ! Computation of the rotation angle
        IF (p%R(i) .EQ. 0) THEN
          COSA=1
          SINA=0
          ALPHA=0
        ELSE
          COSA=XP/p%R(i)
          SINA=YP/p%R(i)
          ALPHA=asin(SINA)
        END IF

        ! New azimuthal position
        p%THET(i)=MOD(p%THET(i)+ALPHA,2*pi)

        ! Velocity in the rotated reference frame
        U1=COSA*p%UR(i)+SINA*p%UTHET(i)
        U2=-SINA*p%UR(i)+COSA*p%UTHET(i)
        p%UR(i)=U1
        p%UTHET(i)=U2
      END DO
      !$OMP END PARALLEL DO SIMD
    END IF
    p%collected=.false.
  END SUBROUTINE push
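!---------------------------------------------------------------------------
! Geometry of the push above (a sketch of what the loop computes): the particle
! is advanced in a local Cartesian frame,
!   \f$ x' = r + \Delta t\, u_r/\gamma, \qquad y' = \Delta t\, u_\theta/\gamma, \f$
! then mapped back to cylindrical coordinates,
!   \f$ r' = \sqrt{x'^2+y'^2}, \qquad \theta' = \theta + \alpha, \quad \sin\alpha = y'/r', \f$
! and (UR, UTHET) are rotated by the same angle \f$ \alpha \f$ so that they
! stay aligned with the new local frame.
!---------------------------------------------------------------------------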
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Computes several diagnostic quantities
!> This routine computes the total kinetic and electric potential energy.
!> It keeps track of the reference energy and the number of particles per MPI node.
!
!---------------------------------------------------------------------------
  SUBROUTINE diagnostics
    !
    ! Compute energies
    !
    USE constants, ONLY: vlight
    USE basic, ONLY: phinorm, cstep, nlclassical, ierr, step, nlend,&
         & itparts
    INTEGER:: i

    ! Reset the quantities
    ekin=0
    epot=0
    etot=0

    ! Computation of the kinetic and potential energy as well as fluid velocities and density
    !$OMP PARALLEL DO SIMD REDUCTION(+:epot, ekin) DEFAULT(SHARED)
    DO i=1,partslist(1)%Nploc
      ! Potential energy
      epot=epot+(partslist(1)%pot(i)+partslist(1)%potxt(i))

      ! Kinetic energy
      IF(.not. nlclassical) THEN
        ekin=ekin+(partslist(1)%Gamma(i)-1)
      ELSE
        ekin=ekin+0.5*( partslist(1)%UR(i)**2 &
             &        + partslist(1)%UZ(i)**2 &
             &        + partslist(1)%UTHET(i)**2 )
      END IF
    END DO
    !$OMP END PARALLEL DO SIMD
    epot=epot*phinorm*0.5*partslist(1)%q*partslist(1)%weight
    ekin=ekin*partslist(1)%m*partslist(1)%weight*vlight**2

    ! Shift to Etot at cstep=1 (not valid yet at cstep=0!)
    IF(cstep.EQ. 0) THEN
      ! Compute the local total energy
      loc_etot0 = epot+ekin
      etot0=0
    END IF
    !etot=loc_etot0

    ! Compute the total energy
    etot=epot+ekin
    Energies=(/ekin,epot,etot,loc_etot0/)

    ! The computed energy is sent to the root process
    IF(mpisize .gt. 1) THEN
      IF(mpirank .eq. 0) THEN
        CALL MPI_REDUCE(MPI_IN_PLACE, Energies, 4, db_type, db_sum_op, &
             & 0, MPI_COMM_WORLD, ierr)
        etot0=etot0+Energies(4)
        ekin=Energies(1)
        epot=Energies(2)
        etot=Energies(3)
        CALL MPI_REDUCE(MPI_IN_PLACE, partslist(1)%nblost, 4, MPI_INTEGER, MPI_SUM, &
             & 0, MPI_COMM_WORLD, ierr)
      ELSE
        CALL MPI_REDUCE(Energies, Energies, 4, db_type, db_sum_op, &
             & 0, MPI_COMM_WORLD, ierr)
        CALL MPI_REDUCE(partslist(1)%nblost, partslist(1)%nblost, 4, MPI_INTEGER, MPI_SUM, &
             & 0, MPI_COMM_WORLD, ierr)
      END IF
    ELSE
      etot0=etot0+loc_etot0
    END IF
    loc_etot0=0

    ! Send the local number of particles on each node to the root process
    IF(mpisize .gt. 1) THEN
      Nplocs_all(mpirank)=partslist(1)%Nploc
      IF(mpirank .eq. 0) THEN
        CALL MPI_gather(MPI_IN_PLACE, 1, MPI_INTEGER, Nplocs_all, 1, MPI_INTEGER,&
             & 0, MPI_COMM_WORLD, ierr)
        partslist(1)%Nptot=sum(Nplocs_all)
      ELSE
        CALL MPI_gather(Nplocs_all(mpirank), 1, MPI_INTEGER, Nplocs_all, 1, MPI_INTEGER,&
             & 0, MPI_COMM_WORLD, ierr)
      END IF
    ELSE
      partslist(1)%Nptot=partslist(1)%Nploc
    END IF

    ! Calls the communications to send the particles data to the root process for the diagnostic file every itparts time steps
    IF(mpisize .gt. 1 .and. (modulo(step,itparts) .eq. 0 .or. nlend)) THEN
      CALL collectparts(partslist(1))
    END IF
  end subroutine diagnostics

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Collect the particles positions and velocities on the root process.
!> If the collection has already been performed at this time step, the routine does nothing.
!
!---------------------------------------------------------------------------
  SUBROUTINE collectparts(p)
    USE basic, ONLY: mpirank, mpisize, ierr
    type(particles), INTENT(INOUT):: p
    INTEGER, DIMENSION(:), ALLOCATABLE :: displs, Nploc
    INTEGER:: i

    IF(p%collected) RETURN ! Exit the subroutine if the particles have already been collected during this time step

    ALLOCATE(Nploc(0:mpisize-1))
    ALLOCATE(displs(0:mpisize-1))
    displs=0
    Nploc(mpirank)=p%Nploc
    CALL MPI_Allgather(MPI_IN_PLACE, 1, MPI_INTEGER, Nploc, 1, MPI_INTEGER,&
         & MPI_COMM_WORLD, ierr)
    p%Nptot=sum(Nploc)
    IF(p%Nptot .eq. 0) THEN
      p%collected=.true.
      RETURN
    END IF

    IF(mpirank .ne. 0) THEN
      ! Send the particle information to the root process
      CALL MPI_Gatherv(p%Z, Nploc(mpirank), db_type, p%Z, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%R, Nploc(mpirank), db_type, p%R, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%THET, Nploc(mpirank), db_type, p%THET, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%UR, Nploc(mpirank), db_type, p%UR, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%UZ, Nploc(mpirank), db_type, p%UZ, Nploc, displs, &
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%UTHET, Nploc(mpirank), db_type, p%UTHET, Nploc, displs, &
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%pot, Nploc(mpirank), db_type, p%pot, Nploc, displs, &
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%Rindex, Nploc(mpirank), MPI_INTEGER, p%Rindex, Nploc, displs, &
           & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%Zindex, Nploc(mpirank), MPI_INTEGER, p%Zindex, Nploc, displs, &
           & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(p%partindex, Nploc(mpirank), MPI_INTEGER, p%partindex, Nploc, displs, &
           & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
    ELSE
      Do i=1,mpisize-1
        displs(i)=displs(i-1)+Nploc(i-1)
      END DO
      IF(p%Nptot .gt. size(p%R,1)) THEN
        CALL change_parts_allocation(p,p%Nptot-size(P%R,1))
      END IF
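      ! Note on the MPI semantics relied on below: with MPI_IN_PLACE as the send
      ! buffer, the root's own contribution is taken directly from its segment
      ! of the receive arrays, i.e. the root particles must already sit at
      ! offset displs(0)=0, which they do.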
      ! Receive particle information from all processes
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%Z, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%R, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%THET, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%UR, Nploc, displs,&
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%UZ, Nploc, displs, &
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%UTHET, Nploc, displs, &
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%pot, Nploc, displs, &
           & db_type, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), MPI_INTEGER, p%Rindex, Nploc, displs, &
           & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), MPI_INTEGER, p%Zindex, Nploc, displs, &
           & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), MPI_INTEGER, p%partindex, Nploc, displs, &
           & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      p%partindex(sum(Nploc)+1:)=-1
    END IF
    p%collected=.TRUE.
  END SUBROUTINE collectparts

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Computes the velocities at time t-1/2 to keep the second order precision in time on the velocity.
!
!---------------------------------------------------------------------------
  SUBROUTINE adapt_vinit(p)
    !! Computes the velocity at time -dt/2 from the velocities computed at time 0
    !
    USE basic, ONLY : bnorm, dt, BZ, BR, nlclassical, phinorm, nz, distribtype, vnorm
    type(particles), INTENT(INOUT):: p
    REAL(kind=db) :: tau, BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, &
         & SQR, Vperp, v2
    INTEGER :: J1, J2, J3, J4, i
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: VZ, VR, VTHET

    ! In case the Davidson distribution is used, the longitudinal and radial velocities are adapted to take into account the
    ! electric potential.
    IF(distribtype .EQ. 2 .OR. distribtype .EQ. 3 .OR. distribtype .EQ. 4 .or. p%Davidson) THEN
      ALLOCATE(VR(p%Nploc),VZ(p%Nploc),VTHET(p%Nploc))
      CALL loduni(7,VZ)
      VZ=VZ*2*pi
      VTHET=p%UTHET/p%Gamma*vnorm
      DO i=1,p%Nploc
        Vperp=sqrt(MAX(2*p%H0/p%m-2*p%qmRatio*p%pot(i)*phinorm-VTHET(i)**2,0.0_db))
        VR(i)=Vperp*sin(VZ(i))
        VZ(i)=Vperp*cos(VZ(i))
        IF(nlclassical) THEN
          p%Gamma(i)=1
        ELSE
          v2=VR(i)**2+VZ(i)**2+VTHET(i)**2
          p%Gamma(i)=sqrt(1/(1-v2/vnorm**2))
        END IF
        p%UR(i)=p%Gamma(i)*VR(i)/vnorm
        p%UZ(i)=p%Gamma(i)*VZ(i)/vnorm
        p%UTHET(i)=p%Gamma(i)*VTHET(i)/vnorm
      END DO
      DEALLOCATE(VR,VZ,VTHET)
    END IF
    RETURN

    ! Normalised time increment
    !tau=-omegac/2/omegap*dt/tnorm
    tau=p%qmRatio*bnorm*0.5*dt

    ! Store old velocities
    CALL swappointer(p%UZold,    p%UZ)
    CALL swappointer(p%URold,    p%UR)
    CALL swappointer(p%UTHETold, p%UTHET)
    CALL swappointer(p%Gammaold, p%Gamma)

    IF (p%Nploc .NE. 0) THEN
      !$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(J1,J2,J3,J4,BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR)
      DO i=1,p%Nploc
        ! Compute the particle linear indices for the magnetic field interpolation
        !J1=(p%rindex(i))*(nz+1)+p%zindex(i)+1
        !J2=J1+1
        !J3=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+1
        !J4=J3+1
        !
        ! Interpolation for magnetic field
        !BRZ=(1-p%BZ(i))*(1-p%BR(i))*Bz(J4) &
        !& +p%BZ(i)*(1-p%BR(i))*Bz(J3) &
        !& +(1-p%BZ(i))*p%BR(i)*Bz(J2) &
        !& +p%BZ(i)*p%BR(i)*Bz(J1)
        !BRR=(1-p%BZ(i))*(1-p%BR(i))*Br(J4) &
        !& +p%BZ(i)*(1-p%BR(i))*Br(J3) &
        !& +(1-p%BZ(i))*p%BR(i)*Br(J2) &
        !& +p%BZ(i)*p%BR(i)*Br(J1)

        ! Half inverse rotation along the magnetic field
        ZBZ=tau*p%BZ(i)/p%Gammaold(i)
        ZBR=tau*p%BR(i)/p%Gammaold(i)
        SQR=1+ZBZ*ZBZ+ZBR*ZBR
        ZPZ=(p%UZold(i)-ZBR*p%UTHETold(i))/SQR                   !u-_{z}
        ZPR=(p%URold(i)+ZBZ*p%UTHETold(i))/SQR                   !u-_{r}
        ZPTHET=p%UTHETold(i)+(ZBR*p%UZold(i)-ZBZ*p%URold(i))/SQR !u-_{theta}
        p%UZ(i)=ZPZ
        p%UR(i)=ZPR
        p%UTHET(i)=ZPTHET

        ! Half of the deceleration
        p%UZ(i)=p%UZ(i)+p%Ez(i)*tau
        p%UR(i)=p%UR(i)+p%Er(i)*tau

        IF(.not. nlclassical) THEN
          p%Gamma(i)=sqrt(1+p%UZ(i)**2+p%UR(i)**2+p%UTHET(i)**2)
        END IF
      END DO
      !$OMP END PARALLEL DO SIMD
    END IF
  END SUBROUTINE adapt_vinit

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief In the case of MPI parallelism, computes the indices of the particles assigned to the current process.
!---------------------------------------------------------------------------
  SUBROUTINE distribpartsonprocs(p)
    ! Computes the start and end indices for the Z boundaries on the local process
    ! Computes the particle indices from the initial particle loading vector that stay in the current process
    USE basic, ONLY: nz, Zbounds
    TYPE(particles), INTENT(INOUT):: p
    INTEGER:: k, i, nbparts
    REAL(kind=db):: idealnbpartsperproc
    INTEGER:: totparts        ! Total number of particles from zgrid(0) to zgrid(k)
    INTEGER:: partindexstart  ! Starting index of the local particles in the parts variable, used later to move the local particles to the beginning of the vector
    INTEGER:: partindexlast   ! Ending index of the local particles in the parts variable, used later to move the local particles to the beginning of the vector
    INTEGER, DIMENSION(0:nz):: partspercol ! Vector containing the number of particles between zgrid(n) and zgrid(n+1)
    INTEGER:: Zmin, Zmax      ! Minimum and maximum indices of particles in the Z direction
    INTEGER:: Zperproc, zindex

    partspercol=0
    idealnbpartsperproc = FLOOR(REAL(p%Nploc)/REAL(mpisize))
    Zmin=MINVAL(p%Zindex(1:p%Nploc))
    Zmax=MAXVAL(p%Zindex(1:p%Nploc))
    Zperproc=(Zmax-Zmin)/mpisize
    IF(Zmax .eq. 0) Zmax=nz

    DO k=1,p%Nploc
      zindex=p%Zindex(k)
      partspercol(zindex)=partspercol(zindex)+1
    END DO

    IF (Zperproc .eq. 0) THEN
      !! No particles are present initially
      Zperproc=nz/mpisize
      Zmin=0
      DO k=1,mpisize-1
        IF(k .lt. mpisize-1-MODULO(Zmax-Zmin,mpisize)) THEN
          Zbounds(k)=Zmin+k*Zperproc-1
        ELSE
          Zbounds(k)=Zmin+k*Zperproc-1+k-mpisize+2+MODULO(Zmax-Zmin,mpisize)
        END IF
      END DO
    ELSE
      i=0
      DO k=1,mpisize-1
        nbparts=0
        DO WHILE(nbparts<0.99*idealnbpartsperproc .and. i .lt. Zmax)
          nbparts=nbparts+partspercol(i)
          i=i+1
        END DO
        Zbounds(k)=i
      END DO
    END IF

    partindexstart=1
    totparts=0
    partindexlast=p%Nploc

    DO k=0,mpisize-1
      Nplocs_all(k)=SUM(partspercol(Zbounds(k):Zbounds(k+1)-1))
    END DO
    WRITE(*,*) mpirank, " Zbounds: ", Zbounds(mpirank), Zbounds(mpirank+1), " nptot", Nplocs_all(mpirank)
  END SUBROUTINE distribpartsonprocs
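!---------------------------------------------------------------------------
!> @brief Keeps only the particles whose Zindex lies inside the local domain
!> [Zbounds(mpirank), Zbounds(mpirank+1)), compacting them to the beginning of
!> the storage. (Summary of the behaviour implemented below; the reduced total
!> is cross-checked against the previous Nptot.)
!---------------------------------------------------------------------------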
  SUBROUTINE keep_mpi_self_parts(p,Zbounds)
    TYPE(particles),INTENT(INOUT):: p
    INTEGER,INTENT(in)::Zbounds(0:)
    INTEGER :: i, partstart, old_sum, ierr

    partstart=1
    p%Nploc=0
    Do i=1,p%Nptot
      IF(p%Zindex(i).ge.Zbounds(mpirank) .and. p%Zindex(i).lt.Zbounds(mpirank+1)) THEN
        p%Nploc=p%Nploc+1
        CALL move_part(p,i,p%Nploc)
      END IF
    END DO
    old_sum=p%Nptot
    CALL MPI_REDUCE(p%Nploc, p%Nptot, 1, MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
    IF(p%Nptot .ne. old_sum) WRITE(*,*) "Error in particle distribution kept: ", p%Nptot, "/", old_sum
  END SUBROUTINE keep_mpi_self_parts

!_______________________________________________________________________________
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Manages the particle communication between neighbours.
!> This routine is responsible for receiving the incoming particles from the MPI neighbours and for sending its outgoing
!> particles to these neighbours
!
!> @param [in] lsendnbparts number of particles to send to the left neighbour (mpirank-1)
!> @param [in] rsendnbparts number of particles to send to the right neighbour (mpirank+1)
!> @param [in] sendholes array containing the indices of the particles leaving the local domain in ascending order. If the index is positive, the particle goes to the right neighbour, and to the left neighbour if the index is negative
!---------------------------------------------------------------------------
  SUBROUTINE particlescommunication(p, lsendnbparts, rsendnbparts, sendholes, receivednbparts, procs)
    USE mpihelper, ONLY: particle_type
#ifdef _DEBUG
    USE basic, ONLY: step
#endif
    type(particles), INTENT(INOUT):: p
    INTEGER, INTENT(in)  :: lsendnbparts, rsendnbparts
    INTEGER, INTENT(out) :: receivednbparts
    INTEGER, INTENT(in)  :: sendholes(:)
    INTEGER, INTENT(in)  :: procs(2)
    INTEGER, ASYNCHRONOUS :: rrecvnbparts=0, lrecvnbparts=0
    INTEGER, ASYNCHRONOUS :: sendrequest(2), recvrequest(2)
    INTEGER, ASYNCHRONOUS :: sendstatus(MPI_STATUS_SIZE,2), recvstatus(MPI_STATUS_SIZE,2)
    TYPE(particle), ALLOCATABLE :: rrecvpartbuff(:), lrecvpartbuff(:), rsendpartbuff(:), lsendpartbuff(:) ! Buffers to send and receive particles from the left and right processes
    INTEGER :: lsentnbparts, rsentnbparts
    INTEGER :: lreceivednbparts, rreceivednbparts, ierr

    lsentnbparts=lsendnbparts
    rsentnbparts=rsendnbparts

    sendrequest=MPI_REQUEST_NULL
    recvrequest=MPI_REQUEST_NULL
    lrecvnbparts=0
    rrecvnbparts=0

    ! Send and receive the number of particles to exchange
    CALL MPI_IRECV(lrecvnbparts, 1, MPI_INTEGER, procs(1), 0, MPI_COMM_WORLD, recvrequest(1), ierr)
    CALL MPI_IRECV(rrecvnbparts, 1, MPI_INTEGER, procs(2), 0, MPI_COMM_WORLD, recvrequest(2), ierr)
    CALL MPI_ISEND(lsentnbparts, 1, MPI_INTEGER, procs(1), 0, MPI_COMM_WORLD, sendrequest(1), ierr)
    CALL MPI_ISEND(rsentnbparts, 1, MPI_INTEGER, procs(2), 0, MPI_COMM_WORLD, sendrequest(2), ierr)

    CALL MPI_Waitall(2, recvrequest(1:2), recvstatus(:,1:2), ierr)
    recvrequest=MPI_REQUEST_NULL
    lreceivednbparts=lrecvnbparts
    rreceivednbparts=rrecvnbparts

    ! Re/allocate enough memory to store the incoming particles
    ALLOCATE(rrecvpartbuff(rreceivednbparts))
    ALLOCATE(lrecvpartbuff(lreceivednbparts))

    ! Receive particles from the left and right processes into the corresponding buffers
    IF ( lrecvnbparts .gt. 0) THEN
      CALL MPI_IRECV(lrecvpartbuff, lreceivednbparts, particle_type, procs(1), 1, MPI_COMM_WORLD, recvrequest(1), ierr)
    END IF
    IF( rrecvnbparts .gt. 0) THEN
      CALL MPI_IRECV(rrecvpartbuff, rreceivednbparts, particle_type, procs(2), 1, MPI_COMM_WORLD, recvrequest(2), ierr)
    END IF

    ALLOCATE(rsendpartbuff(rsendnbparts))
    ALLOCATE(lsendpartbuff(lsendnbparts))

    ! Copy the leaving particles to the corresponding send buffers
    IF ( (lsendnbparts + rsendnbparts) .gt. 0) THEN
      CALL AddPartSendBuffers(p, lsendnbparts, rsendnbparts, sendholes, lsendpartbuff, rsendpartbuff)
    END IF

    CALL MPI_Waitall(2, sendrequest(1:2), sendstatus(:,1:2), ierr)
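    ! Protocol sketch (matching the calls above and below): messages with tag 0
    ! carry the particle counts, messages with tag 1 carry the particle payloads
    ! as the derived MPI datatype particle_type; the counts are exchanged first
    ! so that each side can size its receive buffers before posting the payload
    ! receives.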
    ! Send the particles to the left and right neighbours
    IF( lsendnbparts .gt. 0) THEN
      CALL MPI_ISEND(lsendpartbuff, lsendnbparts, particle_type, procs(1), 1, MPI_COMM_WORLD, sendrequest(1), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"snding ", lsendnbparts , " to left at step: ",step
#endif
    END IF
    IF( rsendnbparts .gt. 0) THEN
      CALL MPI_ISEND(rsendpartbuff, rsendnbparts, particle_type, procs(2), 1, MPI_COMM_WORLD, sendrequest(2), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"snding ", rsendnbparts , " to right at step: ",step
#endif
    END IF

    ! Receive the incoming parts in the receive buffers
    IF ( lreceivednbparts .gt. 0) THEN
      CALL MPI_Wait(recvrequest(1), recvstatus(:,1), ierr)
      IF(ierr .ne. MPI_SUCCESS) THEN
        WRITE(*,*) "Error in particle reception on proc:", mpirank, " error code:", ierr, "status:", recvstatus(:,1)
        CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr)
      END IF
#ifdef _DEBUG
      !WRITE(*,*)"rcvd ", lreceivednbparts , " from left at step: ",step
#endif
    END IF
    IF ( rreceivednbparts .gt. 0) THEN
      CALL MPI_Wait(recvrequest(2), recvstatus(:,2), ierr)
      IF(ierr .ne. MPI_SUCCESS) THEN
        WRITE(*,*) "Error in particle reception on proc:", mpirank, " error code:", ierr, "status:", recvstatus(:,2)
        CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr)
      END IF
#ifdef _DEBUG
      !WRITE(*,*)"rcvd ", rreceivednbparts , " from right at step: ",step
#endif
    END IF

    receivednbparts=rreceivednbparts+lreceivednbparts

    IF(p%Nploc+receivednbparts-lsendnbparts-rsendnbparts .gt. size(p%R,1)) THEN
      CALL change_parts_allocation(p,receivednbparts)
    END IF

    ! Copy the incoming particles from the receive buffers to the simulation parts variable
    CALL Addincomingparts(p, rreceivednbparts, lreceivednbparts, lsendnbparts+rsendnbparts, &
         & sendholes, lrecvpartbuff, rrecvpartbuff)

    ! Wait for the outgoing particles to be fully received by the neighbours
    IF( lsendnbparts .gt. 0) THEN
      CALL MPI_Wait(sendrequest(1), sendstatus(:,1), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"sent ", lsentnbparts , " to left at step: ",step
#endif
    END IF
    IF( rsendnbparts .gt. 0) THEN
      CALL MPI_Wait(sendrequest(2), sendstatus(:,2), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"sent ", rsentnbparts , " to right at step: ",step
#endif
    END IF
    !
    !
  END SUBROUTINE particlescommunication
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy the particles from the receive buffers to the local simulation variable parts.
!> The incoming particles will first be stored in the holes left by the outgoing particles, then they
!> will be added at the end of the parts variable
!
!> @param [in] rrecvnbparts number of particles received from the right neighbour (mpirank+1)
!> @param [in] lrecvnbparts number of particles received from the left neighbour (mpirank-1)
!> @param [in] sendnbparts total number of particles having left the local domain
!> @param [in] sendholes array containing the indices of the particles having left the local domain in ascending order.
!---------------------------------------------------------------------------
  SUBROUTINE Addincomingparts(p, rrecvnbparts, lrecvnbparts, sendnbparts, sendholes, lrecvpartbuff, rrecvpartbuff)
    !
    USE mpihelper
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(in) :: rrecvnbparts, lrecvnbparts, sendnbparts
    INTEGER, INTENT(in) :: sendholes(:)
    TYPE(particle), INTENT(IN) :: rrecvpartbuff(:), lrecvpartbuff(:)
    INTEGER k,partpos

    ! First import the particles coming from the right
    IF(rrecvnbparts .gt. 0) THEN
      Do k=1,rrecvnbparts
        IF(k .le. sendnbparts) THEN
          ! Fill the holes left by sent parts
          partpos=abs(sendholes(k))
        ELSE
          ! Add at the end of parts and keep track of the number of parts
          p%Nploc=p%Nploc+1
          partpos=p%Nploc
        END IF
        CALL Insertincomingpart(p, rrecvpartbuff, partpos, k)
      END DO
    END IF
    ! Then import the particles coming from the left
    IF(lrecvnbparts .gt. 0) THEN
      Do k=1,lrecvnbparts
        IF(k+rrecvnbparts .le. sendnbparts) THEN
          ! Fill the holes left by sent parts
          partpos=abs(sendholes(k+rrecvnbparts))
        ELSE
          ! Add at the end of parts and keep track of the number of parts
          p%Nploc=p%Nploc+1
          partpos=p%Nploc
        END IF
        CALL Insertincomingpart(p, lrecvpartbuff, partpos, k)
      END DO
    END IF
    !
  END SUBROUTINE Addincomingparts

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy one particle from the receive buffers to the local simulation variable parts.
!
!> @param [in] buffer receive buffer containing the particle parameters to copy from
!> @param [in] bufferindex particle index in the receive buffer
!> @param [in] partsindex destination particle index in the local parts variable
!---------------------------------------------------------------------------
  SUBROUTINE Insertincomingpart(p, buffer, partsindex, bufferindex)
    USE mpihelper
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(in) :: bufferindex, partsindex
    TYPE(particle), DIMENSION(:), INTENT(in) :: buffer

    p%partindex(partsindex) = buffer(bufferindex)%partindex
    p%Rindex(partsindex)    = buffer(bufferindex)%Rindex
    p%Zindex(partsindex)    = buffer(bufferindex)%Zindex
    p%R(partsindex)         = buffer(bufferindex)%R
    p%Z(partsindex)         = buffer(bufferindex)%Z
    p%THET(partsindex)      = buffer(bufferindex)%THET
    p%UZ(partsindex)        = buffer(bufferindex)%UZ
    p%UR(partsindex)        = buffer(bufferindex)%UR
    p%UTHET(partsindex)     = buffer(bufferindex)%UTHET
    p%Gamma(partsindex)     = buffer(bufferindex)%Gamma
    !
    !
  END SUBROUTINE Insertincomingpart

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy one particle from the local parts variable to the send buffer.
!
!> @param [in] buffer send buffer to copy to
!> @param [in] bufferindex particle index in the send buffer
!> @param [in] partsindex origin particle index in the local parts variable
!---------------------------------------------------------------------------
  SUBROUTINE Insertsentpart(p, buffer, bufferindex, partsindex)
    USE mpihelper
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(in) :: bufferindex, partsindex
    TYPE(particle), DIMENSION(:), INTENT(inout) :: buffer

    buffer(bufferindex)%partindex = p%partindex(partsindex)
    buffer(bufferindex)%Rindex    = p%Rindex(partsindex)
    buffer(bufferindex)%Zindex    = p%Zindex(partsindex)
    buffer(bufferindex)%R         = p%R(partsindex)
    buffer(bufferindex)%Z         = p%Z(partsindex)
    buffer(bufferindex)%THET      = p%THET(partsindex)
    buffer(bufferindex)%UZ        = p%UZ(partsindex)
    buffer(bufferindex)%UR        = p%UR(partsindex)
    buffer(bufferindex)%UTHET     = p%UTHET(partsindex)
    buffer(bufferindex)%Gamma     = p%Gamma(partsindex)
    !
  END SUBROUTINE Insertsentpart
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy the particles from the local parts variable to the left and right send buffers.
!
!> @param [in] lsendnbparts number of particles to send to the left neighbour (mpirank-1)
!> @param [in] rsendnbparts number of particles to send to the right neighbour (mpirank+1)
!> @param [in] sendholes array containing the indices of the particles leaving the local domain in ascending order. If the index is positive, the particle goes to the right neighbour, and to the left neighbour if the index is negative
!---------------------------------------------------------------------------
  SUBROUTINE AddPartSendBuffers(p, lsendnbparts, rsendnbparts, sendholes, lsendpartbuff, rsendpartbuff)
    !
    USE mpihelper
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(in) :: lsendnbparts, rsendnbparts
    INTEGER, INTENT(in) :: sendholes(:)
    TYPE(particle), INTENT(OUT) :: rsendpartbuff(:), lsendpartbuff(:)
    INTEGER:: partpos, k
    INTEGER:: lsendpos, rsendpos

    lsendpos=0
    rsendpos=0

    ! Loop over the outgoing particles and fill the correct send buffer
    Do k=lsendnbparts+rsendnbparts,1,-1
      partpos=abs(sendholes(k))
      IF(sendholes(k) .GT. 0) THEN
        rsendpos=rsendpos+1
        CALL Insertsentpart(p, rsendpartbuff, rsendpos, partpos)
      ELSE IF(sendholes(k) .LT. 0) THEN
        lsendpos=lsendpos+1
        CALL Insertsentpart(p, lsendpartbuff, lsendpos, partpos)
      END IF
    END DO
    !
    !
  END SUBROUTINE AddPartSendBuffers

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Exchange two particles in the parts variable.
!
!> @param [in] index1 index in parts of the first particle to exchange.
!> @param [in] index2 index in parts of the second particle to exchange.
!---------------------------------------------------------------------------
  SUBROUTINE exchange_parts(p, index1, index2)
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(IN) :: index1, index2
    REAL(kind=db):: R, Z, THET, UR, UZ, UTHET, Gamma, geomweight(0:2)
    INTEGER :: Rindex, Zindex, partindex

    !! Exchange the particle at index1 with the particle at index2

    ! Store the particle at index1 in a temporary value
    partindex  = p%partindex(index1)
    Gamma      = p%Gamma(index1)
    R          = p%R(index1)
    Z          = p%Z(index1)
    THET       = p%THET(index1)
    UR         = p%UR(index1)
    UTHET      = p%UTHET(index1)
    UZ         = p%UZ(index1)
    Rindex     = p%Rindex(index1)
    Zindex     = p%Zindex(index1)
    geomweight = p%geomweight(index1,:)

    ! Move the particle at index2 into the particle at index1
    p%partindex(index1) = p%partindex(index2)
    p%Gamma(index1)     = p%Gamma(index2)
    p%R(index1)         = p%R(index2)
    p%Z(index1)         = p%Z(index2)
    p%THET(index1)      = p%THET(index2)
    p%UR(index1)        = p%UR(index2)
    p%UTHET(index1)     = p%UTHET(index2)
    p%UZ(index1)        = p%UZ(index2)
    p%Rindex(index1)    = p%Rindex(index2)
    p%Zindex(index1)    = p%Zindex(index2)
    p%geomweight(index1,:) = p%geomweight(index2,:)
    ! Move the temporary values from part(index1) into part(index2)
    p%partindex(index2) = partindex
    p%Gamma(index2)     = Gamma
    p%R(index2)         = R
    p%Z(index2)         = Z
    p%THET(index2)      = THET
    p%UR(index2)        = UR
    p%UTHET(index2)     = UTHET
    p%UZ(index2)        = UZ
    p%Rindex(index2)    = Rindex
    p%Zindex(index2)    = Zindex
    p%geomweight(index2,:) = geomweight
  END SUBROUTINE exchange_parts

  SUBROUTINE change_parts_allocation(p, sizedifference)
    implicit none
    TYPE(particles), INTENT(INOUT):: p
    INTEGER,INTENT(IN) :: sizedifference

    CALL change_array_size_int(p%Rindex, sizedifference)
    CALL change_array_size_int(p%Zindex, sizedifference)
    CALL change_array_size_int(p%partindex, sizedifference)
    CALL change_array_size_dp(p%ER,sizedifference)
    CALL change_array_size_dp(p%EZ,sizedifference)
    CALL change_array_size_dp(p%pot,sizedifference)
    CALL change_array_size_dp(p%potxt,sizedifference)
    CALL change_array_size_dp(p%R,sizedifference)
    CALL change_array_size_dp(p%Z,sizedifference)
    CALL change_array_size_dp(p%THET,sizedifference)
    CALL change_array_size_dp(p%BR,sizedifference)
    CALL change_array_size_dp(p%BZ,sizedifference)
    CALL change_array_size_dp2(p%geomweight,sizedifference)
    CALL change_array_size_dp_ptr(p%UR,sizedifference)
    CALL change_array_size_dp_ptr(p%URold,sizedifference)
    CALL change_array_size_dp_ptr(p%UZ,sizedifference)
    CALL change_array_size_dp_ptr(p%UZold,sizedifference)
    CALL change_array_size_dp_ptr(p%UTHET,sizedifference)
    CALL change_array_size_dp_ptr(p%UTHETold,sizedifference)
    CALL change_array_size_dp_ptr(p%Gamma,sizedifference)
    CALL change_array_size_dp_ptr(p%Gammaold,sizedifference)

    p%Nploc=MIN(p%Nploc,size(p%R))
  END SUBROUTINE change_parts_allocation

  SUBROUTINE change_array_size_dp(arr, sizedifference)
    implicit none
    REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:)
    INTEGER, INTENT(IN):: sizedifference
    REAL(kind=db), ALLOCATABLE:: temp(:)
    INTEGER:: current_size, new_size

    if(allocated(arr)) THEN
      current_size=size(arr)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size))
      temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size))
      DEALLOCATE(arr)
      CALL move_alloc(temp, arr)
    END IF
  END SUBROUTINE change_array_size_dp

  SUBROUTINE change_array_size_dp2(arr, sizedifference)
    implicit none
    REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:,:)
    INTEGER, INTENT(IN):: sizedifference
    REAL(kind=db), ALLOCATABLE:: temp(:,:)
    INTEGER:: current_size, new_size

    if(allocated(arr)) THEN
      current_size=size(arr,1)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size,0:size(arr,2)-1))
      temp(1:min(current_size,new_size),:)=arr(1:min(current_size,new_size),:)
      DEALLOCATE(arr)
      CALL move_alloc(temp, arr)
    END IF
  END SUBROUTINE change_array_size_dp2

  SUBROUTINE change_array_size_dp_ptr(arr, sizedifference)
    implicit none
    REAL(kind=db), POINTER, INTENT(INOUT):: arr(:)
    INTEGER, INTENT(IN):: sizedifference
    REAL(kind=db), POINTER:: temp(:)
    INTEGER:: current_size, new_size

    if(associated(arr)) THEN
      current_size=size(arr)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size))
      temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size))
      DEALLOCATE(arr)
      arr=> temp
    END IF
  END SUBROUTINE change_array_size_dp_ptr
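!---------------------------------------------------------------------------
! All change_array_size_* helpers share the same resize pattern (sketch):
!   ALLOCATE(temp(current_size+sizedifference))
!   temp(1:min(current_size,new_size)) = arr(1:min(current_size,new_size))
!   CALL move_alloc(temp, arr)   ! the pointer version reassigns arr => temp
! so a positive sizedifference grows the array and a negative one truncates it.
!---------------------------------------------------------------------------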
  SUBROUTINE change_array_size_int(arr, sizedifference)
    implicit none
    INTEGER, ALLOCATABLE, INTENT(INOUT):: arr(:)
    INTEGER, INTENT(IN):: sizedifference
    INTEGER, ALLOCATABLE:: temp(:)
    INTEGER:: current_size, new_size

    if(allocated(arr)) THEN
      current_size=size(arr)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size))
      temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size))
      DEALLOCATE(arr)
      CALL move_alloc(temp,arr)
    END IF
  END SUBROUTINE change_array_size_int

  SUBROUTINE add_created_part(p, buffer)
    USE bsplines
    USE basic, ONLY: splrz, phinorm, nlclassical
    USE geometry
    IMPLICIT NONE
    TYPE(particles), INTENT(INOUT):: p
    TYPE(particle), ALLOCATABLE, INTENT(in) :: buffer(:)
    INTEGER:: i, nptotinit, parts_size_increase, nb_added, newindex
    real(kind=db), ALLOCATABLE::gtildeloc(:,:)

    nptotinit=p%Nploc+1
    nb_added=size(buffer,1)
    IF(nb_added .le. 0) RETURN ! No particles to add

    ! If there is not enough space in the parts simulation buffer, increase the parts size
    IF(p%Nploc + nb_added .gt. size(p%Z,1)) THEN
      parts_size_increase=Max(floor(0.1*size(p%Z,1)),nb_added)
      CALL change_parts_allocation(p, parts_size_increase)
    END IF

    if( .not. p%is_test) then
      IF(allocated(p%addedlist)) then
        call change_array_size_int(p%addedlist,2)
      else
        allocate(p%addedlist(2))
      end if
      p%addedlist(size(p%addedlist)-1:size(p%addedlist))=(/nptotinit,nb_added/)
    end if

    newindex=MAXVAL(p%partindex)
    DO i=1,nb_added
      p%Nploc=p%Nploc+1
      newindex=newindex+1
      CALL Insertincomingpart(p, buffer, p%Nploc, i)
      p%partindex(p%Nploc)=newindex
      call p_calc_index(p,p%Nploc)
    END DO
    CALL geom_weight(p%Z(nptotinit:p%Nploc),p%R(nptotinit:p%Nploc),p%geomweight(nptotinit:p%Nploc,:))

    !! Compute the energy added by these new particles for the energy conservation diagnostic
    !allocate(gtildeloc(nb_added,0:2))
    !
    !CALL getgrad(splrz, p%Z(nptotinit:p%Nploc), p%R(nptotinit:p%Nploc), &
    !& p%pot(nptotinit:p%Nploc), p%EZ(nptotinit:p%Nploc), p%ER(nptotinit:p%Nploc))
    !CALL CompgBoundary(p%Z(nptotinit:p%Nploc), p%R(nptotinit:p%Nploc), gtildeloc(:,:))
    !p%EZ(nptotinit:p%Nploc)=-p%Ez(nptotinit:p%Nploc)*p%geomweight(nptotinit:p%Nploc,0) &
    !& -p%pot(nptotinit:p%Nploc)*p%geomweight(nptotinit:p%Nploc,1)-gtildeloc(:,1)
    !p%ER(nptotinit:p%Nploc)=-p%ER(nptotinit:p%Nploc)*p%geomweight(nptotinit:p%Nploc,0) &
    !& -p%pot(nptotinit:p%Nploc)*p%geomweight(nptotinit:p%Nploc,2)-gtildeloc(:,2)
    !p%pot(nptotinit:p%Nploc)=p%geomweight(nptotinit:p%Nploc,0)*p%pot(nptotinit:p%Nploc) + gtildeloc(:,0)
    !
    !loc_etot0=loc_etot0+p%q*p%weight*sum(p%pot(nptotinit:p%Nploc))*phinorm
    !IF(.not. nlclassical) THEN
    !  loc_etot0=loc_etot0+sum(p%m*p%weight*vlight**2*(p%Gamma(nptotinit:p%Nploc)-1))
    !ELSE
    !  loc_etot0=loc_etot0+0.5*p%m*p%weight*vlight**2*sum(p%UR(nptotinit:p%Nploc)**2 &
    !  & +p%UZ(nptotinit:p%Nploc)**2 &
    !  & +p%UTHET(nptotinit:p%Nploc)**2)
    !END IF
-  END SUBROUTINE
+  END SUBROUTINE add_created_part

  SUBROUTINE calc_newparts_energy(p)
    USE basic, ONLY: phinorm, nlclassical
    type(particles)::p
    integer::i,n,nptotinit,nbadded, nptotend

    if(p%is_test) return

    if( allocated(p%addedlist)) then
      n=size(p%addedlist)
      Do i=1,n,2
        nptotinit=p%addedlist(i)
        nbadded=p%addedlist(i+1)
        nptotend=nptotinit+nbadded-1

        loc_etot0=loc_etot0+p%q*p%weight*sum(p%pot(nptotinit:nptotend))*phinorm
        IF(.not. nlclassical) THEN
          loc_etot0=loc_etot0+p%m*p%weight*vlight**2*sum(p%Gamma(nptotinit:nptotend)-1)
        ELSE
          loc_etot0=loc_etot0+0.5*p%m*p%weight*vlight**2*sum(p%UR(nptotinit:nptotend)**2 &
               & +p%UZ(nptotinit:nptotend)**2 &
               & +p%UTHET(nptotinit:nptotend)**2)
        END IF
      end do
      deallocate(p%addedlist)
    end if
  end subroutine calc_newparts_energy
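!---------------------------------------------------------------------------
! Energy bookkeeping sketch: addedlist stores (start index, count) pairs for
! each batch registered by add_created_part; calc_newparts_energy then adds the
! potential term \f$ q\,w\,\phi\,\phi_{norm} \f$ and the kinetic term of every
! new particle to loc_etot0, mirroring the subtraction performed in delete_part
! below, so that the reference energy etot0 stays consistent.
!---------------------------------------------------------------------------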
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Delete the particle at the given index, removing its energy from the system
!
!> @param [in] index index of the particle to be deleted
!---------------------------------------------------------------------------
  SUBROUTINE delete_part(p, index, replace)
    !! This will destroy the particle at the given index
    USE constants, ONLY: vlight
    USE bsplines
    USE geometry
    USE basic, ONLY: phinorm, nlclassical, splrz
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(IN) :: index
    LOGICAL, OPTIONAL :: replace
    LOGICAL:: repl

    IF(present(replace)) THEN
      repl=replace
    ELSE
      repl=.true.
    END IF

    ! Computes the potential at the particle position with phi_ext+phi_s
    IF(index .le. p%Nploc) THEN
      IF(.not. p%is_test) THEN
        loc_etot0=loc_etot0-p%q*p%weight*(p%pot(index))*phinorm
        IF(.not. nlclassical) THEN
          loc_etot0=loc_etot0-p%m*p%weight*vlight**2*(p%Gamma(index)-1)
        ELSE
          loc_etot0=loc_etot0-0.5*p%m*p%weight*vlight**2*(p%UR(index)**2+p%UZ(index)**2+p%UTHET(index)**2)
        END IF
      END IF

      IF(repl) THEN
        ! We fill the gap
        CALL move_part(p, p%Nploc, index)
        p%partindex(p%Nploc)=-1
        ! Reduce the total number of simulated parts
        p%Nploc=p%Nploc-1
      END IF
    END IF
  END SUBROUTINE delete_part

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Move the particle with index sourceindex to the particle with index destindex.
!> !WARNING! This will overwrite the particle at destindex.
!
!> @param [in] sourceindex index in parts of the particle to move.
!> @param [in] destindex index in parts of the moved particle destination.
!---------------------------------------------------------------------------
  SUBROUTINE move_part(p, sourceindex, destindex)
    !! This will destroy the particle at destindex
    INTEGER, INTENT(IN) :: destindex, sourceindex
    TYPE(particles), INTENT(INOUT)::p

    IF(sourceindex .eq. destindex) RETURN
    IF(sourceindex .le. 0 .or. destindex .le. 0) RETURN
    ! Move the particle at sourceindex into the particle at destindex
    Call copy_part(p,sourceindex,destindex,p)
  END SUBROUTINE move_part

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy the particle with index sourceindex in particles sourcep to the particle with index destindex in particles destp.
!> !WARNING! This will overwrite the particle at destp(destindex).
!
!> @param [in] sourcep Structure of source particles.
!> @param [in] sourceindex index in parts of the particle to move.
!> @param [in] destindex index in parts of the moved particle destination.
!> @param [inout] destp Structure of destination particles.
!---------------------------------------------------------------------------
  SUBROUTINE copy_part(sourcep, sourceindex, destindex, destp)
    !! This will destroy the particle at destindex
    INTEGER, INTENT(IN) :: destindex, sourceindex
    TYPE(particles), INTENT(IN)::sourcep
    TYPE(particles), INTENT(INOUT)::destp

    IF(sourceindex .le. 0 .or. destindex .le. 0) RETURN
    IF( destindex .gt. size(destp%R,1)) RETURN
    ! Move part at sourceindex into part at destindex
    destp%partindex(destindex) = sourcep%partindex(sourceindex)
    destp%Gamma(destindex)     = sourcep%Gamma(sourceindex)
    destp%R(destindex)         = sourcep%R(sourceindex)
    destp%Z(destindex)         = sourcep%Z(sourceindex)
    destp%THET(destindex)      = sourcep%THET(sourceindex)
    destp%UR(destindex)        = sourcep%UR(sourceindex)
    destp%UTHET(destindex)     = sourcep%UTHET(sourceindex)
    destp%UZ(destindex)        = sourcep%UZ(sourceindex)
    destp%Rindex(destindex)    = sourcep%Rindex(sourceindex)
    destp%Zindex(destindex)    = sourcep%Zindex(sourceindex)
    destp%geomweight(destindex,:) = sourcep%geomweight(sourceindex,:)
  END SUBROUTINE copy_part

!________________________________________________________________________________
  SUBROUTINE destroy_parts(p)
    TYPE(particles) :: p

    p%Nploc=0
    IF(ALLOCATED(p%Z)) DEALLOCATE(p%Z)
    IF(ALLOCATED(p%R)) DEALLOCATE(p%R)
    IF(ALLOCATED(p%THET)) DEALLOCATE(p%THET)
    IF(ALLOCATED(p%BZ)) DEALLOCATE(p%BZ)
    IF(ALLOCATED(p%BR)) DEALLOCATE(p%BR)
    IF(ASSOCIATED(p%UR)) DEALLOCATE(p%UR)
    IF(Associated(p%URold)) DEALLOCATE(p%URold)
    IF(Associated(p%UZ)) DEALLOCATE(p%UZ)
    IF(Associated(p%UZold)) DEALLOCATE(p%UZold)
    IF(Associated(p%UTHET)) DEALLOCATE(p%UTHET)
    IF(Associated(p%UTHETold)) DEALLOCATE(p%UTHETold)
    IF(Associated(p%Gamma)) DEALLOCATE(p%Gamma)
    IF(Associated(p%Gammaold)) DEALLOCATE(p%Gammaold)
    IF(ALLOCATED(p%Rindex)) DEALLOCATE(p%Rindex)
    IF(ALLOCATED(p%Zindex)) DEALLOCATE(p%Zindex)
    IF(ALLOCATED(p%partindex)) DEALLOCATE(p%partindex)
    if(allocated(p%geomweight)) Deallocate(p%geomweight)
  END SUBROUTINE

!________________________________________________________________________________
  SUBROUTINE clean_beam
    !
    INTEGER:: i

    Do i=1,size(partslist,1)
      CALL destroy_parts(partslist(i))
    END DO
    !
  END SUBROUTINE clean_beam

!________________________________________________________________________________
  SUBROUTINE swappointer( pointer1, pointer2)
    REAL(kind=db), DIMENSION(:), POINTER, INTENT(inout):: pointer1, pointer2
    REAL(kind=db), DIMENSION(:), POINTER:: temppointer

    temppointer=>pointer1
    pointer1=>pointer2
    pointer2=>temppointer
  END SUBROUTINE swappointer

!_______________________________________________________________________________
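!---------------------------------------------------------------------------
! Usage sketch for swappointer (as done in comp_velocity and adapt_vinit):
!   CALL swappointer(p%UZold, p%UZ)
! exchanges the two array pointers so the "old" arrays of the previous step
! become the work arrays of the current one, avoiding a copy of the velocity
! arrays at every time step.
!---------------------------------------------------------------------------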
Initial velocity distribution CALL loadGaussianVelocities(p, VR, VZ, VTHET, temp) END SUBROUTINE loaduniformRZ !_______________________________________________________________________________ SUBROUTINE loadDavidson(p, VR,VZ,VTHET, lodr) USE constants, ONLY: me, kb, elchar USE basic, ONLY: nplasma, rnorm, plasmadim, distribtype, H0, P0, Rcurv, width, qsim, msim, & & omegac, zgrid, nz, rnorm, n0, nblock, temp interface subroutine lodr(nbase,y,rminus,rplus) USE constants REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rplus, rminus end subroutine end interface TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(INOUT)::VZ(:), VR(:), VTHET(:) REAL(kind=db), DIMENSION(:), ALLOCATABLE::ra, rb, z REAL(kind=db) :: r0, deltar2, halfLz, Mirrorratio, Le, VOL INTEGER :: j, n, blockstart, blockend, addedpart, remainparts INTEGER, DIMENSION(:), ALLOCATABLE :: blocksize CALL creat_parts(p, size(VR,1)) p%Nploc=size(VR,1) p%Nptot=p%Nploc Allocate(ra(nblock),rb(nblock), z(0:nblock)) !r0=(plasmadim(4)+plasmadim(3))/2 r0=sqrt(4*H0/(me*omegac**2)) halfLz=(zgrid(nz)+zgrid(0))/2 MirrorRatio=(Rcurv-1)/(Rcurv+1) z(0)=plasmadim(1) DO n=1,nblock ! Compute the radial limits and load the radii for each block Le=(plasmadim(2)-plasmadim(1))/nblock*(n-0.5)-halfLz*rnorm+plasmadim(1) z(n)=z(0)+n*(plasmadim(2)-plasmadim(1))/nblock deltar2=1-MirrorRatio*cos(2*pi*Le/width) rb(n)=r0/deltar2*sqrt(1-P0*abs(omegac)/2/H0*deltar2+sqrt(1-P0*abs(omegac)/H0*deltar2)) ra(n)=r0/deltar2*sqrt(1-P0*abs(omegac)/2/H0*deltar2-sqrt(1-P0*abs(omegac)/H0*deltar2)) END DO VOL=SUM(2*pi*MINVAL(ra)*(rb-ra)*(plasmadim(2)-plasmadim(1))/nblock) qsim=VOL*n0*elchar/nplasma msim=abs(qsim)/elchar*me p%weight=abs(qsim)/elchar p%m=me p%q=sign(elchar,qsim) p%qmRatio=p%q/p%m blockstart=1 blockend=0 ALLOCATE(blocksize(nblock)) WRITE(*,*) "blocksize: ", size(blocksize), nblock DO n=1,nblock blocksize(n)=nplasma/VOL*2*pi*MINVAL(ra)*(rb(n)-ra(n))*(plasmadim(2)-plasmadim(1))/nblock END DO ! Distribute the particles that remain after integer truncation of the block sizes remainparts=p%Nploc-SUM(blocksize) addedpart=1 n=nblock/2 j=1 DO WHILE(remainparts .GT. 0) blocksize(n)=blocksize(n)+addedpart remainparts=remainparts-addedpart n=n+j j=-1*(j+SIGN(1,j)) END DO CALL loadPartSlices(p, lodr, ra, rb, z, blocksize) IF(distribtype .eq. 5) THEN CALL loadGaussianVelocities(p, VR, VZ, VTHET, temp) VZ=VZ/4 VR=VR*8 VTHET=VTHET*8 ELSE Call loadDavidsonVelocities(p, VR, VZ, VTHET, H0, P0) END IF END SUBROUTINE loadDavidson SUBROUTINE loadDavidsonVelocities(p, VR,VZ,VTHET, H0, P0) USE constants, ONLY: me, kb, elchar USE basic, ONLY: rnorm, Rcurv, B0, width, vnorm, zgrid, nz TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(INOUT)::VZ(:), VR(:), VTHET(:) REAL(kind=db), INTENT(IN):: H0, P0 REAL(kind=db) :: athetpos, rg, zg, halfLz, Mirrorratio, Pcomp, Acomp INTEGER :: i MirrorRatio=(Rcurv-1)/(Rcurv+1) halfLz=(zgrid(nz)+zgrid(0))/2 ! Load the azimuthal (theta) velocity ! Loading of the r and z velocities is done in adapt_vinit to have ! access to parts%pot DO i=1,p%Nploc ! Interpolation of the magnetic vector potential rg=p%R(i)*rnorm zg=(p%Z(i)-halfLz)*rnorm Athetpos=0.5*B0*(rg - width/pi*MirrorRatio*bessi1(2*pi*rg/width)*COS(2*pi*zg/width)) Pcomp=P0/rg/p%m Acomp=-p%qmRatio*Athetpos VTHET(i)=SIGN(MIN(abs(Pcomp+Acomp),sqrt(2*H0/p%m)),Pcomp+Acomp) !VTHET(i)=Pcomp+Acomp END DO VTHET=VTHET/vnorm VZ=0._db VR=0._db p%Davidson=.true.
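! The azimuthal velocity set above follows from the canonical angular momentum
! P_theta = m*r*v_theta + q*r*A_theta, i.e. v_theta = P0/(m*r) - (q/m)*A_theta,
! clipped by SIGN/MIN to the maximum speed sqrt(2*H0/m) allowed by the total
! energy H0 (a hedged reading of the code above).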
p%H0=H0 p%P0=P0 END SUBROUTINE loadDavidsonVelocities SUBROUTINE loadGaussianVelocities(p, VR,VZ,VTHET, temperature) USE basic, ONLY: vnorm USE constants, ONLY: kb REAL(kind=db), INTENT(inout) ::VZ(:), VR(:), VTHET(:) TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(IN):: temperature REAL(kind=db):: vth ! Initial velocity distribution vth=sqrt(kb*temperature/p%m)/vnorm !thermal velocity CALL lodgaus(3,VZ) CALL lodgaus(5,VR) CALL lodgaus(7,VTHET) VZ=VZ*vth VR=VR*vth VTHET=VTHET*vth p%temperature=temperature p%Davidson=.false. END SUBROUTINE loadGaussianVelocities SUBROUTINE loadPartslices(p, lodr, ra, rb, z, blocksize) USE basic, ONLY: rnorm interface subroutine lodr(nbase,y,rminus,rplus) USE constants REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rplus, rminus end subroutine end interface TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(IN)::ra(:), rb(:), z(0:) INTEGER, DIMENSION(:), INTENT(IN) :: blocksize INTEGER :: n, blockstart, blockend, nblock nblock=size(blocksize,1) blockstart=1 blockend=0 DO n=1,nblock blockstart=blockend+1 blockend=MIN(blockstart+blocksize(n)-1,p%Nploc) ! Initial distribution in z with normalisation between magnetic mirrors CALL loduni(1,p%Z(blockstart:blockend)) p%Z(blockstart:blockend)= (z(n-1)+p%Z(blockstart:blockend)*(z(n)-z(n-1)))/rnorm CALL lodr(2,p%R(blockstart:blockend),ra(n), rb(n)) p%R(blockstart:blockend)=p%R(blockstart:blockend)/rnorm END DO END SUBROUTINE loadPartslices SUBROUTINE loadPartFile(p, partfileindex, VR, VZ, VTHET) USE basic, ONLY: partfile, nplasma, lu_partfile implicit None TYPE(particles), INTENT(INOUT):: p REAL(kind=db), DIMENSION(:), ALLOCATABLE, INTENT(INOUT)::VR, VZ, VTHET INTEGER, INTENT(IN):: partfileindex INTEGER:: nblock = 0 REAL(kind=db), Dimension(:), ALLOCATABLE:: ra, rb, z INTEGER, Dimension(:), ALLOCATABLE:: npartsslice INTEGER:: velocitytype=1 !< 1) Gaussian with temperature 2) Davidson with H0, P0 INTEGER:: radialtype=1 !< 1) 1/R 2) uniform 3) 1/R^2 4) gauss INTEGER:: npartsalloc !< initial size of the particles arrays REAL(kind=db):: mass=me REAL(kind=db):: charge=-elchar REAL(kind=db):: weight=1.0 CHARACTER(len=256) :: header=' ' !< header of the csv file REAL(kind=db):: H0=3.2e-14 !< Total energy REAL(kind=db):: P0=8.66e-25 !< Canonical angular momentum REAL(kind=db):: temperature=10000 !< temperature in kelvin real(kind=db):: n0 !< density factor LOGICAL :: is_test = .false. !< Defines if particles are test particles or tracers INTEGER:: i, ierr, openerr NAMELIST /partsload/ nblock, mass, charge, weight, npartsalloc, velocitytype, & & radialtype, temperature, H0, P0, is_test, n0 OPEN(UNIT=lu_partfile,FILE=trim(partfile(partfileindex)),ACTION='READ',IOSTAT=openerr) header=' ' IF(openerr .ne. 0) THEN CLOSE(unit=lu_partfile) RETURN END IF READ(lu_partfile,partsload) IF(mpirank .eq.0) THEN WRITE(*,'(a,i2,a,a)')"reading partfile: ", partfileindex, " ", partfile(partfileindex) WRITE(*,partsload) END IF IF( nblock .ge. 1) THEN ALLOCATE(z(0:nblock),ra(nblock),rb(nblock), npartsslice(nblock)) DO WHILE(header(1:8) .ne. '//slices') READ(lu_partfile,'(a)') header END DO DO i=1,nblock READ(lu_partfile,*) z(i-1),ra(i),rb(i),npartsslice(i) END DO READ(lu_partfile,*) z(nblock) CALL creat_parts(p,max(npartsalloc,sum(npartsslice))) p%Nploc=sum(npartsslice) p%Nptot=p%Nploc IF(partfileindex .eq. 1) nplasma=p%Nploc IF( allocated(VR) ) THEN DEALLOCATE(VR,VZ,VTHET) end if if(.not. allocated(VR)) THEN
ALLOCATE(VR(p%Nploc)) ALLOCATE(VZ(p%Nploc)) ALLOCATE(VTHET(p%Nploc)) END IF p%m=mass p%q=charge p%weight=weight p%qmRatio=charge/mass p%is_test=is_test SELECT CASE(radialtype) CASE(1) ! 1/R distribution in R CALL loadPartslices(p, lodunir, ra, rb, z, npartsslice) CASE(2) ! flat top distribution in R CALL loadPartslices(p, lodlinr, ra, rb, z, npartsslice) CASE(3) ! 1/R^2 distribution in R CALL loadPartslices(p, lodinvr, ra, rb, z, npartsslice) CASE(4) ! gaussian distribution in R CALL loadPartslices(p, lodgausr, ra, rb, z, npartsslice) CASE DEFAULT IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of radial distribution:", radialtype CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) END SELECT SELECT CASE(velocitytype) CASE(1) ! Gaussian with temperature CALL loadGaussianVelocities(p, VR, VZ, VTHET, temperature) CASE(2) ! Davidson with H0 and P0 CALL loadDavidsonVelocities(p, VR, VZ, VTHET, H0, P0) CASE DEFAULT IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of velocity distribution:", velocitytype CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) END SELECT END IF CLOSE(unit=lu_partfile) END SUBROUTINE !=============================================================================== SUBROUTINE Temp_rescale ! USE basic, ONLY: temprescale, nlclassical, nz ! INTEGER:: i, gridindex ! REAL(kind=db) :: vr, vz, vthet ! DO i=1,parts%Nptot ! gridindex=parts%zindex(i)+1+parts%rindex(i)*nz ! vr=parts%UR(i)/parts%Gamma(i)-fluidur(gridindex) ! vz=parts%UZ(i)/parts%Gamma(i)-fluiduz(gridindex) ! vthet=parts%UTHET(i)/parts%Gamma(i)-fluiduthet(gridindex) ! parts%UR(i)=vr*temprescale+fluidur(gridindex) ! parts%UTHET(i)=vthet*temprescale+fluiduthet(gridindex) ! parts%UZ(i)=vz*temprescale+fluiduz(gridindex) ! IF(nlclassical) THEN ! parts%Gamma(i)=1.0 ! ELSE ! parts%Gamma(i)=1/sqrt(1-(parts%UR(i)**2+parts%UTHET(i)**2+parts%UZ(i)**2)) ! END IF ! parts%UR(i)=parts%UR(i)*parts%Gamma(i) ! parts%UTHET(i)=parts%UTHET(i)*parts%Gamma(i) ! parts%UZ(i)=parts%UZ(i)*parts%Gamma(i) ! END DO END SUBROUTINE Temp_rescale !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Increase the number of macroparticles by splitting each existing macroparticle into !> samplefactor new macroparticles of equally divided weight. The new sub-particles are distributed !> uniformly in space to maintain the density and other moments. ! !> @param [in] samplefactor multiplier of the number of macroparticles. !> @param [inout] p particles type to increase. !--------------------------------------------------------------------------- SUBROUTINE upsample(p, samplefactor) USE basic, ONLY : nplasma, dr, dz INTEGER, INTENT(IN) ::samplefactor TYPE(particles), INTENT(INOUT):: p INTEGER:: i, j, currentindex REAL(kind=db), DIMENSION(p%Nploc) :: spreaddir ! random direction for the spread of each initial macro particle REAL(kind=db) :: dir ! Direction in which the particle is moved REAL(kind=db) :: dl ! Particle displacement used for the spread ! Load and scale the direction angle for spreading the new particles CALL loduni(2, spreaddir) spreaddir=spreaddir*2*pi/samplefactor dl=min(dz,minval(dr))/100 DO i=1,p%Nploc DO j=1,samplefactor-1 currentindex=p%Nploc+(i-1)*(samplefactor-1)+j CALL move_part(p,i,currentindex) p%partindex(currentindex)=currentindex dir = spreaddir(i)+2*pi*j/samplefactor p%R(currentindex)=p%R(currentindex) + dl*cos(dir) p%Z(currentindex)=p%Z(currentindex) + dl*sin(dir) END DO p%partindex(i)=i p%R(i)=p%R(i) + dl*cos(spreaddir(i)) p%Z(i)=p%Z(i) + dl*sin(spreaddir(i)) END DO nplasma=nplasma*samplefactor p%weight=p%weight/samplefactor p%Nploc=p%Nploc*samplefactor END SUBROUTINE upsample END MODULE beam diff --git a/src/geometry_mod.f90 b/src/geometry_mod.f90 index 0951004..78c785c 100644 --- a/src/geometry_mod.f90 +++ b/src/geometry_mod.f90 @@ -1,997 +1,997 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: geometry ! !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> Module responsible for handling geometries with non-constant radius using b-spline interpolation !> This module defines ways to compute the weight function needed for weighted b-splines and !> can load the definition of the geometry from input files !------------------------------------------------------------------------------ MODULE geometry USE constants USE bsplines USE mumps_bsplines IMPLICIT NONE type innerspline Integer, Allocatable:: k(:) ! Index in reduced set real(kind=db), Allocatable:: weight(:) ! geometric weight at the relevant cell end type innerspline Real(kind=db),ALLOCATABLE, SAVE ::IVand (:,:) Real(kind=db),save:: z_0=0, r_0=0, invr_r=0, invr_z=0, r_a=0, r_b=0, z_r=1, r_r=1 Real(kind=db), save:: Phidown=0,Phiup=0 Real(kind=db), save:: L_r=0.16, L_z=0.24 Logical, save:: nlweb=.false. Integer, save::Interior=-1 Integer, save:: above1=1, above2=-1 Integer, save :: walltype = 0 Integer, save, Allocatable:: bsplinetype(:) ! Array containing the inner/outer type for each bspline Integer, save, Allocatable:: gridcelltype(:,:) ! Array containing the inner/outer type for each gridcell Real(kind=db), save, allocatable:: gridwgeom(:,:) ! Stores the geometric weight at the grid points Real(kind=db), save, allocatable:: gtilde(:,:) ! Stores the extension to the domain of the boundary conditions type(mumps_mat):: etilde ! Matrix of extended web splines type(mumps_mat):: etildet ! Transpose of the matrix of extended web splines integer,save :: nbreducedspline ! Number of splines in the reduced set PUBLIC:: geom_weight INTERFACE geom_weight MODULE PROCEDURE geom_weight0, geom_weight1, geom_weight2 END INTERFACE geom_weight - NAMELIST /geomparams/ z_0, r_0, z_r, r_r, r_a, r_b, walltype, L_r, L_z, nlweb + NAMELIST /geomparams/ z_0, r_0, z_r, r_r, r_a, r_b, walltype, L_r, L_z, nlweb, above1, above2 contains SUBROUTINE read_geom(Fileid, rnorm, rgrid, Potinn, Potout) Use mpi Real(kind=db):: rnorm, rgrid(:), Potinn, Potout Integer:: Fileid, mpirank, ierr CALL MPI_COMM_RANK(MPI_COMM_WORLD, mpirank, ierr) Rewind(Fileid) READ(Fileid, geomparams) if(mpirank .eq. 0) WRITE(*, geomparams) !! Normalizations and initialization of geometric variables r_a=r_a/rnorm r_b=r_b/rnorm if(r_a .eq. 0 .and. r_b .eq.0) then !! in case no geomparams have been defined
r_a=rgrid(1) r_b=rgrid(size(rgrid,1)) end if z_0=z_0/rnorm r_0=r_0/rnorm z_r=z_r/rnorm r_r=r_r/rnorm invr_r=1/r_r invr_z=1/z_r Interior=-1 above1=1 above2=-1 Phidown=Potinn Phiup=Potout L_r=L_r/rnorm L_z=L_z/rnorm end subroutine read_geom SUBROUTINE init_geom(spl2, vec1, vec2) type(spline2d):: spl2 real(kind=db):: vec1(:),vec2(:) Real(kind=db), Allocatable:: zgrid(:),rgrid(:) Integer:: nrank(2), nrz(2) Call get_dim(spl2, nrank, nrz) ! Obtain grid data from the spline structure Allocate(zgrid(1:nrz(1)+1)) Allocate(rgrid(1:nrz(2)+1)) zgrid=spl2%sp1%knots(0:nrz(1)) rgrid=spl2%sp2%knots(0:nrz(2)) ! create a table of the classification for each cell Allocate(gridcelltype(nrz(1),nrz(2))) Call classifycell(zgrid, rgrid, gridcelltype) if (nlweb) then Allocate(bsplinetype(nrank(1)*nrank(2))) Call classifyspline(spl2, gridcelltype, bsplinetype) Call buildetilde(spl2,bsplinetype,gridcelltype,etilde) end if ALLOCATE(gridwgeom((nrz(1)+1)*(nrz(2)+1),0:2)) gridwgeom=0 ALLOCATE(gtilde((nrz(1)+1)*(nrz(2)+1),0:2)) gtilde=0 CALL CompgBoundary(vec1,vec2,gtilde) Call geom_weight(vec1, vec2, gridwgeom) end Subroutine init_geom Subroutine diag_geom(File_handle, str, rnorm) Use mpi Use futils Integer:: File_handle Real(kind=db):: rnorm Character(len=*):: str CHARACTER(len=128):: grpname Integer:: ierr, mpirank CALL MPI_COMM_RANK(MPI_COMM_WORLD, mpirank, ierr) IF(mpirank .eq. 0) THEN Write(grpname,'(a,a)') trim(str),"/geometry" If(.not. isgroup(File_handle, trim(grpname))) THEN CALL creatg(File_handle, trim(grpname)) END IF Call attach(File_handle, trim(grpname), "r_a", r_a*RNORM) Call attach(File_handle, trim(grpname), "r_b", r_b*RNORM) Call attach(File_handle, trim(grpname), "z_0", z_0*RNORM) Call attach(File_handle, trim(grpname), "r_0", r_0*RNORM) Call attach(File_handle, trim(grpname), "r_r", r_r*RNORM) Call attach(File_handle, trim(grpname), "z_r", z_r*RNORM) Call attach(File_handle, trim(grpname), "interior", interior) Call attach(File_handle, trim(grpname), "above1", above1) Call attach(File_handle, trim(grpname), "above2", above2) Call attach(File_handle, trim(grpname), "walltype", walltype) Call putarr(File_handle, trim(grpname)//'/geomweight',gridwgeom) END IF End subroutine Subroutine buildetilde(spl2, bsplinetype, celltype, etilde) USE mumps_bsplines type(spline2d):: spl2 Integer:: bsplinetype(:) Integer:: celltype(:,:) type(mumps_mat):: etilde Logical, Allocatable:: Ibsplinetype(:) Integer:: i,j,k, icellz, icellr, jcellz, jcellr, nrank(2), nrz(2), norder(2) real(kind=db), allocatable:: zgrid(:), rgrid(:) real(kind=db):: wgeomi, indexdistance, eij1 integer:: l,m, n, linkedi type(innerspline):: innersplinelist real(kind=db), allocatable:: rgridmesh(:),zgridmesh(:), d(:), hz(:), cz(:), hr(:), cr(:), hmesh(:) !if(allocated(etilde)) deallocate(etilde) nbreducedspline=count(bsplinetype .eq. 1) Call get_dim(spl2,nrank,nrz,norder) ! Obtain grid data from the spline structure Allocate(zgrid(1:nrz(1)+1)) Allocate(rgrid(1:nrz(2)+1)) zgrid=spl2%sp1%knots(0:nrz(1)) rgrid=spl2%sp2%knots(0:nrz(2)) allocate(innersplinelist%k(nrank(1)*nrank(2)),innersplinelist%weight(nrank(1)*nrank(2))) allocate(Ibsplinetype(nrank(1)*nrank(2))) allocate(rgridmesh(nrank(1)*nrank(2)), zgridmesh(nrank(1)*nrank(2))) allocate(hmesh(nrank(1)*nrank(2))) allocate(d(size(bsplinetype))) Ibsplinetype=.False.
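! Hedged summary of the web-spline construction below: every inner b-spline k
! is kept and normalised by 1/w at a point of a fully interior cell of its
! support; every outer b-spline j is attached to the nearest block of
! (norder(1)+1)*(norder(2)+1) inner splines and extrapolated from it with the
! Lagrange-type coefficients eij1, so that etilde (and its transpose etildet)
! map the reduced inner set onto the full spline set.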
call calcsplinecenters(spl2%sp1,cz,hz) call calcsplinecenters(spl2%sp2,cr,hr) Do i=0,nrank(2)-1 zgridmesh(i*nrank(1)+1:(i+1)*nrank(1))=cz rgridmesh(i*nrank(1)+1:(i+1)*nrank(1))=cr(i+1) hmesh(i*nrank(1)+1:(i+1)*nrank(1))=hr(i+1)*hz End do !allocate(etilde(nbinspline,nrank(1)*nrank(2))) !etilde=0 call init(nrank(1)*nrank(2),nbreducedspline,etilde) call init(nrank(1)*nrank(2),nbreducedspline,etildet) k=1 Do i=1,nrank(1)*nrank(2) if(bsplinetype(i) .ne. 1) cycle ! skip splines whose support contains no cell completely inside D ! otherwise, at least one cell of bspline i is completely in D icellz=mod(i-1,nrank(1))+1 icellr=(i-1)/(nrank(1))+1 outer: do l=max(1,icellz-norder(1)),min(nrz(1),icellz) do m=max(1,icellr-norder(2)),min(nrz(2),icellr) if (celltype(l,m).eq.1) then call geom_weight((zgrid(l)+zgrid(l+1))/2,(rgrid(m)+rgrid(m+1))/2,wgeomi) EXIT outer end if end do end do outer call putele(etilde,k,i,1/wgeomi) call putele(etildet,i,k,1/wgeomi) innersplinelist%k(i)=k innersplinelist%weight(i)=1/wgeomi k=k+1 !Ibsplinetype(i)=icellz-norder(1) .gt. 0 .and. icellr-norder(2) .gt. 0 .and. icellz .le. nrz(1) .and. icellr .le. nrz(2) Ibsplinetype(i)=icellz+norder(1) .le. nrank(1) .and. icellr+norder(2) .le. nrank(2) if(.not. Ibsplinetype(i)) cycle do m=0,norder(2) do l=0,norder(1) ! Check if all linked splines are inner splines Ibsplinetype(i)=Ibsplinetype(i) .and. (bsplinetype(i+l+m*nrank(1)) .eq. 1) end do end do end do !!$OMP PARALLEL DO DEFAULT(SHARED), PRIVATE(d,k,icellz,icellr,jcellr,jcellz,indexdistance,n,i,m,l,eij,linkedi) Do j=1,nrank(1)*nrank(2) if(bsplinetype(j) .ne. 0) cycle d=0 where(Ibsplinetype) d=(rgridmesh(j)-rgridmesh)**2+(zgridmesh(j)-zgridmesh)**2 end where k=minloc(d,1,MASK=Ibsplinetype) icellz=mod(k-1,nrank(1)) icellr=(k-1)/(nrank(1)) jcellz=mod(j-1,nrank(1)) jcellr=(j-1)/(nrank(1)) indexdistance=sqrt(real((jcellz-icellz)**2 + (jcellr-icellr)**2,kind=db)) if(indexdistance .gt. 20)then Write(*,*) 'Error on system conditioning, the number of radial or axial points is too low' stop end if do n=0,norder(2) do i=0,norder(1) eij1=1 !eij=1 do m=0,norder(2) if(n.eq.m) cycle eij1=eij1*(rgridmesh(j)-rgridmesh(k+m*nrank(1)))/(rgridmesh(k+n*nrank(1))-rgridmesh(k+m*nrank(1))) end do do l=0,norder(1) if( i .eq. l ) cycle eij1=eij1*(zgridmesh(j)-zgridmesh(k+l))/(zgridmesh(k+i)-zgridmesh(k+l)) !eij=eij*(jcellr-icellr-m)/(n-m)*(jcellz-icellz-l)/(i-l) end do linkedi=innersplinelist%k(k+i+n*nrank(1)) ! equivalent to findloc, necessary for ifort 17
!findloc(innersplinelist%mu,k,1) eij1=eij1*innersplinelist%weight(k+i+n*nrank(1)) !eij=eij*innersplinelist%weight(k+i+n*nrank(1)) call putele(etilde,linkedi,j,eij1) call putele(etildet,j,linkedi,eij1) end do end do end do !!$OMP End parallel do call to_mat(etilde) call to_mat(etildet) end subroutine Subroutine calcsplinecenters(spl,ctrs,heights) use bsplines type(spline1d):: spl real(kind=db), allocatable:: ctrs(:), heights(:) integer:: nrank, nx, order, i, left1, j, left2, left3 real(kind=db):: x1, x2, x3 real(kind=db), allocatable:: fun1(:,:), fun2(:,:), fun3(:,:) real(kind=db),allocatable:: init_heights(:) call get_dim(spl, nrank, nx, order) if (allocated(ctrs)) deallocate(ctrs) if (allocated(heights)) deallocate(heights) allocate(heights(nrank)) allocate(ctrs(nrank)) allocate(fun1(1:order+1,0:1), fun2(1:order+1,0:1), fun3(1:order+1,0:1)) allocate(init_heights(nrank)) init_heights=1.0 ctrs(:)=spl%knots(0:nrank-1) call gridval(spl, ctrs, heights, 0, init_heights) if (order .gt.1) then do i=2,nrank-1 x1=spl%knots(i-order)+1e5*Epsilon(x1) x2=spl%knots(i-1)-1e5*Epsilon(x2) left1=max(0,i-order) left2=min(nx-2,i-2) call basfun(x1,spl,fun1, left1+1) call basfun(x2,spl,fun2, left2+1) fun3=1 j=0 ! Bisection search for the maximum of b-spline i (zero of its first derivative) Do while( j .le.100) x3=(x1+x2)/2 call locintv(spl, x3, left3) call basfun(x3,spl,fun3, left3+1) if( (x2-x1)/2.lt.1e-12) exit if(abs(fun3(i-left3,1)).lt.1e-15) exit if(fun3(i-left3,1)*fun1(i-left1,1).le.0) then fun2=fun3 x2=x3 left2=left3 else fun1=fun3 x1=x3 left1=left3 end if j=j+1 End do ctrs(i)=x3 heights(i)=fun3(i-left3,0) end do end if end subroutine !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Subroutine classifycell(zgrid,rgrid,gridctype) Real(kind=db):: zgrid(:),rgrid(:) Integer:: gridctype(:,:) Integer::i,j gridctype=-1 !$OMP parallel do private(i) Do j=1,size(rgrid,1)-1 Do i=1,size(zgrid,1)-1 ! Determines the type inner/boundary/outer for each cell Call classification((/zgrid(i),zgrid(i+1)/),(/rgrid(j),rgrid(j+1)/),& & gridctype(i,j)) End Do End do !$OMP end parallel do End subroutine !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine classifyspline(spl2,gridctype,bsptype) type(spline2d):: spl2 Integer:: gridctype(:,:) Integer:: bsptype(:) Integer:: nrank(2), nrz(2), norder(2) Integer:: i, j, mu, imin, imax, jmin, jmax Integer, Allocatable:: splinespan(:,:) call get_dim(spl2, nrank, nrz, norder) bsptype=0 Allocate(splinespan(norder(1)+1,norder(2)+1)) Do mu=1,nrank(1)*nrank(2) i=mod(mu-1,nrank(1))+1 j=(mu-1)/nrank(1)+1 splinespan=-1 imin=max(1,i-norder(1)) imax=min(nrz(1),i) jmin=max(1,j-norder(2)) jmax=min(nrz(2),j) splinespan(1:(imax-imin+1),1:(jmax-jmin+1))=gridctype(imin:imax,jmin:jmax) if ( ANY( splinespan==1 ) ) then bsptype(mu)=1 else if (sum(splinespan).eq. -(norder(1)+1)*(1+norder(2))) then bsptype(mu)=-1 end if end do End subroutine !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> General placeholder function used in fields_mod to compute the weight for weighted b-splines. !> This function combines an ellipse function and a horizontal wall !
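!> For the default walltype the two contributions are plane walls at r_a and r_b;
!> they are merged with the smooth conjunction w = w1 + w2 - sqrt(w1**2 + w2**2)
!> (an R-function), so that w > 0 inside the domain and w = 0 on its boundary
!> (hedged summary of the implementation below).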
!--------------------------------------------------------------------------- SUBROUTINE geom_weight0(z,r,w) Real(kind=db), INTENT(IN):: r,z Real(kind=db), INTENT(OUT):: w Real(kind=db):: rtmp(1:1), ztmp(1:1), walltmp(1:1,1:1), elliptmp(1:1,1:1) ztmp=z rtmp=r call wallweight(ztmp,rtmp,walltmp, r_a, above1) if (walltype .ne. 0) then call ellipseweight(ztmp,rtmp,elliptmp,Interior) else call wallweight(ztmp,rtmp,elliptmp, r_b, above2) end if if(interior.eq.0 .and. above2.eq.0) then w=walltmp(1,1) else if (above1 .eq. 0) then w=elliptmp(1,1) else w=walltmp(1,1)+elliptmp(1,1)-sqrt(walltmp(1,1)**2+elliptmp(1,1)**2) ! weight at position r,z end if End SUBROUTINE geom_weight0 SUBROUTINE geom_weight1(z,r,w) Real(kind=db), INTENT(IN):: r,z Real(kind=db), INTENT(OUT):: w(0:) Real(kind=db):: rtmp(1:1), ztmp(1:1), walltmp(1:1,0:size(w,1)-1), elliptmp(1:1,0:size(w,1)-1) Real(kind=db):: squareroot, denom ztmp=z rtmp=r call wallweight(ztmp,rtmp,walltmp, r_a, above1) if (walltype .ne. 0) then call ellipseweight(ztmp,rtmp,elliptmp,Interior) else call wallweight(ztmp,rtmp,elliptmp, r_b, above2) end if if(interior.eq.0 .and. above2.eq.0) then w=walltmp(1,:) else if (above1 .eq. 0) then w=elliptmp(1,:) else denom=walltmp(1,0)**2+elliptmp(1,0)**2 squareroot=sqrt(denom) w(0)=walltmp(1,0)+elliptmp(1,0)-squareroot ! weight at position r,z If(size(w,1) .gt. 1) then ! first derivative w(1)=walltmp(1,1)+elliptmp(1,1)-(elliptmp(1,1)*elliptmp(1,0)+walltmp(1,1)*walltmp(1,0))/& & squareroot ! z derivative of w w(2)=walltmp(1,2)+elliptmp(1,2)-(elliptmp(1,2)*elliptmp(1,0)+walltmp(1,2)*walltmp(1,0))/& & squareroot ! r derivative of w End If If (size(w,1).gt.3) then ! second derivative w(3)=walltmp(1,3)+elliptmp(1,3)-((elliptmp(1,1)*elliptmp(1,0)-walltmp(1,1)*walltmp(1,0))**2+& & (elliptmp(1,3)*elliptmp(1,0)+walltmp(1,3)*walltmp(1,0))*denom)/& & (squareroot*denom)! zz derivative of w w(4)=walltmp(1,4)+elliptmp(1,4)-((elliptmp(1,4)*elliptmp(1,0)-walltmp(1,4)*walltmp(1,0))*denom+& & elliptmp(1,1)*elliptmp(1,2)*walltmp(1,0)**2 + walltmp(1,1)*walltmp(1,2)*elliptmp(1,0)**2- & & elliptmp(1,1)*elliptmp(1,0)*walltmp(1,2)*walltmp(1,0) - & & elliptmp(1,2)*elliptmp(1,0)*walltmp(1,1)*walltmp(1,0) )/& & (squareroot*denom)! zr derivative of w w(5)=walltmp(1,5)+elliptmp(1,5)-((elliptmp(1,2)*elliptmp(1,0)-walltmp(1,2)*walltmp(1,0))**2+& & (elliptmp(1,5)*elliptmp(1,0)+walltmp(1,5)*walltmp(1,0))*denom)/& & (squareroot*denom)! rr derivative of w End If End if End SUBROUTINE geom_weight1 SUBROUTINE geom_weight2(z,r,w) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: w(:,0:) Real(kind=db):: walltmp(size(w,1),0:size(w,2)-1), elliptmp(size(w,1),0:size(w,2)-1) Real(kind=db):: squareroot(size(w,1)), denom(size(w,1)) call wallweight(z,r,walltmp, r_a, above1) if (walltype .ne. 0) then call ellipseweight(z,r,elliptmp,Interior) else call wallweight(z,r,elliptmp, r_b, above2) end if if(interior.eq.0 .and. above2.eq.0) then w=walltmp(:,:) else if (above1 .eq. 0) then w=elliptmp(:,:) else denom=walltmp(:,0)**2+elliptmp(:,0)**2 squareroot=sqrt(denom) w(:,0)=walltmp(:,0)+elliptmp(:,0)-squareroot ! weight at position r,z If(size(w,2) .gt. 1) then ! first derivative w(:,1)=walltmp(:,1)+elliptmp(:,1)-(elliptmp(:,1)*elliptmp(:,0)+walltmp(:,1)*walltmp(:,0))/& & squareroot ! z derivative of w w(:,2)=walltmp(:,2)+elliptmp(:,2)-(elliptmp(:,2)*elliptmp(:,0)+walltmp(:,2)*walltmp(:,0))/& & squareroot ! r derivative of w End If If (size(w,2).gt.3) then ! 
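! For w = a + b - sqrt(a**2 + b**2) the first-derivative block above is the
! chain rule dw = da + db - (a*da + b*db)/sqrt(a**2 + b**2); the block below
! differentiates the same expression once more to obtain the
!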
second derivative w(:,3)=walltmp(:,3)+elliptmp(:,3)-((elliptmp(:,1)*elliptmp(:,0)-walltmp(:,1)*walltmp(:,0))**2+& & (elliptmp(:,3)*elliptmp(:,0)+walltmp(:,3)*walltmp(:,0))*denom)/& & (squareroot*denom)! zz derivative of w w(:,4)=walltmp(:,4)+elliptmp(:,4)-((elliptmp(:,4)*elliptmp(:,0)-walltmp(:,4)*walltmp(:,0))*denom+& & elliptmp(:,1)*elliptmp(:,2)*walltmp(:,0)**2 + walltmp(:,1)*walltmp(:,2)*elliptmp(:,0)**2- & & elliptmp(:,1)*elliptmp(:,0)*walltmp(:,2)*walltmp(:,0) - & & elliptmp(:,2)*elliptmp(:,0)*walltmp(:,1)*walltmp(:,0) )/& & (squareroot*denom)! zr derivative of w w(:,5)=walltmp(:,5)+elliptmp(:,5)-((elliptmp(:,2)*elliptmp(:,0)-walltmp(:,2)*walltmp(:,0))**2+& & (elliptmp(:,5)*elliptmp(:,0)+walltmp(:,5)*walltmp(:,0))*denom)/& & (squareroot*denom)! rr derivative of w End If End if End SUBROUTINE geom_weight2 Subroutine Boundary_limits(pts) Real(kind=db), Allocatable, INTENT(out) :: pts(:,:) If(allocated(pts))Deallocate(pts) allocate(pts(4,2)) pts(1,1:2)=(/z_0-invr_z,r_0/) pts(2,1:2)=(/z_0,r_0+invr_r/) pts(3,1:2)=(/z_0+invr_z,r_0/) pts(4,1:2)=(/z_0,r_0-invr_r/) End subroutine Boundary_limits SUBROUTINE classification(z, r, ctype) real(kind=db), INTENT(IN):: r(2), z(2) INTEGER, INTENT(OUT):: ctype Real(kind=db)::weights(5,1:1) Real(kind=db):: zeval(5),reval(5) zeval=(/ z(1),z(2),z(1),z(2), (z(2)+z(1))/2 /) reval=(/ r(1),r(1),r(2),r(2), (r(2)+r(1))/2 /) CAll geom_weight(zeval,reval,weights) ctype=int(sign(1.0_db,weights(5,1))) If(weights(1,1)*weights(2,1) .lt. 0 ) then ctype=0 return End If If(weights(1,1)*weights(3,1) .lt. 0 ) then ctype=0 return End If If(weights(2,1)*weights(4,1) .lt. 0 ) then ctype=0 return End If If(weights(3,1)*weights(4,1) .lt. 0 ) then ctype=0 return End If end subroutine ! ################################################################## Subroutine calc_gauss(spl2, ngauss, i, j, xgauss, wgauss, gausssize, celltype) type(spline2d), INTENT(IN):: spl2 Integer, Intent(out):: gausssize INTEGER, Intent(out), Optional :: celltype Real(kind=db), Allocatable::rgrid(:),zgrid(:) Real(kind=db), ALLOCATABLE::xgauss(:,:), wgauss(:) Integer:: i,j, ngauss(2) Real(kind=db),Allocatable:: pts(:,:), zpoints(:) Real(kind=db):: zg(ngauss(1)),rg(ngauss(2)), wzg(ngauss(1)), wrg(ngauss(2)) Integer:: k, l, directiondown, directionup, nbzpoints, direction, ctype Logical:: hasmaxpoint Real(kind=db):: xptup, xptdown, w, rmin, rmax type(spline1d):: splz, splr splz=spl2%sp1 splr=spl2%sp2 Allocate(zgrid(1:splz%nints+1)) Allocate(rgrid(1:splr%nints+1)) zgrid=splz%knots(0:splz%nints) rgrid=splr%knots(0:splr%nints) hasmaxpoint=.false. If(allocated(xgauss)) deallocate(xgauss) if(allocated(wgauss)) deallocate(wgauss) !Call classification((/zgrid(i),zgrid(i+1)/),(/rgrid(j),rgrid(j+1)/),ctype) ctype=gridcelltype(i,j) If (ctype .ge. 1) then ! we have a normal internal cell Allocate(xgauss(ngauss(1)*ngauss(2),2)) Allocate(wgauss(ngauss(1)*ngauss(2))) gausssize=ngauss(1)*ngauss(2) !Computation of gauss weight and position in r and z direction for gaussian integration CALL get_gauss(spl2%sp1, ngauss(1), i, zg, wzg) CALL get_gauss(spl2%sp2, ngauss(2), j, rg, wrg) ! Construction of matrix xgauss and wgauss storing the weight and position for 2d gaussian integration DO k=1,ngauss(2) xgauss((k-1)*ngauss(1)+1:k*ngauss(1),1)=zg xgauss((k-1)*ngauss(1)+1:k*ngauss(1),2)=rg(k) wgauss((k-1)*ngauss(1)+1:k*ngauss(1))=wrg(k)*wzg END DO Else If(ctype.eq.0) then ! we have a boundary cell Call Boundary_Limits(pts) !Do k=1,size(pts,1) ! hasmaxpoint=hasmaxpoint .or. (pts(k,1) .ge. zgrid(i) .and. pts(k,1) .le. zgrid(i+1) & ! 
& .and. pts(k,2) .ge. rgrid(j) .and. pts(k,2) .le. rgrid(j+1)) !End do If(.not. hasmaxpoint) Then directiondown=1 directionup=1 Call Find_crosspoint((/zgrid(i),zgrid(i+1)/),rgrid(j),xptdown,directiondown) Call Find_crosspoint((/zgrid(i),zgrid(i+1)/),rgrid(j+1),xptup,directionup) select case ( directionup+directiondown) Case (0) ! The intersections are only on the left and right cell boundaries nbzpoints=2 Allocate(zpoints(nbzpoints)) zpoints=(/zgrid(i),zgrid(i+1)/) Case(1) nbzpoints=3 Allocate(zpoints(nbzpoints)) if(directiondown.eq.1) zpoints=(/zgrid(i), xptdown,zgrid(i+1)/) if(directionup .eq. 1) zpoints=(/zgrid(i), xptup,zgrid(i+1)/) Case(2) nbzpoints=3 Allocate(zpoints(nbzpoints)) call geom_weight(zgrid(i),rgrid(j),w) If(w.ge.0) zpoints=(/zgrid(i),min(xptdown,xptup),max(xptdown,xptup) /) If(w.lt.0) zpoints=(/min(xptdown,xptup),max(xptdown,xptup), zgrid(i+1) /) End select Allocate(xgauss(ngauss(1)*ngauss(2)*(nbzpoints-1),2)) Allocate(wgauss(ngauss(1)*ngauss(2)*(nbzpoints-1))) gausssize=ngauss(1)*ngauss(2)*(nbzpoints-1) ! Compute gauss points Do l=1,nbzpoints-1 !CALL get_gauss(spl2%sp1, ngauss(1), i, zg, wzg) Call gauleg(zpoints(l),zpoints(l+1),zg,wzg,ngauss(1)) ! We test if the lower or upper side is in the domain call geom_weight(zg(1),rgrid(j),w) rmin=rgrid(j) if (w .lt. 0) rmin = rgrid(j+1) Do k=1,ngauss(1) direction=2 Call Find_crosspoint((/rgrid(j),rgrid(j+1)/),zg(k),rmax,direction) ! We compute the radial limits at each z position Call gauleg(min(rmin,rmax),max(rmin,rmax),rg,wrg,ngauss(2)) ! We obtain the Gauss weights and positions for these boundaries xgauss(k+(l-1)*ngauss(1)*ngauss(2) : l*ngauss(2)*ngauss(1) : ngauss(1),1) = zg(k) xgauss(k+(l-1)*ngauss(1)*ngauss(2) : l*ngauss(2)*ngauss(1) : ngauss(1),2) = rg wgauss(k+(l-1)*ngauss(1)*ngauss(2) : l*ngauss(2)*ngauss(1) : ngauss(1)) = wrg*wzg(k) End do End Do End If Else ! the cell is completely outside the domain: a single zero-weight point is returned gausssize=1 Allocate(xgauss(1:1,2)) Allocate(wgauss(1:1)) !Computation of gauss weight and position in r and z direction for gaussian integration CALL get_gauss(spl2%sp1, ngauss(1), i, zg, wzg) CALL get_gauss(spl2%sp2, ngauss(2), j, rg, wrg) ! Construction of matrix xgauss and wgauss storing the weight and position for 2d gaussian integration xgauss(1,1)=(zg(1)+zg(ngauss(1)))*0.5 xgauss(1,2)=(rg(1)+rg(ngauss(2)))*0.5 wgauss(1)=0 End If If(PRESENT(celltype)) celltype=ctype End Subroutine calc_gauss Subroutine Find_crosspoint(x,y,xpt, direction) Real(kind=db):: x(2), y Real(kind=db):: xptm, xpt, temp Real(kind=db):: w, wold Integer, Intent(INOUT):: direction Integer:: i xptm=x(1) xpt=x(2) i=0 if(direction .eq.1) Call geom_weight(xptm,y,wold) if(direction .eq.2) Call geom_weight(y,xptm,wold) if(direction .eq.1) Call geom_weight(xpt,y,w) if(direction .eq.2) Call geom_weight(y,xpt,w) if(w*wold.gt.0) then direction=0 return End If ! Secant-type iteration for the boundary crossing (zero of the geometric weight) Do while( i .le.100 .and. w*wold .lt. 0) if(direction .eq.1) Call geom_weight(xpt,y,w) if(direction .eq.2) Call geom_weight(y,xpt,w) xptm=xpt-w*(xpt-xptm)/(w-wold) wold=w temp=xptm xptm=xpt xpt=temp i=i+1 End do if(xpt .ge. x(2) .or. xpt .le. x(1)) direction=0 End Subroutine SUBROUTINE wallweight(z,r,w, r_lim, above) Real(kind=db):: z(:),r(:),w(:,0:), r_lim Integer:: above w(:,0)=above*(r-r_lim) ! weight at position r,z If(size(w,2) .gt. 1) then ! first derivative w(:,1)=0 ! z derivative of w w(:,2)=above ! r derivative of w End If If (size(w,2).gt.3) then ! second derivative w(:,3)= 0 ! zz derivative w(:,4)= 0 ! zr derivative w(:,5)= 0 ! rr derivative
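! (the wall weight above is linear in r, so all of its second derivatives vanish identically)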
End If End subroutine wallweight Subroutine ellipseweight(z,r,w, Interior) Real(kind=db):: z(:),r(:),w(:,0:) Integer:: Interior w(:,0)=Interior*(1-(r-r_0)**2*invr_r**2-(z-z_0)**2*invr_z**2) ! weight at position r,z If(size(w,2) .gt. 1) then ! first derivative w(:,1)=-2*(z-z_0)*invr_z**2*Interior ! z derivative of w w(:,2)=-2*(r-r_0)*invr_r**2*Interior ! r derivative of w End If If (size(w,2).gt.3) then ! second derivative w(:,3)=-2*invr_z**2*Interior ! zz derivative w(:,4)=0 ! zr derivative w(:,5)=-2*invr_r**2*Interior ! rr derivative End If End subroutine ellipseweight Subroutine CompgBoundary(z,r,gtilde) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: gtilde(:,0:) if (walltype .eq. -1) then call gtest(z,r,gtilde) else call gstd(z,r,gtilde) end if End subroutine Subroutine gstd(z,r,gtilde) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: gtilde(:,0:) - Real(kind=db):: walltmp(size(r,1),0:size(gtilde,2)-1), elliptmp(size(r,1),0:size(gtilde,2)-1) + Real(kind=db):: belowtmp(size(r,1),0:size(gtilde,2)-1), abovetmp(size(r,1),0:size(gtilde,2)-1) Real(kind=db):: denom(size(r,1)) - call wallweight(z,r,walltmp, r_a, above1) + call wallweight(z,r,belowtmp, r_a, above1) if (walltype .eq. 1) then - call ellipseweight(z,r,elliptmp,Interior) + call ellipseweight(z,r,abovetmp,Interior) else - call wallweight(z,r,elliptmp, r_b, above2) + call wallweight(z,r,abovetmp, r_b, above2) end if ! Extension to the whole domain of the boundary conditions - gtilde(:,0)=(Phidown*elliptmp(:,0) + Phiup*walltmp(:,0) ) / & - & (elliptmp(:,0)+walltmp(:,0)) + gtilde(:,0)=(Phidown*abovetmp(:,0) + Phiup*belowtmp(:,0) ) / & + & (abovetmp(:,0)+belowtmp(:,0)) If(size(gtilde,2) .gt. 1) then ! first derivative - denom=(elliptmp(:,0)+walltmp(:,0))**2 - gtilde(:,1)=(Phiup-Phidown)*(walltmp(:,1)*elliptmp(:,0)-walltmp(:,0)*elliptmp(:,1)) / & + denom=(abovetmp(:,0)+belowtmp(:,0))**2 + gtilde(:,1)=(Phiup-Phidown)*(belowtmp(:,1)*abovetmp(:,0)-belowtmp(:,0)*abovetmp(:,1)) / & & denom ! Axial derivative - gtilde(:,2)=(Phiup-Phidown)*(walltmp(:,2)*elliptmp(:,0)-walltmp(:,0)*elliptmp(:,2)) / & + gtilde(:,2)=(Phiup-Phidown)*(belowtmp(:,2)*abovetmp(:,0)-belowtmp(:,0)*abovetmp(:,2)) / & & denom ! Radial derivative End If End subroutine Subroutine gtest(z,r,gtilde) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: gtilde(:,0:) ! gtilde(:,0)=sin(2*pi*z/L_z)*cos(2*pi*r/L_r) + 2 If(size(gtilde,2) .gt. 1) then ! first derivative gtilde(:,1)=2*pi/L_z*cos(2*pi*z/L_z)*cos(2*pi*r/L_r) gtilde(:,2)=-2*pi/L_r*sin(2*pi*z/L_z)*sin(2*pi*r/L_r) End If End subroutine Subroutine ftest(z,r,f) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT)::f(:,0:) ! f(:,0)=2*pi*sin(2*pi*z/L_z)*( & & 2*pi*cos(2*pi*r/L_r)*(1/L_z**2+1/L_r**2) & & +1/(r*L_r)*sin(2*pi*r/L_r) ) End Subroutine ftest subroutine Reducematrix(full,reduced) ! Builds the reduced web-spline matrix; conceptually reduced = etilde * full * transpose(etilde) (a hedged reading of the two passes below) use mumps_bsplines type(mumps_mat):: full, reduced, tempmat Real(kind=db), allocatable:: temparray(:), rsltarray(:) Integer:: fullrank, reducedrank Integer:: i,j call to_mat(full) fullrank=full%rank reducedrank=nbreducedspline call init(reducedrank,2,reduced) call init(fullrank,2,tempmat) write(*,*) "start reduced mat 1" !$OMP parallel private(j), firstprivate(temparray,rsltarray), default(shared) allocate(temparray(fullrank),rsltarray(fullrank)) !$OMP do do i=1,fullrank call getcol(etildet,i,temparray) rsltarray=vmx_mumps_mat_loc(full,temparray) DO j=1,fullrank CALL putele_sploc(tempmat%mat%row(i), j, rsltarray(j), .false.)
END DO end do !$OMP end do !$OMP barrier !$OMP single call to_mat(tempmat) write(*,*) "start reduced mat 2" !$OMP end single !$OMP do do i=1,reducedrank call getcol(etildet,i,temparray) rsltarray=vmx_mumps_mat_loc(tempmat,temparray) DO j=1,reducedrank CALL putele_sploc(reduced%mat%row(i), j, rsltarray(j), .false.) END DO end do !$OMP end do deallocate(temparray,rsltarray) !$OMP end parallel end subroutine !!############################################################################################### ! subroutines imported from mumps_bsplines and sparse lib to have thread-private routines SUBROUTINE putele_sploc(arow, j, val, nlforce_zero) ! ! Put (overwrite) element j of row arow or insert it in an increasing "index" ! USE sparse TYPE(sprow), TARGET :: arow INTEGER, INTENT(in) :: j DOUBLE PRECISION, INTENT(in) :: val LOGICAL, INTENT(in), OPTIONAL :: nlforce_zero ! TYPE(elt), TARGET :: pre_root TYPE(elt), POINTER :: t, p LOGICAL :: rmnode ! pre_root%next => arow%row0 ! pre_root is linked to the head of the list. t => pre_root ! ! Should nodes with zero val be removed? ! A new node with zero val is never created ! rmnode = .TRUE. IF(PRESENT(nlforce_zero)) rmnode = .NOT.nlforce_zero ! DO WHILE( ASSOCIATED(t%next) ) p => t%next IF( p%index .EQ. j ) THEN IF(ABS(val).LE.EPSILON(0.0d0) .AND. rmnode) THEN ! Remove the node for zero val! t%next => p%next arow%nnz = arow%nnz-1 arow%row0 => pre_root%next ! In case the head is altered DEALLOCATE(p) ELSE p%val = val END IF RETURN END IF IF( p%index .GT. j ) EXIT t => t%next END DO ! ! Never create a new node with zero val ! IF(ABS(val).GT.EPSILON(0.0d0)) THEN ALLOCATE(p) p = elt(j, val, t%next) t%next => p arow%nnz = arow%nnz+1 arow%row0 => pre_root%next END IF END SUBROUTINE putele_sploc ! SUBROUTINE getcol_loc(amat, j, arr) ! ! Get a column from sparse matrix ! USE mumps_bsplines TYPE(mumps_mat), INTENT(in) :: amat INTEGER, INTENT(in) :: j DOUBLE PRECISION, INTENT(out) :: arr(:) INTEGER :: i ! DO i=amat%istart,amat%iend CALL getele_loc(amat, i, j, arr(i)) END DO END SUBROUTINE getcol_loc SUBROUTINE getele_loc(mat, i, j, val) ! ! Get element (i,j) of sparse matrix ! USE mumps_bsplines TYPE(mumps_mat) :: mat INTEGER, INTENT(in) :: i, j DOUBLE PRECISION, INTENT(out) :: val INTEGER :: iget, jget INTEGER :: k, s, e ! iget = i jget = j IF(mat%nlsym) THEN IF( i.GT.j ) THEN ! Lower triangular part iget = j jget = i END IF END IF ! val = 0.0d0 ! Assume zero val if outside IF( iget.LT.mat%istart .OR. iget.GT.mat%iend ) RETURN ! IF(ASSOCIATED(mat%mat)) THEN CALL getele(mat%mat, iget, jget, val) ELSE s = mat%irow(iget) - mat%nnz_start + 1 e = mat%irow(iget+1) - mat%nnz_start k = isearch(mat%cols(s:e), jget) IF( k.GE.0 ) THEN val = mat%val(s+k) ELSE val = 0.0d0 ! Assume zero val if not found END IF END IF END SUBROUTINE getele_loc !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ FUNCTION vmx_mumps_mat_loc(mat, xarr) RESULT(yarr) ! ! Return product mat*x ! Use mumps_bsplines TYPE(mumps_mat) :: mat DOUBLE PRECISION, INTENT(in) :: xarr(:) DOUBLE PRECISION :: yarr(SIZE(xarr)) DOUBLE PRECISION :: alpha=1.0d0, beta=0.0d0 CHARACTER(len=6) :: matdescra INTEGER :: n, i, j ! n = mat%rank ! !#ifdef MKL IF(mat%nlsym) THEN matdescra = 'sun' ELSE matdescra = 'g' END IF ! CALL mkl_dcsrmv('N', n, n, alpha, matdescra, mat%val, & & mat%cols, mat%irow(1), mat%irow(2), xarr, & & beta, yarr) !#else ! yarr = 0.0d0 ! DO i=1,n ! DO j=mat%irow(i), mat%irow(i+1)-1 ! yarr(i) = yarr(i) + mat%val(j)*xarr(mat%cols(j)) ! END DO ! IF(mat%nlsym) THEN !
DO j=mat%irow(i)+1, mat%irow(i+1)-1 ! yarr(mat%cols(j)) = yarr(mat%cols(j)) & ! & + mat%val(j)*xarr(i) ! END DO ! END IF ! END DO ! Write(*,*) "linear mult" !#endif ! END FUNCTION vmx_mumps_mat_loc END MODULE geometry \ No newline at end of file diff --git a/src/stepon.f90 b/src/stepon.f90 index cf47d89..b6fbec7 100644 --- a/src/stepon.f90 +++ b/src/stepon.f90 @@ -1,43 +1,53 @@ SUBROUTINE stepon ! ! Advance one time step ! USE basic USE constants USE fields USE beam USE maxwellsource INTEGER:: i - -! The particles are injected by the source - IF (nlmaxwellsource) CALL maxwellsource_inject(time) - + DO i=1,nbspecies ! Boundary conditions for plasma particles outside the plasma region CALL bound(partslist(i)) ! Localisation of particles in cells CALL localisation(partslist(i)) + END DO + + ! The particles are injected by the source + CALL maxwellsource_inject(time) + DO i=1,nbspecies ! Assemble right hand side CALL rhscon(partslist(i)) END DO + ! Solve Poisson equation CALL poisson(splrz) DO i=1,nbspecies + ! Compute the electric field at the particle position CALL EFieldscompatparts(partslist(i)) + ! Compute the magnetic field at the particle position + call comp_mag_p(partslist(i)) + + ! Compute the energy of added particles + call calc_newparts_energy(partslist(i)) + ! Solve Newton eq. and advance velocity by delta t CALL comp_velocity(partslist(i)) ! Calculate new positions of particles CALL push(partslist(i)) END DO ! Calculate main physical quantities CALL diagnostics END SUBROUTINE stepon
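A minimal sketch of an input file exercising the geomparams namelist now that above1 and above2 are exposed (every numerical value below is an illustrative assumption, not a value taken from this patch):

&geomparams
  z_0 = 0.0, r_0 = 0.06, z_r = 0.24, r_r = 0.16,
  r_a = 0.02, r_b = 0.08, walltype = 0, nlweb = .true.,
  above1 = 1, above2 = -1
/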