diff --git a/src/beam_mod.f90 b/src/beam_mod.f90
index d65bd4c..41f2fda 100644
--- a/src/beam_mod.f90
+++ b/src/beam_mod.f90
@@ -1,2193 +1,2184 @@
 MODULE beam
!------------------------------------------------------------------------------
! EPFL/Swiss Plasma Center
!------------------------------------------------------------------------------
!
! MODULE: beam
!
!> @author
!> Guillaume Le Bars EPFL/SPC
!> Patryk Kaminski EPFL/SPC
!> Trach Minh Tran EPFL/SPC
!
! DESCRIPTION:
!> Module responsible for loading, advancing and computing the necessary diagnostics for the simulated particles.
!------------------------------------------------------------------------------
!
  USE constants
  use mpi
  USE mpihelper
  USE basic, ONLY: mpirank, mpisize
  USE distrib
  USE particletypes
  USE weighttypes

  IMPLICIT NONE

!
  !TYPE(particles) :: parts  !< Storage for all the particles
  !SAVE :: parts
  TYPE(particles), DIMENSION(:), ALLOCATABLE, SAVE :: partslist

! Diagnostics (scalars)
  REAL(kind=db) :: ekin=0      !< Total kinetic energy (J)
  REAL(kind=db) :: epot=0      !< Total potential energy (J)
  REAL(kind=db) :: etot=0      !< Current total energy (J)
  REAL(kind=db) :: etot0=0     !< Initial total energy (J)
  REAL(kind=db) :: loc_etot0=0 !< Theoretical local total energy (J)
  REAL(kind=db) :: Energies(4) !< (1) kinetic energy, (2) potential energy, (3) total energy and (4) gained/lost energy due to gain or loss of particles (J)

!
  INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: Nplocs_all !< Array containing the local numbers of particles in each MPI process

  INTERFACE add_created_part
    MODULE PROCEDURE add_linked_created_part, add_list_created_part
  END INTERFACE add_created_part

!
  abstract interface
    subroutine rloader(nbase,y,rminus,rplus)
      USE constants
      REAL(kind=db), INTENT(out) :: y(:)
      INTEGER, INTENT(in) :: nbase
      REAL(kind=db), INTENT(in) :: rplus, rminus
    end subroutine

    REAL(kind=db) FUNCTION gamma(UZ, UR, UTHET)
      USE constants
      REAL(kind=db), INTENT(IN):: UR,UZ,UTHET
    end FUNCTION
  end interface

CONTAINS

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Loads the particles at the beginning of the simulation and creates the parts variable if necessary
!---------------------------------------------------------------------------
  SUBROUTINE load_parts
    USE basic, ONLY: nplasma, mpirank, ierr, distribtype, nlclassical, nbspecies, partfile
    use mpi
    INTEGER:: i
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: VZ, VR, VTHET

    ALLOCATE(VZ(nplasma), VR(nplasma), VTHET(nplasma))

    ! Select case to define the type of distribution
    SELECT CASE(distribtype)
    CASE(1) ! Gaussian distribution in V, uniform in Z and 1/R in R
      CALL loaduniformRZ(partslist(1), VR, VZ, VTHET)
    CASE(2) ! Stable distribution from Davidson 4.95 p.119
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodunir)
    CASE(3) ! Stable distribution from Davidson 4.95 p.119 but with constant distribution in R
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodinvr)
    CASE(4) ! Stable distribution from Davidson 4.95 p.119 but with gaussian distribution in R
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodgausr)
    CASE(5) ! Stable distribution from Davidson 4.95 p.119 with gaussian in V computed from v_th given by temp
      CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodunir)
    CASE(6) ! Uniform distribution in R and Z and Gaussian distribution in V with Vz

!> @brief Checks for each particle if the z position is outside of the local/global simulation space.
!> Depending on the boundary conditions, the leaving particles are sent to the correct neighbouring MPI process
!> or deleted.
!
!> @param[in] p particles structure
!
!> @author Guillaume Le Bars EPFL/SPC
!---------------------------------------------------------------------------
  SUBROUTINE bound(p)
    USE basic, ONLY: zgrid, nz, Zbounds, mpirank, step, leftproc, rightproc, partperiodic
    use omp_lib
    IMPLICIT NONE
    type(particles), INTENT(INOUT):: p
    INTEGER :: i,j, rsendnbparts, lsendnbparts, nblostparts
    INTEGER :: receivednbparts, partdiff
    LOGICAL:: leftcomm, rightcomm
    INTEGER, ALLOCATABLE:: partstoremove(:)
    INTEGER, allocatable :: nblost(:)

    allocate(nblost(size(p%nblost,1)))
    nblost=0
-    !$OMP SINGLE
-    p%losthole=0
-    p%sendhole=0
-    !$OMP END SINGLE NOWAIT
    IF (p%Nploc .gt. 0) THEN
      ! We communicate with the left process
      leftcomm  = leftproc  .ne. -1
      ! We communicate with the right process
      rightcomm = rightproc .ne. -1

      ! Boundary conditions in the z direction
      !$OMP DO
      DO i=1,p%Nploc
+        p%losthole(i)=0
+        p%sendhole(i)=0
        ! If the particle is to the right of the local simulation space, it is sent to the right MPI process
        IF (p%pos(3,i) .ge. zgrid(Zbounds(mpirank+1))) THEN
          IF(partperiodic) THEN
            DO WHILE (p%pos(3,i) .GT. zgrid(nz))
              p%pos(3,i) = p%pos(3,i) - zgrid(nz) + zgrid(0)
            END DO
          END IF
          !!$OMP CRITICAL (nbparts)
          IF(rightcomm) THEN
            rsendnbparts=rsendnbparts+1
            p%sendhole(i)=i
          ELSE IF(.not. partperiodic) THEN
            nblostparts=nblostparts+1
            p%losthole(i)=i
            nblost(2)=nblost(2)+1
          END IF
          !!$OMP END CRITICAL (nbparts)
        ! If the particle is to the left of the local simulation space, it is sent to the left MPI process
        ELSE IF (p%pos(3,i) .lt. zgrid(Zbounds(mpirank))) THEN
          IF(partperiodic) THEN
            DO WHILE (p%pos(3,i) .LT. zgrid(0))
              p%pos(3,i) = p%pos(3,i) + zgrid(nz) - zgrid(0)
            END DO
          END IF
          !!$OMP CRITICAL (nbparts)
          IF(leftcomm) THEN
            ! We send the particle to the left process
            lsendnbparts=lsendnbparts+1
            p%sendhole(i)=-i
          ELSE IF(.not. partperiodic) THEN
            ! We destroy the particle
            nblostparts=nblostparts+1
            p%losthole(i)=i
            nblost(1)=nblost(1)+1
          END IF
          !!$OMP END CRITICAL (nbparts)
        END IF
      END DO
      !$OMP END DO NOWAIT
    END IF

    !$OMP critical (lostparts_red)
    p%nblost=nblost+p%nblost
    !$OMP END CRITICAL (lostparts_red)
    !$OMP BARRIER
    !$OMP MASTER
    receivednbparts=0
    j=1
    rsendnbparts=0
    lsendnbparts=0
    Do i=1,p%Nploc
      if(p%sendhole(i) .eq. 0) cycle
      p%sendhole(j)=p%sendhole(i)
      if(p%sendhole(i).gt.0)then
        rsendnbparts=rsendnbparts+1
      else
        lsendnbparts=lsendnbparts+1
      end if
      j=j+1
    end do
    j=1
    nblostparts=0
    Do i=1,p%Nploc
      if(p%losthole(i) .eq. 0) cycle
      p%losthole(j)=p%losthole(i)
      j=j+1
      nblostparts=nblostparts+1
    end do
-
+
    IF(mpisize .gt. 1) THEN
      ! We send the particles leaving the local simulation space to the closest neighbour
-      CALL particlescommunication(p, lsendnbparts, rsendnbparts, p%sendhole, receivednbparts, (/leftproc,rightproc/))
+      CALL particlescommunication(p, lsendnbparts, rsendnbparts, receivednbparts, (/leftproc,rightproc/))
    END IF

    ! If the boundary conditions are not periodic, we delete the corresponding particles
    IF(nblostparts .gt. 0 .and. step .ne. 0) THEN
      DO i=1,nblostparts
        CALL delete_part(p, p%losthole(i), .false. )
      END DO
      !WRITE(*,'(i8.2,a,i4.2)') nblostparts, " particles lost in z on process: ", mpirank
    END IF

    ! Compute whether we received fewer particles than we sent
    partdiff=max(lsendnbparts+rsendnbparts-receivednbparts,0)
    IF(nblostparts + partdiff .gt. 0) THEN
      ALLOCATE(partstoremove(nblostparts+partdiff))
      partstoremove(1:partdiff)=abs(p%sendhole(receivednbparts+1:receivednbparts+partdiff))
      partstoremove(partdiff+1:partdiff+nblostparts)=abs(p%losthole(1:nblostparts))
      call LSDRADIXSORT(partstoremove,nblostparts + partdiff)
      ! If we received fewer particles than we sent, or lost particles, we fill the remaining holes with the particles from the end of the parts arrays
      DO i=nblostparts+partdiff,1,-1
        CALL move_part(p, p%Nploc, partstoremove(i))
        p%partindex(p%Nploc)=-1
        p%Nploc = p%Nploc-1
      END DO
      deallocate(partstoremove)
    END IF
    !$OMP END MASTER
    deallocate(nblost)
  END subroutine bound

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Check if a particle is outside the simulation domain and remove it if needed
!> @param[in] p particles structure
!---------------------------------------------------------------------------
  SUBROUTINE boundary_loss(p)
    USE basic, ONLY: rgrid, nr
    Use geometry, ONLY: geom_weight, dom_weight
    Use iiee
    Use omp_lib
!---------------------------------------------------------------------------
! add below the usage of module iiee
    USE iiee
    IMPLICIT NONE
    type(particles), INTENT(INOUT):: p
    INTEGER :: i,j,isup, nblostparts, iend,nbunch
    INTEGER, DIMENSION(16)::idwall
    INTEGER :: nblost(size(p%nblost,1)), ii, Nploc_init, Nploc_new

    nblost=0
    nbunch=16

    IF (p%Nploc .le. 0) return
-    !$OMP SINGLE
-    p%losthole=0
-    !$OMP END SINGLE NOWAIT
-
    !!$OMP PARALLEL DEFAULT(SHARED), private(i,iend,j,isup,idwall)
    !$OMP DO
    DO i=1,p%Nploc,nbunch
      ! Avoid segmentation faults caused by accessing non-relevant data
      iend=min(i+nbunch-1,p%Nploc)
+      p%losthole(i:iend)=0
      ! Calculate the weight to determine if a particle is inside the simulation domain.
      call dom_weight(p%pos(3,i:iend), p%pos(1,i:iend), p%geomweight(0,i:iend),idwall(1:iend-i+1))
      do j=i,iend
        if(p%geomweight(0,j).le.0 .or. p%pos(1,j) .ge. rgrid(nr) .or. p%pos(1,j) .le. rgrid(0)) then
          ! If the particle is outside of the simulation space in the r direction, or if it is outside of the vacuum region, it is deleted.
          !!$OMP CRITICAL (lostparts)
          p%losthole(j)=j
          !!$OMP END CRITICAL (lostparts)
          isup=0
          if(p%pos(1,j) .ge. rgrid(nr) .or. idwall(j-i+1) .gt.0) then
            isup=1
          end if
          nblost(3+isup+idwall(j-i+1))=nblost(3+isup+idwall(j-i+1))+1
        else
          call p_calc_rzindex(p,j)
        end if
      end do
      call geom_weight(p%pos(3,i:iend), p%pos(1,i:iend), p%geomweight(:,i:iend))
    END DO
    !$OMP END DO NOWAIT
    !!$OMP END PARALLEL
    !$OMP critical (lostparts_red)
    p%nblost=nblost+p%nblost
    !$OMP END CRITICAL (lostparts_red)
    !$OMP BARRIER
    !$OMP MASTER
    nblostparts=0
    j=1
    Do i=1,p%Nploc
      if(p%losthole(i) .eq. 0) cycle
      p%losthole(j)=p%losthole(i)
      j=j+1
      nblostparts=nblostparts+1
    end do
    IF(nblostparts.gt.0) THEN
      !call qsort(losthole,p%Nploc,sizeof(losthole(1)),compare_int)
      call LSDRADIXSORT(p%losthole(1:nblostparts),nblostparts)
      !Write(*,'(a,60i)') "losthole: ", losthole(1:nblostparts+1)
      IF(p%iiee_id.gt.0) THEN
        Nploc_init = partslist(p%iiee_id)%Nploc
        CALL ion_induced(p, p%losthole, partslist(p%iiee_id), nblostparts)
        Nploc_new = partslist(p%iiee_id)%Nploc
        if (Nploc_new-Nploc_init .ge. 1) then
          DO ii =Nploc_init+1,Nploc_new
            Call p_calc_rzindex(partslist(p%iiee_id),ii)
          END DO
        end if
        !----------------------------------------------------------
        ! CALL ion_induced(p,losthole,partslist(indpelec))
        ! Here we call our routine to create electrons out of
        ! eliminated ions.
        ! No need to define indpelec in this file, since
        ! we have the index p%iiee_id.
        !----------------------------------------------------------
      END IF
      DO i=nblostparts,1,-1
        CALL delete_part(p,p%losthole(i))
      END DO
    END IF
    !$OMP END MASTER
    !$OMP BARRIER
  END SUBROUTINE boundary_loss

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Computes the radial and axial cell index of the particle i
!> @param[in] p particles structure
!> @param[in] i index in p of the particle
!---------------------------------------------------------------------------
  subroutine p_calc_rzindex(p,i)
    use basic, only: rgrid,zgrid,invdz,invdr, nnr, nsubr,nsubz, nnz
    integer::i,j,k
    type(particles)::p
    k=0
    do j=1,nsubr
      IF (p%pos(1,i) .GT. rgrid(k) .AND. p%pos(1,i) .LT. rgrid(k+nnr(j))) THEN
        p%rindex(i)=floor((p%pos(1,i)-rgrid(k))*invdr(j))+k
        exit
      end if
      k=k+nnr(j)
    end do
    k=0
    do j=1,nsubz
      IF (p%pos(3,i) .GT. zgrid(k) .AND. p%pos(3,i) .LT. zgrid(k+nnz(j))) THEN
        p%zindex(i)=floor((p%pos(3,i)-zgrid(k))*invdz(j))+k
        exit
      end if
      k=k+nnz(j)
    end do
    !p%zindex(i)=floor((p%Z(i)-zgrid(0))*invdz)
  end subroutine p_calc_rzindex

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Computes the magnetic field amplitude at each particle position interpolated from the magnetic field at the closest grid point
!> @param[in] p particles structure
!---------------------------------------------------------------------------
  SUBROUTINE comp_mag_p(p)
    USE basic, ONLY: zgrid, rgrid, BZ, BR, nz, invdz
    type(particles), INTENT(INOUT):: p
    INTEGER :: i
    Real(kind=db):: WZ,WR
    INTEGER:: j1,j2,j3,j4

    !$OMP DO SIMD Private(J1,J2,J3,J4,WZ,WR)
    DO i=1,p%Nploc
      WZ=(p%pos(3,i)-zgrid(p%zindex(i)))/(zgrid(p%zindex(i)+1)-zgrid(p%zindex(i)));
      WR=(p%pos(1,i)-rgrid(p%rindex(i)))/(rgrid(p%rindex(i)+1)-rgrid(p%rindex(i)));
      J1=(p%rindex(i))*(nz+1) + p%zindex(i)+1
      J2=(p%rindex(i))*(nz+1) + p%zindex(i)+2
      J3=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+1
      J4=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+2
      ! Interpolation for magnetic field
      p%B(2,i)=(1-WZ)*(1-WR)*Bz(J4) &
      &        +WZ*(1-WR)*Bz(J3) &
      &        +(1-WZ)*WR*Bz(J2) &
      &        +WZ*WR*Bz(J1)
      p%B(1,i)=(1-WZ)*(1-WR)*Br(J4) &
      &        +WZ*(1-WR)*Br(J3) &
      &        +(1-WZ)*WR*Br(J2) &
      &        +WZ*WR*Br(J1)
    END DO
    !$OMP END DO SIMD NOWAIT
  end subroutine comp_mag_p

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Routine used to compute the Lorentz factor \f$\gamma\f$ in the classical simulations.
!> This routine systematically returns 1.0 to treat the system according to classical dynamics.
!
!> @param[out] gamma the Lorentz factor \f$\gamma\f$
!> @param[in] UZ \f$\gamma\beta_z=\gamma v_z/c\f$ the normalized particle longitudinal velocity
!> @param[in] UR \f$\gamma\beta_r=\gamma v_r/c\f$ the normalized particle radial velocity
!> @param[in] UTHET \f$\gamma\beta_\theta=\gamma v_\theta/c\f$ the normalized particle azimuthal velocity
!---------------------------------------------------------------------------
  REAL(kind=db) FUNCTION gamma_classical(UZ, UR, UTHET)
!!#if __INTEL_COMPILER > 1700
    !$OMP declare simd(gamma_classical)
!!#endif
    REAL(kind=db), INTENT(IN):: UR,UZ,UTHET
    gamma_classical=1.0
  END FUNCTION gamma_classical

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Routine used to compute the Lorentz factor \f$\gamma\f$ in the relativistic simulations.
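!> (Worked example: a particle with \f$\gamma\beta_z=\gamma\beta_r=\gamma\beta_\theta=0.5\f$
!> has \f$\gamma=\sqrt{1+0.75}\approx 1.32\f$.)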
!> This routine computes the Lorentz factor \f$\gamma=\sqrt{1+\mathbf{\gamma\beta}^2}\f$
!
!> @param[out] gamma the Lorentz factor \f$\gamma\f$
!> @param[in] UZ \f$\gamma\beta_z=\gamma v_z/c\f$ the normalized particle longitudinal velocity
!> @param[in] UR \f$\gamma\beta_r=\gamma v_r/c\f$ the normalized particle radial velocity
!> @param[in] UTHET \f$\gamma\beta_\theta=\gamma v_\theta/c\f$ the normalized particle azimuthal velocity
!---------------------------------------------------------------------------
  REAL(kind=db) FUNCTION gamma_relativistic(UZ, UR, UTHET)
!!#if __INTEL_COMPILER > 1700
    !$OMP declare simd(gamma_relativistic)
!!#endif
    REAL(kind=db), INTENT(IN):: UR,UZ,UTHET
    gamma_relativistic=sqrt(1+UZ**2+UR**2+UTHET**2)
  END FUNCTION gamma_relativistic

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief General routine to compute the velocities at time t+1.
!> This routine makes it possible to treat the classical and relativistic cases efficiently from a numerical standpoint,
!> by using a pointer to the routine computing gamma. This avoids the nlclassical flag check on each particle.
!
!> @param[in] p The particles structure being updated
!---------------------------------------------------------------------------
  SUBROUTINE comp_velocity(p)
!
!   Computes the new velocity of the particles due to Lorentz force
!
    USE basic, ONLY : nlclassical
    type(particles), INTENT(INOUT):: p

    ! Store old velocities
    !CALL swappointer(p%UZold, p%UZ)
    !$OMP master
    CALL swappointer2(p%Uold, p%U)
    !CALL swappointer(p%UTHETold, p%UTHET)
    CALL swappointer(p%Gammaold, p%Gamma)
    !$OMP end master
    !$OMP BARRIER

    IF (nlclassical) THEN
      CALL comp_velocity_fun(p, gamma_classical)
    ELSE
      CALL comp_velocity_fun(p, gamma_relativistic)
    END IF

  END SUBROUTINE comp_velocity

!---------------------------------------------------------------------------
!> @author
!> Patryk Kaminski EPFL/SPC
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Routine called by comp_velocity to compute the velocities at time t+1.
!> This routine makes it possible to treat the classical and relativistic cases efficiently from a numerical standpoint,
!> by using the routine computing gamma as an input. This avoids the nlclassical flag check on each particle.
!
!> @param[in] gammafun the function used to compute the value of the Lorentz factor \f$\gamma\f$
!> @param[in] p The particles structure being updated
!---------------------------------------------------------------------------
  SUBROUTINE comp_velocity_fun(p, gammafun)
!
!   Computes the new velocity of the particles due to Lorentz force
!
    USE basic, ONLY : bnorm, dt, tnorm
    procedure(gamma)::gammafun
    type(particles), INTENT(INOUT):: p
    REAL(kind=db) :: tau
    REAL(kind=db):: BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR, ZBZ2, ZBR2
    INTEGER:: J1, J2, J3, J4
    INTEGER:: i

    ! Normalized time increment
    tau=p%qmRatio*bnorm*0.5*dt*tnorm
    IF (p%Nploc .NE. 0) THEN
      !$OMP DO SIMD PRIVATE(J1,J2,J3,J4,BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR, ZBZ2, ZBR2)
      DO i=1,p%Nploc
        ! First half of electric pulse
        p%U(3,i)=p%Uold(3,i)+p%E(2,i)*tau
        p%U(1,i)=p%Uold(1,i)+p%E(1,i)*tau
        p%Gamma(i)=gammafun(p%U(3,i), p%U(1,i), p%Uold(2,i))
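        ! A sketch of the algebra implemented below (a Boris-type rotation):
        ! with t = (ZBR,ZBZ) = tau*B/gamma and s = 2t/(1+|t|^2) (the ZBR2,ZBZ2
        ! below), the intermediate u' = u + u x t is followed by u+ = u + u' x s,
        ! which rotates u around B while preserving its norm.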
        ! Rotation along magnetic field
        ZBZ=tau*p%B(2,i)/p%Gamma(i)
        ZBR=tau*p%B(1,i)/p%Gamma(i)
        ZPZ=p%U(3,i)-ZBR*p%Uold(2,i)                   ! u'_{z}
        ZPR=p%U(1,i)+ZBZ*p%Uold(2,i)                   ! u'_{r}
        ZPTHET=p%Uold(2,i)+(ZBR*p%U(3,i)-ZBZ*p%U(1,i)) ! u'_{theta}
        SQR=1+ZBZ*ZBZ+ZBR*ZBR
        ZBZ2=2*ZBZ/SQR
        ZBR2=2*ZBR/SQR
        p%U(3,i)=p%U(3,i)-ZBR2*ZPTHET                  ! u+_{z}
        p%U(1,i)=p%U(1,i)+ZBZ2*ZPTHET                  ! u+_{r}
        p%U(2,i)=p%Uold(2,i)+(ZBR2*ZPZ-ZBZ2*ZPR)       ! u+_{theta}
        ! Second half of acceleration
        p%U(3,i)=p%U(3,i)+p%E(2,i)*tau
        p%U(1,i)=p%U(1,i)+p%E(1,i)*tau
        ! Final computation of the Lorentz factor
        p%Gamma(i)=gammafun(p%U(3,i), p%U(1,i), p%U(2,i))
      END DO
      !$OMP END DO SIMD NOWAIT
    END IF
    p%collected=.false.
  END SUBROUTINE comp_velocity_fun

!---------------------------------------------------------------------------
!> @author
!> Patryk Kaminski EPFL/SPC
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Computes the particles position at time t+1
!> This routine computes the particles position at time t+1 according to the Buneman algorithm.
!
!> @param[in] p The particles structure being updated
!---------------------------------------------------------------------------
  SUBROUTINE push(p)
    Use basic, ONLY: dt
    type(particles), INTENT(INOUT):: p
    REAL(kind=db):: XP, YP, COSA, SINA, U1, U2, ALPHA
    INTEGER :: i

    IF (p%Nploc .NE. 0) THEN
      !!$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(XP, YP, COSA, SINA, U1, U2, ALPHA)
      !$OMP DO SIMD
      DO i=1,p%Nploc
        ! Local Cartesian coordinates
        XP=p%pos(1,i)+dt*p%U(1,i)/p%Gamma(i)
        YP=dt*p%U(2,i)/p%Gamma(i)
        ! Conversion to cylindrical coordinates
        p%pos(3,i)=p%pos(3,i)+dt*p%U(3,i)/p%Gamma(i)
        p%pos(1,i)=sqrt(XP**2+YP**2)
        ! Computation of the rotation angle
        IF (p%pos(1,i) .EQ. 0) THEN
          COSA=1
          SINA=0
          ALPHA=0
        ELSE
          COSA=XP/p%pos(1,i)
          SINA=YP/p%pos(1,i)
          ALPHA=asin(SINA)
        END IF
        ! New azimuthal position
        p%pos(2,i)=MOD(p%pos(2,i)+ALPHA,2*pi)
        ! Velocity in the rotated reference frame
        U1=COSA*p%U(1,i)+SINA*p%U(2,i)
        U2=-SINA*p%U(1,i)+COSA*p%U(2,i)
        p%U(1,i)=U1
        p%U(2,i)=U2
      END DO
      !$OMP END DO SIMD NOWAIT
    END IF
    !$OMP SINGLE
    p%collected=.false.
    !$OMP END SINGLE
  END SUBROUTINE push

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Computes several diagnostic quantities
!> This routine computes the total kinetic and electric potential energy.
!> It keeps track of the reference energy and the number of particles per MPI node.
!
!---------------------------------------------------------------------------
  SUBROUTINE partdiagnostics
!
!   Compute energies
!
    USE constants, ONLY: vlight
    USE basic, ONLY: phinorm, cstep, nlclassical, ierr, nbspecies
    INTEGER:: i,j

    ! Reset the quantities
    !$OMP SINGLE
    ekin=0
    epot=0
    etot=0
    !$OMP END SINGLE NOWAIT

    ! Computation of the kinetic and potential energy as well as fluid velocities and density
    !!$OMP PARALLEL DO REDUCTION(+:epot, ekin) DEFAULT(SHARED), PRIVATE(i,j)
    Do j=1,nbspecies
      if(.not. partslist(j)%is_field) CYCLE
      !$OMP DO reduction(+:epot,ekin)
      DO i=1,partslist(j)%Nploc
        ! Potential energy
        epot=epot+(partslist(j)%pot(i)+partslist(j)%potxt(i))*partslist(j)%q*partslist(j)%weight
        ! Kinetic energy
        IF(.not. nlclassical) THEN
          ekin=ekin+(0.5*(partslist(j)%Gammaold(i)+partslist(j)%Gamma(i))-1)*partslist(j)%m*partslist(j)%weight
        ELSE
          ekin=ekin+0.5*( partslist(j)%U(1,i)*partslist(j)%Uold(1,i) &
          &             + partslist(j)%U(3,i)*partslist(j)%Uold(3,i) &
          &             + partslist(j)%U(2,i)*partslist(j)%Uold(2,i) )*partslist(j)%m*partslist(j)%weight
        END IF
      END DO
      !$OMP END DO NOWAIT
    END DO
    !$OMP BARRIER
    !$OMP MASTER
    !!$OMP END PARALLEL DO
    epot=epot*phinorm*0.5
    ekin=ekin*vlight**2

    ! Shift to Etot at cstep=1 (not valid yet at cstep=0!)
    IF(cstep.EQ. 1) THEN
      ! Compute the local total energy
      loc_etot0 = epot+ekin
      etot0=0
    END IF
    !etot=loc_etot0

    ! Compute the total energy
    etot=epot+ekin
    Energies=(/ekin,epot,etot,loc_etot0/)

    ! The computed energy is sent to the root process
    IF(mpisize .gt.1) THEN
      IF(mpirank .eq.0 ) THEN
        CALL MPI_REDUCE(MPI_IN_PLACE, Energies, 4, db_type, db_sum_op, &
        &               0, MPI_COMM_WORLD, ierr)
        etot0=etot0+Energies(4)
        ekin=Energies(1)
        epot=Energies(2)
        etot=Energies(3)
      ELSE
        CALL MPI_REDUCE(Energies, Energies, 4, db_type, db_sum_op, &
        &               0, MPI_COMM_WORLD, ierr)
      END IF
    ELSE
      etot0=etot0+loc_etot0
    END IF
    loc_etot0=0

    ! Send the local number of particles on each node to the root process
    IF(mpisize .gt. 1) THEN
      Nplocs_all(mpirank)=partslist(1)%Nploc
      IF(mpirank .eq.0 ) THEN
        CALL MPI_gather(MPI_IN_PLACE, 1, MPI_INTEGER, Nplocs_all, 1, MPI_INTEGER,&
        &               0, MPI_COMM_WORLD, ierr)
        !CALL MPI_REDUCE(MPI_IN_PLACE,partslist(1)%nudcol,3,db_type,db_sum_op,0,MPI_COMM_WORLD,ierr)
        partslist(1)%Nptot=sum(Nplocs_all)
        !partslist(1)%nudcol=partslist(1)%nudcol/partslist(1)%Nptot
      ELSE
        CALL MPI_gather(Nplocs_all(mpirank), 1, MPI_INTEGER, Nplocs_all, 1, MPI_INTEGER,&
        &               0, MPI_COMM_WORLD, ierr)
        !CALL MPI_REDUCE(partslist(1)%nudcol,partslist(1)%nudcol,3,db_type,db_sum_op,0,MPI_COMM_WORLD,ierr)
      END IF
    ELSE
      partslist(1)%Nptot=partslist(1)%Nploc
    END IF
    !$OMP END MASTER
    !$OMP BARRIER
  end subroutine partdiagnostics

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Collect the particles positions and velocities on the root process.
!> If the collection has already been performed at this time step, the routine does nothing.
!
!---------------------------------------------------------------------------
  SUBROUTINE collectparts(p)
    USE basic, ONLY: mpirank, mpisize, ierr
    type(particles), INTENT(INOUT):: p
    INTEGER, DIMENSION(:), ALLOCATABLE :: displs, Nploc
    INTEGER:: i
    INTEGER:: particles_type(mpisize-1) !< Stores the MPI data type used for particles gathering on node 0 and broadcast from node 0
    INTEGER :: part_requests(mpisize-1)
    INTEGER:: stats(MPI_STATUS_SIZE,mpisize-1)

    part_requests=MPI_REQUEST_NULL
    particles_type=MPI_DATATYPE_NULL
    IF(p%collected) RETURN ! exit subroutine if particles have already been collected during this time step

    ALLOCATE(Nploc(0:mpisize-1))
    ALLOCATE(displs(0:mpisize-1))
    displs=0
    Nploc(mpirank)=p%Nploc
    CALL MPI_Allgather(MPI_IN_PLACE, 1, MPI_INTEGER, Nploc, 1, MPI_INTEGER,&
    &                  MPI_COMM_WORLD, ierr)
    p%Nptot=sum(Nploc)
    IF(p%Nptot .eq. 0 ) THEN
      p%partindex=-1
      p%collected=.true.
      RETURN
    END IF
    Do i=1,mpisize-1
      displs(i)=displs(i-1)+Nploc(i-1)
    END DO
    IF(mpirank.eq.0 .and. p%Nptot .gt. size(p%pos,2)) THEN
      CALL change_parts_allocation(p,max(p%Nptot-size(P%pos,2),floor(0.5*size(P%pos,2))))
    END IF

    IF(mpirank .ne. 0) THEN
      if(Nploc(mpirank) .gt. 0) THEN
        Call init_particles_gather_mpi(p,1,Nploc(mpirank),particles_type(mpirank))
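        ! The derived MPI datatype built by init_particles_gather_mpi describes
        ! this rank's particle slice in place; the matching receive on rank 0
        ! (below) uses an offset of displs(i)+1, so the particles land directly
        ! at their final position in the root's arrays.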
        ! Send the particle information to the root process
        CALL MPI_SEND(p, 1, particles_type(mpirank), 0, partsgather_tag, MPI_COMM_WORLD, ierr)
        CALL MPI_TYPE_FREE(particles_type(mpirank),ierr)
      END IF
    ELSE
      ! Receive particle information from all processes
      DO i=1,mpisize-1
        if(Nploc(i) .lt. 1) cycle
        Call init_particles_gather_mpi(p,displs(i)+1,Nploc(i),particles_type(i))
        CALL MPI_IRECV(p,1,particles_type(i),i,partsgather_tag,MPI_COMM_WORLD, part_requests(i), ierr)
      END DO
      CALL MPI_WAITALL(mpisize-1,part_requests, stats, ierr)
      p%partindex(sum(Nploc)+1:)=-1
      Do i=1,mpisize-1
        if(Nploc(i) .lt. 1) cycle
        CALL MPI_TYPE_FREE(particles_type(i),ierr)
      END DO
    END IF
    p%collected=.TRUE.
  END SUBROUTINE collectparts

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Computes the velocities at time t-1/2 delta t to keep the second order precision in time on the velocity.
!> This should only be used at particle initialisation time, or in the case of a restart.
!
!---------------------------------------------------------------------------
  SUBROUTINE adapt_vinit(p)
    !! Computes the velocity at time -dt/2 from velocities computed at time 0
    !
    USE basic, ONLY : bnorm, dt, tnorm, nlclassical, phinorm, distribtype, vnorm
    type(particles), INTENT(INOUT):: p
    REAL(kind=db) :: tau, BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, &
    &                SQR, Vperp, v2
    INTEGER :: J1, J2, J3, J4, i
    REAL(kind=db), DIMENSION(:), ALLOCATABLE :: VZ, VR, VTHET

    ! In case the Davidson distribution is used, the longitudinal and radial velocities are adapted to take into account the
    ! electric potential.
    IF(distribtype .EQ. 2 .OR. distribtype .EQ. 3 .OR. distribtype .EQ. 4 .or. p%Davidson) THEN
      ALLOCATE(VR(p%Nploc),VZ(p%Nploc),VTHET(p%Nploc))
      CALL loduni(7,VZ)
      VZ=VZ*2*pi
      VTHET=p%U(2,:)/p%Gamma*vnorm
      DO i=1,p%Nploc
        Vperp=sqrt(MAX(2*p%H0/p%m-2*p%qmRatio*p%pot(i)*phinorm-VTHET(i)**2,0.0_db))
        VR(i)=Vperp*sin(VZ(i))
        VZ(i)=Vperp*cos(VZ(i))
        IF(nlclassical) THEN
          p%Gamma(i)=1
        ELSE
          v2=VR(i)**2+VZ(i)**2+VTHET(i)**2
          p%Gamma(i)=sqrt(1/(1-v2/vnorm**2))
        END IF
        p%U(1,i)=p%Gamma(i)*VR(i)/vnorm
        p%U(3,i)=p%Gamma(i)*VZ(i)/vnorm
        p%U(2,i)=p%Gamma(i)*VTHET(i)/vnorm
      END DO
      DEALLOCATE(VR,VZ,VTHET)
    END IF

    ! Normalised time increment
    !tau=-omegac/2/omegap*dt/tnorm
    tau=-p%qmRatio*bnorm*0.5*dt*tnorm

    ! Store old velocities
    CALL swappointer2(p%Uold, p%U)
    !CALL swappointer(p%URold, p%UR)
    !CALL swappointer(p%UTHETold, p%UTHET)
    CALL swappointer(p%Gammaold, p%Gamma)

    IF (p%Nploc .NE. 0) THEN
      !$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(J1,J2,J3,J4,BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR)
      DO i=1,p%Nploc
        ! Half inverse rotation along magnetic field
        ZBZ=tau*p%B(2,i)/p%Gammaold(i)
        ZBR=tau*p%B(1,i)/p%Gammaold(i)
        SQR=1+ZBZ*ZBZ+ZBR*ZBR
        ZPZ=(p%Uold(3,i)-ZBR*p%Uold(2,i))/SQR                    ! u-_{z}
        ZPR=(p%Uold(1,i)+ZBZ*p%Uold(2,i))/SQR                    ! u-_{r}
        ZPTHET=p%Uold(2,i)+(ZBR*p%Uold(3,i)-ZBZ*p%Uold(1,i))/SQR ! u-_{theta}
        p%U(3,i)=ZPZ
        p%U(1,i)=ZPR
        p%U(2,i)=ZPTHET
        ! Half of the deceleration
        p%U(3,i)=p%U(3,i)+p%E(2,i)*tau
        p%U(1,i)=p%U(1,i)+p%E(1,i)*tau
        IF(.not. nlclassical) THEN
          p%Gamma(i)=sqrt(1+p%U(3,i)**2+p%U(1,i)**2+p%U(2,i)**2)
        END IF
      END DO
      !$OMP END PARALLEL DO SIMD
    END IF
  END SUBROUTINE adapt_vinit

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Calculates the number of particles per column of the spatial grid (at fixed axial cell position)
!> This facilitates the computation of the axial grid limits for each MPI worker
!
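!> A minimal usage sketch (this is how calc_Zbounds below consumes the result):
!>   call calcnbperz(p, partspercol)
!>   ! then accumulate partspercol(k) until ~Nptot/mpisize particles and place a boundary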
!---------------------------------------------------------------------------
  SUBROUTINE calcnbperz(p,nbperz)
    USE basic, only: nz
    IMPLICIT NONE
    type(particles):: p
    INTEGER, INTENT(INOUT):: nbperz(0:)
    Integer::i, zindex

    nbperz=0
    !! $OMP PARALLEL DO DEFAULT(SHARED) reduction(+:nbperz), private(zindex,i)
    Do i=1,p%Nploc
      ! We make sure zindex is in [0, nz-1] to avoid segmentation faults
      zindex=min(nz-1,max(p%zindex(i),0))
      nbperz(zindex)=nbperz(zindex)+1
    END DO
    !! $OMP END PARALLEL DO
  END SUBROUTINE calcnbperz

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief In the case of MPI parallelism, computes the axial limits of the local domain.
!---------------------------------------------------------------------------
  SUBROUTINE calc_Zbounds(p, Zbounds, norder)
    ! Computes the start and end indices for the Z boundaries on the local process
    ! Computes which particles from the initial particle loading vector stay in the current process
    USE basic, ONLY: nz, cstep, mpirank, mpisize,step
    USE mpihelper
    TYPE(particles), INTENT(INOUT):: p
    INTEGER:: Zbounds(0:)
    INTEGER:: norder(2)
    INTEGER:: old_Zbounds(0:size(Zbounds,1)-1)
    INTEGER:: k, i, nbparts
    REAL(kind=db):: idealnbpartsperproc
    INTEGER, DIMENSION(0:nz-1):: partspercol ! Vector containing the number of particles between zgrid(n) and zgrid(n+1)
    INTEGER:: Zmin, Zmax                     ! Minimum and maximum indices of particles in Z direction
    INTEGER:: Zperproc, ierr, remparts
    CHARACTER(12)::fmt

    ! Calculates the axial distribution integrated along the radial direction
    call calcnbperz(p,partspercol)

    ! Gather this data on all nodes
    if(step .gt. 0 .and. mpisize .gt. 1) THEN
      old_Zbounds=Zbounds
      CALL MPI_ALLREDUCE(MPI_IN_PLACE, partspercol, nz, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)
    END IF

    ! Estimate the ideal number of particles per MPI worker
    idealnbpartsperproc = p%Nptot/mpisize

    ! Find the start and end indices where particles are present
    Zmin=0
    Zmax=nz-1
    Do k=0,nz-1
      if(partspercol(k) .gt.0) then
        Zmin=k
        exit
      end if
    end do
    Do k=nz-1,0,-1
      if(partspercol(k) .gt.0) then
        Zmax=k
        exit
      end if
    end do

    ! Find naive axial limits assuming a uniform axial distribution
    IF(Zmax .le. 0) Zmax=nz-1
    IF(Zmin .gt. nz) Zmin=0
    Zperproc=(Zmax-Zmin)/mpisize

    IF (Zperproc .lt. 1 .or. cstep .eq. 0) THEN
      !! No particles are present initially
      Zperproc=nz/mpisize
      Zmin=0
      ! Define boundaries using a naive guess on start or restart (allows starting with 0 parts)
      DO k=1,mpisize-1
        IF(k .lt. mpisize-1-MODULO(Zmax-Zmin,mpisize)) THEN
          Zbounds(k)=Zmin+k*Zperproc-1
        ELSE
          Zbounds(k)=Zmin+k*Zperproc-1+k-mpisize+2+MODULO(Zmax-Zmin,mpisize)
        END IF
      END DO
    ELSE
      i=0
      ! Define axial boundaries using the axial distribution information.
      ! The subdomains are not equal
      remparts=p%Nptot
      DO k=1,mpisize-1
        nbparts=0
        DO WHILE(nbparts<0.98*idealnbpartsperproc .and. i .lt. Zmax .and. (nbparts+partspercol(i)).lt.1.25*idealnbpartsperproc)
          nbparts=nbparts+partspercol(i)
          i=i+1
        END DO
        remparts=remparts-nbparts
        Zbounds(k)=i
      END DO
    END IF

    IF(step .gt. 0 .and. mpirank .eq. 0) THEN
      Do i=1,mpisize-1
        ! We check that the new limits will not exceed the old limits of the left and right process.
        ! This avoids particle communications with processes > mpirank+2 and < mpirank-2.
        ! However, this should converge over time
        IF(Zbounds(i) .lt. old_Zbounds(i-1)) Zbounds(i)=old_Zbounds(i-1)
        if(Zbounds(i) .gt. old_Zbounds(i+1)) Zbounds(i)=old_Zbounds(i+1)
        ! If a process would have an axial domain shorter than the axial norder, we revert to the old boundaries.
        IF((Zbounds(i)-Zbounds(i-1)).lt. norder(1) .or. (Zbounds(i+1)-Zbounds(i)).lt. norder(1)) THEN
          Zbounds=old_Zbounds
          EXIT
        END IF
      END DO
    END IF

    ! Send the new boundaries to all the workers
    CALL MPI_Bcast(Zbounds,mpisize+1,MPI_INTEGER,0,MPI_COMM_WORLD, ierr)

    DO k=0,mpisize-1
      Nplocs_all(k)=SUM(partspercol(Zbounds(k):Zbounds(k+1)-1))
    END DO
    if(mpirank .eq. 0) THEN
      WRITE(fmt,'(a,i3,a)')"(a,",mpisize+1, "i5)"
      WRITE(*,fmt) "Zbounds: ", Zbounds
      WRITE(fmt,'(a,i3,a)')"(a,",mpisize, "i8)"
      WRITE(*,fmt) "Nplocs: ", Nplocs_all
    END IF
  END SUBROUTINE calc_Zbounds

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief After a restart, keep only the particles in the local domain of the current MPI worker
!---------------------------------------------------------------------------
  SUBROUTINE keep_mpi_self_parts(p,Zbounds)
    TYPE(particles),INTENT(INOUT):: p
    INTEGER,INTENT(in)::Zbounds(0:)
    INTEGER :: i, partstart, old_sum,ierr

    partstart=1
    p%Nploc=0
    Do i=1,p%Nptot
      IF(p%Zindex(i).ge.Zbounds(mpirank).and.p%Zindex(i).lt.Zbounds(mpirank+1)) THEN
        p%Nploc=p%Nploc+1
        CALL move_part(p,i,p%Nploc)
      END IF
    END DO
    old_sum=p%Nptot
    CALL MPI_REDUCE(p%Nploc, p%Nptot,1,MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
    IF(p%Nptot .ne. old_sum) THEN
      WRITE(*,*) "Error in particle distribution kept: ", p%Nptot, "/",old_sum
      !call MPI_Abort(MPI_COMM_WORLD, -1, ierr)
      !stop
    END IF
  END SUBROUTINE keep_mpi_self_parts

!_______________________________________________________________________________
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Manage the particle communication between neighbours.
!> This routine is responsible for receiving the incoming particles from the MPI neighbours and for sending its outgoing
!> particles to these neighbours
!
!> @param [in] lsendnbparts number of particles to send to the left neighbour (mpirank-1)
!> @param [in] rsendnbparts number of particles to send to the right neighbour (mpirank+1)
-!> @param [in] sendholes array containing the indices of the particle leaving the local domain in ascending order. If the index is positive, the particle goes to the right neigbour, and to the left neighbour if the index is negative
!---------------------------------------------------------------------------
-  SUBROUTINE particlescommunication(p, lsendnbparts, rsendnbparts, sendholes, receivednbparts, procs)
+  SUBROUTINE particlescommunication(p, lsendnbparts, rsendnbparts, receivednbparts, procs)
    USE mpihelper, ONLY: particle_type
#ifdef _DEBUG
    USE basic, ONLY: step
#endif
    type(particles), INTENT(INOUT):: p
-    INTEGER, INTENT(in) :: lsendnbparts, rsendnbparts
+    INTEGER, INTENT(inout) :: lsendnbparts, rsendnbparts
    INTEGER, INTENT(out) :: receivednbparts
-    INTEGER, INTENT(in) :: sendholes(:)
    INTEGER, INTENT(in) :: procs(2)
    INTEGER :: rrecvnbparts=0, lrecvnbparts=0
    INTEGER :: sendrequest(2), recvrequest(2)
    INTEGER :: sendstatus(MPI_STATUS_SIZE,2), recvstatus(MPI_STATUS_SIZE,2)
    TYPE(particle), ALLOCATABLE :: rrecvpartbuff(:), lrecvpartbuff(:), rsendpartbuff(:), lsendpartbuff(:) ! buffers to send and receive particles from the left and right processes
    INTEGER :: lsentnbparts, rsentnbparts
    INTEGER :: lreceivednbparts, rreceivednbparts, ierr

    lsentnbparts=lsendnbparts
    rsentnbparts=rsendnbparts
    sendrequest=MPI_REQUEST_NULL
    recvrequest=MPI_REQUEST_NULL
    lrecvnbparts=0
    rrecvnbparts=0
    ! Send and receive the number of particles to exchange
    CALL MPI_IRECV(lrecvnbparts, 1, MPI_INTEGER, procs(1), nbpartsexchange_tag, MPI_COMM_WORLD, recvrequest(1), ierr)
    CALL MPI_IRECV(rrecvnbparts, 1, MPI_INTEGER, procs(2), nbpartsexchange_tag, MPI_COMM_WORLD, recvrequest(2), ierr)
    CALL MPI_ISEND(lsentnbparts, 1, MPI_INTEGER, procs(1), nbpartsexchange_tag, MPI_COMM_WORLD, sendrequest(1), ierr)
    CALL MPI_ISEND(rsentnbparts, 1, MPI_INTEGER, procs(2), nbpartsexchange_tag, MPI_COMM_WORLD, sendrequest(2), ierr)

    CALL MPI_Waitall(2,recvrequest(1:2), recvstatus(:,1:2), ierr)
    recvrequest=MPI_REQUEST_NULL
    lreceivednbparts=lrecvnbparts
    rreceivednbparts=rrecvnbparts

    ! Re/allocate enough memory to store the incoming particles
    ALLOCATE(rrecvpartbuff(rreceivednbparts))
    ALLOCATE(lrecvpartbuff(lreceivednbparts))

    ! Receive particles from the left and right processes into the corresponding buffers
    IF ( lrecvnbparts .gt. 0) THEN
      CALL MPI_IRECV(lrecvpartbuff, lreceivednbparts, particle_type, procs(1), partsexchange_tag, MPI_COMM_WORLD, recvrequest(1), ierr)
    END IF
    IF( rrecvnbparts .gt. 0) THEN
      CALL MPI_IRECV(rrecvpartbuff, rreceivednbparts, particle_type, procs(2), partsexchange_tag, MPI_COMM_WORLD, recvrequest(2), ierr)
    END IF

    ALLOCATE(rsendpartbuff(rsendnbparts))
    ALLOCATE(lsendpartbuff(lsendnbparts))

    ! Copy the leaving particles to the corresponding send buffers
    IF ( (lsendnbparts + rsendnbparts) .gt. 0) THEN
-      CALL AddPartSendBuffers(p, lsendnbparts, rsendnbparts, sendholes, lsendpartbuff, rsendpartbuff)
+      CALL AddPartSendBuffers(p, lsendnbparts, rsendnbparts, lsendpartbuff, rsendpartbuff)
    END IF

    CALL MPI_Waitall(2,sendrequest(1:2), sendstatus(:,1:2), ierr)

    ! Send the particles to the left and right neighbours
    IF( lsendnbparts .gt. 0) THEN
      CALL MPI_ISEND(lsendpartbuff, lsendnbparts, particle_type, procs(1), partsexchange_tag, MPI_COMM_WORLD, sendrequest(1), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"snding ", lsendnbparts , " to left at step: ",step
#endif
    END IF
    IF( rsendnbparts .gt. 0) THEN
      CALL MPI_ISEND(rsendpartbuff, rsendnbparts, particle_type, procs(2), partsexchange_tag, MPI_COMM_WORLD, sendrequest(2), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"snding ", rsendnbparts , " to right at step: ",step
#endif
    END IF

+    receivednbparts=rreceivednbparts+lreceivednbparts
+
+    IF(p%Nploc+receivednbparts-lsendnbparts-rsendnbparts .gt. size(p%pos,2)) THEN
+      CALL change_parts_allocation(p,receivednbparts)
+    END IF
+
    ! Receive the incoming parts in the receive buffers
    IF ( lreceivednbparts .gt. 0) THEN
      CALL MPI_Wait(recvrequest(1), recvstatus(:,1), ierr)
      IF(ierr .ne. MPI_SUCCESS) THEN
        WRITE(*,*) "Error in particle reception on proc:", mpirank, " error code:", ierr, "status:", recvstatus(:,1)
        CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr)
      END IF
#ifdef _DEBUG
      !WRITE(*,*)"rcvd ", lreceivednbparts , " from left at step: ",step
#endif
    END IF
    IF ( rreceivednbparts .gt. 0) THEN
      CALL MPI_Wait(recvrequest(2), recvstatus(:,2), ierr)
      IF(ierr .ne. MPI_SUCCESS) THEN
        WRITE(*,*) "Error in particle reception on proc:", mpirank, " error code:", ierr, "status:", recvstatus(:,2)
        CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr)
      END IF
#ifdef _DEBUG
      !WRITE(*,*)"rcvd ", rreceivednbparts , " from right at step: ",step
#endif
    END IF
-    receivednbparts=rreceivednbparts+lreceivednbparts
-    IF(p%Nploc+receivednbparts-lsendnbparts-rsendnbparts .gt. size(p%pos,2)) THEN
-      CALL change_parts_allocation(p,receivednbparts)
-    END IF
    ! Copy the incoming particles from the receive buffers to the simulation parts variable
    CALL Addincomingparts(p, rreceivednbparts, lreceivednbparts, lsendnbparts+rsendnbparts, &
-    &                     sendholes, lrecvpartbuff, rrecvpartbuff)
+    &                     lrecvpartbuff, rrecvpartbuff)

    ! Wait for the outgoing particles to be fully received by the neighbours
    IF( lsendnbparts .gt. 0) THEN
      CALL MPI_Wait(sendrequest(1), sendstatus(:,1), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"sent ", lsentnbparts , " to left at step: ",step
#endif
    END IF
    IF( rsendnbparts .gt. 0) THEN
      CALL MPI_Wait(sendrequest(2), sendstatus(:,2), ierr)
#ifdef _DEBUG
      !WRITE(*,*)"sent ", rsentnbparts , " to right at step: ",step
#endif
    END IF
!
!
  END SUBROUTINE particlescommunication

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy the particles from the receive buffers to the local simulation variable parts.
!> The incoming particles will first be stored in the holes left by the outgoing particles, then they
!> will be added at the end of the parts variable
!
!> @param [in] rrecvnbparts number of particles received from the right neighbour (mpirank+1)
!> @param [in] lrecvnbparts number of particles received from the left neighbour (mpirank-1)
!> @param [in] sendnbparts total number of particles having left the local domain
-!> @param [in] sendholes array containing the indices of the particle having left the local domain in ascending order.
!---------------------------------------------------------------------------
-  SUBROUTINE Addincomingparts(p, rrecvnbparts, lrecvnbparts, sendnbparts, sendholes,lrecvpartbuff, rrecvpartbuff)
+  SUBROUTINE Addincomingparts(p, rrecvnbparts, lrecvnbparts, sendnbparts,lrecvpartbuff, rrecvpartbuff)
!
    USE mpihelper
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(in) :: rrecvnbparts, lrecvnbparts, sendnbparts
-    INTEGER, INTENT(in) :: sendholes(:)
    TYPE(particle), INTENT(IN) :: rrecvpartbuff(:), lrecvpartbuff(:)
    INTEGER k,partpos

    ! First import the particles coming from the right
    IF(rrecvnbparts .gt. 0) THEN
      Do k=1,rrecvnbparts
        IF(k .le. sendnbparts) THEN
          ! Fill the holes left by sent parts
-          partpos=abs(sendholes(k))
+          partpos=abs(p%sendhole(k))
        ELSE
          ! Add at the end of parts and keep track of the number of parts
          p%Nploc=p%Nploc+1
          partpos=p%Nploc
        END IF
        CALL Insertincomingpart(p, rrecvpartbuff(k), partpos)
      END DO
    END IF
    ! Then import the particles coming from the left
    IF(lrecvnbparts .gt. 0) THEN
      Do k=1,lrecvnbparts
        IF(k+rrecvnbparts .le. sendnbparts) THEN
          ! Fill the holes left by sent parts
-          partpos=abs(sendholes(k+rrecvnbparts))
+          partpos=abs(p%sendhole(k+rrecvnbparts))
        ELSE
          ! Add at the end of parts and keep track of the number of parts
          p%Nploc=p%Nploc+1
          partpos=p%Nploc
        END IF
        CALL Insertincomingpart(p, lrecvpartbuff(k), partpos)
      END DO
    END IF
!
  END SUBROUTINE Addincomingparts

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy the particles from the local parts variable to the left and right send buffers.
!
!> @param [in] lsendnbparts number of particles to send to the left neighbour (mpirank-1)
!> @param [in] rsendnbparts number of particles to send to the right neighbour (mpirank+1)
-!> @param [in] sendholes array containing the indices of the particle leaving the local domain in ascending order. If the index is positive, the particle goes to the right neigbour, and to the left neighbour if the index is negative
!---------------------------------------------------------------------------
-  SUBROUTINE AddPartSendBuffers(p, lsendnbparts, rsendnbparts, sendholes, lsendpartbuff, rsendpartbuff)
+  SUBROUTINE AddPartSendBuffers(p, lsendnbparts, rsendnbparts, lsendpartbuff, rsendpartbuff)
!
    USE mpihelper
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(in) :: lsendnbparts, rsendnbparts
-    INTEGER, INTENT(in) :: sendholes(:)
    TYPE(particle), INTENT(OUT) :: rsendpartbuff(:), lsendpartbuff(:)
    INTEGER:: partpos, k
    INTEGER:: lsendpos, rsendpos

    lsendpos=0
    rsendpos=0

    ! Loop over the outgoing particles and fill the correct send buffer
    Do k=lsendnbparts+rsendnbparts,1,-1
-      partpos=abs(sendholes(k))
-      IF(sendholes(k) .GT. 0) THEN
+      partpos=abs(p%sendhole(k))
+      IF(p%sendhole(k) .GT. 0) THEN
        rsendpos=rsendpos+1
        CALL Insertsentpart(p, rsendpartbuff, rsendpos, partpos)
-      ELSE IF(sendholes(k) .LT. 0) THEN
+      ELSE IF(p%sendhole(k) .LT. 0) THEN
        lsendpos=lsendpos+1
        CALL Insertsentpart(p, lsendpartbuff, lsendpos, partpos)
      END IF
    END DO
!
!
  END SUBROUTINE AddPartSendBuffers

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Add the particles stored in the buffer to the main particle storage p, in particle form
!> @param[in] p particles structure to add particles to
!> @param[in] buffer memory containing the particles to be added
!> @param[in] nb_ins number of particles stored in buffer
!---------------------------------------------------------------------------
  SUBROUTINE add_list_created_part(p, buffer,nb_ins)
    IMPLICIT NONE
    TYPE(particles), INTENT(INOUT):: p
    TYPE(particle), ALLOCATABLE, INTENT(in) :: buffer(:)
    INTEGER, OPTIONAL:: nb_ins
    INTEGER:: i, nptotinit, parts_size_increase, nb_added

    nptotinit=p%Nploc+1
    if(present(nb_ins)) THEN
      nb_added=nb_ins
    ELSE
      nb_added=size(buffer,1)
    end if
    IF(nb_added .le. 0) RETURN ! No particles to add

    ! If there is not enough space in the parts simulation buffer, increase the parts size
    IF(p%Nploc + nb_added .gt. size(p%pos,2)) THEN
      parts_size_increase=Max(floor(0.1*size(p%pos,2)),nb_added)
      CALL change_parts_allocation(p, parts_size_increase)
    END IF

    DO i=1,nb_added
      CALL add_created_particle(p,buffer(i))
    END DO
    nb_added=p%Nploc-nptotinit+1
    if(p%is_field) then
      IF(allocated(p%addedlist)) then
        call change_array_size_int(p%addedlist,2)
      else
        allocate(p%addedlist(2))
      end if
      p%addedlist(size(p%addedlist)-1)=nptotinit
      p%addedlist(size(p%addedlist))=nb_added
    end if
  END SUBROUTINE add_list_created_part

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
!
! DESCRIPTION:
!> @brief Add the particles stored in the linked buffer to the main particle storage p, in particle form
!> @param[in] p particles structure to add particles to
!> @param[in] linked_buffer memory containing the particles to be added in linked list format
!> @param[in] destroy Indicates if the memory of the linked buffer must be freed after copy to p
!> @param[in] zerovelocity Defines if the velocity of the particles in p is set to 0 or copied from the buffer
!---------------------------------------------------------------------------
  SUBROUTINE add_linked_created_part(p, linked_buffer, destroy, zerovelocity)
    IMPLICIT NONE
    TYPE(particles), INTENT(INOUT):: p
    TYPE(linked_part_row), INTENT(in) :: linked_buffer
    LOGICAL:: destroy, zerovelocity
    TYPE(linked_part), POINTER:: part
    INTEGER:: i, nptotinit, parts_size_increase, nb_added

    nptotinit=p%Nploc+1
    nb_added=linked_buffer%n
    IF(nb_added .le. 0) RETURN ! No particles to add

    ! If there is not enough space in the parts simulation buffer, increase the parts size
    IF(p%Nploc + nb_added .gt. size(p%pos,2)) THEN
      parts_size_increase=Max(floor(0.1*size(p%pos,2)),nb_added)
      CALL change_parts_allocation(p, parts_size_increase)
    END IF

    part=>linked_buffer%start
    DO i=1,nb_added
      CALL add_created_particle(p,part%p)
      part=>part%next
    END DO
    nb_added=p%Nploc-nptotinit+1
    if(p%is_field) then
      IF(allocated(p%addedlist)) then
        call change_array_size_int(p%addedlist,2)
      else
        allocate(p%addedlist(2))
      end if
      p%addedlist(size(p%addedlist)-1)=nptotinit
      p%addedlist(size(p%addedlist))=nb_added
    end if
    if(zerovelocity)then
      p%U(:,nptotinit:p%Nploc)=0
      !p%UTHET(nptotinit:p%Nploc)=0
      !p%UZ(nptotinit:p%Nploc)=0
    end if
    if (destroy) call destroy_linked_parts(linked_buffer%start)
    if (p%is_field) then
      ! We keep track of energy by removing the ionisation energy
      ! with conversion from electronvolt to joules
      loc_etot0=loc_etot0-sum(p%pot(nptotinit:p%Nploc)*elchar)
    end if
  END SUBROUTINE add_linked_created_part

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Add created particles from a buffer of type particle to the main species storages.
!
!> @param [in] p species memory where we want to add particles
!> @param [in] part particle buffer storing the data we want to add to p
!---------------------------------------------------------------------------
  SUBROUTINE add_created_particle(p,part)
    USE geometry
    TYPE(particles):: p
    TYPE(particle):: part

    p%Nploc=p%Nploc+1
    p%newindex=p%newindex+1
    ! Add the data to the p structure
    CALL Insertincomingpart(p, part, p%Nploc)
    p%partindex(p%Nploc)=p%newindex
    ! Calculate the new domain weight
    CALL dom_weight(p%pos(3,p%Nploc),p%pos(1,p%Nploc),p%geomweight(0,p%Nploc))
    ! Delete the particle if it is outside of the computational domain
    if( .not. is_inside(p,p%Nploc) ) then
      p%Nploc=p%Nploc-1
      p%newindex=p%newindex-1
      RETURN
    end if
    ! Calculate the geometric weight for the Poisson solver and the grid indices
    CALL geom_weight(p%pos(3,p%Nploc),p%pos(1,p%Nploc),p%geomweight(:,p%Nploc))
    call p_calc_rzindex(p,p%Nploc)
  END SUBROUTINE add_created_particle

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Checks if the particle id in p is inside the simulation domain
!
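!> Usage sketch (this mirrors the check in add_created_particle above):
!>   if (.not. is_inside(p, p%Nploc)) then
!>     p%Nploc = p%Nploc - 1   ! discard the newly added particle
!>   end if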
!> @param [in] p species memory
!> @param [in] id index of the particle we want to test
!---------------------------------------------------------------------------
  function is_inside(p,id)
    Use basic, ONLY: rgrid,zgrid, nr, nz
    IMPLICIT NONE
    logical :: is_inside
    type(particles) :: p
    integer :: id

    is_inside=.true.
    ! Check if the particle is in the simulation domain
    if(p%geomweight(0,id).le.0)then
      is_inside=.false.
      return
    end if
    ! Check if the particle is in the simulation grid
    if(p%pos(1,id).ge.rgrid(nr) .or. p%pos(1,id) .le. rgrid(0))then
      is_inside=.false.
      return
    end if
    if(p%pos(3,id).ge.zgrid(nz) .or. p%pos(3,id) .le. zgrid(0))then
      is_inside=.false.
      return
    end if
  end function is_inside

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Calculate the energy added by new particles to the system for diagnostic purposes
!
!> @param [in] p species memory
!---------------------------------------------------------------------------
  SUBROUTINE calc_newparts_energy(p)
    USE basic, ONLY: phinorm, nlclassical
    type(particles)::p
    integer::i,n,nptotinit,nbadded, nptotend

    ! Exit if these particles don't participate in the Poisson equation
    if(.not. p%is_field) return

    if( allocated(p%addedlist)) then
      n=size(p%addedlist)
      ! For each set of added particles
      Do i=1,n,2
        nptotinit=p%addedlist(i)
        nbadded=p%addedlist(i+1)
        p%nbadded=p%nbadded+nbadded
        nptotend=nptotinit+nbadded-1

        ! Potential energy
        loc_etot0=loc_etot0+p%q*p%weight*sum(p%pot(nptotinit:nptotend))*phinorm
        ! Kinetic energy
        IF(.not. nlclassical) THEN
          loc_etot0=loc_etot0+p%m*p%weight*vlight**2*sum(0.5*(p%Gamma(nptotinit:nptotend)+p%Gammaold(nptotinit:nptotend))-1)
        ELSE
          loc_etot0=loc_etot0+0.5*p%m*p%weight*vlight**2*sum(p%U(1,nptotinit:nptotend)*p%Uold(1,nptotinit:nptotend) &
          &         +p%U(3,nptotinit:nptotend)*p%Uold(3,nptotinit:nptotend) &
          &         +p%U(2,nptotinit:nptotend)*p%Uold(2,nptotinit:nptotend))
        END IF
      end do
      deallocate(p%addedlist)
    end if
  end subroutine calc_newparts_energy

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Delete the particle at the given index, removing its energy from the diagnosed quantities
!
!> @param [in] index index of the particle to be deleted
!---------------------------------------------------------------------------
  SUBROUTINE delete_part(p, index, replace)
    !! This will destroy the particle at the given index
    USE constants, ONLY: vlight
    USE bsplines
    USE geometry
    USE basic, ONLY: phinorm, nlclassical
    TYPE(particles), INTENT(INOUT):: p
    INTEGER, INTENT(IN) :: index
    LOGICAL, OPTIONAL :: replace
    LOGICAL:: repl

    IF(present(replace)) THEN
      repl=replace
    ELSE
      repl=.true.
    END IF

    ! Computes the potential at the particle position with phi_ext+phi_s
    IF(index .le. p%Nploc) THEN
      IF(p%is_field) THEN
        loc_etot0=loc_etot0-p%q*p%weight*(p%pot(index))*phinorm
        IF(.not. nlclassical) THEN
          loc_etot0=loc_etot0-p%m*p%weight*vlight**2*(p%Gamma(index)-1)
        ELSE
          loc_etot0=loc_etot0-0.5*p%m*p%weight*vlight**2*(p%U(1,index)**2+p%U(3,index)**2+p%U(2,index)**2)
        END IF
      END IF
      IF(repl) THEN
        ! We fill the gap
        CALL move_part(p, p%Nploc, index)
        p%partindex(p%Nploc)=-1
        ! Reduce the total number of simulated parts
        p%Nploc=p%Nploc-1
      END IF
    END IF
  END SUBROUTINE delete_part

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Loads a uniform density of particles on a rectangular annulus with Maxwellian velocities
!
!> @param [inout] p particle memory to load into
!> @param [inout] VR array of radial velocity for the particles
!> @param [inout] VTHET array of azimuthal velocity for the particles
!> @param [inout] VZ array of axial velocity for the particles
!---------------------------------------------------------------------------
  SUBROUTINE loaduniformRZ(p, VR,VZ,VTHET)
    USE basic, ONLY: plasmadim, rnorm, temp, qsim, msim
    USE constants, ONLY: me, kb, elchar
    REAL(kind=db), INTENT(inout) ::VZ(:), VR(:), VTHET(:)
    TYPE(particles), INTENT(INOUT):: p

    CALL creat_parts(p, size(VR,1))
    p%Nploc=size(VR,1)
    p%Nptot=size(VR,1)
    p%q=sign(elchar,qsim)
    p%weight=msim/me
    p%m=me
    p%qmRatio=qsim/msim

    ! Initial distribution in z with normalisation
    CALL loduni(1,p%pos(3,1:p%Nploc))
    p%pos(3,1:p%Nploc)=(plasmadim(1)+(plasmadim(2)-plasmadim(1))*p%pos(3,1:p%Nploc))/rnorm
    ! Initial distribution in r with normalisation
    CALL lodlinr(2,p%pos(1,1:p%Nploc),plasmadim(3),plasmadim(4))
    p%pos(1,1:p%Nploc)=p%pos(1,1:p%Nploc)/rnorm
    ! Initial velocity distribution
    CALL loadGaussianVelocities(p, VR, VZ, VTHET, temp)
  END SUBROUTINE loaduniformRZ

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Loads a cloud of electrons trapped in a magnetic mirror according to Davidson's equilibrium
!> (p. 117 of the physics of non-neutral plasmas book)
!
!> @param [inout] p particle memory to load into
!> @param [inout] VR array of radial velocity for the particles
!> @param [inout] VTHET array of azimuthal velocity for the particles
!> @param [inout] VZ array of axial velocity for the particles
!---------------------------------------------------------------------------
  SUBROUTINE loadDavidson(p, VR,VZ,VTHET, lodr)
    USE constants, ONLY: me, kb, elchar
    USE basic, ONLY: nplasma, rnorm, plasmadim, distribtype, H0, P0, Rcurv, width, qsim, msim, &
    &                omegac, zgrid, nz, rnorm, n0, nblock, temp
    procedure(rloader)::lodr
    TYPE(particles), INTENT(INOUT):: p
    REAL(kind=db), INTENT(INOUT)::VZ(:), VR(:), VTHET(:)
    REAL(kind=db), DIMENSION(:), ALLOCATABLE::ra, rb, z
    REAL(kind=db) :: r0, deltar2, halfLz, Mirrorratio, Le, VOL
    INTEGER :: j, n, blockstart, blockend, addedpart, remainparts
    INTEGER, DIMENSION(:), ALLOCATABLE :: blocksize

    CALL creat_parts(p, size(VR,1))
    p%Nploc=size(VR,1)
    p%Nptot=p%Nploc
    Allocate(ra(nblock),rb(nblock), z(0:nblock))
    !r0=(plasmadim(4)+plasmadim(3))/2
    r0=sqrt(4*H0/(me*omegac**2))
    halfLz=(zgrid(nz)+zgrid(0))/2
    MirrorRatio=(Rcurv-1)/(Rcurv+1)
    z(0)=plasmadim(1)
    DO n=1,nblock
      ! Compute limits in radius and load radii for each part
      Le=(plasmadim(2)-plasmadim(1))/nblock*(n-0.5)-halfLz*rnorm+plasmadim(1)
      z(n)=z(0)+n*(plasmadim(2)-plasmadim(1))/nblock
      deltar2=1-MirrorRatio*cos(2*pi*Le/width)
      rb(n)=r0/deltar2*sqrt(1-P0*abs(omegac)/2/H0*deltar2+sqrt(1-P0*abs(omegac)/H0*deltar2))
      ra(n)=r0/deltar2*sqrt(1-P0*abs(omegac)/2/H0*deltar2-sqrt(1-P0*abs(omegac)/H0*deltar2))
    END DO
    VOL=SUM(2*pi*MINVAL(ra)*(rb-ra)*(plasmadim(2)-plasmadim(1))/nblock)
    qsim=VOL*n0*elchar/nplasma
    msim=abs(qsim)/elchar*me
    p%weight=abs(qsim)/elchar
    p%m=me
    p%q=sign(elchar,qsim)
    p%qmRatio=p%q/p%m

    blockstart=1
    blockend=0
    ALLOCATE(blocksize(nblock))
    WRITE(*,*) "blocksize: ", size(blocksize), nblock
    DO n=1,nblock
      blocksize(n)=nplasma/VOL*2*pi*MINVAL(ra)*(rb(n)-ra(n))*(plasmadim(2)-plasmadim(1))/nblock
    END DO
    remainparts=p%Nploc-SUM(blocksize)
    addedpart=1
    n=nblock/2
    j=1
    DO WHILE(remainparts .GT. 0)
      blocksize(n)=blocksize(n)+addedpart
      remainparts=remainparts-addedpart
      n=n+j
      j=-1*(j+SIGN(1,j))
    END DO

    CALL loadPartSlices(p, lodr, ra, rb, z, blocksize)

    IF(distribtype .eq. 5) THEN
      CALL loadGaussianVelocities(p, VR, VZ, VTHET, temp)
      VZ=VZ/4
      VR=VR*8
      VTHET=VTHET*8
    ELSE
      Call loadDavidsonVelocities(p, VR, VZ, VTHET, H0, P0)
    END IF
  END SUBROUTINE loadDavidson

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Computes the velocities for a cloud of electrons trapped in a magnetic mirror according to Davidson's equilibrium
!> (p. 117 of the physics of non-neutral plasmas book). This equilibrium assumes a single energy and a single canonical angular momentum
!
!> @param [inout] p particle memory to load into
!> @param [inout] VR array of radial velocity for the particles
!> @param [inout] VTHET array of azimuthal velocity for the particles
!> @param [inout] VZ array of axial velocity for the particles
!> @param [in] H0 Total energy of each particle
!> @param [in] P0 Initial canonical angular momentum of each particle
!---------------------------------------------------------------------------
  SUBROUTINE loadDavidsonVelocities(p, VR,VZ,VTHET, H0, P0)
    USE constants, ONLY: me, kb, elchar
    USE basic, ONLY: rnorm, Rcurv, B0, width, vnorm, zgrid, nz
    TYPE(particles), INTENT(INOUT):: p
    REAL(kind=db), INTENT(INOUT)::VZ(:), VR(:), VTHET(:)
    REAL(kind=db), INTENT(IN):: H0, P0
    REAL(kind=db) :: athetpos, rg, zg, halfLz, Mirrorratio, Pcomp, Acomp
    INTEGER :: i

    MirrorRatio=(Rcurv-1)/(Rcurv+1)
    halfLz=(zgrid(nz)+zgrid(0))/2

    ! Load the theta velocity;
    ! loading of the r and z velocities is done in adapt_vinit to have
    ! access to parts%pot
    DO i=1,p%Nploc
      ! Interpolation for the magnetic potential
      rg=p%pos(1,i)*rnorm
      zg=(p%pos(3,i)-halfLz)*rnorm
      Athetpos=0.5*B0*(rg - width/pi*MirrorRatio*bessi1(2*pi*rg/width)*COS(2*pi*zg/width))
      Pcomp=P0/rg/p%m
      Acomp=-p%qmRatio*Athetpos
      VTHET(i)=SIGN(MIN(abs(Pcomp+Acomp),sqrt(2*H0/p%m)),Pcomp+Acomp)
      !VTHET(i)=Pcomp+Acomp
    END DO
    VTHET=VTHET/vnorm
    VZ=0._db
    VR=0._db
    p%Davidson=.true.
    p%H0=H0
    p%P0=P0
  END SUBROUTINE loadDavidsonvelocities

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Computes the particle velocities according to a Maxwellian distribution of the given temperature [K]
!
!> @param [inout] p particle memory to load into
!> @param [inout] VR array of radial velocity for the particles
!> @param [inout] VTHET array of azimuthal velocity for the particles
!> @param [inout] VZ array of axial velocity for the particles
!> @param [in] temperature temperature in [K] of the distribution function
!---------------------------------------------------------------------------
  SUBROUTINE loadGaussianVelocities(p, VR,VZ,VTHET, temperature)
    USE basic, ONLY: vnorm
    USE constants, ONLY: kb
    REAL(kind=db), INTENT(inout) ::VZ(:), VR(:), VTHET(:)
    TYPE(particles), INTENT(INOUT):: p
    REAL(kind=db), INTENT(IN):: temperature
    REAL(kind=db):: vth

    ! Initial velocity distribution
    vth=sqrt(2.0/3.0*kb*temperature/p%m)/vnorm ! thermal velocity
    CALL lodgaus(3,VZ)
    CALL lodgaus(5,VR)
    CALL lodgaus(7,VTHET)
    VZ=VZ*vth
    VR=VR*vth
    VTHET=VTHET*vth
    p%temperature=temperature
    p%Davidson=.false.
  END SUBROUTINE loadGaussianVelocities

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
!
!--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Computes the particle velocities with a uniform distribution centered on meanv and bounded by meanv-spanv and meanv+spanv ! !> @param [inout] p particle memory to load into !> @param [inout] VR array of radial velocity for the particles !> @param [inout] VTHET array of azimuthal velocity for the particles !> @param [inout] VZ array of axial velocity for the particles !> @param [in] meanv mean velocity in each direction [m/s] !> @param [in] spanv extent of the velocity in each direction above and below the mean velocity [m/s] !--------------------------------------------------------------------------- SUBROUTINE loadFlatTopVelocities(p, VR,VZ,VTHET, meanv, spanv) USE basic, ONLY: vnorm USE constants, ONLY: kb REAL(kind=db), INTENT(inout) ::VZ(:), VR(:), VTHET(:) TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(INOUT):: meanv(3), spanv(3) ! Initial velocities distribution meanv=meanv/vnorm ! normalise the mean velocity spanv=spanv/vnorm ! normalise the velocity span CALL loduni(3,VZ) CALL loduni(5,VR) CALL loduni(7,VTHET) VR=(VR*2-1)*spanv(1)+meanv(1) VTHET=(VTHET*2-1)*spanv(2)+meanv(2) VZ=(VZ*2-1)*spanv(3)+meanv(3) p%Davidson=.false. END SUBROUTINE loadFlatTopVelocities !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Load slices of particles defined by axial and radial limits ! !> @param [inout] p particle memory to load into !> @param [in] lodr sampling function defining the particle distribution in r !> @param [in] ra lower radial limit of the slice !> @param [in] rb upper radial limit of the slice !> @param [in] z array giving the axial limits of each slice (slice i is between z(i-1) and z(i)) !> @param [in] blocksize array containing the number of particles for each slice !--------------------------------------------------------------------------- SUBROUTINE loadPartslices(p, lodr, ra, rb, z, blocksize) USE basic, ONLY: rnorm TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(IN)::ra(:), rb(:), z(0:) INTEGER, DIMENSION(:), INTENT(IN) :: blocksize procedure(rloader)::lodr INTEGER :: n, blockstart, blockend, nblock nblock=size(blocksize,1) blockstart=1 blockend=0 DO n=1,nblock blockstart=blockend+1 blockend=MIN(blockstart+blocksize(n)-1,p%Nploc) ! Initial distribution in z with normalisation between magnetic mirrors CALL loduni(1, p%pos(3,blockstart:blockend)) p%pos(3,blockstart:blockend)= (z(n-1)+p%pos(3,blockstart:blockend)*(z(n)-z(n-1)))/rnorm CALL lodr(2, p%pos(1,blockstart:blockend), ra(n), rb(n)) p%pos(1,blockstart:blockend)=p%pos(1,blockstart:blockend)/rnorm END DO END SUBROUTINE loadPartslices !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Reads a particle file to load a simulated species into the simulation !
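!> As an illustration of the expected layout (all numbers below are hypothetical, not taken from an actual run), a file in the 'slices' format holds the &partsload namelist followed by a '//slices' block giving, for each of the nblock slices, the lower axial limit z(i-1), the radial limits ra(i) and rb(i) and the particle count npartsslice(i), terminated by the upper axial limit z(nblock): !> &partsload nblock=2, mass=9.109e-31, charge=-1.602e-19, weight=100, radialtype=2, velocitytype=1, temperature=10000, partformat='slices' / !> //slices !> 0.00 0.005 0.010 5000 !> 0.05 0.005 0.010 5000 !> 0.10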
!--------------------------------------------------------------------------- SUBROUTINE read_part_file(p, partfilename, VR, VZ, VTHET) USE basic, ONLY: lu_partfile, rnorm, vnorm implicit None TYPE(particles), INTENT(INOUT):: p REAL(kind=db), DIMENSION(:), ALLOCATABLE, INTENT(INOUT)::VR, VZ, VTHET CHARACTER(len=*)::partfilename INTEGER:: nblock = 0 REAL(kind=db), Dimension(:), ALLOCATABLE:: ra, rb, z INTEGER, Dimension(:), ALLOCATABLE:: npartsslice INTEGER:: velocitytype=1 !< 1) gaussian with temp 2) Davidson with H0, P0 3) flat top with meanv, spanv INTEGER:: radialtype=1 !< 1) 1/R 2) uniform 3) 1/R^2 4) gauss INTEGER:: npartsalloc !< initial size of particles arrays INTEGER:: iiee_id !< index of species to add particles to for IIEE INTEGER:: neuttype_id !< index of neutral gas producing ions INTEGER:: material_id !< index determining the type of electrode material LOGICAL:: zero_vel !< logical to choose whether or not electrons are generated with non-zero initial velocity REAL(kind=db):: mass=me REAL(kind=db):: charge=-elchar REAL(kind=db):: weight=1.0 REAL(kind=db):: qmratioscale REAL(kind=db):: meanv(3) !< mean velocity in each direction for velocitytype 3 [m/s] REAL(kind=db):: spanv(3) !< pos/neg extent of velocity in each direction for velocitytype 3 [m/s] CHARACTER(len=256) :: header=' ' !< header of csv file section REAL(kind=db):: H0=3.2e-14 !< Total energy [J] REAL(kind=db):: P0=8.66e-25 !< Canonical angular momentum REAL(kind=db):: temperature=10000 !< temperature in kelvins real(kind=db):: n0 !< density factor LOGICAL :: is_test !< Defines if particles are saved on ittracer or not LOGICAL :: is_field !< Defines if particles contribute to Poisson solver LOGICAL :: calc_moments !< Defines if moments matrix must be calculated each it2d CHARACTER(len=16) :: partformat = 'slices' INTEGER:: i, ierr, openerr NAMELIST /partsload/ nblock, mass, charge, weight, npartsalloc, velocitytype, & & radialtype, temperature, H0, P0, is_test, n0, partformat, meanv, spanv, & & calc_moments, qmratioscale, is_field, iiee_id, neuttype_id, material_id, zero_vel ! Set defaults qmratioscale=1.0 weight=1.0 meanv=0 spanv=0 mass=me charge=-elchar calc_moments=.false. is_test=.false. is_field=.true. iiee_id = -1 neuttype_id=1 material_id=1 zero_vel = .true. ! Open the particle file OPEN(UNIT=lu_partfile,FILE=trim(partfilename),ACTION='READ',IOSTAT=openerr) header=' ' IF(openerr .ne. 0) THEN CLOSE(unit=lu_partfile) RETURN END IF READ(lu_partfile,partsload) IF(mpirank .eq.0) THEN WRITE(*,'(a,a)')"reading partfile: ", trim(partfilename) WRITE(*,partsload) END IF ! The plasma cloud is defined as a set of slices IF(trim(partformat).eq.'slices') THEN IF( nblock .ge. 1) THEN ALLOCATE(z(0:nblock),ra(nblock),rb(nblock), npartsslice(nblock)) DO WHILE(header(1:8) .ne. '//slices') READ(lu_partfile,'(a)') header END DO DO i=1,nblock READ(lu_partfile,*) z(i-1),ra(i),rb(i),npartsslice(i) END DO READ(lu_partfile,*) z(nblock) CALL creat_parts(p,max(npartsalloc,sum(npartsslice))) p%Nploc=sum(npartsslice) p%Nptot=p%Nploc IF( allocated(VR) ) THEN DEALLOCATE(VR,VZ,VTHET) end if if(.not. allocated(VR)) THEN ALLOCATE(VR(p%Nploc)) ALLOCATE(VZ(p%Nploc)) ALLOCATE(VTHET(p%Nploc)) END IF p%m=mass p%q=charge p%weight=weight p%qmRatio=charge/mass*qmratioscale p%is_test=is_test p%is_field=is_field p%calc_moments=calc_moments p%Newindex=sum(npartsslice) p%iiee_id = iiee_id p%neuttype_id = neuttype_id p%material_id = material_id p%zero_vel = zero_vel SELECT CASE(radialtype) CASE(1) ! 1/R distribution in R CALL loadPartslices(p, lodunir, ra, rb, z, npartsslice) CASE(2) !
flat top distribution in R CALL loadPartslices(p, lodlinr, ra, rb, z, npartsslice) CASE(3) ! 1/R^2 distribution in R CALL loadPartslices(p, lodinvr, ra, rb, z, npartsslice) CASE(4) ! gaussian distribution in R CALL loadPartslices(p, lodgausr, ra, rb, z, npartsslice) CASE DEFAULT IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of radial distribution:", radialtype CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) END SELECT SELECT CASE(velocitytype) CASE(1) ! Gaussian with temperature CALL loadGaussianVelocities(p, VR, VZ, VTHET, temperature) CASE(2) ! Davidson magnetic mirror high wr equilibrium CALL loadDavidsonVelocities(p, VR, VZ, VTHET, H0, P0) CASE(3) ! flat top velocity CALL loadFlatTopVelocities(p, VR, VZ, VTHET, meanv, spanv) CASE DEFAULT IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of velocity distribution:", velocitytype CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) END SELECT END IF END IF ! The plasma cloud is defined as a set individual particles IF( trim(partformat) .eq. 'parts' ) THEN IF( nblock .ge. 1) THEN !Allocate necessary memory CALL creat_parts(p,max(npartsalloc,nblock)) IF( allocated(VR) ) THEN DEALLOCATE(VR,VZ,VTHET) end if if(.not. allocated(VR)) THEN ALLOCATE(VR(nblock)) ALLOCATE(VZ(nblock)) ALLOCATE(VTHET(nblock)) END IF ! Read the particles from the file DO WHILE(header(1:8) .ne. '//parts') READ(lu_partfile,'(a)') header END DO DO i=1,nblock READ(lu_partfile,*) p%pos(1,i),p%pos(2,i),p%pos(3,i), VR(i), VTHET(i), VZ(i) END DO p%Nploc=nblock p%Nptot=p%Nploc p%m=mass p%q=charge p%Newindex=nblock p%weight=weight p%qmRatio=charge/mass*qmratioscale p%is_test=is_test p%is_field=is_field p%calc_moments=calc_moments p%iiee_id = iiee_id p%neuttype_id = neuttype_id p%material_id = material_id p%zero_vel = zero_vel !normalizations p%pos(1,:)=p%pos(1,:)/rnorm p%pos(3,:)=p%pos(3,:)/rnorm !p%z=p%z/rnorm VR=VR/vnorm VTHET=VTHET/vnorm VZ=VZ/vnorm END IF END IF CLOSE(unit=lu_partfile) END SUBROUTINE !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Increase the number of macroparticles by separating each previous macroparticles into !> samplefactor new macroparticles of equally divided weight. The new sub particles are distributed !> uniformly in space to maintain the density and other moments. ! !> @param [in] samplefactor multiplicator of the number of macroparticles. !> @param [in] p particles type to increase. !--------------------------------------------------------------------------- SUBROUTINE upsample(p, samplefactor) USE basic, ONLY : nplasma, dr, dz INTEGER, INTENT(IN) ::samplefactor TYPE(particles), INTENT(INOUT):: p INTEGER:: i, j, currentindex REAL(kind=db), DIMENSION(p%Nploc) :: spreaddir ! random direction for the spread of each initial macro particle REAL(kind=db) :: dir ! Direction in which the particle is moved REAL(kind=db) :: dl ! Particle displacement used for ! 
Load and scale the direction angle for spreading the new particles CALL loduni(2, spreaddir) spreaddir=spreaddir*2*pi/samplefactor dl=min(minval(dz,1,dz.GT.0),minval(dr,1,dr.GT.0))/10 DO i=1,p%Nploc DO j=1,samplefactor-1 currentindex=p%Nploc+(i-1)*(samplefactor-1)+j CALL move_part(p,i,currentindex) p%partindex(currentindex)=currentindex dir = spreaddir(i)+2*pi*j/samplefactor p%pos(1,currentindex)=p%pos(1,currentindex) + dl*cos(dir) p%pos(3,currentindex)=p%pos(3,currentindex) + dl*sin(dir) END DO p%partindex(i)=i p%pos(1,i)=p%pos(1,i) + dl*cos(spreaddir(i)) p%pos(3,i)=p%pos(3,i) + dl*sin(spreaddir(i)) END DO nplasma=nplasma*samplefactor p%weight=p%weight/samplefactor p%Nploc=p%Nploc*samplefactor p%Nptot=p%Nptot*samplefactor END SUBROUTINE upsample ! Taken from https://rosettacode.org/wiki/Sorting_algorithms/Radix_sort#Fortran ! No Copyright is exerted due to considerable prior art in the Public Domain. ! This Fortran version by Peter Kelly ~ peter.kelly@acm.org ! ! Permission is hereby granted, free of charge, to any person obtaining ! a copy of this software and associated documentation files (the ! "Software"), to deal in the Software without restriction, including ! without limitation the rights to use, copy, modify, merge, publish, ! distribute, sublicense, and/or sell copies of the Software, and to ! permit persons to whom the Software is furnished to do so, subject to ! the following conditions: ! The above copyright notice and this permission notice shall be ! included in all copies or substantial portions of the Software. ! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ! EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ! MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ! IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ! TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ! SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ! ! Implementation of a classic Radix Sort LSD style :) SUBROUTINE LSDRADIXSORT(A , N) IMPLICIT NONE ! ! Dummy arguments ! INTEGER :: N INTEGER , target, DIMENSION(0:N - 1) :: A ! All arrays based off zero, one day I'll fix it INTENT (IN) N INTENT (INOUT) A ! ! Local variables ! INTEGER , DIMENSION(0:9) :: counts INTEGER :: digitplace INTEGER :: i INTEGER :: j INTEGER :: largestnum INTEGER, DIMENSION(0:N - 1) :: results ! digitplace = 1 ! Count of the keys largestnum = MAXVAL(A) DO WHILE ( (largestnum/digitplace)>0 ) counts = 0 ! Init the count array DO i = 0 , N - 1 , 1 J = (A(i)/digitplace) J = MODULO(j , 10) counts(j) = counts(j) + 1 END DO ! Change count(i) so that count(i) now contains actual position of this digit in result() ! Working similar to the counting sort algorithm DO i = 1 , 9 , 1 counts(i) = counts(i) + counts(i - 1) ! Build up the prefix sum END DO ! DO i = N - 1 , 0 , -1 ! Move from left to right j = (A(i)/digitplace) j = MODULO(j, 10) results(counts(j) - 1) = A(i) ! Need to subtract one as we are zero based but prefix sum is 1 based counts(j) = counts(j) - 1 END DO ! DO i = 0 , N - 1 , 1 ! Copy the semi-sorted data into the input A(i) = results(i) END DO ! digitplace = digitplace*10 END DO ! 
While loop RETURN END SUBROUTINE LSDRADIXSORT END MODULE beam diff --git a/src/fields_mod.f90 b/src/fields_mod.f90 index e890a2b..817eef6 100644 --- a/src/fields_mod.f90 +++ b/src/fields_mod.f90 @@ -1,1468 +1,1467 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: fields ! !> @author !> Patryk Kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> Module responsible for initializing the magnetic field, solving the Poisson equation and computing the moments of the particle distribution function !------------------------------------------------------------------------------ MODULE fields USE constants USE basic, ONLY: nr, nz, zgrid, rgrid, Br, Bz, Er, Ez, femorder, ngauss, nlppform, pot, Athet, & & splrz, splrz_ext, nlperiod, phinorm, nlPhis, nrank, mpirank, mpisize, step, it2d, timera, potxt, erxt, ezxt USE beam, ONLY: partslist USE bsplines USE mumps_bsplines use mpi Use omp_lib Use mpihelper, ONLY: db_type USE particletypes IMPLICIT NONE REAL(kind=db), allocatable, SAVE :: matcoef(:, :), phi_spline(:), vec1(:), vec2(:) REAL(kind=db), allocatable, SAVE :: loc_moments(:, :), loc_rhs(:), gradgtilde(:), fverif(:), ppformwork(:,:,:) INTEGER, SAVE:: loc_zspan TYPE(mumps_mat), SAVE :: femat !< Finite Element Method matrix for the full domain TYPE(mumps_mat), SAVE :: reduccedmat !< Finite Element Method matrix in the reduced web-spline sub-space !TYPE(mumps_mat), SAVE :: fematmpi !< Finite Element Method matrix prepared for mpi parallelism INTEGER :: nbmoments = 10 !< number of moments to be calculated and stored INTEGER(kind=omp_lock_kind), Allocatable:: mu_lock(:) !< Stores the lock for fields parallelism CONTAINS SUBROUTINE mag_init USE basic, ONLY: magnetfile, nr, nz USE bsplines USE mumps_bsplines USE mpihelper USE geometry ALLOCATE (Br((nr + 1)*(nz + 1)), Bz((nr + 1)*(nz + 1))) ALLOCATE (Athet((nr + 1)*(nz + 1))) ! Calculate magnetic field mirror components in grid points (Davidson analytical formula employed) ! or load it from magnetfile if present CALL magnet(magnetfile) end subroutine mag_init !--------------------------------------------------------------------------- !> @author !> Patryk Kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Set-up the necessary variables for solving Poisson and computes the magnetic field on the grid ! !--------------------------------------------------------------------------- SUBROUTINE fields_init USE basic, ONLY: pot, nlperiod, nrank, rhs, volume, rgrid USE bsplines USE geometry USE mumps_bsplines USE mpihelper INTEGER :: nrz(2), i, d2, k1, n1 ! Auxiliary vectors ALLOCATE(vec1((nz+1)*(nr+1)),vec2((nr+1)*(nz+1))) DO i=0,nr vec1(i*(nz+1)+1:(i+1)*(nz+1))=zgrid!(0:nz) vec2(i*(nz+1)+1:(i+1)*(nz+1))=rgrid(i) END DO ! Set up 2d spline splrz used in the FEM CALL set_spline(femorder, ngauss, zgrid, rgrid, splrz, nlppform=nlppform, period=nlperiod) ! Set up 2d spline splrz_ext used in the FEM to calculate the external electric field and potential CALL set_spline(femorder, ngauss, zgrid, rgrid, splrz_ext, nlppform=nlppform, period=nlperiod) !Allocate the work buffer to calculate the ppform d2 = splrz%sp2%dim k1 = splrz%sp1%order n1 = splrz%sp1%nints ALLOCATE(ppformwork(d2,k1,n1)) ! Calculate dimension of splines nrz(1) = nz nrz(2) = nr CALL get_dim(splrz, nrank, nrz, femorder) !
Allocate necessary variables ALLOCATE (matcoef(nrank(1), nrank(2))) ALLOCATE (pot((nr + 1)*(nz + 1))) ALLOCATE (potxt((nr + 1)*(nz + 1))) ALLOCATE (Erxt((nr + 1)*(nz + 1))) ALLOCATE (Ezxt((nr + 1)*(nz + 1))) ALLOCATE (rhs(nrank(1)*nrank(2))) ALLOCATE (gradgtilde(nrank(1)*nrank(2))) gradgtilde = 0 ALLOCATE (phi_spline(nrank(1)*nrank(2))) ALLOCATE (volume(nrank(1)*nrank(2))) volume = 0 ALLOCATE (Er((nr + 1)*(nz + 1)), Ez((nr + 1)*(nz + 1))) ALLOCATE (mu_lock(nrank(1)*nrank(2))) do i = 1, nrank(1)*nrank(2) call omp_init_lock(mu_lock(i)) end do end SUBROUTINE fields_init !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Set-up the geometry definition and read it from the standard input !> Precomputes the LHS matrix to solve Poisson abd the RHS effect of the dirichlet boundaries ! !--------------------------------------------------------------------------- SUBROUTINE fields_start USE geometry USE basic, ONLY: nrank implicit none INTEGER:: i,j, ierr DOUBLE PRECISION:: val ! set up the geometry module for setting up non-conforming boundary conditions call timera(0, "geom_init") call geom_init(splrz, vec1, vec2) call timera(1, "geom_init") ! Initialisation of FEM matrix CALL init(nrank(1)*nrank(2), 2, femat) ! Calculate and factorise FEM matrix (depends only on mesh) CALL fematrix(femat) If (walltype .lt. 0) then allocate (fverif(nrank(1)*nrank(2))) fverif = 0 end if ! Compute the volume of the splines and gtilde for solving E using web-splines CALL comp_volume !$OMP PARALLEL Call comp_gradgtilde !$OMP END PARALLEL if (nlweb) then ! Calculate reduced matrix for use of web splines call timera(0, "reduce femat") call Reducematrix(femat, reduccedmat) call timera(1, "reduce femat") call factor(reduccedmat) else call factor(femat) end if !WRITE(*,*) "Copy and to_mat worked" !CALL MPI_abort(MPI_COMM_WORLD,-1,ierr) !$OMP PARALLEL call vacuum_field !$OMP END PARALLEL END SUBROUTINE fields_start !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Recomputes the vacuum electric field ! !--------------------------------------------------------------------------- subroutine vacuum_field Use geometry USE basic, ONLY: pot, rhs implicit none ! Computes the externally imposed electric field !$OMP SINGLE rhs = -gradgtilde if (walltype .lt. 0) rhs = rhs + fverif !$OMP END SINGLE !$OMP BARRIER call poisson(splrz_ext) !$OMP BARRIER !$OMP SINGLE - rhs = 0 potxt = pot erxt = Er Ezxt = Ez !$OMP END SINGLE NOWAIT end subroutine !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Set-up the necessary variables for the communication of moments and rhs grid ! !--------------------------------------------------------------------------- SUBROUTINE fields_comm_init(Zbounds) USE basic, ONLY: nrank USE mpihelper INTEGER:: Zbounds(0:) loc_zspan = Zbounds(mpirank + 1) - Zbounds(mpirank) + femorder(1) if (allocated(loc_moments)) deallocate (loc_moments) ALLOCATE (loc_moments(nbmoments, loc_zspan*nrank(2))) if (allocated(loc_rhs)) deallocate (loc_rhs) ALLOCATE (loc_rhs(loc_zspan*nrank(2))) IF (mpisize .gt. 
1) THEN CALL init_overlaps(nrank, femorder, Zbounds(mpirank), Zbounds(mpirank + 1), nbmoments) END IF END SUBROUTINE fields_comm_init !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Construct the right hand side vector used in the FEM Poisson solver ! !> @param[in] plist list of the particles type storing the desired specie parameters ! !--------------------------------------------------------------------------- SUBROUTINE rhscon(plist) USE bsplines use mpi USE basic, ONLY: rhs USE beam, ONLY: particles USE mpihelper Use geometry Use omp_lib type(particles), INTENT(INOUT):: plist(:) INTEGER:: i IF (nlphis) then ! We calculate the self-consistent field !$OMP SINGLE loc_rhs = 0 ! Reset the moments matrix !$OMP END SINGLE NOWAIT ! Assemble rhs for each specie Do i = 1, size(plist, 1) if (plist(i)%is_field) CALL deposit_charge(plist(i), loc_rhs) END Do !$OMP BARRIER ! If we are using MPI parallelism, reduce the rhs on the root process IF (mpisize .gt. 1) THEN CALL rhs_gather(rhs) ELSE !$OMP SINGLE rhs = loc_rhs !$OMP END SINGLE END IF ELSE ! We only consider the externally imposed field !$OMP SINGLE rhs = 0 !$OMP END SINGLE END IF !$OMP SINGLE IF (mpirank .eq. 0) THEN rhs = rhs - gradgtilde if (walltype .lt. 0) rhs = rhs + fverif END IF !$OMP END SINGLE END SUBROUTINE rhscon !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Calculate the 0th 1st and 2nd order moments of the particle p and stores it in moment ! !> @param[in] p the particles type storing the desired specie parameters !> @param[out] moment the 2d array storing the calculated moments ! !--------------------------------------------------------------------------- SUBROUTINE momentsdiag(p) USE bsplines use mpi USE beam, ONLY: particles USE mpihelper Use geometry type(particles), INTENT(INOUT):: p !REAL(kind=db), INTENT(INOUT):: moment(:, :) !$OMP SINGLE loc_moments = 0 ! Reset the moments matrix ! Assemble rhs !$OMP END SINGLE IF (p%Nploc .ne. 0) THEN CALL deposit_moments(p, loc_moments) END IF !$OMP SINGLE if(.not. allocated(p%moments))THEN if(mpirank.eq.0)THEN Allocate(p%moments(nbmoments,nrank(1)*nrank(2))) else Allocate(p%moments(0,0)) end if end if !$OMP END SINGLE ! If we are using MPI parallelism, reduce the rhs on the root process IF (mpisize .gt. 1) THEN CALL moments_gather(p%moments) ELSE !$OMP SINGLE p%moments = loc_moments !$OMP END SINGLE NOWAIT END IF END SUBROUTINE momentsdiag !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Deposit the particles moments (n,v,v^2) from p on the grid ! 
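!> The nbmoments=10 rows of the local moment tensor hold, for each spline coefficient: (1) the deposited density, (2)-(4) the mean velocity components v_r, v_theta, v_z, and (5)-(10) the second-order products v_r*v_r, v_r*v_theta, v_r*v_z, v_theta*v_theta, v_theta*v_z and v_z*v_z, as accumulated in the deposition loop below. !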
!> @param[in] p the particles type storing the desired specie parameters !> @param[in] p_loc_moments local tensor used to store the moments of the given specie !--------------------------------------------------------------------------- SUBROUTINE deposit_moments(p, p_loc_moments) USE bsplines use mpi USE basic, ONLY: Zbounds USE beam, ONLY: particles USE mpihelper USE geometry USE omp_lib TYPE(particles), INTENT(IN):: p REAL(kind=db), DIMENSION(:, :), INTENT(INOUT):: p_loc_moments REAL(kind=db), DIMENSION(:, :), Allocatable:: omp_loc_moments INTEGER ::irow, jcol, it, jw, mu, i, k, iend, nbunch INTEGER, DIMENSION(:), ALLOCATABLE::zleft, rleft REAL(kind=db) :: vr, vthet, vz, coeff REAL(kind=db), ALLOCATABLE :: fun(:, :, :), fun2(:, :, :) INTEGER:: num_threads num_threads = omp_get_max_threads() nbunch = p%Nploc/num_threads ! Particle bunch size used when calling basfun nbunch = max(nbunch, 1) ! Particle bunch size used when calling basfun nbunch = min(nbunch, 64) ! Particle bunch size used when calling basfun ! Assemble rhs IF (p%Nploc .gt. 0) THEN !!$OMP PARALLEL DEFAULT(SHARED), PRIVATE(zleft,rleft,jw,it,iend,irow,jcol,mu,k,vr,vz,vthet,coeff,fun,fun2) ALLOCATE (zleft(nbunch), rleft(nbunch)) ALLOCATE (fun(1:femorder(1) + 1, 0:0, nbunch), fun2(1:femorder(2) + 1, 0:0, nbunch)) ! Arrays keeping values of b-splines at gauss node allocate(omp_loc_moments(size(p_loc_moments,1),size(p_loc_moments,2))) omp_loc_moments=0 !$OMP DO DO i = 1, p%Nploc, nbunch ! Avoid segmentation fault by accessing non relevant data iend = min(i + nbunch - 1, p%Nploc) k = iend - i + 1 ! Localize the particle !CALL locintv(splrz%sp2, p%R(i:iend), rleft(1:k)) !CALL locintv(splrz%sp1, p%Z(i:iend), zleft(1:k)) rleft(1:k) = p%rindex(i:iend) zleft(1:k) = p%zindex(i:iend) ! Compute the value of the splines at the particles positions CALL basfun(p%pos(3,i:iend), splrz%sp1, fun(:, :, 1:k), zleft(1:k) + 1) CALL basfun(p%pos(1,i:iend), splrz%sp2, fun2(:, :, 1:k), rleft(1:k) + 1) DO k = 1, (iend - i + 1) DO jw = 1, (femorder(2) + 1) DO it = 1, (femorder(1) + 1) irow = zleft(k) + it - Zbounds(mpirank) jcol = rleft(k) + jw mu = irow + (jcol - 1)*(loc_zspan) coeff = p%weight*fun(it, 0, k)*fun2(jw, 0, k) ! 
Add contribution of particle nbunch to rhs grid point mu vr = 0.5*(p%U(1,i + k - 1)/p%Gamma(i + k - 1) + p%Uold(1,i + k - 1)/p%Gammaold(i + k - 1)) vz = 0.5*(p%U(3,i + k - 1)/p%Gamma(i + k - 1) + p%Uold(3,i + k - 1)/p%Gammaold(i + k - 1)) vthet = 0.5*(p%U(2,i + k - 1)/p%Gamma(i + k - 1) + p%Uold(2,i + k - 1)/p%Gammaold(i + k - 1)) !call omp_set_lock(mu_lock(mu)) !!$OMP ATOMIC UPDATE omp_loc_moments(1, mu) = omp_loc_moments(1, mu) + coeff !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(2, mu) = omp_loc_moments(2, mu) + coeff*vr !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(3, mu) = omp_loc_moments(3, mu) + coeff*vthet !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(4, mu) = omp_loc_moments(4, mu) + coeff*vz !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(5, mu) = omp_loc_moments(5, mu) + coeff*vr*vr !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(6, mu) = omp_loc_moments(6, mu) + coeff*vr*vthet !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(7, mu) = omp_loc_moments(7, mu) + coeff*vr*vz !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(8, mu) = omp_loc_moments(8, mu) + coeff*vthet*vthet !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(9, mu) = omp_loc_moments(9, mu) + coeff*vthet*vz !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE omp_loc_moments(10, mu) = omp_loc_moments(10, mu) + coeff*vz*vz !!$OMP END ATOMIC !call omp_unset_lock(mu_lock(mu)) END DO END DO END DO END DO !!$OMP END PARALLEL DO !$OMP END DO NOWAIT Do i=1,size(p_loc_moments,2) call omp_set_lock(mu_lock(i)) p_loc_moments(:,i)=p_loc_moments(:,i)+omp_loc_moments(:,i) call omp_unset_lock(mu_lock(i)) end do !!$OMP END CRITICAL(loc_moments_reduce) DEALLOCATE (fun, fun2, zleft, rleft) END IF END subroutine deposit_moments !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Deposit the particles charges (q) from p on the grid ! !> @param[in] p the particles type storing the desired specie parameters !> @param[in] p_loc_moments local tensor used to store the moments of the given specie !--------------------------------------------------------------------------- SUBROUTINE deposit_charge(p, p_loc_moments) USE bsplines use mpi USE constants USE basic, ONLY: Zbounds, rnorm, phinorm USE beam, ONLY: particles USE mpihelper USE geometry USE omp_lib TYPE(particles), INTENT(IN):: p REAL(kind=db), DIMENSION(:), INTENT(INOUT):: p_loc_moments REAL(kind=db), DIMENSION(:), allocatable:: omp_loc_moments INTEGER ::irow, jcol, it, jw, mu, i, k, iend, nbunch INTEGER, DIMENSION(:), ALLOCATABLE::zleft, rleft REAL(kind=db), ALLOCATABLE :: fun(:, :, :), fun2(:, :, :) INTEGER:: num_threads, curr_thread real(kind=db):: contrib, chargecoeff num_threads = omp_get_max_threads() nbunch = p%Nploc/num_threads ! Particle bunch size used when calling basfun nbunch = max(nbunch, 1) ! Particle bunch size used when calling basfun nbunch = min(nbunch, 16) ! Particle bunch size used when calling basfun chargecoeff = p%weight*p%q/(2*pi*eps_0*phinorm*rnorm) ! Normalized charge density simulated by each macro particle ! Assemble rhs IF (p%Nploc .ne. 0) THEN !!!$OMP PARALLEL DEFAULT(SHARED), PRIVATE(i,zleft, rleft, jw, it, iend, irow, jcol, mu, k, fun, fun2, contrib) ALLOCATE (zleft(nbunch), rleft(nbunch)) ALLOCATE (fun(1:femorder(1) + 1, 0:0, nbunch), fun2(1:femorder(2) + 1, 0:0, nbunch)) ! 
Arrays keeping values of b-splines at gauss node allocate(omp_loc_moments(size(p_loc_moments))) omp_loc_moments=0 zleft=0 rleft=0 curr_thread=omp_get_thread_num() !$OMP DO DO i = 1, p%Nploc, nbunch ! Avoid segmentation fault by accessing non relevant data iend = min(i + nbunch - 1, p%Nploc) k = iend - i + 1 ! Localize the particle rleft(1:k) = p%rindex(i:iend) zleft(1:k) = p%zindex(i:iend) ! Compute the value of the splines at the particles positions CALL basfun(p%pos(3,i:iend), splrz%sp1, fun, zleft(1:k) + 1) CALL basfun(p%pos(1,i:iend), splrz%sp2, fun2, rleft(1:k) + 1) !CALL geom_weight(p%Z(i:iend),p%R(i:iend),wgeom) DO k = 1, (iend - i + 1) DO jw = 1, (femorder(2) + 1) DO it = 1, (femorder(1) + 1) irow = zleft(k) + it - Zbounds(mpirank) jcol = rleft(k) + jw mu = irow + (jcol - 1)*(loc_zspan) ! Add contribution of particle k to rhs grid point mu contrib = fun(it, 0, k)*fun2(jw, 0, k)*p%geomweight(0,i + k - 1)*chargecoeff omp_loc_moments(mu) = omp_loc_moments(mu) + contrib END DO END DO END DO END DO !$OMP END DO NOWAIT DEALLOCATE (fun, fun2, zleft, rleft) !!!$OMP END PARALLEL !!$OMP CRITICAL(loc_charge_reduce) Do i=1,size(p_loc_moments) !$OMP ATOMIC p_loc_moments(i)=p_loc_moments(i)+omp_loc_moments(i) !$OMP END ATOMIC end do !!$OMP END CRITICAL(loc_charge_reduce) END IF END subroutine deposit_charge !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Do the communication of the local moment matrices between mpi workers for the overlap grid points !> and reduce the result on the host ! !--------------------------------------------------------------------------- SUBROUTINE rhs_gather(rhs) USE mpihelper USE Basic, ONLY: Zbounds, mpirank, leftproc, rightproc REAL(kind=db), DIMENSION(:), INTENT(INOUT):: rhs INTEGER:: ierr, i, j INTEGER:: displs(mpisize), counts(mpisize) INTEGER:: overlap_type INTEGER:: rcvoverlap_type displs = Zbounds(0:mpisize - 1) counts = Zbounds(1:mpisize) - Zbounds(0:mpisize - 1) counts(mpisize) = counts(mpisize) + femorder(1) !$OMP MASTER !WRITE(*,*) mpirank, "wE communicate overlap rhs" CALL rhsoverlapcomm(mpirank, leftproc, rightproc, loc_rhs, nrank, femorder, loc_zspan - femorder(1)) !$OMP END MASTER !$OMP BARRIER IF (mpirank .gt. 0) THEN !$OMP DO SIMD DO j = 1, femorder(1) DO i = 1, nrank(2) loc_rhs((i - 1)*loc_zspan + j) = loc_rhs((i - 1)*loc_zspan + j)& & + rhsoverlap_buffer(nrank(2)*(j - 1) + i) END DO END DO !$OMP END DO SIMD END IF ! Set communication vector type overlap_type = rhsoverlap_type rcvoverlap_type = rcvrhsoverlap_type !$OMP MASTER IF (mpirank .eq. 0) THEN rhs = 0 END IF CALL MPI_GATHERV(loc_rhs, counts(mpirank + 1), rhsoverlap_type, & & rhs, counts, displs, rcvrhsoverlap_type, 0, MPI_COMM_WORLD, ierr) !$OMP END MASTER !$OMP BARRIER END SUBROUTINE rhs_gather !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Do the communication of the local moment matrices between mpi workers for the overlap grid points !> and reduce the result on the host ! 
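!> As in rhs_gather above, each rank other than the root first folds the overlap buffer received from its neighbouring rank into the first femorder(1) axial coefficients of its local block; MPI_GATHERV with counts and displacements derived from Zbounds then assembles the global matrix on the root process, the last rank keeping its right-hand overlap (hence counts(mpisize)=counts(mpisize)+femorder(1)). !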
!--------------------------------------------------------------------------- SUBROUTINE moments_gather(moment) USE mpihelper USE Basic, ONLY: Zbounds, mpirank, leftproc, rightproc REAL(kind=db), DIMENSION(:, :), INTENT(INOUT):: moment INTEGER:: ierr, i, j INTEGER:: displs(mpisize), counts(mpisize) displs = Zbounds(0:mpisize - 1) counts = Zbounds(1:mpisize) - Zbounds(0:mpisize - 1) counts(mpisize) = counts(mpisize) + femorder(1) !$OMP MASTER CALL momentsoverlapcomm(mpirank, leftproc, rightproc, loc_moments, nrank, femorder, loc_zspan - femorder(1)) !$OMP END MASTER !$OMP BARRIER IF (mpirank .gt. 0) THEN !!$OMP PARALLEL DO SIMD DEFAULT(SHARED) private(i) !$OMP DO SIMD DO j = 1, femorder(1) DO i = 1, nrank(2) loc_moments(1:nbmoments, (i - 1)*loc_zspan + j) = loc_moments(1:nbmoments, (i - 1)*loc_zspan + j)& & + momentsoverlap_buffer(nbmoments*(nrank(2)*(j - 1) + i - 1) + 1:nbmoments*(nrank(2)*(j - 1) + i)) END DO END DO !$OMP END DO SIMD END IF !$OMP MASTER ! Set communication vector type IF (mpirank .eq. 0) THEN moment = 0 END IF CALL MPI_GATHERV(loc_moments, counts(mpirank + 1), momentsoverlap_type, & & moment, counts, displs, rcvmomentsoverlap_type, 0, MPI_COMM_WORLD, ierr) !$OMP END MASTER !$OMP BARRIER END SUBROUTINE moments_gather !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Solves Poisson equation using FEM. Distributes the result on all MPI workers and interpolate the electric forces !> for each particle. ! !--------------------------------------------------------------------------- SUBROUTINE poisson(splinevar) USE basic, ONLY: rhs, nrank, pot, nlend USE bsplines, ONLY: spline2d, gridval USE mumps_bsplines, ONLY: bsolve, vmx USE futils Use geometry type(spline2d):: splinevar INTEGER:: ierr real(kind=db), allocatable::reducedrhs(:) real(kind=db), allocatable:: reducedsol(:), tempcol(:) !$OMP MASTER if (nlweb) then ! we use the web-spline reduction for stability allocate (reducedrhs(nrank(1)*nrank(2))) allocate (reducedsol(nbreducedspline)) allocate (tempcol(nrank(1)*nrank(2))) if(mpirank.eq.0) then ! Only the root process solves Poisson reducedrhs = vmx(etilde, rhs) Call bsolve(reduccedmat, reducedrhs(1:nbreducedspline), reducedsol) end if CALL MPI_Bcast(reducedsol, nbreducedspline, db_type, 0, MPI_COMM_WORLD, ierr) tempcol = 0 tempcol(1:nbreducedspline) = reducedsol !phi_spline = 0 phi_spline = vmx(etildet, tempcol) else if(mpirank.eq.0) then CALL bsolve(femat, rhs, phi_spline) end if CALL MPI_Bcast(phi_spline, nrank(1)*nrank(2), db_type, 0, MPI_COMM_WORLD, ierr) end if matcoef = reshape(phi_spline, (/nrank(1), nrank(2)/)) !$OMP END MASTER !$OMP BARRIER ! update the ppform coefficients CALL updt_ppform2d(splinevar, matcoef) !$OMP BARRIER !$OMP SINGLE IF (mpirank .eq. 0 .and. (modulo(step, it2d) .eq. 0 .or. nlend)) THEN ! On the root process, compute the electric field for diagnostic purposes CALL gridval(splinevar, vec1, vec2, pot, (/0, 0/)) CALL gridval(splinevar, vec1, vec2, Ez, (/1, 0/)) CALL gridval(splinevar, vec1, vec2, Er, (/0, 1/)) Ez = -pot*gridwdir(1,:) - Ez*gridwdir(0,:) - gtilde(1,:) Er = -pot*gridwdir(2,:) - Er*gridwdir(0,:) - gtilde(2,:) pot = pot*gridwdir(0,:) + gtilde(0,:) END IF !$OMP END SINGLE NOWAIT END SUBROUTINE poisson !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! 
DESCRIPTION: !> !> @brief !> Computes the electric fields and potential at the particles position for particles !> between positions nstart and nend in the list ! !> @param[in] p the particles type storing the desired specie parameters !> @param[in] nstart starting index for the particle list !> @param[in] nend ending index for the particle list !--------------------------------------------------------------------------- SUBROUTINE EFieldscompatparts(p, nstart, nend) Use beam, ONLY: particles Use geometry Use splinebound TYPE(particles), INTENT(INOUT):: p INTEGER, OPTIONAL::nstart, nend INTEGER:: i, iend, nst, nnd INTEGER:: nbunch INTEGER:: num_threads Real(kind=db), ALLOCATABLE:: erext(:), ezext(:), gtildeloc(:, :) if (.not. present(nstart)) nst = 1 if (.not. present(nend)) nnd = p%Nploc !num_threads = omp_get_max_threads() !nbunch = (nnd - nst + 1)/num_threads ! Particle bunch size used when calling basfun !nbunch = max(nbunch, 1) ! Particle bunch size used when calling basfun nbunch = 64 ! Particle bunch size used when calling basfun Allocate (erext(nbunch), ezext(nbunch), gtildeloc(0:2,0:nbunch - 1)) ! Evaluate the electric potential and field at the particles position !$OMP DO DO i = nst, nnd, nbunch ! Avoid segmentation fault by accessing non relevant data iend = min(i + nbunch - 1, nnd) CALL speval(splrz, p%pos(3,i:iend), p%pos(1,i:iend),p%Zindex(i:iend),p%Rindex(i:iend), p%pot(i:iend), p%E(2,i:iend), p%E(1,i:iend)) CALL speval(splrz_ext, p%pos(3,i:iend), p%pos(1,i:iend),p%Zindex(i:iend),p%Rindex(i:iend), p%potxt(i:iend)) Call total_gtilde(p%pos(3,i:iend), p%pos(1,i:iend), gtildeloc(:,0:iend - i),p%geomweight(:,i:iend)) p%E(2,i:iend) = -p%E(2,i:iend)*p%geomweight(0,i:iend) - p%pot(i:iend)*p%geomweight(1,i:iend) - gtildeloc(1,0:iend - i) p%E(1,i:iend) = -p%E(1,i:iend)*p%geomweight(0,i:iend) - p%pot(i:iend)*p%geomweight(2,i:iend) - gtildeloc(2,0:iend - i) p%pot(i:iend) = p%geomweight(0,i:iend)*p%pot(i:iend) + gtildeloc(0,0:iend - i) p%potxt(i:iend) = p%geomweight(0,i:iend)*p%potxt(i:iend) + gtildeloc(0,0:iend - i) END DO !$OMP END DO NOWAIT END SUBROUTINE EFieldscompatparts !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Constucts the FEM matrix using bsplines initialized in fields_init !--------------------------------------------------------------------------- SUBROUTINE fematrix(mat) USE bsplines USE geometry USE omp_lib USE sparse type(mumps_mat):: mat REAL(kind=db), ALLOCATABLE :: xgauss(:, :), wgauss(:), wgeom(:, :) INTEGER, ALLOCATABLE :: f(:, :), aux(:) REAL(kind=db), ALLOCATABLE :: coefs(:) REAL(kind=db), ALLOCATABLE :: fun(:, :), fun2(:, :) REAL(kind=db) :: contrib INTEGER, ALLOCATABLE :: idert(:, :), iderw(:, :), iderg(:, :) INTEGER :: i, j, jt, iw, irow, jcol, mu, igauss, iterm, irow2, jcol2, mu2, kterms, gausssize kterms=8 ALLOCATE (fun(1:femorder(1) + 1, 0:1), fun2(1:femorder(2) + 1, 0:1))!Arrays keeping values of b-splines at gauss node !ALLOCATE(xgauss(ngauss(1)*ngauss(2),2), wgauss(ngauss(1)*ngauss(2)),zg(ngauss(1)),rg(ngauss(2)), wzg(ngauss(1)), wrg(ngauss(2))) !Gaussian nodes and weights arrays ALLOCATE (f((femorder(1) + 1)*(femorder(2) + 1), 2), aux(femorder(1) + 1)) !Auxiliary arrays ordering bsplines ALLOCATE (idert(kterms, 2), iderw(kterms, 2), coefs(kterms), iderg(kterms, 2)) !Pointers on the order of derivatives call timera(0, "fematrix") ! 
Constuction of auxiliary array ordering bsplines in given interval DO i = 1, (femorder(1) + 1) aux(i) = i END DO DO i = 1, (femorder(2) + 1) f((i - 1)*(femorder(1) + 1) + 1:i*(femorder(1) + 1), 1) = aux f((i - 1)*(femorder(1) + 1) + 1:i*(femorder(1) + 1), 2) = i END DO ! Assemble FEM matrix !$OMP PARALLEL DO DEFAULT(SHARED), PRIVATE(j,i,xgauss,wgauss,gausssize,wgeom, igauss, iterm,jt,irow,jcol, mu, iw, irow2,jcol2, mu2, contrib, iderw, idert, iderg, coefs, fun, fun2) DO j = 1, nr ! Loop on r position DO i = 1, nz ! Loop on z position !! Computation of gauss weight and position in r and z direction for gaussian integration Call calc_gauss(splrz, ngauss, i, j, xgauss, wgauss, gausssize) if (gausssize .gt. 0) then If (allocated(wgeom)) deallocate (wgeom) ALLOCATE (wgeom(0:2,gausssize)) CALL geom_weight(xgauss(:, 1), xgauss(:, 2), wgeom) End if DO igauss = 1, gausssize ! Loop on gaussian weights and positions CALL basfun(xgauss(igauss, 1), splrz%sp1, fun, i) CALL basfun(xgauss(igauss, 2), splrz%sp2, fun2, j) CALL coefeq(xgauss(igauss, :), idert, iderw, iderg, coefs, kterms) DO jt = 1, (1 + femorder(1))*(femorder(2) + 1) irow = i + f(jt, 1) - 1; jcol = j + f(jt, 2) - 1 mu = irow + (jcol - 1)*nrank(1) call omp_set_lock(mu_lock(mu)) DO iw = 1, (1 + femorder(1))*(femorder(2) + 1) irow2 = i + f(iw, 1) - 1; jcol2 = j + f(iw, 2) - 1 mu2 = irow2 + (jcol2 - 1)*nrank(1) contrib=0.0_db DO iterm = 1, kterms ! Loop on the two integration dimensions contrib = contrib+wgeom(iderg(iterm, 1),igauss)*wgeom(iderg(iterm, 2),igauss)* & & fun(f(jt, 1), idert(iterm, 1))*fun(f(iw, 1), idert(iterm, 2))* & & fun2(f(jt, 2), iderw(iterm, 1))*fun2(f(iw, 2), iderw(iterm, 2))* & & wgauss(igauss)*coefs(iterm) END DO CALL updt_sploc(mat%mat%row(mu), mu2, contrib) END DO call omp_unset_lock(mu_lock(mu)) END DO END DO END DO END DO !$OMP End parallel do DEALLOCATE (f, aux) DEALLOCATE (idert, iderw, coefs, fun, fun2) call timera(1, "fematrix") END SUBROUTINE fematrix !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Computes the volume of the splines cells needed to display the density in post-processing !--------------------------------------------------------------------------- SUBROUTINE comp_volume USE bsplines USE geometry USE basic, ONLY: Volume REAL(kind=db), ALLOCATABLE :: xgauss(:, :), wgauss(:), wgeom(:, :) INTEGER, ALLOCATABLE :: f(:, :), aux(:) REAL(kind=db), ALLOCATABLE :: coefs(:) REAL(kind=db), ALLOCATABLE :: fun(:, :), fun2(:, :), gtildeintegr(:, :), ftestpt(:, :) Integer, ALLOCATABLE, Dimension(:) :: idg, idt, idp, idw INTEGER :: i, j, jt, irow, jcol, mu, igauss, gausssize, iterm, nterms Real(kind=db)::newcontrib call timera(0, "comp_volume") ALLOCATE (fun(1:femorder(1) + 1, 0:1), fun2(1:femorder(2) + 1, 0:1))!Arrays keeping values of b-splines at gauss node !ALLOCATE(xgauss(ngauss(1)*ngauss(2),2), wgauss(ngauss(1)*ngauss(2)),zg(ngauss(1)),rg(ngauss(2)), wzg(ngauss(1)), wrg(ngauss(2))) !Gaussian nodes and weights arrays ALLOCATE (f((femorder(1) + 1)*(femorder(2) + 1), 2), aux(femorder(1) + 1)) !Auxiliary arrays ordering bsplines nterms = 4 Allocate (idg(nterms), idt(nterms), idw(nterms), idp(nterms), coefs(nterms)) ! Constuction of auxiliary array ordering bsplines in given interval DO i = 1, (femorder(1) + 1) aux(i) = i END DO DO i = 1, (femorder(2) + 1) f((i - 1)*(femorder(1) + 1) + 1:i*(femorder(1) + 1), 1) = aux f((i - 1)*(femorder(1) + 1) + 1:i*(femorder(1) + 1), 2) = i END DO volume = 0 if (walltype .lt. 
0) fverif = 0 ! Assemble Volume matrix !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(j,i,xgauss,wgauss,gausssize,wgeom, igauss, gtildeintegr, ftestpt, iterm,jt,irow,jcol, mu, idw, idt, idg, idp, coefs, fun, fun2, newcontrib) DO j = 1, nr ! Loop on r position DO i = 1, nz ! Loop on z position ! Computation of gauss weight and position in r and z direction for gaussian integration Call calc_gauss(splrz, ngauss, i, j, xgauss, wgauss, gausssize) If (allocated(wgeom)) deallocate (wgeom) if (gausssize .gt. 0) then ALLOCATE (wgeom(0:2,size(xgauss, 1))) CALL geom_weight(xgauss(:, 1), xgauss(:, 2), wgeom) End if If (allocated(gtildeintegr)) deallocate (gtildeintegr) ALLOCATE (gtildeintegr(0:2,size(xgauss, 1))) Call total_gtilde(xgauss(:, 1), xgauss(:, 2), gtildeintegr,wgeom) if (walltype .lt. 0) then If (allocated(ftestpt)) deallocate (ftestpt) ALLOCATE (ftestpt(0:0,size(xgauss, 1))) CALL ftest(xgauss(:, 1), xgauss(:, 2), ftestpt) end if DO igauss = 1, gausssize ! Loop on gaussian weights and positions CALL basfun(xgauss(igauss, 1), splrz%sp1, fun, i) CALL basfun(xgauss(igauss, 2), splrz%sp2, fun2, j) CALL coefeqext(xgauss(igauss, :), idt, idw, idg, idp, coefs) DO jt = 1, (1 + femorder(1))*(femorder(2) + 1) irow = i + f(jt, 1) - 1; jcol = j + f(jt, 2) - 1 mu = irow + (jcol - 1)*nrank(1) newcontrib = 2*pi*fun(f(jt, 1), 0)*fun2(f(jt, 2), 0)*wgauss(igauss)*xgauss(igauss, 2)!*wgeom(igauss,0) !$OMP ATOMIC UPDATE volume(mu) = volume(mu) + newcontrib !$OMP END ATOMIC if (walltype .lt. 0) THEN newcontrib = ftestpt(0,igauss)*fun(f(jt, 1), 0)*fun2(f(jt, 2), 0)& &*wgeom(0,igauss)*wgauss(igauss)*xgauss(igauss, 2) !$OMP ATOMIC UPDATE fverif(mu) = fverif(mu) + newcontrib !$OMP END ATOMIC end if END DO END DO END DO END DO !$OMP END PARALLEL DO !DEALLOCATE(xgauss, wgauss,zg,rg, wzg, wrg) DEALLOCATE (f, aux) DEALLOCATE (fun, fun2) call timera(1, "comp_volume") END SUBROUTINE comp_volume !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Computes the gradient of the gtilde function for the web-spline method needed to correctly apply the dirichlet boundary conditions !--------------------------------------------------------------------------- SUBROUTINE comp_gradgtilde USE bsplines USE geometry REAL(kind=db), ALLOCATABLE :: xgauss(:, :), wgauss(:), wgeom(:, :) INTEGER, ALLOCATABLE :: f(:, :), aux(:) REAL(kind=db), ALLOCATABLE :: coefs(:) REAL(kind=db), ALLOCATABLE :: fun(:, :), fun2(:, :), gtildeintegr(:, :), ftestpt(:, :) Integer, ALLOCATABLE, Dimension(:) :: idg, idt, idp, idw INTEGER :: i, j, jt, irow, jcol, mu, igauss, gausssize, iterm, nterms Real(kind=db)::newcontrib !call timera(0, "comp_gradgtilde") ALLOCATE (fun(1:femorder(1) + 1, 0:1), fun2(1:femorder(2) + 1, 0:1))!Arrays keeping values of b-splines at gauss node !ALLOCATE(xgauss(ngauss(1)*ngauss(2),2), wgauss(ngauss(1)*ngauss(2)),zg(ngauss(1)),rg(ngauss(2)), wzg(ngauss(1)), wrg(ngauss(2))) !Gaussian nodes and weights arrays ALLOCATE (f((femorder(1) + 1)*(femorder(2) + 1), 2), aux(femorder(1) + 1)) !Auxiliary arrays ordering bsplines nterms = 4 Allocate (idg(nterms), idt(nterms), idw(nterms), idp(nterms), coefs(nterms)) ! Constuction of auxiliary array ordering bsplines in given interval DO i = 1, (femorder(1) + 1) aux(i) = i END DO DO i = 1, (femorder(2) + 1) f((i - 1)*(femorder(1) + 1) + 1:i*(femorder(1) + 1), 1) = aux f((i - 1)*(femorder(1) + 1) + 1:i*(femorder(1) + 1), 2) = i END DO !$OMP SINGLE gradgtilde = 0 !$OMP END SINGLE ! Assemble gradgtilde matrix !! 
$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(j,i,xgauss,wgauss,gausssize,wgeom, igauss, gtildeintegr, ftestpt, iterm,jt,irow,jcol, mu, idw, idt, idg, idp, coefs, fun, fun2, newcontrib) !$OMP DO DO j = 1, nr ! Loop on r position DO i = 1, nz ! Loop on z position ! Computation of gauss weight and position in r and z direction for gaussian integration Call calc_gauss(splrz, ngauss, i, j, xgauss, wgauss, gausssize) If (allocated(wgeom)) deallocate (wgeom) if (gausssize .gt. 0) then ALLOCATE (wgeom(0:2,size(xgauss, 1))) CALL geom_weight(xgauss(:, 1), xgauss(:, 2), wgeom) End if If (allocated(gtildeintegr)) deallocate (gtildeintegr) ALLOCATE (gtildeintegr(0:2,size(xgauss, 1))) Call total_gtilde(xgauss(:, 1), xgauss(:, 2), gtildeintegr,wgeom) if (walltype .lt. 0) then If (allocated(ftestpt)) deallocate (ftestpt) ALLOCATE (ftestpt(0:0,size(xgauss, 1))) CALL ftest(xgauss(:, 1), xgauss(:, 2), ftestpt) end if DO igauss = 1, gausssize ! Loop on gaussian weights and positions CALL basfun(xgauss(igauss, 1), splrz%sp1, fun, i) CALL basfun(xgauss(igauss, 2), splrz%sp2, fun2, j) CALL coefeqext(xgauss(igauss, :), idt, idw, idg, idp, coefs) DO jt = 1, (1 + femorder(1))*(femorder(2) + 1) irow = i + f(jt, 1) - 1; jcol = j + f(jt, 2) - 1 mu = irow + (jcol - 1)*nrank(1) newcontrib = 0 Do iterm = 1, nterms newcontrib = newcontrib + wgeom( idg(iterm),igauss)*gtildeintegr( idp(iterm),igauss)* & & fun(f(jt, 1), idt(iterm))*fun2(f(jt, 2), idw(iterm))* & & wgauss(igauss)*coefs(iterm) End do !$OMP ATOMIC UPDATE gradgtilde(mu) = gradgtilde(mu) + newcontrib !$OMP END ATOMIC END DO END DO END DO END DO !!! $OMP END PARALLEL DO !$OMP END DO !DEALLOCATE(xgauss, wgauss,zg,rg, wzg, wrg) DEALLOCATE (f, aux) DEALLOCATE (fun, fun2) !call timera(1, "comp_gradgtilde") END SUBROUTINE comp_gradgtilde !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Imposes the dirichlet boundary conditions on the FEM matrix for the case where we use regular splines ( not web-splines). !--------------------------------------------------------------------------- SUBROUTINE fe_dirichlet REAL(kind=db), ALLOCATABLE :: arr(:) INTEGER :: i ALLOCATE (arr(nrank(1)*nrank(2))) DO i = 1, nrank(1) IF (rgrid(0) .ne. 
0.0_db) THEN arr = 0; arr(i) = 1; CALL putrow(femat, i, arr) END IF arr = 0; arr(nrank(1)*nrank(2) + 1 - i) = 1; CALL putrow(femat, nrank(1)*nrank(2) + 1 - i, arr) END DO DEALLOCATE (arr) END SUBROUTINE fe_dirichlet !________________________________________________________________________________ SUBROUTINE coefeq(x, idt, idw, idg, c, kterms) REAL(kind=db), INTENT(in) :: x(:) INTEGER, INTENT(out) :: idt(:, :), idw(:, :), idg(:, :),kterms REAL(kind=db), INTENT(out) :: c(:) kterms=8 c = x(2) idt(1, 1) = 0 idt(1, 2) = 0 idw(1, 1) = 0 idw(1, 2) = 0 idg(1, 1) = 1 idg(1, 2) = 1 idt(2, 1) = 0 idt(2, 2) = 1 idw(2, 1) = 0 idw(2, 2) = 0 idg(2, 1) = 1 idg(2, 2) = 0 idt(3, 1) = 1 idt(3, 2) = 0 idw(3, 1) = 0 idw(3, 2) = 0 idg(3, 1) = 0 idg(3, 2) = 1 idt(4, 1) = 1 idt(4, 2) = 1 idw(4, 1) = 0 idw(4, 2) = 0 idg(4, 1) = 0 idg(4, 2) = 0 idt(5, 1) = 0 idt(5, 2) = 0 idw(5, 1) = 0 idw(5, 2) = 0 idg(5, 1) = 2 idg(5, 2) = 2 idt(6, 1) = 0 idt(6, 2) = 0 idw(6, 1) = 0 idw(6, 2) = 1 idg(6, 1) = 2 idg(6, 2) = 0 idt(7, 1) = 0 idt(7, 2) = 0 idw(7, 1) = 1 idw(7, 2) = 0 idg(7, 1) = 0 idg(7, 2) = 2 idt(8, 1) = 0 idt(8, 2) = 0 idw(8, 1) = 1 idw(8, 2) = 1 idg(8, 1) = 0 idg(8, 2) = 0 END SUBROUTINE coefeq SUBROUTINE coefeqext(x, idt, idw, idg, idp, c) REAL(kind=db), INTENT(in) :: x(:) INTEGER, INTENT(out) :: idp(:), idt(:), idw(:), idg(:) REAL(kind=db), INTENT(out) :: c(:) c(1) = x(2) idp(1) = 1 idg(1) = 1 idt(1) = 0 idw(1) = 0 c(2) = x(2) idp(2) = 1 idg(2) = 0 idt(2) = 1 idw(2) = 0 c(3) = x(2) idp(3) = 2 idg(3) = 2 idt(3) = 0 idw(3) = 0 c(4) = x(2) idp(4) = 2 idg(4) = 0 idt(4) = 0 idw(4) = 1 END SUBROUTINE coefeqext !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Computes the magnetic field on the grid according to a magnetic mirror, !> or according to the linear interpolation of the values on the !> grid saved in h5 file stored at magfile. !> @param[in] magfile filname of .h5 file containing the definitions of A and B !--------------------------------------------------------------------------- SUBROUTINE magnet(magfile) USE basic, ONLY: B0, Rcurv, rgrid, zgrid, width, rnorm, nr, nz, bnorm USE constants, ONLY: Pi CHARACTER(LEN=*), INTENT(IN), OPTIONAL:: magfile REAL(kind=db) :: rg, zg, halfLz, MirrorRatio INTEGER :: i, rindex IF (len_trim(magfile) .lt. 1) THEN halfLz = (zgrid(nz) + zgrid(0))/2 MirrorRatio = (Rcurv - 1)/(Rcurv + 1) DO i = 1, (nr + 1)*(nz + 1) rindex = (i - 1)/(nz + 1) rg = rgrid(rindex) zg = zgrid(i - rindex*(nz + 1) - 1) - halfLz Br(i) = -B0*MirrorRatio*SIN(2*pi*zg/width*rnorm)*bessi1(2*pi*rg/width*rnorm)/bnorm Bz(i) = B0*(1 - MirrorRatio*COS(2*pi*zg/width*rnorm)*bessi0(2*pi*rg/width*rnorm))/bnorm Athet(i) = 0.5*B0*(rg*rnorm - width/pi*MirrorRatio*bessi1(2*pi*rg/width*rnorm)*COS(2*pi*zg/width*rnorm)) END DO ELSE CALL load_mag_from_h5(magfile) END IF END SUBROUTINE magnet !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! 
DESCRIPTION: !> !> @brief !> Loads the magnetic field defined in the .h5 file at location magfile !> The file is expected to provide the datasets /mag/r, /mag/z and /mag/Athet, and optionally /mag/Br and /mag/Bz; !> when Br and Bz are absent they are reconstructed from the derivatives of Athet !> @param[in] magfile filename of .h5 file containing the definitions of A and B !--------------------------------------------------------------------------- SUBROUTINE load_mag_from_h5(magfile) USE basic, ONLY: B0, rnorm, bnorm, bscaling USE constants, ONLY: Pi USE futils USE bsplines CHARACTER(LEN=*), INTENT(IN):: magfile REAL(kind=db), ALLOCATABLE :: magr(:), magz(:) REAL(kind=db), ALLOCATABLE :: tempBr(:, :), tempBz(:, :), tempAthet(:, :) real(kind=db), allocatable:: c(:,:) type(spline2d):: Maginterpolation REAL(kind=db) :: maxB INTEGER :: magfid, dims(2) LOGICAL:: B_is_saved INTEGER :: magn(2), magrank CALL openf(trim(magfile), magfid, 'r', real_prec='d') CALL getdims(magfid, '/mag/Athet', magrank, magn) ALLOCATE (magr(magn(2)), magz(magn(1))) ALLOCATE (tempAthet(magn(1), magn(2)), tempBr(magn(1), magn(2)), tempBz(magn(1), magn(2))) ! Read r and z coordinates for the definition of A_\thet, and B CALL getarr(magfid, '/mag/r', magr) CALL getarr(magfid, '/mag/z', magz) CALL getarr(magfid, '/mag/Athet', tempAthet) IF (isdataset(magfid, '/mag/Br') .and. isdataset(magfid, '/mag/Bz')) THEN CALL getarr(magfid, '/mag/Br', tempBr) CALL getarr(magfid, '/mag/Bz', tempBz) IF(bscaling .gt. 0) then maxB=sqrt(maxval(tempBr**2+tempBz**2)) tempBr=tempBr/maxB*B0 tempBz=tempBz/maxB*B0 end if B_is_saved = .true. ELSE B_is_saved = .false. END IF magz=magz/rnorm magr=magr/rnorm CALL set_splcoef((/3,3/),magz,magr,Maginterpolation) call get_dim(Maginterpolation,dims) ! Interpolation of the magnetic potential vector allocate(c(dims(1),dims(2))) call get_splcoef(Maginterpolation,tempAthet, c) CALL gridval(Maginterpolation,vec1,vec2, Athet ,(/0,0/),c) if(B_is_saved) then ! Interpolation of the axial magnetic field call get_splcoef(Maginterpolation,tempBz, c) CALL gridval(Maginterpolation,vec1,vec2, Bz ,(/0,0/),c) ! Interpolation of the radial magnetic field call get_splcoef(Maginterpolation,tempBr, c) CALL gridval(Maginterpolation,vec1,vec2, Br ,(/0,0/),c) else CALL gridval(Maginterpolation,vec1,vec2, Br,(/1,0/)) Br=-Br CALL gridval(Maginterpolation,vec1,vec2, Bz,(/0,1/)) Bz=Bz+Athet/vec2 end if if( bscaling .lt. 0 ) then maxB = maxval(sqrt(Bz**2 + Br**2)) Bz = Bz/maxB*B0 Br = Br/maxB*B0 end if ! We normalize Br = Br/bnorm Bz = Bz/bnorm CALL closef(magfid) deallocate(c) call destroy_SP(Maginterpolation) END SUBROUTINE load_mag_from_h5 !________________________________________________________________________________ ! Modified Bessel function of the first kind of order zero FUNCTION bessi0(x) REAL(kind=db) :: bessi0, x REAL(kind=db) :: ax REAL(kind=db) p1, p2, p3, p4, p5, p6, p7, q1, q2, q3, q4, q5, q6, q7, q8, q9, y SAVE p1, p2, p3, p4, p5, p6, p7, q1, q2, q3, q4, q5, q6, q7, q8, q9 DATA p1, p2, p3, p4, p5, p6, p7/1.0d0, 3.5156229d0, 3.0899424d0, 1.2067492d0, 0.2659732d0, 0.360768d-1, 0.45813d-2/ DATA q1, q2, q3, q4, q5, q6, q7, q8, q9/0.39894228d0, 0.1328592d-1, 0.225319d-2, -0.157565d-2, 0.916281d-2, & & -0.2057706d-1, 0.2635537d-1, -0.1647633d-1, 0.392377d-2/ if (abs(x) .lt.
3.75) then y = (x/3.75)**2 bessi0 = p1 + y*(p2 + y*(p3 + y*(p4 + y*(p5 + y*(p6 + y*p7))))) else ax = abs(x) y = 3.75/ax bessi0 = (exp(ax)/sqrt(ax))*(q1 + y*(q2 + y*(q3 + y*(q4 + y*(q5 + y*(q6 + y*(q7 + y*(q8 + y*q9)))))))) end if return END FUNCTION bessi0 !________________________________________________________________________________ !Modified Bessel functions of the first kind of the first order FUNCTION bessi1(x) REAL(kind=db) :: bessi1, x REAL(kind=db) :: ax REAL(kind=db) p1, p2, p3, p4, p5, p6, p7, q1, q2, q3, q4, q5, q6, q7, q8, q9, y SAVE p1, p2, p3, p4, p5, p6, p7, q1, q2, q3, q4, q5, q6, q7, q8, q9 DATA p1, p2, p3, p4, p5, p6, p7/0.5d0, 0.87890594d0, 0.51498869d0, 0.15084934d0, 0.2658733d-1, 0.301532d-2, 0.32411d-3/ DATA q1, q2, q3, q4, q5, q6, q7, q8, q9/0.39894228d0, -0.3988024d-1, -0.362018d-2, 0.163801d-2, -0.1031555d-1, & & 0.2282967d-1, -0.2895312d-1, 0.1787654d-1, -0.420059d-2/ if (abs(x) .lt. 3.75D0) then y = (x/3.75D0)**2 bessi1 = x*(p1 + y*(p2 + y*(p3 + y*(p4 + y*(p5 + y*(p6 + y*p7)))))) else ax = abs(x) y = 3.75D0/ax bessi1 = (exp(ax)/sqrt(ax))*(q1 + y*(q2 + y*(q3 + y*(q4 + y*(q5 + y*(q6 + y*(q7 + y*(q8 + y*q9)))))))) if (x .lt. 0.) bessi1 = -bessi1 end if return END FUNCTION bessi1 !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Free the memory used by the fields module !--------------------------------------------------------------------------- SUBROUTINE clean_fields Use bsplines USE basic, ONLY: rhs INTEGER:: i do i = 1, nrank(1)*nrank(2) call omp_destroy_lock(mu_lock(i)) end do DEALLOCATE (mu_lock) DEALLOCATE (matcoef) DEALLOCATE (pot) DEALLOCATE (rhs) DEALLOCATE (loc_rhs) DEALLOCATE (loc_moments) DEALLOCATE (phi_spline) DEALLOCATE (Br, Bz) DEALLOCATE (Er, Ez) DEALLOCATE (vec1, vec2) Call DESTROY_SP(splrz) Call DESTROY_SP(splrz_ext) END SUBROUTINE clean_fields SUBROUTINE updt_sploc(arow, j, val) ! ! Update element j of row arow or insert it in an increasing "index" ! USE sparse TYPE(sprow), TARGET :: arow INTEGER, INTENT(in) :: j DOUBLE PRECISION, INTENT(in) :: val ! TYPE(elt), TARGET :: pre_root TYPE(elt), POINTER :: t, p ! pre_root%next => arow%row0 ! pre_root is linked to the head of the list. t => pre_root DO WHILE (ASSOCIATED(t%next)) p => t%next IF (p%index .EQ. j) THEN p%val = p%val + val RETURN END IF IF (p%index .GT. j) EXIT t => t%next END DO ALLOCATE (p) p = elt(j, val, t%next) t%next => p ! arow%nnz = arow%nnz + 1 arow%row0 => pre_root%next ! In case the head is altered END SUBROUTINE updt_sploc SUBROUTINE updt_ppform2d(sp,c) use bsplines TYPE(spline2d), INTENT(inout) :: sp DOUBLE PRECISION, DIMENSION(:,:), INTENT(in) :: c !DOUBLE PRECISION, ALLOCATABLE :: work(:,:,:) INTEGER:: m,mm INTEGER :: d1, d2, k1, k2, n1, n2 d1 = sp%sp1%dim d2 = sp%sp2%dim k1 = sp%sp1%order k2 = sp%sp2%order n1 = sp%sp1%nints n2 = sp%sp2%nints !ALLOCATE(work(d2,k1,n1)) !$OMP DO DO m=1,SIZE(c,2) CALL topp0(sp%sp1, c(:,m), ppformwork(m,:,:)) END DO !$OMP END DO NOWAIT !$OMP SINGLE IF( ASSOCIATED(sp%ppform) ) DEALLOCATE(sp%ppform) ALLOCATE(sp%ppform(k1,n1,k2,n2)) !$OMP END SINGLE !$OMP DO DO mm=1,SIZE(ppformwork,3) DO m=1,SIZE(ppformwork,2) CALL topp0(sp%sp2, ppformwork(:,m,mm), sp%ppform(m,mm,:,:)) END DO END DO !$OMP END DO !DEALLOCATE(work) end subroutine updt_ppform2d !=========================================================================== SUBROUTINE topp0(sp, c, ppform) ! ! Compute PPFORM of a fuction defined by the spline SP ! 
! and spline coefficients C(1:d)
!
   use bsplines
   TYPE(spline1d), INTENT(in) :: sp
   DOUBLE PRECISION, INTENT(in) :: c(:)
   DOUBLE PRECISION, INTENT(out) :: ppform(0:,:)
   INTEGER :: p, nints, i, j, k
!
   p = sp%order - 1
   nints = sp%nints
!
   ppform = 0.0d0
   DO i=1,nints ! on each knot interval
      DO j=1,p+1 ! all splines in interval i
         DO k=0,p ! k_th derivatives
            ppform(k,i) = ppform(k,i) + sp%val0(k,j,i)*c(j+i-1)
         END DO
      END DO
   END DO
!
END SUBROUTINE topp0
!+
END MODULE fields
diff --git a/src/particletypes_mod.f90 b/src/particletypes_mod.f90
index aa30982..add711d 100644
--- a/src/particletypes_mod.f90
+++ b/src/particletypes_mod.f90
@@ -1,605 +1,611 @@
!------------------------------------------------------------------------------
! EPFL/Swiss Plasma Center
!------------------------------------------------------------------------------
!
! MODULE: particletypes
!
!> @author
!> Guillaume Le Bars EPFL/SPC
!> Patryk Kaminski EPFL/SPC
!> Trach Minh Tran EPFL/SPC
!
! DESCRIPTION:
!> Module responsible for defining the particle types and providing subroutines to change their size,
!> initialize them or delete them
!------------------------------------------------------------------------------
MODULE particletypes
   USE constants
!
   IMPLICIT NONE

!> Stores the particle properties for the run.
   TYPE particles
      INTEGER :: Nploc !< Local number of simulated particles
      INTEGER :: Nptot !< Total number of simulated particles
      INTEGER :: Newindex !< Stores the highest partindex, used for the creation of new particles
      REAL(kind=db) :: m !< Particle mass
      REAL(kind=db) :: q !< Particle charge
      REAL(kind=db) :: weight !< Number of particles represented by one macro-particle
      REAL(kind=db) :: qmRatio !< Charge over mass ratio
      REAL(kind=db) :: nudcol(3) !< Effective momentum drag frequency
      REAL(kind=db) :: H0
      REAL(kind=db) :: P0
      REAL(kind=db) :: temperature
      LOGICAL :: Davidson=.false.
      LOGICAL :: is_test= .false. !< determines if the particle is saved on ittracer
      LOGICAL :: is_field= .true. !< determines if the particle contributes to the Poisson solver
      LOGICAL :: calc_moments=.false.
      INTEGER, allocatable :: nblost(:) !< number of particles lost at the domain boundaries at the current timestep
      INTEGER :: nbadded !< number of particles added by the source since the last gather
      INTEGER, DIMENSION(2) :: nbcolls !< number of particle collisions with neutrals (ionisation, elastic)
      INTEGER, DIMENSION(:), ALLOCATABLE :: Rindex !< Index in the electric potential grid for the R direction
      INTEGER, DIMENSION(:), ALLOCATABLE :: Zindex !< Index in the electric potential grid for the Z direction
      INTEGER, DIMENSION(:), ALLOCATABLE :: partindex !< Index of the particle to be able to follow it when it goes from one MPI host to the other
      INTEGER :: iiee_id=-1 !< Index defining whether or not ion-induced electron emission is considered
      INTEGER :: neuttype_id=1 !< Index defining which type of neutral gas is used to produce the ions
      INTEGER :: material_id=1 !< Index defining the type of material for the electrodes (1=304SS)
      LOGICAL :: zero_vel=.true. !< Defines whether or not the electrons are generated with an initial
velocity
      !REAL(kind=db), DIMENSION(:), ALLOCATABLE :: Z !< axial coordinates of the particles
      REAL(kind=db), DIMENSION(:,:), ALLOCATABLE :: pos !< (radial,azimuthal,longitudinal) coordinates of the particles
      !REAL(kind=db), DIMENSION(:), ALLOCATABLE :: THET !< azimuthal coordinates of the particles
      REAL(kind=db), DIMENSION(:,:), ALLOCATABLE :: B !< radial and axial magnetic field
      REAL(kind=db), DIMENSION(:), ALLOCATABLE :: pot !< Electric potential
      REAL(kind=db), DIMENSION(:), ALLOCATABLE :: potxt !< External electric potential
      REAL(kind=db), DIMENSION(:,:), ALLOCATABLE :: E !< Radial and axial electric field
      REAL(kind=db), DIMENSION(:,:), CONTIGUOUS, POINTER:: U !< normalized (radial, azimuthal, axial) velocity at the current time step
      REAL(kind=db), DIMENSION(:,:), CONTIGUOUS, POINTER:: Uold !< normalized (radial, azimuthal, axial) velocity at the previous time step
-      !REAL(kind=db), DIMENSION(:), CONTIGUOUS, POINTER:: UTHET !< normalized azimuthal velocity at the current time step
-      !REAL(kind=db), DIMENSION(:), CONTIGUOUS, POINTER:: UTHETold !< normalized azimuthal velocity at the previous time step
-      !REAL(kind=db), DIMENSION(:), CONTIGUOUS, POINTER:: UZ !< normalized axial velocity at the current time step
-      !REAL(kind=db), DIMENSION(:), CONTIGUOUS, POINTER:: UZold !< normalized axial velocity at the previous time step
      REAL(kind=db), DIMENSION(:), CONTIGUOUS, POINTER:: Gamma !< Lorentz factor at the current time step
      REAL(kind=db), DIMENSION(:), CONTIGUOUS, POINTER:: Gammaold !< Lorentz factor at the previous time step
      Real(kind=db), Dimension(:,:),ALLOCATABLE:: geomweight !< geometric weight at the particle position
      Real(kind=db), Dimension(:,:),ALLOCATABLE:: moments !< stores the moment matrix
      INTEGER, DIMENSION(:), ALLOCATABLE :: losthole
      INTEGER, DIMENSION(:), ALLOCATABLE :: sendhole
      INTEGER:: nbsendandlost(3)=0
      LOGICAL:: collected !< Stores whether the particle data have been collected on the MPI root process during this timestep
      INTEGER, DIMENSION(:), ALLOCATABLE:: addedlist
   END TYPE particles

!> Structure containing a single particle position and velocity used in MPI communications.
   TYPE particle
      INTEGER :: partindex =0
      REAL(kind=db) :: Pos(3) =0
      REAL(kind=db) :: U(3) =0
      REAL(kind=db) :: Gamma =0
      REAL(kind=db) :: pot =0
   END TYPE particle

   TYPE linked_part
      type(particle) p
      type(linked_part), POINTER:: next=> NULL()
      type(linked_part), POINTER:: prev=> NULL()
   END TYPE linked_part

   TYPE linked_part_row
      INTEGER :: n = 0
      type(linked_part), POINTER:: start=>NULL()
      type(linked_part), POINTER:: end=>NULL()
   END TYPE linked_part_row

CONTAINS

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Allocate the memory for the particles variable storing the particle quantities.
!
!> @param[inout] p the particles variable needing to be allocated.
!> @param[in] nparts the maximum number of particles that will be stored in this variable
!---------------------------------------------------------------------------
SUBROUTINE creat_parts(p, nparts)
   TYPE(particles) :: p
   INTEGER, INTENT(in) :: nparts

   IF (.NOT. ALLOCATED(p%pos) ) THEN
      p%Nploc = nparts
      p%Nptot = nparts
      ALLOCATE(p%pos(3,nparts))
      !ALLOCATE(p%R(nparts))
      !ALLOCATE(p%THET(nparts))
      ALLOCATE(p%B(2,nparts))
      ALLOCATE(p%U(3,nparts))
      !ALLOCATE(p%UZ(nparts))
      !ALLOCATE(p%UTHET(nparts))
      ALLOCATE(p%Uold(3,nparts))
      !ALLOCATE(p%UZold(nparts))
      !ALLOCATE(p%UTHETold(nparts))
      ALLOCATE(p%Gamma(nparts))
      ALLOCATE(p%Rindex(nparts))
      ALLOCATE(p%Zindex(nparts))
      ALLOCATE(p%partindex(nparts))
      ALLOCATE(p%pot(nparts))
      ALLOCATE(p%potxt(nparts))
      ALLOCATE(p%E(2,nparts))
      ALLOCATE(p%GAMMAold(nparts))
      Allocate(p%geomweight(0:2,nparts))
      Allocate(p%losthole(nparts))
      Allocate(p%sendhole(nparts))
      if(.not.allocated(p%nblost)) allocate(p%nblost(4))
      p%newindex=0
      p%nblost=0
      p%nbadded=0
      p%partindex=-1
      p%Uold=0
      !p%UZold=0
      !p%UTHETold=0
      p%iiee_id=-1
      p%neuttype_id=1
      p%material_id=1
      p%zero_vel=.true.
      p%rindex=0
      p%zindex=0
      p%B=0
      p%U=0
      !p%UZ=0
      !p%UTHET=0
      p%pos=0
      !p%R=0
      !p%THET=0
      p%Gamma=1
      p%E=0
      p%pot=0
      p%potxt=0
      p%gammaold=1
      p%collected=.false.
      p%Davidson=.false.
      p%is_test=.false.
      p%is_field=.true.
      p%calc_moments=.true.
      p%m=me
      p%q=-elchar
      p%qmRatio=p%q/p%m
      p%weight=1.0_db
      p%H0=0
      p%P0=0
      p%temperature=0
      p%geomweight=0
      p%losthole=0
      p%sendhole=0
   END IF
END SUBROUTINE creat_parts
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy one particle from the receive buffers to the local simulation variable parts.
!
!> @param [inout] p local particles variable receiving the particle
!> @param [in] part particle parameters to copy from
!> @param [in] partsindex destination particle index in the local parts variable
!---------------------------------------------------------------------------
SUBROUTINE Insertincomingpart(p, part, partsindex)
   TYPE(particles), INTENT(INOUT):: p
   INTEGER, INTENT(in) :: partsindex
   TYPE(particle), INTENT(in) :: part
   p%partindex(partsindex) = part%partindex
   p%pos(1:3,partsindex) = part%Pos(1:3)
   !p%THET(partsindex) = part%Pos(2)
   !p%Z(partsindex) = part%Pos(3)
   !p%UZ(partsindex) = part%U(3)
   p%U(1:3,partsindex) = part%U(1:3)
   !p%UTHET(partsindex) = part%U(2)
   p%Gamma(partsindex) = part%Gamma
   p%pot(partsindex) = part%pot
!
END SUBROUTINE Insertincomingpart
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy one particle from the local parts variable to the send buffer.
!
!> @param [inout] buffer send buffer to copy to
!> @param [in] bufferindex particle index in the send buffer
!> @param [in] partsindex origin particle index in the local parts variable
!---------------------------------------------------------------------------
SUBROUTINE Insertsentpart(p, buffer, bufferindex, partsindex)
   TYPE(particles), INTENT(INOUT):: p
   INTEGER, INTENT(in) :: bufferindex, partsindex
   TYPE(particle), DIMENSION(:), INTENT(inout) :: buffer
   buffer(bufferindex)%partindex = p%partindex(partsindex)
   buffer(bufferindex)%Pos(1:3) = p%pos(1:3,partsindex)
   !buffer(bufferindex)%Pos(2) = p%THET(partsindex)
   !buffer(bufferindex)%Pos(3) = p%Z(partsindex)
   !buffer(bufferindex)%U(3) = p%UZ(partsindex)
   buffer(bufferindex)%U(1:3) = p%U(1:3,partsindex)
   !buffer(bufferindex)%U(2) = p%UTHET(partsindex)
   buffer(bufferindex)%Gamma = p%Gamma(partsindex)
   buffer(bufferindex)%pot = p%pot(partsindex)
!
END SUBROUTINE Insertsentpart
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Exchange two particles in the parts variable.
!
!> @param [inout] p the particles structure containing both particles
!> @param [in] index1 index in parts of the first particle to exchange.
!> @param [in] index2 index in parts of the second particle to exchange.
!---------------------------------------------------------------------------
SUBROUTINE exchange_parts(p, index1, index2)
   TYPE(particles), INTENT(INOUT):: p
   INTEGER, INTENT(IN) :: index1, index2
   REAL(kind=db):: pos(3), U(3), Gamma, geomweight(0:2), pot
   INTEGER :: Rindex, Zindex, partindex
   !! Exchange particle at index1 with particle at index2

   ! Store part at index1 in temporary values
   partindex = p%partindex(index1)
   Gamma = p%Gamma(index1)
   pot = p%pot(index1)
   pos = p%pos(:,index1)
   !Z = p%Z(index1)
   !THET = p%THET(index1)
   U = p%U(:,index1)
   !UTHET = p%UTHET(index1)
   !UZ = p%UZ(index1)
   Rindex = p%Rindex(index1)
   Zindex = p%Zindex(index1)
   geomweight = p%geomweight(:,index1)

   ! Move part at index2 into part at index1
   p%partindex(index1) = p%partindex(index2)
   p%Gamma(index1) = p%Gamma(index2)
   p%pot(index1) = p%pot(index2)
   p%pos(:,index1) = p%pos(:,index2)
   !p%Z(index1) = p%Z(index2)
   !p%THET(index1) = p%THET(index2)
   p%U(:,index1) = p%U(:,index2)
   !p%UTHET(index1) = p%UTHET(index2)
   !p%UZ(index1) = p%UZ(index2)
   p%Rindex(index1) = p%Rindex(index2)
   p%Zindex(index1) = p%Zindex(index2)
   p%geomweight(:,index1) = p%geomweight(:,index2)

   ! Move temporary values from part(index1) to part(index2)
   p%partindex(index2) = partindex
   p%Gamma(index2) = Gamma
   p%pot(index2) = pot
   p%pos(:,index2) = pos
   !p%Z(index2) = Z
   !p%THET(index2) = THET
   p%U(:,index2) = U
   !p%UTHET(index2) = UTHET
   !p%UZ(index2) = UZ
   p%Rindex(index2) = Rindex
   p%Zindex(index2) = Zindex
   p%geomweight(:,index2) = geomweight
END SUBROUTINE exchange_parts

SUBROUTINE change_parts_allocation(p, sizedifference)
   implicit none
   TYPE(particles), INTENT(INOUT):: p
   INTEGER,INTENT(IN) :: sizedifference
   CALL change_array_size_int(p%Rindex, sizedifference)
   CALL change_array_size_int(p%Zindex, sizedifference)
   CALL change_array_size_int(p%partindex, sizedifference)
   CALL change_array_size_int(p%losthole, sizedifference)
   CALL change_array_size_int(p%sendhole, sizedifference)
   CALL change_array_size_dp12(p%E,sizedifference)
   CALL change_array_size_dp(p%pot,sizedifference)
   CALL change_array_size_dp(p%potxt,sizedifference)
   !CALL change_array_size_dp(p%R,sizedifference)
   CALL change_array_size_dp12(p%pos,sizedifference)
   !CALL change_array_size_dp(p%THET,sizedifference)
   CALL change_array_size_dp12(p%B,sizedifference)
   CALL change_array_size_dp012(p%geomweight,sizedifference)
   CALL change_array_size_dp_ptr12(p%U,sizedifference)
   CALL change_array_size_dp_ptr12(p%Uold,sizedifference)
   !CALL change_array_size_dp_ptr(p%UZ,sizedifference)
   !CALL change_array_size_dp_ptr(p%UZold,sizedifference)
   !CALL change_array_size_dp_ptr(p%UTHET,sizedifference)
   !CALL change_array_size_dp_ptr(p%UTHETold,sizedifference)
   CALL change_array_size_dp_ptr(p%Gamma,sizedifference)
   CALL change_array_size_dp_ptr(p%Gammaold,sizedifference)
   p%Nploc=MIN(p%Nploc,size(p%pos,2))
END SUBROUTINE change_parts_allocation

SUBROUTINE change_array_size_dp(arr, sizedifference)
   implicit none
   REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:)
   INTEGER, INTENT(IN):: sizedifference
   REAL(kind=db), ALLOCATABLE:: temp(:)
-   INTEGER:: current_size, new_size
+   INTEGER:: current_size, new_size, i
   if(allocated(arr)) THEN
      current_size=size(arr)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size))
-      temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size))
+      Do i=1,min(current_size,new_size)
+         temp(i)=arr(i)
+      end do
      DEALLOCATE(arr)
      CALL move_alloc(temp, arr)
   END IF
END SUBROUTINE change_array_size_dp

SUBROUTINE change_array_size_dp2(arr, sizedifference)
   implicit none
   REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:,:)
   INTEGER, INTENT(IN):: sizedifference
   REAL(kind=db), ALLOCATABLE:: temp(:,:)
-   INTEGER:: current_size, new_size
+   INTEGER:: current_size, new_size, i
   if(allocated(arr)) THEN
      current_size=size(arr,1)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size,0:size(arr,2)-1))
-      temp(1:min(current_size,new_size),:)=arr(1:min(current_size,new_size),:)
+      Do i=1,min(current_size,new_size)
+         temp(i,:)=arr(i,:)
+      end do
      DEALLOCATE(arr)
      CALL move_alloc(temp, arr)
   END IF
END SUBROUTINE change_array_size_dp2

SUBROUTINE change_array_size_dp12(arr, sizedifference)
   implicit none
   REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:,:)
   INTEGER, INTENT(IN):: sizedifference
   REAL(kind=db), ALLOCATABLE:: temp(:,:)
-   INTEGER:: current_size, new_size
+   INTEGER:: current_size, new_size, i
   if(allocated(arr)) THEN
      current_size=size(arr,2)
      new_size=current_size+sizedifference
      ALLOCATE(temp(size(arr,1),new_size))
-      temp(:,1:min(current_size,new_size))=arr(:,1:min(current_size,new_size))
+      Do i=1,min(current_size,new_size)
+         temp(:,i)=arr(:,i)
+      end do
      DEALLOCATE(arr)
      CALL move_alloc(temp, arr)
   END IF
END SUBROUTINE change_array_size_dp12

SUBROUTINE change_array_size_dp012(arr, sizedifference)
   implicit none
   REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:,:)
   INTEGER, INTENT(IN):: sizedifference
   REAL(kind=db), ALLOCATABLE:: temp(:,:)
-   INTEGER:: current_size, new_size
+   INTEGER:: current_size, new_size, i
   if(allocated(arr)) THEN
      current_size=size(arr,2)
      new_size=current_size+sizedifference
      ALLOCATE(temp(0:size(arr,1)-1,new_size))
-      temp(:,1:min(current_size,new_size))=arr(:,1:min(current_size,new_size))
+      Do i=1,min(current_size,new_size)
+         temp(:,i)=arr(:,i)
+      end do
      DEALLOCATE(arr)
      CALL move_alloc(temp, arr)
   END IF
END SUBROUTINE change_array_size_dp012

SUBROUTINE change_array_size_dp_ptr12(arr, sizedifference)
   implicit none
   REAL(kind=db), POINTER, INTENT(INOUT):: arr(:,:)
   INTEGER, INTENT(IN):: sizedifference
   REAL(kind=db), POINTER:: temp(:,:)
   INTEGER:: current_size, new_size, i
   if(associated(arr)) THEN
      current_size=size(arr,2)
      new_size=current_size+sizedifference
      ALLOCATE(temp(size(arr,1),new_size))
      !WRITE(*,*)"increase pointer size: ", current_size, new_size, "temp: ", size(temp,1),size(temp,2), "arr: ", size(arr,1),size(arr,2)
      Do i=1,min(current_size,new_size)
         temp(:,i)=arr(:,i)
      end do
      !WRITE(*,*)"copy done"
      DEALLOCATE(arr)
      arr=> temp
   END IF
END SUBROUTINE change_array_size_dp_ptr12

SUBROUTINE change_array_size_dp_ptr(arr, sizedifference)
   implicit none
   REAL(kind=db), POINTER, INTENT(INOUT):: arr(:)
   INTEGER, INTENT(IN):: sizedifference
   REAL(kind=db), CONTIGUOUS, POINTER:: temp(:)
   INTEGER:: current_size, new_size,i
   if(associated(arr)) THEN
      current_size=size(arr)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size))
      Do i=1,min(current_size,new_size)
         temp(i)=arr(i)
      end do
      !temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size))
      DEALLOCATE(arr)
      arr=> temp
   END IF
END SUBROUTINE change_array_size_dp_ptr

SUBROUTINE change_array_size_int(arr, sizedifference)
   implicit none
   INTEGER, ALLOCATABLE, INTENT(INOUT):: arr(:)
   INTEGER, INTENT(IN):: sizedifference
   INTEGER, ALLOCATABLE:: temp(:)
-   INTEGER:: current_size, new_size
+   INTEGER:: current_size, new_size,i
   if(allocated(arr)) THEN
      current_size=size(arr)
      new_size=current_size+sizedifference
      ALLOCATE(temp(new_size))
-      temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size))
+      Do i=1,min(current_size,new_size)
+         temp(i)=arr(i)
+      end do
      DEALLOCATE(arr)
      CALL move_alloc(temp,arr)
   END IF
END SUBROUTINE change_array_size_int
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Move particle with index sourceindex to particle with index destindex.
!> !WARNING! This will overwrite particle at destindex.
!
!> @param [in] sourceindex index in parts of the particle to move.
!> @param [in] destindex index in parts of the moved particle destination.
!---------------------------------------------------------------------------
SUBROUTINE move_part(p, sourceindex, destindex)
   !! This will destroy particle at destindex
   INTEGER, INTENT(IN) :: destindex, sourceindex
   TYPE(particles), INTENT(INOUT)::p

   IF(sourceindex .eq. destindex) RETURN
   IF(sourceindex .le. 0 .or. destindex .le. 0) RETURN
   ! Move part at sourceindex into part at destindex
   Call copy_part(p,sourceindex,destindex,p)
END SUBROUTINE move_part
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Copy particle with index sourceindex in particles sourcep to particle with index destindex in particles destp.
!> !WARNING! This will overwrite particle at destp(destindex).
!
!> @param [in] sourcep Structure of source particles.
!> @param [in] sourceindex index in sourcep of the particle to copy.
!> @param [in] destindex index in destp of the copied particle destination.
!> @param [inout] destp Structure of destination particles.
!---------------------------------------------------------------------------
SUBROUTINE copy_part(sourcep, sourceindex, destindex, destp)
   !! This will destroy particle at destindex
   INTEGER, INTENT(IN) :: destindex, sourceindex
   TYPE(particles), INTENT(IN)::sourcep
   TYPE(particles), INTENT(INOUT)::destp

   IF(sourceindex .le. 0 .or. destindex .le. 0) RETURN
   IF( destindex .gt. size(destp%pos,2)) RETURN
   ! Move part at sourceindex into part at destindex
   destp%partindex(destindex) = sourcep%partindex(sourceindex)
   destp%Gamma(destindex) = sourcep%Gamma(sourceindex)
   destp%Gammaold(destindex) = sourcep%Gammaold(sourceindex)
   destp%pos(:,destindex) = sourcep%pos(:,sourceindex)
   !destp%Z(destindex) = sourcep%Z(sourceindex)
   !destp%THET(destindex) = sourcep%THET(sourceindex)
   destp%U(:,destindex) = sourcep%U(:,sourceindex)
   !destp%UTHET(destindex) = sourcep%UTHET(sourceindex)
   !destp%UZ(destindex) = sourcep%UZ(sourceindex)
   destp%Uold(:,destindex) = sourcep%Uold(:,sourceindex)
   !destp%UTHETold(destindex) = sourcep%UTHETold(sourceindex)
   !destp%UZold(destindex) = sourcep%UZold(sourceindex)
   destp%Rindex(destindex) = sourcep%Rindex(sourceindex)
   destp%Zindex(destindex) = sourcep%Zindex(sourceindex)
   destp%geomweight(:,destindex) = sourcep%geomweight(:,sourceindex)
   destp%pot(destindex) = sourcep%pot(sourceindex)
   destp%potxt(destindex) = sourcep%potxt(sourceindex)
END SUBROUTINE copy_part
!________________________________________________________________________________
SUBROUTINE destroy_parts(p)
   TYPE(particles) :: p
   p%Nploc=0
   IF(ALLOCATED(p%pos)) DEALLOCATE(p%pos)
   !IF(ALLOCATED(p%R)) DEALLOCATE(p%R)
   !IF(ALLOCATED(p%THET)) DEALLOCATE(p%THET)
   IF(ALLOCATED(p%B)) DEALLOCATE(p%B)
   IF(ALLOCATED(p%E)) DEALLOCATE(p%E)
   IF(ASSOCIATED(p%U)) DEALLOCATE(p%U)
   IF(Associated(p%Uold)) DEALLOCATE(p%Uold)
   !IF(Associated(p%UZ)) DEALLOCATE(p%UZ)
   !IF(Associated(p%UZold)) DEALLOCATE(p%UZold)
   !IF(Associated(p%UTHET)) DEALLOCATE(p%UTHET)
   !IF(Associated(p%UTHETold)) DEALLOCATE(p%UTHETold)
   IF(Associated(p%Gamma)) DEALLOCATE(p%Gamma)
   IF(Associated(p%Gammaold)) DEALLOCATE(p%Gammaold)
   IF(ALLOCATED(p%Rindex)) DEALLOCATE(p%Rindex)
   IF(ALLOCATED(p%Zindex)) DEALLOCATE(p%Zindex)
   IF(ALLOCATED(p%losthole)) DEALLOCATE(p%losthole)
   IF(ALLOCATED(p%sendhole)) DEALLOCATE(p%sendhole)
   IF(ALLOCATED(p%partindex)) DEALLOCATE(p%partindex)
   if(allocated(p%geomweight)) Deallocate(p%geomweight)
   if(allocated(p%moments)) Deallocate(p%moments)
END SUBROUTINE
!________________________________________________________________________________
SUBROUTINE clean_beam(partslist)
!
   INTEGER:: i
   type(particles):: partslist(:)
   Do i=1,size(partslist,1)
      CALL destroy_parts(partslist(i))
   END DO
!
END SUBROUTINE clean_beam
!________________________________________________________________________________
SUBROUTINE swappointer( pointer1, pointer2)
   REAL(kind=db), DIMENSION(:), POINTER, INTENT(inout):: pointer1, pointer2
   REAL(kind=db), DIMENSION(:), POINTER:: temppointer
   temppointer=>pointer1
   pointer1=>pointer2
   pointer2=>temppointer
END SUBROUTINE swappointer

SUBROUTINE swappointer2( pointer1, pointer2)
   REAL(kind=db), DIMENSION(:,:), POINTER, INTENT(inout):: pointer1, pointer2
   REAL(kind=db), DIMENSION(:,:), POINTER:: temppointer
   temppointer=>pointer1
   pointer1=>pointer2
   pointer2=>temppointer
END SUBROUTINE swappointer2
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Deallocate recursively a linked_part linked list
!
!> @param [in] l_p linked_part particle to be deallocated.
!---------------------------------------------------------------------------
RECURSIVE SUBROUTINE destroy_linked_parts(l_p)
   TYPE(linked_part), POINTER :: l_p

   IF(associated(l_p%next)) call destroy_linked_parts(l_p%next)
   deallocate(l_p)
END subroutine destroy_linked_parts
!--------------------------------------------------------------------------
!> @author
!> S.Guinchard EPFL/SPC
!> Last modified on: 11/15/2022
!
! DESCRIPTION:
!> Function giving the kinetic energy of the particle at index partind
!
!-------------------------------------------------------------------------
REAL(KIND=db) FUNCTION eKin_part(p, partind)
   TYPE(particles), INTENT(INOUT):: p
   INTEGER, INTENT(IN) :: partind
   eKin_part = 0.5*p%m*(p%U(1,partind)**2 + p%U(2,partind)**2 + p%U(3,partind)**2)
END FUNCTION eKin_part

END MODULE particletypes
diff --git a/src/stepon.f90 b/src/stepon.f90
index dc6ea67..e5a63f7 100644
--- a/src/stepon.f90
+++ b/src/stepon.f90
@@ -1,128 +1,128 @@
SUBROUTINE stepon
!
! Advance one time step
!
   USE basic
   USE constants
   USE fields
   USE beam
   USE maxwsrce
   USE celldiag
   USE neutcol
   USE sort
   Use psupply
   use omp_lib
   IMPLICIT NONE
   INTEGER:: i

   DO i=1,nbspecies
      ! Boundary conditions for plasma particles outside the plasma region
      CALL bound(partslist(i))
   END DO
!$OMP BARRIER
   DO i=1,nbspecies
      ! Localisation of particles in cells (calculation of the r and z indices)
      call boundary_loss(partslist(i))
   END DO
!$OMP BARRIER
   ! Cell diag quantities
   IF(modulo(step,itcelldiag).eq. 0 .or. nlend) THEN
      CALL celldiag_save(time, fidres)
   END IF
   ! We compute collisions on the main particles
   IF(modulo(step,itcol).eq. 0) THEN
      CALL neutcol_step(partslist)
   END IF
!$OMP BARRIER
!$OMP SINGLE
   ! The particles are injected by the source
   CALL maxwsrce_inject(time)
!$OMP END SINGLE
!$OMP BARRIER
   ! Sort particles for faster rhscon run time
   ! DO i=1,nbspecies
   !    IF(modulo(step,it2d) .eq. 0) THEN
   !       CALL gridsort(partslist(i),1,partslist(i)%Nploc)
   !    END IF
   ! END DO
+   ! update the power supply voltage if necessary
+   call psupply_step(the_ps,partslist,cstep)
+
   ! Assemble right hand side of Poisson equation
   CALL rhscon(partslist)
   if (.not. nlfreezephi) THEN
      ! Solve Poisson equation
      CALL poisson(splrz)
   end if
   DO i=1,nbspecies
      ! Compute the electric field at the particle position
      CALL EFieldscompatparts(partslist(i))
      ! Compute the magnetic field at the particle position
      call comp_mag_p(partslist(i))
   END DO
!$OMP BARRIER
   DO i=1,nbspecies
      ! Solve Newton eq. and advance velocity by delta t
      CALL comp_velocity(partslist(i))
!$OMP SINGLE
      ! Compute the energy of added particles
      CALL calc_newparts_energy(partslist(i))
!$OMP END SINGLE NOWAIT
   END DO
!$OMP BARRIER
   ! Calculate main physical quantities
   CALL partdiagnostics
   IF (modulo(step,it2d).eq. 0 .or. nlend) THEN
      Do i=1,nbspecies
         if(partslist(i)%calc_moments) CALL momentsdiag(partslist(i))
      End do
   END IF
-
-   ! update the power supply voltage if necessary
-   call psupply_step(the_ps,partslist,cstep)
!$OMP MASTER
   ! Save variables to file
   CALL diagnose(step)
!$OMP END MASTER
   IF (modulo(step,itparts).eq. 0 .or. modulo(step,ittracer).eq. 0 .or. modulo(step,itrestart).eq. 0 .or. nlend) THEN
!$OMP BARRIER
   END IF
   Do i=1,nbspecies
      ! Calculate new positions of particles at time t+delta t
      CALL push(partslist(i))
   END DO
!$OMP MASTER
   ! We recalculate the MPI axial boundaries and adapt them if necessary
   IF(modulo(step,100) .eq. 0) THEN
      CALL calc_Zbounds(partslist(1),Zbounds, femorder)
      CALL fields_comm_init(Zbounds)
      CALL maxwsrce_calcfreq(Zbounds)
   END IF
!$OMP END MASTER
!$omp barrier
END SUBROUTINE stepon
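!---------------------------------------------------------------------------
! Editor's illustrative sketch (not part of the patch above): the else-branch
! of load_mag_from_h5 reconstructs B from the vector potential through
! B = curl(A_thet e_thet) in cylindrical coordinates, i.e.
!    Br = -dA_thet/dz    and    Bz = (1/r) d(r A_thet)/dr = dA_thet/dr + A_thet/r.
! The minimal, self-contained program below shows that relation on
! hypothetical sample values standing in for the spline-evaluated
! derivatives; all names (demo_b_from_a, athet, dadr, dadz) are invented
! for the example.
!---------------------------------------------------------------------------
program demo_b_from_a
   implicit none
   integer, parameter :: db = kind(1.0d0)
   real(kind=db) :: athet = 2.0e-3_db  ! A_thet at a sample point (r,z)
   real(kind=db) :: dadr  = 1.0e-3_db  ! dA_thet/dr at that point
   real(kind=db) :: dadz  = 5.0e-4_db  ! dA_thet/dz at that point
   real(kind=db) :: r     = 5.0e-2_db  ! radial position, away from the axis
   real(kind=db) :: br, bz
   br = -dadz            ! Br = -dA_thet/dz
   bz = dadr + athet/r   ! Bz = dA_thet/dr + A_thet/r
   print *, 'Br =', br, ' Bz =', bz
end program demo_b_from_a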
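!---------------------------------------------------------------------------
! Editor's illustrative sketch (not part of the patch above): the
! change_array_size_* family in particletypes_mod.f90 follows one pattern,
! made explicit here: allocate a temporary with the new size, copy the
! overlapping elements element by element, then hand ownership back with
! move_alloc. The program below is a minimal, runnable rendition of that
! pattern for a 1D array; demo_resize and its local names are invented for
! the example.
!---------------------------------------------------------------------------
program demo_resize
   implicit none
   integer, parameter :: db = kind(1.0d0)
   real(kind=db), allocatable :: arr(:), temp(:)
   integer :: i, current_size, new_size
   allocate(arr(4))
   arr = [(real(i, db), i=1, 4)]
   current_size = size(arr)
   new_size = current_size + 3   ! a negative difference would shrink instead
   allocate(temp(new_size))
   temp = 0
   do i = 1, min(current_size, new_size)
      temp(i) = arr(i)           ! copy only the overlapping part
   end do
   call move_alloc(temp, arr)    ! temp is deallocated, arr now has the new size
   print *, size(arr), arr
end program demo_resize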