diff --git a/src/Makefile b/src/Makefile index f19f5a3..05e80ac 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1,101 +1,109 @@ .DEFAULT_GOAL := all ifeq ($(PLATFORM),) $(error Please specify the env variable PLATFORM (mac, intel)) else $(info *** Using $(PLATFORM).mk ***) include $(PLATFORM).mk endif include .depend PROG = espic2d SRCS = main.f90 basic_mod.f90 newrun.f90 restart.f90 \ auxval.f90 inital.f90 resume.f90 start.f90 diagnose.f90 \ stepon.f90 tesend.f90 endrun.f90 chkrst.f90 mv2bk.f90 \ constants.f90 fields_mod.f90 beam_mod.f90 \ mpihelper_mod.f90 sort_mod.f90 distrib_mod.f90 \ - maxwsrce_mod.f90 celldiag_mod.f90 geometry_mod.f90 + maxwsrce_mod.f90 celldiag_mod.f90 geometry_mod.f90 \ + random_mod.f90 SRCS_C = extra.c +SRCS_F = random.f randother.f MKDIR_P = mkdir -p OUT_DIR = release F90FLAGS += -I$(BSPLINES)/include -I$(FUTILS)/include \ -I$(MUMPS)/include -I../ LDFLAGS += -L$(BSPLINES)/lib -L$(FUTILS)/lib -L${HDF5}/lib -L${HDF5}/lib \ -L$(MUMPS)/lib -L$(PARMETIS)/lib LIBS += -lbsplines -lpppack -lfutils -lhdf5_fortran -lhdf5 -lz $(MUMPSLIBS) -lpputils2 ifeq ($(USE_X),) F90FLAGS+=-DUSE_X=0 else $(info *** Using Xgrafix ***) LIBS+=-lXGF -lXGC -lX11 LDFLAGS+=-L/usr/local/xgrafix_1.2/src-double F90FLAGS+=-DUSE_X=1 SRCS+=xg_mod.f90 endif -OBJS =${SRCS:.f90=.o} ${SRCS_F90:.F90=.o} ${SRCS_C:.c=.o} +OBJS =${SRCS:.f90=.o} ${SRCS_F90:.F90=.o} ${SRCS_C:.c=.o} ${SRCS_F:.f=.o} FPP =${SRCS:.f90=.i90} OBJS_ =$(addprefix ./$(OUT_DIR)/,$(OBJS)) debug: F90FLAGS = $(DEBUGFLAGS) -I$(BSPLINES)/include -I$(FUTILS)/include \ -I$(MUMPS)/include -D_DEBUG debug: OUT_DIR=debug debug: $(OUT_DIR) all profile: F90FLAGS+=$(PROFILEFLAGS) profile: LDFLAGS+= $(PROFILEFLAGS) profile: OUT_DIR=profile profile: $(OUT_DIR) all .PHONY: directories all: directories $(PROG) $(PROG): $(OBJS) $(F90) $(LDFLAGS) $(F90FLAGS) -o $@ $(OBJS_) $(LIBS) tags: etags *.f90 clean: - rm -f $(OBJS) *.mod $(FPP) + rm -f $(OBJS_) *.mod $(FPP) distclean: clean rm -f $(PROG) *~ a.out *.o TAGS ifeq ($(USE_X),) -diagnose.o : diagnose.f90 fields_mod.o beam_mod.o basic_mod.o +./$(OUT_DIR)/diagnose.o : diagnose.f90 fields_mod.o beam_mod.o basic_mod.o else -diagnose.o : diagnose.f90 fields_mod.o xg_mod.o beam_mod.o basic_mod.o +./$(OUT_DIR)/diagnose.o : diagnose.f90 fields_mod.o xg_mod.o beam_mod.o basic_mod.o endif directories: ${OUT_DIR} ${OUT_DIR}: ${MKDIR_P} ${OUT_DIR} -.SUFFIXES: $(SUFFIXES) .f90 .c +.SUFFIXES: $(SUFFIXES) .f90 .c .f $(OUT_DIR)/%.o: %.f90 $(F90) $(F90FLAGS) -MD -c -o $@ $< +$(OUT_DIR)/%.o: %.f + $(F90) $(F90FLAGS) -MD -c -o $@ $< + $(OUT_DIR)/%.o: %.c $(CC) $(CCFLAGS) -c -o $@ $< %.o: %.f90 $(F90) $(F90FLAGS) -c -o ./$(OUT_DIR)/$@ $< +%.o: %.f + $(F90) $(F90FLAGS) -c -o ./$(OUT_DIR)/$@ $< + %.o: %.c $(CC) $(CCFLAGS) -c -o ./$(OUT_DIR)/$@ $< depend .depend .depend_rel .depend_deb : makedepf90 *.[fF]90 > .depend diff --git a/src/auxval.f90 b/src/auxval.f90 index 8170a4a..079ad97 100644 --- a/src/auxval.f90 +++ b/src/auxval.f90 @@ -1,128 +1,146 @@ SUBROUTINE auxval ! USE constants USE basic USE fields USE beam + USE random + USE omp_lib ! ! Set auxiliary values ! IMPLICIT NONE ! - INTEGER:: i + INTEGER:: i, nbprocs !________________________________________________________________________________ IF(mpirank .eq. 0) WRITE(*,'(a/)') '=== Set auxiliary values ===' !________________________________________________________________________________ ! ! Total number of intervals nr=sum(nnr) ! Normalisation constants if(nplasma .gt. 
0) then qsim=pi*(plasmadim(2)-plasmadim(1))*(plasmadim(4)**2-plasmadim(3)**2)*n0*elchar/nplasma else qsim=sign(n0,elchar) end if msim=abs(qsim)/elchar*partmass vnorm=vlight omegac=sign(elchar,qsim)/partmass*B0 omegap=sqrt(elchar**2*abs(n0)/partmass/eps_0) tnorm=min(abs(1/omegac),abs(1/omegap)) rnorm=vnorm*tnorm bnorm=B0 enorm=vlight*bnorm phinorm=enorm*rnorm ! Normalised boundary conditions potinn=potinn/phinorm potout=potout/phinorm ! Characteristic frequencies and normalised volume IF(mpirank .eq. 0) THEN IF(abs(omegap).GT. abs(omegac)) THEN WRITE(*,'(a,3(1pe12.3))') 'omegap, omegac, omegap/omegac', omegap, omegac, omegap/omegac ELSE WRITE(*,'(a,3(1pe12.3))') 'omegap, omegac, omegac/omegap', omegap, omegac, omegac/omegap END IF END IF ! Construction of the mesh rgrid in r and zgrid in z and its normalisation CALL mesh rgrid=rgrid/rnorm zgrid=zgrid/rnorm dz=dz/rnorm dr=dr/rnorm invdr=1/dr invdz=1/dz ! Define Zindex bounds for the MPI process in the case of sequential run ALLOCATE( Zbounds(0:mpisize), Nplocs_all(0:mpisize-1)) Zbounds=0 Zbounds(mpisize) = nz IF(mpisize.gt.1) THEN ! If we use mpi we define the left and right nodes used for communications leftproc=mpirank-1 IF(mpirank .eq. 0 .AND. partperiodic) THEN leftproc = mpisize-1 ELSE IF (mpirank .eq. 0) THEN leftproc=-1 END IF rightproc = mpirank+1 IF(mpirank .eq. mpisize-1 .AND. partperiodic) THEN rightproc = 0 ELSE IF (mpirank .eq. mpisize-1) THEN rightproc=-1 END IF ELSE - leftproc=-1 - rightproc=-1 + leftproc=-1 + rightproc=-1 END IF WRITE(*,*) "mpirank, left, right", mpirank, leftproc, rightproc ! Auxiliary vectors ALLOCATE(vec1((nz+1)*(nr+1)),vec2((nr+1)*(nz+1))) DO i=0,nr vec1(i*(nz+1)+1:(i+1)*(nz+1))=zgrid!(0:nz) vec2(i*(nz+1)+1:(i+1)*(nz+1))=rgrid(i) END DO +! Initialize random number generator +nbprocs = omp_get_max_threads() +allocate(seed(ran_s,nbprocs), ran_index(nbprocs), ran_array(ran_k,nbprocs)) + +Do i=1,nbprocs +! Generate seed from the default seed-string in random module + CALL decimal_to_seed(random_seed_str, seed(:,i)) + +! Generate a different seed for each processor from the mother seed + + CALL next_seed(mpirank*nbprocs+i,seed(:,i)) + +! Initialize the random array (first hundred numbers) + + CALL random_init(seed(:,i), ran_index(i), ran_array(:,i)) +end do !________________________________________________________________________________ CONTAINS !--------------------------------------------------------------------------- !> @author !> Patryk Kaminski EPFL/SPC ! ! DESCRIPTION: !> !> @brief Creates the mesh in r and z direction for calculating the electric and magnetic fields. !--------------------------------------------------------------------------- SUBROUTINE mesh USE basic, ONLY: zgrid, rgrid, nr, nnr, nz, dr, dz, lz, radii INTEGER :: j ALLOCATE(zgrid(0:nz),rgrid(0:nr)) dz=(lz(2)-lz(1))/nz DO j=0,nz zgrid(j)=j*dz+lz(1) END DO dr(1)=(radii(2)-radii(1))/nnr(1) DO j=0,nnr(1) rgrid(j)=radii(1)+j*dr(1) END DO dr(2)=(radii(3)-radii(2))/nnr(2) DO j=1,nnr(2) rgrid(nnr(1)+j)=radii(2)+j*dr(2) END DO dr(3)=(radii(4)-radii(3))/nnr(3) DO j=1,nnr(3) rgrid(nnr(1)+nnr(2)+j)=radii(3)+j*dr(3) END DO END SUBROUTINE mesh !________________________________________________________________________________ END SUBROUTINE auxval diff --git a/src/basic_mod.f90 b/src/basic_mod.f90 index aef01ca..0310bcc 100644 --- a/src/basic_mod.f90 +++ b/src/basic_mod.f90 @@ -1,340 +1,298 @@ MODULE basic ! USE hashtable USE constants USE bsplines USE mumps_bsplines USE futils USE mpihelper IMPLICIT NONE ! ! 
Basic module for time dependent problems ! CHARACTER(len=128) :: label1, label2, label3, label4 ! ! BASIC Namelist ! LOGICAL :: nlres = .FALSE. !< Restart flag LOGICAL :: nlsave = .TRUE. !< Checkpoint (save) flag LOGICAL :: newres=.FALSE. !< New result HDF5 file LOGICAL :: nlxg=.FALSE. !< Show graphical interface Xgrafix LOGICAL :: nlmaxwellsource = .FALSE. !< Activate the maxwell source INTEGER :: nrun=1 !< Number of time steps to run REAL(kind=db) :: job_time=3600.0 !< Time allocated to this job in seconds REAL(kind=db) :: tmax=100000.0 !< Maximum simulation time REAL(kind=db) :: extra_time=60.0 !< Extra time allocated REAL(kind=db) :: dt=1 !< Time step REAL(kind=db) :: time=0 !< Current simulation time (Init from restart file) ! ! Other basic global vars and arrays ! INTEGER :: jobnum !< Job number INTEGER :: step !< Calculation step of this run INTEGER :: cstep=0 !< Current step number (Init from restart file) LOGICAL :: nlend !< Signal end of run INTEGER :: ierr !< Integer used for MPI INTEGER :: it0d=1 !< Number of iterations between 0d values writes to hdf5 INTEGER :: it2d=100 !< Number of iterations between 2d values writes to hdf5 INTEGER :: itparts=1000 !< Number of iterations between particles values writes to hdf5 INTEGER :: ittext=10 !< Number of iterations between text outputs in the console INTEGER :: itrestart=10000 !< Number of iterations between save of restart.h5 file INTEGER :: ittracer=100 !< Number of iterations between save of traced particles position and velocity INTEGER :: itcelldiag=100000 !< Number of iterations between save of celldiag diagnostic INTEGER :: nbcelldiag=0 !< Number of celldiagnostics INTEGER :: itgraph !< Number of iterations between graphical interface updates INTEGER :: mpirank !< MPIrank of the current processus INTEGER :: mpisize !< Size of the MPI_COMM_WORLD communicator INTEGER :: rightproc !< Rank of next processor in the z decomposition INTEGER :: leftproc !< Rank of previous processor in the z decomposition ! ! List of logical file units INTEGER :: lu_in = 90 !< File duplicated from STDIN INTEGER :: lu_stop = 91 !< stop file, see subroutine TESEND INTEGER :: lu_partfile = 120 !< particle loading file, see beam::loadpartfile ! ! HDF5 file CHARACTER(len=256) :: resfile = "results.h5" !< Main result file CHARACTER(len=256) :: rstfile = "restart.h5" !< Restart file CHARACTER(len=256) :: magnetfile = "" !< H5 file containing the magnetic field definition CHARACTER(len=256) :: partfile(10)="" !< Particle loading file + CHARACTER(len=256) :: addedtestspecfile(10)="" !< Particle file list for added particles at restart INTEGER :: fidres !< File ID for resfile INTEGER :: fidrst !< File ID for restart file TYPE(BUFFER_TYPE) :: hbuf0 !< Hashtable for 0d var ! ! Plasma parameters LOGICAL :: nlPhis= .TRUE. !< Calculate self consistent electric field flag LOGICAL :: nlfreezephi= .FALSE. !< Freeze the Poisson solver to the field obtained at (re-)start LOGICAL :: nlclassical= .FALSE. !< If true, solves the equation of motion according to classical !! dynamics LOGICAL :: nlperiod(2)=(/.false.,.false./)!< Set periodic splines on or off LOGICAL :: partperiodic= .TRUE. 
!< Sets if the particles boundary conditions are periodic or open + INTEGER :: nbaddtestspecies=0 !< On restart number of files to read to add test particles INTEGER :: nplasma !< Number of macro-particles on initialisation INTEGER :: nbspecies = 1 !< Number of particles species also counting tracing particles INTEGER :: npartsalloc = 0 !< Size of particle memory allocated at the begining of the simulation - INTEGER :: nblock !< Number of slices in Z for stable distribution initialisation + INTEGER :: nblock !< Number of slices in Z for stable distribution initialisation REAL(kind=db) :: potinn !< Electric potential at the inner metallic wall REAL(kind=db) :: potout !< Electric potential at the outer metallic wall REAL(kind=db) :: B0 !< Max magnitude of magnetic field REAL(kind=db), allocatable :: Bz(:), Br(:) !< Magnetic field components REAL(kind=db), allocatable :: Athet(:) !< Theta component of the magnetic vector potential TYPE(spline2d), SAVE :: splrz !< Spline at r and z for total electric field TYPE(spline2d), SAVE :: splrz_ext !< Spline at r and z for external electric field REAL(kind=db), allocatable :: Ez(:), Er(:) !< Electric field components REAL(kind=db), allocatable :: pot(:) !< Electro static potential REAL(kind=db) :: radii(4) !< Inner and outer radius of cylinder and radii of fine mesh region REAL(kind=db) :: plasmadim(4) !< Zmin Zmax Rmin Rmax values for plasma particle loading - INTEGER :: distribtype=1 !< Type of distribution function used to load the particles - !!1: gaussian, 2: Stable as defined in 4.95 of Davidson + INTEGER :: distribtype=1 !< Type of distribution function used to load the particles + !!1: gaussian, 2: Stable as defined in 4.95 of Davidson REAL(kind=db) :: H0=0 !< Initial value of Hamiltonian for distribution 2 REAL(kind=db) :: P0=0 !< Initial canonical angular momentum for distribution 2 REAL(kind=db) :: temprescale = -1.0 !< Factor used for temperature rescaling in case of a restart (<0 -> no rescaling) - INTEGER :: samplefactor =-1 !< Factor used for the up-sampling of the particles number - REAL(kind=db) :: lz(2) !< Lower and upper cylinder limits in z direction - REAL(kind=db) :: n0 !< Physical plasma density parameter + INTEGER :: samplefactor =-1 !< Factor used for the up-sampling of the particles number + REAL(kind=db) :: lz(2) !< Lower and upper cylinder limits in z direction + REAL(kind=db) :: n0 !< Physical plasma density parameter REAL(kind=db), DIMENSION(:,:), ALLOCATABLE, SAVE:: moments !< Moments of the distribution function evaluated every it2d REAL(kind=db), DIMENSION(:), ALLOCATABLE, SAVE:: rhs !< right hand side of the poisson equation solver REAL(kind=db), DIMENSION(:), ALLOCATABLE, SAVE:: volume !< Volume covered by each spline for density calculation - INTEGER :: nz !< Number of grid intervals in z - INTEGER :: nr !< Total number of grid intervals in r - INTEGER :: nnr(3) !< Number of grid intervals in r in each subdomain + INTEGER :: nz !< Number of grid intervals in z + INTEGER :: nr !< Total number of grid intervals in r + INTEGER :: nnr(3) !< Number of grid intervals in r in each subdomain REAL(kind=db) :: dz !< Cell size in z REAL(kind=db) :: dr(3) !< Cell size in r for each region REAL(kind=db), ALLOCATABLE :: zgrid(:) !< Nodes positions in longitudinal direction REAL(kind=db), ALLOCATABLE :: rgrid(:) !< Nodes positions in radial direction REAL(kind=db) :: bnorm,enorm,vnorm,tnorm,rnorm,phinorm !< Normalization constants REAL(kind=db) :: qsim !< Charge of superparticles REAL(kind=db) :: msim !< Mass of superparticles 
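! Note: the normalisation constants above are fixed in auxval: tnorm=min(1/|omegac|,1/|omegap|),
! rnorm=vnorm*tnorm, enorm=vlight*bnorm and phinorm=enorm*rnorm, so grid positions, fields
! and the potential stored below are all dimensionless.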
REAL(kind=db) :: partmass=me !< Mass of physical particle INTEGER :: femorder(2) !< FEM order INTEGER :: ngauss(2) !< Number of gauss points LOGICAL :: nlppform =.TRUE. !< Argument of set_spline INTEGER, SAVE :: nrank(2) !< Number of splines in both directions REAL(kind=db) :: omegac !< Cyclotronic frequency REAL(kind=db) :: omegap !< Plasma frequency REAL(kind=db) :: temp !< Initial temperature of plasma REAL(kind=db) :: Rcurv !< Magnetic field curvature coefficient REAL(kind=db) :: Width !< Distance between two magnetic mirrors REAL(kind=db) :: weights_scale=1.0 !< Scale factor for the particle weights on restart (only for newres=.true.) INTEGER, DIMENSION(:), ALLOCATABLE :: Zbounds !< Index of bounds for local processus in Z direction REAL(kind=db):: invdz, invdr(3) - - TYPE(BASICDATA) :: bdata !< Structure used for the mpi communication of the run parameters CONTAINS ! !================================================================================ SUBROUTINE basic_data ! ! Define basic data ! use mpihelper IMPLICIT NONE ! ! Local vars and arrays CHARACTER(len=256) :: inputfilename INTEGER, EXTERNAL :: OMP_GET_MAX_THREADS ! NAMELIST /BASIC/ job_time, extra_time, nrun, tmax, dt, nlres, nlsave, newres, nlxg, & & nplasma, potinn, potout, B0, lz, n0, nz, nnr, femorder, ngauss, & & nlppform, plasmadim, radii, temp, Rcurv, width, it0d, it2d, itparts, ittext, & & resfile, rstfile, itgraph, nlPhis, distribtype, nblock, nlclassical, H0, P0, partperiodic, & & temprescale, samplefactor, nlmaxwellsource, npartsalloc, partfile, partmass, nbspecies, & - & ittracer, itcelldiag, nbcelldiag, magnetfile, weights_scale, nlfreezephi + & ittracer, itcelldiag, nbcelldiag, magnetfile, weights_scale, nlfreezephi, nbaddtestspecies, & + & addedtestspecfile !________________________________________________________________________________ ! 1. Process Standard Input File ! IF(COMMAND_ARGUMENT_COUNT().NE.1)THEN WRITE(*,*)'ERROR, ONE COMMAND-LINE ARGUMENT REQUIRED, STOPPING' STOP ENDIF CALL GET_COMMAND_ARGUMENT(1,inputfilename) OPEN(UNIT=lu_in,FILE=trim(inputfilename),ACTION='READ') IF(mpirank .eq. 0) THEN !________________________________________________________________________________ ! 1. Label the run ! READ(lu_in,'(a)') label1 READ(lu_in,'(a)') label2 READ(lu_in,'(a)') label3 READ(lu_in,'(a)') label4 ! WRITE(*,'(12x,a/)') label1(1:len_trim(label1)) WRITE(*,'(12x,a/)') label2(1:len_trim(label2)) WRITE(*,'(12x,a/)') label3(1:len_trim(label3)) WRITE(*,'(12x,a/)') label4(1:len_trim(label4)) !________________________________________________________________________________ ! 2. Read in basic data specific to run ! READ(lu_in,basic) WRITE(*,basic) #if _DEBUG==1 WRITE(*,*) "Compiled in debug mode" #endif ELSE READ(lu_in,basic) END IF ! Distribute run parameters to all MPI workers CALL init_mpitypes ! initialize all mpi types that will be needed in the simulation WRITE(*,'(a,i4.2,a,i4.2,a)')"Running on ",mpisize," tasks with", omp_get_max_threads() ," openMP threads" IF(samplefactor .gt. 1 .and. .not. newres) THEN IF(mpirank.eq.0) WRITE(*,*)"To increase the number of particles, you need to create a new result file (set newres to 1)" CALL MPI_abort(MPI_COMM_WORLD,-1,ierr) END IF IF (npartsalloc .lt. nplasma) THEN npartsalloc=nplasma END IF ! END SUBROUTINE basic_data !================================================================================ SUBROUTINE daytim(str) ! ! Print date and time ! IMPLICIT NONE ! CHARACTER(len=*), INTENT(in) :: str ! ! 
Local vars and arrays CHARACTER(len=16) :: d, t, dat, functime !________________________________________________________________________________ ! CALL DATE_AND_TIME(d,t) dat=d(7:8) // '/' // d(5:6) // '/' // d(1:4) functime=t(1:2) // ':' // t(3:4) // ':' // t(5:10) WRITE(*,'(a,1x,a,1x,a)') str, dat(1:10), functime(1:12) ! END SUBROUTINE daytim !================================================================================ SUBROUTINE timera(cntrl, str, eltime) ! ! Timers (cntrl=0/1 to Init/Update) ! IMPLICIT NONE INTEGER, INTENT(in) :: cntrl CHARACTER(len=*), INTENT(in) :: str REAL(kind=db), OPTIONAL, INTENT(out) :: eltime ! INTEGER, PARAMETER :: ncmax=128 INTEGER, SAVE :: icall=0, nc=0 REAL(kind=db), SAVE :: startt0=0.0 CHARACTER(len=16), SAVE :: which(ncmax) INTEGER :: lstr, found, i REAL(kind=db) :: seconds REAL(kind=db), DIMENSION(ncmax), SAVE :: startt = 0.0, endt = 0.0 !________________________________________________________________________________ IF( icall .EQ. 0 ) THEN icall = icall+1 startt0 = seconds() END IF lstr = LEN_TRIM(str) IF( lstr .GT. 0 ) found = loc(str) !________________________________________________________________________________ ! SELECT CASE (cntrl) ! CASE(-1) ! Current wall time IF( PRESENT(eltime) ) THEN eltime = seconds() - startt0 ELSE WRITE(*,'(/a,a,1pe10.3/)') "++ ", ' Wall time used so far = ', seconds() - startt0 END IF ! CASE(0) ! Init Timer IF( found .EQ. 0 ) THEN ! Called for the 1st time for 'str' nc = nc+1 which(nc) = str(1:lstr) found = nc END IF startt(found) = seconds() ! CASE(1) ! Update timer endt(found) = seconds() - startt(found) IF( PRESENT(eltime) ) THEN eltime = endt(found) ELSE WRITE(*,'(/a,a,1pe10.3/)') "++ "//str, ' wall clock time = ', endt(found) END IF ! CASE(2) ! Update and reset timer endt(found) = endt(found) + seconds() - startt(found) startt(found) = seconds() IF( PRESENT(eltime) ) THEN eltime = endt(found) END IF ! CASE(9) ! Display all timers IF( nc .GT. 0 ) THEN WRITE(*,'(a)') "Timer Summary" WRITE(*,'(a)') "=============" DO i=1,nc WRITE(*,'(a20,2x,2(1pe12.3))') TRIM(which(i))//":", endt(i) END DO END IF ! END SELECT ! CONTAINS INTEGER FUNCTION loc(funcstr) CHARACTER(len=*), INTENT(in) :: funcstr INTEGER :: j, ind loc = 0 DO j=1,nc ind = INDEX(which(j), funcstr(1:lstr)) IF( ind .GT. 0 .AND. LEN_TRIM(which(j)) .EQ. 
lstr ) THEN loc = j EXIT END IF END DO END FUNCTION loc END SUBROUTINE timera -!================================================================================ -SUBROUTINE readbdata -nlres = bdata%nlres -nlsave = bdata%nlsave -newres = bdata%newres -nlxg = bdata%nlxg -nlppform = bdata%nlppform -nlPhis = bdata%nlPhis -nlclassical = bdata%nlclassical -nplasma = bdata%nplasma -nz = bdata%nz -it0d = bdata%it0d -it2d = bdata%it2d -itparts = bdata%itparts -itgraph = bdata%itgraph -distribtype = bdata%distribtype -nblock = bdata%nblock -nrun = bdata%nrun -job_time = bdata%job_time -extra_time = bdata%extra_time -tmax = bdata%tmax -dt = bdata%dt -potinn = bdata%potinn -potout = bdata%potout -B0 = bdata%B0 -n0 = bdata%n0 -temp = bdata%temp -Rcurv = bdata%Rcurv -width = bdata%width -H0 = bdata%H0 -P0 = bdata%P0 -femorder = bdata%femorder -ngauss = bdata%ngauss -nnr = bdata%nnr -lz = bdata%lz -radii = bdata%radii -plasmadim = bdata%plasmadim -resfile = bdata%resfile -partperiodic = bdata%partperiodic -samplefactor = bdata%samplefactor - -END SUBROUTINE readbdata - !================================================================================ END MODULE basic diff --git a/src/beam_mod.f90 b/src/beam_mod.f90 index a02596f..9af23c2 100644 --- a/src/beam_mod.f90 +++ b/src/beam_mod.f90 @@ -1,2107 +1,2216 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: beam ! !> @author !> Guillaume Le Bars EPFL/SPC !> Patryk Kaminski EPFL/SPC !> Trach Minh Tran EPFL/SPC ! ! DESCRIPTION: !> Module responsible for loading, advancing and computing the necessary diagnostics for the simulated particles. !------------------------------------------------------------------------------ MODULE beam ! USE constants USE mpi USE mpihelper USE basic, ONLY: mpirank, mpisize USE distrib IMPLICIT NONE !> Stores the particles properties for the run. TYPE particles INTEGER :: Nploc !< Local number of simulated particles INTEGER :: Nptot !< Total number of simulated particles INTEGER :: Newindex !< Stores the higher partindex for the creation of new particles REAL(kind=db) :: m !< Particle mass REAL(kind=db) :: q !< Particle charge REAL(kind=db) :: weight !< Number of particles represented by one macro-particle REAL(kind=db) :: qmRatio !< Charge over mass ratio REAL(kind=db) :: H0 REAL(kind=db) :: P0 REAL(kind=db) :: temperature LOGICAL :: Davidson=.false. LOGICAL :: is_test= .false. 
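! Note (inferred from usage in this module): H0 and P0 mirror basic's H0/P0, i.e. the
! Hamiltonian and canonical angular momentum of the Davidson equilibrium; Davidson flags
! such an equilibrium load, and is_test marks a tracer (test-particle) species.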
INTEGER, DIMENSION(4) :: nblost !< number of particles lost in (z_min,z_max,r_min,r_max) since last gather
INTEGER :: nbadded !< number of particles added by source since last gather
INTEGER, DIMENSION(:), ALLOCATABLE :: Rindex !< Index in the electric potential grid for the R direction
INTEGER, DIMENSION(:), ALLOCATABLE :: Zindex !< Index in the electric potential grid for the Z direction
INTEGER, DIMENSION(:), ALLOCATABLE :: partindex !< Index of the particle, used to follow it when it moves from one MPI host to another
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: R !< radial coordinates of the particles
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: Z !< longitudinal coordinates of the particles
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: THET !< azimuthal coordinates of the particles
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: BZ !< axial magnetic field interpolated at the particle position
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: BR !< radial magnetic field interpolated at the particle position
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: pot !< Electric potential
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: potxt !< External electric potential
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: Er !< Radial Electric field
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: Ez !< Axial electric field
REAL(kind=db), DIMENSION(:),POINTER:: UR !< normalized radial velocity at the current time step
REAL(kind=db), DIMENSION(:),POINTER:: URold !< normalized radial velocity at the previous time step
REAL(kind=db), DIMENSION(:),POINTER:: UTHET !< normalized azimuthal velocity at the current time step
REAL(kind=db), DIMENSION(:),POINTER:: UTHETold !< normalized azimuthal velocity at the previous time step
REAL(kind=db), DIMENSION(:),POINTER:: UZ !< normalized axial velocity at the current time step
REAL(kind=db), DIMENSION(:),POINTER:: UZold !< normalized axial velocity at the previous time step
REAL(kind=db), DIMENSION(:),POINTER:: Gamma !< Lorentz factor at the current time step
REAL(kind=db), DIMENSION(:),POINTER:: Gammaold !< Lorentz factor at the previous time step
Real(kind=db), Dimension(:,:),ALLOCATABLE:: geomweight !< geometric weight at the particle position
LOGICAL:: collected !< Stores whether the particle data has been collected on the MPI root process during this timestep
INTEGER, DIMENSION(:), ALLOCATABLE:: addedlist
END TYPE particles
+ TYPE linked_part
+ type(particle) p
+ type(linked_part), POINTER:: next=> NULL()
+ END TYPE linked_part
+
+ TYPE linked_part_row
+ INTEGER :: n = 0
+ type(linked_part), POINTER:: start=>NULL()
+ END TYPE linked_part_row
+
!
!TYPE(particles) :: parts !< Storage for all the particles
!SAVE :: parts
TYPE(particles), DIMENSION(:), ALLOCATABLE, SAVE :: partslist
! Diagnostics (scalars)
REAL(kind=db) :: ekin=0 !< Total kinetic energy (J)
REAL(kind=db) :: epot=0 !< Total potential energy (J)
REAL(kind=db) :: etot=0 !< Current total energy (J)
REAL(kind=db) :: etot0=0 !< Initial total energy (J)
REAL(kind=db) :: loc_etot0=0 !< theoretical local total energy (J)
REAL(kind=db) :: Energies(4) !< (1) kinetic energy, (2) potential energy, (3) total energy and (4) energy gained/lost due to gain or loss of particles (J)
!
INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: Nplocs_all !< Array containing the local number of particles on each MPI process
+ INTERFACE add_created_part
+ MODULE PROCEDURE add_linked_created_part, add_list_created_part
+ END INTERFACE add_created_part
!
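! Hedged illustration (not part of this patch): the linked_part types above form a
! singly-linked list of created particles; a new node would be prepended to a row as
! sketched below. The local names newnode, row and mypart are hypothetical.
! TYPE(linked_part), POINTER:: newnode
! ALLOCATE(newnode)
! newnode%p = mypart ! copy the particle payload
! newnode%next => row%start ! link to the existing chain
! row%start => newnode ! the new node becomes the head
! row%n = row%n + 1 ! keep the row counter consistent
!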
CONTAINS !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Allocate the memory for the particles variable storing the particles quantities. ! !> @param[inout] p the particles variable needing to be allocated. !> @param[in] nparts the maximum number of particles that will be stored in this variable !--------------------------------------------------------------------------- SUBROUTINE creat_parts(p, nparts) TYPE(particles) :: p INTEGER, INTENT(in) :: nparts IF (.NOT. ALLOCATED(p%Z) ) THEN p%Nploc = nparts p%Nptot = nparts ALLOCATE(p%Z(nparts)) ALLOCATE(p%R(nparts)) ALLOCATE(p%THET(nparts)) ALLOCATE(p%BZ(nparts)) ALLOCATE(p%BR(nparts)) ALLOCATE(p%UR(nparts)) ALLOCATE(p%UZ(nparts)) ALLOCATE(p%UTHET(nparts)) ALLOCATE(p%URold(nparts)) ALLOCATE(p%UZold(nparts)) ALLOCATE(p%UTHETold(nparts)) ALLOCATE(p%Gamma(nparts)) ALLOCATE(p%Rindex(nparts)) ALLOCATE(p%Zindex(nparts)) ALLOCATE(p%partindex(nparts)) ALLOCATE(p%pot(nparts)) ALLOCATE(p%potxt(nparts)) ALLOCATE(p%Er(nparts)) ALLOCATE(p%Ez(nparts)) ALLOCATE(p%GAMMAold(nparts)) Allocate(p%geomweight(nparts,0:2)) p%newindex=0 p%nblost=0 p%nbadded=0 p%partindex=-1 p%URold=0 p%UZold=0 p%UTHETold=0 p%rindex=0 p%zindex=0 p%BR=0 p%BZ=0 p%UR=0 p%UZ=0 p%UTHET=0 p%Z=0 p%R=0 p%THET=0 p%Gamma=1 p%Er=0 p%Ez=0 p%pot=0 p%potxt=0 p%gammaold=1 p%collected=.false. p%Davidson=.false. p%is_test=.false. p%m=me p%q=-elchar p%qmRatio=p%q/p%m p%weight=1.0_db p%H0=0 p%P0=0 p%temperature=0 p%geomweight=0 END IF END SUBROUTINE creat_parts !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief Loads the particles at the beginning of the simulation and create the parts variable if necessary !--------------------------------------------------------------------------- SUBROUTINE load_parts - USE basic, ONLY: nplasma, mpirank, ierr, distribtype, mpisize, nlclassical, nbspecies, Zbounds + USE basic, ONLY: nplasma, mpirank, ierr, distribtype, mpisize, nlclassical, nbspecies, Zbounds, partfile USE mpi - INTEGER:: i, j + INTEGER:: i REAL(kind=db), DIMENSION(:), ALLOCATABLE :: VZ, VR, VTHET ALLOCATE(VZ(nplasma), VR(nplasma), VTHET(nplasma)) ! Select case to define the type of distribution SELECT CASE(distribtype) CASE(1) ! Gaussian distribution in V, uniform in Z and 1/R in R CALL loaduniformRZ(partslist(1), VR, VZ, VTHET) CASE(2) !Stable distribution from Davidson 4.95 p.119 CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodunir) CASE(3) !Stable distribution from Davidson 4.95 p.119 but with constant distribution in R CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodinvr) CASE(4) !Stable distribution from Davidson 4.95 p.119 but with gaussian distribution in R CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodgausr) CASE(5) !Stable distribution from Davidson 4.95 p.119 with gaussian in V computed from v_th given by temp CALL loadDavidson(partslist(1), VR, VZ, VTHET, lodunir) CASE(6) ! Uniform distribution in R and Z and Gaussian distribution in V with Vz @brief Checks for each particle if the z position is outside of the local/global simulation space. !> Depending on the boundary conditions, the leaving particles are sent to the correct neighbouring MPI process !> or deleted. ! !> @param[in] p particles structure ! 
!> @author Guillaume Le Bars EPFL/SPC
!---------------------------------------------------------------------------
SUBROUTINE bound(p)
USE basic, ONLY: zgrid, nz, Zbounds, mpirank, step, leftproc, rightproc, partperiodic
USE IFPORT
IMPLICIT NONE
type(particles), INTENT(INOUT):: p
INTEGER :: i, rsendnbparts, lsendnbparts, nblostparts
- INTEGER :: receivednbparts, partdiff, lostindex, sentindex
+ INTEGER :: receivednbparts, partdiff
INTEGER, DIMENSION(p%Nploc) :: sendhole
INTEGER, DIMENSION(p%Nploc) :: losthole
- LOGICAL:: leftcomm, rightcomm, sent
+ LOGICAL:: leftcomm, rightcomm
INTEGER, ALLOCATABLE:: partstoremove(:)
rsendnbparts=0
lsendnbparts=0
nblostparts=0
losthole=0
sendhole=0
receivednbparts=0
! We communicate with the left process
leftcomm = leftproc .ne. -1
! We communicate with the right process
rightcomm = rightproc .ne. -1
IF (p%Nploc .gt. 0) THEN
! Boundary condition in the z direction
!$OMP PARALLEL DO DEFAULT(SHARED)
DO i=1,p%Nploc
! If the particle is to the right of the local simulation space, it is sent to the right MPI process
IF (p%Z(i) .ge. zgrid(Zbounds(mpirank+1))) THEN
IF(partperiodic) THEN
DO WHILE (p%Z(i) .GT. zgrid(nz))
p%Z(i) = p%Z(i) - zgrid(nz) + zgrid(0)
END DO
END IF
!$OMP CRITICAL (nbparts)
IF(rightcomm) THEN
rsendnbparts=rsendnbparts+1
sendhole(lsendnbparts+rsendnbparts)=i
ELSE if(.not. partperiodic) THEN
nblostparts=nblostparts+1
losthole(nblostparts)=i
p%nblost(2)=p%nblost(2)+1
END IF
!$OMP END CRITICAL (nbparts)
! If the particle is to the left of the local simulation space, it is sent to the left MPI process
ELSE IF (p%Z(i) .lt. zgrid(Zbounds(mpirank))) THEN
IF(partperiodic) THEN
DO WHILE (p%Z(i) .LT. zgrid(0))
p%Z(i) = p%Z(i) + zgrid(nz) - zgrid(0)
END DO
END IF
!$OMP CRITICAL (nbparts)
IF(leftcomm) THEN
! We send the particle to the left process
lsendnbparts=lsendnbparts+1
sendhole(lsendnbparts+rsendnbparts)=-i
ELSE if(.not. partperiodic) THEN
! we destroy the particle
nblostparts=nblostparts+1
losthole(nblostparts)=i
p%nblost(1)=p%nblost(1)+1
END IF
!$OMP END CRITICAL (nbparts)
END IF
END DO
!$OMP END PARALLEL DO
END IF
IF(mpisize .gt. 1) THEN
! We send the particles leaving the local simulation space to the closest neighbour
CALL particlescommunication(p, lsendnbparts, rsendnbparts, sendhole, receivednbparts, (/leftproc,rightproc/))
END IF
! If the boundary conditions are not periodic, we delete the corresponding particles
IF(nblostparts .gt. 0 .and. step .ne. 0) THEN
DO i=1,nblostparts
CALL delete_part(p, losthole(i), .false. )
END DO
!WRITE(*,'(i8.2,a,i4.2)') nblostparts, " particles lost in z on process: ", mpirank
END IF
! compute whether we received fewer particles than we sent
partdiff=max(lsendnbparts+rsendnbparts-receivednbparts,0)
IF(nblostparts + partdiff .gt. 0) THEN
ALLOCATE(partstoremove(nblostparts+partdiff))
partstoremove(1:partdiff)=abs(sendhole(receivednbparts+1:receivednbparts+partdiff))
partstoremove(partdiff+1:partdiff+nblostparts)=abs(losthole(1:nblostparts))
call qsort(partstoremove,size(partstoremove),sizeof(partstoremove(1)),compare_int)
! If we received fewer particles than we sent, or lost particles, we fill the remaining holes with the particles from the end of the
!
parts arrays DO i=1,nblostparts+partdiff CALL move_part(p, p%Nploc, partstoremove(i)) p%partindex(p%Nploc)=-1 p%Nploc = p%Nploc-1 END DO END IF CONTAINS INTEGER(2) function compare_int(a,b) INTEGER :: a, b, c c=b-a if(c.ne.0) c=1 compare_int=sign(c,b-a) end function END subroutine bound !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief Compute the grid cell indices for each particle as well as the distance weight Wr, Wz. !> @param[in] p particles structure !--------------------------------------------------------------------------- SUBROUTINE localisation(p) - USE basic, ONLY: zgrid, rgrid, dz, nr, nnr, dr, mpirank + USE basic, ONLY: rgrid, nr Use geometry, ONLY: r_a, geom_weight USE IFPORT type(particles), INTENT(INOUT):: p INTEGER :: i, nblostparts, iend,nbunch INTEGER, DIMENSION(p%Nploc) :: losthole INTEGER, DIMENSION(2):: nblost losthole=0 nblostparts=0 nblost=0 nbunch=64 IF (p%Nploc .gt. 0) THEN !$OMP PARALLEL DEFAULT(SHARED), private(i,iend) !$OMP DO DO i=1,p%Nploc,nbunch ! Avoid segmentation fault by accessing non relevant data iend=min(i+nbunch-1,p%Nploc) call geom_weight(p%Z(i:iend), p%r(i:iend), p%geomweight(i:iend,:)) END DO !$OMP END DO !$OMP DO reduction(+:nblost,nblostparts) DO i=1,p%Nploc if(p%geomweight(i,0).le.0 .or. p%R(i) .ge. rgrid(nr) .or. p%R(i) .le. rgrid(0)) then ! If the particle is outside of the simulation space in the r direction, it is deleted. !!$OMP CRITICAL (lostparts) nblostparts=nblostparts+1 losthole(i)=i !!$OMP END CRITICAL (lostparts) if(p%R(i) .le. r_a) then nblost(1)=nblost(1)+1 else nblost(2)=nblost(2)+1 end if else call p_calc_rzindex(p,i) end if END DO !$OMP END DO !$OMP END PARALLEL IF(nblostparts.gt.0) THEN p%nblost(3:4)=nblost+p%nblost(3:4) call qsort(losthole,p%Nploc,sizeof(losthole(1)),compare_int) DO i=1,nblostparts CALL delete_part(p,losthole(i)) END DO END IF END IF CONTAINS INTEGER(2) function compare_int(a,b) INTEGER :: a, b, c c=b-a if(c.ne.0) c=1 compare_int=sign(c,b-a) end function END SUBROUTINE localisation subroutine p_calc_rzindex(p,i) use basic, only: rgrid,zgrid,invdz,invdr, nnr, nr integer::i type(particles)::p IF (p%R(i) .GT. rgrid(0) .AND. p%R(i) .LT. rgrid(nnr(1))) THEN p%rindex(i)=(p%R(i)-rgrid(0))*invdr(1) ELSE IF(p%R(i) .GE. rgrid(nnr(1)) .AND. p%R(i) .LT. rgrid(nnr(1)+nnr(2))) THEN p%rindex(i)=(p%R(i)-rgrid(nnr(1)))*invdr(2)+nnr(1) ELSE IF(p%R(i) .GE. rgrid(nnr(1)+nnr(2)) .AND. p%R(i) .LT. rgrid(nr)) THEN p%rindex(i)=(p%R(i)-rgrid(nnr(1)+nnr(2)))*invdr(3)+nnr(1)+nnr(2) End if p%zindex(i)=(p%Z(i)-zgrid(0))*invdz end subroutine p_calc_rzindex SUBROUTINE comp_mag_p(p) - USE basic, ONLY: zgrid, rgrid, dz, BZ, BR, nz, invdz + USE basic, ONLY: zgrid, rgrid, BZ, BR, nz, invdz type(particles), INTENT(INOUT):: p - INTEGER :: i, nblostparts + INTEGER :: i Real(kind=db):: WZ,WR INTEGER:: j1,j2,j3,j4 !$OMP PARALLEL DO SIMD DEFAULT(SHARED) Private(J1,J2,J3,J4,WZ,WR) DO i=1,p%Nploc WZ=(p%Z(i)-zgrid(p%zindex(i)))*invdz; WR=(p%R(i)-rgrid(p%rindex(i)))/(rgrid(p%rindex(i)+1)-rgrid(p%rindex(i))); J1=(p%rindex(i))*(nz+1) + p%zindex(i)+1 J2=(p%rindex(i))*(nz+1) + p%zindex(i)+2 J3=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+1 J4=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+2 ! 
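! Note: Bz and Br are stored as flattened 1-D arrays with a stride of (nz+1) per radial
! index, so J1..J4 address the four corner nodes of the grid cell containing particle i,
! while WZ and WR are its normalized offsets inside that cell; the weighted sum below
! combines the four corner values (bilinear interpolation).
!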
Interpolation for magnetic field p%BZ(i)=(1-WZ)*(1-WR)*Bz(J4) & & +WZ*(1-WR)*Bz(J3) & & +(1-WZ)*WR*Bz(J2) & & +WZ*WR*Bz(J1) p%BR(i)=(1-WZ)*(1-WR)*Br(J4) & & +WZ*(1-WR)*Br(J3) & & +(1-WZ)*WR*Br(J2) & & +WZ*WR*Br(J1) END DO !$OMP END PARALLEL DO SIMD end subroutine comp_mag_p !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief General routine to compute the velocities at time t+1. !> This routine allows to treat the classical and relativistic case efficiently from a numerical standpoint, !> by using a pointer to the routine computing gamma. This avoid the nlclassical flag check on each particle. ! !> @param[in] p The particles structure being updated !--------------------------------------------------------------------------- SUBROUTINE comp_velocity(p) ! ! Computes the new velocity of the particles due to Lorentz force ! USE basic, ONLY : nlclassical type(particles), INTENT(INOUT):: p ! Store old Velocities CALL swappointer(p%UZold, p%UZ) CALL swappointer(p%URold, p%UR) CALL swappointer(p%UTHETold, p%UTHET) CALL swappointer(p%Gammaold, p%Gamma) IF (nlclassical) THEN CALL comp_velocity_fun(p, gamma_classical) ELSE CALL comp_velocity_fun(p, gamma_relativistic) END IF END SUBROUTINE comp_velocity !--------------------------------------------------------------------------- !> @author !> Patryk Kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief Routine called by comp_velocity to compute the velocities at time t+1. !> This routine allows to treat the classical and relativistic case efficiently from a numerical standpoint, !> by using the routine computing gamma as an input. This avoid the nlclassical flag check on each particle. ! !> @param[in] gamma the function used to compute the value of the lorentz factor \f$\gamma\f$ !> @param[in] p The particles structure being updated !--------------------------------------------------------------------------- SUBROUTINE comp_velocity_fun(p, gamma) ! ! Computes the new velocity of the particles due to Lorentz force ! - USE basic, ONLY : bnorm, dt, BZ, BR, nz + USE basic, ONLY : bnorm, dt interface REAL(kind=db) FUNCTION gamma(UZ, UR, UTHET) USE constants REAL(kind=db), INTENT(IN):: UR,UZ,UTHET end FUNCTION end interface type(particles), INTENT(INOUT):: p REAL(kind=db) :: tau REAL(kind=db):: BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR, ZBZ2, ZBR2 INTEGER:: J1, J2, J3, J4 INTEGER:: i ! Normalized time increment !tau=omegac/2/omegap*dt/tnorm tau=p%qmRatio*bnorm*0.5*dt IF (p%Nploc .NE. 0) THEN !$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(J1,J2,J3,J4,BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR, ZBZ2, ZBR2) DO i=1,p%Nploc - !J1=(p%rindex(i))*(nz+1) + p%zindex(i)+1 - !J2=(p%rindex(i))*(nz+1) + p%zindex(i)+2 - !J3=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+1 - !J4=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+2 - ! Interpolation for magnetic field - !BRZ=(1-p%WZ(i))*(1-p%WR(i))*Bz(J4) & - !& +p%WZ(i)*(1-p%WR(i))*Bz(J3) & - !& +(1-p%WZ(i))*p%WR(i)*Bz(J2) & - !& +p%WZ(i)*p%WR(i)*Bz(J1) - !BRR=(1-p%WZ(i))*(1-p%WR(i))*Br(J4) & - !& +p%WZ(i)*(1-p%WR(i))*Br(J3) & - !& +(1-p%WZ(i))*p%WR(i)*Br(J2) & - !& +p%WZ(i)*p%WR(i)*Br(J1) ! First half of electric pulse p%UZ(i)=p%UZold(i)+p%Ez(i)*tau p%UR(i)=p%URold(i)+p%ER(i)*tau p%Gamma(i)=gamma(p%UZ(i), p%UR(i), p%UTHETold(i)) ! 
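! Note: this loop is the Boris velocity update; tau = q*bnorm*dt/(2*m), so ZBZ and ZBR
! below are the half-rotation angles, and the magnetic rotation is sandwiched between
! the two half electric-field kicks applied just above and just below.
!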
Rotation along magnetic field - !ZBZ=tau*BRZ/p%Gamma(i) - !ZBR=tau*BRR/p%Gamma(i) ZBZ=tau*p%BZ(i)/p%Gamma(i) ZBR=tau*p%BR(i)/p%Gamma(i) ZPZ=p%UZ(i)-ZBR*p%UTHETold(i) !u'_{z} ZPR=p%UR(i)+ZBZ*p%UTHETold(i) !u'_{r} ZPTHET=p%UTHETold(i)+(ZBR*p%UZ(i)-ZBZ*p%UR(i)) !u'_{theta} SQR=1+ZBZ*ZBZ+ZBR*ZBR ZBZ2=2*ZBZ/SQR ZBR2=2*ZBR/SQR p%UZ(i)=p%UZ(i)-ZBR2*ZPTHET !u+_{z} p%UR(i)=p%UR(i)+ZBZ2*ZPTHET !u+_{r} p%UTHET(i)=p%UTHETold(i)+(ZBR2*ZPZ-ZBZ2*ZPR) !u+_{theta} ! Second half of acceleration p%UZ(i)=p%UZ(i)+p%EZ(i)*tau p%UR(i)=p%UR(i)+p%ER(i)*tau ! Final computation of the Lorentz factor p%Gamma(i)=gamma(p%UZ(i), p%UR(i), p%UTHET(i)) END DO !$OMP END PARALLEL DO SIMD END IF p%collected=.false. END SUBROUTINE comp_velocity_fun !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Routine used to compute the lorentz factor \f$\gamma\f$ in the classical simulations. !> This routine systematically returns 1.0 to treat the system according to classical dynamic. ! !> @param[out] gamma the lorentz factor \f$\gamma\f$ !> @param[in] UZ \f$\gamma\beta_z=\gamma v_z/c\f$ the normalized particle longitudinal velocity !> @param[in] UR \f$\gamma\beta_r=\gamma v_r/c\f$ the normalized particle radial velocity !> @param[in] UTHET \f$\gamma\beta_\theta=\gamma v_\theta/c\f$ the normalized particle azimuthal velocity !--------------------------------------------------------------------------- ELEMENTAL REAL(kind=db) FUNCTION gamma_classical(UZ, UR, UTHET) #if __INTEL_COMPILER > 1700 !$OMP declare simd(gamma_classical) #endif REAL(kind=db), INTENT(IN):: UR,UZ,UTHET gamma_classical=1.0 END FUNCTION gamma_classical !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief Routine used to compute the lorentz factor \f$\gamma\f$ in the relativistic simulations. !> This routine computes the Lorentz factor \f$\gamma=\sqrt{1+\mathbf{\gamma\beta}^2}\f$ ! !> @param[out] gamma the lorentz factor \f$\gamma\f$ !> @param[in] UZ \f$\gamma\beta_z=\gamma v_z/c\f$ the normalized particle longitudinal velocity !> @param[in] UR \f$\gamma\beta_r=\gamma v_r/c\f$ the normalized particle radial velocity !> @param[in] UTHET \f$\gamma\beta_\theta=\gamma v_\theta/c\f$ the normalized particle azimuthal velocity !--------------------------------------------------------------------------- ELEMENTAL REAL(kind=db) FUNCTION gamma_relativistic(UZ, UR, UTHET) #if __INTEL_COMPILER > 1700 !$OMP declare simd(gamma_relativistic) #endif REAL(kind=db), INTENT(IN):: UR,UZ,UTHET gamma_relativistic=sqrt(1+UZ**2+UR**2+UTHET**2) END FUNCTION gamma_relativistic !--------------------------------------------------------------------------- !> @author !> Patryk Kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Computes the particles position at time t+1 !> This routine computes the particles position at time t+1 according to the Bunemann algorithm. ! !> @param[in] p The particles structure being updated !--------------------------------------------------------------------------- SUBROUTINE push(p) Use basic, ONLY: dt, tnorm type(particles), INTENT(INOUT):: p REAL(kind=db):: XP, YP, COSA, SINA, U1, U2, ALPHA INTEGER :: i IF (p%Nploc .NE. 0) THEN !$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(XP, YP, COSA, SINA, U1, U2, ALPHA) DO i=1,p%Nploc ! Local Cartesian coordinates XP=p%R(i)+dt/tnorm*p%UR(i)/p%Gamma(i) YP=dt/tnorm*p%UTHET(i)/p%Gamma(i) ! 
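! Note: XP,YP are the particle's new position in a local Cartesian frame aligned with
! its radius at time t; the radius and rotation angle ALPHA recovered below convert the
! straight-line drift back to cylindrical coordinates and rotate UR,UTHET accordingly.
!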
Conversion to cylindrical coordinates
p%Z(i)=p%Z(i)+dt/tnorm*p%UZ(i)/p%Gamma(i)
p%R(i)=sqrt(XP**2+YP**2)
! Computation of the rotation angle
IF (p%R(i) .EQ. 0) THEN
COSA=1
SINA=0
ALPHA=0
ELSE
COSA=XP/p%R(i)
SINA=YP/p%R(i)
ALPHA=asin(SINA)
END IF
! New azimuthal position
p%THET(i)=MOD(p%THET(i)+ALPHA,2*pi)
! Velocity in rotated reference frame
U1=COSA*p%UR(i)+SINA*p%UTHET(i)
U2=-SINA*p%UR(i)+COSA*p%UTHET(i)
p%UR(i)=U1
p%UTHET(i)=U2
END DO
!$OMP END PARALLEL DO SIMD
END IF
p%collected=.false.
END SUBROUTINE push
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Computes several diagnostic quantities
!> This routine computes the total kinetic and electric potential energy.
!> It keeps track of the reference energy and the number of particles per mpi node.
!
!---------------------------------------------------------------------------
SUBROUTINE diagnostics
!
! Compute energies
!
USE constants, ONLY: vlight
USE basic, ONLY: phinorm, cstep, nlclassical, ierr, step, nlend,&
& itparts
INTEGER:: i
! Reset the quantities
ekin=0
epot=0
etot=0
! Computation of the kinetic and potential energy as well as fluid velocities and density
!$OMP PARALLEL DO SIMD REDUCTION(+:epot, ekin) DEFAULT(SHARED)
DO i=1,partslist(1)%Nploc
! Potential energy
epot=epot+(partslist(1)%pot(i)+partslist(1)%potxt(i))
! Kinetic energy
IF(.not. nlclassical) THEN
ekin=ekin+(partslist(1)%Gamma(i)-1)
ELSE
ekin=ekin+0.5*( partslist(1)%UR(i)**2 &
& + partslist(1)%UZ(i)**2 &
& + partslist(1)%UTHET(i)**2 )
END IF
END DO
!$OMP END PARALLEL DO SIMD
epot=epot*phinorm*0.5*partslist(1)%q*partslist(1)%weight
ekin=ekin*partslist(1)%m*partslist(1)%weight*vlight**2
! Shift to Etot at cstep=1 (not yet valid at cstep=0!)
IF(cstep.EQ. 0) THEN
! Compute the local total energy
loc_etot0 = epot+ekin
etot0=0
END IF
!etot=loc_etot0
! Compute the total energy
etot=epot+ekin
Energies=(/ekin,epot,etot,loc_etot0/)
! The computed energy is sent to the root process
IF(mpisize .gt.1) THEN
IF(mpirank .eq.0 ) THEN
CALL MPI_REDUCE(MPI_IN_PLACE, Energies, 4, db_type, db_sum_op, &
& 0, MPI_COMM_WORLD, ierr)
etot0=etot0+Energies(4)
ekin=Energies(1)
epot=Energies(2)
etot=Energies(3)
ELSE
CALL MPI_REDUCE(Energies, Energies, 4, db_type, db_sum_op, &
& 0, MPI_COMM_WORLD, ierr)
END IF
ELSE
etot0=etot0+loc_etot0
END IF
loc_etot0=0
! Send the local number of particles on each node to the root process
IF(mpisize .gt. 1) THEN
Nplocs_all(mpirank)=partslist(1)%Nploc
IF(mpirank .eq.0 ) THEN
CALL MPI_gather(MPI_IN_PLACE, 1, MPI_INTEGER, Nplocs_all, 1, MPI_INTEGER,&
& 0, MPI_COMM_WORLD, ierr)
partslist(1)%Nptot=sum(Nplocs_all)
ELSE
CALL MPI_gather(Nplocs_all(mpirank), 1, MPI_INTEGER, Nplocs_all, 1, MPI_INTEGER,&
& 0, MPI_COMM_WORLD, ierr)
END IF
ELSE
partslist(1)%Nptot=partslist(1)%Nploc
END IF
! Calls the communications to send the particle data to the root process for the diagnostic file every itparts time steps
IF(mpisize .gt. 1 .and. (modulo(step,itparts) .eq. 0 .or. nlend)) THEN
CALL collectparts(partslist(1))
END IF
end subroutine diagnostics
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief Collect the particle positions and velocities on the root process.
!> If the collection has already been performed at this time step, the routine does nothing.
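!> Note: the per-process counts are first shared with MPI_Allgather, and on the root
!> process the MPI_Gatherv calls use MPI_IN_PLACE, so the root's own particle slice
!> stays where it is and only the remote slices are copied in.
!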
!--------------------------------------------------------------------------- SUBROUTINE collectparts(p) USE basic, ONLY: mpirank, mpisize, ierr type(particles), INTENT(INOUT):: p INTEGER, DIMENSION(:), ALLOCATABLE :: displs, Nploc INTEGER:: i IF(p%collected) RETURN ! exit subroutine if particles have already been collected during this time step ALLOCATE(Nploc(0:mpisize-1)) ALLOCATE(displs(0:mpisize-1)) displs=0 Nploc(mpirank)=p%Nploc CALL MPI_Allgather(MPI_IN_PLACE, 1, MPI_INTEGER, Nploc, 1, MPI_INTEGER,& & MPI_COMM_WORLD, ierr) p%Nptot=sum(Nploc) IF(p%Nptot .eq. 0 ) THEN p%collected=.true. RETURN END IF IF(mpirank .ne. 0) THEN ! Send Particles informations to root process CALL MPI_Gatherv(p%Z, Nploc(mpirank), db_type, p%Z, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%R, Nploc(mpirank), db_type, p%R, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%THET, Nploc(mpirank), db_type, p%THET, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%UR, Nploc(mpirank), db_type, p%UR, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%UZ, Nploc(mpirank), db_type, p%UZ, Nploc, displs, & & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%UTHET, Nploc(mpirank), db_type, p%UTHET, Nploc, displs, & & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%pot, Nploc(mpirank), db_type, p%pot, Nploc, displs, & & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%Rindex, Nploc(mpirank), MPI_INTEGER, p%Rindex, Nploc, displs, & & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%Zindex, Nploc(mpirank), MPI_INTEGER, p%Zindex, Nploc, displs, & & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(p%partindex, Nploc(mpirank), MPI_INTEGER, p%partindex, Nploc, displs, & & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr) ELSE Do i=1,mpisize-1 displs(i)=displs(i-1)+Nploc(i-1) END DO IF(p%Nptot .gt. size(p%R,1)) THEN CALL change_parts_allocation(p,p%Nptot-size(P%R,1)) END IF ! Receive particle information from all processes CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%Z, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%R, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%THET, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%UR, Nploc, displs,& & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%UZ, Nploc, displs, & & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%UTHET, Nploc, displs, & & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), db_type, p%pot, Nploc, displs, & & db_type, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), MPI_INTEGER, p%Rindex, Nploc, displs, & & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), MPI_INTEGER, p%Zindex, Nploc, displs, & & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr) CALL MPI_Gatherv(MPI_IN_PLACE, Nploc(mpirank), MPI_INTEGER, p%partindex, Nploc, displs, & & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr) p%partindex(sum(Nploc)+1:)=-1 END IF p%collected=.TRUE. END SUBROUTINE collectparts !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief Computes the velocities at time t-1/2 to keep the second order precision in time on the velocity. ! 
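!> In the leapfrog/Boris scheme positions live at integer steps t while velocities live
!> at half steps t-dt/2, which is why a half inverse magnetic rotation and half electric
!> deceleration are implemented below to pull the loaded v(0) back to v(-dt/2). Note that
!> the unconditional RETURN after the Davidson branch currently bypasses that part.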
!---------------------------------------------------------------------------
SUBROUTINE adapt_vinit(p)
!! Computes the velocity at time -dt/2 from velocities computed at time 0
!
- USE basic, ONLY : bnorm, dt, BZ, BR, nlclassical, phinorm, nz, distribtype, vnorm
+ USE basic, ONLY : bnorm, dt, nlclassical, phinorm, distribtype, vnorm
type(particles), INTENT(INOUT):: p
REAL(kind=db) :: tau, BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, &
& SQR, Vperp, v2
INTEGER :: J1, J2, J3, J4, i
REAL(kind=db), DIMENSION(:), ALLOCATABLE :: VZ, VR, VTHET
! In case the Davidson distribution is used, the longitudinal and radial velocities are adapted to take into account the
! electric potential.
IF(distribtype .EQ. 2 .OR. distribtype .EQ. 3 .OR. distribtype .EQ. 4 .or. p%Davidson) THEN
ALLOCATE(VR(p%Nploc),VZ(p%Nploc),VTHET(p%Nploc))
CALL loduni(7,VZ)
VZ=VZ*2*pi
VTHET=p%UTHET/p%Gamma*vnorm
DO i=1,p%Nploc
Vperp=sqrt(MAX(2*p%H0/p%m-2*p%qmRatio*p%pot(i)*phinorm-VTHET(i)**2,0.0_db))
VR(i)=Vperp*sin(VZ(i))
VZ(i)=Vperp*cos(VZ(i))
IF(nlclassical) THEN
p%Gamma(i)=1
ELSE
v2=VR(i)**2+VZ(i)**2+VTHET(i)**2
p%Gamma(i)=sqrt(1/(1-v2/vnorm**2))
END IF
p%UR(i)=p%Gamma(i)*VR(i)/vnorm
p%UZ(i)=p%Gamma(i)*VZ(i)/vnorm
p%UTHET(i)=p%Gamma(i)*VTHET(i)/vnorm
END DO
DEALLOCATE(VR,VZ,VTHET)
END IF
RETURN
! Normalised time increment
!tau=-omegac/2/omegap*dt/tnorm
tau=p%qmRatio*bnorm*0.5*dt
! Store old Velocities
CALL swappointer(p%UZold, p%UZ)
CALL swappointer(p%URold, p%UR)
CALL swappointer(p%UTHETold, p%UTHET)
CALL swappointer(p%Gammaold, p%Gamma)
IF (p%Nploc .NE. 0) THEN
!$OMP PARALLEL DO SIMD DEFAULT(SHARED) PRIVATE(J1,J2,J3,J4,BRZ, BRR, ZBR, ZBZ, ZPR, ZPZ, ZPTHET, SQR)
DO i=1,p%Nploc
! Compute the particle linear indices for the magnetic field interpolation
!J1=(p%rindex(i))*(nz+1)+p%zindex(i)+1
!J2=J1+1
!J3=(p%rindex(i)+1)*(nz+1)+p%zindex(i)+1
!J4=J3+1
!
! Interpolation for magnetic field
!BRZ=(1-p%BZ(i))*(1-p%BR(i))*Bz(J4) &
!& +p%BZ(i)*(1-p%BR(i))*Bz(J3) &
!& +(1-p%BZ(i))*p%BR(i)*Bz(J2) &
!& +p%BZ(i)*p%BR(i)*Bz(J1)
!BRR=(1-p%BZ(i))*(1-p%BR(i))*Br(J4) &
!& +p%BZ(i)*(1-p%BR(i))*Br(J3) &
!& +(1-p%BZ(i))*p%BR(i)*Br(J2) &
!& +p%BZ(i)*p%BR(i)*Br(J1)
! Half inverse rotation along the magnetic field
ZBZ=tau*p%BZ(i)/p%Gammaold(i)
ZBR=tau*p%BR(i)/p%Gammaold(i)
SQR=1+ZBZ*ZBZ+ZBR*ZBR
ZPZ=(p%UZold(i)-ZBR*p%UTHETold(i))/SQR !u-_{z}
ZPR=(p%URold(i)+ZBZ*p%UTHETold(i))/SQR !u-_{r}
ZPTHET=p%UTHETold(i)+(ZBR*p%UZold(i)-ZBZ*p%URold(i))/SQR !u-_{theta}
p%UZ(i)=ZPZ
p%UR(i)=ZPR
p%UTHET(i)=ZPTHET
! half of the deceleration
p%UZ(i)=p%UZ(i)+p%Ez(i)*tau
p%UR(i)=p%UR(i)+p%Er(i)*tau
IF(.not. nlclassical) THEN
p%Gamma(i)=sqrt(1+p%UZ(i)**2+p%UR(i)**2+p%UTHET(i)**2)
END IF
END DO
!$OMP END PARALLEL DO SIMD
END IF
END SUBROUTINE adapt_vinit
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!> @brief In the case of MPI parallelism, computes the indices of the particles assigned to the current process.
!---------------------------------------------------------------------------
SUBROUTINE distribpartsonprocs(p)
! Computes the start and end indices for the Z boundaries on the local process
! Computes the particle indices, from the initial particle loading vector, that stay in the current process
USE basic, ONLY: nz, Zbounds
TYPE(particles), INTENT(INOUT):: p
INTEGER:: k, i, nbparts
REAL(kind=db):: idealnbpartsperproc
INTEGER, DIMENSION(0:nz):: partspercol ! Vector containing the number of particles between zgrid(n) and zgrid(n+1)
INTEGER:: Zmin, Zmax ! Minimum and maximum indices of particles in the Z direction
INTEGER:: Zperproc, zindex
partspercol=0
idealnbpartsperproc = FLOOR(REAL(p%Nploc)/REAL(mpisize))
Zmin=MINVAL(p%Zindex(1:p%Nploc))
Zmax=MAXVAL(p%Zindex(1:p%Nploc))
Zperproc=(Zmax-Zmin)/mpisize
IF(Zmax .eq. 0) Zmax=nz
DO k=1,p%Nploc
zindex=p%Zindex(k)
partspercol(zindex)=partspercol(zindex)+1
END DO
IF (Zperproc .eq. 0) THEN
!! No particles are present initially
Zperproc=nz/mpisize
Zmin=0
DO k=1,mpisize-1
IF(k .lt. mpisize-1-MODULO(Zmax-Zmin,mpisize)) THEN
Zbounds(k)=Zmin+k*Zperproc-1
ELSE
Zbounds(k)=Zmin+k*Zperproc-1+k-mpisize+2+MODULO(Zmax-Zmin,mpisize)
END IF
END DO
ELSE
i=0
DO k=1,mpisize-1
nbparts=0
DO WHILE(nbparts<0.99*idealnbpartsperproc .and. i .lt. Zmax)
nbparts=nbparts+partspercol(i)
i=i+1
END DO
Zbounds(k)=i
END DO
END IF
DO k=0,mpisize-1
Nplocs_all(k)=SUM(partspercol(Zbounds(k):Zbounds(k+1)-1))
END DO
WRITE(*,*) mpirank, " Zbounds: ", Zbounds(mpirank), Zbounds(mpirank+1), " nptot", Nplocs_all(mpirank)
END SUBROUTINE distribpartsonprocs
SUBROUTINE keep_mpi_self_parts(p,Zbounds)
TYPE(particles),INTENT(INOUT):: p
INTEGER,INTENT(in)::Zbounds(0:)
INTEGER :: i, partstart, old_sum,ierr
partstart=1
p%Nploc=0
Do i=1,p%Nptot
IF(p%Zindex(i).ge.Zbounds(mpirank).and.p%Zindex(i).lt.Zbounds(mpirank+1)) THEN
p%Nploc=p%Nploc+1
CALL move_part(p,i,p%Nploc)
END IF
END DO
old_sum=p%Nptot
CALL MPI_REDUCE(p%Nploc, p%Nptot,1,MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
IF(p%Nptot .ne. old_sum) WRITE(*,*) "Error in particle distribution kept: ", p%Nptot, "/",old_sum
END SUBROUTINE keep_mpi_self_parts
!_______________________________________________________________________________
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief Manage the particle communication between neighbours.
!> This routine is responsible for receiving the incoming particles from the MPI neighbours and for sending its outgoing
!> particles to these neighbours
!
!> @param [in] lsendnbparts number of particles to send to the left neighbour (mpirank-1)
!> @param [in] rsendnbparts number of particles to send to the right neighbour (mpirank+1)
!> @param [in] sendholes array containing the indices of the particles leaving the local domain in ascending order. If the index is positive, the particle goes to the right neighbour, and to the left neighbour if the index is negative
!---------------------------------------------------------------------------
SUBROUTINE particlescommunication(p, lsendnbparts, rsendnbparts, sendholes, receivednbparts, procs)
USE mpihelper, ONLY: particle_type
#ifdef _DEBUG
USE basic, ONLY: step
#endif
type(particles), INTENT(INOUT):: p
INTEGER, INTENT(in) :: lsendnbparts, rsendnbparts
INTEGER, INTENT(out) :: receivednbparts
INTEGER, INTENT(in) :: sendholes(:)
INTEGER, INTENT(in) :: procs(2)
INTEGER, ASYNCHRONOUS :: rrecvnbparts=0, lrecvnbparts=0
INTEGER, ASYNCHRONOUS :: sendrequest(2), recvrequest(2)
INTEGER, ASYNCHRONOUS :: sendstatus(MPI_STATUS_SIZE,2), recvstatus(MPI_STATUS_SIZE,2)
TYPE(particle), ALLOCATABLE :: rrecvpartbuff(:), lrecvpartbuff(:), rsendpartbuff(:), lsendpartbuff(:) ! buffers to send and receive particles from the left and right processes
INTEGER :: lsentnbparts, rsentnbparts
INTEGER :: lreceivednbparts, rreceivednbparts, ierr
lsentnbparts=lsendnbparts
rsentnbparts=rsendnbparts
sendrequest=MPI_REQUEST_NULL
recvrequest=MPI_REQUEST_NULL
lrecvnbparts=0
rrecvnbparts=0
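! Note: the exchange below is a two-phase nonblocking handshake: the particle counts
! are exchanged first so the receive buffers can be sized, then the particle payloads
! are transferred with the derived MPI datatype particle_type, which lets the buffer
! packing overlap with the communication.
!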
Send and receive the number of particles to exchange CALL MPI_IRECV(lrecvnbparts, 1, MPI_INTEGER, procs(1), 0, MPI_COMM_WORLD, recvrequest(1), ierr) CALL MPI_IRECV(rrecvnbparts, 1, MPI_INTEGER, procs(2), 0, MPI_COMM_WORLD, recvrequest(2), ierr) CALL MPI_ISEND(lsentnbparts, 1, MPI_INTEGER, procs(1), 0, MPI_COMM_WORLD, sendrequest(1), ierr) CALL MPI_ISEND(rsentnbparts, 1, MPI_INTEGER, procs(2), 0, MPI_COMM_WORLD, sendrequest(2), ierr) CALL MPI_Waitall(2,recvrequest(1:2), recvstatus(:,1:2), ierr) recvrequest=MPI_REQUEST_NULL lreceivednbparts=lrecvnbparts rreceivednbparts=rrecvnbparts ! Re/allocate enough memory to store the incoming particles ALLOCATE(rrecvpartbuff(rreceivednbparts)) ALLOCATE(lrecvpartbuff(lreceivednbparts)) ! Receive particles from left and right processes to the corresponding buffers IF ( lrecvnbparts .gt. 0) THEN CALL MPI_IRECV(lrecvpartbuff, lreceivednbparts, particle_type, procs(1), 1, MPI_COMM_WORLD, recvrequest(1), ierr) END IF IF( rrecvnbparts .gt. 0) THEN CALL MPI_IRECV(rrecvpartbuff, rreceivednbparts, particle_type, procs(2), 1, MPI_COMM_WORLD, recvrequest(2), ierr) END IF ALLOCATE(rsendpartbuff(rsendnbparts)) ALLOCATE(lsendpartbuff(lsendnbparts)) ! Copy the leaving particles to the corresponding send buffers IF ( (lsendnbparts + rsendnbparts) .gt. 0) THEN CALL AddPartSendBuffers(p, lsendnbparts, rsendnbparts, sendholes, lsendpartbuff, rsendpartbuff) END IF CALL MPI_Waitall(2,sendrequest(1:2), sendstatus(:,1:2), ierr) ! Send the particles to the left and right neighbours IF( lsendnbparts .gt. 0) THEN CALL MPI_ISEND(lsendpartbuff, lsendnbparts, particle_type, procs(1), 1, MPI_COMM_WORLD, sendrequest(1), ierr) #ifdef _DEBUG !WRITE(*,*)"snding ", lsendnbparts , " to left at step: ",step #endif END IF IF( rsendnbparts .gt. 0) THEN CALL MPI_ISEND(rsendpartbuff, rsendnbparts, particle_type, procs(2), 1, MPI_COMM_WORLD, sendrequest(2), ierr) #ifdef _DEBUG !WRITE(*,*)"snding ", rsendnbparts , " to right at step: ",step #endif END IF ! Receive the incoming parts in the receive buffers IF ( lreceivednbparts .gt. 0) THEN CALL MPI_Wait(recvrequest(1), recvstatus(:,1), ierr) IF(ierr .ne. MPI_SUCCESS) THEN WRITE(*,*) "Error in particle reception on proc:", mpirank, " error code:", ierr, "status:", recvstatus(:,1) CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) END IF #ifdef _DEBUG !WRITE(*,*)"rcvd ", lreceivednbparts , " from left at step: ",step #endif END IF IF ( rreceivednbparts .gt. 0) THEN CALL MPI_Wait(recvrequest(2), recvstatus(:,2), ierr) IF(ierr .ne. MPI_SUCCESS) THEN WRITE(*,*) "Error in particle reception on proc:", mpirank, " error code:", ierr, "status:", recvstatus(:,2) CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) END IF #ifdef _DEBUG !WRITE(*,*)"rcvd ", rreceivednbparts , " from right at step: ",step #endif END IF receivednbparts=rreceivednbparts+lreceivednbparts IF(p%Nploc+receivednbparts-lsendnbparts-rsendnbparts .gt. size(p%R,1)) THEN CALL change_parts_allocation(p,receivednbparts) END IF ! Copy the incoming particles from the receive buffers to the simulation parts variable CALL Addincomingparts(p, rreceivednbparts, lreceivednbparts, lsendnbparts+rsendnbparts, & & sendholes, lrecvpartbuff, rrecvpartbuff) ! Wait for the outgoing particles to be fully received by the neighbours IF( lsendnbparts .gt. 0) THEN CALL MPI_Wait(sendrequest(1), sendstatus(:,1), ierr) #ifdef _DEBUG !WRITE(*,*)"sent ", lsentnbparts , " to left at step: ",step #endif END IF IF( rsendnbparts .gt. 
0) THEN CALL MPI_Wait(sendrequest(2), sendstatus(:,2), ierr) #ifdef _DEBUG !WRITE(*,*)"sent ", rsentnbparts , " to right at step: ",step #endif END IF ! ! END SUBROUTINE particlescommunication !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Copy the particles from the receive buffers to the local simulation variable parts. !> The incoming particles will first be stored in the holes left by the outgoing particles, then they !> will be added at the end of the parts variable ! !> @param [in] rrecvnbparts number of particles received from the right neighbour (mpirank+1) !> @param [in] lrecvnbparts number of particles received from the left neighbour (mpirank-1) !> @param [in] sendnbparts total number of particles having left the local domain !> @param [in] sendholes array containing the indices of the particle having left the local domain in ascending order. !--------------------------------------------------------------------------- SUBROUTINE Addincomingparts(p, rrecvnbparts, lrecvnbparts, sendnbparts, sendholes,lrecvpartbuff, rrecvpartbuff) ! USE mpihelper TYPE(particles), INTENT(INOUT):: p INTEGER, INTENT(in) :: rrecvnbparts, lrecvnbparts, sendnbparts INTEGER, INTENT(in) :: sendholes(:) TYPE(particle), INTENT(IN) :: rrecvpartbuff(:), lrecvpartbuff(:) INTEGER k,partpos ! First import the particles coming from the right IF(rrecvnbparts .gt. 0) THEN Do k=1,rrecvnbparts IF(k .le. sendnbparts) THEN ! Fill the holes left by sent parts partpos=abs(sendholes(k)) ELSE ! Add at the end of parts and keep track of number of parts p%Nploc=p%Nploc+1 partpos=p%Nploc END IF - CALL Insertincomingpart(p, rrecvpartbuff, partpos, k) + CALL Insertincomingpart(p, rrecvpartbuff(k), partpos) END DO END IF ! Then import the particles coming from the left IF(lrecvnbparts .gt. 0) THEN Do k=1,lrecvnbparts IF(k+rrecvnbparts .le. sendnbparts) THEN ! Fill the holes left by sent parts partpos=abs(sendholes(k+rrecvnbparts)) ELSE ! Add at the end of parts and keep track of number of parts p%Nploc=p%Nploc+1 partpos=p%Nploc END IF - CALL Insertincomingpart(p, lrecvpartbuff, partpos, k) + CALL Insertincomingpart(p, lrecvpartbuff(k), partpos) END DO END IF ! END SUBROUTINE Addincomingparts !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Copy one particle from the receive buffers to the local simulation variable parts. ! 
-!> @param [in] buffer receive buffer containing the particles parameters to copy from -!> @param [in] bufferindex particle index in the receive buffer +!> @param [in] part particle parameters to copy from !> @param [in] partsindex destination particle index in the local parts variable !--------------------------------------------------------------------------- - SUBROUTINE Insertincomingpart(p, buffer, partsindex, bufferindex) + SUBROUTINE Insertincomingpart(p, part, partsindex) USE mpihelper TYPE(particles), INTENT(INOUT):: p - INTEGER, INTENT(in) :: bufferindex, partsindex - TYPE(particle), DIMENSION(:), INTENT(in) :: buffer - p%partindex(partsindex) = buffer(bufferindex)%partindex - p%Rindex(partsindex) = buffer(bufferindex)%Rindex - p%Zindex(partsindex) = buffer(bufferindex)%Zindex - p%R(partsindex) = buffer(bufferindex)%R - p%Z(partsindex) = buffer(bufferindex)%Z - p%THET(partsindex) = buffer(bufferindex)%THET - p%UZ(partsindex) = buffer(bufferindex)%UZ - p%UR(partsindex) = buffer(bufferindex)%UR - p%UTHET(partsindex) = buffer(bufferindex)%UTHET - p%Gamma(partsindex) = buffer(bufferindex)%Gamma - p%pot(partsindex) = buffer(bufferindex)%pot + INTEGER, INTENT(in) :: partsindex + TYPE(particle), INTENT(in) :: part + p%partindex(partsindex) = part%partindex + p%Rindex(partsindex) = part%Rindex + p%Zindex(partsindex) = part%Zindex + p%R(partsindex) = part%R + p%Z(partsindex) = part%Z + p%THET(partsindex) = part%THET + p%UZ(partsindex) = part%UZ + p%UR(partsindex) = part%UR + p%UTHET(partsindex) = part%UTHET + p%Gamma(partsindex) = part%Gamma + p%pot(partsindex) = part%pot ! END SUBROUTINE Insertincomingpart !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Copy one particle from the local parts variable to the send buffer. ! !> @param [in] buffer send buffer to copy to !> @param [in] bufferindex particle index in the send buffer !> @param [in] partsindex origin particle index in the local parts variable !--------------------------------------------------------------------------- SUBROUTINE Insertsentpart(p, buffer, bufferindex, partsindex) USE mpihelper TYPE(particles), INTENT(INOUT):: p INTEGER, INTENT(in) :: bufferindex, partsindex TYPE(particle), DIMENSION(:), INTENT(inout) :: buffer buffer(bufferindex)%partindex = p%partindex(partsindex) buffer(bufferindex)%Rindex = p%Rindex(partsindex) buffer(bufferindex)%Zindex = p%Zindex(partsindex) buffer(bufferindex)%R = p%R(partsindex) buffer(bufferindex)%Z = p%Z(partsindex) buffer(bufferindex)%THET = p%THET(partsindex) buffer(bufferindex)%UZ = p%UZ(partsindex) buffer(bufferindex)%UR = p%UR(partsindex) buffer(bufferindex)%UTHET = p%UTHET(partsindex) buffer(bufferindex)%Gamma = p%Gamma(partsindex) buffer(bufferindex)%pot = p%pot(partsindex) ! END SUBROUTINE Insertsentpart !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Copy the particles from the local parts variable to the left and right send buffers. ! !> @param [in] lsendnbparts number of particles to send to the left neighbour (mpirank-1) !> @param [in] rsendnbparts number of particles to send to the right neighbour (mpirank+1) !> @param [in] sendholes array containing the indices of the particle leaving the local domain in ascending order. 
If the index is positive, the particle goes to the right neighbour, and to the left neighbour if the index is negative !--------------------------------------------------------------------------- SUBROUTINE AddPartSendBuffers(p, lsendnbparts, rsendnbparts, sendholes, lsendpartbuff, rsendpartbuff) ! USE mpihelper TYPE(particles), INTENT(INOUT):: p INTEGER, INTENT(in) :: lsendnbparts, rsendnbparts INTEGER, INTENT(in) :: sendholes(:) TYPE(particle), INTENT(OUT) :: rsendpartbuff(:), lsendpartbuff(:) INTEGER:: partpos, k INTEGER:: lsendpos, rsendpos lsendpos=0 rsendpos=0 ! Loop over the outgoing particles and fill the correct send buffer Do k=lsendnbparts+rsendnbparts,1,-1 partpos=abs(sendholes(k)) IF(sendholes(k) .GT. 0) THEN rsendpos=rsendpos+1 CALL Insertsentpart(p, rsendpartbuff, rsendpos, partpos) ELSE IF(sendholes(k) .LT. 0) THEN lsendpos=lsendpos+1 CALL Insertsentpart(p, lsendpartbuff, lsendpos, partpos) END IF END DO ! ! END SUBROUTINE AddPartSendBuffers !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief Exchange two particles in the parts variable. ! !> @param [in] index1 index in parts of the first particle to exchange. !> @param [in] index2 index in parts of the second particle to exchange. !--------------------------------------------------------------------------- SUBROUTINE exchange_parts(p, index1, index2) TYPE(particles), INTENT(INOUT):: p INTEGER, INTENT(IN) :: index1, index2 REAL(kind=db):: R, Z, THET, UR, UZ, UTHET, Gamma, geomweight(0:2),pot INTEGER :: Rindex, Zindex, partindex !! Exchange particle at index1 with particle at index2 ! Store part at index1 in temporary value partindex = p%partindex(index1) Gamma = p%Gamma(index1) pot = p%pot(index1) R = p%R(index1) Z = p%Z(index1) THET = p%THET(index1) UR = p%UR(index1) UTHET = p%UTHET(index1) UZ = p%UZ(index1) Rindex = p%Rindex(index1) Zindex = p%Zindex(index1) geomweight = p%geomweight(index1,:) ! Move part at index2 into part at index1 p%partindex(index1) = p%partindex(index2) p%Gamma(index1) = p%Gamma(index2) p%pot(index1) = p%pot(index2) p%R(index1) = p%R(index2) p%Z(index1) = p%Z(index2) p%THET(index1) = p%THET(index2) p%UR(index1) = p%UR(index2) p%UTHET(index1) = p%UTHET(index2) p%UZ(index1) = p%UZ(index2) p%Rindex(index1) = p%Rindex(index2) p%Zindex(index1) = p%Zindex(index2) p%geomweight(index1,:) = p%geomweight(index2,:) !
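!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical values): the sign convention of the
! sendholes array consumed by AddPartSendBuffers above. A negative entry
! sends particle abs(sendholes(k)) to the left neighbour, a positive entry
! to the right one; the freed slots are later refilled by Addincomingparts.
program sendholes_demo
  implicit none
  integer, parameter :: sendholes_ex(4) = (/ -2, 5, -7, 9 /)
  integer :: k
  do k = size(sendholes_ex), 1, -1   ! reverse order, as in AddPartSendBuffers
    if (sendholes_ex(k) .gt. 0) then
      write(*,*) 'particle', sendholes_ex(k), '-> right send buffer'
    else
      write(*,*) 'particle', -sendholes_ex(k), '-> left send buffer'
    end if
  end do
end program sendholes_demo
!---------------------------------------------------------------------------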
Move temporary values from part(index1) to part(index2) p%partindex(index2) = partindex p%Gamma(index2) = Gamma p%pot(index2) = pot p%R(index2) = R p%Z(index2) = Z p%THET(index2) = THET p%UR(index2) = UR p%UTHET(index2) = UTHET p%UZ(index2) = UZ p%Rindex(index2) = Rindex p%Zindex(index2) = Zindex p%geomweight(index2,:) = geomweight END SUBROUTINE exchange_parts SUBROUTINE change_parts_allocation(p, sizedifference) implicit none TYPE(particles), INTENT(INOUT):: p INTEGER,INTENT(IN) :: sizedifference CALL change_array_size_int(p%Rindex, sizedifference) CALL change_array_size_int(p%Zindex, sizedifference) CALL change_array_size_int(p%partindex, sizedifference) CALL change_array_size_dp(p%ER,sizedifference) CALL change_array_size_dp(p%EZ,sizedifference) CALL change_array_size_dp(p%pot,sizedifference) CALL change_array_size_dp(p%potxt,sizedifference) CALL change_array_size_dp(p%R,sizedifference) CALL change_array_size_dp(p%Z,sizedifference) CALL change_array_size_dp(p%THET,sizedifference) CALL change_array_size_dp(p%BR,sizedifference) CALL change_array_size_dp(p%BZ,sizedifference) CALL change_array_size_dp2(p%geomweight,sizedifference) CALL change_array_size_dp_ptr(p%UR,sizedifference) CALL change_array_size_dp_ptr(p%URold,sizedifference) CALL change_array_size_dp_ptr(p%UZ,sizedifference) CALL change_array_size_dp_ptr(p%UZold,sizedifference) CALL change_array_size_dp_ptr(p%UTHET,sizedifference) CALL change_array_size_dp_ptr(p%UTHETold,sizedifference) CALL change_array_size_dp_ptr(p%Gamma,sizedifference) CALL change_array_size_dp_ptr(p%Gammaold,sizedifference) p%Nploc=MIN(p%Nploc,size(p%R)) END SUBROUTINE change_parts_allocation SUBROUTINE change_array_size_dp(arr, sizedifference) implicit none REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:) INTEGER, INTENT(IN):: sizedifference REAL(kind=db), ALLOCATABLE:: temp(:) INTEGER:: current_size, new_size if(allocated(arr)) THEN current_size=size(arr) new_size=current_size+sizedifference ALLOCATE(temp(new_size)) temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size)) DEALLOCATE(arr) CALL move_alloc(temp, arr) END IF END SUBROUTINE change_array_size_dp SUBROUTINE change_array_size_dp2(arr, sizedifference) implicit none REAL(kind=db), ALLOCATABLE, INTENT(INOUT):: arr(:,:) INTEGER, INTENT(IN):: sizedifference REAL(kind=db), ALLOCATABLE:: temp(:,:) INTEGER:: current_size, new_size if(allocated(arr)) THEN current_size=size(arr,1) new_size=current_size+sizedifference ALLOCATE(temp(new_size,0:size(arr,2)-1)) temp(1:min(current_size,new_size),:)=arr(1:min(current_size,new_size),:) DEALLOCATE(arr) CALL move_alloc(temp, arr) END IF END SUBROUTINE change_array_size_dp2 SUBROUTINE change_array_size_dp_ptr(arr, sizedifference) implicit none REAL(kind=db), POINTER, INTENT(INOUT):: arr(:) INTEGER, INTENT(IN):: sizedifference REAL(kind=db), POINTER:: temp(:) INTEGER:: current_size, new_size if(associated(arr)) THEN current_size=size(arr) new_size=current_size+sizedifference ALLOCATE(temp(new_size)) temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size)) DEALLOCATE(arr) arr=> temp END IF END SUBROUTINE change_array_size_dp_ptr SUBROUTINE change_array_size_int(arr, sizedifference) implicit none INTEGER, ALLOCATABLE, INTENT(INOUT):: arr(:) INTEGER, INTENT(IN):: sizedifference INTEGER, ALLOCATABLE:: temp(:) INTEGER:: current_size, new_size if(allocated(arr)) THEN current_size=size(arr) new_size=current_size+sizedifference ALLOCATE(temp(new_size)) temp(1:min(current_size,new_size))=arr(1:min(current_size,new_size)) DEALLOCATE(arr) CALL 
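!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical standalone program): the resize pattern
! shared by the change_array_size_* helpers above. A temporary is allocated
! at the new size, the overlapping range is copied, and move_alloc hands the
! storage back to the original array without a second copy.
program resize_demo
  implicit none
  integer, allocatable :: arr(:), temp(:)
  integer :: current_size, new_size
  allocate(arr(5)); arr = (/ 1, 2, 3, 4, 5 /)
  current_size = size(arr)
  new_size = current_size + 3    ! sizedifference = +3
  allocate(temp(new_size)); temp = 0
  temp(1:min(current_size,new_size)) = arr(1:min(current_size,new_size))
  call move_alloc(temp, arr)     ! temp is now deallocated, arr has the new size
  write(*,*) size(arr), arr
end program resize_demo
!---------------------------------------------------------------------------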
move_alloc(temp,arr) END IF END SUBROUTINE change_array_size_int - SUBROUTINE add_created_part(p, buffer,nb_ins) - USE bsplines - USE basic, ONLY: splrz, phinorm, nlclassical - USE geometry + SUBROUTINE add_list_created_part(p, buffer,nb_ins) IMPLICIT NONE TYPE(particles), INTENT(INOUT):: p TYPE(particle), ALLOCATABLE, INTENT(in) :: buffer(:) INTEGER, OPTIONAL:: nb_ins INTEGER:: i, nptotinit, parts_size_increase, nb_added - real(kind=db), ALLOCATABLE::gtildeloc(:,:) nptotinit=p%Nploc+1 if(present(nb_ins)) THEN nb_added=nb_ins ELSE nb_added=size(buffer,1) end if IF(nb_added .le. 0) RETURN ! No particles to add ! if there is not enough space in the parts simulation buffer, increase the parts size IF(p%Nploc + nb_added .gt. size(p%Z,1)) THEN parts_size_increase=Max(floor(0.1*size(p%Z,1)),nb_added) CALL change_parts_allocation(p, parts_size_increase) END IF DO i=1,nb_added - p%Nploc=p%Nploc+1 - p%newindex=p%newindex+1 - CALL Insertincomingpart(p, buffer, p%Nploc, i) - p%partindex(p%Nploc)=p%newindex - CALL geom_weight(p%Z(p%Nploc),p%R(p%Nploc),p%geomweight(p%Nploc,:)) - if( .not. is_inside(p,p%Nploc) ) then - p%Nploc=p%Nploc-1 - p%newindex=p%newindex-1 - CYCLE + CALL add_created_particle(p,buffer(i)) + END DO + nb_added=p%Nploc-nptotinit+1 + if( .not. p%is_test) then + IF(allocated(p%addedlist)) then + call change_array_size_int(p%addedlist,2) + else + allocate(p%addedlist(2)) end if - call p_calc_rzindex(p,p%Nploc) + p%addedlist(size(p%addedlist)-1)=nptotinit + p%addedlist(size(p%addedlist))=nb_added + end if + END SUBROUTINE add_list_created_part + + SUBROUTINE add_linked_created_part(p, linked_buffer) + + IMPLICIT NONE + TYPE(particles), INTENT(INOUT):: p + TYPE(linked_part_row), INTENT(in) :: linked_buffer + TYPE(linked_part), POINTER:: part + INTEGER:: i, nptotinit, parts_size_increase, nb_added + + nptotinit=p%Nploc+1 + nb_added=linked_buffer%n + + IF(nb_added .le. 0) RETURN ! No particles to add + + ! if there is not enough space in the parts simulation buffer, increase the parts size + IF(p%Nploc + nb_added .gt. size(p%Z,1)) THEN + parts_size_increase=Max(floor(0.1*size(p%Z,1)),nb_added) + CALL change_parts_allocation(p, parts_size_increase) + END IF + + part=>linked_buffer%start + DO i=1,nb_added + CALL add_created_particle(p,part%p) + part=>part%next END DO nb_added=p%Nploc-nptotinit+1 if( .not. p%is_test) then IF(allocated(p%addedlist)) then call change_array_size_int(p%addedlist,2) else allocate(p%addedlist(2)) end if p%addedlist(size(p%addedlist)-1)=nptotinit p%addedlist(size(p%addedlist))=nb_added end if - END SUBROUTINE add_created_part + call destroy_linked_parts(linked_buffer%start) + END SUBROUTINE add_linked_created_part + + SUBROUTINE add_created_particle(p,part) + USE geometry + TYPE(particles):: p + TYPE(particle):: part + p%Nploc=p%Nploc+1 + p%newindex=p%newindex+1 + CALL Insertincomingpart(p, part, p%Nploc) + p%partindex(p%Nploc)=p%newindex + CALL geom_weight(p%Z(p%Nploc),p%R(p%Nploc),p%geomweight(p%Nploc,:)) + if( .not. is_inside(p,p%Nploc) ) then + p%Nploc=p%Nploc-1 + p%newindex=p%newindex-1 + RETURN + end if + call p_calc_rzindex(p,p%Nploc) + END SUBROUTINE add_created_particle function is_inside(p,id) Use basic, ONLY: rgrid,zgrid, nr, nz IMPLICIT NONE logical :: is_inside type(particles) :: p integer :: id is_inside=.true. if(p%geomweight(id,0).le.0)then is_inside=.false. return end if if(p%R(id).ge.rgrid(nr) .or. p%R(id) .le. rgrid(0))then is_inside=.false. return end if if(p%Z(id).ge.zgrid(nz) .or. p%Z(id) .le. zgrid(0))then is_inside=.false.
return end if end function is_inside SUBROUTINE calc_newparts_energy(p) USE basic, ONLY: phinorm, nlclassical type(particles)::p integer::i,n,nptotinit,nbadded, nptotend if(p%is_test) return if( allocated(p%addedlist)) then n=size(p%addedlist) !write(*,*) n, "addedlist: ", p%addedlist Do i=1,n,2 nptotinit=p%addedlist(i) nbadded=p%addedlist(i+1) p%nbadded=p%nbadded+nbadded nptotend=nptotinit+nbadded-1 loc_etot0=loc_etot0+p%q*p%weight*sum(p%pot(nptotinit:nptotend))*phinorm IF(.not. nlclassical) THEN loc_etot0=loc_etot0+p%m*p%weight*vlight**2*sum(p%Gamma(nptotinit:nptotend)-1) ELSE loc_etot0=loc_etot0+0.5*p%m*p%weight*vlight**2*sum(p%UR(nptotinit:nptotend)**2 & & +p%UZ(nptotinit:nptotend)**2 & & +p%UTHET(nptotinit:nptotend)**2) END IF end do deallocate(p%addedlist) end if end subroutine calc_newparts_energy !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Delete particle at given index removing its energy from the system ! !> @param [in] index index of particle to be deleted !--------------------------------------------------------------------------- SUBROUTINE delete_part(p, index, replace) !! This will destroy particle at the given index USE constants, ONLY: vlight USE bsplines USE geometry - USE basic, ONLY: phinorm, nlclassical,splrz + USE basic, ONLY: phinorm, nlclassical TYPE(particles), INTENT(INOUT):: p INTEGER, INTENT(IN) :: index LOGICAL, OPTIONAL :: replace LOGICAL:: repl IF(present(replace)) THEN repl=replace ELSE repl=.true. END IF !Computes the potential at the particle position with phi_ext+phi_s IF(index .le. p%Nploc) THEN IF(.not. p%is_test) THEN loc_etot0=loc_etot0-p%q*p%weight*(p%pot(index))*phinorm IF(.not. nlclassical) THEN loc_etot0=loc_etot0-p%m*p%weight*vlight**2*(p%Gamma(index)-1) ELSE loc_etot0=loc_etot0-0.5*p%m*p%weight*vlight**2*(p%UR(index)**2+p%UZ(index)**2+p%UTHET(index)**2) END IF END IF IF(repl) THEN ! We fill the gap CALL move_part(p, p%Nploc, index) p%partindex(p%Nploc)=-1 ! Reduce the total number of simulated parts p%Nploc=p%Nploc-1 END IF END IF END SUBROUTINE delete_part !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Move particle with index sourceindex to particle with index destindex. !> !WARNING! This will overwrite particle at destindex. ! !> @param [in] sourceindex index in parts of the particle to move. !> @param [in] destindex index in parts of the moved particle destination. !--------------------------------------------------------------------------- SUBROUTINE move_part(p, sourceindex, destindex) !! This will destroy particle at destindex INTEGER, INTENT(IN) :: destindex, sourceindex TYPE(particles), INTENT(INOUT)::p IF(sourceindex .eq. destindex) RETURN IF(sourceindex .le. 0 .or. destindex .le. 0) RETURN ! Move part at sourceindex in part at destindex Call copy_part(p,sourceindex,destindex,p) END SUBROUTINE move_part !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Copy particle with index sourceindex in particles sourcep to particle with index destindex in particles destp. !> !WARNING! This will overwrite particle at destp(destindex). ! !> @param [inout] sourcep Structure of source particles. !> @param [in] sourceindex index in parts of the particle to move. !> @param [in] destindex index in parts of the moved particle destination. 
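!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical program, scalar stand-ins for the module
! variables): the energy credited to loc_etot0 by calc_newparts_energy above
! when a particle is created, and debited again in delete_part. The
! relativistic branch uses m*c^2*(gamma-1) with gamma=sqrt(1+u^2) and u the
! normalised momentum beta*gamma; the classical branch uses 0.5*m*u^2*c^2.
program energy_bookkeeping
  implicit none
  integer, parameter :: db = kind(1.d0)
  real(kind=db), parameter :: vlight = 299792458._db
  real(kind=db) :: q, m, weight, pot, phinorm, gam, ur, uz, uthet
  real(kind=db) :: loc_etot0
  logical :: nlclassical
  q = -1.602e-19_db; m = 9.109e-31_db; weight = 1.0e6_db   ! hypothetical values
  pot = 0.5_db; phinorm = 1.0e3_db
  ur = 1.0e-3_db; uz = 2.0e-3_db; uthet = 0._db
  gam = sqrt(1+ur**2+uz**2+uthet**2)
  nlclassical = .false.
  loc_etot0 = q*weight*pot*phinorm                          ! potential part
  if (.not. nlclassical) then
    loc_etot0 = loc_etot0 + m*weight*vlight**2*(gam-1)      ! relativistic kinetic
  else
    loc_etot0 = loc_etot0 + 0.5*m*weight*vlight**2*(ur**2+uz**2+uthet**2)
  end if
  write(*,*) 'energy added to loc_etot0 [J]:', loc_etot0
end program energy_bookkeeping
!---------------------------------------------------------------------------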
!> @param [inout] destp Structure of destination particles. !--------------------------------------------------------------------------- SUBROUTINE copy_part(sourcep, sourceindex, destindex, destp) !! This will destroy particle at destindex INTEGER, INTENT(IN) :: destindex, sourceindex TYPE(particles), INTENT(IN)::sourcep TYPE(particles), INTENT(INOUT)::destp IF(sourceindex .le. 0 .or. destindex .le. 0) RETURN IF( destindex .gt. size(destp%R,1)) RETURN ! Copy part at sourceindex into part at destindex destp%partindex(destindex) = sourcep%partindex(sourceindex) destp%Gamma(destindex) = sourcep%Gamma(sourceindex) destp%R(destindex) = sourcep%R(sourceindex) destp%Z(destindex) = sourcep%Z(sourceindex) destp%THET(destindex) = sourcep%THET(sourceindex) destp%UR(destindex) = sourcep%UR(sourceindex) destp%UTHET(destindex) = sourcep%UTHET(sourceindex) destp%UZ(destindex) = sourcep%UZ(sourceindex) destp%Rindex(destindex) = sourcep%Rindex(sourceindex) destp%Zindex(destindex) = sourcep%Zindex(sourceindex) destp%geomweight(destindex,:) = sourcep%geomweight(sourceindex,:) destp%pot(destindex) = sourcep%pot(sourceindex) END SUBROUTINE copy_part !________________________________________________________________________________ SUBROUTINE destroy_parts(p) TYPE(particles) :: p p%Nploc=0 IF(ALLOCATED(p%Z)) DEALLOCATE(p%Z) IF(ALLOCATED(p%R)) DEALLOCATE(p%R) IF(ALLOCATED(p%THET)) DEALLOCATE(p%THET) IF(ALLOCATED(p%BZ)) DEALLOCATE(p%BZ) IF(ALLOCATED(p%BR)) DEALLOCATE(p%BR) IF(ASSOCIATED(p%UR)) DEALLOCATE(p%UR) IF(Associated(p%URold)) DEALLOCATE(p%URold) IF(Associated(p%UZ)) DEALLOCATE(p%UZ) IF(Associated(p%UZold)) DEALLOCATE(p%UZold) IF(Associated(p%UTHET)) DEALLOCATE(p%UTHET) IF(Associated(p%UTHETold)) DEALLOCATE(p%UTHETold) IF(Associated(p%Gamma)) DEALLOCATE(p%Gamma) IF(Associated(p%Gammaold)) DEALLOCATE(p%Gammaold) IF(ALLOCATED(p%Rindex)) DEALLOCATE(p%Rindex) IF(ALLOCATED(p%Zindex)) DEALLOCATE(p%Zindex) IF(ALLOCATED(p%partindex)) DEALLOCATE(p%partindex) if(allocated(p%geomweight)) Deallocate(p%geomweight) END SUBROUTINE !________________________________________________________________________________ SUBROUTINE clean_beam ! INTEGER:: i Do i=1,size(partslist,1) CALL destroy_parts(partslist(i)) END DO ! END SUBROUTINE clean_beam !________________________________________________________________________________ SUBROUTINE swappointer( pointer1, pointer2) REAL(kind=db), DIMENSION(:), POINTER, INTENT(inout):: pointer1, pointer2 REAL(kind=db), DIMENSION(:), POINTER:: temppointer temppointer=>pointer1 pointer1=>pointer2 pointer2=>temppointer END SUBROUTINE swappointer !_______________________________________________________________________________ SUBROUTINE loaduniformRZ(p, VR,VZ,VTHET) USE basic, ONLY: plasmadim, rnorm, temp, qsim, msim USE constants, ONLY: me, kb, elchar REAL(kind=db), INTENT(inout) ::VZ(:), VR(:), VTHET(:) TYPE(particles), INTENT(INOUT):: p CALL creat_parts(p, size(VR,1)) p%Nploc=size(VR,1) p%Nptot=size(VR,1) p%q=sign(elchar,qsim) p%weight=msim/me p%m=me p%qmRatio=qsim/msim ! Initial distribution in z with normalisation CALL loduni(1,p%Z(1:p%Nploc)) p%Z(1:p%Nploc)=(plasmadim(1)+(plasmadim(2)-plasmadim(1))*p%Z(1:p%Nploc))/rnorm ! Initial distribution in r with normalisation CALL lodlinr(2,p%R(1:p%Nploc),plasmadim(3),plasmadim(4)) p%R(1:p%Nploc)=p%R(1:p%Nploc)/rnorm !
Initial velocities distribution CALL loadGaussianVelocities(p, VR, VZ, VTHET, temp) END SUBROUTINE loaduniformRZ !_______________________________________________________________________________ SUBROUTINE loadDavidson(p, VR,VZ,VTHET, lodr) USE constants, ONLY: me, kb, elchar USE basic, ONLY: nplasma, rnorm, plasmadim, distribtype, H0, P0, Rcurv, width, qsim, msim, & & omegac, zgrid, nz, rnorm, n0, nblock, temp interface subroutine lodr(nbase,y,rminus,rplus) USE constants REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rplus, rminus end subroutine end interface TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(INOUT)::VZ(:), VR(:), VTHET(:) REAL(kind=db), DIMENSION(:), ALLOCATABLE::ra, rb, z REAL(kind=db) :: r0, deltar2, halfLz, Mirrorratio, Le, VOL INTEGER :: j, n, blockstart, blockend, addedpart, remainparts INTEGER, DIMENSION(:), ALLOCATABLE :: blocksize CALL creat_parts(p, size(VR,1)) p%Nploc=size(VR,1) p%Nptot=p%Nploc Allocate(ra(nblock),rb(nblock), z(0:nblock)) !r0=(plasmadim(4)+plasmadim(3))/2 r0=sqrt(4*H0/(me*omegac**2)) halfLz=(zgrid(nz)+zgrid(0))/2 MirrorRatio=(Rcurv-1)/(Rcurv+1) z(0)=plasmadim(1) DO n=1,nblock ! Compute limits in radius and load radii for each part Le=(plasmadim(2)-plasmadim(1))/nblock*(n-0.5)-halfLz*rnorm+plasmadim(1) z(n)=z(0)+n*(plasmadim(2)-plasmadim(1))/nblock deltar2=1-MirrorRatio*cos(2*pi*Le/width) rb(n)=r0/deltar2*sqrt(1-P0*abs(omegac)/2/H0*deltar2+sqrt(1-P0*abs(omegac)/H0*deltar2)) ra(n)=r0/deltar2*sqrt(1-P0*abs(omegac)/2/H0*deltar2-sqrt(1-P0*abs(omegac)/H0*deltar2)) END DO VOL=SUM(2*pi*MINVAL(ra)*(rb-ra)*(plasmadim(2)-plasmadim(1))/nblock) qsim=VOL*n0*elchar/nplasma msim=abs(qsim)/elchar*me p%weight=abs(qsim)/elchar p%m=me p%q=sign(elchar,qsim) p%qmRatio=p%q/p%m blockstart=1 blockend=0 ALLOCATE(blocksize(nblock)) WRITE(*,*) "blocksize: ", size(blocksize), nblock DO n=1,nblock blocksize(n)=nplasma/VOL*2*pi*MINVAL(ra)*(rb(n)-ra(n))*(plasmadim(2)-plasmadim(1))/nblock END DO remainparts=p%Nploc-SUM(blocksize) addedpart=1 n=nblock/2 j=1 DO WHILE(remainparts .GT. 0) blocksize(n)=blocksize(n)+addedpart remainparts=remainparts-addedpart n=n+j j=-1*(j+SIGN(1,j)) END DO CALL loadPartSlices(p, lodr, ra, rb, z, blocksize) IF(distribtype .eq. 5) THEN CALL loadGaussianVelocities(p, VR, VZ, VTHET, temp) VZ=VZ/4 VR=VR*8 VTHET=VTHET*8 ELSE Call loadDavidsonVelocities(p, VR, VZ, VTHET, H0, P0) END IF END SUBROUTINE loadDavidson SUBROUTINE loadDavidsonVelocities(p, VR,VZ,VTHET, H0, P0) USE constants, ONLY: me, kb, elchar USE basic, ONLY: rnorm, Rcurv, B0, width, vnorm, zgrid, nz TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(INOUT)::VZ(:), VR(:), VTHET(:) REAL(kind=db), INTENT(IN):: H0, P0 REAL(kind=db) :: athetpos, rg, zg, halfLz, Mirrorratio, Pcomp, Acomp INTEGER :: i MirrorRatio=(Rcurv-1)/(Rcurv+1) halfLz=(zgrid(nz)+zgrid(0))/2 ! Load the azimuthal (theta) velocity ! Loading of r and z velocity is done in adapt_vinit to have ! access to parts%pot DO i=1,p%Nploc ! Interpolation for Magnetic potential rg=p%R(i)*rnorm zg=(p%Z(i)-halfLz)*rnorm Athetpos=0.5*B0*(rg - width/pi*MirrorRatio*bessi1(2*pi*rg/width)*COS(2*pi*zg/width)) Pcomp=P0/rg/p%m Acomp=-p%qmRatio*Athetpos VTHET(i)=SIGN(MIN(abs(Pcomp+Acomp),sqrt(2*H0/p%m)),Pcomp+Acomp) !VTHET(i)=Pcomp+Acomp END DO VTHET=VTHET/vnorm VZ=0._db VR=0._db p%Davidson=.true.
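!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical standalone program): the capped azimuthal
! velocity used by loadDavidsonVelocities above. Pcomp is the canonical
! angular momentum contribution, Acomp the vector potential contribution, and
! the magnitude is clamped so the kinetic energy never exceeds H0. A_theta is
! taken here as the uniform-field value 0.5*B0*r; the mirror-ripple Bessel
! correction (bessi1) of the real code is omitted. Field and radius values
! are made up for the example and chosen so the clamp actually triggers.
program davidson_vthet
  implicit none
  integer, parameter :: db = kind(1.d0)
  real(kind=db), parameter :: elchar = 1.602176634e-19_db   ! [C]
  real(kind=db), parameter :: me = 9.1093837015e-31_db      ! [kg]
  real(kind=db) :: H0, P0, B0, r, athet, qmratio, pcomp, acomp, vthet
  H0 = 3.2e-14_db                  ! total energy, partsload default [J]
  P0 = 8.66e-25_db                 ! canonical angular momentum default
  B0 = 0.21_db; r = 5.0e-3_db      ! hypothetical field [T] and radius [m]
  athet = 0.5_db*B0*r              ! A_theta of a uniform axial field
  qmratio = -elchar/me
  pcomp = P0/r/me
  acomp = -qmratio*athet
  vthet = sign(min(abs(pcomp+acomp), sqrt(2*H0/me)), pcomp+acomp)
  write(*,*) 'VTHET [m/s] =', vthet   ! clamped to sqrt(2*H0/m) here
end program davidson_vthet
!---------------------------------------------------------------------------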
p%H0=H0 p%P0=P0 END SUBROUTINE loadDavidsonVelocities SUBROUTINE loadGaussianVelocities(p, VR,VZ,VTHET, temperature) USE basic, ONLY: vnorm USE constants, ONLY: kb REAL(kind=db), INTENT(inout) ::VZ(:), VR(:), VTHET(:) TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(IN):: temperature REAL(kind=db):: vth ! Initial velocities distribution vth=sqrt(kb*temperature/p%m)/vnorm !thermal velocity CALL lodgaus(3,VZ) CALL lodgaus(5,VR) CALL lodgaus(7,VTHET) VZ=VZ*vth VR=VR*vth VTHET=VTHET*vth p%temperature=temperature p%Davidson=.false. END SUBROUTINE loadGaussianVelocities SUBROUTINE loadPartslices(p, lodr, ra, rb, z, blocksize) USE basic, ONLY: rnorm interface subroutine lodr(nbase,y,rminus,rplus) USE constants REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rplus, rminus end subroutine end interface TYPE(particles), INTENT(INOUT):: p REAL(kind=db), INTENT(IN)::ra(:), rb(:), z(0:) INTEGER, DIMENSION(:), INTENT(IN) :: blocksize INTEGER :: n, blockstart, blockend, nblock nblock=size(blocksize,1) blockstart=1 blockend=0 DO n=1,nblock blockstart=blockend+1 blockend=MIN(blockstart+blocksize(n)-1,p%Nploc) ! Initial distribution in z with normalisation between magnetic mirrors CALL loduni(1,p%Z(blockstart:blockend)) p%Z(blockstart:blockend)= (z(n-1)+p%Z(blockstart:blockend)*(z(n)-z(n-1)))/rnorm CALL lodr(2,p%R(blockstart:blockend),ra(n), rb(n)) p%R(blockstart:blockend)=p%R(blockstart:blockend)/rnorm END DO END SUBROUTINE loadPartslices - SUBROUTINE loadPartFile(p, partfileindex, VR, VZ, VTHET) - USE basic, ONLY: partfile, nplasma, lu_partfile + SUBROUTINE read_part_file(p, partfilename, VR, VZ, VTHET) + USE basic, ONLY: lu_partfile, rnorm, vnorm implicit None TYPE(particles), INTENT(INOUT):: p REAL(kind=db), DIMENSION(:), ALLOCATABLE, INTENT(INOUT)::VR, VZ, VTHET - INTEGER, INTENT(IN):: partfileindex + CHARACTER(len=*)::partfilename INTEGER:: nblock = 0 REAL(kind=db), Dimension(:), ALLOCATABLE:: ra, rb, z INTEGER, Dimension(:), ALLOCATABLE:: npartsslice INTEGER:: velocitytype=1 !< 1) gaussian with temp 2) Davidson with H0, P0 INTEGER:: radialtype=1 !< 1) 1/R 2) uniform 3) 1/R^2 4) gauss INTEGER:: npartsalloc !< initial size of particles arrays REAL(kind=db):: mass=me REAL(kind=db):: charge=-elchar REAL(kind=db):: weight=1.0 CHARACTER(len=256) :: header=' ' !< header of csv file REAL(kind=db):: H0=3.2e-14 !< Total energy REAL(kind=db):: P0=8.66e-25 !< Canonical angular momentum REAL(kind=db):: temperature=10000 !< temperature in kelvins real(kind=db):: n0 !< density factor LOGICAL :: is_test = .false. !< Defines if particles are test particles or tracers + CHARACTER(len=16) :: partformat = 'slice' INTEGER:: i, ierr, openerr NAMELIST /partsload/ nblock, mass, charge, weight, npartsalloc, velocitytype, & - & radialtype, temperature, H0, P0, is_test, n0 + & radialtype, temperature, H0, P0, is_test, n0, partformat - OPEN(UNIT=lu_partfile,FILE=trim(partfile(partfileindex)),ACTION='READ',IOSTAT=openerr) + OPEN(UNIT=lu_partfile,FILE=trim(partfilename),ACTION='READ',IOSTAT=openerr) header=' ' IF(openerr .ne. 0) THEN CLOSE(unit=lu_partfile) RETURN END IF READ(lu_partfile,partsload) IF(mpirank .eq.0) THEN - WRITE(*,'(a,i2,a,a)')"reading partfile: ", partfileindex, " ", partfile(partfileindex) + WRITE(*,'(a,a)')"reading partfile: ", trim(partfilename) WRITE(*,partsload) END IF - - - IF( nblock .ge. 1) THEN - ALLOCATE(z(0:nblock),ra(nblock),rb(nblock), npartsslice(nblock)) - DO WHILE(header(1:8) .ne.
'//slices') - READ(lu_partfile,'(a)') header - END DO - DO i=1,nblock - READ(lu_partfile,*) z(i-1),ra(i),rb(i),npartsslice(i) - END DO - READ(lu_partfile,*) z(nblock) - - CALL creat_parts(p,max(npartsalloc,sum(npartsslice))) - p%Nploc=sum(npartsslice) - p%Nptot=p%Nploc - IF(partfileindex .eq. 1) nplasma=p%Nploc - IF( allocated(VR) ) THEN - DEALLOCATE(VR,VZ,VTHET) - end if - if(.not. allocated(VR)) THEN - ALLOCATE(VR(p%Nploc)) - ALLOCATE(VZ(p%Nploc)) - ALLOCATE(VTHET(p%Nploc)) - END IF - p%m=mass - p%q=charge - p%weight=weight - p%qmRatio=charge/mass - p%is_test=is_test - p%Newindex=sum(npartsslice) - - SELECT CASE(radialtype) - CASE(1) ! 1/R distribution in R - CALL loadPartslices(p, lodunir, ra, rb, z, npartsslice) - CASE(2) ! flat top distribution in R - CALL loadPartslices(p, lodlinr, ra, rb, z, npartsslice) - CASE(3) ! 1/R^2 distribution in R - CALL loadPartslices(p, lodinvr, ra, rb, z, npartsslice) - CASE(4) ! gaussian distribution in R - CALL loadPartslices(p, lodgausr, ra, rb, z, npartsslice) + ! The plasma cloud is defined as a set of slices + IF(trim(partformat).eq.'slice') THEN + IF( nblock .ge. 1) THEN + ALLOCATE(z(0:nblock),ra(nblock),rb(nblock), npartsslice(nblock)) + DO WHILE(header(1:8) .ne. '//slices') + READ(lu_partfile,'(a)') header + END DO + DO i=1,nblock + READ(lu_partfile,*) z(i-1),ra(i),rb(i),npartsslice(i) + END DO + READ(lu_partfile,*) z(nblock) + + CALL creat_parts(p,max(npartsalloc,sum(npartsslice))) + p%Nploc=sum(npartsslice) + p%Nptot=p%Nploc + IF( allocated(VR) ) THEN + DEALLOCATE(VR,VZ,VTHET) + end if + if(.not. allocated(VR)) THEN + ALLOCATE(VR(p%Nploc)) + ALLOCATE(VZ(p%Nploc)) + ALLOCATE(VTHET(p%Nploc)) + END IF + p%m=mass + p%q=charge + p%weight=weight + p%qmRatio=charge/mass + p%is_test=is_test + p%Newindex=sum(npartsslice) + + SELECT CASE(radialtype) + CASE(1) ! 1/R distribution in R + CALL loadPartslices(p, lodunir, ra, rb, z, npartsslice) + CASE(2) ! flat top distribution in R + CALL loadPartslices(p, lodlinr, ra, rb, z, npartsslice) + CASE(3) ! 1/R^2 distribution in R + CALL loadPartslices(p, lodinvr, ra, rb, z, npartsslice) + CASE(4) ! gaussian distribution in R + CALL loadPartslices(p, lodgausr, ra, rb, z, npartsslice) + CASE DEFAULT + IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of radial distribution:", radialtype + CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) + END SELECT + + SELECT CASE(velocitytype) + CASE(1) ! Gaussian with temperature + CALL loadGaussianVelocities(p, VR, VZ, VTHET, temperature) + CASE(2) + CALL loadDavidsonVelocities(p, VR, VZ, VTHET, H0, P0) CASE DEFAULT - IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of radial distribution:", radialtype + IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of velocity distribution:", velocitytype CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) - END SELECT + END SELECT - SELECT CASE(velocitytype) - CASE(1) ! Gaussian with temperature - CALL loadGaussianVelocities(p, VR, VZ, VTHET, temperature) - CASE(2) - CALL loadDavidsonVelocities(p, VR, VZ, VTHET, H0, P0) - CASE DEFAULT - IF (mpirank .eq. 0) WRITE(*,*) "Unknown type of velocity distribution:", velocitytype - CALL MPI_Abort(MPI_COMM_WORLD, -1, ierr) - END SELECT + END IF + + END IF + + + ! The plasma cloud is defined as a set of individual particles + IF( trim(partformat) .eq. 'parts' ) THEN + IF( nblock .ge. 1) THEN + !Allocate necessary memory + CALL creat_parts(p,max(npartsalloc,nblock)) + IF( allocated(VR) ) THEN + DEALLOCATE(VR,VZ,VTHET) + end if + if(.not.
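!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical input file): the layout read_part_file
! expects for the new partformat='parts' branch. After the &partsload
! namelist, each line below the '//parts' marker holds
! R, THET, Z, VR, VTHET, VZ for one particle; positions and velocities are
! normalised on read by rnorm and vnorm. In this format nblock is the number
! of individual particles. All values below are made up for the example.
!
!   &partsload
!     partformat = 'parts'
!     nblock = 3
!     npartsalloc = 100
!     mass = 9.109e-31
!     charge = -1.602e-19
!     weight = 1.0
!     is_test = .true.
!   /
!   //parts
!   0.005  0.00  0.01  0.0  1.0e6  0.0
!   0.006  1.57  0.02  0.0  1.0e6  0.0
!   0.007  3.14  0.03  0.0  1.0e6  0.0
!---------------------------------------------------------------------------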
allocated(VR)) THEN + ALLOCATE(VR(nblock)) + ALLOCATE(VZ(nblock)) + ALLOCATE(VTHET(nblock)) + END IF + + ! Read the particles from the file + DO WHILE(header(1:8) .ne. '//parts') + READ(lu_partfile,'(a)') header + END DO + DO i=1,nblock + READ(lu_partfile,*) p%R(i),p%THET(i),p%Z(i), VR(i), VTHET(i), VZ(i) + END DO + + p%Nploc=nblock + p%Nptot=p%Nploc + p%m=mass + p%q=charge + p%weight=weight + p%qmRatio=charge/mass + p%is_test=is_test + p%Newindex=nblock + + !normalizations + p%r=p%r/rnorm + p%z=p%z/rnorm + VR=VR/vnorm + VTHET=VTHET/vnorm + VZ=VZ/vnorm + END IF END IF CLOSE(unit=lu_partfile) END SUBROUTINE !=============================================================================== SUBROUTINE Temp_rescale ! USE basic, ONLY: temprescale, nlclassical, nz ! INTEGER:: i, gridindex ! REAL(kind=db) :: vr, vz, vthet ! DO i=1,parts%Nptot ! gridindex=parts%zindex(i)+1+parts%rindex(i)*nz ! vr=parts%UR(i)/parts%Gamma(i)-fluidur(gridindex) ! vz=parts%UZ(i)/parts%Gamma(i)-fluiduz(gridindex) ! vthet=parts%UTHET(i)/parts%Gamma(i)-fluiduthet(gridindex) ! parts%UR(i)=vr*temprescale+fluidur(gridindex) ! parts%UTHET(i)=vthet*temprescale+fluiduthet(gridindex) ! parts%UZ(i)=vz*temprescale+fluiduz(gridindex) ! IF(nlclassical) THEN ! parts%Gamma(i)=1.0 ! ELSE ! parts%Gamma(i)=1/sqrt(1-(parts%UR(i)**2+parts%UTHET(i)**2+parts%UZ(i)**2)) ! END IF ! parts%UR(i)=parts%UR(i)*parts%Gamma(i) ! parts%UTHET(i)=parts%UTHET(i)*parts%Gamma(i) ! parts%UZ(i)=parts%UZ(i)*parts%Gamma(i) ! END DO END SUBROUTINE Temp_rescale !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Increase the number of macroparticles by separating each previous macroparticle into !> samplefactor new macroparticles of equally divided weight. The new subparticles are distributed !> uniformly in space to maintain the density and other moments. ! !> @param [in] samplefactor multiplier of the number of macroparticles. !> @param [in] p particles type to increase. !--------------------------------------------------------------------------- SUBROUTINE upsample(p, samplefactor) USE basic, ONLY : nplasma, dr, dz INTEGER, INTENT(IN) ::samplefactor TYPE(particles), INTENT(INOUT):: p INTEGER:: i, j, currentindex REAL(kind=db), DIMENSION(p%Nploc) :: spreaddir ! random direction for the spread of each initial macro particle REAL(kind=db) :: dir ! Direction in which the particle is moved REAL(kind=db) :: dl ! Particle displacement used for the spread ! Load and scale the direction angle for spreading the new particles CALL loduni(2, spreaddir) spreaddir=spreaddir*2*pi/samplefactor dl=min(dz,minval(dr))/100 DO i=1,p%Nploc DO j=1,samplefactor-1 currentindex=p%Nploc+(i-1)*(samplefactor-1)+j CALL move_part(p,i,currentindex) p%partindex(currentindex)=currentindex dir = spreaddir(i)+2*pi*j/samplefactor p%R(currentindex)=p%R(currentindex) + dl*cos(dir) p%Z(currentindex)=p%Z(currentindex) + dl*sin(dir) END DO p%partindex(i)=i p%R(i)=p%R(i) + dl*cos(spreaddir(i)) p%Z(i)=p%Z(i) + dl*sin(spreaddir(i)) END DO nplasma=nplasma*samplefactor p%weight=p%weight/samplefactor p%Nploc=p%Nploc*samplefactor END SUBROUTINE upsample + !--------------------------------------------------------------------------- +!> @author +!> Guillaume Le Bars EPFL/SPC +! +! DESCRIPTION: +!> +!> @brief Deallocate recursively a linked_part linked list +! +!> @param [in] l_p linked_part particle to be deallocated.
+!--------------------------------------------------------------------------- + RECURSIVE SUBROUTINE destroy_linked_parts(l_p) + TYPE(linked_part), POINTER :: l_p + + IF(associated(l_p%next)) call destroy_linked_parts(l_p%next) + deallocate(l_p) + END subroutine destroy_linked_parts + END MODULE beam diff --git a/src/celldiag_mod.f90 b/src/celldiag_mod.f90 index 2c449ff..ac980c1 100644 --- a/src/celldiag_mod.f90 +++ b/src/celldiag_mod.f90 @@ -1,186 +1,185 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: celldiag ! !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> Represent a diagnostic positioned at cell indices (rindex,zindex) that saves the individual particles !> position and velocity !------------------------------------------------------------------------------ MODULE celldiag ! USE constants USE mpi USE mpihelper USE basic, ONLY: mpirank, mpisize, vnorm, rnorm, Zbounds, zgrid, & & nlclassical, nlmaxwellsource, phinorm, nbcelldiag USE beam USE futils IMPLICIT NONE PRIVATE INTEGER, SAVE, ALLOCATABLE :: specieid(:) !< position of the specie in partslist INTEGER, SAVE, ALLOCATABLE :: rindex(:) !< radial index for the diagnostic position INTEGER, SAVE, ALLOCATABLE :: zindex(:) !< axial index for the diagnostic position TYPE(particles), ALLOCATABLE, SAVE :: diagnosed_parts(:) !< Stores the particles properties at position (rindex,zindex) CHARACTER(len=20), SAVE, ALLOCATABLE :: groupname(:) !< Name of the group in the hdf5 file INTEGER, SAVE , ALLOCATABLE :: h5storelength(:) !< particles capacity of the hdf5 dataset NAMELIST /celldiagparams/ specieid, rindex, zindex PUBLIC:: celldiag_init, celldiag_save contains subroutine celldiag_init(lu_in, time, diagfile_id) implicit none INTEGER, INTENT(IN) :: lu_in REAL(kind=db), INTENT(IN):: time INTEGER, INTENT(IN):: diagfile_id INTEGER:: i ALLOCATE(specieid(nbcelldiag), rindex(nbcelldiag), zindex(nbcelldiag)) ALLOCATE(diagnosed_parts(nbcelldiag), groupname(nbcelldiag), h5storelength(nbcelldiag)) Rewind(lu_in) if(nbcelldiag .gt. 0) then READ(lu_in, celldiagparams) WRITE(*, celldiagparams) Do i=1,nbcelldiag CALL creat_parts(diagnosed_parts(i), 500) IF(mpirank .eq. 0) THEN WRITE(groupname(i),'(a,i2.2)') "/data/celldiag/",i If(.not. isgroup(diagfile_id, "/data/celldiag/")) THEN CALL creatg(diagfile_id, "/data/celldiag") CALL attach(diagfile_id, "/data/celldiag", "nbcelldiag", nbcelldiag) END IF CALL celldiag_createh5group(diagfile_id, groupname(i), rindex(i), zindex(i), specieid(i), diagnosed_parts(i), h5storelength(i)) END IF END DO END IF End subroutine celldiag_init subroutine celldiag_createh5group(diagfile_id, groupname, rindex, zindex, specid, diag_parts, h5strlength) INTEGER, INTENT(IN):: diagfile_id, rindex, zindex, specid CHARACTER(len=*), INTENT(IN):: groupname TYPE(particles), INTENT(IN):: diag_parts INTEGER, INTENT(INOUT):: h5strlength INTEGER:: partsrank, partsdim(2) If(.not. isgroup(diagfile_id, TRIM(groupname))) CALL creatg(diagfile_id, TRIM(groupname)) CALL attach(diagfile_id, TRIM(groupname), "rindex", rindex) CALL attach(diagfile_id, TRIM(groupname), "zindex", zindex) CALL attach(diagfile_id, trim(groupname), "q", partslist(specid)%q) CALL attach(diagfile_id, trim(groupname), "m", partslist(specid)%m) CALL attach(diagfile_id, trim(groupname), "weight", partslist(specid)%weight) If(.not. 
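!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical input fragment): the namelist consumed by
! celldiag_init when, for example, nbcelldiag=2 is set in the main input.
! Each diagnostic i records the particles of species specieid(i) found in
! the cell (rindex(i), zindex(i)) at every save; the index values below are
! made up for the example.
!
!   &celldiagparams
!     specieid = 1, 1
!     rindex   = 10, 24
!     zindex   = 32, 32
!   /
!---------------------------------------------------------------------------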
isdataset(diagfile_id, trim(groupname) // "/time")) CALL creatd(diagfile_id, 0, SHAPE(rindex), trim(groupname) // "/time", "time") If(.not. isdataset(diagfile_id, trim(groupname) // "/Nparts")) CALL creatd(diagfile_id, 0, SHAPE(rindex), trim(groupname) //"/Nparts", "number of remaining parts") If(.not. isdataset(diagfile_id, trim(groupname) // "/R")) CALL creatd(diagfile_id, 1, SHAPE(diag_parts%R), trim(groupname) // "/R", "radial pos") If(.not. isdataset(diagfile_id, trim(groupname) // "/Z")) CALL creatd(diagfile_id, 1, SHAPE(diag_parts%R), trim(groupname) // "/Z", "axial pos") If(.not. isdataset(diagfile_id, trim(groupname) // "/THET")) CALL creatd(diagfile_id, 1, SHAPE(diag_parts%R), trim(groupname) // "/THET", "azimuthal pos") If(.not. isdataset(diagfile_id, trim(groupname) // "/UZ")) CALL creatd(diagfile_id, 1, SHAPE(diag_parts%R), trim(groupname) // "/UZ", "axial beta*gamma") If(.not. isdataset(diagfile_id, trim(groupname) // "/UR")) CALL creatd(diagfile_id, 1, SHAPE(diag_parts%R), trim(groupname) // "/UR", "radial beta*gamma") If(.not. isdataset(diagfile_id, trim(groupname) // "/UTHET")) CALL creatd(diagfile_id, 1, SHAPE(diag_parts%R), trim(groupname) // "/UTHET", "azimuthal beta*gamma") If(.not. isdataset(diagfile_id, trim(groupname) // "/pot")) CALL creatd(diagfile_id, 1, SHAPE(diag_parts%R), trim(groupname) // "/pot", "electric potential") CALL getdims(diagfile_id, trim(groupname) // '/R', partsrank, partsdim) h5strlength=partsdim(1) END subroutine celldiag_createh5group subroutine celldiag_save(time, diagfile_id) implicit none REAL(kind=db), INTENT(IN) :: time INTEGER, INTENT(IN) :: diagfile_id INTEGER :: Nbtosave, i ! Check if the cell diagnostic is active IF(.not. celldiag_on(time)) THEN RETURN END IF Do i=1,nbcelldiag CALL celldiag_save_specie(partslist(specieid(i)),rindex(i),zindex(i),diagnosed_parts(i)) END DO Do i=1,nbcelldiag if(mpisize .gt. 1) then call collectparts(diagnosed_parts(i)) else diagnosed_parts(i)%Nptot=diagnosed_parts(i)%Nploc end if Nbtosave=min(diagnosed_parts(i)%Nptot,h5storelength(i)) CALL celldiag_write_specie(diagfile_id, diagnosed_parts(i), groupname(i), Nbtosave, time) END DO end subroutine celldiag_save SUBROUTINE celldiag_save_specie(p, rindex, zindex, savedp) USE basic, ONLY: rgrid, zgrid Type(particles), INTENT(IN) :: p Type(particles), INTENT(INOUT) :: savedp INTEGER, INTENT(IN) :: rindex, zindex INTEGER:: i, destcopyindex savedp%Nploc=0 savedp%collected=.false. IF (p%Nploc .gt. 0 .and. zindex .ge. Zbounds(mpirank) .and. zindex .lt. Zbounds(mpirank+1)) THEN ! Only the process owning zindex in its axial subdomain scans its particles !$OMP PARALLEL DO DEFAULT(SHARED) DO i=1,p%Nploc ! If the particle is in the correct cell, it is saved IF (p%Zindex(i) .eq. zindex.and. p%Rindex(i) .eq. rindex ) THEN !$OMP CRITICAL (diagparts) savedp%Nploc=savedp%Nploc+1 destcopyindex= savedp%Nploc !$OMP END CRITICAL (diagparts) CALL copy_part(p,i,destcopyindex,savedp) END IF END DO !$OMP END PARALLEL DO END IF END subroutine celldiag_save_specie SUBROUTINE celldiag_write_specie(diagfile_id, savedp, groupname, Nbtosave, time) Type(particles), INTENT(IN) :: savedp INTEGER, INTENT(IN) :: diagfile_id CHARACTER(LEN=*), INTENT(IN) :: groupname INTEGER, INTENT(IN) :: Nbtosave REAL(kind=db), INTENT(IN) :: time - IF(mpirank .eq.
0) THEN CALL append(diagfile_id, trim(groupname) // "/time", time) CALL append(diagfile_id, trim(groupname) // "/Nparts", REAL(savedp%Nptot,kind=db)) CALL append(diagfile_id, trim(groupname) // "/R", savedp%R(1:Nbtosave)*rnorm) CALL append(diagfile_id, trim(groupname) // "/Z", savedp%Z(1:Nbtosave)*rnorm) CALL append(diagfile_id, trim(groupname) // "/THET", savedp%THET(1:Nbtosave)) CALL append(diagfile_id, trim(groupname) // "/UZ", savedp%UZ(1:Nbtosave)/savedp%gamma(1:Nbtosave)) CALL append(diagfile_id, trim(groupname) // "/UR", savedp%UR(1:Nbtosave)/savedp%gamma(1:Nbtosave)) CALL append(diagfile_id, trim(groupname) // "/UTHET", savedp%UTHET(1:Nbtosave)/savedp%gamma(1:Nbtosave)) CALL append(diagfile_id, trim(groupname) // "/pot", savedp%pot(1:Nbtosave)*phinorm) END IF END subroutine celldiag_write_specie logical function celldiag_on(time) REAL(kind=db), intent(in):: time celldiag_on=.true. end function End Module celldiag diff --git a/src/chkrst.f90 b/src/chkrst.f90 index c98bc1b..96313c9 100644 --- a/src/chkrst.f90 +++ b/src/chkrst.f90 @@ -1,260 +1,260 @@ SUBROUTINE chkrst(flag) ! ! Process checkpoint/restart file ! USE basic USE futils USE beam USE fields USE constants, ONLY: elchar, me IMPLICIT NONE INTEGER, INTENT(in) :: flag INTEGER :: remainingparts REAL(kind=db):: old_msim, old_qsim, old_n0 INTEGER:: partsrank, partsdim(1), i REAL(kind=db), ALLOCATABLE:: charges(:), weights(:), masses(:) CHARACTER(len=64):: group CHARACTER(len=2):: specieindex real(kind=db):: old_rnorm, old_tnorm ! Only process 0 should save on file ! ! Local vars and arrays !________________________________________________________________________________ ! SELECT CASE(flag) !________________________________________________________________________________ ! 1. Open and read restart file ! CASE(0) CALL openf(rstfile, fidrst,'r',real_prec='d') CALL getatt(fidrst, '/Basic', 'cstep', cstep) CALL getatt(fidrst, '/Basic', 'time', time) CALL getatt(fidrst, '/Basic', 'n0', old_n0) IF(isgroup(fidrst,'/Basic/norm')) THEN CALL getatt(fidrst, '/Basic/norm', 'rnorm', old_rnorm) CALL getatt(fidrst, '/Basic/norm', 'tnorm', old_tnorm) end if IF(isdataset(fidrst,'/Parts/charges')) THEN ! If we have multiple saved species we need to load all of them CALL getatt(fidrst,'/Parts','nbspecies',nbspecies) ALLOCATE(charges(nbspecies),masses(nbspecies),weights(nbspecies)) - ALLOCATE(partslist(nbspecies)) + ALLOCATE(partslist(nbspecies+nbaddtestspecies)) CALL getarr(fidrst, '/Parts/charges', charges) CALL getarr(fidrst, '/Parts/masses', masses) CALL getarr(fidrst, '/Parts/weights', weights) weights(1)=weights(1)/old_n0*n0 ELSE ! If we have an old restart file, we load only the electrons CALL getatt(fidrst, '/Basic', 'msim', old_msim) CALL getatt(fidrst, '/Basic', 'qsim', old_qsim) qsim=old_qsim/old_n0*n0 msim=old_msim/old_n0*n0 nbspecies=1 ALLOCATE(charges(nbspecies),masses(nbspecies),weights(nbspecies)) ALLOCATE(partslist(nbspecies)) charges(1)=sign(elchar,qsim) weights(1)=msim/me masses(1)=me END IF if(newres) then weights=weights*weights_scale end if CALL getatt(fidrst, '/var0d', 'etot0', loc_etot0) etot0=loc_etot0 if(n0.ne.old_n0) cstep=0 loc_etot0=0 CALL getatt(fidrst, '/var0d', 'epot', epot) CALL getatt(fidrst, '/var0d', 'ekin', ekin) CALL getatt(fidrst, '/var0d', 'etot', etot) CALL getatt(fidrst, '/Parts', 'nplasma', nplasma) CALL getatt(fidrst, '/Parts', 'remainingparts', remainingparts) IF (samplefactor .gt. 1 ) THEN !
We increase the number of macro particles CALL creat_parts(partslist(1),remainingparts*samplefactor) ELSE if(remainingparts .gt. 0) Then CALL getdims(fidrst, '/Parts/Z', partsrank, partsdim) else partsdim=0 end if CALL creat_parts(partslist(1),partsdim(1)) END IF partslist(1)%q=charges(1) partslist(1)%m=masses(1) partslist(1)%weight=weights(1) partslist(1)%qmRatio=charges(1)/masses(1) if(remainingparts .gt. 0) then CALL getarr(fidrst, '/Parts/Z', partslist(1)%Z) CALL getarr(fidrst, '/Parts/R', partslist(1)%R) ! Renormalize R and Z partslist(1)%R=partslist(1)%R*sqrt(n0/old_n0) partslist(1)%Z=partslist(1)%Z*sqrt(n0/old_n0) CALL getarr(fidrst, '/Parts/THET', partslist(1)%THET) CALL getarr(fidrst, '/Parts/UZ', partslist(1)%UZ) CALL getarr(fidrst, '/Parts/UR', partslist(1)%UR) CALL getarr(fidrst, '/Parts/UTHET', partslist(1)%UTHET) CALL getarr(fidrst, '/Parts/Zindex', partslist(1)%Zindex) CALL getarr(fidrst, '/Parts/Rindex', partslist(1)%Rindex) CALL getarr(fidrst, '/Parts/partindex', partslist(1)%partindex) end if partslist(1)%Nploc=remainingparts partslist(1)%Nptot=partslist(1)%Nploc partslist(1)%Newindex=maxval(partslist(1)%partindex) WRITE(*,*) "Read ", partslist(1)%Nploc, " particles out of ", remainingparts IF(isdataset(fidrst,'/Parts/fluidur')) THEN CALL getarr(fidrst, '/Parts/GAMMA', partslist(1)%Gamma) IF(temprescale .gt. 0) THEN ! Does nothing for now CALL Temp_rescale END IF END IF IF(nbspecies .gt. 1) THEN DO i=2,nbspecies WRITE(group,'(a,i2)')'/Parts/',i WRITE(specieindex,'(i2)') i partsdim=0 CALL getatt(fidrst, trim(group), 'remainingparts', remainingparts) if(remainingparts .gt. 0) Then CALL getdims(fidrst, trim(group) // '/Z', partsrank, partsdim) else partsdim=0 end if IF(partsdim(1).gt.remainingparts) THEN CALL creat_parts(partslist(i),partsdim(1)) partslist(i)%Nploc=remainingparts ELSE CALL creat_parts(partslist(i),max(100,remainingparts)) ENDIF partslist(i)%q=charges(i) partslist(i)%m=masses(i) partslist(i)%weight=weights(i) partslist(i)%qmRatio=charges(i)/masses(i) partslist(i)%Nptot=remainingparts CALL getatt(fidrst, trim(group), 'is_test', partslist(i)%is_test) IF(partslist(i)%Nptot .gt. 0) THEN CALL getarr(fidrst, trim(group) // '/Z', partslist(i)%Z) CALL getarr(fidrst, trim(group) // '/R', partslist(i)%R) CALL getarr(fidrst, trim(group) // '/THET', partslist(i)%THET) CALL getarr(fidrst, trim(group) // '/UZ', partslist(i)%UZ) CALL getarr(fidrst, trim(group) // '/UR', partslist(i)%UR) CALL getarr(fidrst, trim(group) // '/UTHET', partslist(i)%UTHET) CALL getarr(fidrst, trim(group) // '/GAMMA', partslist(i)%Gamma) CALL getarr(fidrst, trim(group) // '/Zindex', partslist(i)%Zindex) CALL getarr(fidrst, trim(group) // '/Rindex', partslist(i)%Rindex) CALL getarr(fidrst, trim(group) // '/partindex', partslist(i)%partindex) partslist(i)%R=partslist(i)%R*sqrt(n0/old_n0) partslist(i)%Z=partslist(i)%Z*sqrt(n0/old_n0) partslist(i)%Newindex=maxval(partslist(i)%partindex) END IF END DO END IF CALL closef(fidrst) IF(samplefactor .gt. 1) THEN ! We increase the number of macro particles CALL upsample(partslist(1), samplefactor) END IF WRITE(*,'(3x,a)') "Reading from restart file "//TRIM(rstfile)//" completed!" !________________________________________________________________________________ ! 2. Create and write to restart file (DP reals) ! CASE(1) IF( .NOT. 
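!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical standalone program): the density
! renormalisation applied above when restarting with a different n0. The
! macroparticle weight scales linearly with n0/old_n0, while normalised
! positions scale with sqrt(n0/old_n0) -- consistent with rnorm=vnorm*tnorm
! when tnorm follows the plasma frequency (omegap ~ sqrt(n0), so the length
! unit shrinks as 1/sqrt(n0) and the physical position is preserved).
program restart_rescale
  implicit none
  integer, parameter :: db = kind(1.d0)
  real(kind=db) :: old_n0, n0, weight, R
  old_n0 = 1.0e16_db; n0 = 4.0e16_db    ! made-up densities [1/m^3]
  weight = 2.0e5_db;  R = 1.2e-2_db     ! weight and normalised radial position
  weight = weight/old_n0*n0             ! as for weights(1) in chkrst
  R = R*sqrt(n0/old_n0)                 ! as for partslist(1)%R
  write(*,*) 'rescaled weight, R:', weight, R
end program restart_rescale
!---------------------------------------------------------------------------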
nlsave ) RETURN CALL mv2bk(rstfile) CALL creatf(rstfile, fidrst, real_prec='d', desc='Restart file') CALL creatg(fidrst, '/Basic', 'Basic data') CALL attach(fidrst, '/Basic', 'cstep', cstep) CALL attach(fidrst, '/Basic', 'time', time) CALL attach(fidrst, '/Basic', 'jobnum', jobnum) CALL attach(fidrst, '/Basic', 'qsim', partslist(1)%q*partslist(1)%weight) CALL attach(fidrst, '/Basic', 'msim', partslist(1)%m*partslist(1)%weight) CALL attach(fidrst, '/Basic', 'n0', n0) CALL creatg(fidrst, '/Basic/norm', 'Normalisation quantities') CALL attach(fidrst, '/Basic/norm', 'rnorm', rnorm) CALL attach(fidrst, '/Basic/norm', 'bnorm', bnorm) CALL attach(fidrst, '/Basic/norm', 'enorm', enorm) CALL attach(fidrst, '/Basic/norm', 'tnorm', tnorm) CALL attach(fidrst, '/Basic/norm', 'phinorm', phinorm) ! ! 0D variables ! CALL creatg(fidrst, '/var0d', '0D variables') CALL attach(fidrst, '/var0d','etot0', etot0) CALL attach(fidrst, '/var0d','epot', epot) CALL attach(fidrst, '/var0d','ekin', ekin) CALL attach(fidrst, '/var0d','etot', etot) ! ! Parts ! CALL creatg(fidrst, '/Parts', 'Particles data') CALL attach(fidrst, '/Parts', 'nplasma', nplasma) nbspecies=size(partslist,1) CALL attach(fidrst, '/Parts', 'nbspecies', nbspecies) ALLOCATE(charges(nbspecies),masses(nbspecies),weights(nbspecies)) DO i=1,nbspecies charges(i) = partslist(i)%q masses(i) = partslist(i)%m weights(i) = partslist(i)%weight END DO CALL putarr(fidrst, '/Parts/charges', charges) CALL putarr(fidrst, '/Parts/masses', masses ) CALL putarr(fidrst, '/Parts/weights', weights) IF(mpisize .gt. 1) THEN remainingparts=sum(Nplocs_all) ELSE remainingparts=partslist(1)%Nploc END IF CALL attach(fidrst, '/Parts', 'remainingparts', remainingparts) CALL putarr(fidrst, '/Parts/Z', partslist(1)%Z) CALL putarr(fidrst, '/Parts/R', partslist(1)%R) CALL putarr(fidrst, '/Parts/THET', partslist(1)%THET) CALL putarr(fidrst, '/Parts/UZ', partslist(1)%UZ) CALL putarr(fidrst, '/Parts/UR', partslist(1)%UR) CALL putarr(fidrst, '/Parts/UTHET', partslist(1)%UTHET) CALL putarr(fidrst, '/Parts/GAMMA', partslist(1)%Gamma) CALL putarr(fidrst, '/Parts/Zindex', partslist(1)%Zindex) CALL putarr(fidrst, '/Parts/Rindex', partslist(1)%Rindex) CALL putarr(fidrst, '/Parts/partindex', partslist(1)%partindex) CALL putarr(fidrst, '/Parts/fluidur', moments(2,:)) CALL putarr(fidrst, '/Parts/fluiduthet', moments(3,:)) CALL putarr(fidrst, '/Parts/fluiduz', moments(4,:)) IF(nbspecies .gt. 1) THEN DO i=2,nbspecies WRITE(group,'(a,i2)')'/Parts/',i WRITE(specieindex,'(i2)') i CALL creatg(fidrst, trim(group), 'Particles ' // specieindex// ' data') CALL attach(fidrst, trim(group), 'remainingparts', partslist(i)%Nptot) CALL attach(fidrst, trim(group), 'is_test', partslist(i)%is_test) IF(partslist(i)%Nptot .gt. 
0) THEN CALL putarr(fidrst, trim(group) // '/Z', partslist(i)%Z(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/R', partslist(i)%R(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/THET', partslist(i)%THET(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/UZ', partslist(i)%UZ(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/UR', partslist(i)%UR(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/UTHET', partslist(i)%UTHET(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/GAMMA', partslist(i)%Gamma(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/Zindex', partslist(i)%Zindex(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/Rindex', partslist(i)%Rindex(1:partslist(i)%Nptot)) CALL putarr(fidrst, trim(group) // '/partindex', partslist(i)%partindex(1:partslist(i)%Nptot)) END IF END DO END IF ! ! Fields ! CALL closef(fidrst) WRITE(*,'(3x,a)') "Writing to restart file "//TRIM(rstfile)//" completed!" ! END SELECT ! END SUBROUTINE chkrst diff --git a/src/diagnose.f90 b/src/diagnose.f90 index 38ea95e..fd10ef9 100644 --- a/src/diagnose.f90 +++ b/src/diagnose.f90 @@ -1,364 +1,377 @@ SUBROUTINE diagnose(kstep) ! ! Diagnostics ! USE basic USE futils USE hashtable Use maxwsrce USE beam, ONLY : partslist, epot, ekin, etot, etot0, Nplocs_all, collectparts #if USE_X == 1 USE xg, ONLY : initw, updt_xg_var #endif USE fields, ONLY: phi_spline USE celldiag USE mpi Use geometry IMPLICIT NONE ! INTEGER, INTENT(in) :: kstep ! ! Local vars and arrays INTEGER, PARAMETER :: BUFSIZE = 20 CHARACTER(len=128) :: str, fname, grpname INTEGER:: Ntotal ! Total number of simulated particles remaining in the simulation INTEGER:: partsrank, partsdim(2) INTEGER:: Nplocs_all_save(64) INTEGER:: i !________________________________________________________________________________ ! 1. Initial diagnostics IF( kstep .EQ. 0 .and. mpirank .eq. 0) THEN ! Only process 0 should save on file ! WRITE(*,'(a)') ' Initial diagnostics' ! ! 1.1 Initial run or when NEWRES set to .TRUE. ! IF( .NOT. nlres .OR. newres) THEN CALL creatf(resfile, fidres, 'Espic2d result file', real_prec='d') WRITE(*,'(3x,a,a)') TRIM(resfile), ' created' ! ! Label the run IF( LEN_TRIM(label1).GT.0 ) CALL attach(fidres, "/", "label1", TRIM(label1)) IF( LEN_TRIM(label2).GT.0 ) CALL attach(fidres, "/", "label2", TRIM(label2)) IF( LEN_TRIM(label3).GT.0 ) CALL attach(fidres, "/", "label3", TRIM(label3)) IF( LEN_TRIM(label4).GT.0 ) CALL attach(fidres, "/", "label4", TRIM(label4)) ! ! Job number jobnum = 0 ! ! Data group CALL creatg(fidres, "/data", "data") CALL creatg(fidres, "/data/var0d", "0d history arrays") CALL creatg(fidres, "/data/var1d","1d history arrays") CALL creatg(fidres, "/data/part", "part phase space") CALL creatg(fidres, "/data/fields", "Electro static potential and Er Ez fields") ! ! File group CALL creatg(fidres, "/files", "files") CALL attach(fidres, "/files", "jobnum", jobnum) CALL putarr(fidres, "/data/var1d/rgrid", rgrid) CALL putarr(fidres, "/data/var1d/zgrid", zgrid) CALL creatd(fidres, 1, (/ 64 /), "/data/var0d/Nplocs_all", "Nplocs_all") ! Part and fields vectors ! 
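!---------------------------------------------------------------------------
! Illustrative sketch (hypothetical standalone program; assumes the futils
! HDF5 wrapper library used throughout this file): creating an extendible
! scalar dataset with creatd and growing it with append -- the same pattern
! applied below for /data/part/time and in celldiag above.
program futils_trace
  use futils
  implicit none
  integer :: fid, istep
  real(kind=8) :: time
  time = 0.d0
  call creatf('trace.h5', fid, 'minimal trace file', real_prec='d')
  call creatg(fid, '/data', 'data')
  call creatd(fid, 0, shape(time), '/data/time', 'time')   ! rank-0 extendible dataset
  do istep = 1, 5
    time = istep*1.d-9
    call append(fid, '/data/time', time)                   ! grows the dataset by one
  end do
  call closef(fid)
end program futils_trace
!---------------------------------------------------------------------------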
Initialize time-dependent particle variables CALL creatd(fidres, 1, SHAPE(partslist(1)%R), "/data/part/R", "R",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%Z), "/data/part/Z", "Z",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%Z), "/data/part/THET", "THET",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%UR), "/data/part/UR", "UR",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%UZ), "/data/part/UZ", "UZ",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%UTHET), "/data/part/UTHET", "UTHET",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%Rindex), "/data/part/Rindex", "Rindex",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%Zindex), "/data/part/Zindex", "Zindex",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%partindex), "/data/part/partindex", "partindex",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 1, SHAPE(partslist(1)%pot), "/data/part/pot", "pot",compress=.true.,chunking=SHAPE(partslist(1)%R)) CALL creatd(fidres, 0, SHAPE(time), "/data/part/time", "time" ) CALL creatd(fidres, 0, SHAPE(Ntotal), "/data/part/Nparts", "number of remaining parts in the simulation space") CALL attach(fidres,'/data/part', "q", partslist(1)%q) CALL attach(fidres,'/data/part', "m", partslist(1)%m) CALL attach(fidres,'/data/part', "weight", partslist(1)%weight) - DO i=2,nbspecies - IF ( partslist(i)%is_test) THEN - WRITE(grpname,'(a,i2)')'/data/part/',i - CALL creatg(fidres, grpname, "specific specie phase space") - CALL creatd(fidres, 0, SHAPE(time), trim(grpname) // "/time", "time") - CALL creatd(fidres, 0, SHAPE(time), trim(grpname) //"/Nparts", "number of remaining parts") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/R", "radial pos") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/Z", "axial pos") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/THET", "azimuthal pos") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/UZ", "axial beta*gamma") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/UR", "radial beta*gamma") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/UTHET", "azimuthal beta*gamma") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/pot", "electric potential") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/Rindex", "radial grid index") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/Zindex", "axial grid index") - CALL creatd(fidres, 1, SHAPE(partslist(i)%R), trim(grpname) // "/partindex", "particle index") - CALL attach(fidres,trim(grpname), "q", partslist(i)%q) - CALL attach(fidres,trim(grpname), "m", partslist(i)%m) - CALL attach(fidres,trim(grpname), "weight", partslist(i)%weight) - end IF - END DO - - CALL creatd(fidres, 1, SHAPE(pot), "/data/fields/pot", "pot") CALL creatd(fidres, 1, SHAPE(Er), "/data/fields/Er", "Er") CALL creatd(fidres, 1, SHAPE(Ez), "/data/fields/Ez", "Ez") CALL creatd(fidres, 1, SHAPE(phi_spline), "/data/fields/phi", "spline form of Phi") CALL creatd(fidres, 2, SHAPE(moments), "/data/fields/moments", "moments",compress=.true.,chunking=(/1,nrank(1)*nrank(2)/)) !CALL creatd(fidres, 2, SHAPE(moments), "/data/fields/moments", "moments") CALL creatd(fidres,
0, SHAPE(time), "/data/fields/time", "time" ) CALL putarr(fidres, "/data/fields/Br", Br) CALL putarr(fidres, "/data/fields/Bz", Bz) CALL putarr(fidres, "/data/fields/Athet", Athet) CALL putarr(fidres, "/data/fields/volume", Volume) ! ! 1.2 Restart run ! ELSE CALL cp2bk(resfile) ! backup previous result file CALL openf(resfile, fidres, real_prec='d') WRITE(*,'(3x,a,a)') TRIM(resfile), ' open' CALL getatt(fidres, "/files", "jobnum", jobnum) jobnum = jobnum+1 WRITE(*,'(3x,a,i3)') "Current Job Number =", jobnum CALL attach(fidres, "/files", "jobnum", jobnum) END IF ! ! Add input namelist variables as attributes of /data/input WRITE(str,'(a,i2.2)') "/data/input.",jobnum CALL creatg(fidres, TRIM(str)) CALL attach(fidres, TRIM(str), "job_time", job_time) CALL attach(fidres, TRIM(str), "extra_time", extra_time) CALL attach(fidres, TRIM(str), "dt", dt) CALL attach(fidres, TRIM(str), "tmax", tmax) CALL attach(fidres, TRIM(str), "nrun", nrun) CALL attach(fidres, TRIM(str), "nlres", nlres) CALL attach(fidres, TRIM(str), "nlsave", nlsave) CALL attach(fidres, TRIM(str), "newres", newres) CALL attach(fidres, TRIM(str), "nz", nz) CALL putarr(fidres, TRIM(str)//"/lz", lz) CALL attach(fidres, TRIM(str), "nplasma", nplasma) CALL attach(fidres, TRIM(str), "potinn", potinn) CALL attach(fidres, TRIM(str), "potout", potout) CALL attach(fidres, TRIM(str), "B0", B0) CALL attach(fidres, TRIM(str), "Rcurv", Rcurv) CALL attach(fidres, TRIM(str), "width", width) CALL attach(fidres, TRIM(str), "n0", n0) CALL attach(fidres, TRIM(str), "temp", partslist(1)%temperature) CALL attach(fidres, TRIM(str), "it0d", it0d) CALL attach(fidres, TRIM(str), "it2d", it2d) CALL attach(fidres, TRIM(str), "itparts", itparts) CALL attach(fidres, TRIM(str), "nlclassical", nlclassical) CALL attach(fidres, TRIM(str), "nlPhis", nlPhis) CALL attach(fidres, TRIM(str), "qsim", partslist(1)%q*partslist(1)%weight) CALL attach(fidres, TRIM(str), "msim", partslist(1)%m*partslist(1)%weight) CALL attach(fidres, TRIM(str), "startstep", cstep) CALL attach(fidres, TRIM(str), "H0", partslist(1)%H0) CALL attach(fidres, TRIM(str), "P0", partslist(1)%P0) CALL putarr(fidres, TRIM(str)//"/femorder", femorder) CALL putarr(fidres, TRIM(str)//"/ngauss", ngauss) CALL putarr(fidres, TRIM(str)//"/nnr", nnr) CALL putarr(fidres, TRIM(str)//"/radii", radii) CALL putarr(fidres, TRIM(str)//"/plasmadim", plasmadim) CALL attach(fidres, TRIM(str), "rawparts", .true.) CALL attach(fidres, TRIM(str), "nbspecies", nbspecies) ! Save geometry parameters for non conforming boundary conditions Call geom_diag(fidres,str,rnorm) ! Save Maxwellsource parameters for the ad-hoc source Call maxwsrce_diag(fidres,str,vnorm) ! Save STDIN of this run WRITE(str,'(a,i2.2)') "/files/STDIN.",jobnum INQUIRE(unit=lu_in, name=fname) CALL putfile(fidres, TRIM(str), TRIM(fname)) +! Prepare hdf5 file for storing test particles + DO i=2,nbspecies + IF ( partslist(i)%is_test) THEN + WRITE(grpname,'(a,i2)')'/data/part/',i + call create_parts_group(partslist(i),trim(grpname),time) + END IF + END DO + CALL attach(fidres, "/data/part", "nbspecies", nbspecies) + ! ! Initialize buffers for 0d history arrays CALL htable_init(hbuf0, BUFSIZE) CALL set_htable_fileid(hbuf0, fidres, "/data/var0d") ! ! Initialize Xgrafix #if USE_X == 1 IF(nlxg) THEN CALL initw END IF #endif END IF IF(kstep .EQ. 0) THEN ! Initialize particle cell diagnostic CALL celldiag_init(lu_in,time, fidres) CLOSE(lu_in) END IF !________________________________________________________________________________ ! 2. Periodic diagnostics IF( kstep .NE. 
-1) THEN IF(modulo(step,ittracer) .eq. 0 .or. nlend) THEN ! We gather the traced particles on the mpi host DO i=1,nbspecies IF(partslist(i)%is_test) CALL collectparts(partslist(i)) END DO END IF IF(modulo(step,itrestart) .eq. 0 .or. modulo(step,itparts) .eq. 0 .or. nlend) THEN ! We gather the traced particles on the mpi host DO i=1,nbspecies CALL collectparts(partslist(i)) END DO END IF IF(modulo(step,ittext) .eq. 0 .or. nlend) THEN ! We gather the number of gained and lost particles on the mpi host do i=1,nbspecies IF(mpirank .eq.0 ) THEN CALL MPI_REDUCE(MPI_IN_PLACE, partslist(i)%nbadded, 1, MPI_INTEGER, MPI_SUM, & & 0, MPI_COMM_WORLD, ierr) CALL MPI_REDUCE(MPI_IN_PLACE, partslist(i)%nblost, 4, MPI_INTEGER, MPI_SUM, & & 0, MPI_COMM_WORLD, ierr) ELSE CALL MPI_REDUCE(partslist(i)%nbadded, partslist(i)%nbadded, 1, MPI_INTEGER, MPI_SUM, & & 0, MPI_COMM_WORLD, ierr) CALL MPI_REDUCE(partslist(i)%nblost, partslist(i)%nblost, 4, MPI_INTEGER, MPI_SUM, & & 0, MPI_COMM_WORLD, ierr) partslist(i)%nbadded=0 partslist(i)%nblost=0 END IF end do end if ! ! Only process 0 should save on file IF(mpirank .ne. 0) RETURN ! IF (mpisize .gt. 1) THEN partslist(1)%Nptot=sum(Nplocs_all) END IF ! IF(modulo(step,ittext).eq. 0 .or. nlend) THEN WRITE(*,'(a,1x,i8.8,a1,i8.8,20x,a,1pe10.3)') '*** Timestep (this run/total) =', & & step, '/', cstep, 'Time =', time if( abs(etot).gt. 0) then WRITE(*,'(a,6(1pe12.4),1x,i8.8,a1,i8.8)') 'Epot, Ekin, Etot, Etot0, Eerr, Eerr rel, Nbparts/Ntotal', epot, ekin, etot, etot0, etot-etot0,(etot-etot0)/etot, partslist(1)%Nptot,'/',nplasma else WRITE(*,'(a,4(1pe12.4),1x,i8.8,a1,i8.8)') 'Epot, Ekin, Etot, Eerr, Nbparts/Ntotal', epot, ekin, etot, etot-etot0, partslist(1)%Nptot,'/',nplasma end if IF(mpisize .gt. 1 ) then WRITE(*,'(a,64i10.7)') 'Nbparts per proc', Nplocs_all end if Write(*,'(a)')"speci, added, zmloss, zploss, rmloss, rploss, tot var, tot" do i=1,nbspecies WRITE(*,'(i04,7i9.7)') i, partslist(i)%nbadded,-partslist(i)%nblost,partslist(i)%nbadded-sum(partslist(i)%nblost), partslist(i)%Nptot partslist(i)%nbadded=0 partslist(i)%nblost=0 end do END IF !________________________________________________________________________________ ! ! 2.1 0d history arrays ! IF(modulo(step,it0d).eq. 0 .or. nlend) THEN CALL add_record(hbuf0, "time", "simulation time", time) CALL add_record(hbuf0, "epot", "potential energy", epot) CALL add_record(hbuf0, "ekin", "kinetic energy", ekin) CALL add_record(hbuf0, "etot", "total energy", etot) CALL add_record(hbuf0, "etot0", "theoretical total energy", etot0) CALL add_record(hbuf0, "nbparts", "number of remaining parts in the simulation space", REAL(partslist(1)%Nptot,kind=db)) CALL htable_endstep(hbuf0) Nplocs_all_save=0 Nplocs_all_save(1:mpisize)=Nplocs_all(0:mpisize-1) CALL append(fidres, "/data/var0d/Nplocs_all", REAL(Nplocs_all_save,kind=db)) END IF ! ! 2.2 2d profiles IF(modulo(step,it2d).eq. 0 .or. nlend) THEN CALL append(fidres, "/data/fields/time", time) CALL append(fidres, "/data/fields/pot", pot*phinorm) CALL append(fidres, "/data/fields/Er", Er*enorm) CALL append(fidres, "/data/fields/Ez", Ez*enorm) CALL append(fidres, "/data/fields/phi", phi_spline*phinorm) CALL append(fidres, "/data/fields/moments", moments) END IF ! ! 2.3 main specie quantities IF(modulo(step,itparts).eq. 0 .or. 
nlend) THEN !PRINT*, 'write particles to file_____________________' CALL append(fidres, "/data/part/time", time) CALL append(fidres, "/data/part/Nparts", REAL(partslist(1)%Nptot,kind=db)) IF ( isdataset(fidres,'/data/part/R') ) THEN CALL getdims(fidres, '/data/part/R', partsrank, partsdim) partsdim(1)=min(size(partslist(1)%R,1), partsdim(1)) CALL append(fidres, "/data/part/R", partslist(1)%R(1:partsdim(1))*rnorm) CALL append(fidres, "/data/part/Z", partslist(1)%Z(1:partsdim(1))*rnorm) CALL append(fidres, "/data/part/THET", partslist(1)%THET(1:partsdim(1))) CALL append(fidres, "/data/part/UZ", partslist(1)%UZ(1:partsdim(1))/partslist(1)%gamma(1:partsdim(1))) CALL append(fidres, "/data/part/UR", partslist(1)%UR(1:partsdim(1))/partslist(1)%gamma(1:partsdim(1))) CALL append(fidres, "/data/part/UTHET", partslist(1)%UTHET(1:partsdim(1))/partslist(1)%gamma(1:partsdim(1))) CALL append(fidres, "/data/part/pot", partslist(1)%pot(1:partsdim(1))*phinorm) CALL append(fidres, "/data/part/Rindex", REAL(partslist(1)%Rindex(1:partsdim(1)),kind=db)) CALL append(fidres, "/data/part/Zindex", REAL(partslist(1)%Zindex(1:partsdim(1)),kind=db)) CALL append(fidres, "/data/part/partindex", REAL(partslist(1)%partindex(1:partsdim(1)),kind=db)) END IF END IF ! ! 2.4 Tracer quantities IF(modulo(step,ittracer).eq. 0 .or. nlend) THEN !PRINT*, 'write particles to file_____________________' DO i=2,nbspecies IF ( .not. partslist(i)%is_test) CYCLE WRITE(grpname,'(a,i2,a)')'/data/part/',i,'/' CALL append(fidres, trim(grpname) // "time", time) CALL append(fidres, trim(grpname) //"Nparts", REAL(partslist(i)%Nptot,kind=db)) IF ( isdataset(fidres,trim(grpname)//'R') ) THEN CALL getdims(fidres, trim(grpname) // 'R', partsrank, partsdim) partsdim(1)=min(size(partslist(i)%R,1), partsdim(1)) CALL append(fidres, trim(grpname) // "R", partslist(i)%R(1:partsdim(1))*rnorm) CALL append(fidres, trim(grpname) // "Z", partslist(i)%Z(1:partsdim(1))*rnorm) CALL append(fidres, trim(grpname) // "THET", partslist(i)%THET(1:partsdim(1))) CALL append(fidres, trim(grpname) // "UZ", partslist(i)%UZ(1:partsdim(1))/partslist(i)%gamma(1:partsdim(1))) CALL append(fidres, trim(grpname) // "UR", partslist(i)%UR(1:partsdim(1))/partslist(i)%gamma(1:partsdim(1))) CALL append(fidres, trim(grpname) // "UTHET", partslist(i)%UTHET(1:partsdim(1))/partslist(i)%gamma(1:partsdim(1))) CALL append(fidres, trim(grpname) // "pot", partslist(i)%pot(1:partsdim(1))*phinorm) CALL append(fidres, trim(grpname) // "Rindex", REAL(partslist(i)%Rindex(1:partsdim(1)),kind=db)) CALL append(fidres, trim(grpname) // "Zindex", REAL(partslist(i)%Zindex(1:partsdim(1)),kind=db)) CALL append(fidres, trim(grpname) // "partindex", REAL(partslist(i)%partindex(1:partsdim(1)),kind=db)) END IF END DO ! END IF ! 2.5 3d profiles ! ! ! 2.6 Xgrafix ! #if USE_X == 1 IF(nlxg .AND. modulo(kstep,itgraph) .eq. 0) THEN call xgevent CALL updt_xg_var CALL xgupdate END IF #endif !________________________________________________________________________________ ! 3. Final diagnostics ELSE ! Only process 0 should save on file IF(mpirank .ne. 0) RETURN ! ! Flush 0d history array buffers CALL htable_hdf5_flush(hbuf0) ! ! Close all diagnostic files CALL closef(fidres) !________________________________________________________________________________ END IF ! 
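! The create-once/append-per-step pattern used throughout this routine,
! reduced to its core; fid, the dataset name "/data/demo" and buf are
! illustrative placeholders, not names from the code base.
SUBROUTINE append_demo(fid, buf, firstcall)
  USE constants, ONLY: db ! assumed real kind, as elsewhere in the code
  USE futils
  IMPLICIT NONE
  INTEGER, INTENT(in) :: fid
  REAL(kind=db), INTENT(in) :: buf(:)
  LOGICAL, INTENT(in) :: firstcall
  IF (firstcall) THEN
     ! rank-1 extendible dataset whose records have SHAPE(buf)
     CALL creatd(fid, 1, SHAPE(buf), "/data/demo", "demo")
  END IF
  CALL append(fid, "/data/demo", buf) ! one record per diagnostic step
END SUBROUTINE append_demo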
+ +CONTAINS + + SUBROUTINE create_parts_group(p,grpname, time) + USE beam,ONLY: particles + type(particles):: p + real(kind=db):: time + character(len=*):: grpname + If(isgroup(fidres, trim(grpname))) return + CALL creatg(fidres, grpname, "specific specie phase space") + CALL creatd(fidres, 0, SHAPE(time), trim(grpname) // "/time", "time") + CALL creatd(fidres, 0, SHAPE(time), trim(grpname) //"/Nparts", "number of remaining parts") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/R", "radial pos") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/Z", "axial pos") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/THET", "azimuthal pos") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/UZ", "axial beta*gamma") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/UR", "radial beta*gamma") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/UTHET", "azimuthal beta*gamma") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/pot", "electric potential") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/Rindex", "radial grid index") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/Zindex", "axial grid index") + CALL creatd(fidres, 1, SHAPE(p%R), trim(grpname) // "/partindex", "particle index") + CALL attach(fidres,trim(grpname), "q", p%q) + CALL attach(fidres,trim(grpname), "m", p%m) + CALL attach(fidres,trim(grpname), "weight", p%weight) + END SUBROUTINE create_parts_group + END SUBROUTINE diagnose diff --git a/src/distrib_mod.f90 b/src/distrib_mod.f90 index 66dd37f..f7ac473 100644 --- a/src/distrib_mod.f90 +++ b/src/distrib_mod.f90 @@ -1,268 +1,272 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: distrib ! !> @author !> Guillaume Le Bars EPFL/SPC !> Trach Minh Tran EPFL/SPC ! ! DESCRIPTION: !> Contains the routines used to load several type of distribution functions !------------------------------------------------------------------------------ MODULE distrib USE constants + USE omp_lib + USE random ! IMPLICIT NONE contains !--------------------------------------------------------------------------- !> @author !> Trach Minh Tran EPFL/SPC ! ! DESCRIPTION: !> !> @brief Load a uniform distribution using the Hammersley's sequence (0<=y<=1). !> (nbase = 0 => Random sampling) ! !> @param [in] nbase Base of the Hammersley's sequence. (0 => Random) !> @param [out] y array containing the random variables. !--------------------------------------------------------------------------- SUBROUTINE loduni(nbase, y) ! ! Load a uniform distribution using the Hammersley's sequence. ! (nbase = 0 => Random sampling) ! INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(inout) :: y(:) - INTEGER :: n, i + INTEGER :: n, i, ompthread ! n = SIZE(y) ! SELECT CASE (nbase) CASE(0) - CALL RANDOM_NUMBER(y) + ompthread=omp_get_thread_num()+1 + CALL random_array(y,n,ran_index(ompthread),ran_array(:,ompthread)) + !CALL RANDOM_NUMBER(y) CASE(1) DO i=1,n y(i) = (i-0.5_db)/n END DO CASE(2:) DO i=1,n y(i) = rev(nbase,i) END DO CASE default WRITE(*,'(a,i5)') 'Invalid value of NBASE =', nbase END SELECT ! END SUBROUTINE loduni !--------------------------------------------------------------------------- !> @author !> Trach Minh Tran EPFL/SPC ! ! DESCRIPTION: !> !> @brief Load a gaussian distribution using a uniform distribution according to the Hammersley's sequence. !> (nbase = 0 => Random sampling) ! 
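! Worked example of the base-b radical inverse computed by rev() further
! below, which loduni uses for nbase >= 2: rev(2,6) reverses the base-2
! digits of 6 = (110)_2 around the radix point, giving
! (0.011)_2 = 1/4 + 1/8 = 0.375; successive indices then fill (0,1) in the
! low-discrepancy van der Corput pattern underlying Hammersley loading.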
!> @param [in] nbase Base of the Hammersley's sequence. (0 => Random) !> @param [out] y array containing the random variables. !--------------------------------------------------------------------------- SUBROUTINE lodgaus(nbase, y) ! ! Sample y from the Gauss distributrion ! REAL(kind=db), INTENT(inout) :: y(:) INTEGER, INTENT(in) :: nbase ! INTEGER :: n, i, iflag REAL(kind=db) :: r(SIZE(y)) REAL(kind=db) :: t ! REAL(kind=db) :: c0, c1, c2 REAL(kind=db) :: d1, d2, d3 DATA c0, c1, c2/2.515517_db, 0.802853_db, 0.010328_db/ DATA d1, d2, d3/1.432788_db, 0.189269_db, 0.001308_db/ ! n = SIZE(y) CALL loduni(nbase, r) r = 1.0E-5 + 0.99998*r ! DO i=1,n iflag = 1 IF (r(i) .GT. 0.5) THEN r(i) = 1.0 - r(i) iflag = -1 END IF t = SQRT(LOG(1.0/(r(i)*r(i)))) y(i) = t - (c0+c1*t+c2*t**2) / (1.0+d1*t+d2*t**2+d3*t**3) y(i) = y(i) * iflag END DO y = y - SUM(y)/REAL(n) END SUBROUTINE lodgaus !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Load the array y according to a probability distribution function proportional to 1/r !> between ra and rb ! !> @param [in] nbase Base of the Hammersley's sequence. (0 => Random) !> @param [in] ra Lower limit for the values in y !> @param [in] rb Upper limit for the values in y !> @param [out] y array containing the random variables. !--------------------------------------------------------------------------- SUBROUTINE lodinvr(nbase, y, ra, rb) ! ! REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rb, ra ! REAL(kind=db) :: r(SIZE(y)) ! CALL loduni(nbase, r) r=r*log(rb/ra) y=ra*exp(r) END SUBROUTINE lodinvr !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Load the array y according to a probability distribution function proportional to r !> between ra and rb ! !> @param [in] nbase Base of the Hammersley's sequence. (0 => Random) !> @param [in] ra Lower limit for the values in y !> @param [in] rb Upper limit for the values in y !> @param [out] y array containing the random variables. !--------------------------------------------------------------------------- SUBROUTINE lodlinr(nbase, y, ra, rb) REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rb, ra ! REAL(kind=db) :: r(SIZE(y)) ! CALL loduni(nbase, r) y=sqrt(r*(rb**2-ra**2)+ra**2) END SUBROUTINE lodlinr !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Load the array y according to a uniform probability distribution function !> between ra and rb ! !> @param [in] nbase Base of the Hammersley's sequence. (0 => Random) !> @param [in] ra Lower limit for the values in y !> @param [in] rb Upper limit for the values in y !> @param [out] y array containing the random variables. !--------------------------------------------------------------------------- SUBROUTINE lodunir(nbase, y, ra, rb) REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rb, ra ! REAL(kind=db) :: r(SIZE(y)) ! CALL loduni(nbase, r) y=ra+(rb-ra)*r END SUBROUTINE lodunir !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief Load the array y according to a gaussian probability distribution function !> around the mean of rb and ra and a sigma of (rb-ra)/10 ! 
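! Derivation sketch for the two inverse-transform samplers above, with u
! uniform on (0,1) as produced by loduni:
!  pdf f(r) ~ 1/r on [ra,rb]: F(r) = ln(r/ra)/ln(rb/ra), so u = F(r)
!    inverts to r = ra*exp(u*ln(rb/ra))              (lodinvr)
!  pdf f(r) ~ r  on [ra,rb]: F(r) = (r^2-ra^2)/(rb^2-ra^2), so
!    r = sqrt(u*(rb^2-ra^2)+ra^2)                    (lodlinr)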
!> @param [in] nbase Base of the Hammersley's sequence. (0 => Random) !> @param [in] ra Lower limit for the values in y !> @param [in] rb Upper limit for the values in y !> @param [out] y array containing the random variables. !--------------------------------------------------------------------------- SUBROUTINE lodgausr(nbase, y, ra, rb) REAL(kind=db), INTENT(out) :: y(:) INTEGER, INTENT(in) :: nbase REAL(kind=db), INTENT(in) :: rb, ra ! REAL(kind=db) :: r(SIZE(y)) ! CALL lodgaus(nbase, r) y=0.5*(rb+ra)+ 0.1*(rb-ra)*r END SUBROUTINE lodgausr !--------------------------------------------------------------------------- !> @author !> Trach Minh Tran EPFL/SPC ! ! DESCRIPTION: !> !> @brief Return an element of the Hammersley's sequence ! !> @param [in] nbase Base of the Hammersley's sequence. (0 => Random) !> @param [in] i Index of the sequence. !> @param [out] rev Returned element of the Hammersley's sequence !--------------------------------------------------------------------------- REAL(kind=db) FUNCTION rev(nbase,i) ! INTEGER, INTENT(IN) :: nbase ! Base of the Hammersley's sequence (IN) INTEGER, INTENT(IN) :: i ! Index of the sequence (IN) ! ! Local vars INTEGER :: j1, j2 REAL(kind=db) :: xs, xsi !----------------------------------------------------------------------- xs = 0.0 xsi = 1.0 j2 = i DO xsi = xsi/nbase j1 = j2/nbase xs = xs + (j2-nbase*j1)*xsi j2 = j1 IF( j2.LE.0 ) EXIT END DO rev = xs END FUNCTION rev !--------------------------------------------------------------------------- !> @author !> Trach Minh Tran EPFL/SPC ! ! DESCRIPTION: !> !> @brief Modified Bessel functions of the first kind of the first order 1 ! !--------------------------------------------------------------------------- FUNCTION bessi1(x) REAL(kind=db) :: bessi1,x REAL(kind=db) :: ax REAL(kind=db) p1,p2,p3,p4,p5,p6,p7,q1,q2,q3,q4,q5,q6,q7,q8,q9,y SAVE p1,p2,p3,p4,p5,p6,p7,q1,q2,q3,q4,q5,q6,q7,q8,q9 DATA p1,p2,p3,p4,p5,p6,p7/0.5d0,0.87890594d0,0.51498869d0,0.15084934d0,0.2658733d-1,0.301532d-2,0.32411d-3/ DATA q1,q2,q3,q4,q5,q6,q7,q8,q9 /0.39894228d0, -0.3988024d-1, -0.362018d-2, 0.163801d-2,-0.1031555d-1, & & 0.2282967d-1, -0.2895312d-1, 0.1787654d-1,-0.420059d-2/ if (abs(x).lt.3.75) then y=(x/3.75)**2 bessi1=x*(p1+y*(p2+y*(p3+y*(p4+y*(p5+y*(p6+y*p7)))))) else ax=abs(x) y=3.75/ax bessi1=(exp(ax)/sqrt(ax))*(q1+y*(q2+y*(q3+y*(q4+y*(q5+y*(q6+y*(q7+y*(q8+y*q9)))))))) if(x.lt.0.)bessi1=-bessi1 endif return END FUNCTION bessi1 END MODULE distrib \ No newline at end of file diff --git a/src/fields_mod.f90 b/src/fields_mod.f90 index 9b399e1..fc974b5 100644 --- a/src/fields_mod.f90 +++ b/src/fields_mod.f90 @@ -1,1155 +1,1154 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: beam ! !> @author !> Patryk Kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! 
DESCRIPTION: !> Module responsible for initializing the magnetic field and solve the Poisson equation !------------------------------------------------------------------------------ MODULE fields USE constants USE basic, ONLY: nr, nz, zgrid, rgrid, Br, Bz, Er, Ez, femorder, ngauss, nlppform, pot, Athet, & & splrz, splrz_ext, nlperiod, phinorm, nlPhis, nrank, mpirank, mpisize, step, it2d, timera USE beam, ONLY: partslist USE bsplines USE mumps_bsplines USE mpi Use omp_lib Use mpihelper, ONLY: db_type IMPLICIT NONE REAL(kind=db), allocatable, SAVE :: matcoef(:,:), phi_spline(:), vec1(:), vec2(:) REAL(kind=db), allocatable, SAVE :: loc_moments(:,:), gradgtilde(:), fverif(:) INTEGER, SAVE:: loc_zspan TYPE(mumps_mat), SAVE :: femat !< Finite Element Method matrix TYPE(mumps_mat), SAVE :: reduccedmat !< Finite Element Method matrix REAL(kind=db), SAVE:: potextA, potextB !< Variables used for fast calculation of the external potential INTEGER :: nbmoments= 10 !< number of moments to be calculated and stored INTEGER(kind=omp_lock_kind),Allocatable:: mu_lock(:) !< Stores the lock for fields parallelism CONTAINS SUBROUTINE init_mag USE basic, ONLY: magnetfile, nr, nz USE bsplines USE mumps_bsplines USE mpihelper USE geometry ALLOCATE(Br((nr+1)*(nz+1)),Bz((nr+1)*(nz+1))) ALLOCATE(Athet((nr+1)*(nz+1))) ! Calculate magnetic field mirror components in grid points (Davidson analytical formula employed) CALL magnet(magnetfile) end subroutine init_mag !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Set-up the necessary variables for solving Poisson and computes the magnetic field on the grid ! !--------------------------------------------------------------------------- SUBROUTINE fields_init - USE basic, ONLY: pot, nlperiod, nrank, rhs, moments, volume, rgrid, magnetfile + USE basic, ONLY: pot, nlperiod, nrank, rhs, moments, volume, rgrid USE bsplines USE mumps_bsplines USE mpihelper USE geometry INTEGER :: nrz(2), i ! Set up 2d spline splrz used in the FEM CALL set_spline(femorder, ngauss, zgrid, rgrid, splrz, nlppform=nlppform, period=nlperiod) ! Set up 2d spline splrz_ext used in the FEM to calculate the external electric field CALL set_spline(femorder, ngauss, zgrid, rgrid, splrz_ext, nlppform=nlppform, period=nlperiod) ! set up the geometry module for setting up non-conforming boundary conditions call init_geom(splrz, vec1, vec2) WRITE(*,*) "geometry initialized" ! Calculate dimension of splines nrz(1)=nz nrz(2)=nr CALL get_dim(splrz, nrank, nrz, femorder) ALLOCATE(matcoef(nrank(1),nrank(2))) ALLOCATE(pot((nr+1)*(nz+1))) ALLOCATE(rhs(nrank(1)*nrank(2))) ALLOCATE(gradgtilde(nrank(1)*nrank(2))) gradgtilde=0 IF (mpirank .eq. 0) THEN ALLOCATE(moments(nbmoments,nrank(1)*nrank(2))) ELSE ALLOCATE(moments(0,0)) END IF ALLOCATE(phi_spline(nrank(1)*nrank(2))) ALLOCATE(volume(nrank(1)*nrank(2))) volume=0 If(walltype.eq.-1) then allocate(fverif(nrank(1)*nrank(2))) fverif=0 end if ALLOCATE(Er((nr+1)*(nz+1)),Ez((nr+1)*(nz+1))) ALLOCATE(mu_lock(nrank(1)*nrank(2))) do i=1,nrank(1)*nrank(2) call omp_init_lock(mu_lock(i)) end do ! 
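! Standalone sketch of the lock-protected scatter-add that the mu_lock array
! enables in deposit_moments below; the sizes, the bin rule and the name
! lock_demo are illustrative only.
SUBROUTINE lock_demo
  USE omp_lib
  IMPLICIT NONE
  INTEGER(kind=omp_lock_kind) :: lck(4)
  REAL :: acc(4)
  INTEGER :: i, mu
  acc = 0
  DO i = 1, 4
     CALL omp_init_lock(lck(i))
  END DO
  !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(mu)
  DO i = 1, 1000
     mu = mod(i, 4) + 1          ! target bin; may collide across threads
     CALL omp_set_lock(lck(mu))  ! serialize updates to this bin only
     acc(mu) = acc(mu) + 1.0
     CALL omp_unset_lock(lck(mu))
  END DO
  !$OMP END PARALLEL DO
  DO i = 1, 4
     CALL omp_destroy_lock(lck(i))
  END DO
END SUBROUTINE lock_demo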
Calculate and factorise FEM matrix (depends only on mesh) CALL fematrix call timera(0, "factor") write(*,*)"start factor" if(nlweb) then call factor(reduccedmat) else CALL factor(femat) end if call timera(0, "factor") CALL comp_volume rhs = -gradgtilde call poisson(splrz_ext) rhs = 0 END SUBROUTINE fields_init !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Set-up the necessary variables for the communication of moments grid ! !--------------------------------------------------------------------------- SUBROUTINE fields_comm_init USE basic, ONLY: nrank, Zbounds USE mpihelper loc_zspan=Zbounds(mpirank+1)-Zbounds(mpirank)+femorder(1) if(allocated(loc_moments)) deallocate(loc_moments) ALLOCATE(loc_moments(nbmoments,loc_zspan*nrank(2))) IF(mpisize .gt. 1) THEN CALL init_overlaps(nrank, femorder, Zbounds(mpirank), Zbounds(mpirank+1), nbmoments) END IF END SUBROUTINE fields_comm_init !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Construct the right hand side vector used in the FEM Poisson solver ! !> @param[in] p the particles type storing the desired specie parameters ! !--------------------------------------------------------------------------- SUBROUTINE rhscon(p) USE bsplines USE mpi USE basic, ONLY: rnorm, rhs, moments, nlend, step, it2d USE beam, ONLY: particles USE mpihelper Use geometry type(particles), INTENT(INOUT):: p REAL(kind=db) :: chargecoeff IF(p%is_test) RETURN loc_moments=0 ! Reset the moments matrix rhs=-gradgtilde ! reset rhs and apply Dirichlet boundary conditions chargecoeff=p%q/(2*pi*eps_0*phinorm*rnorm) ! Normalized charge density simulated by each macro particle ! TODO: Separate charge deposition for each specie and avoid destruction of data with multiple non test species ! Assemble rhs IF (p%Nploc .ne. 0) THEN IF(modulo(step,it2d).eq. 0 .or. nlend) THEN CALL deposit_moments(p, loc_moments) ELSE CALL deposit_charge(p, loc_moments(1,:)) END IF END IF ! If we are using MPI parallelism, reduce the rhs on the root process IF(mpisize .gt. 1) THEN CALL fields_comm(moments) ELSE moments=loc_moments END IF IF(mpirank.eq.0 )THEN IF(nlphis) THEN ! We consider self-consistent interactions rhs=rhs+moments(1,:)*chargecoeff ! Normalized charge density simulated by each macro particle end if if (walltype .eq. -1) rhs=rhs+fverif END IF END SUBROUTINE rhscon !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Deposit the particles moments (q,v,v^2) from p on the grid ! 
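! Normalization used by rhscon above: each macro-particle of charge q and
! weight w adds q*w/(2*pi*eps_0*phinorm*rnorm) times its b-spline values to
! the weak-form rhs; the 2*pi is the analytic theta integration of the
! axisymmetric problem, and phinorm*rnorm renders the result dimensionless.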
!> @param[in] p the particles type storing the desired specie parameters !> @param[in] p_loc_moments local tensor used to store the moments of the given specie !--------------------------------------------------------------------------- SUBROUTINE deposit_moments(p, p_loc_moments) USE bsplines USE mpi USE basic, ONLY: Zbounds USE beam, ONLY: particles USE mpihelper USE geometry USE omp_lib TYPE(particles), INTENT(IN):: p REAL(kind=db), DIMENSION(:,:), INTENT(INOUT):: p_loc_moments INTEGER ::irow, jcol, it, jw, mu, i, k, iend, nbunch INTEGER,DIMENSION(:),ALLOCATABLE::zleft, rleft REAL(kind=db) :: vr, vthet, vz, coeff REAL(kind=db), ALLOCATABLE :: fun(:,:,:), fun2(:,:,:), wgeom(:,:) INTEGER:: num_threads num_threads=omp_get_max_threads() nbunch=p%Nploc/num_threads ! Particle bunch size used when calling basfun nbunch=max(nbunch,1) ! Particle bunch size used when calling basfun nbunch=min(nbunch,64) ! Particle bunch size used when calling basfun ALLOCATE(zleft(nbunch),rleft(nbunch)) ALLOCATE(fun(1:femorder(1)+1,0:0,nbunch),fun2(1:femorder(2)+1,0:0,nbunch)) ! Arrays keeping values of b-splines at gauss node ALLOCATE(wgeom(nbunch,0:0)) ! Assemble rhs IF (p%Nploc .ne. 0) THEN !$OMP PARALLEL DO DEFAULT(SHARED), PRIVATE(zleft, rleft, jw, it, iend, irow, jcol, mu, k, vr, vz, vthet, coeff, fun, fun2) DO i=1,p%Nploc,nbunch ! Avoid segmentation fault by accessing non relevant data iend=min(i+nbunch-1,p%Nploc) k=iend-i+1 ! Localize the particle !CALL locintv(splrz%sp2, p%R(i:iend), rleft(1:k)) !CALL locintv(splrz%sp1, p%Z(i:iend), zleft(1:k)) rleft(1:k)=p%rindex(i:iend) zleft(1:k)=p%zindex(i:iend) ! Compute the value of the splines at the particles positions CALL basfun(p%Z(i:iend), splrz%sp1, fun(:,:,1:k), zleft(1:k)+1) CALL basfun(p%R(i:iend), splrz%sp2, fun2(:,:,1:k), rleft(1:k)+1) !CALL geom_weight(p%Z(i:iend),p%R(i:iend),wgeom) DO k=1,(iend-i+1) DO jw=1,(femorder(2)+1) DO it=1,(femorder(1)+1) irow=zleft(k)+it-Zbounds(mpirank) jcol=rleft(k)+jw mu=irow+(jcol-1)*(loc_zspan) coeff=p%weight*fun(it,0,k)*fun2(jw,0,k)*p%geomweight(i+k-1,0) ! Add contribution of particle nbunch to rhs grid point mu vr=p%UR(i+k-1)/p%Gamma(i+k-1) vz=p%UZ(i+k-1)/p%Gamma(i+k-1) vthet=p%UTHET(i+k-1)/p%Gamma(i+k-1) call omp_set_lock(mu_lock(mu)) !!$OMP ATOMIC UPDATE p_loc_moments(1, mu) = p_loc_moments(1, mu) + coeff !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(2, mu) = p_loc_moments(2, mu) + coeff*vr !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(3, mu) = p_loc_moments(3, mu) + coeff*vthet !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(4, mu) = p_loc_moments(4, mu) + coeff*vz !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(5, mu) = p_loc_moments(5, mu) + coeff*vr*vr !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(6, mu) = p_loc_moments(6, mu) + coeff*vr*vthet !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(7, mu) = p_loc_moments(7, mu) + coeff*vr*vz !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(8, mu) = p_loc_moments(8, mu) + coeff*vthet*vthet !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(9, mu) = p_loc_moments(9, mu) + coeff*vthet*vz !!$OMP END ATOMIC !!$OMP ATOMIC UPDATE p_loc_moments(10,mu) = p_loc_moments(10,mu) + coeff*vz*vz !!$OMP END ATOMIC call omp_unset_lock(mu_lock(mu)) END DO END DO END DO END DO !$OMP END PARALLEL DO END IF DEALLOCATE(fun,fun2, zleft, rleft) END subroutine deposit_moments !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! 
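! Index bookkeeping in deposit_moments above: the 2-D spline index pair
! (irow,jcol) is flattened column-major as mu = irow + (jcol-1)*loc_zspan,
! e.g. loc_zspan=10 sends (irow=3, jcol=2) to p_loc_moments(:,13); the same
! mapping, with stride nrank(1), reappears in fematrix further below.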
DESCRIPTION: !> !> @brief !> Deposit the particles charges (q) from p on the grid ! !> @param[in] p the particles type storing the desired specie parameters !> @param[in] p_loc_moments local tensor used to store the moments of the given specie !--------------------------------------------------------------------------- SUBROUTINE deposit_charge(p, p_loc_moments) USE bsplines USE mpi USE basic, ONLY: Zbounds USE beam, ONLY: particles USE mpihelper USE geometry USE omp_lib TYPE(particles), INTENT(IN):: p REAL(kind=db), DIMENSION(:), INTENT(INOUT):: p_loc_moments INTEGER ::irow, jcol, it, jw, mu, i, k, iend, nbunch INTEGER,DIMENSION(:),ALLOCATABLE::zleft, rleft REAL(kind=db), ALLOCATABLE :: fun(:,:,:), fun2(:,:,:) INTEGER:: num_threads real(kind=db):: contrib num_threads=omp_get_max_threads() nbunch=p%Nploc/num_threads ! Particle bunch size used when calling basfun nbunch=max(nbunch,1) ! Particle bunch size used when calling basfun nbunch=min(nbunch,64) ! Particle bunch size used when calling basfun ! Assemble rhs IF (p%Nploc .ne. 0) THEN !$OMP PARALLEL DEFAULT(SHARED), PRIVATE(zleft, rleft, jw, it, iend, irow, jcol, mu, k, fun, fun2, contrib) reduction(+:p_loc_moments) ALLOCATE(zleft(nbunch),rleft(nbunch)) ALLOCATE(fun(1:femorder(1)+1,0:0,nbunch),fun2(1:femorder(2)+1,0:0,nbunch)) ! Arrays keeping values of b-splines at gauss node !$OMP DO DO i=1,p%Nploc,nbunch ! Avoid segmentation fault by accessing non relevant data iend=min(i+nbunch-1,p%Nploc) k=iend-i+1 ! Localize the particle !CALL locintv(splrz%sp2, p%R(i:iend), rleft(1:k)) !CALL locintv(splrz%sp1, p%Z(i:iend), zleft(1:k)) rleft(1:k)=p%rindex(i:iend) zleft(1:k)=p%zindex(i:iend) ! Compute the value of the splines at the particles positions CALL basfun(p%Z(i:iend), splrz%sp1, fun, zleft(1:k)+1) CALL basfun(p%R(i:iend), splrz%sp2, fun2, rleft(1:k)+1) !CALL geom_weight(p%Z(i:iend),p%R(i:iend),wgeom) DO k=1,(iend-i+1) DO jw=1,(femorder(2)+1) DO it=1,(femorder(1)+1) irow=zleft(k)+it-Zbounds(mpirank) jcol=rleft(k)+jw mu=irow+(jcol-1)*(loc_zspan) ! Add contribution of particle nbunch to rhs grid point mu contrib= p%weight*fun(it,0,k)*fun2(jw,0,k)*p%geomweight(i+k-1,0) p_loc_moments(mu) = p_loc_moments(mu) + contrib END DO END DO END DO END DO !$OMP END DO DEALLOCATE(fun,fun2, zleft, rleft) !$OMP END PARALLEL END IF END subroutine deposit_charge !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Do the communication of the local moment matrices between mpi workers and reduce the result on the host ! !--------------------------------------------------------------------------- SUBROUTINE fields_comm(moments) USE mpihelper USE Basic, ONLY: Zbounds, mpirank, nlend, it2d, step, leftproc, rightproc REAL(kind=db), DIMENSION(:,:), INTENT(INOUT):: moments INTEGER:: ierr, i, j INTEGER:: displs(mpisize), counts(mpisize) INTEGER:: overlap_type INTEGER:: rcvoverlap_type displs=Zbounds(0:mpisize-1) counts=Zbounds(1:mpisize)-Zbounds(0:mpisize-1) counts(mpisize)=counts(mpisize)+femorder(1) IF(modulo(step,it2d).eq. 0 .or. nlend) THEN CALL start_momentscomm(mpirank, leftproc, rightproc, loc_moments, nrank, femorder, loc_zspan-femorder(1)) IF(mpirank .gt. 
0) THEN !$OMP PARALLEL DO SIMD DEFAULT(SHARED) private(i) DO j=1,femorder(1) DO i=1,nrank(2) loc_moments(1:nbmoments,(i-1)*loc_zspan+j) = loc_moments(1:nbmoments,(i-1)*loc_zspan+j)& & + momentsoverlap_buffer(nbmoments*(nrank(2)*(j-1)+i-1)+1:nbmoments*(nrank(2)*(j-1)+i)) END DO END DO !$OMP END PARALLEL DO SIMD END IF ! Set communication vector type overlap_type=momentsoverlap_type rcvoverlap_type=rcvmomentsoverlap_type ELSE CALL start_rhscomm(mpirank, leftproc, rightproc, loc_moments, nrank, femorder, loc_zspan-femorder(1)) IF(mpirank .gt. 0) THEN !$OMP PARALLEL DO SIMD DEFAULT(SHARED) private(i) DO j=1,femorder(1) DO i=1,nrank(2) loc_moments(1, (i-1)*loc_zspan+j) = loc_moments(1,(i-1)*loc_zspan+j)& & + rhsoverlap_buffer(nrank(2)*(j-1)+i) END DO END DO !$OMP END PARALLEL DO SIMD END IF ! Set communication vector type overlap_type=rhsoverlap_type rcvoverlap_type=rcvrhsoverlap_type END IF IF(mpirank.eq.0) THEN moments=0 END IF #ifdef _DEBUG WRITE(*,*)"started gatherv at step: ",step !WRITE(*,*)"counts:", counts !WRITE(*,*)"displs:", displs #endif CALL MPI_GATHERV(loc_moments(1,1), counts(mpirank+1), overlap_type, & & moments(1,1), counts, displs, rcvoverlap_type, 0, MPI_COMM_WORLD, ierr) #ifdef _DEBUG !WRITE(*,*)"ended gatherv at step: ",step #endif END SUBROUTINE fields_comm !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Solves Poisson equation using FEM. Distributes the result on all MPI workers and interpolate the electric forces !> for each particle. ! !--------------------------------------------------------------------------- SUBROUTINE poisson(splinevar) USE basic, ONLY: rhs, nrank, pot,nlend USE bsplines USE mumps_bsplines USE futils Use geometry type(spline2d):: splinevar INTEGER:: ierr, jder(2) real(kind=db), allocatable::reducedrhs(:) real(kind=db),allocatable:: reducedsol(:),tempcol(:) jder(1)=0 jder(2)=0 ! Only the root process solves Poisson IF(mpirank.eq.0) then if(nlweb) then allocate(reducedrhs(nrank(1)*nrank(2))) allocate(reducedsol(nbreducedspline), tempcol(nrank(1)*nrank(2))) reducedrhs=vmx(etilde,rhs) Call bsolve(reduccedmat,reducedrhs(1:nbreducedspline),reducedsol) tempcol=0 tempcol(1:nbreducedspline)=reducedsol phi_spline=0 phi_spline=vmx(etildet,tempcol) else CALL bsolve(femat,rhs,phi_spline) end if end if ! Broadcast the solution to all MPI workers CALL MPI_Bcast(phi_spline, nrank(1)*nrank(2), db_type, 0, MPI_COMM_WORLD, ierr) matcoef= reshape(phi_spline, (/nrank(1),nrank(2)/)) ! Evaluate the electric potential on the grid points CALL gridval(splinevar, vec1, vec2, pot, jder, matcoef) IF( mpirank .eq. 0 .and. (modulo(step,it2d) .eq. 0 .or. nlend)) THEN ! On the root process, compute the electric field for diagnostic purposes CALL gridval(splinevar, vec1, vec2, Ez, (/1,0/)) CALL gridval(splinevar, vec1, vec2, Er, (/0,1/)) Ez=-pot*gridwgeom(:,1)-Ez*gridwgeom(:,0)-gtilde(:,1) Er=-pot*gridwgeom(:,2)-Er*gridwgeom(:,0)-gtilde(:,2) pot=pot*gridwgeom(:,0)+gtilde(:,0) END IF END SUBROUTINE poisson !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Computes the electric fields and potential at the particles position ! 
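! Matrix form of the reduced web-spline solve performed in poisson above,
! a sketch of what Reducematrix and vmx implement: with E = etilde the
! (reduced x full) extension matrix, the full system A*phi = b becomes
!   (E A E^T) x = E b,   phi = E^T x,
! which eliminates the poorly supported outer b-splines while recovering
! the full coefficient vector needed by gridval.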
!> @param[in] p the particles type storing the desired specie parameters !--------------------------------------------------------------------------- SUBROUTINE EFieldscompatparts(p,nstart,nend) Use beam, ONLY: particles Use geometry TYPE(particles), INTENT(INOUT):: p INTEGER,OPTIONAL::nstart,nend INTEGER:: i, iend, nst, nnd INTEGER:: nbunch INTEGER:: num_threads Real(kind=db), ALLOCATABLE:: erext(:),ezext(:), gtildeloc(:,:) if(.not. present(nstart)) nst=1 if(.not. present(nend)) nnd=p%Nploc num_threads=omp_get_max_threads() nbunch=(nnd-nst+1)/num_threads ! Particle bunch size used when calling basfun nbunch=max(nbunch,1) ! Particle bunch size used when calling basfun nbunch=min(nbunch,64) ! Particle bunch size used when calling basfun Allocate(erext(nbunch), ezext(nbunch), gtildeloc(0:nbunch-1,0:2)) ! Evaluate the electric potential and field at the particles position !$OMP PARALLEL DO DEFAULT(SHARED), PRIVATE(iend) firstprivate(erext,ezext,gtildeloc) DO i=nst,nnd,nbunch ! Avoid segmentation fault by accessing non relevant data iend=min(i+nbunch-1,nnd) CALL getgrad(splrz, p%Z(i:iend), p%R(i:iend), p%pot(i:iend), p%EZ(i:iend), p%ER(i:iend)) CALL getgrad(splrz_ext, p%Z(i:iend), p%R(i:iend), p%potxt(i:iend), EZext(:), erext(:)) !Call geom_weight(p%Z(i:iend), p%R(i:iend), wgeom(0:iend-i,:)) Call CompgBoundary(p%Z(i:iend), p%R(i:iend), gtildeloc(0:iend-i,:)) p%EZ(i:iend)=-p%Ez(i:iend)*p%geomweight(i:iend,0)-p%pot(i:iend)*p%geomweight(i:iend,1)-gtildeloc(0:iend-i,1) p%ER(i:iend)=-p%ER(i:iend)*p%geomweight(i:iend,0)-p%pot(i:iend)*p%geomweight(i:iend,2)-gtildeloc(0:iend-i,2) p%pot(i:iend)=p%geomweight(i:iend,0)*p%pot(i:iend) + gtildeloc(0:iend-i,0) p%potxt(i:iend)=p%geomweight(i:iend,0)*p%potxt(i:iend) + gtildeloc(0:iend-i,0) END DO !$OMP END PARALLEL DO END SUBROUTINE EFieldscompatparts !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Constucts the FEM matrix using bsplines initialized in fields_init !--------------------------------------------------------------------------- SUBROUTINE fematrix USE bsplines USE geometry USE omp_lib USE sparse REAL(kind=db), ALLOCATABLE :: xgauss(:,:), wgauss(:), wgeom(:,:) REAL(kind=db), ALLOCATABLE :: f(:,:), aux(:) REAL(kind=db), ALLOCATABLE :: coefs(:) REAL(kind=db), ALLOCATABLE :: fun(:,:), fun2(:,:) REAL(kind=db) :: contrib INTEGER, ALLOCATABLE :: idert(:,:), iderw(:,:), iderg(:,:) INTEGER :: i, j, jt, iw, irow, jcol, mu, igauss, iterm, irow2, jcol2, mu2, kterms, gausssize - type(elt), POINTER:: t kterms=8 !Number of terms in weak form ALLOCATE(fun(1:femorder(1)+1,0:1),fun2(1:femorder(2)+1,0:1))!Arrays keeping values of b-splines at gauss node !ALLOCATE(xgauss(ngauss(1)*ngauss(2),2), wgauss(ngauss(1)*ngauss(2)),zg(ngauss(1)),rg(ngauss(2)), wzg(ngauss(1)), wrg(ngauss(2))) !Gaussian nodes and weights arrays ALLOCATE(f((femorder(1)+1)*(femorder(2)+1),2),aux(femorder(1)+1)) !Auxiliary arrays ordering bsplines ALLOCATE(idert(kterms,2), iderw(kterms,2), coefs(kterms), iderg(kterms,2)) !Pointers on the order of derivatives call timera(0, "fematrix") ! Constuction of auxiliary array ordering bsplines in given interval DO i=1,(femorder(1)+1) aux(i)=i END DO DO i=1,(femorder(2)+1) f((i-1)*(femorder(1)+1)+1:i*(femorder(1)+1),1)=aux f((i-1)*(femorder(1)+1)+1:i*(femorder(1)+1),2)=i END DO ! Initialisation of FEM matrix CALL init(nrank(1)*nrank(2),2,femat) ! 
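! Origin of the kterms=8 weak-form terms assembled in the loop below: with
! weighted basis functions phi_i = w*N_i, each gradient component expands as
! d(w*N) = w*dN + N*dw, so grad(phi_i).grad(phi_j) yields four products per
! direction (w w, w dw, dw w, dw dw).  idert/iderw select the z/r derivative
! orders of the splines and iderg those of the geometric weight w, all
! multiplied by the common factor r supplied by coefeq.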
Assemble FEM matrix !$OMP PARALLEL DO DEFAULT(SHARED), PRIVATE(i,xgauss,wgauss,gausssize,wgeom, igauss, iterm,jt,irow,jcol, mu, iw, irow2,jcol2, mu2, contrib, iderw, idert, iderg, coefs, fun, fun2), firstprivate(ngauss) DO j=1,nr ! Loop on r position DO i=1,nz ! Loop on z position !! Computation of gauss weight and position in r and z direction for gaussian integration Call calc_gauss(splrz,ngauss,i,j,xgauss,wgauss,gausssize) if (gausssize.gt.0) then If(allocated(wgeom)) deallocate(wgeom) ALLOCATE(wgeom(size(xgauss,1),0:2)) CALL geom_weight(xgauss(:,1),xgauss(:,2),wgeom) End if DO igauss=1,gausssize ! Loop on gaussian weights and positions CALL basfun(xgauss(igauss,1), splrz%sp1, fun, i) CALL basfun(xgauss(igauss,2), splrz%sp2, fun2, j) CALL coefeq(xgauss(igauss,:), idert, iderw, iderg, coefs) DO iterm=1,kterms ! Loop on the two integration dimensions DO jt=1,(1+femorder(1))*(femorder(2)+1) irow=i+f(jt,1)-1; jcol=j+f(jt,2)-1 mu=irow+(jcol-1)*nrank(1) DO iw=1,(1+femorder(1))*(femorder(2)+1) irow2=i+f(iw,1)-1; jcol2=j+f(iw,2)-1 mu2=irow2+(jcol2-1)*nrank(1) contrib= wgeom(igauss,iderg(iterm,1)) * wgeom(igauss,iderg(iterm,2))* & & fun(f(jt,1),idert(iterm,1)) * fun(f(iw,1),idert(iterm,2)) * & & fun2(f(jt,2),iderw(iterm,1)) * fun2(f(iw,2),iderw(iterm,2))* & & wgauss(igauss)*coefs(iterm) call omp_set_lock(mu_lock(mu)) CALL updt_sploc(femat%mat%row(mu),mu2,contrib) call omp_unset_lock(mu_lock(mu)) END DO END DO END DO END DO END DO END DO !$OMP End parallel do DEALLOCATE(f, aux) DEALLOCATE(idert, iderw, coefs, fun,fun2) call timera(1, "fematrix") ! Calculate reduced matrix for use of web splines if(nlweb) then call timera(0, "reduccedmatrix") call Reducematrix(femat,reduccedmat) call timera(1, "reduccedmatrix") end if END SUBROUTINE fematrix !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Computes the volume of the splines cells needed to display the density in post-processing !--------------------------------------------------------------------------- SUBROUTINE comp_volume USE bsplines USE geometry USE basic, ONLY: Volume REAL(kind=db), ALLOCATABLE :: xgauss(:,:), wgauss(:), wgeom(:,:) REAL(kind=db), ALLOCATABLE :: f(:,:), aux(:), coefs(:) REAL(kind=db), ALLOCATABLE :: fun(:,:), fun2(:,:), gtildeintegr(:,:),ftestpt(:,:) Integer, ALLOCATABLE, Dimension(:) :: idg,idt,idp,idw INTEGER :: i, j, jt, irow, jcol, mu, igauss, gausssize, iterm, nterms Real(kind=db)::newcontrib call timera(0, "comp_volume") ALLOCATE(fun(1:femorder(1)+1,0:1),fun2(1:femorder(2)+1,0:1))!Arrays keeping values of b-splines at gauss node !ALLOCATE(xgauss(ngauss(1)*ngauss(2),2), wgauss(ngauss(1)*ngauss(2)),zg(ngauss(1)),rg(ngauss(2)), wzg(ngauss(1)), wrg(ngauss(2))) !Gaussian nodes and weights arrays ALLOCATE(f((femorder(1)+1)*(femorder(2)+1),2),aux(femorder(1)+1)) !Auxiliary arrays ordering bsplines nterms= 4 Allocate(idg(nterms),idt(nterms),idw(nterms),idp(nterms),coefs(nterms)) ! Constuction of auxiliary array ordering bsplines in given interval DO i=1,(femorder(1)+1) aux(i)=i END DO DO i=1,(femorder(2)+1) f((i-1)*(femorder(1)+1)+1:i*(femorder(1)+1),1)=aux f((i-1)*(femorder(1)+1)+1:i*(femorder(1)+1),2)=i END DO volume=0 gradgtilde=0 if(walltype.eq.-1) fverif=0 ! Assemble Volume matrix !$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(j,xgauss,wgauss,gausssize,wgeom, igauss, gtildeintegr, ftestpt, iterm,jt,irow,jcol, mu, idw, idt, idg, idp, coefs, fun, fun2, newcontrib), firstprivate(ngauss) DO j=1,nr ! Loop on r position DO i=1,nz ! 
Loop on z position ! Computation of gauss weight and position in r and z direction for gaussian integration Call calc_gauss(splrz,ngauss,i,j,xgauss,wgauss,gausssize) If(allocated(wgeom)) deallocate(wgeom) if (gausssize.gt.0) then ALLOCATE(wgeom(size(xgauss,1),0:2)) CALL geom_weight(xgauss(:,1),xgauss(:,2),wgeom) End if If(allocated(gtildeintegr)) deallocate(gtildeintegr) ALLOCATE(gtildeintegr(size(xgauss,1),0:2)) Call CompgBoundary(xgauss(:,1),xgauss(:,2),gtildeintegr) if(walltype .eq. -1) then If(allocated(ftestpt)) deallocate(ftestpt) ALLOCATE(ftestpt(size(xgauss,1),0:0)) CALL ftest(xgauss(:,1),xgauss(:,2),ftestpt) end if DO igauss=1,gausssize ! Loop on gaussian weights and positions CALL basfun(xgauss(igauss,1), splrz%sp1, fun, i) CALL basfun(xgauss(igauss,2), splrz%sp2, fun2, j) CALL coefeqext(xgauss(igauss,:), idt, idw, idg, idp, coefs) DO jt=1,(1+femorder(1))*(femorder(2)+1) irow=i+f(jt,1)-1; jcol=j+f(jt,2)-1 mu=irow+(jcol-1)*nrank(1) newcontrib=2*pi*fun(f(jt,1),0) * fun2(f(jt,2),0)*wgeom(igauss,0)* wgauss(igauss)*xgauss(igauss,2) !$OMP ATOMIC UPDATE volume(mu)=volume(mu)+newcontrib !$OMP END ATOMIC if(walltype.eq.-1) THEN newcontrib= ftestpt(igauss,0) * fun(f(jt,1),0) * fun2(f(jt,2),0)*wgeom(igauss,0)* wgauss(igauss)*xgauss(igauss,2) !$OMP ATOMIC UPDATE fverif(mu)=fverif(mu)+newcontrib !$OMP END ATOMIC end if newcontrib=0 Do iterm=1,nterms newcontrib=newcontrib+wgeom(igauss,idg(iterm)) * gtildeintegr(igauss,idp(iterm))* & & fun(f(jt,1),idt(iterm)) * fun2(f(jt,2),idw(iterm)) * & & wgauss(igauss)*coefs(iterm) End do !$OMP ATOMIC UPDATE gradgtilde(mu)=gradgtilde(mu)+newcontrib !$OMP END ATOMIC END DO END DO END DO END DO !$OMP END PARALLEL DO !DEALLOCATE(xgauss, wgauss,zg,rg, wzg, wrg) DEALLOCATE(f, aux) DEALLOCATE(fun,fun2) call timera(1, "comp_volume") END SUBROUTINE comp_volume !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Imposes the dirichlet boundary conditions on the FEM matrix. 
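! Row-replacement technique, in dense-matrix form: zeroing row i and setting
! A(i,i)=1 turns equation i into phi_i = rhs_i, so the Dirichlet value is
! enforced by writing the (normalized) boundary potential into rhs_i
! elsewhere; putrow below applies the same operation to the sparse femat.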
!--------------------------------------------------------------------------- SUBROUTINE fe_dirichlet REAL(kind=db), ALLOCATABLE :: arr(:) INTEGER :: i ALLOCATE(arr(nrank(1)*nrank(2))) DO i=1,nrank(1) IF(rgrid(0).ne.0.0_db) THEN arr=0; arr(i)=1; CALL putrow(femat,i,arr) END IF arr=0; arr(nrank(1)*nrank(2)+1-i)=1 ; CALL putrow(femat,nrank(1)*nrank(2)+1-i,arr) END DO DEALLOCATE(arr) END SUBROUTINE fe_dirichlet !________________________________________________________________________________ SUBROUTINE coefeq(x, idt, idw, idg, c) REAL(kind=db), INTENT(in) :: x(2) INTEGER, INTENT(out) :: idt(:,:), idw(:,:), idg(:,:) REAL(kind=db), INTENT(out) :: c(:) c = x(2) idt(1,1) = 0 idt(1,2) = 0 idw(1,1) = 0 idw(1,2) = 0 idg(1,1) = 1 idg(1,2) = 1 idt(2,1) = 0 idt(2,2) = 1 idw(2,1) = 0 idw(2,2) = 0 idg(2,1) = 1 idg(2,2) = 0 idt(3,1) = 1 idt(3,2) = 0 idw(3,1) = 0 idw(3,2) = 0 idg(3,1) = 0 idg(3,2) = 1 idt(4,1) = 1 idt(4,2) = 1 idw(4,1) = 0 idw(4,2) = 0 idg(4,1) = 0 idg(4,2) = 0 idt(5,1) = 0 idt(5,2) = 0 idw(5,1) = 0 idw(5,2) = 0 idg(5,1) = 2 idg(5,2) = 2 idt(6,1) = 0 idt(6,2) = 0 idw(6,1) = 0 idw(6,2) = 1 idg(6,1) = 2 idg(6,2) = 0 idt(7,1) = 0 idt(7,2) = 0 idw(7,1) = 1 idw(7,2) = 0 idg(7,1) = 0 idg(7,2) = 2 idt(8,1) = 0 idt(8,2) = 0 idw(8,1) = 1 idw(8,2) = 1 idg(8,1) = 0 idg(8,2) = 0 END SUBROUTINE coefeq SUBROUTINE coefeqext(x, idt, idw, idg, idp, c) REAL(kind=db), INTENT(in) :: x(2) INTEGER, INTENT(out) :: idp(:), idt(:), idw(:), idg(:) REAL(kind=db), INTENT(out) :: c(:) c(1) = x(2) idp(1) = 1 idg(1) = 1 idt(1) = 0 idw(1) = 0 c(2) = x(2) idp(2) = 1 idg(2) = 0 idt(2) = 1 idw(2) = 0 c(3) = x(2) idp(3) = 2 idg(3) = 2 idt(3) = 0 idw(3) = 0 c(4) = x(2) idp(4) = 2 idg(4) = 0 idt(4) = 0 idw(4) = 1 END SUBROUTINE coefeqext !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Computes the magnetic field on the grid according to a magnetic mirror, !> or according to the linear interpolation of the values on the !> grid saved in h5 file stored at magfile. !> @param[in] magfile filname of .h5 file containing the definitions of A and B !--------------------------------------------------------------------------- SUBROUTINE magnet(magfile) USE basic, ONLY: B0, Rcurv, rgrid, zgrid, width, rnorm, nr, nz, bnorm USE constants, ONLY: Pi CHARACTER(LEN=*), INTENT(IN), OPTIONAL:: magfile REAL(kind=db) :: rg, zg,halfLz, MirrorRatio INTEGER :: i, rindex IF(len_trim(magfile).lt.1) THEN halfLz=(zgrid(nz)+zgrid(0))/2 MirrorRatio=(Rcurv-1)/(Rcurv+1) DO i=1,(nr+1)*(nz+1) rindex=(i-1)/(nz+1) rg=rgrid(rindex) zg=zgrid(i-rindex*(nz+1)-1)-halfLz Br(i)=-B0*MirrorRatio*SIN(2*pi*zg/width*rnorm)*bessi1(2*pi*rg/width*rnorm)/bnorm Bz(i)=B0*(1-MirrorRatio*COS(2*pi*zg/width*rnorm)*bessi0(2*pi*rg/width*rnorm))/bnorm Athet(i)=0.5*B0*(rg*rnorm - width/pi*MirrorRatio*bessi1(2*pi*rg/width*rnorm)*COS(2*pi*zg/width*rnorm)) END DO ELSE CALL load_mag_from_h5(magfile) END IF END SUBROUTINE magnet !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! 
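! Mirror-field model evaluated by magnet() above, with mirror ratio
! eps = (Rcurv-1)/(Rcurv+1), k = 2*pi/width, z measured from the midplane,
! and I0, I1 the modified Bessel functions defined at the end of this module:
!   B_r(r,z)     = -B0*eps*sin(k*z)*I1(k*r)
!   B_z(r,z)     =  B0*(1 - eps*cos(k*z)*I0(k*r))
!   A_theta(r,z) =  (B0/2)*(r - (width/pi)*eps*I1(k*r)*cos(k*z))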
DESCRIPTION:
!>
!> @brief
!> Loads the magnetic field defined in the .h5 file at location magfile
!> @param[in] magfile filename of the .h5 file containing the definitions of A and B
!---------------------------------------------------------------------------
SUBROUTINE load_mag_from_h5(magfile)
USE basic, ONLY: B0, rgrid, zgrid, rnorm, nr, nz, bnorm
USE constants, ONLY: Pi
USE futils
CHARACTER(LEN=*), INTENT(IN):: magfile
REAL(kind=db), ALLOCATABLE :: magr(:), magz(:)
REAL(kind=db), ALLOCATABLE :: tempBr(:,:), tempBz(:,:), tempAthet(:,:)
REAL(kind=db) :: zi, rj, rcoeff, zcoeff, maxB
INTEGER :: i,j,l, m, magfid
LOGICAL:: B_is_saved
INTEGER :: magn(2), magrank
CALL openf(trim(magfile), magfid,'r',real_prec='d')
CALL getdims(magfid, '/mag/Athet', magrank, magn)
ALLOCATE(magr(magn(2)),magz(magn(1)))
ALLOCATE(tempAthet(magn(1),magn(2)), tempBr(magn(1),magn(2)), tempBz(magn(1),magn(2)))
! Read the r and z coordinates defining A_theta and B
CALL getarr(magfid,'/mag/r',magr)
CALL getarr(magfid,'/mag/z',magz)
CALL getarr(magfid,'/mag/Athet',tempAthet)
IF(isdataset(magfid,'/mag/Br') .and. isdataset(magfid,'/mag/Bz')) THEN
CALL getarr(magfid,'/mag/Br',tempBr)
CALL getarr(magfid,'/mag/Bz',tempBz)
B_is_saved = .true.
ELSE
B_is_saved = .false.
END IF
! Interpolate the magnetic field and potential at the grid points
m=1
Do j=0,nr
rj=rgrid(j)*rnorm
Do while(magr(m).lt.rj)
m=m+1
END DO
rcoeff=(rj-magr(m-1))/(magr(m)-magr(m-1))
l=1
Do i=0,nz
zi=zgrid(i)*rnorm
Do while(magz(l).lt.zi)
l=l+1
END DO
zcoeff=(zi-magz(l-1))/(magz(l)-magz(l-1))
Athet(i+j*(nz+1)+1)= interp2(tempAthet,l,m,zcoeff,rcoeff)
IF(B_is_saved) THEN
Br(i+j*(nz+1)+1)= interp2(tempBr,l,m,zcoeff,rcoeff)
Bz(i+j*(nz+1)+1)= interp2(tempBz,l,m,zcoeff,rcoeff)
ELSE
! Otherwise reconstruct B = curl(A_theta e_theta) by finite differences:
! Br = -dAthet/dz and Bz = (1/r) d(r*Athet)/dr
Br(i+j*(nz+1)+1)= -(tempAthet(l+1,m+1)+tempAthet(l+1,m)-tempAthet(l,m+1)-tempAthet(l,m))/2/(magz(l)-magz(l-1))
Bz(i+j*(nz+1)+1)= ((tempAthet(l+1,m+1)+tempAthet(l,m+1))*magr(m+1) &
& -(tempAthet(l+1,m)+tempAthet(l,m))*magr(m)) &
& /(magr(m)-magr(m-1))/(magr(m)+magr(m+1))
END IF
END DO
END DO
maxB=maxval(Bz)
Bz=Bz/maxB*B0
Br=Br/maxB*B0
! We normalize
Br=Br/bnorm
Bz=Bz/bnorm
CALL closef(magfid)
END SUBROUTINE load_mag_from_h5
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Computes the weighted average of the 4 elements of matrix defined by
!> xi, xi+1, yj, yj+1
!
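! interp2 below is standard bilinear interpolation with left weights
! xcoeff and ycoeff:
!   interp2 = xc*yc*M(i,j) + (1-xc)*yc*M(i+1,j)
!           + xc*(1-yc)*M(i,j+1) + (1-xc)*(1-yc)*M(i+1,j+1),
! so xcoeff = ycoeff = 0.5 returns the plain average of the four corners,
! and the four weights always sum to one.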
!> @param[in] matrix The matrix containing the initial values to be interpolated !> @param[in] xi left reference along the first dimension for the average !> @param[in] yj left reference along the second dimension for the average !> @param[in] xcoeff left weight along the first dimension for the average !> @param[in] ycoeff left weight along the second dimension for the average !> @param[in] interp2 weighted average value !--------------------------------------------------------------------------- FUNCTION interp2(matrix,xi,yj,xcoeff,ycoeff) REAL(kind=db):: interp2 Real(kind=db), INTENT(IN):: matrix(:,:), xcoeff, ycoeff INTEGER, INTENT(IN):: xi, yj interp2=matrix(xi,yj)*xcoeff*ycoeff+ matrix(xi+1,yj)*(1-xcoeff)*ycoeff & & + matrix(xi,yj+1)*xcoeff*(1-ycoeff)+ matrix(xi+1,yj+1)*(1-xcoeff)*(1-ycoeff) END function !________________________________________________________________________________ !Modified Bessel functions of the first kind of the zero order FUNCTION bessi0(x) REAL(kind=db) :: bessi0,x REAL(kind=db) :: ax REAL(kind=db) p1,p2,p3,p4,p5,p6,p7,q1,q2,q3,q4,q5,q6,q7,q8,q9,y SAVE p1,p2,p3,p4,p5,p6,p7,q1,q2,q3,q4,q5,q6,q7,q8,q9 DATA p1,p2,p3,p4,p5,p6,p7/1.0d0,3.5156229d0,3.0899424d0,1.2067492d0,0.2659732d0,0.360768d-1,0.45813d-2/ DATA q1,q2,q3,q4,q5,q6,q7,q8,q9 /0.39894228d0, 0.1328592d-1, 0.225319d-2, -0.157565d-2 ,0.916281d-2, & & -0.2057706d-1, 0.2635537d-1,-0.1647633d-1, 0.392377d-2 / if (abs(x).lt.3.75) then y=(x/3.75)**2 bessi0=p1+y*(p2+y*(p3+y*(p4+y*(p5+y*(p6+y*p7))))) else ax=abs(x) y=3.75/ax bessi0=(exp(ax)/sqrt(ax))*(q1+y*(q2+y*(q3+y*(q4+y*(q5+y*(q6+y*(q7+y*(q8+y*q9)))))))) endif return END FUNCTION bessi0 !________________________________________________________________________________ !Modified Bessel functions of the first kind of the first order FUNCTION bessi1(x) REAL(kind=db) :: bessi1,x REAL(kind=db) :: ax REAL(kind=db) p1,p2,p3,p4,p5,p6,p7,q1,q2,q3,q4,q5,q6,q7,q8,q9,y SAVE p1,p2,p3,p4,p5,p6,p7,q1,q2,q3,q4,q5,q6,q7,q8,q9 DATA p1,p2,p3,p4,p5,p6,p7/0.5d0,0.87890594d0,0.51498869d0,0.15084934d0,0.2658733d-1,0.301532d-2,0.32411d-3/ DATA q1,q2,q3,q4,q5,q6,q7,q8,q9 /0.39894228d0, -0.3988024d-1, -0.362018d-2, 0.163801d-2,-0.1031555d-1, & & 0.2282967d-1, -0.2895312d-1, 0.1787654d-1,-0.420059d-2/ if (abs(x).lt.3.75D0) then y=(x/3.75D0)**2 bessi1=x*(p1+y*(p2+y*(p3+y*(p4+y*(p5+y*(p6+y*p7)))))) else ax=abs(x) y=3.75D0/ax bessi1=(exp(ax)/sqrt(ax))*(q1+y*(q2+y*(q3+y*(q4+y*(q5+y*(q6+y*(q7+y*(q8+y*q9)))))))) if(x.lt.0.)bessi1=-bessi1 endif return END FUNCTION bessi1 !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Computes the value of the external electric field at position R,Z ! !> @param[in] R radial positon !> @param[in] Z axial position !> @param[out] external potential at position R,Z !--------------------------------------------------------------------------- ELEMENTAL FUNCTION ext_potential(R) REAL(kind=db), INTENT(IN):: R REAL(kind=db):: ext_potential ext_potential=potextA*log(R)+potextB END FUNCTION ext_potential !--------------------------------------------------------------------------- !> @author !> Patryk kaminski EPFL/SPC !> Guillaume Le Bars EPFL/SPC ! ! 
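! ext_potential above is the coaxial vacuum solution of Laplace's equation,
! phi(r) = A*ln(r) + B.  A sketch of the coefficients (potextA and potextB
! are set elsewhere in the code): imposing phi(r_a) = phi_a and
! phi(r_b) = phi_b gives
!   A = (phi_b - phi_a)/ln(r_b/r_a),   B = phi_a - A*ln(r_a)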
DESCRIPTION: !> !> @brief !> Free the memory used by the fields module !--------------------------------------------------------------------------- SUBROUTINE clean_fields Use bsplines USE basic, ONLY: rhs, moments INTEGER:: i do i=1,nrank(1)*nrank(2) call omp_destroy_lock(mu_lock(i)) end do DEALLOCATE(mu_lock) DEALLOCATE(matcoef) DEALLOCATE(pot) DEALLOCATE(rhs) DEALLOCATE(phi_spline) DEALLOCATE(moments) DEALLOCATE(Br,Bz) DEALLOCATE(Er,Ez) DEALLOCATE(vec1,vec2) Call DESTROY_SP(splrz) END SUBROUTINE clean_fields SUBROUTINE updt_sploc(arow, j, val) ! ! Update element j of row arow or insert it in an increasing "index" ! USE sparse TYPE(sprow), TARGET :: arow INTEGER, INTENT(in) :: j DOUBLE PRECISION, INTENT(in) :: val ! TYPE(elt), TARGET :: pre_root TYPE(elt), POINTER :: t, p ! pre_root%next => arow%row0 ! pre_root is linked to the head of the list. t => pre_root DO WHILE( ASSOCIATED(t%next) ) p => t%next IF( p%index .EQ. j ) THEN p%val = p%val+val RETURN END IF IF( p%index .GT. j ) EXIT t => t%next END DO ALLOCATE(p) p = elt(j, val, t%next) t%next => p ! arow%nnz = arow%nnz+1 arow%row0 => pre_root%next ! In case the head is altered END SUBROUTINE updt_sploc !+ END MODULE fields diff --git a/src/geometry_mod.f90 b/src/geometry_mod.f90 index 28f64aa..67741a7 100644 --- a/src/geometry_mod.f90 +++ b/src/geometry_mod.f90 @@ -1,956 +1,956 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: geometry ! !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> Module responsible for handling geometries with non constant radius using b-splines interpolation !> This module defines ways to comupte the weight function needed for weighted b-splines and !> can load the definition of the geometry from input files !------------------------------------------------------------------------------ MODULE geometry USE constants USE bsplines USE mumps_bsplines IMPLICIT NONE type innerspline Integer, Allocatable:: k(:) ! Index in reduced set real(kind=db), Allocatable:: weight(:) ! geomtric weight at relevant cell end type innerspline Real(kind=db),ALLOCATABLE, SAVE ::IVand (:,:) Real(kind=db),save:: z_0=0, r_0=0, invr_r=0, invr_z=0, r_a=0, r_b=0, z_r=1, r_r=1 Real(kind=db), save:: Phidown=0,Phiup=0 Real(kind=db), save:: L_r=0.16, L_z=0.24 Logical, save:: nlweb=.false. Integer, save::Interior=-1 Integer, save:: above1=1, above2=-1 Integer, save :: walltype = 0 Integer, save, Allocatable:: bsplinetype(:) ! Array containing the inner/outer type for each bspline Integer, save, Allocatable:: gridcelltype(:,:) ! Array containing the inner/outer type for each gridcell Real(kind=db), save, allocatable:: gridwgeom(:,:) ! Stores the geometric weight at the grid points Real(kind=db), save, allocatable:: gtilde(:,:) ! Stores the extension to the domain of the boundary conditions type(mumps_mat):: etilde ! Matrix of extendend web splines type(mumps_mat):: etildet ! Transpose of Matrix of extendend web splines integer,save :: nbreducedspline ! 
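! updt_sploc above walks the sorted linked list of one sparse row behind a
! stack-allocated sentinel element (pre_root), so inserting before the
! current head needs no special case; re-attaching arow%row0 to
! pre_root%next on exit covers exactly the head-insertion case.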
PUBLIC:: geom_weight

INTERFACE geom_weight
MODULE PROCEDURE geom_weight0, geom_weight1, geom_weight2
END INTERFACE geom_weight

NAMELIST /geomparams/ z_0, r_0, z_r, r_r, r_a, r_b, walltype, L_r, L_z, nlweb, above1, above2

contains

SUBROUTINE read_geom(Fileid, rnorm, rgrid, Potinn, Potout)
Use mpi
Real(kind=db):: rnorm, rgrid(:), Potinn, Potout
Integer:: Fileid, mpirank, ierr
CALL MPI_COMM_RANK(MPI_COMM_WORLD, mpirank, ierr)
Rewind(Fileid)
READ(Fileid, geomparams)
if(mpirank .eq. 0) WRITE(*, geomparams)
!! Normalizations and initialization of the geometric variables
r_a=r_a/rnorm
r_b=r_b/rnorm
if(r_a .eq. 0 .and. r_b .eq.0) then
!! in case no geomparams have been defined
r_a=rgrid(1)
r_b=rgrid(size(rgrid,1))
end if
z_0=z_0/rnorm
r_0=r_0/rnorm
z_r=z_r/rnorm
r_r=r_r/rnorm
invr_r=1/r_r
invr_z=1/z_r
Interior=-1
above1=1
above2=-1
Phidown=Potinn
Phiup=Potout
L_r=L_r/rnorm
L_z=L_z/rnorm
end subroutine read_geom

SUBROUTINE init_geom(spl2, vec1, vec2)
type(spline2d):: spl2
real(kind=db):: vec1(:),vec2(:)
Real(kind=db), Allocatable:: zgrid(:),rgrid(:)
Integer:: nrank(2), nrz(2)
Call get_dim(spl2, nrank, nrz)
! Obtain grid data from the spline structure
Allocate(zgrid(1:nrz(1)+1))
Allocate(rgrid(1:nrz(2)+1))
zgrid=spl2%sp1%knots(0:nrz(1))
rgrid=spl2%sp2%knots(0:nrz(2))
! Create a table of the classification for each cell
Allocate(gridcelltype(nrz(1),nrz(2)))
Call classifycell(zgrid, rgrid, gridcelltype)
if (nlweb) then
Allocate(bsplinetype(nrank(1)*nrank(2)))
Call classifyspline(spl2, gridcelltype, bsplinetype)
Call buildetilde(spl2,bsplinetype,gridcelltype,etilde)
end if
ALLOCATE(gridwgeom((nrz(1)+1)*(nrz(2)+1),0:2))
gridwgeom=0
ALLOCATE(gtilde((nrz(1)+1)*(nrz(2)+1),0:2))
gtilde=0
CALL CompgBoundary(vec1,vec2,gtilde)
Call geom_weight(vec1, vec2, gridwgeom)
end Subroutine init_geom

Subroutine geom_diag(File_handle, str, rnorm)
Use mpi
Use futils
Integer:: File_handle
Real(kind=db):: rnorm
Character(len=*):: str
CHARACTER(len=128):: grpname
Integer:: ierr, mpirank
CALL MPI_COMM_RANK(MPI_COMM_WORLD, mpirank, ierr)
IF(mpirank .eq. 0) THEN
Write(grpname,'(a,a)') trim(str),"/geometry"
If(.not. isgroup(File_handle, trim(grpname))) THEN
CALL creatg(File_handle, trim(grpname))
END IF
Call attach(File_handle, trim(grpname), "r_a", r_a*RNORM)
Call attach(File_handle, trim(grpname), "r_b", r_b*RNORM)
Call attach(File_handle, trim(grpname), "z_0", z_0*RNORM)
Call attach(File_handle, trim(grpname), "r_0", r_0*RNORM)
Call attach(File_handle, trim(grpname), "r_r", r_r*RNORM)
Call attach(File_handle, trim(grpname), "z_r", z_r*RNORM)
Call attach(File_handle, trim(grpname), "interior", interior)
Call attach(File_handle, trim(grpname), "above1", above1)
Call attach(File_handle, trim(grpname), "above2", above2)
Call attach(File_handle, trim(grpname), "walltype", walltype)
Call putarr(File_handle, trim(grpname)//'/geomweight',gridwgeom)
END IF
End subroutine geom_diag

Subroutine buildetilde(spl2, bsplinetype, celltype, etilde)
USE mumps_bsplines
type(spline2d):: spl2
Integer:: bsplinetype(:)
Integer:: celltype(:,:)
type(mumps_mat):: etilde
Logical, Allocatable:: Ibsplinetype(:)
Integer:: i,j,k, icellz, icellr, jcellz, jcellr, nrank(2), nrz(2), norder(2)
real(kind=db), allocatable:: zgrid(:), rgrid(:)
real(kind=db):: wgeomi, indexdistance, eij
integer:: l,m, n, linkedi
type(innerspline):: innersplinelist
real(kind=db), allocatable:: rgridmesh(:),zgridmesh(:), d(:), hz(:), cz(:), hr(:), cr(:), hmesh(:)
!if(allocated(etilde)) deallocate(etilde)
nbreducedspline=count(bsplinetype .eq. 1)
Call get_dim(spl2,nrank,nrz,norder)
! Obtain grid data from the spline structure
Allocate(zgrid(1:nrz(1)+1))
Allocate(rgrid(1:nrz(2)+1))
zgrid=spl2%sp1%knots(0:nrz(1))
rgrid=spl2%sp2%knots(0:nrz(2))
allocate(innersplinelist%k(nrank(1)*nrank(2)),innersplinelist%weight(nrank(1)*nrank(2)))
allocate(Ibsplinetype(nrank(1)*nrank(2)))
allocate(rgridmesh(nrank(1)*nrank(2)), zgridmesh(nrank(1)*nrank(2)))
allocate(hmesh(nrank(1)*nrank(2)))
allocate(d(size(bsplinetype)))
Ibsplinetype=.False.
call calcsplinecenters(spl2%sp1,cz,hz)
call calcsplinecenters(spl2%sp2,cr,hr)
Do i=0,nrank(2)-1
zgridmesh(i*nrank(1)+1:(i+1)*nrank(1))=cz
rgridmesh(i*nrank(1)+1:(i+1)*nrank(1))=cr(i+1)
hmesh(i*nrank(1)+1:(i+1)*nrank(1))=hr(i+1)*hz
End do
!allocate(etilde(nbinspline,nrank(1)*nrank(2)))
!etilde=0
call init(nrank(1)*nrank(2),nbreducedspline,etilde)
call init(nrank(1)*nrank(2),nbreducedspline,etildet)
k=1
Do i=1,nrank(1)*nrank(2)
if(bsplinetype(i) .ne. 1) cycle ! skip splines whose support contains no cell completely inside D
! one cell of bspline i is completely in D
icellz=mod(i-1,nrank(1))+1
icellr=(i-1)/(nrank(1))+1
outer: do l=max(1,icellz-norder(1)),min(nrz(1),icellz)
do m=max(1,icellr-norder(2)),min(nrz(2),icellr)
if (celltype(l,m).eq.1) then
call geom_weight((zgrid(l)+zgrid(l+1))/2,(rgrid(m)+rgrid(m+1))/2,wgeomi)
EXIT outer
end if
end do
end do outer
call putele(etilde,k,i,1/wgeomi)
call putele(etildet,i,k,1/wgeomi)
innersplinelist%k(i)=k
innersplinelist%weight(i)=1/wgeomi
k=k+1
!Ibsplinetype(i)=icellz-norder(1) .gt. 0 .and. icellr-norder(2) .gt. 0 .and. icellz .le. nrz(1) .and. icellr .le. nrz(2)
Ibsplinetype(i)=icellz+norder(1) .le. nrank(1) .and. icellr+norder(2) .le. nrank(2)
if(.not. Ibsplinetype(i)) cycle
do m=0,norder(2)
do l=0,norder(1)
! Check if all linked splines are inner splines
Ibsplinetype(i)=Ibsplinetype(i) .and. (bsplinetype(i+l+m*nrank(1)) .eq. 1)
end do
end do
end do
!!$OMP PARALLEL DO DEFAULT(SHARED), PRIVATE(d,k,icellz,icellr,jcellr,jcellz,indexdistance,n,i,m,l,eij,linkedi)
Do j=1,nrank(1)*nrank(2)
if(bsplinetype(j) .ne. 0) cycle
d=0
where(Ibsplinetype)
d=(rgridmesh(j)-rgridmesh)**2+(zgridmesh(j)-zgridmesh)**2
end where
k=minloc(d,1,MASK=Ibsplinetype)
icellz=mod(k-1,nrank(1))
icellr=(k-1)/(nrank(1))
jcellz=mod(j-1,nrank(1))
jcellr=(j-1)/(nrank(1))
indexdistance=sqrt(real((jcellz-icellz)**2 + (jcellr-icellr)**2,kind=db))
if(indexdistance .gt. 20)then
Write(*,*) 'Error on system conditioning, the number of radial or axial points is too low'
stop
end if
do n=0,norder(2)
do i=0,norder(1)
eij=1
do m=0,norder(2)
if(n.eq.m) cycle
eij=eij*(rgridmesh(j)-rgridmesh(k+m*nrank(1)))/(rgridmesh(k+n*nrank(1))-rgridmesh(k+m*nrank(1)))
end do
do l=0,norder(1)
if( i .eq. l ) cycle
eij=eij*(zgridmesh(j)-zgridmesh(k+l))/(zgridmesh(k+i)-zgridmesh(k+l))
!eij=eij*(jcellr-icellr-m)/(n-m)*(jcellz-icellz-l)/(i-l)
end do
linkedi=innersplinelist%k(k+i+n*nrank(1)) ! equivalent to findloc, necessary for ifort 17
!findloc(innersplinelist%mu,k,1)
eij=eij*innersplinelist%weight(k+i+n*nrank(1))
call putele(etilde,linkedi,j,eij)
call putele(etildet,j,linkedi,eij)
end do
end do
end do
!!$OMP End parallel do
call to_mat(etilde)
call to_mat(etildet)
end subroutine

Subroutine calcsplinecenters(spl,ctrs,heights)
use bsplines
type(spline1d):: spl
real(kind=db), allocatable:: ctrs(:), heights(:)
integer:: nrank, nx, order, i, left1, j, left2, left3
real(kind=db):: x1, x2, x3
real(kind=db), allocatable:: fun1(:,:), fun2(:,:), fun3(:,:)
real(kind=db),allocatable:: init_heights(:)
call get_dim(spl, nrank, nx, order)
if (allocated(ctrs)) deallocate(ctrs)
if (allocated(heights)) deallocate(heights)
allocate(heights(nrank))
allocate(ctrs(nrank))
allocate(fun1(1:order+1,0:1), fun2(1:order+1,0:1), fun3(1:order+1,0:1))
allocate(init_heights(nrank))
init_heights=1.0
ctrs(:)=spl%knots(0:nrank-1)
call gridval(spl, ctrs, heights, 0, init_heights)
if (order .gt.1) then
do i=2,nrank-1
x1=spl%knots(i-order)+1e5*Epsilon(x1)
x2=spl%knots(i-1)-1e5*Epsilon(x2)
left1=max(0,i-order)
left2=min(nx-2,i-2)
call basfun(x1,spl,fun1, left1+1)
call basfun(x2,spl,fun2, left2+1)
fun3=1
j=0
! Bisection search for the center of b-spline i, i.e. the zero of its first derivative
Do while( j .le.100)
x3=(x1+x2)/2
call locintv(spl, x3, left3)
call basfun(x3,spl,fun3, left3+1)
if( (x2-x1)/2.lt.1e-12) exit
if(abs(fun3(i-left3,1)).lt.1e-15) exit
if(fun3(i-left3,1)*fun1(i-left1,1).le.0) then
fun2=fun3
x2=x3
left2=left3
else
fun1=fun3
x1=x3
left1=left3
end if
j=j+1
End do
ctrs(i)=x3
heights(i)=fun3(i-left3,0)
end do
end if
end subroutine
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Subroutine classifycell(zgrid,rgrid,gridctype)
Real(kind=db):: zgrid(:),rgrid(:)
Integer:: gridctype(:,:)
Integer::i,j
gridctype=-1
!$OMP parallel do private(i)
Do j=1,size(rgrid,1)-1
Do i=1,size(zgrid,1)-1
! Determines the inner/boundary/outer type of each cell
Call classification((/zgrid(i),zgrid(i+1)/),(/rgrid(j),rgrid(j+1)/),&
& gridctype(i,j))
End Do
End do
!$OMP end parallel do
End subroutine
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine classifyspline(spl2,gridctype,bsptype)
type(spline2d):: spl2
Integer:: gridctype(:,:)
Integer:: bsptype(:)
Integer:: nrank(2), nrz(2), norder(2)
Integer:: i, j, mu, imin, imax, jmin, jmax
Integer, Allocatable:: splinespan(:,:)
call get_dim(spl2, nrank, nrz, norder)
bsptype=0
Allocate(splinespan(norder(1)+1,norder(2)+1))
Do mu=1,nrank(1)*nrank(2)
i=mod(mu-1,nrank(1))+1
j=(mu-1)/nrank(1)+1
splinespan=-1
imin=max(1,i-norder(1))
imax=min(nrz(1),i)
jmin=max(1,j-norder(2))
jmax=min(nrz(2),j)
splinespan(1:(imax-imin+1),1:(jmax-jmin+1))=gridctype(imin:imax,jmin:jmax)
if ( ANY( splinespan==1 ) ) then
bsptype(mu)=1
else if (sum(splinespan).eq. -(norder(1)+1)*(1+norder(2))) then
bsptype(mu)=-1
end if
end do
End subroutine
!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> General placeholder function used in fields_mod to compute the weight for weighted b-splines.
!> This function combines an ellipse function and a horizontal wall.
!
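!> The wall weight w1 and the ellipse weight w2 are combined below with the R-function conjunction
!>   w = w1 + w2 - sqrt(w1**2 + w2**2)
!> which is negative outside the domain, zero on its boundary and positive inside.
!> Minimal usage sketch (z0, r0 are placeholder scalar positions; the generic interface
!> is resolved on the rank of the last argument):
!>   Real(kind=db):: w0, wd(0:2)
!>   call geom_weight(z0, r0, w0)   ! weight only
!>   call geom_weight(z0, r0, wd)   ! weight, z derivative wd(1) and r derivative wd(2)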
!--------------------------------------------------------------------------- SUBROUTINE geom_weight0(z,r,w) Real(kind=db), INTENT(IN):: r,z Real(kind=db), INTENT(OUT):: w Real(kind=db):: rtmp(1:1), ztmp(1:1), walltmp(1:1,1:1), elliptmp(1:1,1:1) ztmp=z rtmp=r call wallweight(ztmp,rtmp,walltmp, r_a, above1) if (walltype .ne. 0) then call ellipseweight(ztmp,rtmp,elliptmp,Interior) else call wallweight(ztmp,rtmp,elliptmp, r_b, above2) end if if(interior.eq.0 .and. above2.eq.0) then w=walltmp(1,1) else if (above1 .eq. 0) then w=elliptmp(1,1) else w=walltmp(1,1)+elliptmp(1,1)-sqrt(walltmp(1,1)**2+elliptmp(1,1)**2) ! weight at position r,z end if End SUBROUTINE geom_weight0 SUBROUTINE geom_weight1(z,r,w) Real(kind=db), INTENT(IN):: r,z Real(kind=db), INTENT(OUT):: w(0:) Real(kind=db):: rtmp(1:1), ztmp(1:1), walltmp(1:1,0:size(w,1)-1), elliptmp(1:1,0:size(w,1)-1) Real(kind=db):: squareroot, denom ztmp=z rtmp=r call wallweight(ztmp,rtmp,walltmp, r_a, above1) if (walltype .ne. 0) then call ellipseweight(ztmp,rtmp,elliptmp,Interior) else call wallweight(ztmp,rtmp,elliptmp, r_b, above2) end if if(interior.eq.0 .and. above2.eq.0) then w=walltmp(1,:) else if (above1 .eq. 0) then w=elliptmp(1,:) else denom=walltmp(1,0)**2+elliptmp(1,0)**2 squareroot=sqrt(denom) w(0)=walltmp(1,0)+elliptmp(1,0)-squareroot ! weight at position r,z If(size(w,1) .gt. 1) then ! first derivative w(1)=walltmp(1,1)+elliptmp(1,1)-(elliptmp(1,1)*elliptmp(1,0)+walltmp(1,1)*walltmp(1,0))/& & squareroot ! z derivative of w w(2)=walltmp(1,2)+elliptmp(1,2)-(elliptmp(1,2)*elliptmp(1,0)+walltmp(1,2)*walltmp(1,0))/& & squareroot ! r derivative of w End If End if End SUBROUTINE geom_weight1 SUBROUTINE geom_weight2(z,r,w) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: w(:,0:) Real(kind=db):: walltmp(size(w,1),0:size(w,2)-1), elliptmp(size(w,1),0:size(w,2)-1) Real(kind=db):: squareroot(size(w,1)), denom(size(w,1)) call wallweight(z,r,walltmp, r_a, above1) if (walltype .ne. 0) then call ellipseweight(z,r,elliptmp,Interior) else call wallweight(z,r,elliptmp, r_b, above2) end if if(interior.eq.0 .and. above2.eq.0) then w=walltmp(:,:) else if (above1 .eq. 0) then w=elliptmp(:,:) else denom=walltmp(:,0)**2+elliptmp(:,0)**2 squareroot=sqrt(denom) w(:,0)=walltmp(:,0)+elliptmp(:,0)-squareroot ! weight at position r,z If(size(w,2) .gt. 1) then ! first derivative w(:,1)=walltmp(:,1)+elliptmp(:,1)-(elliptmp(:,1)*elliptmp(:,0)+walltmp(:,1)*walltmp(:,0))/& & squareroot ! z derivative of w w(:,2)=walltmp(:,2)+elliptmp(:,2)-(elliptmp(:,2)*elliptmp(:,0)+walltmp(:,2)*walltmp(:,0))/& & squareroot ! r derivative of w End If End if End SUBROUTINE geom_weight2 Subroutine Boundary_limits(pts) Real(kind=db), Allocatable, INTENT(out) :: pts(:,:) If(allocated(pts))Deallocate(pts) allocate(pts(4,2)) pts(1,1:2)=(/z_0-invr_z,r_0/) pts(2,1:2)=(/z_0,r_0+invr_r/) pts(3,1:2)=(/z_0+invr_z,r_0/) pts(4,1:2)=(/z_0,r_0-invr_r/) End subroutine Boundary_limits SUBROUTINE classification(z, r, ctype) real(kind=db), INTENT(IN):: r(2), z(2) INTEGER, INTENT(OUT):: ctype Real(kind=db)::weights(5,1:1) Real(kind=db):: zeval(5),reval(5) zeval=(/ z(1),z(2),z(1),z(2), (z(2)+z(1))/2 /) reval=(/ r(1),r(1),r(2),r(2), (r(2)+r(1))/2 /) CAll geom_weight(zeval,reval,weights) ctype=int(sign(1.0_db,weights(5,1))) If(weights(1,1)*weights(2,1) .lt. 0 ) then ctype=0 return End If If(weights(1,1)*weights(3,1) .lt. 0 ) then ctype=0 return End If If(weights(2,1)*weights(4,1) .lt. 0 ) then ctype=0 return End If If(weights(3,1)*weights(4,1) .lt. 
0 ) then ctype=0 return End If end subroutine ! ################################################################## Subroutine calc_gauss(spl2, ngauss, i, j, xgauss, wgauss, gausssize, celltype) type(spline2d), INTENT(IN):: spl2 Integer, Intent(out):: gausssize INTEGER, Intent(out), Optional :: celltype Real(kind=db), Allocatable::rgrid(:),zgrid(:) Real(kind=db), ALLOCATABLE::xgauss(:,:), wgauss(:) Integer:: i,j, ngauss(2) Real(kind=db),Allocatable:: pts(:,:), zpoints(:) Real(kind=db):: zg(ngauss(1)),rg(ngauss(2)), wzg(ngauss(1)), wrg(ngauss(2)) Integer:: k, l, directiondown, directionup, nbzpoints, direction, ctype Logical:: hasmaxpoint Real(kind=db):: xptup, xptdown, w, rmin, rmax type(spline1d):: splz, splr splz=spl2%sp1 splr=spl2%sp2 Allocate(zgrid(1:splz%nints+1)) Allocate(rgrid(1:splr%nints+1)) zgrid=splz%knots(0:splz%nints) rgrid=splr%knots(0:splr%nints) hasmaxpoint=.false. If(allocated(xgauss)) deallocate(xgauss) if(allocated(wgauss)) deallocate(wgauss) !Call classification((/zgrid(i),zgrid(i+1)/),(/rgrid(j),rgrid(j+1)/),ctype) ctype=gridcelltype(i,j) If (ctype .ge. 1) then ! we have a normal internal cell Allocate(xgauss(ngauss(1)*ngauss(2),2)) Allocate(wgauss(ngauss(1)*ngauss(2))) gausssize=ngauss(1)*ngauss(2) !Computation of gauss weight and position in r and z direction for gaussian integration CALL get_gauss(spl2%sp1, ngauss(1), i, zg, wzg) CALL get_gauss(spl2%sp2, ngauss(2), j, rg, wrg) ! Construction of matrix xgauss and wgauss storing the weight and position for 2d gaussian integration DO k=1,ngauss(2) xgauss((k-1)*ngauss(1)+1:k*ngauss(1),1)=zg xgauss((k-1)*ngauss(1)+1:k*ngauss(1),2)=rg(k) wgauss((k-1)*ngauss(1)+1:k*ngauss(1))=wrg(k)*wzg END DO Else If(ctype.eq.0) then ! we have a boundary cell Call Boundary_Limits(pts) !Do k=1,size(pts,1) ! hasmaxpoint=hasmaxpoint .or. (pts(k,1) .ge. zgrid(i) .and. pts(k,1) .le. zgrid(i+1) & ! & .and. pts(k,2) .ge. rgrid(j) .and. pts(k,2) .le. rgrid(j+1)) !End do If(.not. hasmaxpoint) Then directiondown=1 directionup=1 Call Find_crosspoint((/zgrid(i),zgrid(i+1)/),rgrid(j),xptdown,directiondown) Call Find_crosspoint((/zgrid(i),zgrid(i+1)/),rgrid(j+1),xptup,directionup) select case ( directionup+directiondown) Case (0) ! The intersections are only on the left and right cell boundaries nbzpoints=2 Allocate(zpoints(nbzpoints)) zpoints=(/zgrid(i),zgrid(i+1)/) Case(1) nbzpoints=3 Allocate(zpoints(nbzpoints)) if(directiondown.eq.1) zpoints=(/zgrid(i), xptdown,zgrid(i+1)/) if(directionup .eq. 1) zpoints=(/zgrid(i), xptup,zgrid(i+1)/) Case(2) nbzpoints=3 Allocate(zpoints(nbzpoints)) call geom_weight(zgrid(i),rgrid(j),w) If(w.ge.0) zpoints=(/zgrid(1),min(xptdown,xptup),max(xptdown,xptup) /) If(w.lt.0) zpoints=(/min(xptdown,xptup),max(xptdown,xptup), zgrid(i+1) /) End select Allocate(xgauss(ngauss(1)*ngauss(2)*(nbzpoints-1),2)) Allocate(wgauss(ngauss(1)*ngauss(2)*(nbzpoints-1))) gausssize=ngauss(1)*ngauss(2)*(nbzpoints-1) ! Compute gauss points Do l=1,nbzpoints-1 !CALL get_gauss(spl2%sp1, ngauss(1), i, zg, wzg) Call gauleg(zpoints(l),zpoints(l+1),zg,wzg,ngauss(1)) ! We test if the lower or upper side is in the domain call geom_weight(zg(1),rgrid(j),w) rmin=rgrid(j) if (w .lt. 0) rmin = rgrid(j+1) Do k=1,ngauss(1) direction=2 Call Find_crosspoint((/rgrid(j),rgrid(j+1)/),zg(k),rmax,direction) ! We compute the radial limits at each z position Call gauleg(min(rmin,rmax),max(rmin,rmax),rg,wrg,ngauss(2)) ! 
We obtain the gauss w and pos for these boundaries xgauss(k+(l-1)*ngauss(1)*ngauss(2) : l*ngauss(2)*ngauss(1) : ngauss(1),1) = zg(k) xgauss(k+(l-1)*ngauss(1)*ngauss(2) : l*ngauss(2)*ngauss(1) : ngauss(1),2) = rg wgauss(k+(l-1)*ngauss(1)*ngauss(2) : l*ngauss(2)*ngauss(1) : ngauss(1)) = wrg*wzg(k) End do End Do End If Else gausssize=1 Allocate(xgauss(1:1,2)) Allocate(wgauss(1:1)) !Computation of gauss weight and position in r and z direction for gaussian integration CALL get_gauss(spl2%sp1, ngauss(1), i, zg, wzg) CALL get_gauss(spl2%sp2, ngauss(2), j, rg, wrg) ! Construction of matrix xgauss and wgauss storing the weight and position for 2d gaussian integration xgauss(1,1)=(zg(1)+zg(ngauss(1)))*0.5 xgauss(1,2)=(rg(1)+rg(ngauss(2)))*0.5 wgauss(1)=0 End If If(PRESENT(celltype)) celltype=ctype End Subroutine calc_gauss Subroutine Find_crosspoint(x,y,xpt, direction) Real(kind=db):: x(2), y Real(kind=db):: xptm, xpt, temp Real(kind=db):: w, wold Integer, Intent(INOUT):: direction Integer:: i xptm=x(1) xpt=x(2) i=0 if(direction .eq.1) Call geom_weight(xptm,y,wold) if(direction .eq.2) Call geom_weight(y,xptm,wold) if(direction .eq.1) Call geom_weight(xpt,y,w) if(direction .eq.2) Call geom_weight(y,xpt,w) if(w*wold.gt.0) then direction=0 return End If Do while( i .le.100 .and. w*wold .lt. 0) !Write(*,*) 'i,xpt,xptm,w,wold,',i,xpt,xptm,w,wold if(direction .eq.1) Call geom_weight(xpt,y,w) if(direction .eq.2) Call geom_weight(y,xpt,w) xptm=xpt-w*(xpt-xptm)/(w-wold) wold=w temp=xptm xptm=xpt xpt=temp i=i+1 End do if(xpt .ge. x(2) .or. xpt .le. x(1)) direction=0 End Subroutine SUBROUTINE wallweight(z,r,w, r_lim, above) Real(kind=db):: z(:),r(:),w(:,0:), r_lim Integer:: above w(:,0)=above*(r-r_lim) ! weight at position r,z If(size(w,2) .gt. 1) then ! first derivative w(:,1)=0 ! z derivative of w w(:,2)=above ! r derivative of w End If End subroutine wallweight Subroutine ellipseweight(z,r,w, Interior) Real(kind=db):: z(:),r(:),w(:,0:) Integer:: Interior w(:,0)=Interior*(1-(r-r_0)**2*invr_r**2-(z-z_0)**2*invr_z**2) ! weight at position r,z If(size(w,2) .gt. 1) then ! first derivative w(:,1)=-2*(z-z_0)*invr_z**2*Interior ! z derivative of w w(:,2)=-2*(r-r_0)*invr_r**2*Interior ! r derivative of w End If End subroutine ellipseweight Subroutine CompgBoundary(z,r,gtilde) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: gtilde(:,0:) if (walltype .eq. -1) then call gtest(z,r,gtilde) else call gstd(z,r,gtilde) end if End subroutine Subroutine gstd(z,r,gtilde) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: gtilde(:,0:) Real(kind=db):: belowtmp(size(r,1),0:size(gtilde,2)-1), abovetmp(size(r,1),0:size(gtilde,2)-1) Real(kind=db):: denom(size(r,1)) call wallweight(z,r,belowtmp, r_a, above1) if (walltype .eq. 1) then call ellipseweight(z,r,abovetmp,Interior) else call wallweight(z,r,abovetmp, r_b, above2) end if ! Extension to the whole domain of the boundary conditions gtilde(:,0)=(Phidown*abovetmp(:,0) + Phiup*belowtmp(:,0) ) / & & (abovetmp(:,0)+belowtmp(:,0)) If(size(gtilde,2) .gt. 2) then ! first derivative denom=(abovetmp(:,0)+belowtmp(:,0))**2 gtilde(:,1)=(Phiup-Phidown)*(belowtmp(:,1)*abovetmp(:,0)-belowtmp(:,0)*abovetmp(:,1)) / & & denom ! Axial derivative gtilde(:,2)=(Phiup-Phidown)*(belowtmp(:,2)*abovetmp(:,0)-belowtmp(:,0)*abovetmp(:,2)) / & & denom ! Radial derivative End If End subroutine Subroutine gtest(z,r,gtilde) Real(kind=db), INTENT(IN):: r(:),z(:) Real(kind=db), INTENT(OUT):: gtilde(:,0:) ! gtilde(:,0)=sin(2*pi*z/L_z)*cos(2*pi*r/L_r) + 2 If(size(gtilde,2) .gt. 
1) then
! first derivative
gtilde(:,1)=2*pi/L_z*cos(2*pi*z/L_z)*cos(2*pi*r/L_r)
gtilde(:,2)=-2*pi/L_r*sin(2*pi*z/L_z)*sin(2*pi*r/L_r)
End If
End subroutine

Subroutine ftest(z,r,f)
Real(kind=db), INTENT(IN):: r(:),z(:)
Real(kind=db), INTENT(OUT)::f(:,0:)
!
f(:,0)=2*pi*sin(2*pi*z/L_z)*( &
& 2*pi*cos(2*pi*r/L_r)*(1/L_z**2+1/L_r**2) &
& +1/(r*L_r)*sin(2*pi*r/L_r) )
End Subroutine ftest

subroutine Reducematrix(full,reduced)
use mumps_bsplines
type(mumps_mat):: full, reduced, tempmat
Real(kind=db), allocatable:: temparray(:), rsltarray(:)
Integer:: fullrank, reducedrank
Integer:: i,j
call to_mat(full)
fullrank=full%rank
reducedrank=nbreducedspline
call init(reducedrank,2,reduced)
call init(fullrank,2,tempmat)
write(*,*) "start reduced mat 1"
!$OMP parallel private(j), firstprivate(temparray,rsltarray), default(shared)
allocate(temparray(fullrank),rsltarray(fullrank))
!$OMP do
do i=1,fullrank
call getcol(etildet,i,temparray)
rsltarray=vmx_mumps_mat_loc(full,temparray)
DO j=1,fullrank
CALL putele_sploc(tempmat%mat%row(i), j, rsltarray(j), .false.)
END DO
end do
!$OMP end do
!$OMP barrier
!$OMP single
call to_mat(tempmat)
write(*,*) "start reduced mat 2"
!$OMP end single
!$OMP do
do i=1,reducedrank
call getcol(etildet,i,temparray)
rsltarray=vmx_mumps_mat_loc(tempmat,temparray)
DO j=1,reducedrank
CALL putele_sploc(reduced%mat%row(i), j, rsltarray(j), .false.)
END DO
end do
!$OMP end do
deallocate(temparray,rsltarray)
!$OMP end parallel
end subroutine
!!###############################################################################################
! subroutines imported from mumps_bsplines and the sparse lib to have thread-private routines
SUBROUTINE putele_sploc(arow, j, val, nlforce_zero)
!
! Put (overwrite) element j of row arow or insert it in an increasing "index" order
!
USE sparse
TYPE(sprow), TARGET :: arow
INTEGER, INTENT(in) :: j
DOUBLE PRECISION, INTENT(in) :: val
LOGICAL, INTENT(in), OPTIONAL :: nlforce_zero
!
TYPE(elt), TARGET :: pre_root
TYPE(elt), POINTER :: t, p
LOGICAL :: rmnode
!
pre_root%next => arow%row0 ! pre_root is linked to the head of the list.
t => pre_root
!
! Should a node holding a zero val be removed?
! New nodes with zero val are never created.
!
rmnode = .TRUE.
IF(PRESENT(nlforce_zero)) rmnode = .NOT.nlforce_zero
!
DO WHILE( ASSOCIATED(t%next) )
p => t%next
IF( p%index .EQ. j ) THEN
IF(ABS(val).LE.EPSILON(0.0d0) .AND. rmnode) THEN ! Remove the node for zero val!
t%next => p%next
arow%nnz = arow%nnz-1
arow%row0 => pre_root%next ! In case the head is altered
DEALLOCATE(p)
ELSE
p%val = val
END IF
RETURN
END IF
IF( p%index .GT. j ) EXIT
t => t%next
END DO
!
! Never create a new node with zero val
!
IF(ABS(val).GT.EPSILON(0.0d0)) THEN
ALLOCATE(p)
p = elt(j, val, t%next)
t%next => p
arow%nnz = arow%nnz+1
arow%row0 => pre_root%next
END IF
END SUBROUTINE putele_sploc
!
SUBROUTINE getcol_loc(amat, j, arr)
!
! Get a column from sparse matrix
!
USE mumps_bsplines
TYPE(mumps_mat), INTENT(in) :: amat
INTEGER, INTENT(in) :: j
DOUBLE PRECISION, INTENT(out) :: arr(:)
INTEGER :: i
!
DO i=amat%istart,amat%iend
CALL getele_loc(amat, i, j, arr(i))
END DO
END SUBROUTINE getcol_loc

SUBROUTINE getele_loc(mat, i, j, val)
!
! Get element (i,j) of sparse matrix
!
USE mumps_bsplines
TYPE(mumps_mat) :: mat
INTEGER, INTENT(in) :: i, j
DOUBLE PRECISION, INTENT(out) :: val
INTEGER :: iget, jget
INTEGER :: k, s, e
!
iget = i
jget = j
IF(mat%nlsym) THEN
IF( i.GT.j ) THEN ! Lower triangular part
iget = j
jget = i
END IF
END IF
!
val = 0.0d0 ! Assume zero val if outside
IF( iget.LT.mat%istart .OR.
iget.GT.mat%iend ) RETURN ! IF(ASSOCIATED(mat%mat)) THEN CALL getele(mat%mat, iget, jget, val) ELSE s = mat%irow(iget) - mat%nnz_start + 1 e = mat%irow(iget+1) - mat%nnz_start k = isearch(mat%cols(s:e), jget) IF( k.GE.0 ) THEN val =mat%val(s+k) ELSE val = 0.0d0 ! Assume zero val if not found END IF END IF END SUBROUTINE getele_loc !++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ FUNCTION vmx_mumps_mat_loc(mat, xarr) RESULT(yarr) ! ! Return product mat*x ! Use mumps_bsplines TYPE(mumps_mat) :: mat DOUBLE PRECISION, INTENT(in) :: xarr(:) DOUBLE PRECISION :: yarr(SIZE(xarr)) DOUBLE PRECISION :: alpha=1.0d0, beta=0.0d0 CHARACTER(len=6) :: matdescra - INTEGER :: n, i, j + INTEGER :: n ! n = mat%rank ! !#ifdef MKL IF(mat%nlsym) THEN matdescra = 'sun' ELSE matdescra = 'g' END IF ! CALL mkl_dcsrmv('N', n, n, alpha, matdescra, mat%val, & & mat%cols, mat%irow(1), mat%irow(2), xarr, & & beta, yarr) !#else ! yarr = 0.0d0 ! DO i=1,n ! DO j=mat%irow(i), mat%irow(i+1)-1 ! yarr(i) = yarr(i) + mat%val(j)*xarr(mat%cols(j)) ! END DO ! IF(mat%nlsym) THEN ! DO j=mat%irow(i)+1, mat%irow(i+1)-1 ! yarr(mat%cols(j)) = yarr(mat%cols(j)) & ! & + mat%val(j)*xarr(i) ! END DO ! END IF ! END DO ! Write(*,*) "linear mult") !#endif ! END FUNCTION vmx_mumps_mat_loc END MODULE geometry \ No newline at end of file diff --git a/src/mpihelper_mod.f90 b/src/mpihelper_mod.f90 index 25288c1..2136c95 100644 --- a/src/mpihelper_mod.f90 +++ b/src/mpihelper_mod.f90 @@ -1,319 +1,309 @@ !------------------------------------------------------------------------------ ! EPFL/Swiss Plasma Center !------------------------------------------------------------------------------ ! ! MODULE: mpihelper ! !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> Module responsible for setting up the MPI variables used in the communications. !------------------------------------------------------------------------------ MODULE mpihelper USE constants USE mpi IMPLICIT NONE -!> Structure containing the simulation parameters read from the input file -TYPE BASICDATA - LOGICAL :: nlres, nlsave, newres, nlxg, nlppform, nlPhis, nlclassical - INTEGER :: nplasma, nz, it0d, it2d, itparts, & - & itgraph, distribtype, nblock, nrun - REAL(kind=db) :: job_time, extra_time, tmax, dt, & - & potinn, potout, B0, n0, temp, & - & Rcurv, width, H0, P0 - INTEGER, DIMENSION(2) :: femorder, ngauss - INTEGER, DIMENSION(3) :: nnr - REAL(kind=db), DIMENSION(2):: lz - REAL(kind=db), DIMENSION(4):: radii, plasmadim - CHARACTER(len=64) :: resfile= "results.h5" - LOGICAL :: partperiodic - INTEGER :: samplefactor - END TYPE BASICDATA - !> Structure containing a single particle position and velocity used in MPI communications. 
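!> The matching MPI datatype particle_type is built from the field offsets of this structure
!> in init_particlempi below. Minimal exchange sketch (dest and tag are placeholder values;
!> init_mpitypes must have been called first):
!>   TYPE(particle):: part
!>   INTEGER:: ierr
!>   CALL MPI_SEND(part, 1, particle_type, dest, tag, MPI_COMM_WORLD, ierr)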
TYPE particle
- INTEGER :: Rindex, Zindex, partindex
- REAL(kind=db) :: R, Z, THET,&
- & UZ, UR, UTHET, &
- & Gamma, pot
+ INTEGER :: Rindex =0
+ INTEGER :: Zindex =0
+ INTEGER :: partindex =0
+ REAL(kind=db) :: R =0
+ REAL(kind=db) :: Z =0
+ REAL(kind=db) :: THET =0
+ REAL(kind=db) :: UZ =0
+ REAL(kind=db) :: UR =0
+ REAL(kind=db) :: UTHET =0
+ REAL(kind=db) :: Gamma =0
+ REAL(kind=db) :: pot =0
END TYPE particle

INTEGER, SAVE :: basicdata_type=MPI_DATATYPE_NULL !< Stores the MPI data type used for communicating basicdata
INTEGER, SAVE :: particle_type=MPI_DATATYPE_NULL !< Stores the MPI data type used for particles communication
INTEGER, SAVE :: rhsoverlap_type=MPI_DATATYPE_NULL !< Stores the MPI data type used for the communication of a rhs column
INTEGER, SAVE :: db_type=MPI_DATATYPE_NULL !< Stores the MPI data type used for the communication of a REAL(kind=db)
INTEGER, SAVE :: momentsoverlap_type=MPI_DATATYPE_NULL !< Stores the MPI data type used for the communication of a column of a grid variable
INTEGER, SAVE :: rcvrhsoverlap_type=MPI_DATATYPE_NULL !< Stores the MPI data type used for the receive communication of a rhs column
INTEGER, SAVE :: rcvmomentsoverlap_type=MPI_DATATYPE_NULL !< Stores the MPI data type used for the receive communication of a column of a grid variable
INTEGER, SAVE:: db_sum_op !< Stores the MPI sum operation for db_type
REAL(kind=db), ALLOCATABLE, SAVE:: rhsoverlap_buffer(:) !< buffer used for storing the rhs ghost cells
!< received from the left or right MPI process
REAL(kind=db), ALLOCATABLE, SAVE:: momentsoverlap_buffer(:) !< buffer used for storing the moments ghost cells
!< received from the left or right MPI process
!INTEGER, SAVE:: momentsoverlap_requests(2) = MPI_REQUEST_NULL
!INTEGER, SAVE:: rhsoverlap_requests(2) = MPI_REQUEST_NULL
INTEGER:: rhsoverlap_tag= 20
INTEGER:: momentsoverlap_tag= 30

CONTAINS

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Initialize the MPI types used for inter process communications
!
!---------------------------------------------------------------------------
SUBROUTINE init_mpitypes
IMPLICIT NONE
INTEGER:: ierr
! Initialize db_type to use real(kind=db) in MPI and the sum operator for reduce
CALL MPI_TYPE_CREATE_F90_REAL(dprequestedprec,MPI_UNDEFINED,db_type,ierr)
CALL MPI_Type_commit(db_type,ierr)
CALL MPI_Op_Create(DB_sum, .true., db_sum_op, ierr)
CALL init_particlempi
END SUBROUTINE init_mpitypes

!---------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Computes the sum in MPI_Reduce operations involving Real(kind=db)
!
!---------------------------------------------------------------------------
SUBROUTINE DB_sum(INVEC, INOUTVEC, LEN, TYPE)
REAL(kind=db):: INVEC(0:LEN-1), INOUTVEC(0:LEN-1)
INTEGER:: LEN, TYPE
INTEGER:: i
Do i=0,LEN-1
INOUTVEC(i)=INVEC(i)+INOUTVEC(i)
END DO
END SUBROUTINE DB_sum

!--------------------------------------------------------------------------
!> @author
!> Guillaume Le Bars EPFL/SPC
!
! DESCRIPTION:
!>
!> @brief
!> Initialize the buffers and MPI derived datatypes used for the overlap communications between neighboring processes
!
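!> Typical call sequence (sketch; the literal 10 matches the number of moments hard-coded in
!> start_momentscomm below):
!>   CALL init_mpitypes                                           ! commit the basic MPI types
!>   CALL init_overlaps(nrank, femorder, zlimleft, zlimright, 10) ! buffers and column types
!>   CALL start_rhscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright)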
!> @param[in] nrank ranks of the FEM array in (1) z direction and (2) r direction !> @param[in] femorder finite element method order in z and r direction !> @param[in] zlimleft z index delimiting the mpi local left boundary !> @param[in] zlimright z index delimiting the mpi local right boundary !> @param[in] nbmoments number of moments calculated and stored. ! !--------------------------------------------------------------------------- SUBROUTINE init_overlaps(nrank, femorder, zlimleft, zlimright, nbmoments) INTEGER, INTENT(IN):: nrank(:), femorder(:), zlimright, zlimleft, nbmoments IF(ALLOCATED(rhsoverlap_buffer)) DEALLOCATE(rhsoverlap_buffer) IF(ALLOCATED(momentsoverlap_buffer)) DEALLOCATE(momentsoverlap_buffer) ALLOCATE(rhsoverlap_buffer(nrank(2)*femorder(1))) ALLOCATE(momentsoverlap_buffer(nbmoments*nrank(2)*femorder(1))) WRITE(*,*)"nrank, femorder", nrank, femorder ! Initialize the MPI column overlap type for rhs CALL init_coltypempi(nrank(2), zlimright-zlimleft+femorder(1), 1, nbmoments, db_type, rhsoverlap_type) ! Initialize the MPI grid col type CALL init_coltypempi(nrank(2), zlimright-zlimleft+femorder(1), nbmoments, 1, db_type, momentsoverlap_type) ! Initialize the MPI receive column overlap type for rhs CALL init_coltypempi(nrank(2), nrank(1), 1, nbmoments, db_type, rcvrhsoverlap_type) ! Initialize the MPI receive grid col type CALL init_coltypempi(nrank(2), nrank(1), nbmoments, 1, db_type, rcvmomentsoverlap_type) END SUBROUTINE init_overlaps SUBROUTINE start_persistentcomm(requests, mpirank, leftproc, rightproc) INTEGER, ASYNCHRONOUS, INTENT(INOUT):: requests(:) INTEGER, INTENT(IN):: mpirank, leftproc, rightproc INTEGER:: ierr INTEGER:: stats(MPI_STATUS_SIZE,2) LOGICAL:: completed=.false. IF(leftproc .lt. mpirank) THEN ! Start to receive CALL MPI_START(requests(2),ierr) IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in recv_init" END IF IF(rightproc .gt. mpirank) THEN ! Start to send CALL MPI_START(requests(1),ierr) IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in send_init" END IF IF(leftproc .lt. mpirank) THEN ! Start to receive completed=.FALSE. DO WHILE(.not. completed) CALL MPI_TEST(requests(2), completed,stats(:,2),ierr) END DO WRITE(*,*)"status 2", completed, stats(:,2) !CALL MPI_WAIT(requests(2),stats(:,2),ierr) !WRITE(*,*)"status 2", stats(:,2) IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in recv_init" END IF IF(rightproc .gt. mpirank) THEN ! Start to send completed=.FALSE. DO WHILE(.not. completed) CALL MPI_TEST(requests(1), completed,stats(:,1),ierr) END DO !CALL MPI_WAIT(requests(1),stats(:,1),ierr) IF(IERR .ne. MPI_SUCCESS) WRITE(*,*) "error in send_init" END IF END SUBROUTINE start_persistentcomm SUBROUTINE start_rhscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright) INTEGER, INTENT(IN):: mpirank, leftproc, rightproc REAL(kind=db), DIMENSION(:,:), INTENT(INOUT):: moments INTEGER, INTENT(IN):: nrank(2), femorder(2), zlimright INTEGER, SAVE:: rhsoverlap_requests(2) = MPI_REQUEST_NULL INTEGER:: ierr INTEGER:: stats(MPI_STATUS_SIZE,2) rhsoverlap_requests=MPI_REQUEST_NULL rhsoverlap_buffer=0 IF(rightproc .gt. mpirank) THEN CALL MPI_ISEND(moments(1,zlimright+1), femorder(1), rhsoverlap_type, rightproc, rhsoverlap_tag, & & MPI_COMM_WORLD, rhsoverlap_requests(1), ierr ) END IF ! If the processor on the left has actually lower z positions IF(leftproc .lt. 
mpirank) THEN CALL MPI_IRECV(rhsoverlap_buffer, nrank(2)*(femorder(1)), db_type, leftproc, rhsoverlap_tag, & & MPI_COMM_WORLD, rhsoverlap_requests(2), ierr ) END IF CALL MPI_WAITALL(2,rhsoverlap_requests,stats, ierr) END SUBROUTINE start_rhscomm SUBROUTINE start_momentscomm(mpirank, leftproc, rightproc, moments, nrank, femorder, zlimright) INTEGER, INTENT(IN):: mpirank, leftproc, rightproc REAL(kind=db), DIMENSION(:,:), INTENT(INOUT):: moments INTEGER, INTENT(IN):: nrank(2), femorder(2), zlimright INTEGER, SAVE:: momentsoverlap_requests(2) = MPI_REQUEST_NULL INTEGER:: ierr INTEGER:: stats(MPI_STATUS_SIZE,2) momentsoverlap_requests=MPI_REQUEST_NULL momentsoverlap_buffer=0 IF(rightproc .gt. mpirank) THEN CALL MPI_ISEND(moments(1,zlimright+1), femorder(1), momentsoverlap_type, rightproc, momentsoverlap_tag, & & MPI_COMM_WORLD, momentsoverlap_requests(1), ierr ) END IF ! If the processor on the left has actually lower z positions IF(leftproc .lt. mpirank) THEN CALL MPI_IRECV(momentsoverlap_buffer, 10*nrank(2)*(femorder(1)), db_type, leftproc, momentsoverlap_tag, & & MPI_COMM_WORLD, momentsoverlap_requests(2), ierr ) END IF CALL MPI_WAITALL(2,momentsoverlap_requests,stats, ierr) END SUBROUTINE start_momentscomm !--------------------------------------------------------------------------- !> @author !> Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> !> @brief !> Initialize the particle MPI type used for inter process communications and publish it to !> the process in the communicator ! !--------------------------------------------------------------------------- SUBROUTINE init_particlempi() INTEGER :: nblock = 11 INTEGER:: blocklength(11) INTEGER(kind=MPI_ADDRESS_KIND):: displs(11), displ0 INTEGER:: types(11) TYPE(particle) :: part INTEGER:: ierr CALL mpi_get_address(part%Rindex, displs(1), ierr) CALL mpi_get_address(part%Zindex, displs(2), ierr) CALL mpi_get_address(part%partindex, displs(3), ierr) types(1:3)=MPI_INTEGER CALL mpi_get_address(part%R, displs(4), ierr) CALL mpi_get_address(part%Z, displs(5), ierr) CALL mpi_get_address(part%THET, displs(6), ierr) CALL mpi_get_address(part%UZ, displs(7), ierr) CALL mpi_get_address(part%UR, displs(8), ierr) CALL mpi_get_address(part%UTHET, displs(9), ierr) CALL mpi_get_address(part%GAMMA, displs(10), ierr) CALL mpi_get_address(part%pot, displs(11), ierr) types(4:11)=db_type blocklength(1:11) = 1 CALL mpi_get_address(part, displ0, ierr) displs=displs-displ0 CALL MPI_Type_create_struct(nblock, blocklength, displs, types, particle_type, ierr) CALL MPI_Type_commit(particle_type,ierr) END SUBROUTINE init_particlempi !--------------------------------------------------------------------------- !> @author Guillaume Le Bars EPFL/SPC ! ! DESCRIPTION: !> @brief Initialize the column MPI type used for inter process communications and publish it to !> the processes in the communicator (can be rhs or grid quantities) ! 
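!> Worked example (sketch, coltype being a placeholder variable): the call
!>   CALL init_coltypempi(3, 4, 1, 1, db_type, coltype)
!> makes MPI_TYPE_VECTOR pick elements 1, 5 and 9 of the flattened column-major buffer
!> (nr=3 blocks of block_size=1, separated by stride*block_size*nz=4 elements), and the
!> MPI_TYPE_CREATE_RESIZED call shrinks the extent to one block so that consecutive uses
!> of coltype address consecutive rows of the 2D data.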
!> @param[in] nr number of elements in the r direction !> @param[in] nz number of elements in the z direction !> @param[in] init_type MPI type of the initial data !> @param[inout] mpi_coltype final type usable in communications !--------------------------------------------------------------------------- SUBROUTINE init_coltypempi(nr, nz, block_size, stride, init_type, mpi_coltype) INTEGER, INTENT(IN) :: nr INTEGER, INTENT(IN) :: nz INTEGER, INTENT(IN) :: block_size INTEGER, INTENT(IN) :: stride INTEGER, INTENT(IN) :: init_type INTEGER, INTENT(OUT) :: mpi_coltype INTEGER :: temp_mpi_coltype, ierr INTEGER(KIND=MPI_ADDRESS_KIND):: init_type_lb, init_type_extent !(nrank(2), nrank(1), 1, 10, db_type, rhsoverlap_type) ! if mpi_coltype was used, we free it first IF( mpi_coltype .ne. MPI_DATATYPE_NULL) CALL MPI_TYPE_FREE(mpi_coltype,ierr) ! Create vector type of length nx CALL MPI_TYPE_VECTOR(nr, block_size, stride*block_size*nz, init_type, temp_mpi_coltype, ierr) CALL MPI_TYPE_COMMIT(temp_mpi_coltype, ierr) ! Get the size in bytes of the initial type CALL MPI_TYPE_GET_EXTENT(init_type, init_type_lb, init_type_extent, ierr) if(mpi_coltype .ne. MPI_DATATYPE_NULL) CALL MPI_TYPE_FREE(mpi_coltype,ierr) ! Resize temp_mpi_coltype such that the next item to read is at j+1 CALL MPI_TYPE_CREATE_RESIZED(temp_mpi_coltype, init_type_lb, stride*block_size*init_type_extent ,& & mpi_coltype, ierr) CALL MPI_TYPE_COMMIT(mpi_coltype, ierr) CALL MPI_TYPE_FREE(temp_mpi_coltype,ierr) END SUBROUTINE init_coltypempi END MODULE mpihelper diff --git a/src/random.f b/src/random.f new file mode 100644 index 0000000..a6ca773 --- /dev/null +++ b/src/random.f @@ -0,0 +1,376 @@ +* Version 1.0 of random number routines +* Author: Charles Karney +* Date: 1999-08-05 10:44:51 -0400 +* + function random(p,r) + implicit none + DOUBLE PRECISION ulp2 + parameter(ulp2=2.0d0**(-47-1)) + REAL ulps + DOUBLE PRECISION mult + parameter(ulps=2.0**(-23),mult=2.0d0**23) + DOUBLE PRECISION random + REAL srandom + integer p + DOUBLE PRECISION r(0:100-1) + external rand_batch + if(p.GE.100)then + call rand_batch(p,r(0)) + end if + random=r(p)+ulp2 + p=p+1 + return + entry srandom(p,r) + if(p.GE.100)then + call rand_batch(p,r(0)) + end if + srandom=(int(mult*r(p))+0.5)*ulps + p=p+1 + return + end +* + subroutine random_array(y,n,p,r) + implicit none + DOUBLE PRECISION ulp2 + parameter(ulp2=2.0d0**(-47-1)) + REAL ulps + DOUBLE PRECISION mult + parameter(ulps=2.0**(-23),mult=2.0d0**23) + integer n + DOUBLE PRECISION y(0:n-1) + REAL ys(0:n-1) + integer p + DOUBLE PRECISION r(0:100-1) + integer i,k,j + external rand_batch + if(n.LE.0)return + k=min(n,100-p) + do i=0,k-1 + y(i)=r(i+p)+ulp2 + end do + p=p+(k) + do j=k,n-1,100 + call rand_batch(p,r(0)) + do i=j,min(j+100,n)-1 + y(i)=r(i-j+p)+ulp2 + end do + p=p+(min(100,n-j)) + end do + return + entry srandom_array(ys,n,p,r) + if(n.LE.0)return + k=min(n,100-p) + do i=0,k-1 + ys(i)=(int(mult*r(i+p))+0.5)*ulps + end do + p=p+(k) + do j=k,n-1,100 + call rand_batch(p,r(0)) + do i=j,min(j+100,n)-1 + ys(i)=(int(mult*r(i-j+p))+0.5)*ulps + end do + p=p+(min(100,n-j)) + end do + return + end +* + subroutine rand_batch(p,r) + implicit none + integer p + DOUBLE PRECISION r(0:100-1) + integer i + DOUBLE PRECISION w(0:1009-100-1) + DOUBLE PRECISION tmp + do i=0,63-1 + tmp=r(i)+r(i+100-63) + w(i)=tmp-int(tmp) + end do + do i=63,100-1 + tmp=r(i)+w(i-63) + w(i)=tmp-int(tmp) + end do + do i=100,1009-100-1 + tmp=w(i-100)+w(i-63) + w(i)=tmp-int(tmp) + end do + do i=1009-100,1009-100+63-1 + tmp=w(i-100)+w(i-63) + 
r(i-1009+100)=tmp-int(tmp) + end do + do i=1009-100+63,1009-1 + tmp=w(i-100)+r(i-1009+100-63) + r(i-1009+100)=tmp-int(tmp) + end do + p=0 + return + end +* + subroutine random_init(seed,p,r) + implicit none + integer b + DOUBLE PRECISION del,ulp + parameter(del=2.0d0**(-14),ulp=2.0d0**(-47),b=2**14) + integer seed(0:8-1) + integer p + DOUBLE PRECISION r(0:100-1) + integer i,j,t,s(0:8-1) + logical even + integer z(0:8-1) + integer a0,a1,a2,a3,a4,a5,a6,c0 + data a0,a1,a2,a3,a4,a5,a6,c0/15661,678,724,5245,13656,11852,29,1/ + do i=0,8-1 + s(i)=seed(i) + end do + even=.TRUE. + even=even.AND.(mod(s(7),2).EQ.0) + r(0)=(((s(7)*del+s(6))*del+s(5))*del+int(s(4)/512))*512*del + do j=1,100-1 + t=c0+a0*s(0) + z(0)=mod(t,b) + t=int(t/b)+a0*s(1)+a1*s(0) + z(1)=mod(t,b) + t=int(t/b)+a0*s(2)+a1*s(1)+a2*s(0) + z(2)=mod(t,b) + t=int(t/b)+a0*s(3)+a1*s(2)+a2*s(1)+a3*s(0) + z(3)=mod(t,b) + t=int(t/b)+a0*s(4)+a1*s(3)+a2*s(2)+a3*s(1)+a4*s(0) + z(4)=mod(t,b) + t=int(t/b)+a0*s(5)+a1*s(4)+a2*s(3)+a3*s(2)+a4*s(1)+a5*s(0) + z(5)=mod(t,b) + t=int(t/b)+a0*s(6)+a1*s(5)+a2*s(4)+a3*s(3)+a4*s(2)+a5*s(1)+a6*s(0) + z(6)=mod(t,b) + t=int(t/b)+a0*s(7)+a1*s(6)+a2*s(5)+a3*s(4)+a4*s(3)+a5*s(2)+a6*s(1) + z(7)=mod(t,b) + do i=0,8-1 + s(i)=z(i) + end do + even=even.AND.(mod(s(7),2).EQ.0) + r(j)=(((s(7)*del+s(6))*del+s(5))*del+int(s(4)/512))*512*del + end do + if(even)then + t=c0+a0*s(0) + z(0)=mod(t,b) + t=int(t/b)+a0*s(1)+a1*s(0) + z(1)=mod(t,b) + t=int(t/b)+a0*s(2)+a1*s(1)+a2*s(0) + z(2)=mod(t,b) + t=int(t/b)+a0*s(3)+a1*s(2)+a2*s(1)+a3*s(0) + z(3)=mod(t,b) + t=int(t/b)+a0*s(4)+a1*s(3)+a2*s(2)+a3*s(1)+a4*s(0) + z(4)=mod(t,b) + t=int(t/b)+a0*s(5)+a1*s(4)+a2*s(3)+a3*s(2)+a4*s(1)+a5*s(0) + z(5)=mod(t,b) + t=int(t/b)+a0*s(6)+a1*s(5)+a2*s(4)+a3*s(3)+a4*s(2)+a5*s(1)+a6*s(0) + z(6)=mod(t,b) + t=int(t/b)+a0*s(7)+a1*s(6)+a2*s(5)+a3*s(4)+a4*s(3)+a5*s(2)+a6*s(1) + z(7)=mod(t,b) + do i=0,8-1 + s(i)=z(i) + end do + j=int((s(8-1)*100)/b) + r(j)=r(j)+(ulp) + end if + p=100 + return + end +* + subroutine decimal_to_seed(decimal,seed) + implicit none + integer b + parameter(b=2**14) + character*(*)decimal + integer seed(0:8-1) + integer i,ten(0:8-1),c(0:8-1),ch + data ten/10,7*0/ + external rand_axc + do i=0,8-1 + seed(i)=0 + c(i)=0 + end do + do i=1,len(decimal) + ch=ichar(decimal(i:i)) + if(ch.GE.ichar('0').AND.ch.LE.ichar('9'))then + c(0)=ch-ichar('0') + call rand_axc(ten,seed,c) + end if + end do + return + end +* + subroutine string_to_seed(string,seed) + implicit none + integer b + parameter(b=2**14) + character*(*)string + integer seed(0:8-1) + integer t,i,k,unity(0:8-1),c(0:8-1),ch + data unity/1,7*0/ + external rand_axc + do i=0,8-1 + seed(i)=0 + c(i)=0 + end do + do i=1,len(string) + ch=ichar(string(i:i)) + if(ch.GT.ichar(' ').AND.ch.LT.127)then + t=mod(seed(0),2)*(b/2) + do k=0,8-1 + seed(k)=int(seed(k)/2) + if(k.LT.8-1)then + seed(k)=seed(k)+(mod(seed(k+1),2)*(b/2)) + else + seed(k)=seed(k)+(t) + end if + end do + c(0)=ch + call rand_axc(unity,seed,c) + end if + end do + return + end +* + subroutine set_random_seed(time,seed) + implicit none + integer time(8) + integer seed(0:8-1) + character*21 c + external decimal_to_seed + write(c(1:8),'(i4.4,2i2.2)')time(1),time(2),time(3) + write(c(9:12),'(i1.1,i3.3)') (1-sign(1,time(4)))/2,abs(time(4)) + write(c(13:21),'(3i2.2,i3.3)')time(5),time(6),time(7),time(8) + call decimal_to_seed(c,seed) + return + end +* + subroutine seed_to_decimal(seed,decimal) + implicit none + integer pow,decbase,b + parameter(pow=4,decbase=10**pow,b=2**14) + character*(*)decimal + integer seed(0:8-1) + integer 
z(0:8-1),i,t,j,k
+ character*36 str
+ k=-1
+ do i=0,8-1
+ z(i)=seed(i)
+ if(z(i).GT.0)k=i
+ end do
+ str=' '
+ i=9
+90000 continue
+ i=i-1
+ t=0
+ do j=k,0,-1
+ z(j)=z(j)+t*b
+ t=mod(z(j),decbase)
+ z(j)=int(z(j)/decbase)
+ end do
+ if(z(max(0,k)).EQ.0)k=k-1
+ if(k.GE.0)then
+ write(str(pow*i+1:pow*(i+1)),'(i4.4)')t
+ else
+ write(str(pow*i+1:pow*(i+1)),'(i4)')t
+ end if
+ if(k.GE.0)goto 90000
+ if(len(decimal).GT.len(str))then
+ decimal(:len(decimal)-len(str))=' '
+ decimal(len(decimal)-len(str)+1:)=str
+ else
+ decimal=str(len(str)-len(decimal)+1:)
+ end if
+ return
+ end
+*
+ subroutine rand_next_seed(n,ax,cx,y)
+ implicit none
+ integer n,ax(0:8-1),cx(0:8-1)
+ integer y(0:8-1)
+ integer a(0:8-1),c(0:8-1),z(0:8-1),t(0:8-1),m,i
+ data z/8*0/
+ external rand_axc
+ if(n.EQ.0)return
+ m=n
+ do i=0,8-1
+ a(i)=ax(i)
+ c(i)=cx(i)
+ end do
+90000 continue
+ if(mod(m,2).GT.0)then
+ call rand_axc(a,y,c)
+ end if
+ m=int(m/2)
+ if(m.EQ.0)return
+ do i=0,8-1
+ t(i)=c(i)
+ end do
+ call rand_axc(a,c,t)
+ do i=0,8-1
+ t(i)=a(i)
+ end do
+ call rand_axc(t,a,z)
+ goto 90000
+ end
+*
+ subroutine next_seed3(n0,n1,n2,seed)
+ implicit none
+ integer n0,n1,n2
+ integer seed(0:8-1)
+ integer af0(0:8-1),cf0(0:8-1)
+ integer ab0(0:8-1),cb0(0:8-1)
+ integer af1(0:8-1),cf1(0:8-1)
+ integer ab1(0:8-1),cb1(0:8-1)
+ integer af2(0:8-1),cf2(0:8-1)
+ integer ab2(0:8-1),cb2(0:8-1)
+ data af0/15741,8689,9280,4732,12011,7130,6824,12302/
+ data cf0/16317,10266,1198,331,10769,8310,2779,13880/
+ data ab0/9173,9894,15203,15379,7981,2280,8071,429/
+ data cb0/8383,3616,597,12724,15663,9639,187,4866/
+ data af1/8405,4808,3603,6718,13766,9243,10375,12108/
+ data cf1/13951,7170,9039,11206,8706,14101,1864,15191/
+ data ab1/6269,3240,9759,7130,15320,14399,3675,1380/
+ data cb1/15357,5843,6205,16275,8838,12132,2198,10330/
+ data af2/445,10754,1869,6593,385,12498,14501,7383/
+ data cf2/2285,8057,3864,10235,1805,10614,9615,15522/
+ data ab2/405,4903,2746,1477,3263,13564,8139,2362/
+ data cb2/8463,575,5876,2220,4924,1701,9060,5639/
+ external rand_next_seed
+ if(n2.GT.0)then
+ call rand_next_seed(n2,af2,cf2,seed)
+ else if(n2.LT.0)then
+ call rand_next_seed(-n2,ab2,cb2,seed)
+ end if
+ if(n1.GT.0)then
+ call rand_next_seed(n1,af1,cf1,seed)
+ else if(n1.LT.0)then
+ call rand_next_seed(-n1,ab1,cb1,seed)
+ end if
+ entry next_seed(n0,seed)
+ if(n0.GT.0)then
+ call rand_next_seed(n0,af0,cf0,seed)
+ else if(n0.LT.0)then
+ call rand_next_seed(-n0,ab0,cb0,seed)
+ end if
+ return
+ end
+*
+ subroutine rand_axc(a,x,c)
+ implicit none
+ integer b
+ parameter(b=2**14)
+ integer a(0:8-1),c(0:8-1)
+ integer x(0:8-1)
+ integer z(0:8-1),i,j,t
+ t=0
+ do i=0,8-1
+ t=c(i)+int(t/b)
+ do j=0,i
+ t=t+(a(j)*x(i-j))
+ end do
+ z(i)=mod(t,b)
+ end do
+ do i=0,8-1
+ x(i)=z(i)
+ end do
+ return
+ end
+*
diff --git a/src/random.h b/src/random.h
new file mode 100644
index 0000000..ac33242
--- /dev/null
+++ b/src/random.h
@@ -0,0 +1,2 @@
+ integer ran_k,ran_s,ran_c
+ parameter (ran_k=100,ran_s=8,ran_c=34)
diff --git a/src/random_mod.f90 b/src/random_mod.f90
new file mode 100644
index 0000000..9a37315
--- /dev/null
+++ b/src/random_mod.f90
@@ -0,0 +1,21 @@
+MODULE random
+
+!
+! Random module for the random number generator (RNG)
+!
+
+
+! This module, together with the file "random.h", allows the use of
+! a high-quality, fast, parallel RNG developed by Charles Karney at Princeton University
+
+
+ INCLUDE "random.h"
+
+ CHARACTER(ran_c) :: random_seed_str = '2.7182818'
+ INTEGER, allocatable, DIMENSION(:,:) :: seed ! nbprocs*ran_s size
+
+ INTEGER, PARAMETER :: n_rand = 1
+ INTEGER, allocatable:: ran_index(:) ! nbprocs size
+ DOUBLE PRECISION, allocatable :: ran_array(:,:) ! nbprocs*ran_k size
+
+END MODULE random
\ No newline at end of file
diff --git a/src/randother.f b/src/randother.f
new file mode 100644
index 0000000..629e5a8
--- /dev/null
+++ b/src/randother.f
@@ -0,0 +1,123 @@
+* Version 1.1 of random number routines
+* Author: Charles Karney
+* Date: 1999-08-06 14:18:03 -0400
+*
+ subroutine random_gauss(y,n,p,r)
+ implicit none
+ integer p
+ DOUBLE PRECISION r(0:100-1)
+ integer n
+ DOUBLE PRECISION y(0:n-1)
+ integer i
+ DOUBLE PRECISION pi,theta,z
+ DOUBLE PRECISION random
+ external random_array,random
+ data pi/3.14159265358979323846264338328d0/
+ REAL ys(0:n-1)
+ REAL spi,stheta,sz
+ REAL srandom
+ external srandom_array,srandom
+ data spi/3.14159265358979323846264338328/
+ if(n.LE.0)return
+ call random_array(y,n,p,r(0))
+ do i=0,int(n/2)*2-1,2
+ theta=pi*(2.0d0*y(i)-1.0d0)
+ z=sqrt(-2.0d0*log(y(i+1)))
+ y(i)=z*cos(theta)
+ y(i+1)=z*sin(theta)
+ end do
+ if(mod(n,2).EQ.0)return
+ theta=pi*(2.0d0*y(n-1)-1.0d0)
+ z=sqrt(-2.0d0*log(random(p,r(0))))
+ y(n-1)=z*cos(theta)
+ return
+ entry srandom_gauss(ys,n,p,r)
+ if(n.LE.0)return
+ call srandom_array(ys,n,p,r(0))
+ do i=0,int(n/2)*2-1,2
+ stheta=spi*(2.0*ys(i)-1.0)
+ sz=sqrt(-2.0*log(ys(i+1)))
+ ys(i)=sz*cos(stheta)
+ ys(i+1)=sz*sin(stheta)
+ end do
+ if(mod(n,2).EQ.0)return
+ stheta=spi*(2.0*ys(n-1)-1.0)
+ sz=sqrt(-2.0*log(srandom(p,r(0))))
+ ys(n-1)=sz*cos(stheta)
+ return
+ end
+*
+ subroutine random_isodist(v,n,p,r)
+ implicit none
+ integer p
+ DOUBLE PRECISION r(0:100-1)
+ integer n
+ DOUBLE PRECISION v(0:3*n-1)
+ integer i
+ DOUBLE PRECISION pi,costheta,phi
+ external random_array
+ data pi/3.14159265358979323846264338328d0/
+ REAL vs(0:3*n-1)
+ REAL spi,scostheta,sphi
+ external srandom_array
+ data spi/3.14159265358979323846264338328/
+ if(n.LE.0)return
+ call random_array(v(n),2*n,p,r(0))
+ do i=0,n-1
+ costheta=2.0d0*v(n+2*i)-1.0d0
+ phi=pi*(2.0d0*v(n+2*i+1)-1.0d0)
+ v(3*i)=cos(phi)*sqrt(1.0d0-costheta**2)
+ v(3*i+1)=sin(phi)*sqrt(1.0d0-costheta**2)
+ v(3*i+2)=costheta
+ end do
+ return
+ entry srandom_isodist(vs,n,p,r)
+ if(n.LE.0)return
+ call srandom_array(vs(n),2*n,p,r(0))
+ do i=0,n-1
+ scostheta=2.0*vs(n+2*i)-1.0
+ sphi=spi*(2.0*vs(n+2*i+1)-1.0)
+ vs(3*i)=cos(sphi)*sqrt(1.0-scostheta**2)
+ vs(3*i+1)=sin(sphi)*sqrt(1.0-scostheta**2)
+ vs(3*i+2)=scostheta
+ end do
+ return
+ end
+*
+ subroutine random_cosdist(v,n,p,r)
+ implicit none
+ integer p
+ DOUBLE PRECISION r(0:100-1)
+ integer n
+ DOUBLE PRECISION v(0:3*n-1)
+ integer i
+ DOUBLE PRECISION pi,costheta2,phi
+ external random_array
+ data pi/3.14159265358979323846264338328d0/
+ REAL vs(0:3*n-1)
+ REAL spi,scostheta2,sphi
+ external srandom_array
+ data spi/3.14159265358979323846264338328/
+ if(n.LE.0)return
+ call random_array(v(n),2*n,p,r(0))
+ do i=0,n-1
+ costheta2=v(n+2*i)
+ phi=pi*(2.0d0*v(n+2*i+1)-1.0d0)
+ v(3*i)=cos(phi)*sqrt(1.0d0-costheta2)
+ v(3*i+1)=sin(phi)*sqrt(1.0d0-costheta2)
+ v(3*i+2)=sqrt(costheta2)
+ end do
+ return
+ entry srandom_cosdist(vs,n,p,r)
+ if(n.LE.0)return
+ call srandom_array(vs(n),2*n,p,r(0))
+ do i=0,n-1
+ scostheta2=vs(n+2*i)
+ sphi=spi*(2.0*vs(n+2*i+1)-1.0)
+ vs(3*i)=cos(sphi)*sqrt(1.0-scostheta2)
+ vs(3*i+1)=sin(sphi)*sqrt(1.0-scostheta2)
+ vs(3*i+2)=sqrt(scostheta2)
+ end do
+ return
+ end
+*
diff --git a/src/resume.f90 b/src/resume.f90
index d9015e8..0fb03bd 100644
--- a/src/resume.f90
+++ b/src/resume.f90
@@ -1,57 +1,67 @@
SUBROUTINE
resume ! ! Resume from previous run ! Use beam Use basic Use fields Use sort Use maxwsrce Use geometry IMPLICIT NONE ! ! Local vars and arrays INTEGER:: i !________________________________________________________________________________ WRITE(*,'(a)') ' Resume from previous run' !________________________________________________________________________________ ! ! Open and read initial conditions from restart file CALL chkrst(0) +! If we want to start a new run with a new file IF (newres) THEN + ! we start time and step from 0 cstep=0 time=0 END IF call read_geom(lu_in, rnorm, rgrid, Potinn, Potout) -! Compute Self consistent electric field +! Initialize the magnetic field and the electric field solver call init_mag CALL fields_init call localisation(partslist(1)) IF(mpisize .gt. 1) THEN CALL distribpartsonprocs(partslist(1)) DO i=1,nbspecies CALL keep_mpi_self_parts(partslist(i),Zbounds) call collectparts(partslist(i)) END DO END IF + ! Add the requested test particles and read them from input files + IF (nbaddtestspecies .gt. 0) THEN + Do i=1,nbaddtestspecies + CALL load_part_file(partslist(nbspecies+i),addedtestspecfile(i)) + END DO + nbspecies=nbspecies+nbaddtestspecies + END IF + CALL bound(partslist(1)) CALL localisation(partslist(1)) - ! Init Electric and Magnetic Fields + ! Allocate the variables for MPI communications CALL fields_comm_init - WRITE(*,*) "Fields initialized" + ! Solve Poisson for the initial conditions CALL rhscon(partslist(1)) - WRITE(*,*) "Initial rhs computed" CALL poisson(splrz) - WRITE(*,*) "Initial field solved" + ! Compute the fields at the particles positions CALL EFieldscompatparts(partslist(1)) WRITE(*,*) "Initial forces computed" IF (newres) THEN CALL diagnostics WRITE(*,*) "Initial beam%diagnostics done!" END IF + ! Activate the source if present IF (nlmaxwellsource) CALL maxwsrce_init(lu_in, time) ! END SUBROUTINE resume
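! Note on the parallel RNG added in this changeset: each OpenMP thread owns one column of
! seed, ran_index and ran_array (see random_mod.f90), so random numbers can be drawn inside
! a parallel region without locking. Minimal draw sketch (ith is the 1-based thread index
! and y a preallocated DOUBLE PRECISION array of size n):
!   ith=omp_get_thread_num()+1
!   call random_array(y, n, ran_index(ith), ran_array(1,ith))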