diff --git a/PyChem/scripts/pychem_sample_IMF_and_properties b/PyChem/scripts/pychem_sample_IMF_and_properties
new file mode 100755
index 0000000..4d594b8
--- /dev/null
+++ b/PyChem/scripts/pychem_sample_IMF_and_properties
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+
+
+import Ptools as pt
+
+
+from optparse import OptionParser
+from pNbody import *
+from pNbody import units
+import string
+
+from scipy import optimize
+
+from PyChem import chemistry
+
+
+
+def parse_options():
+
+
+ usage = "usage: %prog [options] file"
+ parser = OptionParser(usage=usage)
+
+ parser = pt.add_limits_options(parser)
+ parser = pt.add_log_options(parser)
+ parser = pt.add_postscript_options(parser)
+
+ parser.add_option("--x",
+ action="store",
+ dest="x",
+ type="string",
+ default = 'Fe',
+ help="x value to plot",
+ metavar=" STRING")
+
+ parser.add_option("--y",
+ action="store",
+ dest="y",
+ type="string",
+ default = 'Mg',
+ help="y value to plot",
+ metavar=" STRING")
+
+ parser.add_option("--dt",
+ action="store",
+ dest="dt",
+ type="float",
+ default = 0.1,
+ help="dt",
+ metavar=" FLOAT")
+
+
+ parser.add_option("--mstar",
+ action="store",
+ dest="mstar",
+ type="float",
+ default = 1e5,
+ help="initial mass of the SSP in solar mass",
+ metavar=" FLOAT")
+
+ parser.add_option("--tstar",
+ action="store",
+ dest="tstar",
+ type="float",
+ default = 0,
+ help="formation time of the SSP",
+ metavar=" FLOAT")
+
+ parser.add_option("-o",
+ action="store",
+ dest="obs",
+ type="string",
+ default = 'Y',
+ help="observable to plot",
+ metavar=" STRING")
+
+ parser.add_option("--timeunit",
+ action="store",
+ dest="timeunit",
+ type="string",
+ default = None,
+ help="unit of time",
+ metavar=" STRING")
+
+
+ parser.add_option("--NumberOfTables",
+ action="store",
+ dest="NumberOfTables",
+ type="int",
+ default = 1,
+ help="NumberOfTables",
+ metavar=" INT")
+
+ parser.add_option("--DefaultTable",
+ action="store",
+ dest="DefaultTable",
+ type="int",
+ default = 0,
+ help="DefaultTable",
+ metavar=" INT")
+
+
+ (options, args) = parser.parse_args()
+
+
+ pt.check_files_number(args)
+
+ files = args
+
+ return files,options
+
+
+########################################################################
+# M A I N
+########################################################################
+
+
+
+SOLAR_MASS = 1.989e33
+
+UnitLength_in_cm = 3.085e+21
+UnitMass_in_g = 1.989e+43
+UnitVelocity_in_cm_per_s = 20725573.785998672
+UnitTime_in_s = 148849920000000.0
+
+
+
+files,opt = parse_options()
+file = files[0]
+
+
+
+# some parameters
+M0 = opt.mstar * SOLAR_MASS/UnitMass_in_g    # initial mass of the SSP (in code units)
+
+
+
+
+# init
+chemistry.init_chimie(file,opt.NumberOfTables,opt.DefaultTable)
+mmax = chemistry.get_Mmax()
+mmin = chemistry.get_Mmin()
+
+
+
+##################
+# theoretical imf
+##################
+'''
+here, the IMF is normalised such that
+its integral, i.e. the mass fraction
+of stars, is equal to 1.
+'''
+bins_th = arange(mmin,mmax,1e-12).astype(float32)
+hx_th = chemistry.get_imf(bins_th).astype(float32)
+bins_th = bins_th.astype(float)*UnitMass_in_g/SOLAR_MASS
+
+
+
+##################
+# sampled imf
+##################
+'''
+here, the IMF is normalised such that the total mass of
+the stars equals M0,
+i.e. the normalisation differs from that of the theoretical IMF.
+'''
+# compute the expected number of stars between mmin and mmax (depends on M0);
+# N is thus the number of stars in a particle of mass M0
+N = chemistry.get_imf_N(array([mmin]),array([mmax]))[0]*M0
+# compute the masses
+ms = chemistry.imf_sampling(int(N),1)
+
+
+# in Msol
+Mtot = sum(ms)*chemistry.CodeUnits_to_SolarMass_Factor()
+print "number of stars",N
+print "total mass %g [Msol] "%Mtot
+#print min(ms)*UnitMass_in_g/SOLAR_MASS
+#print max(ms)*UnitMass_in_g/SOLAR_MASS
+
+
+# number of SNII
+ms_ns = compress(ms>chemistry.get_SNII_Mmin(),ms)
+NSNII = len(ms_ns)
+MSNII = sum(ms_ns)*chemistry.CodeUnits_to_SolarMass_Factor()
+MSNII_MTOT = MSNII/Mtot
+
+print "number of SNII ",NSNII
+print "enregy ejected in SNII %g ergs"%(1e51*NSNII)
+
+
+
+print "gas mass fraction ejected in SNII %g [Msol]"%MSNII_MTOT
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
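To make the two normalisation conventions used in the script above concrete, here is a small standalone Python sketch of the same steps. It does not use the PyChem tables; the pure power-law (Salpeter-like) IMF with slope 2.35 between 0.1 and 50 Msol, the SNII progenitor threshold of 8 Msol and the 1e51 erg released per SNII are illustrative assumptions only. The IMF is first normalised to a unit mass fraction, the expected number of stars for an SSP of 1e5 Msol is computed, the individual masses are drawn by inverse-transform sampling, and the SNII progenitors are counted as in the script.

import numpy as np

# Illustrative parameters only (not taken from the PyChem tables):
# power-law IMF dN/dm ~ m^-alpha, SNII progenitors above 8 Msol,
# 1e51 erg released per SNII.
alpha = 2.35                       # Salpeter-like slope
m_min, m_max = 0.1, 50.0           # stellar mass range [Msol]
m_snii = 8.0                       # SNII progenitor threshold [Msol]
M0 = 1.0e5                         # total mass of the SSP [Msol]

# Normalise phi(m) = A*m^-alpha so that the integral of m*phi(m) dm = 1,
# i.e. phi is a mass fraction per unit mass (the "theoretical" convention).
A = (2.0 - alpha) / (m_max**(2.0 - alpha) - m_min**(2.0 - alpha))

# Expected number of stars in an SSP of mass M0 (the "sampled" convention):
# N = M0 * integral of phi(m) dm
N = int(M0 * A / (1.0 - alpha) * (m_max**(1.0 - alpha) - m_min**(1.0 - alpha)))

# Draw N masses by inverse-transform sampling of the number distribution ~ m^-alpha
beta = 1.0 - alpha
u = np.random.random(N)
m = (m_min**beta + u * (m_max**beta - m_min**beta))**(1.0 / beta)

# Same diagnostics as in the script
Mtot = m.sum()
n_snii = (m > m_snii).sum()

print("number of stars %d" % N)
print("total mass %g [Msol]" % Mtot)
print("number of SNII %d" % n_snii)
print("energy ejected in SNII %g ergs" % (1e51 * n_snii))
print("mass fraction in SNII progenitors %g" % (m[m > m_snii].sum() / Mtot))

With these illustrative parameters the sampled total mass should come out close to M0 and roughly ten per cent of it should sit in SNII progenitors, which gives a quick sanity check on the numbers printed by pychem_sample_IMF_and_properties.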
diff --git a/src/Makefile b/src/Makefile
deleted file mode 100644
index 4852c42..0000000
--- a/src/Makefile
+++ /dev/null
@@ -1,894 +0,0 @@
-
-#----------------------------------------------------------------------
-# From the list below, please activate/deactivate the options that
-# apply to your run. If you modify any of these options, make sure
-# that you recompile the whole code by typing "make clean; make".
-#
-# Look at end of file for a brief guide to the compile-time options.
-#----------------------------------------------------------------------
-
-
-#--------------------------------------- Basic operation mode of code
-OPT += -DPERIODIC
-#OPT += -DUNEQUALSOFTENINGS
-
-
-#--------------------------------------- Things that are always recommended
-OPT += -DPEANOHILBERT
-OPT += -DWALLCLOCK
-
-
-#--------------------------------------- TreePM Options
-#OPT += -DPMGRID=128
-#OPT += -DPLACEHIGHRESREGION=3
-#OPT += -DENLARGEREGION=1.2
-#OPT += -DASMTH=1.25
-#OPT += -DRCUT=4.5
-
-
-#--------------------------------------- Single/Double Precision
-#OPT += -DDOUBLEPRECISION
-#OPT += -DDOUBLEPRECISION_FFTW
-
-
-#--------------------------------------- Time integration options
-OPT += -DSYNCHRONIZATION
-#OPT += -DFLEXSTEPS
-#OPT += -DPSEUDOSYMMETRIC
-OPT += -DNOSTOP_WHEN_BELOW_MINTIMESTEP
-#OPT += -DNOPMSTEPADJUSTMENT
-
-
-#--------------------------------------- Output
-OPT += -DADVANCEDSTATISTICS
-OPT += -DADVANCEDCPUSTATISTICS
-OPT += -DSYSTEMSTATISTICS
-OPT += -DBLOCK_SKIPPING
-#OPT += -DHAVE_HDF5
-#OPT += -DOUTPUTPOTENTIAL
-#OPT += -DOUTPUTACCELERATION
-#OPT += -DOUTPUTCHANGEOFENTROPY
-#OPT += -DOUTPUTTIMESTEP
-#OPT += -DOUTPUTERADSTICKY
-#OPT += -DOUTPUTERADFEEDBACK
-#OPT += -DOUTPUTENERGYFLUX
-#OPT += -DOUTPUTOPTVAR1
-#OPT += -DOUTPUTOPTVAR2
-#OPT += -DOUTPUTSTELLAR_PROP
-
-#--------------------------------------- Things for special behaviour
-OPT += -DNOGRAVITY
-#OPT += -DNOTREERND
-#OPT += -DNOTYPEPREFIX_FFTW
-#OPT += -DLONG_X=60
-#OPT += -DLONG_Y=5
-#OPT += -DLONG_Z=0.2
-#OPT += -DTWODIMS
-#OPT += -DSPH_BND_PARTICLES
-#OPT += -DNOVISCOSITYLIMITER
-#OPT += -DCOMPUTE_POTENTIAL_ENERGY
-#OPT += -DLONGIDS
-OPT += -DISOTHERM_EQS
-#OPT += -DADAPTIVE_GRAVSOFT_FORGAS
-#OPT += -DSELECTIVE_NO_GRAVITY=2+4+8+16
-#OPT += -DAVOIDNUMNGBPROBLEM
-#OPT += -DLIMIT_DVEL=1.0
-#OPT += -DOTHERINFO
-#OPT += -DDOMAIN_AT_ORIGIN
-OPT += -DNO_NEGATIVE_PRESSURE
-#OPT += -DCOMPUTE_VELOCITY_DISPERSION
-#OPT += -DCYLINDRICAL_SYMMETRY
-
-OPT += -DWRITE_ALL_MASSES
-#OPT += -DENTROPYPRED
-#OPT += -DCOUNT_ACTIVE_PARTICLES
-OPT += -DRANDOMSEED_AS_PARAMETER
-#OPT += -DDETAILED_CPU
-#OPT += -DDETAILED_CPU_GRAVITY
-#OPT += -DDETAILED_CPU_DOMAIN
-#OPT += -DDETAILED_CPU_OUTPUT_IN_GRAVTREE
-#OPT += -DDETAILED_CPU_OUTPUT_IN_HYDRA
-#OPT += -DDETAILED_CPU_OUTPUT_IN_DENSITY
-#OPT += -DDETAILED_CPU_OUTPUT_IN_STARS_DENSITY
-#OPT += -DDETAILED_CPU_OUTPUT_IN_CHIMIE
-#OPT += -DSPLIT_DOMAIN_USING_TIME
-
-
-#OPT += -DCOSMICTIME
-
-OPT += -DONLY_MASTER_READ_EWALD
-
-#OPT += -DPNBODY
-#OPT += -DPNBODY_OUTPUT_POS
-#OPT += -DPNBODY_OUTPUT_VEL
-#OPT += -DPNBODY_OUTPUT_NUM
-#OPT += -DPNBODY_OUTPUT_MASS
-#OPT += -DPNBODY_OUTPUT_TYPE
-#OPT += -DPNBODY_OUTPUT_ENERGY
-#OPT += -DPNBODY_OUTPUT_DENSITY
-#OPT += -DPNBODY_OUTPUT_HSML
-#OPT += -DPNBODY_OUTPUT_METALS
-
-#--------------------------------------- Physical processes
-#OPT += -DCOOLING
-#OPT += -DIMPLICIT_COOLING_INTEGRATION
-#OPT += -DDO_NO_USE_HYDROGEN_MASSFRAC_IN_COOLING
-
-#OPT += -DHEATING
-#OPT += -DHEATING_PE # photo-electric heating
-
-#OPT += -DSFR
-#OPT += -DCOMPUTE_SFR_ENERGY
-#OPT += -DSFR_NEG_DIV_ONLY
-
-#OPT += -DSTELLAR_PROP
-
-#OPT += -DCHIMIE # need stellar prop
-#OPT += -DCHIMIE_THERMAL_FEEDBACK
-#OPT += -DCHIMIE_COMPUTE_THERMAL_FEEDBACK_ENERGY
-#OPT += -DCHIMIE_KINETIC_FEEDBACK
-#OPT += -DCHIMIE_COMPUTE_KINETIC_FEEDBACK_ENERGY
-#OPT += -DCHIMIE_EXTRAHEADER
-#OPT += -DCHIMIE_INPUT_ALL
-#OPT += -DCHIMIE_MC_SUPERNOVAE
-
-#OPT += -DFEEDBACK
-#OPT += -DFEEDBACK_WIND
-
-#--------------------------------------- multiphase
-#OPT += -DMULTIPHASE
-#OPT += -DNO_HYDRO_FOR_GAS # do not use hydro routine (at all)
-#OPT += -DNO_DENSITY_FOR_STICKY # do not compute density in sticky (need to be done in sfr)
-
-#OPT += -DPHASE_MIXING # need MULTIPHASE : enable phase mixing
-#OPT += -DCOLDGAS_CYCLE # need MULTIPHASE and PHASE_MIXING
-#OPT += -DEXTERNAL_FLUX
-#OPT += -DSTELLAR_FLUX
-#OPT += -DCOUNT_COLLISIONS # count sticky collisions
-
-
-#--------------------------------------- Outer potential
-#OPT += -DOUTERPOTENTIAL
-#OPT += -DNFW
-#OPT += -DPISOTHERM
-#OPT += -DPLUMMER
-#OPT += -DMIYAMOTONAGAI
-#OPT += -DCORIOLIS
-
-#--------------------------------------- Testing and Debugging options
-#OPT += -DFORCETEST=0.1
-#OPT += -DWITH_ID_IN_HYDRA
-#OPT += -DPARTICLE_FLAG
-#OPT += -DOUTPUT_EVERY_TIMESTEP
-#OPT += -DOUTPUT_COOLING_FUNCTION
-
-#OPT += -DCHECK_BLOCK_ORDER
-#OPT += -DCHECK_ENTROPY_SIGN
-#OPT += -DCHECK_TYPE_DURING_IO
-#OPT += -DCHECK_ID_CORRESPONDENCE
-
-
-#--------------------------------------- Glass making
-#OPT += -DMAKEGLASS=262144
-
-
-#--------------------------------------- Artificial Conductivity
-#OPT += -DART_CONDUCTIVITY
-#OPT += -DOUTPUT_CONDUCTIVITY
-#OPT += -DOUTPUTOPTVAR1
-#OPT += -DOUTPUTOPTVAR2
-
-#--------------------------------------- Agn
-#OPT += -DBUBBLES
-#OPT += -DAGN_ACCRETION
-#OPT += -DAGN_FEEDBACK
-#OPT += -DAGN_USE_ANGULAR_MOMENTUM
-#OPT += -DAGN_HEATING
-#OPT += -DBONDI_ACCRETION
-#OPT += -DUSE_BONDI_POWER
-
-#--------------------------------------- Driven Turbulence
-OPT += -DAB_TURB
-
-#--------------------------------------- Artificial Viscosity
-
-OPT += -DART_VISCO_CD
-#OPT += -DART_VISCO_MM
-#OPT += -DART_VISCO_RO
-
-#--------------------------------------- SPH flavour
-OPT += -DPRESSURE_ENTROPY_FORMULATION
-OPT += -DENTROPYPRED
-
-#----------------------------------------------------------------------
-# Here, select compile environment for the target machine. This may need
-# adjustment, depending on your local system. Follow the examples to add
-# additional target platforms, and to get things properly compiled.
-#----------------------------------------------------------------------
-
-#--------------------------------------- Select some defaults
-
-CC = mpicc # sets the C-compiler
-OPTIMIZE = -O2 -Wall -g # sets optimization and warning flags
-MPICHLIB = -lmpich
-
-
-#--------------------------------------- Select target computer
-
-SYSTYPE="obscalc"
-#SYSTYPE="callisto-intel"
-#SYSTYPE="bg1"
-#SYSTYPE="obsds"
-#SYSTYPE="leo_openmpi"
-#SYSTYPE="leo_mpich2shm"
-#SYSTYPE="graphor0"
-#SYSTYPE="obsrevaz"
-#SYSTYPE="regor_openmpigcc"
-#SYSTYPE="regor_mvapich2gcc"
-#SYSTYPE="meso_mpich2"
-#SYSTYPE="meso"
-#SYSTYPE="revaz/local"
-#SYSTYPE="revaz/local_mpich2"
-#SYSTYPE="horizon3_mpich1"
-#SYSTYPE="horizon3_mpich2"
-#SYSTYPE="horizon3"
-#SYSTYPE="LUXOR"
-#SYSTYPE="MPA"
-#SYSTYPE="Mako"
-#SYSTYPE="Regatta"
-#SYSTYPE="RZG_LinuxCluster"
-#SYSTYPE="RZG_LinuxCluster-gcc"
-#SYSTYPE="OpteronMPA"
-#SYSTYPE="OPA-Cluster32"
-#SYSTYPE="OPA-Cluster64"
-
-
-#--------------------------------------- Adjust settings for target computer
-
-# module add openmpi-x86_64
-ifeq ($(SYSTYPE),"obscalc")
-CC = mpicc
-OPTIMIZE =
-GSL_INCL =
-GSL_LIBS =
-FFTW_INCL=
-FFTW_LIBS=
-MPICHLIB =
-HDF5INCL =
-HDF5LIB =
-NO_FFTW_LIB = "yes"
-PY_INCL = -I/usr/include/python2.6/
-PY_LIB = -lpython2.6
-endif
-
-ifeq ($(SYSTYPE),"callisto-intel")
-CC = mpicc
-OPTIMIZE =
-GSL_INCL = -I/u1/yrevaz/local/gsl-intel/include
-GSL_LIBS = -L/u1/yrevaz/local/gsl-intel/lib
-FFTW_INCL= -I/u1/yrevaz/local/fftw-2.1.5-intel/include
-FFTW_LIBS= -L/u1/yrevaz/local/fftw-2.1.5-intel/lib
-MPICHLIB =
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"bg1")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g
-GSL_INCL = -I/home/yrevaz/local/include
-GSL_LIBS = -L/home/yrevaz/local/lib
-FFTW_INCL=
-FFTW_LIBS=
-MPICHLIB =
-HDF5INCL =
-HDF5LIB =
-NO_FFTW_LIB = "yes"
-endif
-
-ifeq ($(SYSTYPE),"obsds")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g
-GSL_INCL =
-GSL_LIBS =
-FFTW_INCL=
-FFTW_LIBS=
-MPICHLIB =
-HDF5INCL =
-HDF5LIB =
-NO_FFTW_LIB = "yes"
-endif
-
-ifeq ($(SYSTYPE),"graphor0")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g
-GSL_INCL = -I/home/epfl/revaz/local/include
-GSL_LIBS = -L/home/epfl/revaz/local/lib
-FFTW_INCL= -I/home/epfl/revaz/local/include
-FFTW_LIBS= -L/home/epfl/revaz/local/lib
-MPICHLIB = -L/home/epfl/revaz/local/openmpi/lib -lmpi
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"obsrevaz")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -fpack-struct
-GSL_INCL =
-GSL_LIBS =
-FFTW_INCL= -I/home/revaz/local/include/
-FFTW_LIBS= -L/home/revaz/local/lib/
-MPICHLIB = -lmpi
-HDF5INCL =
-HDF5LIB =
-endif
-
-
-ifeq ($(SYSTYPE),"regor_openmpigcc")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -fpack-struct
-GSL_INCL = -I/usr/include
-GSL_LIBS = -L/usr/lib64/
-FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
-FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
-MPICHLIB = -lmpi
-HDF5INCL =
-HDF5LIB =
-OPT += -DMESOMACHINE
-endif
-
-ifeq ($(SYSTYPE),"regor_mpich2")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -fpack-struct
-GSL_INCL = -I/usr/include
-GSL_LIBS = -L/usr/lib64/
-FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
-FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
-MPICHLIB = -L/home/revaz/local/mpich2-1.0.6nemesis/lib/ -lmpich
-HDF5INCL =
-HDF5LIB =
-OPT += -DMESOMACHINE
-endif
-
-
-ifeq ($(SYSTYPE),"regor_mvapich2gcc")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -fpack-struct
-GSL_INCL = -I/usr/include
-GSL_LIBS = -L/usr/lib64/
-FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
-FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
-MPICHLIB = -L/cvos/shared/apps/ofed/1.2.5.3/mpi/gcc/mvapich2-0.9.8-15/lib/ -lmpich
-HDF5INCL =
-HDF5LIB =
-OPT += -DMESOMACHINE
-endif
-
-ifeq ($(SYSTYPE),"leo_openmpi")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -fpack-struct
-GSL_INCL = -I/export/revaz/local/include
-GSL_LIBS = -L/export/revaz/local/lib
-FFTW_INCL= -I/export/revaz/local/include
-FFTW_LIBS= -L/export/revaz/local/lib
-MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpi
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"leo_mpich2shm")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g -fpack-struct
-GSL_INCL = -I/export/revaz/local/include
-GSL_LIBS = -L/export/revaz/local/lib
-FFTW_INCL= -I/export/revaz/local/include
-FFTW_LIBS= -L/export/revaz/local/lib
-MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpich
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"meso_mpich2")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g -fpack-struct
-GSL_INCL = -I/home/revaz/local/include
-GSL_LIBS = -L/home/revaz/local/lib
-FFTW_INCL= -I/horizon1/x86_64_sl4/fftw/2.1.5/include/
-FFTW_LIBS= -L/horizon1/x86_64_sl4/fftw/2.1.5/lib/
-MPICHLIB = -L/home/revaz/local/mpich2-1.0.3/lib -lmpich
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"meso")
-CC = mpicc
-OPTIMIZE = -O3 -g
-GSL_INCL =
-GSL_LIBS =
-FFTW_INCL= -I/horizon1/x86_64_sl4/fftw/2.1.5/include/
-FFTW_LIBS= -L/horizon1/x86_64_sl4/fftw/2.1.5/lib/
-MPICHLIB =
-HDF5INCL =
-HDF5LIB =
-OPT += -DMESOMACHINE
-endif
-
-ifeq ($(SYSTYPE),"revaz/local")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g
-GSL_INCL = -I/home/revaz/local/include
-GSL_LIBS = -L/home/revaz/local/lib
-FFTW_INCL= -I/home/revaz/local/include
-FFTW_LIBS= -L/home/revaz/local/lib
-MPICHLIB = -L/home/revaz/local/mpich-1.2.5/ch_p4/lib -lmpich
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"revaz/local_mpich2")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g
-GSL_INCL = -I/home/revaz/local/include
-GSL_LIBS = -L/home/revaz/local/lib
-FFTW_INCL= -I/home/revaz/local/include
-FFTW_LIBS= -L/home/revaz/local/lib
-MPICHLIB = -L/home/revaz/local/mpich2-1.0.3/lib/ -lmpich
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"LUXOR")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g
-#GSL_INCL = -I/home/revaz/local/include
-#GSL_LIBS = -L/home/revaz/local/lib
-#FFTW_INCL= -I/home/revaz/local/include
-#FFTW_LIBS= -L/home/revaz/local/lib
-MPICHLIB = -L/home/revaz/local/mpich-1.2.7/ch_p4/lib -lmpich
-HDF5INCL =
-HDF5LIB =
-endif
-
-
-
-ifeq ($(SYSTYPE),"horizon3")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g -fpack-struct
-GSL_INCL = -I/home/revaz/local/include
-GSL_LIBS = -L/home/revaz/local/lib
-FFTW_INCL= -I/home/revaz/local/include
-FFTW_LIBS= -L/home/revaz/local/lib
-MPICHLIB = -llam
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"horizon3_mpich1")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g -fpack-struct
-GSL_INCL = -I/home/revaz/local/include
-GSL_LIBS = -L/home/revaz/local/lib
-FFTW_INCL= -I/home/revaz/local/include
-FFTW_LIBS= -L/home/revaz/local/lib
-MPICHLIB = -L/home/revaz/local/mpich-1.2.7/lib -lmpich
-HDF5INCL =
-HDF5LIB =
-endif
-
-
-ifeq ($(SYSTYPE),"horizon3_mpich2")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -g -fpack-struct
-GSL_INCL = -I/home/revaz/local/include
-GSL_LIBS = -L/home/revaz/local/lib
-FFTW_INCL= -I/home/revaz/local/include
-FFTW_LIBS= -L/home/revaz/local/lib
-MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpich
-HDF5INCL =
-HDF5LIB =
-endif
-
-ifeq ($(SYSTYPE),"MPA")
-CC = mpicc
-OPTIMIZE = -O3 -Wall
-GSL_INCL = -I/usr/common/pdsoft/include
-GSL_LIBS = -L/usr/common/pdsoft/lib -Wl,"-R /usr/common/pdsoft/lib"
-FFTW_INCL=
-FFTW_LIBS=
-MPICHLIB =
-HDF5INCL =
-HDF5LIB = -lhdf5 -lz
-endif
-
-
-ifeq ($(SYSTYPE),"OpteronMPA")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -m64
-GSL_INCL = -L/usr/local/include
-GSL_LIBS = -L/usr/local/lib
-FFTW_INCL=
-FFTW_LIBS=
-MPICHLIB =
-HDF5INCL = -I/opt/hdf5/include
-HDF5LIB = -L/opt/hdf5/lib -lhdf5 -lz -Wl,"-R /opt/hdf5/lib"
-endif
-
-
-ifeq ($(SYSTYPE),"OPA-Cluster32")
-CC = mpicc
-OPTIMIZE = -O3 -Wall
-GSL_INCL = -I/afs/rzg/bc-b/vrs/opteron32/include
-GSL_LIBS = -L/afs/rzg/bc-b/vrs/opteron32/lib -Wl,"-R /afs/rzg/bc-b/vrs/opteron32/lib"
-FFTW_INCL= -I/afs/rzg/bc-b/vrs/opteron32/include
-FFTW_LIBS= -L/afs/rzg/bc-b/vrs/opteron32/lib
-MPICHLIB =
-HDF5INCL =
-HDF5LIB = -lhdf5 -lz
-endif
-
-
-ifeq ($(SYSTYPE),"OPA-Cluster64")
-CC = mpicc
-OPTIMIZE = -O3 -Wall -m64
-GSL_INCL = -I/afs/rzg/bc-b/vrs/opteron64/include
-GSL_LIBS = -L/afs/rzg/bc-b/vrs/opteron64/lib -Wl,"-R /afs/rzg/bc-b/vrs/opteron64/lib"
-FFTW_INCL= -I/afs/rzg/bc-b/vrs/opteron64/include
-FFTW_LIBS= -L/afs/rzg/bc-b/vrs/opteron64/lib
-MPICHLIB =
-HDF5INCL =
-HDF5LIB = -lhdf5 -lz
-endif
-
-
-ifeq ($(SYSTYPE),"Mako")
-CC = mpicc # sets the C-compiler
-OPTIMIZE = -O3 -march=athlon-mp -mfpmath=sse
-GSL_INCL =
-GSL_LIBS =
-FFTW_INCL=
-FFTW_LIBS=
-MPICHLIB =
-HDF5INCL =
-HDF5LIB = -lhdf5 -lz
-endif
-
-
-ifeq ($(SYSTYPE),"Regatta")
-CC = mpcc_r
-OPTIMIZE = -O5 -qstrict -qipa -q64
-GSL_INCL = -I/afs/rzg/u/vrs/gsl_psi64/include
-GSL_LIBS = -L/afs/rzg/u/vrs/gsl_psi64/lib
-FFTW_INCL= -I/afs/rzg/u/vrs/fftw_psi64/include
-FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_psi64/lib -q64 -qipa
-MPICHLIB =
-HDF5INCL = -I/afs/rzg/u/vrs/hdf5_psi64/include
-HDF5LIB = -L/afs/rzg/u/vrs/hdf5_psi64/lib -lhdf5 -lz
-endif
-
-
-ifeq ($(SYSTYPE),"RZG_LinuxCluster")
-CC = mpicci
-OPTIMIZE = -O3 -ip # Note: Don't use the "-rcd" optimization of Intel's compiler! (causes code crashes)
-GSL_INCL = -I/afs/rzg/u/vrs/gsl_linux/include
-GSL_LIBS = -L/afs/rzg/u/vrs/gsl_linux/lib -Wl,"-R /afs/rzg/u/vrs/gsl_linux/lib"
-FFTW_INCL= -I/afs/rzg/u/vrs/fftw_linux/include
-FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_linux/lib
-HDF5INCL = -I/afs/rzg/u/vrs/hdf5_linux/include
-HDF5LIB = -L/afs/rzg/u/vrs/hdf5_linux/lib -lhdf5 -lz -Wl,"-R /afs/rzg/u/vrs/hdf5_linux/lib"
-endif
-
-
-ifeq ($(SYSTYPE),"RZG_LinuxCluster-gcc")
-CC = mpiccg
-OPTIMIZE = -Wall -g -O3 -march=pentium4
-GSL_INCL = -I/afs/rzg/u/vrs/gsl_linux_gcc3.2/include
-GSL_LIBS = -L/afs/rzg/u/vrs/gsl_linux_gcc3.2/lib -Wl,"-R /afs/rzg/u/vrs/gsl_linux_gcc3.2/lib"
-FFTW_INCL= -I/afs/rzg/u/vrs/fftw_linux_gcc3.2/include
-FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_linux_gcc3.2/lib
-HDF5INCL = -I/afs/rzg/u/vrs/hdf5_linux/include
-HDF5LIB = -L/afs/rzg/u/vrs/hdf5_linux/lib -lhdf5 -lz -Wl,"-R /afs/rzg/u/vrs/hdf5_linux/lib"
-endif
-
-
-ifneq (HAVE_HDF5,$(findstring HAVE_HDF5,$(OPT)))
-HDF5INCL =
-HDF5LIB =
-endif
-
-
-OPTIONS = $(OPTIMIZE) $(OPT)
-
-EXEC = Gadget2
-
-OBJS = main.o run.o predict.o begrun.o endrun.o global.o \
- timestep.o init.o restart.o io.o \
- accel.o read_ic.o ngb.o \
- system.o allocate.o density.o \
- gravtree.o hydra.o driftfac.o \
- domain.o allvars.o potential.o \
- forcetree.o peano.o gravtree_forcetest.o \
- pm_periodic.o pm_nonperiodic.o longrange.o \
- cooling.o agn_heating.o phase.o sticky.o outerpotential.o starformation.o \
- agn_feedback.o bubbles.o bondi_accretion.o chimie.o stars_density.o cosmictime.o \
- pnbody.o ab_turb.o art_visc.o chemistry.o
-
-INCL = allvars.h proto.h tags.h Makefile
-
-
-CFLAGS = $(OPTIONS) $(GSL_INCL) $(FFTW_INCL) $(HDF5INCL) $(PY_INCL)
-
-
-ifeq (NOTYPEPREFIX_FFTW,$(findstring NOTYPEPREFIX_FFTW,$(OPT))) # fftw installed with type prefix?
- FFTW_LIB = $(FFTW_LIBS) -lrfftw_mpi -lfftw_mpi -lrfftw -lfftw
-else
-ifeq (DOUBLEPRECISION_FFTW,$(findstring DOUBLEPRECISION_FFTW,$(OPT)))
- FFTW_LIB = $(FFTW_LIBS) -ldrfftw_mpi -ldfftw_mpi -ldrfftw -ldfftw
-else
- FFTW_LIB = $(FFTW_LIBS) -lsrfftw_mpi -lsfftw_mpi -lsrfftw -lsfftw
-endif
-endif
-
-ifeq ($(NO_FFTW_LIB),"yes")
- FFTW_LIB =
-endif
-
-LIBS = $(HDF5LIB) -g $(MPICHLIB) $(GSL_LIBS) -lgsl -lgslcblas -lm $(FFTW_LIB) $(PY_LIB)
-
-$(EXEC): $(OBJS)
- $(CC) $(OBJS) $(LIBS) -o $(EXEC)
-
-$(OBJS): $(INCL)
-
-
-clean:
- rm -f $(OBJS) $(EXEC)
-
-
-#-----------------------------------------------------------------------
-#
-# Brief guide to compile-time options of the code. More information
-# can be found in the code documentation.
-#
-# - PERIODIC:
-# Set this if you want to have periodic boundary conditions.
-#
-# - UNEQUALSOFTENINGS:
-# Set this if you use particles with different gravitational
-# softening lengths.
-#
-# - PEANOHILBERT:
-# This is a tuning option. When set, the code will bring the
-# particles after each domain decomposition into Peano-Hilbert
-# order. This improves cache utilization and performance.
-#
-# - WALLCLOCK:
-# If set, a wallclock timer is used by the code to measure internal
-# time consumption (see cpu-log file). Otherwise, a timer that
-# measures consumed processor ticks is used.
-#
-# - PMGRID:
-# This enables the TreePM method, i.e. the long-range force is
-# computed with a PM-algorithm, and the short range force with the
-# tree. The parameter has to be set to the size of the mesh that
-# should be used, (e.g. 64, 96, 128, etc). The mesh dimensions need
-# not necessarily be a power of two. Note: If the simulation is
-# not in a periodic box, then a FFT method for vacuum boundaries is
-# employed, using an actual mesh with dimension twice(!) that
-# specified by PMGRID.
-#
-# - PLACEHIGHRESREGION:
-# If this option is set (will only work together with PMGRID), then
-# the long range force is computed in two stages: One Fourier-grid
-# is used to cover the whole simulation volume, allowing the
-# computation of the long-range force. A second Fourier mesh is
-# placed on the region occupied by "high-resolution" particles,
-# allowing the computation of an intermediate scale force. Finally,
-# the force on short scales is computed with the tree. This
-# procedure can be useful for "zoom-simulations", provided the
-# majority of particles (the high-res particles) are occupying only
-# a small fraction of the volume. To activate this option, the
-# parameter needs to be set to an integer bit mask that encodes the
-# particle types that make up the high-res particles.
-# For example, if types 0, 1, and 4 form the high-res
-# particles, set the parameter to PLACEHIGHRESREGION=19, because
-# 2^0 + 2^1 + 2^4 = 19. The spatial region covered by the high-res
-# grid is determined automatically from the initial conditions.
-# Note: If a periodic box is used, the high-res zone may not intersect
-# the box boundaries.
-#
-# - ENLARGEREGION:
-# The spatial region covered by the high-res zone has a fixed size
-# during the simulation, which initially is set to the smallest
-# region that encompasses all high-res particles. Normally, the
-# simulation will be interrupted if high-res particles leave this
-# region in the course of the run. However, by setting this
-# parameter to a value larger than one, the size of the high-res
-# region can be expanded, providing a buffer region. For example,
-# setting it to 1.4 will enlarge its side-length by 40% (it remains
-# centered on the high-res particles). Hence, with this setting, the
-# high-res region may expand or move by a limited amount.
-# Note: If SYNCHRONIZATION is activated, the code will be able to
-# continue even if high-res particles leave the initial high-res
-# grid. In this case, the code will update the size and position of
-# the grid that is placed onto the high-resolution region
-# automatically. To prevent this from potentially happening every
-# single PM step, one should nevertheless assign a value slightly
-# larger than 1 to ENLARGEREGION.
-#
-# - ASMTH:
-# This can be used to override the value assumed for the scale that
-# defines the long-range/short-range force-split in the TreePM
-# algorithm. The default value is 1.25, in mesh-cells.
-#
-# - RCUT:
-# This can be used to override the maximum radius in which the
-# short-range tree-force is evaluated (in case the TreePM algorithm
-# is used). The default value is 4.5, given in mesh-cells.
-#
-# - DOUBLEPRECISION:
-# This makes the code store and compute internal particle data in
-# double precision. Note that output files are nevertheless written
-# by converting the particle data to single precision.
-#
-# - DOUBLEPRECISION_FFTW:
-# If this is set, the code will use the double-precision version of
-# FFTW, provided the latter has been explicitly installed with a
-# "d" prefix, and NOTYPEPREFIX_FFTW is not set. Otherwise the
-# single precision version ("s" prefix) is used.
-#
-# - SYNCHRONIZATION:
-# When this is set, particles are kept in a binary hierarchy of
-# timesteps and may only increase their timestep if the new
-# timestep will put them into synchronization with the higher time
-# level.
-#
-# - FLEXSTEPS:
-# This is an alternative to SYNCHRONIZATION. Particle timesteps are
-# here allowed to be integer multiples of the minimum timestep that
-# occurs among the particles, which in turn is rounded down to the
-# nearest power-of-two division of the total simulated
-# timespan. This option distributes particles more evenly over
-# individual system timesteps, particularly once a simulation has
-# run for a while, and may then result in a reduction of work-load
-# imbalance losses.
-#
-# - PSEUDOSYMMETRIC:
-# When this option is set, the code will try to "anticipate"
-# timestep changes by extrapolating the change of the acceleration
-# into the future. This can in certain idealized cases improve the
-# long-term integration behaviour of periodic orbits, but should
-# make little or no difference in most real-world applications. May
-# only be used together with SYNCHRONIZATION.
-#
-# - NOSTOP_WHEN_BELOW_MINTIMESTEP:
-# If this is activated, the code will not terminate when the
-# timestep falls below the value of MinSizeTimestep specified in
-# the parameterfile. This is useful for runs where one wants to
-# enforce a constant timestep for all particles. This can be done
-# by activating this option, and by setting MinSizeTimestep and
-# MaxSizeTimestep to an equal value.
-#
-# - NOPMSTEPADJUSTMENT:
-# When this is set, the long-range timestep for the PM-force
-# computation (when the TreePM algorithm is used) is always
-# determined by MaxSizeTimeStep. Otherwise, it is determined by
-# the MaxRMSDisplacement parameter, or MaxSizeTimeStep, whichever
-# gives the smaller step.
-#
-# - HAVE_HDF5:
-# If this is set, the code will be compiled with support for input
-# and output in the HDF5 format. You need to have the HDF5
-# libraries and headers installed on your computer for this option
-# to work. The HDF5 format can then be selected as format "3" in
-# Gadget's parameterfile.
-#
-# - OUTPUTPOTENTIAL:
-# This will make the code compute gravitational potentials for
-# all particles each time a snapshot file is generated. The values
-# are then included in the snapshot file. Note that the computation
-# of the values of the gravitational potential costs additional CPU.
-#
-# - OUTPUTACCELERATION:
-# This will include the physical acceleration of each particle in
-# snapshot files.
-#
-# - OUTPUTCHANGEOFENTROPY:
-# This will include the rate of change of entropy of gas particles
-# in snapshot files.
-#
-# - OUTPUTTIMESTEP:
-# This will include the current timesteps of all particles in the
-# snapshot files.
-#
-# - NOGRAVITY
-# This switches off gravity. Useful only for pure SPH simulations
-# in non-expanding space.
-#
-# - NOTREERND:
-# If this is not set, the tree construction will succeed even when
-# there are a few particles at identical locations. This is done by
-# `rerouting' particles once the node-size has fallen below 1.0e-3
-# of the softening length. When this option is activated, this will
-# be suppressed and the tree construction will always fail if there
-# are particles at extremely close coordinates.
-#
-# - NOTYPEPREFIX_FFTW:
-# This is an option that signals that FFTW has been compiled
-# without the type-prefix option, i.e. no leading "d" or "s"
-# characters are used to access the library.
-#
-# - LONG_X/Y/Z:
-# These options can be used together with PERIODIC and NOGRAVITY only.
-# When set, the options define numerical factors that can be used to
-# distort the periodic simulation cube into a parallelepiped of
-# arbitrary aspect ratio. This can be useful for idealized SPH tests.
-#
-# - TWODIMS:
-# This effectively switches off one dimension in SPH, i.e. the code
-# follows only 2d hydrodynamics in the xy-, yz-, or xz-plane. This
-# only works with NOGRAVITY, and if all coordinates of the third
-# axis are exactly equal. Can be useful for idealized SPH tests.
-#
-# - SPH_BND_PARTICLES:
-# If this is set, particles with a particle-ID equal to zero do not
-# receive any SPH acceleration. This can be useful for idealized
-# SPH tests, where these particles represent fixed "walls".
-#
-# - NOVISCOSITYLIMITER:
-# If this is set, the code will not try to put an upper limit on
-# the viscous force in case an implausibly high pair-wise viscous
-# force (which may lead to a particle 'reflection' in case of poor
-# timestepping) should arise. Note: For proper settings of the
-# timestep parameters, this situation should not arise.
-#
-# - COMPUTE_POTENTIAL_ENERGY:
-# When this option is set, the code will compute the gravitational
-# potential energy each time the global statistics are computed. This
-# can be useful for testing global energy conservation.
-#
-# - LONGIDS:
-# If this is set, the code assumes that particle-IDs are stored as
-# 64-bit long integers. This is only really needed if you want to
-# go beyond ~2 billion particles.
-#
-# - ISOTHERM_EQS:
-# This special option makes the gas behave like an isothermal gas
-# with equation of state P = cs^2 * rho. The sound-speed cs is set by
-# the thermal energy per unit mass in the initial conditions,
-# i.e. cs^2=u. If the value for u is zero, then the initial gas
-# temperature in the parameter file is used to define the sound speed
-# according to cs^2 = 3/2 kT/mp, where mp is the proton mass.
-#
-# - ADAPTIVE_GRAVSOFT_FORGAS:
-# When this option is set, the gravitational softening length used for
-# gas particles is tied to their SPH smoothing length. This can be useful
-# for dissipative collapse simulations. The option requires the setting
-# of UNEQUALSOFTENINGS.
-#
-# - SELECTIVE_NO_GRAVITY:
-# This can be used for special computations where one wants to
-# exclude certain particle types from receiving gravitational
-# forces. The particle types that are excluded in this fashion are
-# specified by a bit mask, in the same way as for the PLACEHIGHRESREGION
-# option.
-#
-# - FORCETEST:
-# This can be set to check the force accuracy of the code. The
-# option needs to be set to a number between 0 and 1 (e.g. 0.01),
-# which is taken to specify a random fraction of particles for
-# which at each timestep forces by direct summation are
-# computed. The normal tree-forces and the correct direct
-# summation forces are collected in a file. Note that the
-# simulation itself is unaffected by this option, but it will of
-# course run much(!) slower, especially if
-# FORCETEST*NumPart*NumPart >> NumPart. Note: Particle IDs must
-# be set to numbers >=1 for this to work.
-#
-# - MAKEGLASS
-# This option can be used to generate a glass-like particle
-# configuration. The value assigned gives the particle load,
-# which is initially generated as a Poisson sample and then
-# evolved towards a glass with the sign of gravity reversed.
-#
-#-----------------------------------------------------------------------
-
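The PLACEHIGHRESREGION and SELECTIVE_NO_GRAVITY options documented above both take an integer bit mask over particle types: types 0, 1 and 4 give 2^0 + 2^1 + 2^4 = 19, and -DSELECTIVE_NO_GRAVITY=2+4+8+16 selects types 1 to 4. A minimal Python helper, shown purely for illustration, builds such a mask:

def type_mask(types):
    """Return the bit mask selecting the given particle types (type i -> 2**i)."""
    mask = 0
    for t in types:
        mask |= 1 << t
    return mask

print(type_mask([0, 1, 4]))     # 19  -> -DPLACEHIGHRESREGION=19
print(type_mask([1, 2, 3, 4]))  # 30  -> same as -DSELECTIVE_NO_GRAVITY=2+4+8+16

Passing the resulting integer on the compile line (e.g. -DPLACEHIGHRESREGION=19) is equivalent to writing out the sum of powers of two by hand, as done for SELECTIVE_NO_GRAVITY in the Makefile above.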
diff --git a/src/Makefile b/src/Makefile
new file mode 120000
index 0000000..2295e44
--- /dev/null
+++ b/src/Makefile
@@ -0,0 +1 @@
+Makefiles/Makefile.SNs
\ No newline at end of file
diff --git a/src/Makefile b/src/Makefiles/Makefile.SNs
similarity index 94%
copy from src/Makefile
copy to src/Makefiles/Makefile.SNs
index 4852c42..7064463 100644
--- a/src/Makefile
+++ b/src/Makefiles/Makefile.SNs
@@ -1,894 +1,875 @@
#----------------------------------------------------------------------
# From the list below, please activate/deactivate the options that
# apply to your run. If you modify any of these options, make sure
# that you recompile the whole code by typing "make clean; make".
#
# Look at end of file for a brief guide to the compile-time options.
#----------------------------------------------------------------------
#--------------------------------------- Basic operation mode of code
OPT += -DPERIODIC
-#OPT += -DUNEQUALSOFTENINGS
+OPT += -DUNEQUALSOFTENINGS
#--------------------------------------- Things that are always recommended
OPT += -DPEANOHILBERT
OPT += -DWALLCLOCK
#--------------------------------------- TreePM Options
#OPT += -DPMGRID=128
#OPT += -DPLACEHIGHRESREGION=3
#OPT += -DENLARGEREGION=1.2
#OPT += -DASMTH=1.25
#OPT += -DRCUT=4.5
#--------------------------------------- Single/Double Precision
#OPT += -DDOUBLEPRECISION
#OPT += -DDOUBLEPRECISION_FFTW
#--------------------------------------- Time integration options
OPT += -DSYNCHRONIZATION
#OPT += -DFLEXSTEPS
#OPT += -DPSEUDOSYMMETRIC
OPT += -DNOSTOP_WHEN_BELOW_MINTIMESTEP
#OPT += -DNOPMSTEPADJUSTMENT
+OPT += -DSYNCHRONIZE_NGB_TIMESTEP # ngb particles have synchronized time steps
+OPT += -DTIMESTEP_UPDATE_FOR_FEEDBACK # timestep is updated when feedback occurs
+OPT += -DIMPROVED_TIMESTEP_CRITERION_FORGAS
#--------------------------------------- Output
OPT += -DADVANCEDSTATISTICS
OPT += -DADVANCEDCPUSTATISTICS
OPT += -DSYSTEMSTATISTICS
OPT += -DBLOCK_SKIPPING
#OPT += -DHAVE_HDF5
#OPT += -DOUTPUTPOTENTIAL
#OPT += -DOUTPUTACCELERATION
#OPT += -DOUTPUTCHANGEOFENTROPY
#OPT += -DOUTPUTTIMESTEP
#OPT += -DOUTPUTERADSTICKY
#OPT += -DOUTPUTERADFEEDBACK
#OPT += -DOUTPUTENERGYFLUX
-#OPT += -DOUTPUTOPTVAR1
-#OPT += -DOUTPUTOPTVAR2
-#OPT += -DOUTPUTSTELLAR_PROP
+OPT += -DOUTPUTOPTVAR1
+OPT += -DOUTPUTSTELLAR_PROP
#--------------------------------------- Things for special behaviour
-OPT += -DNOGRAVITY
+#OPT += -DNOGRAVITY
#OPT += -DNOTREERND
#OPT += -DNOTYPEPREFIX_FFTW
#OPT += -DLONG_X=60
#OPT += -DLONG_Y=5
#OPT += -DLONG_Z=0.2
#OPT += -DTWODIMS
#OPT += -DSPH_BND_PARTICLES
#OPT += -DNOVISCOSITYLIMITER
-#OPT += -DCOMPUTE_POTENTIAL_ENERGY
+OPT += -DCOMPUTE_POTENTIAL_ENERGY
#OPT += -DLONGIDS
-OPT += -DISOTHERM_EQS
+#OPT += -DISOTHERM_EQS
#OPT += -DADAPTIVE_GRAVSOFT_FORGAS
#OPT += -DSELECTIVE_NO_GRAVITY=2+4+8+16
#OPT += -DAVOIDNUMNGBPROBLEM
#OPT += -DLIMIT_DVEL=1.0
#OPT += -DOTHERINFO
#OPT += -DDOMAIN_AT_ORIGIN
OPT += -DNO_NEGATIVE_PRESSURE
#OPT += -DCOMPUTE_VELOCITY_DISPERSION
#OPT += -DCYLINDRICAL_SYMMETRY
OPT += -DWRITE_ALL_MASSES
-#OPT += -DENTROPYPRED
-#OPT += -DCOUNT_ACTIVE_PARTICLES
+OPT += -DENTROPYPRED
+OPT += -DCOUNT_ACTIVE_PARTICLES
OPT += -DRANDOMSEED_AS_PARAMETER
-#OPT += -DDETAILED_CPU
-#OPT += -DDETAILED_CPU_GRAVITY
-#OPT += -DDETAILED_CPU_DOMAIN
-#OPT += -DDETAILED_CPU_OUTPUT_IN_GRAVTREE
-#OPT += -DDETAILED_CPU_OUTPUT_IN_HYDRA
-#OPT += -DDETAILED_CPU_OUTPUT_IN_DENSITY
-#OPT += -DDETAILED_CPU_OUTPUT_IN_STARS_DENSITY
-#OPT += -DDETAILED_CPU_OUTPUT_IN_CHIMIE
+OPT += -DDETAILED_CPU
+OPT += -DDETAILED_CPU_GRAVITY
+OPT += -DDETAILED_CPU_DOMAIN
+OPT += -DDETAILED_OUTPUT_IN_GRAVTREE
#OPT += -DSPLIT_DOMAIN_USING_TIME
-#OPT += -DCOSMICTIME
+OPT += -DCOSMICTIME
OPT += -DONLY_MASTER_READ_EWALD
#OPT += -DPNBODY
#OPT += -DPNBODY_OUTPUT_POS
#OPT += -DPNBODY_OUTPUT_VEL
#OPT += -DPNBODY_OUTPUT_NUM
#OPT += -DPNBODY_OUTPUT_MASS
#OPT += -DPNBODY_OUTPUT_TYPE
#OPT += -DPNBODY_OUTPUT_ENERGY
#OPT += -DPNBODY_OUTPUT_DENSITY
#OPT += -DPNBODY_OUTPUT_HSML
#OPT += -DPNBODY_OUTPUT_METALS
#--------------------------------------- Physical processes
-#OPT += -DCOOLING
+OPT += -DCOOLING
#OPT += -DIMPLICIT_COOLING_INTEGRATION
#OPT += -DDO_NO_USE_HYDROGEN_MASSFRAC_IN_COOLING
#OPT += -DHEATING
#OPT += -DHEATING_PE # photo-electric heating
-#OPT += -DSFR
-#OPT += -DCOMPUTE_SFR_ENERGY
-#OPT += -DSFR_NEG_DIV_ONLY
+OPT += -DSFR
+OPT += -DCOMPUTE_SFR_ENERGY
+OPT += -DSFR_NEG_DIV_ONLY
-#OPT += -DSTELLAR_PROP
+OPT += -DSTELLAR_PROP
-#OPT += -DCHIMIE # need stellar prop
-#OPT += -DCHIMIE_THERMAL_FEEDBACK
-#OPT += -DCHIMIE_COMPUTE_THERMAL_FEEDBACK_ENERGY
+OPT += -DCHIMIE # need stellar prop
+OPT += -DCHIMIE_THERMAL_FEEDBACK
+OPT += -DCHIMIE_COMPUTE_THERMAL_FEEDBACK_ENERGY
#OPT += -DCHIMIE_KINETIC_FEEDBACK
#OPT += -DCHIMIE_COMPUTE_KINETIC_FEEDBACK_ENERGY
-#OPT += -DCHIMIE_EXTRAHEADER
+OPT += -DCHIMIE_EXTRAHEADER
#OPT += -DCHIMIE_INPUT_ALL
-#OPT += -DCHIMIE_MC_SUPERNOVAE
+OPT += -DCHIMIE_MC_SUPERNOVAE
+OPT += -DCHIMIE_ONE_SN_ONLY
+
#OPT += -DFEEDBACK
#OPT += -DFEEDBACK_WIND
#--------------------------------------- multiphase
#OPT += -DMULTIPHASE
#OPT += -DNO_HYDRO_FOR_GAS # do not use hydro routine (at all)
#OPT += -DNO_DENSITY_FOR_STICKY # do not compute density in sticky (need to be done in sfr)
#OPT += -DPHASE_MIXING # need MULTIPHASE : enable phase mixing
#OPT += -DCOLDGAS_CYCLE # need MULTIPHASE and PHASE_MIXING
#OPT += -DEXTERNAL_FLUX
#OPT += -DSTELLAR_FLUX
#OPT += -DCOUNT_COLLISIONS # count sticky collisions
#--------------------------------------- Outer potential
#OPT += -DOUTERPOTENTIAL
#OPT += -DNFW
#OPT += -DPISOTHERM
#OPT += -DPLUMMER
#OPT += -DMIYAMOTONAGAI
#OPT += -DCORIOLIS
#--------------------------------------- Testing and Debugging options
#OPT += -DFORCETEST=0.1
#OPT += -DWITH_ID_IN_HYDRA
#OPT += -DPARTICLE_FLAG
#OPT += -DOUTPUT_EVERY_TIMESTEP
#OPT += -DOUTPUT_COOLING_FUNCTION
-#OPT += -DCHECK_BLOCK_ORDER
-#OPT += -DCHECK_ENTROPY_SIGN
-#OPT += -DCHECK_TYPE_DURING_IO
-#OPT += -DCHECK_ID_CORRESPONDENCE
+OPT += -DCHECK_BLOCK_ORDER
+OPT += -DCHECK_ENTROPY_SIGN
+OPT += -DCHECK_TYPE_DURING_IO
+OPT += -DCHECK_ID_CORRESPONDENCE
#--------------------------------------- Glass making
#OPT += -DMAKEGLASS=262144
-
-#--------------------------------------- Artificial Conductivity
-#OPT += -DART_CONDUCTIVITY
-#OPT += -DOUTPUT_CONDUCTIVITY
-#OPT += -DOUTPUTOPTVAR1
-#OPT += -DOUTPUTOPTVAR2
-
#--------------------------------------- Agn
#OPT += -DBUBBLES
#OPT += -DAGN_ACCRETION
#OPT += -DAGN_FEEDBACK
#OPT += -DAGN_USE_ANGULAR_MOMENTUM
#OPT += -DAGN_HEATING
#OPT += -DBONDI_ACCRETION
#OPT += -DUSE_BONDI_POWER
-#--------------------------------------- Driven Turbulence
-OPT += -DAB_TURB
-
-#--------------------------------------- Artificial Viscosity
-
-OPT += -DART_VISCO_CD
-#OPT += -DART_VISCO_MM
-#OPT += -DART_VISCO_RO
-
-#--------------------------------------- SPH flavour
-OPT += -DPRESSURE_ENTROPY_FORMULATION
-OPT += -DENTROPYPRED
#----------------------------------------------------------------------
# Here, select compile environment for the target machine. This may need
# adjustment, depending on your local system. Follow the examples to add
# additional target platforms, and to get things properly compiled.
#----------------------------------------------------------------------
#--------------------------------------- Select some defaults
CC = mpicc # sets the C-compiler
OPTIMIZE = -O2 -Wall -g # sets optimization and warning flags
MPICHLIB = -lmpich
#--------------------------------------- Select target computer
SYSTYPE="obscalc"
#SYSTYPE="callisto-intel"
#SYSTYPE="bg1"
#SYSTYPE="obsds"
#SYSTYPE="leo_openmpi"
#SYSTYPE="leo_mpich2shm"
#SYSTYPE="graphor0"
#SYSTYPE="obsrevaz"
#SYSTYPE="regor_openmpigcc"
#SYSTYPE="regor_mvapich2gcc"
#SYSTYPE="meso_mpich2"
#SYSTYPE="meso"
#SYSTYPE="revaz/local"
#SYSTYPE="revaz/local_mpich2"
#SYSTYPE="horizon3_mpich1"
#SYSTYPE="horizon3_mpich2"
#SYSTYPE="horizon3"
#SYSTYPE="LUXOR"
#SYSTYPE="MPA"
#SYSTYPE="Mako"
#SYSTYPE="Regatta"
#SYSTYPE="RZG_LinuxCluster"
#SYSTYPE="RZG_LinuxCluster-gcc"
#SYSTYPE="OpteronMPA"
#SYSTYPE="OPA-Cluster32"
#SYSTYPE="OPA-Cluster64"
#--------------------------------------- Adjust settings for target computer
# module add openmpi-x86_64
ifeq ($(SYSTYPE),"obscalc")
CC = mpicc
OPTIMIZE =
GSL_INCL =
GSL_LIBS =
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB =
NO_FFTW_LIB = "yes"
PY_INCL = -I/usr/include/python2.6/
PY_LIB = -lpython2.6
endif
ifeq ($(SYSTYPE),"callisto-intel")
CC = mpicc
OPTIMIZE =
GSL_INCL = -I/u1/yrevaz/local/gsl-intel/include
GSL_LIBS = -L/u1/yrevaz/local/gsl-intel/lib
FFTW_INCL= -I/u1/yrevaz/local/fftw-2.1.5-intel/include
FFTW_LIBS= -L/u1/yrevaz/local/fftw-2.1.5-intel/lib
MPICHLIB =
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"bg1")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/yrevaz/local/include
GSL_LIBS = -L/home/yrevaz/local/lib
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB =
NO_FFTW_LIB = "yes"
endif
ifeq ($(SYSTYPE),"obsds")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL =
GSL_LIBS =
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB =
NO_FFTW_LIB = "yes"
endif
ifeq ($(SYSTYPE),"graphor0")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/epfl/revaz/local/include
GSL_LIBS = -L/home/epfl/revaz/local/lib
FFTW_INCL= -I/home/epfl/revaz/local/include
FFTW_LIBS= -L/home/epfl/revaz/local/lib
MPICHLIB = -L/home/epfl/revaz/local/openmpi/lib -lmpi
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"obsrevaz")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL =
GSL_LIBS =
FFTW_INCL= -I/home/revaz/local/include/
FFTW_LIBS= -L/home/revaz/local/lib/
MPICHLIB = -lmpi
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"regor_openmpigcc")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/usr/include
GSL_LIBS = -L/usr/lib64/
FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
MPICHLIB = -lmpi
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
endif
ifeq ($(SYSTYPE),"regor_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/usr/include
GSL_LIBS = -L/usr/lib64/
FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
MPICHLIB = -L/home/revaz/local/mpich2-1.0.6nemesis/lib/ -lmpich
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
endif
ifeq ($(SYSTYPE),"regor_mvapich2gcc")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/usr/include
GSL_LIBS = -L/usr/lib64/
FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
MPICHLIB = -L/cvos/shared/apps/ofed/1.2.5.3/mpi/gcc/mvapich2-0.9.8-15/lib/ -lmpich
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
endif
ifeq ($(SYSTYPE),"leo_openmpi")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/export/revaz/local/include
GSL_LIBS = -L/export/revaz/local/lib
FFTW_INCL= -I/export/revaz/local/include
FFTW_LIBS= -L/export/revaz/local/lib
MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpi
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"leo_mpich2shm")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/export/revaz/local/include
GSL_LIBS = -L/export/revaz/local/lib
FFTW_INCL= -I/export/revaz/local/include
FFTW_LIBS= -L/export/revaz/local/lib
MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"meso_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/horizon1/x86_64_sl4/fftw/2.1.5/include/
FFTW_LIBS= -L/horizon1/x86_64_sl4/fftw/2.1.5/lib/
MPICHLIB = -L/home/revaz/local/mpich2-1.0.3/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"meso")
CC = mpicc
OPTIMIZE = -O3 -g
GSL_INCL =
GSL_LIBS =
FFTW_INCL= -I/horizon1/x86_64_sl4/fftw/2.1.5/include/
FFTW_LIBS= -L/horizon1/x86_64_sl4/fftw/2.1.5/lib/
MPICHLIB =
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
endif
ifeq ($(SYSTYPE),"revaz/local")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich-1.2.5/ch_p4/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"revaz/local_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich2-1.0.3/lib/ -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"LUXOR")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
#GSL_INCL = -I/home/revaz/local/include
#GSL_LIBS = -L/home/revaz/local/lib
#FFTW_INCL= -I/home/revaz/local/include
#FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich-1.2.7/ch_p4/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"horizon3")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -llam
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"horizon3_mpich1")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich-1.2.7/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"horizon3_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"MPA")
CC = mpicc
OPTIMIZE = -O3 -Wall
GSL_INCL = -I/usr/common/pdsoft/include
GSL_LIBS = -L/usr/common/pdsoft/lib -Wl,"-R /usr/common/pdsoft/lib"
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"OpteronMPA")
CC = mpicc
OPTIMIZE = -O3 -Wall -m64
GSL_INCL = -L/usr/local/include
GSL_LIBS = -L/usr/local/lib
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL = -I/opt/hdf5/include
HDF5LIB = -L/opt/hdf5/lib -lhdf5 -lz -Wl,"-R /opt/hdf5/lib"
endif
ifeq ($(SYSTYPE),"OPA-Cluster32")
CC = mpicc
OPTIMIZE = -O3 -Wall
GSL_INCL = -I/afs/rzg/bc-b/vrs/opteron32/include
GSL_LIBS = -L/afs/rzg/bc-b/vrs/opteron32/lib -Wl,"-R /afs/rzg/bc-b/vrs/opteron32/lib"
FFTW_INCL= -I/afs/rzg/bc-b/vrs/opteron32/include
FFTW_LIBS= -L/afs/rzg/bc-b/vrs/opteron32/lib
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"OPA-Cluster64")
CC = mpicc
OPTIMIZE = -O3 -Wall -m64
GSL_INCL = -I/afs/rzg/bc-b/vrs/opteron64/include
GSL_LIBS = -L/afs/rzg/bc-b/vrs/opteron64/lib -Wl,"-R /afs/rzg/bc-b/vrs/opteron64/lib"
FFTW_INCL= -I/afs/rzg/bc-b/vrs/opteron64/include
FFTW_LIBS= -L/afs/rzg/bc-b/vrs/opteron64/lib
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"Mako")
CC = mpicc # sets the C-compiler
OPTIMIZE = -O3 -march=athlon-mp -mfpmath=sse
GSL_INCL =
GSL_LIBS =
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"Regatta")
CC = mpcc_r
OPTIMIZE = -O5 -qstrict -qipa -q64
GSL_INCL = -I/afs/rzg/u/vrs/gsl_psi64/include
GSL_LIBS = -L/afs/rzg/u/vrs/gsl_psi64/lib
FFTW_INCL= -I/afs/rzg/u/vrs/fftw_psi64/include
FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_psi64/lib -q64 -qipa
MPICHLIB =
HDF5INCL = -I/afs/rzg/u/vrs/hdf5_psi64/include
HDF5LIB = -L/afs/rzg/u/vrs/hdf5_psi64/lib -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"RZG_LinuxCluster")
CC = mpicci
OPTIMIZE = -O3 -ip # Note: Don't use the "-rcd" optimization of Intel's compiler! (causes code crashes)
GSL_INCL = -I/afs/rzg/u/vrs/gsl_linux/include
GSL_LIBS = -L/afs/rzg/u/vrs/gsl_linux/lib -Wl,"-R /afs/rzg/u/vrs/gsl_linux/lib"
FFTW_INCL= -I/afs/rzg/u/vrs/fftw_linux/include
FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_linux/lib
HDF5INCL = -I/afs/rzg/u/vrs/hdf5_linux/include
HDF5LIB = -L/afs/rzg/u/vrs/hdf5_linux/lib -lhdf5 -lz -Wl,"-R /afs/rzg/u/vrs/hdf5_linux/lib"
endif
ifeq ($(SYSTYPE),"RZG_LinuxCluster-gcc")
CC = mpiccg
OPTIMIZE = -Wall -g -O3 -march=pentium4
GSL_INCL = -I/afs/rzg/u/vrs/gsl_linux_gcc3.2/include
GSL_LIBS = -L/afs/rzg/u/vrs/gsl_linux_gcc3.2/lib -Wl,"-R /afs/rzg/u/vrs/gsl_linux_gcc3.2/lib"
FFTW_INCL= -I/afs/rzg/u/vrs/fftw_linux_gcc3.2/include
FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_linux_gcc3.2/lib
HDF5INCL = -I/afs/rzg/u/vrs/hdf5_linux/include
HDF5LIB = -L/afs/rzg/u/vrs/hdf5_linux/lib -lhdf5 -lz -Wl,"-R /afs/rzg/u/vrs/hdf5_linux/lib"
endif
ifneq (HAVE_HDF5,$(findstring HAVE_HDF5,$(OPT)))
HDF5INCL =
HDF5LIB =
endif
OPTIONS = $(OPTIMIZE) $(OPT)
EXEC = Gadget2
OBJS = main.o run.o predict.o begrun.o endrun.o global.o \
timestep.o init.o restart.o io.o \
accel.o read_ic.o ngb.o \
system.o allocate.o density.o \
gravtree.o hydra.o driftfac.o \
domain.o allvars.o potential.o \
forcetree.o peano.o gravtree_forcetest.o \
pm_periodic.o pm_nonperiodic.o longrange.o \
cooling.o agn_heating.o phase.o sticky.o outerpotential.o starformation.o \
agn_feedback.o bubbles.o bondi_accretion.o chimie.o stars_density.o cosmictime.o \
- pnbody.o ab_turb.o art_visc.o chemistry.o
+ pnbody.o ab_turb.o art_visc.o sigvel.o
INCL = allvars.h proto.h tags.h Makefile
CFLAGS = $(OPTIONS) $(GSL_INCL) $(FFTW_INCL) $(HDF5INCL) $(PY_INCL)
ifeq (NOTYPEPREFIX_FFTW,$(findstring NOTYPEPREFIX_FFTW,$(OPT))) # fftw installed with type prefix?
FFTW_LIB = $(FFTW_LIBS) -lrfftw_mpi -lfftw_mpi -lrfftw -lfftw
else
ifeq (DOUBLEPRECISION_FFTW,$(findstring DOUBLEPRECISION_FFTW,$(OPT)))
FFTW_LIB = $(FFTW_LIBS) -ldrfftw_mpi -ldfftw_mpi -ldrfftw -ldfftw
else
FFTW_LIB = $(FFTW_LIBS) -lsrfftw_mpi -lsfftw_mpi -lsrfftw -lsfftw
endif
endif
ifeq ($(NO_FFTW_LIB),"yes")
FFTW_LIB =
endif
LIBS = $(HDF5LIB) -g $(MPICHLIB) $(GSL_LIBS) -lgsl -lgslcblas -lm $(FFTW_LIB) $(PY_LIB)
$(EXEC): $(OBJS)
$(CC) $(OBJS) $(LIBS) -o $(EXEC)
$(OBJS): $(INCL)
clean:
rm -f $(OBJS) $(EXEC)
#-----------------------------------------------------------------------
#
# Brief guide to compile-time options of the code. More information
# can be found in the code documentation.
#
# - PERIODIC:
# Set this if you want to have periodic boundary conditions.
#
# - UNEQUALSOFTENINGS:
# Set this if you use particles with different gravitational
# softening lengths.
#
# - PEANOHILBERT:
# This is a tuning option. When set, the code will bring the
# particles after each domain decomposition into Peano-Hilbert
# order. This improves cache utilization and performance.
#
# - WALLCLOCK:
# If set, a wallclock timer is used by the code to measure internal
# time consumption (see cpu-log file). Otherwise, a timer that
# measures consumed processor ticks is used.
#
# - PMGRID:
# This enables the TreePM method, i.e. the long-range force is
# computed with a PM-algorithm, and the short range force with the
# tree. The parameter has to be set to the size of the mesh that
# should be used, (e.g. 64, 96, 128, etc). The mesh dimensions need
# not necessarily be a power of two. Note: If the simulation is
# not in a periodic box, then a FFT method for vacuum boundaries is
# employed, using an actual mesh with dimension twice(!) that
# specified by PMGRID.
#
# - PLACEHIGHRESREGION:
# If this option is set (will only work together with PMGRID), then
# the long range force is computed in two stages: One Fourier-grid
# is used to cover the whole simulation volume, allowing the
# computation of the long-range force. A second Fourier mesh is
# placed on the region occupied by "high-resolution" particles,
# allowing the computation of an intermediate scale force. Finally,
# the force on short scales is computed with the tree. This
# procedure can be useful for "zoom-simulations", provided the
# majority of particles (the high-res particles) are occupying only
# a small fraction of the volume. To activate this option, the
# parameter needs to be set to an integer bit mask that encodes the
# particle types that make up the high-res particles.
# For example, if types 0, 1, and 4 form the high-res
# particles, set the parameter to PLACEHIGHRESREGION=19, because
# 2^0 + 2^1 + 2^4 = 19. The spatial region covered by the high-res
# grid is determined automatically from the initial conditions.
# Note: If a periodic box is used, the high-res zone may not intersect
# the box boundaries.
#
# - ENLARGEREGION:
# The spatial region covered by the high-res zone has a fixed size
# during the simulation, which initially is set to the smallest
# region that encompasses all high-res particles. Normally, the
# simulation will be interrupted if high-res particles leave this
# region in the course of the run. However, by setting this
# parameter to a value larger than one, the size of the high-res
# region can be expanded, providing a buffer region. For example,
# setting it to 1.4 will enlarge its side-length by 40% (it remains
# centered on the high-res particles). Hence, with this setting, the
# high-res region may expand or move by a limited amount.
# Note: If SYNCHRONIZATION is activated, the code will be able to
# continue even if high-res particles leave the initial high-res
# grid. In this case, the code will update the size and position of
# the grid that is placed onto the high-resolution region
# automatically. To prevent this from potentially happening every
# single PM step, one should nevertheless assign a value slightly
# larger than 1 to ENLARGEREGION.
#
# - ASMTH:
# This can be used to override the value assumed for the scale that
# defines the long-range/short-range force-split in the TreePM
# algorithm. The default value is 1.25, in mesh-cells.
#
# - RCUT:
# This can be used to override the maximum radius in which the
# short-range tree-force is evaluated (in case the TreePM algorithm
# is used). The default value is 4.5, given in mesh-cells.
#
# - DOUBLEPRECISION:
# This makes the code store and compute internal particle data in
# double precision. Note that output files are nevertheless written
# by converting the particle data to single precision.
#
# - DOUBLEPRECISION_FFTW:
# If this is set, the code will use the double-precision version of
# FFTW, provided the latter has been explicitly installed with a
# "d" prefix, and NOTYPEPREFIX_FFTW is not set. Otherwise the
# single precision version ("s" prefix) is used.
#
# - SYNCHRONIZATION:
# When this is set, particles are kept in a binary hierarchy of
# timesteps and may only increase their timestep if the new
# timestep will put them into synchronization with the higher time
# level.
#
# - FLEXSTEPS:
# This is an alternative to SYNCHRONIZATION. Particle timesteps are
# here allowed to be integer multiples of the minimum timestep that
# occurs among the particles, which in turn is rounded down to the
# nearest power-of-two division of the total simulated
# timespan. This option distributes particles more evenly over
# individual system timesteps, particularly once a simulation has
# run for a while, and may then result in a reduction of work-load
# imbalance losses.
#
# - PSEUDOSYMMETRIC:
# When this option is set, the code will try to "anticipate"
# timestep changes by extrapolating the change of the acceleration
# into the future. This can in certain idealized cases improve the
# long-term integration behaviour of periodic orbits, but should
# make little or no difference in most real-world applications. May
# only be used together with SYNCHRONIZATION.
#
# - NOSTOP_WHEN_BELOW_MINTIMESTEP:
# If this is activated, the code will not terminate when the
# timestep falls below the value of MinSizeTimestep specified in
# the parameterfile. This is useful for runs where one wants to
# enforce a constant timestep for all particles. This can be done
# by activating this option, and by setting MinSizeTimestep and
# MaxSizeTimestep to an equal value.
#
# - NOPMSTEPADJUSTMENT:
# When this is set, the long-range timestep for the PM-force
# computation (when the TreePM algorithm is used) is always
# determined by MaxSizeTimeStep. Otherwise, it is determined by
# the MaxRMSDisplacement parameter, or MaxSizeTimeStep, whichever
# gives the smaller step.
#
# - HAVE_HDF5:
# If this is set, the code will be compiled with support for input
# and output in the HDF5 format. You need to have the HDF5
# libraries and headers installed on your computer for this option
# to work. The HDF5 format can then be selected as format "3" in
# Gadget's parameterfile.
#
# - OUTPUTPOTENTIAL:
# This will make the code compute gravitational potentials for
# all particles each time a snapshot file is generated. The values
# are then included in the snapshot file. Note that the computation
# of the values of the gravitational potential costs additional CPU.
#
# - OUTPUTACCELERATION:
# This will include the physical acceleration of each particle in
# snapshot files.
#
# - OUTPUTCHANGEOFENTROPY:
# This will include the rate of change of entropy of gas particles
# in snapshot files.
#
# - OUTPUTTIMESTEP:
# This will include the current timesteps of all particles in the
# snapshot files.
#
# - NOGRAVITY
# This switches off gravity. Useful only for pure SPH simulations
# in non-expanding space.
#
# - NOTREERND:
# If this is not set, the tree construction will succeed even when
# there are a few particles at identical locations. This is done by
# `rerouting' particles once the node-size has fallen below 1.0e-3
# of the softening length. When this option is activated, this will
#     be suppressed and the tree construction will always fail if there
# are particles at extremely close coordinates.
#
# - NOTYPEPREFIX_FFTW:
# This is an option that signals that FFTW has been compiled
# without the type-prefix option, i.e. no leading "d" or "s"
# characters are used to access the library.
#
# - LONG_X/Y/Z:
# These options can be used together with PERIODIC and NOGRAVITY only.
# When set, the options define numerical factors that can be used to
#     distort the periodic simulation cube into a parallelepiped of
# arbitrary aspect ratio. This can be useful for idealized SPH tests.
#
# - TWODIMS:
#     This effectively switches off one dimension in SPH, i.e. the code
# follows only 2d hydrodynamics in the xy-, yz-, or xz-plane. This
# only works with NOGRAVITY, and if all coordinates of the third
# axis are exactly equal. Can be useful for idealized SPH tests.
#
# - SPH_BND_PARTICLES:
# If this is set, particles with a particle-ID equal to zero do not
# receive any SPH acceleration. This can be useful for idealized
# SPH tests, where these particles represent fixed "walls".
#
# - NOVISCOSITYLIMITER:
# If this is set, the code will not try to put an upper limit on
# the viscous force in case an implausibly high pair-wise viscous
# force (which may lead to a particle 'reflection' in case of poor
# timestepping) should arise. Note: For proper settings of the
# timestep parameters, this situation should not arise.
#
# - COMPUTE_POTENTIAL_ENERGY:
# When this option is set, the code will compute the gravitational
# potential energy each time a global statistics is computed. This
# can be useful for testing global energy conservation.
#
# - LONGIDS:
# If this is set, the code assumes that particle-IDs are stored as
# 64-bit long integers. This is only really needed if you want to
# go beyond ~2 billion particles.
#
# - ISOTHERM_EQS:
# This special option makes the gas behave like an isothermal gas
# with equation of state P = cs^2 * rho. The sound-speed cs is set by
#     the thermal energy per unit mass in the initial conditions,
# i.e. cs^2=u. If the value for u is zero, then the initial gas
# temperature in the parameter file is used to define the sound speed
# according to cs^2 = 3/2 kT/mp, where mp is the proton mass.
#
# - ADAPTIVE_GRAVSOFT_FORGAS:
# When this option is set, the gravitational softening lengths used for
#     gas particles are tied to their SPH smoothing length. This can be useful
# for dissipative collapse simulations. The option requires the setting
# of UNEQUALSOFTENINGS.
#
# - SELECTIVE_NO_GRAVITY:
# This can be used for special computations where one wants to
# exclude certain particle types from receiving gravitational
# forces. The particle types that are excluded in this fashion are
#     specified by a bit mask, in the same way as for the PLACEHIGHRESREGION
# option.
#
# - FORCETEST:
# This can be set to check the force accuracy of the code. The
# option needs to be set to a number between 0 and 1 (e.g. 0.01),
# which is taken to specify a random fraction of particles for
# which at each timestep forces by direct summation are
# computed. The normal tree-forces and the correct direct
# summation forces are collected in a file. Note that the
# simulation itself is unaffected by this option, but it will of
# course run much(!) slower, especially if
# FORCETEST*NumPart*NumPart >> NumPart. Note: Particle IDs must
# be set to numbers >=1 for this to work.
#
# - MAKEGLASS
# This option can be used to generate a glass-like particle
# configuration. The value assigned gives the particle load,
# which is initially generated as a Poisson sample and then
# evolved towards a glass with the sign of gravity reversed.
#
#-----------------------------------------------------------------------
diff --git a/src/Makefile b/src/Makefiles/Makefile.chemistry3
similarity index 94%
copy from src/Makefile
copy to src/Makefiles/Makefile.chemistry3
index 4852c42..eb2bb8b 100644
--- a/src/Makefile
+++ b/src/Makefiles/Makefile.chemistry3
@@ -1,894 +1,872 @@
#----------------------------------------------------------------------
# From the list below, please activate/deactivate the options that
# apply to your run. If you modify any of these options, make sure
# that you recompile the whole code by typing "make clean; make".
#
# Look at end of file for a brief guide to the compile-time options.
#----------------------------------------------------------------------
#--------------------------------------- Basic operation mode of code
-OPT += -DPERIODIC
-#OPT += -DUNEQUALSOFTENINGS
+#OPT += -DPERIODIC
+OPT += -DUNEQUALSOFTENINGS
#--------------------------------------- Things that are always recommended
OPT += -DPEANOHILBERT
OPT += -DWALLCLOCK
#--------------------------------------- TreePM Options
#OPT += -DPMGRID=128
#OPT += -DPLACEHIGHRESREGION=3
#OPT += -DENLARGEREGION=1.2
#OPT += -DASMTH=1.25
#OPT += -DRCUT=4.5
#--------------------------------------- Single/Double Precision
#OPT += -DDOUBLEPRECISION
#OPT += -DDOUBLEPRECISION_FFTW
#--------------------------------------- Time integration options
OPT += -DSYNCHRONIZATION
#OPT += -DFLEXSTEPS
#OPT += -DPSEUDOSYMMETRIC
OPT += -DNOSTOP_WHEN_BELOW_MINTIMESTEP
#OPT += -DNOPMSTEPADJUSTMENT
-
+OPT += -DSYNCHRONIZE_NGB_TIMESTEP # ngb particles have synchronized time steps
+OPT += -DTIMESTEP_UPDATE_FOR_FEEDBACK # timestep is updated when feedback occurs
+OPT += -DIMPROVED_TIMESTEP_CRITERION_FORGAS
#--------------------------------------- Output
OPT += -DADVANCEDSTATISTICS
OPT += -DADVANCEDCPUSTATISTICS
OPT += -DSYSTEMSTATISTICS
OPT += -DBLOCK_SKIPPING
#OPT += -DHAVE_HDF5
#OPT += -DOUTPUTPOTENTIAL
#OPT += -DOUTPUTACCELERATION
#OPT += -DOUTPUTCHANGEOFENTROPY
#OPT += -DOUTPUTTIMESTEP
#OPT += -DOUTPUTERADSTICKY
#OPT += -DOUTPUTERADFEEDBACK
#OPT += -DOUTPUTENERGYFLUX
-#OPT += -DOUTPUTOPTVAR1
-#OPT += -DOUTPUTOPTVAR2
-#OPT += -DOUTPUTSTELLAR_PROP
+#OPT += -DOUTPUTOPTVAR
+OPT += -DOUTPUTSTELLAR_PROP
#--------------------------------------- Things for special behaviour
-OPT += -DNOGRAVITY
+#OPT += -DNOGRAVITY
#OPT += -DNOTREERND
#OPT += -DNOTYPEPREFIX_FFTW
#OPT += -DLONG_X=60
#OPT += -DLONG_Y=5
#OPT += -DLONG_Z=0.2
#OPT += -DTWODIMS
#OPT += -DSPH_BND_PARTICLES
#OPT += -DNOVISCOSITYLIMITER
-#OPT += -DCOMPUTE_POTENTIAL_ENERGY
+OPT += -DCOMPUTE_POTENTIAL_ENERGY
#OPT += -DLONGIDS
-OPT += -DISOTHERM_EQS
+#OPT += -DISOTHERM_EQS
#OPT += -DADAPTIVE_GRAVSOFT_FORGAS
#OPT += -DSELECTIVE_NO_GRAVITY=2+4+8+16
#OPT += -DAVOIDNUMNGBPROBLEM
#OPT += -DLIMIT_DVEL=1.0
#OPT += -DOTHERINFO
#OPT += -DDOMAIN_AT_ORIGIN
OPT += -DNO_NEGATIVE_PRESSURE
#OPT += -DCOMPUTE_VELOCITY_DISPERSION
#OPT += -DCYLINDRICAL_SYMMETRY
OPT += -DWRITE_ALL_MASSES
-#OPT += -DENTROPYPRED
-#OPT += -DCOUNT_ACTIVE_PARTICLES
+OPT += -DENTROPYPRED
+OPT += -DCOUNT_ACTIVE_PARTICLES
OPT += -DRANDOMSEED_AS_PARAMETER
#OPT += -DDETAILED_CPU
#OPT += -DDETAILED_CPU_GRAVITY
#OPT += -DDETAILED_CPU_DOMAIN
-#OPT += -DDETAILED_CPU_OUTPUT_IN_GRAVTREE
-#OPT += -DDETAILED_CPU_OUTPUT_IN_HYDRA
-#OPT += -DDETAILED_CPU_OUTPUT_IN_DENSITY
-#OPT += -DDETAILED_CPU_OUTPUT_IN_STARS_DENSITY
-#OPT += -DDETAILED_CPU_OUTPUT_IN_CHIMIE
+#OPT += -DDETAILED_OUTPUT_IN_GRAVTREE
#OPT += -DSPLIT_DOMAIN_USING_TIME
-#OPT += -DCOSMICTIME
+OPT += -DCOSMICTIME
OPT += -DONLY_MASTER_READ_EWALD
#OPT += -DPNBODY
#OPT += -DPNBODY_OUTPUT_POS
#OPT += -DPNBODY_OUTPUT_VEL
#OPT += -DPNBODY_OUTPUT_NUM
#OPT += -DPNBODY_OUTPUT_MASS
#OPT += -DPNBODY_OUTPUT_TYPE
#OPT += -DPNBODY_OUTPUT_ENERGY
#OPT += -DPNBODY_OUTPUT_DENSITY
#OPT += -DPNBODY_OUTPUT_HSML
#OPT += -DPNBODY_OUTPUT_METALS
#--------------------------------------- Physical processes
-#OPT += -DCOOLING
+OPT += -DCOOLING
#OPT += -DIMPLICIT_COOLING_INTEGRATION
#OPT += -DDO_NO_USE_HYDROGEN_MASSFRAC_IN_COOLING
#OPT += -DHEATING
#OPT += -DHEATING_PE # photo-electric heating
-#OPT += -DSFR
-#OPT += -DCOMPUTE_SFR_ENERGY
-#OPT += -DSFR_NEG_DIV_ONLY
+OPT += -DSFR
+OPT += -DCOMPUTE_SFR_ENERGY
+OPT += -DSFR_NEG_DIV_ONLY
-#OPT += -DSTELLAR_PROP
+OPT += -DSTELLAR_PROP
-#OPT += -DCHIMIE # need stellar prop
-#OPT += -DCHIMIE_THERMAL_FEEDBACK
-#OPT += -DCHIMIE_COMPUTE_THERMAL_FEEDBACK_ENERGY
+OPT += -DCHIMIE # need stellar prop
+OPT += -DCHIMIE_THERMAL_FEEDBACK
+OPT += -DCHIMIE_COMPUTE_THERMAL_FEEDBACK_ENERGY
#OPT += -DCHIMIE_KINETIC_FEEDBACK
#OPT += -DCHIMIE_COMPUTE_KINETIC_FEEDBACK_ENERGY
-#OPT += -DCHIMIE_EXTRAHEADER
-#OPT += -DCHIMIE_INPUT_ALL
-#OPT += -DCHIMIE_MC_SUPERNOVAE
+OPT += -DCHIMIE_EXTRAHEADER
+OPT += -DCHIMIE_INPUT_ALL
+OPT += -DCHIMIE_MC_SUPERNOVAE
#OPT += -DFEEDBACK
#OPT += -DFEEDBACK_WIND
#--------------------------------------- multiphase
#OPT += -DMULTIPHASE
#OPT += -DNO_HYDRO_FOR_GAS # do not use hydro routine (at all)
#OPT += -DNO_DENSITY_FOR_STICKY # do not compute density in sticky (need to be done in sfr)
#OPT += -DPHASE_MIXING # need MULTIPHASE : enable phase mixing
#OPT += -DCOLDGAS_CYCLE # need MULTIPHASE and PHASE_MIXING
#OPT += -DEXTERNAL_FLUX
#OPT += -DSTELLAR_FLUX
#OPT += -DCOUNT_COLLISIONS # count sticky collisions
#--------------------------------------- Outer potential
#OPT += -DOUTERPOTENTIAL
#OPT += -DNFW
#OPT += -DPISOTHERM
#OPT += -DPLUMMER
#OPT += -DMIYAMOTONAGAI
#OPT += -DCORIOLIS
#--------------------------------------- Testing and Debugging options
#OPT += -DFORCETEST=0.1
#OPT += -DWITH_ID_IN_HYDRA
#OPT += -DPARTICLE_FLAG
#OPT += -DOUTPUT_EVERY_TIMESTEP
#OPT += -DOUTPUT_COOLING_FUNCTION
-#OPT += -DCHECK_BLOCK_ORDER
-#OPT += -DCHECK_ENTROPY_SIGN
-#OPT += -DCHECK_TYPE_DURING_IO
-#OPT += -DCHECK_ID_CORRESPONDENCE
+OPT += -DCHECK_BLOCK_ORDER
+OPT += -DCHECK_ENTROPY_SIGN
+OPT += -DCHECK_TYPE_DURING_IO
+OPT += -DCHECK_ID_CORRESPONDENCE
#--------------------------------------- Glass making
#OPT += -DMAKEGLASS=262144
-
-#--------------------------------------- Artificial Conductivity
-#OPT += -DART_CONDUCTIVITY
-#OPT += -DOUTPUT_CONDUCTIVITY
-#OPT += -DOUTPUTOPTVAR1
-#OPT += -DOUTPUTOPTVAR2
-
#--------------------------------------- Agn
#OPT += -DBUBBLES
#OPT += -DAGN_ACCRETION
#OPT += -DAGN_FEEDBACK
#OPT += -DAGN_USE_ANGULAR_MOMENTUM
#OPT += -DAGN_HEATING
#OPT += -DBONDI_ACCRETION
#OPT += -DUSE_BONDI_POWER
-#--------------------------------------- Driven Turbulence
-OPT += -DAB_TURB
-
-#--------------------------------------- Artificial Viscosity
-
-OPT += -DART_VISCO_CD
-#OPT += -DART_VISCO_MM
-#OPT += -DART_VISCO_RO
-
-#--------------------------------------- SPH flavour
-OPT += -DPRESSURE_ENTROPY_FORMULATION
-OPT += -DENTROPYPRED
-
#----------------------------------------------------------------------
# Here, select compile environment for the target machine. This may need
# adjustment, depending on your local system. Follow the examples to add
# additional target platforms, and to get things properly compiled.
#----------------------------------------------------------------------
#--------------------------------------- Select some defaults
CC = mpicc # sets the C-compiler
OPTIMIZE = -O2 -Wall -g # sets optimization and warning flags
MPICHLIB = -lmpich
#--------------------------------------- Select target computer
SYSTYPE="obscalc"
#SYSTYPE="callisto-intel"
#SYSTYPE="bg1"
#SYSTYPE="obsds"
#SYSTYPE="leo_openmpi"
#SYSTYPE="leo_mpich2shm"
#SYSTYPE="graphor0"
#SYSTYPE="obsrevaz"
#SYSTYPE="regor_openmpigcc"
#SYSTYPE="regor_mvapich2gcc"
#SYSTYPE="meso_mpich2"
#SYSTYPE="meso"
#SYSTYPE="revaz/local"
#SYSTYPE="revaz/local_mpich2"
#SYSTYPE="horizon3_mpich1"
#SYSTYPE="horizon3_mpich2"
#SYSTYPE="horizon3"
#SYSTYPE="LUXOR"
#SYSTYPE="MPA"
#SYSTYPE="Mako"
#SYSTYPE="Regatta"
#SYSTYPE="RZG_LinuxCluster"
#SYSTYPE="RZG_LinuxCluster-gcc"
#SYSTYPE="OpteronMPA"
#SYSTYPE="OPA-Cluster32"
#SYSTYPE="OPA-Cluster64"
#--------------------------------------- Adjust settings for target computer
# module add openmpi-x86_64
ifeq ($(SYSTYPE),"obscalc")
CC = mpicc
OPTIMIZE =
GSL_INCL =
GSL_LIBS =
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB =
NO_FFTW_LIB = "yes"
PY_INCL = -I/usr/include/python2.6/
PY_LIB = -lpython2.6
endif
ifeq ($(SYSTYPE),"callisto-intel")
CC = mpicc
OPTIMIZE =
GSL_INCL = -I/u1/yrevaz/local/gsl-intel/include
GSL_LIBS = -L/u1/yrevaz/local/gsl-intel/lib
FFTW_INCL= -I/u1/yrevaz/local/fftw-2.1.5-intel/include
FFTW_LIBS= -L/u1/yrevaz/local/fftw-2.1.5-intel/lib
MPICHLIB =
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"bg1")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/yrevaz/local/include
GSL_LIBS = -L/home/yrevaz/local/lib
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB =
NO_FFTW_LIB = "yes"
endif
ifeq ($(SYSTYPE),"obsds")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL =
GSL_LIBS =
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB =
NO_FFTW_LIB = "yes"
endif
ifeq ($(SYSTYPE),"graphor0")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/epfl/revaz/local/include
GSL_LIBS = -L/home/epfl/revaz/local/lib
FFTW_INCL= -I/home/epfl/revaz/local/include
FFTW_LIBS= -L/home/epfl/revaz/local/lib
MPICHLIB = -L/home/epfl/revaz/local/openmpi/lib -lmpi
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"obsrevaz")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL =
GSL_LIBS =
FFTW_INCL= -I/home/revaz/local/include/
FFTW_LIBS= -L/home/revaz/local/lib/
MPICHLIB = -lmpi
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"regor_openmpigcc")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/usr/include
GSL_LIBS = -L/usr/lib64/
FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
MPICHLIB = -lmpi
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
+NO_FFTW_LIB = "yes"
endif
ifeq ($(SYSTYPE),"regor_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/usr/include
GSL_LIBS = -L/usr/lib64/
FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
MPICHLIB = -L/home/revaz/local/mpich2-1.0.6nemesis/lib/ -lmpich
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
endif
ifeq ($(SYSTYPE),"regor_mvapich2gcc")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/usr/include
GSL_LIBS = -L/usr/lib64/
FFTW_INCL= -I/home/revaz/local_mvapich2gcc/include/
FFTW_LIBS= -L/home/revaz/local_mvapich2gcc/lib/
MPICHLIB = -L/cvos/shared/apps/ofed/1.2.5.3/mpi/gcc/mvapich2-0.9.8-15/lib/ -lmpich
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
endif
ifeq ($(SYSTYPE),"leo_openmpi")
CC = mpicc
OPTIMIZE = -O3 -Wall -fpack-struct
GSL_INCL = -I/export/revaz/local/include
GSL_LIBS = -L/export/revaz/local/lib
FFTW_INCL= -I/export/revaz/local/include
FFTW_LIBS= -L/export/revaz/local/lib
MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpi
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"leo_mpich2shm")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/export/revaz/local/include
GSL_LIBS = -L/export/revaz/local/lib
FFTW_INCL= -I/export/revaz/local/include
FFTW_LIBS= -L/export/revaz/local/lib
MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"meso_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/horizon1/x86_64_sl4/fftw/2.1.5/include/
FFTW_LIBS= -L/horizon1/x86_64_sl4/fftw/2.1.5/lib/
MPICHLIB = -L/home/revaz/local/mpich2-1.0.3/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"meso")
CC = mpicc
OPTIMIZE = -O3 -g
GSL_INCL =
GSL_LIBS =
FFTW_INCL= -I/horizon1/x86_64_sl4/fftw/2.1.5/include/
FFTW_LIBS= -L/horizon1/x86_64_sl4/fftw/2.1.5/lib/
MPICHLIB =
HDF5INCL =
HDF5LIB =
OPT += -DMESOMACHINE
endif
ifeq ($(SYSTYPE),"revaz/local")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich-1.2.5/ch_p4/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"revaz/local_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich2-1.0.3/lib/ -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"LUXOR")
CC = mpicc
OPTIMIZE = -O3 -Wall -g
#GSL_INCL = -I/home/revaz/local/include
#GSL_LIBS = -L/home/revaz/local/lib
#FFTW_INCL= -I/home/revaz/local/include
#FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich-1.2.7/ch_p4/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"horizon3")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -llam
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"horizon3_mpich1")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/home/revaz/local/mpich-1.2.7/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"horizon3_mpich2")
CC = mpicc
OPTIMIZE = -O3 -Wall -g -fpack-struct
GSL_INCL = -I/home/revaz/local/include
GSL_LIBS = -L/home/revaz/local/lib
FFTW_INCL= -I/home/revaz/local/include
FFTW_LIBS= -L/home/revaz/local/lib
MPICHLIB = -L/usr/local/mpich2-pgi/lib -lmpich
HDF5INCL =
HDF5LIB =
endif
ifeq ($(SYSTYPE),"MPA")
CC = mpicc
OPTIMIZE = -O3 -Wall
GSL_INCL = -I/usr/common/pdsoft/include
GSL_LIBS = -L/usr/common/pdsoft/lib -Wl,"-R /usr/common/pdsoft/lib"
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"OpteronMPA")
CC = mpicc
OPTIMIZE = -O3 -Wall -m64
GSL_INCL = -L/usr/local/include
GSL_LIBS = -L/usr/local/lib
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL = -I/opt/hdf5/include
HDF5LIB = -L/opt/hdf5/lib -lhdf5 -lz -Wl,"-R /opt/hdf5/lib"
endif
ifeq ($(SYSTYPE),"OPA-Cluster32")
CC = mpicc
OPTIMIZE = -O3 -Wall
GSL_INCL = -I/afs/rzg/bc-b/vrs/opteron32/include
GSL_LIBS = -L/afs/rzg/bc-b/vrs/opteron32/lib -Wl,"-R /afs/rzg/bc-b/vrs/opteron32/lib"
FFTW_INCL= -I/afs/rzg/bc-b/vrs/opteron32/include
FFTW_LIBS= -L/afs/rzg/bc-b/vrs/opteron32/lib
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"OPA-Cluster64")
CC = mpicc
OPTIMIZE = -O3 -Wall -m64
GSL_INCL = -I/afs/rzg/bc-b/vrs/opteron64/include
GSL_LIBS = -L/afs/rzg/bc-b/vrs/opteron64/lib -Wl,"-R /afs/rzg/bc-b/vrs/opteron64/lib"
FFTW_INCL= -I/afs/rzg/bc-b/vrs/opteron64/include
FFTW_LIBS= -L/afs/rzg/bc-b/vrs/opteron64/lib
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"Mako")
CC = mpicc # sets the C-compiler
OPTIMIZE = -O3 -march=athlon-mp -mfpmath=sse
GSL_INCL =
GSL_LIBS =
FFTW_INCL=
FFTW_LIBS=
MPICHLIB =
HDF5INCL =
HDF5LIB = -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"Regatta")
CC = mpcc_r
OPTIMIZE = -O5 -qstrict -qipa -q64
GSL_INCL = -I/afs/rzg/u/vrs/gsl_psi64/include
GSL_LIBS = -L/afs/rzg/u/vrs/gsl_psi64/lib
FFTW_INCL= -I/afs/rzg/u/vrs/fftw_psi64/include
FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_psi64/lib -q64 -qipa
MPICHLIB =
HDF5INCL = -I/afs/rzg/u/vrs/hdf5_psi64/include
HDF5LIB = -L/afs/rzg/u/vrs/hdf5_psi64/lib -lhdf5 -lz
endif
ifeq ($(SYSTYPE),"RZG_LinuxCluster")
CC = mpicci
OPTIMIZE = -O3 -ip # Note: Don't use the "-rcd" optimization of Intel's compiler! (causes code crashes)
GSL_INCL = -I/afs/rzg/u/vrs/gsl_linux/include
GSL_LIBS = -L/afs/rzg/u/vrs/gsl_linux/lib -Wl,"-R /afs/rzg/u/vrs/gsl_linux/lib"
FFTW_INCL= -I/afs/rzg/u/vrs/fftw_linux/include
FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_linux/lib
HDF5INCL = -I/afs/rzg/u/vrs/hdf5_linux/include
HDF5LIB = -L/afs/rzg/u/vrs/hdf5_linux/lib -lhdf5 -lz -Wl,"-R /afs/rzg/u/vrs/hdf5_linux/lib"
endif
ifeq ($(SYSTYPE),"RZG_LinuxCluster-gcc")
CC = mpiccg
OPTIMIZE = -Wall -g -O3 -march=pentium4
GSL_INCL = -I/afs/rzg/u/vrs/gsl_linux_gcc3.2/include
GSL_LIBS = -L/afs/rzg/u/vrs/gsl_linux_gcc3.2/lib -Wl,"-R /afs/rzg/u/vrs/gsl_linux_gcc3.2/lib"
FFTW_INCL= -I/afs/rzg/u/vrs/fftw_linux_gcc3.2/include
FFTW_LIBS= -L/afs/rzg/u/vrs/fftw_linux_gcc3.2/lib
HDF5INCL = -I/afs/rzg/u/vrs/hdf5_linux/include
HDF5LIB = -L/afs/rzg/u/vrs/hdf5_linux/lib -lhdf5 -lz -Wl,"-R /afs/rzg/u/vrs/hdf5_linux/lib"
endif
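# If HAVE_HDF5 was not activated in OPT above, the findstring test below
# clears the HDF5 include and library paths again.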
ifneq (HAVE_HDF5,$(findstring HAVE_HDF5,$(OPT)))
HDF5INCL =
HDF5LIB =
endif
OPTIONS = $(OPTIMIZE) $(OPT)
EXEC = Gadget2
OBJS = main.o run.o predict.o begrun.o endrun.o global.o \
timestep.o init.o restart.o io.o \
accel.o read_ic.o ngb.o \
system.o allocate.o density.o \
gravtree.o hydra.o driftfac.o \
domain.o allvars.o potential.o \
forcetree.o peano.o gravtree_forcetest.o \
pm_periodic.o pm_nonperiodic.o longrange.o \
cooling.o agn_heating.o phase.o sticky.o outerpotential.o starformation.o \
agn_feedback.o bubbles.o bondi_accretion.o chimie.o stars_density.o cosmictime.o \
- pnbody.o ab_turb.o art_visc.o chemistry.o
+ pnbody.o ab_turb.o art_visc.o sigvel.o
INCL = allvars.h proto.h tags.h Makefile
CFLAGS = $(OPTIONS) $(GSL_INCL) $(FFTW_INCL) $(HDF5INCL) $(PY_INCL)
ifeq (NOTYPEPREFIX_FFTW,$(findstring NOTYPEPREFIX_FFTW,$(OPT))) # fftw installed with type prefix?
FFTW_LIB = $(FFTW_LIBS) -lrfftw_mpi -lfftw_mpi -lrfftw -lfftw
else
ifeq (DOUBLEPRECISION_FFTW,$(findstring DOUBLEPRECISION_FFTW,$(OPT)))
FFTW_LIB = $(FFTW_LIBS) -ldrfftw_mpi -ldfftw_mpi -ldrfftw -ldfftw
else
FFTW_LIB = $(FFTW_LIBS) -lsrfftw_mpi -lsfftw_mpi -lsrfftw -lsfftw
endif
endif
ifeq ($(NO_FFTW_LIB),"yes")
FFTW_LIB =
endif
LIBS = $(HDF5LIB) -g $(MPICHLIB) $(GSL_LIBS) -lgsl -lgslcblas -lm $(FFTW_LIB) $(PY_LIB)
$(EXEC): $(OBJS)
$(CC) $(OBJS) $(LIBS) -o $(EXEC)
$(OBJS): $(INCL)
clean:
rm -f $(OBJS) $(EXEC)
#-----------------------------------------------------------------------
#
# Brief guide to compile-time options of the code. More information
# can be found in the code documentation.
#
# - PERIODIC:
# Set this if you want to have periodic boundary conditions.
#
# - UNEQUALSOFTENINGS:
# Set this if you use particles with different gravitational
# softening lengths.
#
# - PEANOHILBERT:
# This is a tuning option. When set, the code will bring the
# particles after each domain decomposition into Peano-Hilbert
# order. This improves cache utilization and performance.
#
# - WALLCLOCK:
# If set, a wallclock timer is used by the code to measure internal
# time consumption (see cpu-log file). Otherwise, a timer that
# measures consumed processor ticks is used.
#
# - PMGRID:
# This enables the TreePM method, i.e. the long-range force is
# computed with a PM-algorithm, and the short range force with the
# tree. The parameter has to be set to the size of the mesh that
# should be used, (e.g. 64, 96, 128, etc). The mesh dimensions need
# not necessarily be a power of two. Note: If the simulation is
#     not in a periodic box, then an FFT method for vacuum boundaries is
# employed, using an actual mesh with dimension twice(!) that
# specified by PMGRID.
#
# - PLACEHIGHRESREGION:
# If this option is set (will only work together with PMGRID), then
# the long range force is computed in two stages: One Fourier-grid
# is used to cover the whole simulation volume, allowing the
#     computation of the long-range force. A second Fourier mesh is
# placed on the region occupied by "high-resolution" particles,
# allowing the computation of an intermediate scale force. Finally,
# the force on short scales is computed with the tree. This
# procedure can be useful for "zoom-simulations", provided the
# majority of particles (the high-res particles) are occupying only
# a small fraction of the volume. To activate this option, the
# parameter needs to be set to an integer bit mask that encodes the
# particle types that make up the high-res particles.
# For example, if types 0, 1, and 4 form the high-res
# particles, set the parameter to PLACEHIGHRESREGION=19, because
# 2^0 + 2^1 + 2^4 = 19. The spatial region covered by the high-res
# grid is determined automatically from the initial conditions.
# Note: If a periodic box is used, the high-res zone may not intersect
# the box boundaries.
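#     (Purely as an illustration, not from the code: in C the same mask can
#     be written as
#         int highres_mask = (1 << 0) | (1 << 1) | (1 << 4);   /* = 19 */
#     and a particle of type t then counts as high-res when
#     (highres_mask >> t) & 1 is non-zero.)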
#
# - ENLARGEREGION:
# The spatial region covered by the high-res zone has a fixed size
# during the simulation, which initially is set to the smallest
# region that encompasses all high-res particles. Normally, the
# simulation will be interrupted if high-res particles leave this
# region in the course of the run. However, by setting this
# parameter to a value larger than one, the size of the high-res
# region can be expanded, providing a buffer region. For example,
# setting it to 1.4 will enlarge its side-length by 40% (it remains
# centered on the high-res particles). Hence, with this setting, the
# high-res region may expand or move by a limited amount.
# Note: If SYNCHRONIZATION is activated, the code will be able to
# continue even if high-res particles leave the initial high-res
# grid. In this case, the code will update the size and position of
# the grid that is placed onto the high-resolution region
# automatically. To prevent this from potentially happening every
# single PM step, one should nevertheless assign a value slightly
# larger than 1 to ENLARGEREGION.
#
# - ASMTH:
# This can be used to override the value assumed for the scale that
# defines the long-range/short-range force-split in the TreePM
# algorithm. The default value is 1.25, in mesh-cells.
#
# - RCUT:
# This can be used to override the maximum radius in which the
# short-range tree-force is evaluated (in case the TreePM algorithm
# is used). The default value is 4.5, given in mesh-cells.
#
# - DOUBLEPRECISION:
# This makes the code store and compute internal particle data in
# double precision. Note that output files are nevertheless written
# by converting the particle data to single precision.
#
# - DOUBLEPRECISION_FFTW:
#     If this is set, the code will use the double-precision version of
#     FFTW, provided the latter has been explicitly installed with a
# "d" prefix, and NOTYPEPREFIX_FFTW is not set. Otherwise the
# single precision version ("s" prefix) is used.
#
# - SYNCHRONIZATION:
# When this is set, particles are kept in a binary hierarchy of
# timesteps and may only increase their timestep if the new
# timestep will put them into synchronization with the higher time
# level.
#
# - FLEXSTEPS:
# This is an alternative to SYNCHRONIZATION. Particle timesteps are
# here allowed to be integer multiples of the minimum timestep that
# occurs among the particles, which in turn is rounded down to the
#     nearest power-of-two division of the total simulated
# timespan. This option distributes particles more evenly over
# individual system timesteps, particularly once a simulation has
# run for a while, and may then result in a reduction of work-load
# imbalance losses.
#
# - PSEUDOSYMMETRIC:
# When this option is set, the code will try to "anticipate"
# timestep changes by extrapolating the change of the acceleration
# into the future. This can in certain idealized cases improve the
# long-term integration behaviour of periodic orbits, but should
# make little or no difference in most real-world applications. May
# only be used together with SYNCHRONIZATION.
#
# - NOSTOP_WHEN_BELOW_MINTIMESTEP:
# If this is activated, the code will not terminate when the
# timestep falls below the value of MinSizeTimestep specified in
# the parameterfile. This is useful for runs where one wants to
# enforce a constant timestep for all particles. This can be done
# by activating this option, and by setting MinSizeTimestep and
# MaxSizeTimestep to an equal value.
#
# - NOPMSTEPADJUSTMENT:
# When this is set, the long-range timestep for the PM-force
# computation (when the TreePM algorithm is used) is always
# determined by MaxSizeTimeStep. Otherwise, it is determined by
# the MaxRMSDisplacement parameter, or MaxSizeTimeStep, whichever
# gives the smaller step.
#
# - HAVE_HDF5:
# If this is set, the code will be compiled with support for input
# and output in the HDF5 format. You need to have the HDF5
# libraries and headers installed on your computer for this option
# to work. The HDF5 format can then be selected as format "3" in
# Gadget's parameterfile.
#
# - OUTPUTPOTENTIAL:
# This will make the code compute gravitational potentials for
# all particles each time a snapshot file is generated. The values
# are then included in the snapshot file. Note that the computation
# of the values of the gravitational potential costs additional CPU.
#
# - OUTPUTACCELERATION:
# This will include the physical acceleration of each particle in
# snapshot files.
#
# - OUTPUTCHANGEOFENTROPY:
# This will include the rate of change of entropy of gas particles
# in snapshot files.
#
# - OUTPUTTIMESTEP:
# This will include the current timesteps of all particles in the
# snapshot files.
#
# - NOGRAVITY
# This switches off gravity. Useful only for pure SPH simulations
# in non-expanding space.
#
# - NOTREERND:
# If this is not set, the tree construction will succeed even when
# there are a few particles at identical locations. This is done by
# `rerouting' particles once the node-size has fallen below 1.0e-3
# of the softening length. When this option is activated, this will
#     be suppressed and the tree construction will always fail if there
# are particles at extremely close coordinates.
#
# - NOTYPEPREFIX_FFTW:
# This is an option that signals that FFTW has been compiled
# without the type-prefix option, i.e. no leading "d" or "s"
# characters are used to access the library.
#
# - LONG_X/Y/Z:
# These options can be used together with PERIODIC and NOGRAVITY only.
# When set, the options define numerical factors that can be used to
#     distort the periodic simulation cube into a parallelepiped of
# arbitrary aspect ratio. This can be useful for idealized SPH tests.
#
# - TWODIMS:
#     This effectively switches off one dimension in SPH, i.e. the code
# follows only 2d hydrodynamics in the xy-, yz-, or xz-plane. This
# only works with NOGRAVITY, and if all coordinates of the third
# axis are exactly equal. Can be useful for idealized SPH tests.
#
# - SPH_BND_PARTICLES:
# If this is set, particles with a particle-ID equal to zero do not
# receive any SPH acceleration. This can be useful for idealized
# SPH tests, where these particles represent fixed "walls".
#
# - NOVISCOSITYLIMITER:
# If this is set, the code will not try to put an upper limit on
# the viscous force in case an implausibly high pair-wise viscous
# force (which may lead to a particle 'reflection' in case of poor
# timestepping) should arise. Note: For proper settings of the
# timestep parameters, this situation should not arise.
#
# - COMPUTE_POTENTIAL_ENERGY:
# When this option is set, the code will compute the gravitational
# potential energy each time a global statistics is computed. This
# can be useful for testing global energy conservation.
#
# - LONGIDS:
# If this is set, the code assumes that particle-IDs are stored as
# 64-bit long integers. This is only really needed if you want to
# go beyond ~2 billion particles.
#
# - ISOTHERM_EQS:
# This special option makes the gas behave like an isothermal gas
# with equation of state P = cs^2 * rho. The sound-speed cs is set by
#     the thermal energy per unit mass in the initial conditions,
# i.e. cs^2=u. If the value for u is zero, then the initial gas
# temperature in the parameter file is used to define the sound speed
# according to cs^2 = 3/2 kT/mp, where mp is the proton mass.
#
# - ADAPTIVE_GRAVSOFT_FORGAS:
# When this option is set, the gravitational softening lengths used for
#     gas particles are tied to their SPH smoothing length. This can be useful
# for dissipative collapse simulations. The option requires the setting
# of UNEQUALSOFTENINGS.
#
# - SELECTIVE_NO_GRAVITY:
# This can be used for special computations where one wants to
# exclude certain particle types from receiving gravitational
# forces. The particle types that are excluded in this fashion are
#     specified by a bit mask, in the same way as for the PLACEHIGHRESREGION
# option.
#
# - FORCETEST:
# This can be set to check the force accuracy of the code. The
# option needs to be set to a number between 0 and 1 (e.g. 0.01),
# which is taken to specify a random fraction of particles for
# which at each timestep forces by direct summation are
# computed. The normal tree-forces and the correct direct
# summation forces are collected in a file. Note that the
# simulation itself is unaffected by this option, but it will of
# course run much(!) slower, especially if
# FORCETEST*NumPart*NumPart >> NumPart. Note: Particle IDs must
# be set to numbers >=1 for this to work.
#
# - MAKEGLASS
# This option can be used to generate a glass-like particle
# configuration. The value assigned gives the particle load,
# which is initially generated as a Poisson sample and then
# evolved towards a glass with the sign of gravity reversed.
#
#-----------------------------------------------------------------------
diff --git a/src/Makefile b/src/Makefiles/Makefile.periodic_turbulence
similarity index 100%
copy from src/Makefile
copy to src/Makefiles/Makefile.periodic_turbulence
diff --git a/src/Makefiles/Readme b/src/Makefiles/Readme
new file mode 100644
index 0000000..d10a616
--- /dev/null
+++ b/src/Makefiles/Readme
@@ -0,0 +1,5 @@
+
+
+Makefile.chemistry3 : chemistry + DSYNCHRONIZE_NGB_TIMESTEP
+ DTIMESTEP_UPDATE_FOR_FEEDBACK
+ DIMPROVED_TIMESTEP_CRITERION_FORGAS
diff --git a/src/TODO b/src/TODO
index b6e328c..1329ed2 100644
--- a/src/TODO
+++ b/src/TODO
@@ -1,1202 +1,1277 @@
+
+New Feedback + cooling
+----------------------
+
+
+1) compute
+
+ a_n, (du/dt)_n : ok, no change
+
+
+2) set feedback at the correct time
+
+ now :
+
+ 1) put energy in SphP[i].DeltaEgySpec during chimie loop
+ 2) apply feedback using : chimie_apply_thermal_feedback
+
+ new :
+
+ 1) ok
+ 2) chimie_apply_thermal_feedback must be moved after timestep (or inside) # TODO (normally ok, needs to be checked)
+
+
+3) compute new Delta t, taking into account feedback # TODO (ok)
+
+ compute signal velocity TIMESTEP_UPDATE_FOR_FEEDBACK (see the sketch at the end of this note)
+
+ sigvel.c # need the correct pressure (depending on feedback)
+
+ get_sigvel
+ get_sigvel_evaluate
+
+ - init.c ok
+ - init in sigvel.c !!! initialize when SphP[j].DeltaEgySpec=-1
+
+ SphP[p].DeltaEgySpec=0; /* unflag */ (this is bad)
+
+4) kick
+
+ ok
+
+5) add the feedback # TODO
+
+ !!! particles that received the feedback need to be active !!! # TODO
+
+
+
+
+!!! make_particles_actives !!! needs to be done better
+ ---> need entropy pred
+
+!!! check what I had not understood ---> timestep.c
+
+ /* old mid step */
+ tstart = (P[i].Old_Ti_begstep + P[i].Old_Ti_endstep) / 2; /* midpoint of old step */
+ tend = (P[i].Ti_begstep + P[i].Ti_endstep) / 2; /* midpoint of new step */
+
+
+!!! a correct kick must be done for EntropyPred
+!!! a correct drift must be done for EntropyPred
+
+
+
+
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!!!! need to make particles affected by feedback active !!!!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
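+
+A minimal sketch of the intended timestep update (illustrative only: the
+names and the Courant-like factor below are not the code's actual API; the
+real criterion lives in timestep.c / sigvel.c):
+
+    #include <math.h>
+
+    #define GAMMA (5.0/3)
+
+    /* sound speed after the feedback energy du has been added to u */
+    double csnd_after_feedback(double u, double du)
+    {
+      return sqrt(GAMMA * (GAMMA - 1.0) * (u + du));
+    }
+
+    /* signal-velocity (Courant-like) limit: dt <= C * h / v_sig,
+       here taking v_sig ~ the post-feedback sound speed */
+    double timestep_limit(double h, double csnd, double courant_fac)
+    {
+      return courant_fac * h / csnd;
+    }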
+
+
+
+
+
+
+
multiple IMF :
----------
- add parameters
- add code
- add maxlivetime
---> !!! if all the stars explode,
the mass of some star particles becomes very small,
or even negative.
--> look at the tables, check the mass loss for a given mass.
new cooling :
----------
--- test the cooling with M=4, we should get Tcool << Tdyn    to do
- add implicit cooling integration    ok
+ tested: ok, looks good
- check ComovingIntegrationOn    to test
- test on a cluster    to do
- test on a cosmological box    to do
we can remove :
DtEgySpecRadSph
DtEntropyRadSph
new sfr :
----------
- create stars of smaller mass, and thus keep the gas particle    ok
- use all the criteria, Jeans as well...    to do
- test a static model --> decreasing exponential law    ok
- convert the whole particle when it becomes too small    ok
- write the StP outputs    ok
- take the energy of the stars into account...    ok
- energy conservation    ok
  1 proc, StarFormationNStarsFromGas = 1    snap00
  4 proc, StarFormationNStarsFromGas = 1    snap01    small diff.
  1 proc, StarFormationNStarsFromGas = 4    snap02    sfr correct, but
                                                      !!! there are oscillations...
  1 proc, StarFormationNStarsFromGas = 4    snap03    ok, fixed with TreeDomainUpdateFrequency = 0
  4 proc, StarFormationNStarsFromGas = 4
- allow : TreeDomainUpdateFrequency > 0    to do
domain :
----------
- force the star particles to be arranged in block 1    ok
  domain.c    to do (difficult)
  maxloadsph -> maxloadstars
  for PEANOHILBERT : sort the star particles    to do
  peano.c    to do
  domain_sumCost -> add local_DomainCountSt    to do (difficult ?)
- modify StP    ok
All.TotN_stars ok
All.MaxPartStars = All.StarFormationNStarsFromGas*All.MaxPartSph ok
+ All.PartAllocFactor * (All.TotN_stars / NTask)
allocate.c ok
All.MaxPartStars
restart.c ok
init.c ok
domain.c ok
starformation.c ok
rearrange_particle_sequence ok
io.c
read_ic.c ok
ok
- check the outputs    ok
int flag_sfr
int flag_feedback
int flag_cooling
int flag_stellarage
int flag_metals
typelist[6] ok
--> 0 if no particle of this type is present in the block
get_particles_in_block ok
- check the inputs    ok
- start from a file that already contains stars
  --> initial conditions (reads only a part...)    ok
  --> restart
- start from a file that already contains the gas metallicity
  --> initial conditions
  --> restart
- test All.TotN_stars    ok
- N_stars ok
chimie :
----------
- for a star particle, find the nearest gas particles    ok
- hsml/density (gas) must be computed for the stars...    ok
- in do_chimie, start the loop at the first star    ok
  and not at the first particle
- check the correct number of neighbours for the stars    ok
  stars_density.c    ok
  chimie.c    ok
  tests :
  - check the density (write it out)    ok
  - check hsml (write it out)    ok
    -> Density,Hsml are written to the output file    ok
  - check the nearest neighbours    ok
    --> in chimie,
    -> check : Sum m/rho wij = 1, for example...    ok
- initialisation
  use get_nelts to get NELEMENTS, or else check....    to do
- for a stellar particle, compute    to do
  - the total ejected mass    ok
  - the ejected metals    ok
  - the ejected energy    ok
- inject the mass into the nearest neighbours (see the sketch at the end of this section)    ok
- remove the mass and elements ejected by a star    ok
  !!! conservation of energy: kin, pot, int
  !!! conservation of momentum...
  !!! conservation of mass    ok
      ok, to within 1%... to be checked later...
      under less difficult conditions...
<-------- Wed Jul 22 15:09:25 CEST 2009
- better conserve energy during thermal feedback (make the particle active ?)    to do !!! (or kinetic feedback...)
- produce a statistics file that counts what is created...    to do
- inject the thermal energy    ok
- inject the kinetic energy    to do
- use the metallicity for the cooling    ok
- use TreeDomainUpdateFrequency != 0.0    to do
- check in chimie.c -> use of vel or velpred...
- unify    to do
SolarAbun_Fe = 0.001771 pNbody
SolarAbun_Mg = 0.00091245 pNbody
#define FEH_SOLAR 0.001771 Gadget /* 0.00181 */
#define MGH_SOLAR 0.00091245 Gadget
FeHSolar = 0.00181 Gadget cooling !!!
- check restart -> in particular the parameters    to do
  ??? All.ChimieSupernovaEnergy = all.ChimieSupernovaEnergy
- revisit sticky (watch out for the leap frog, for example)    to do
- cooling : we could move docooling completely out of timestep, no ?
!! correct initialisation of : StarEnergyInt, StarEnergyRadSph,    to do
   StarEnergyRadSticky...
   --> also on a restart !!!
   --> should be in All.
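As a side note, the "inject the mass into the nearest neighbours" step above
amounts to a kernel-weighted spread of the ejecta.  A minimal sketch (the
function and argument names are illustrative, not the code's):

    /* Distribute an ejected mass dm over the nngb neighbours of a star,
       using weights w[j] ~ m_j W(r_ij,h) / rho_j, normalised so that
       sum_j w[j] ~ 1 (the check quoted above). */
    void spread_ejecta(int nngb, const double *w, double *ngb_mass, double dm)
    {
      int j;
      for(j = 0; j < nngb; j++)
        ngb_mass[j] += w[j] * dm;
    }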
--------------------------------------------------------------------------------
CPU
--------------------------------------------------------------------------------
timediff(t0, t1) = t1-t0
All.CPU_Total,
run.c : we sum the time differences over the main loop
All.CPU_Gravity,
accel.c :
gravity_tree()
All.CPU_Hydro,
accel.c :
density()
hydro_force();
All.CPU_Domain,
domain.c :
domain_Decomposition() sans peano_hilbert_order
All.CPU_Potential, potential.c :
compute_potential()
All.CPU_Predict, accel.c :
force_update_hmax(); ???
predic.c :
move_particles()
run.c :
find_next_sync_point_and_drift()
All.CPU_TimeLine, timestep.c :
advance_and_find_timesteps()
All.CPU_Snapshot, io.c
savepositions()
All.CPU_TreeConstruction potential.c
force_treebuild()
gravtree.c
force_treebuild()
All.CPU_TreeWalk, gravtree.c !!!
All.CPU_CommSum, gravtree.c
All.CPU_Imbalance, gravtree.c
All.CPU_TreeWalk += sumt / NTask;
All.CPU_Imbalance += sumimbalance / NTask;
All.CPU_CommSum += sumcomm / NTask;
All.CPU_HydCompWalk, chimie.c density.c hydra.c stars_density.c !!!
All.CPU_HydCommSumm, chimie.c density.c hydra.c stars_density.c
All.CPU_HydImbalance, chimie.c density.c hydra.c stars_density.c
All.CPU_HydImbalance += sumimbalance / NTask;
All.CPU_HydCommSumm += sumcomm / NTask;
All.CPU_HydCompWalk += sumt / NTask;
All.CPU_EnsureNgb, density.c !!!
All.CPU_EnsureNgb += sumtimengb / NTask;
stars_density.c
All.CPU_EnsureNgb += sumtimengb / NTask;
All.CPU_PM, accel.c
long_range_force()
All.CPU_Peano
domain.c : peano_hilbert_order()
we could add :
-------------------
All.CPU_Accel
we must add :
All.CPU_Chimie
All.CPU_StarFormation
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
- sticky bournaud
- run tests
- choice of the sticky --> for a cluster, better within the sph radius...
- avoid computing the density ??? really useful ?
- energy conservation for sticky
----------------------------------------
- improve multiphase computation : only with MULTIPHASE switched on
- test sticky only
* for each particle, find one particle to collide with
* try to do that in the same function as the main hydra (???)
- test sph only
- test dark gas
- test combination of all
check
* indep. of number of proc
##########################################
MULTIPHASE
##########################################
- part of particles behave differently (sticky)
GAS_SPH -> normal sph
GAS_STICKY -> sticky
GAS_DARK -> no (weak) interaction
functions
multi_hydro_evaluate
sticky_evaluate
ngb_treefind_phase_pairs
io.c ok
init.c ok
read_ic.c ok
run.c ok
density.c ok (rien de particulier)
ngb.c
ngb_treefind_pairs hydra.c
every body sees every body
ngb_treefind_variable density.c
find density and hsml !!!! hsml is then needed to find colliding particles
!!!! but may be problematic if only sph part. are present in hsml !!!
ngb_treefind_phase_pairs bondi_accretion.c
hydra.c !!!!!!!!!!!!
multi_hydro_evaluate(i, 0);
hydro_evaluate(i, 0);
sticky_evaluate(i, 0);
timestep.c
global.c
cooling.c
starformation.c
bubbles.c
##########################################
PHASE_MIXING
##########################################
proto.h
phase_mixing
run.c
phase.c
##########################################
COLDGAS_CYCLE (need MULTIPHASE)
##########################################
- compute cycling or not
---------------------------------------
COLDGAS_CYCLE :parameters
-------------------------
ColdGasCycleTransitionTime;
ColdGasCycleTransitionParameter;
allvars.h ok
begrun.c ok
phase.c ok
##########################################
EXTERNAL_FLUX : really only external flux
##########################################
begrun.c
cooling.c
phase.c
allvars.h
HeatingPeSolarEnergyDensity
HeatingExternalFLuxEnergyDensity
##########################################
STELLAR_FLUX : only stellar flux
##########################################
allvars.h
init.c
gravtree.c
forcetree.c
begrun.c
HeatingPeSolarEnergyDensity;
HeatingPeLMRatioGas;
HeatingPeLMRatioHalo;
HeatingPeLMRatioDisk;
HeatingPeLMRatioBulge;
HeatingPeLMRatioStars;
HeatingPeLMRatioBndry;
HeatingPeLMRatio[6];
####################################################
# sticky_evaluate(i, 0)
####################################################
1) check who can interact with whom ?
- loop over active particles
- ngb_treefind_pairs ! find all particles in h_i ... check !!!
here we could use a different function, depending on the type
- if(P[j].Ti_endstep == All.Ti_Current) only active particles ! ensure symmetry,
really necessary ???
- if(SphP[j].Phase == GAS_STICKY) ok, but may be done with "ngb_treefind_pairs"
- if(SphP[j].StickyFlag) SphP[i].StickyFlag = 1; in init.c
SphP[i].StickyFlag is determined in phase.c
2) what is modified by sticky_evaluate
P[target].Vel[k] <------ here, we change the velocity !!!!!!!
P[j].Vel[k]
SphP[target].EgySpecRadSticky <------ here, we count the energy !!!!!!!
SphP[j].EgySpecRadSticky
SphP[target].StickyCollisionNumber++;
SphP[j].StickyCollisionNumber++;
SphP[target].HydroAccel[k] = 0;
SphP[target].StickyFlag = 0;
SphP[target].DtEntropy = 0; /* force dt entropy to zero */
SphP[j].DtEgySpecFeedback = 0; /* should not be there */
!!!! we change the velocity and count energy further
tests:
- only sticky : be sure that particles interact symmetrically... (see the sketch below)
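For reference, a perfectly inelastic ("sticky") encounter conserves momentum
while the kinetic energy lost is what gets accumulated as radiated energy.
A minimal 1d sketch of that bookkeeping (illustrative, not the actual
sticky_evaluate code):

    /* both particles leave with the centre-of-mass velocity;
       the returned value is the kinetic energy lost (to be radiated) */
    double sticky_collide(double mi, double *vi, double mj, double *vj)
    {
      double vcm  = (mi * (*vi) + mj * (*vj)) / (mi + mj);
      double ekin = 0.5 * mi * (*vi) * (*vi) + 0.5 * mj * (*vj) * (*vj);
      *vi = vcm;
      *vj = vcm;
      return ekin - 0.5 * (mi + mj) * vcm * vcm;
    }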
####################################################
# problems
####################################################
(!!) if Entropy < 0, the gas is considered GAS_DARK (output)
     -> check this carefully...
(!!) if a particle oscillates frequently between dark and visible, it may
     collide too often... StickyFlag=1 automatically when returning to sticky
####################################################
#
# comoving integration
#
####################################################
velocity
-------
gadget-1 : w=sqrt(a)*xp
gadget-2 : u=a*a*xp
################################################
# conversion (comoving -> physical')
################################################
r_p = x_c*a = Pos*a
v_p = x_c*H(a)*a + v_c * a = Pos*H(a)*a + Vel/a (in the code)
v_p = x_c*H(a)*a + v_c * a = Pos*H(a)*a + Vel*sqrt(a) (in the snapshot)
m_p = m_c = Mass
u_p = u_c (need to compute from entropy)
rho_p = rho_c / a^3 = Density/a^3
A = Ap
Pp = Pc * a^(-3 * gamma)
c_c = c_p a^(3(gamma-1))/2
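A minimal sketch applying the snapshot-convention relations above (the helper
is hypothetical, not part of the code):

    #include <math.h>

    /* convert comoving snapshot quantities to physical ones at expansion
       factor a; hubble_a is H(a), vel follows the snapshot convention
       w = sqrt(a) * xp */
    void comoving_to_physical(double a, double hubble_a,
                              const double pos[3], const double vel[3],
                              double density,
                              double pos_p[3], double vel_p[3], double *rho_p)
    {
      int k;
      for(k = 0; k < 3; k++)
        {
          pos_p[k] = pos[k] * a;
          vel_p[k] = pos[k] * hubble_a * a + vel[k] * sqrt(a);
        }
      *rho_p = density / (a * a * a);
    }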
################################################
# conversion (physical' -> physical)
################################################
pos_p = pos_p'/h
mass_p= mass_p'/h
t_p = t_p'/h
vel_p = vel_p'
u_p = u_p'
rho_p = rho_p'*h^2
* where :
starformation.c
---------------
1) .Density
if (SphP[i].Density*a3inv > All.ThresholdDensity)
-> All.ThresholdDensity in physical terms
2) .Vel
!!! FEEDBACK_WIND uses Vel ---> bad !
chimie.c
---------------
1) .Density
LocalSysState.EnergyInt1 += P[i].Mass * SphP[i].EntropyPred / (GAMMA_MINUS1) * pow(SphP[i].Density*a3inv, GAMMA_MINUS1);
LocalSysState.EnergyInt2 += P[i].Mass * SphP[i].EntropyPred / (GAMMA_MINUS1) * pow(SphP[i].Density*a3inv, GAMMA_MINUS1);
EgySpec = SphP[i].EntropyPred / GAMMA_MINUS1 * pow(SphP[i].Density*a3inv, GAMMA_MINUS1);
DeltaEntropy = GAMMA_MINUS1*NewEgySpec/pow(SphP[i].Density*a3inv, GAMMA_MINUS1) - SphP[i].EntropyPred;
EgySpec = SphP[j].EntropyPred / GAMMA_MINUS1 * pow(SphP[j].Density*a3inv, GAMMA_MINUS1);
DeltaEntropy = GAMMA_MINUS1*NewEgySpec/pow(SphP[j].Density*a3inv, GAMMA_MINUS1) - SphP[j].EntropyPred;
2) .Vel
!!! chimie_compute_energy_kin
!!! chimie_apply_wind
!!! vj2 += SphP[j].VelPred[k]*SphP[j].VelPred[k];
!!! vi2 += vel[k]*vel[k];
timestep.c
---------------
to be properly redone
cooling.c
---------------
1) Density
CoolingForOne
DoCooling
lambda
2) Vel
no
#################################################
# steps for the entropy
predict.c
dt_drift = get_drift_factor(time0, time1);
dt_gravkick = get_gravkick_factor(time0, time1); -> for v = a_grav * dt
dt_hydrokick = get_hydrokick_factor(time0, time1); -> for v = a_hydro * dt
dt_entr = (time1 - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval;
SphP[i].EntropyPred = (SphP[i].Entropy + SphP[i].DtEntropy * dt_entr);
timestep.c
dt_gravkick = get_gravkick_factor(tstart, tend);
dt_hydrokick = get_hydrokick_factor(tstart, tend);
dt_entr = (tend - tstart) * All.Timebase_interval;
SphP[i].Entropy += SphP[i].DtEntropy * dt_entr;
- what is DtEntropy ?
hydra.c
dtEntropy += 0.5 * hfc_visc * vdotr2;
SphP[i].DtEntropy *= GAMMA_MINUS1 / (hubble_a2 * pow(SphP[i].Density, GAMMA_MINUS1));
and that's all !
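Since everything above is expressed through the entropic function A, the
conversion to a specific internal energy (used e.g. in the chimie energy
bookkeeping quoted earlier) is worth keeping in mind; a minimal sketch:

    #include <math.h>

    #define GAMMA        (5.0/3)
    #define GAMMA_MINUS1 (GAMMA - 1)

    /* u = A / (gamma-1) * rho^(gamma-1) */
    double u_from_entropy(double entropy, double rho)
    {
      return entropy / GAMMA_MINUS1 * pow(rho, GAMMA_MINUS1);
    }

    /* A = (gamma-1) * u / rho^(gamma-1) */
    double entropy_from_u(double u, double rho)
    {
      return GAMMA_MINUS1 * u / pow(rho, GAMMA_MINUS1);
    }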
-------------------------------------------------------
Tue Dec 21 22:37:07 CET 2010
-------------------------------------------------------
dA/dt = SphP[i].DtEntropy * hubble_a
==>
dA/dt * ds/hubble_a
is written in the code as
SphP[i].DtEntropy * (tend - tstart) * All.Timebase_interval
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
I followed the evolution of
SphP[i].DtEntropy * hubble_a
in the case of an isolated system
and of the same system in comoving coordinates,
the result is identical !!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
/* Conversion into program time unit */
Cp->coeff_z[2][2] = Cp->coeff_z[2][2] + u_lt;
double u_lt;
double UnitLength_in_kpc; ok
double UnitMass_in_Msol; ok
u_lt = -log10( 4.7287e11*sqrt(pow(UnitLength_in_kpc,3)/UnitMass_in_Msol));
treeasph
unit_mass 1.e10 Msol
unit_length 1. kpc
u_lt = -log10( 4.7287d11*SQRT(unit_length**3/unit_mass))
u_lt : unit_local_time ? computed from G=1 ???
u_lt = 4728700.0 unit time in yr
where does it come from :
4.7287d11 =
time unit = 4.72e6 yr
All.G = GRAVITY / pow(All.UnitLength_in_cm, 3) * All.UnitMass_in_g * pow(All.UnitTime_in_s, 2);
GRAVITY 6.672e-8
UnitTime_in_s = sqrt ( pow(UnitLength_in_cm, 3)/GRAVITY /UnitMass_in_g )
SEC_PER_MEGAYEAR=3.155e13
UnitLength_in_cm =3.085678e+21
UnitVelocity_in_cm_per_s =100000.0
UnitTime_in_s = UnitLength_in_cm / UnitVelocity_in_cm_per_s;
UnitTime_in_Megayears = UnitTime_in_s / SEC_PER_MEGAYEAR;
u_lt = -log10(UnitTime_in_Megayears/1e6)
5.3262507580106977
c Conversion into program time unit
coeff_z(3,3) = coeff_z(3,3) + u_lt
minlivetime maxlivetime
treeasph units : 0.828579 5.64403e+08
4.72
==> Myr : 3.9108 2.6639e+09
first star : 16.8457 = 79.511 Myr
first SN : 17.6926 = 83.50 Myr
_________________________________________________________________
minlivetime maxlivetime
gadget units : 0.00400612 2.72885e+06
978.027
==> Myr 3.918 2.668e+08
6.96579
first star : 7.04278 = 75.28
first SN : 7.06
isolated
--------
StP[m].FormationTime : Myr    formation time of the SSP
minlivetime : Myr    minimum lifetime
maxlivetime : Myr    maximum lifetime
tmin : Myr    minimum ejection time
tmax : Myr    maximum ejection time
t1 : Myr    beginning of the timestep
t2 : Myr    end of the timestep
the chimie step is done when
1) tmin < t1 && tmax >= t1    beginning of the timestep within [tmin,tmax]
   tmin <= t2 && tmax > t2    end of the timestep within [tmin,tmax]
   tmin >= t1 && tmax <= t2   t1,t2 outside of [tmin,tmax]
start of sfr
0.708
################################################################################
# converting h units -> physical units
################################################################################
pos_p = pos_p'/h
mass_p= mass_p'/h
t_p = t_p'/h
rho_p = rho_p'*h^2
e_p = e_p'/h
vel_p = vel_p'
u_p = u_p'
# parts to modify :
* starformation.c
#################
All.ThresholdDensity -> h units
All.StarFormationDensity -> h units
--> nothing to do
* cooling.c
#################
--> lambda
nHcgs -> nHcgs*h^2 ok
All.mumh/All.Boltzmann    ok, independent of h
T= All.mumh/All.Boltzmann * GAMMA_MINUS1 * Entropy    ok, independent of h
l = cooling_function_with_metals    ok, depends only on T and Fe
!! l must be converted to h units
l = l * h**3
l = l * nH2    if nH2 is in h units    yes
l = l * h**3
l = l * nH2h**2    if nH2 was expressed in normal units    ok HERE
* chimie.c
#################
* !!! all the time-related quantities
minlivetime = star_lifetime(StP[m].Metal[NELEMENTS-1],Cp->Mmax*SOLAR_MASS/All.UnitMass_in_g);
maxlivetime = star_lifetime(StP[m].Metal[NELEMENTS-1],Cp->Mmin*SOLAR_MASS/All.UnitMass_in_g);
-> must be converted to h units
minlivetime = minlivetime*h
maxlivetime = maxlivetime*h
then :
comparison of t01 t02 -> ok
# star_mass_from_age takes non-h values
m2 = star_mass_from_age(StP[m].Metal[NELEMENTS-1],t01/h); !!!
m1 = star_mass_from_age(StP[m].Metal[NELEMENTS-1],t02/h); !!!
m1, m2 will be in non-h units
Total_mass_ejection -> M0/h
EjectedMass = EjectedMass*h
the rest should be fine...
__________> to be checked... and to think about...
# tests
- take a simulation with units without h,
  -> convert it to units with h
pos = pos*h
vel = vel
mass = mass*h
u = u
rho = rho/h**2
-> convert the parameters (which must be in h units in the code)
pos = pos*h
rho = rho/h**2
t = t/h
e = e*h
HubbleParam ok
BoxSize ok
TimeBetSnapshot ok
TimeOfFirstSnapshot ok
TimeBetStatistics ok
MaxSizeTimestep ok
MinSizeTimestep ok
All.HubbleParam
StickyDensity    no -> convert in the code    ok
StarFormationTime    no -> convert in the code    ok
StarFormationDensity    no -> convert in the code    ok
ChimieMaxSizeTimestep    no -> convert in the code    ok
ChimieWindTime    no -> convert in the code    ok
ChimieThermalTime    no -> convert in the code    ok
ChimieSupernovaEnergy    no -> convert in the code    ok
SofteningGas ok
SofteningHalo
SofteningDisk
SofteningBulge
SofteningStars
SofteningBndry
SofteningGasMaxPhys ok
SofteningHaloMaxPhys
SofteningDiskMaxPhys
SofteningBulgeMaxPhys
SofteningStarsMaxPhys
SofteningBndryMaxPhys
################################################################################
# only the master reads chimie
################################################################################
for each chimie file to read:
---------------------------------
read_chimie(filename,nf)
-> Cps[it]
-> Elts[it]
allocation of
MassFracSNIIs[it]
EjectedMasss[it]
SingleMassFracSNIIs[it]
SingleEjectedMasss[it]
Cps
allocated later
Elts
MassFracSNIIs[it] ->
EjectedMasss[it]
SingleMassFracSNIIs[it]
SingleEjectedMasss[it]
variables initialized during read_chimie
Cps
!!!
/* allocate memory for elts */
Elts
!!!
/* allocate memory */
MassFracSNIIs[it] = malloc((Cps[it].nelts+2) * sizeof(double));
EjectedMasss[it] = malloc((Cps[it].nelts+2) * sizeof(double));
SingleMassFracSNIIs[it] = malloc((Cps[it].nelts+2) * sizeof(double));
SingleEjectedMasss[it] = malloc((Cps[it].nelts+2) * sizeof(double));
/* injected metals */
-- elts
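If only the master task reads the chimie tables, the other tasks presumably
have to receive them afterwards; a generic sketch of that pattern (purely
illustrative, not the code's actual routine):

    #include <mpi.h>
    #include <stdlib.h>

    /* task 0 is assumed to have read *table (length *n) already;
       the other tasks allocate a buffer and receive a copy */
    void share_table(double **table, int *n, int thistask)
    {
      MPI_Bcast(n, 1, MPI_INT, 0, MPI_COMM_WORLD);
      if(thistask != 0)
        *table = malloc((*n) * sizeof(double));
      MPI_Bcast(*table, *n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    }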
diff --git a/src/accel.c b/src/accel.c
index 5d93e67..9cb34ac 100644
--- a/src/accel.c
+++ b/src/accel.c
@@ -1,126 +1,132 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>
#include "allvars.h"
#include "proto.h"
/*! \file accel.c
* \brief driver routine to carry out force computation
*/
/*! This routine computes the accelerations for all active particles.
* First, the long-range PM force is computed if the TreePM algorithm is
* used and a "big" PM step is done. Next, the gravitational tree forces
* are computed. This also constructs the tree, if needed.
*
* If gas particles are present, the density-loop for active SPH particles
* is carried out. This includes an iteration on the correct number of
* neighbours. Finally, the hydrodynamical forces are added.
*/
void compute_accelerations(int mode)
{
double tstart, tend;
#ifdef DETAILED_CPU
double t0,t1;
t0 = second();
#endif
if(ThisTask == 0)
{
printf("Start force computation...\n");
fflush(stdout);
}
#ifdef PMGRID
if(All.PM_Ti_endstep == All.Ti_Current)
{
tstart = second();
long_range_force();
tend = second();
All.CPU_PM += timediff(tstart, tend);
}
#endif
tstart = second(); /* measure the time for the full force computation */
gravity_tree(); /* computes gravity accel. */
if(All.TypeOfOpeningCriterion == 1 && All.Ti_Current == 0)
gravity_tree(); /* For the first timestep, we redo it
* to allow usage of relative opening
* criterion for consistent accuracy.
*/
tend = second();
All.CPU_Gravity += timediff(tstart, tend);
#ifdef FORCETEST
gravity_forcetest();
#endif
if(All.TotN_gas > 0)
{
if(ThisTask == 0)
{
printf("Start density computation...\n");
fflush(stdout);
}
#ifndef NO_DENSITY_FOR_GAS
tstart = second();
density(0); /* computes density, and pressure */
tend = second();
All.CPU_Hydro += timediff(tstart, tend);
tstart = second();
force_update_hmax(); /* tell the tree nodes the new SPH smoothing length such that they are guaranteed to hold the correct max(Hsml) */
tend = second();
All.CPU_Predict += timediff(tstart, tend);
#endif
#ifndef NO_HYDRO_FOR_GAS
if(ThisTask == 0)
{
printf("Start hydro-force computation...\n");
fflush(stdout);
}
tstart = second();
hydro_force(); /* adds hydrodynamical accelerations and computes viscous entropy injection */
tend = second();
All.CPU_Hydro += timediff(tstart, tend);
#endif
}
#ifdef AB_TURB
add_turb_accel();
#endif
+
+
+#ifdef TIMESTEP_UPDATE_FOR_FEEDBACK
+ get_sigvel();
+#endif
+
if(ThisTask == 0)
{
printf("force computation done.\n");
fflush(stdout);
}
#ifdef DETAILED_CPU
t1 = second();
All.CPU_Accel += timediff(t0, t1);
#endif
}
diff --git a/src/allvars.h b/src/allvars.h
index 7292ba2..a196c4e 100644
--- a/src/allvars.h
+++ b/src/allvars.h
@@ -1,1998 +1,2002 @@
/*! \file allvars.h
* \brief declares global variables.
*
* This file declares all global variables. Further variables should be added here, and declared as
* 'extern'. The actual existence of these variables is provided by the file 'allvars.c'. To produce
* 'allvars.c' from 'allvars.h', do the following:
*
* - Erase all #define's, typedef's, and enum's
* - add #include "allvars.h", delete the #ifndef ALLVARS_H conditional
* - delete all keywords 'extern'
* - delete all struct definitions enclosed in {...}, e.g.
* "extern struct global_data_all_processes {....} All;"
* becomes "struct global_data_all_processes All;"
*/
#ifndef ALLVARS_H
#define ALLVARS_H
#include <stdio.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_integration.h>
#include "tags.h"
#define GADGETVERSION "2.0" /*!< code version string */
#define TIMEBASE (1<<28) /*!< The simulated timespan is mapped onto the integer interval [0,TIMESPAN],
* where TIMESPAN needs to be a power of 2. Note that (1<<28) corresponds to 2^28
*/
#define MAXTOPNODES 200000 /*!< Maximum number of nodes in the top-level tree used for domain decomposition */
typedef long long peanokey; /*!< defines the variable type used for Peano-Hilbert keys */
#define BITS_PER_DIMENSION 18 /*!< Bits per dimension available for Peano-Hilbert order.
Note: If peanokey is defined as type int, the allowed maximum is 10.
If 64-bit integers are used, the maximum is 21 */
#define PEANOCELLS (((peanokey)1)<<(3*BITS_PER_DIMENSION)) /*!< The number of different Peano-Hilbert cells */
#define RNDTABLE 3000 /*!< gives the length of a table with random numbers, refreshed at every timestep.
This is used to allow application of random numbers to a specific particle
in a way that is independent of the number of processors used. */
#define MAX_REAL_NUMBER 1e37
#define MIN_REAL_NUMBER 1e-37
#define MAXLEN_FILENAME 100 /*!< Maximum number of characters for filenames (including the full path) */
#ifdef ISOTHERM_EQS
#define GAMMA (1.0) /*!< index for isothermal gas */
#else
#define GAMMA (5.0/3) /*!< adiabatic index of simulated gas */
#endif
#define GAMMA_MINUS1 (GAMMA-1)
#define HYDROGEN_MASSFRAC 0.76 /*!< mass fraction of hydrogen, relevant only for radiative cooling */
/* Some physical constants in cgs units */
#define GRAVITY 6.672e-8 /*!< Gravitational constant (in cgs units) */
#define SOLAR_MASS 1.989e33
#define SOLAR_LUM 3.826e33
#define RAD_CONST 7.565e-15
#define AVOGADRO 6.0222e23
#define BOLTZMANN 1.3806e-16
#define GAS_CONST 8.31425e7
#define C 2.9979e10
#define PLANCK 6.6262e-27
#define CM_PER_MPC 3.085678e24
#define PROTONMASS 1.6726e-24
#define ELECTRONMASS 9.10953e-28
#define THOMPSON 6.65245e-25
#define ELECTRONCHARGE 4.8032e-10
#define HUBBLE 3.2407789e-18 /* in h/sec */
#define YEAR_IN_SECOND 31536000.0 /* year in sec */
#define FEH_SOLAR 0.00181 /* used only if cooling with metal is on and chimie is off */
#define PI 3.1415926535897931
#define TWOPI 6.2831853071795862
/* Some conversion factors */
#define SEC_PER_MEGAYEAR 3.155e13
#define SEC_PER_YEAR 3.155e7
#ifndef ASMTH
#define ASMTH 1.25 /*!< ASMTH gives the scale of the short-range/long-range force split in units of FFT-mesh cells */
#endif
#ifndef RCUT
#define RCUT 4.5 /*!< RCUT gives the maximum distance (in units of the scale used for the force split) out to
which short-range forces are evaluated in the short-range tree walk. */
#endif
#define MAX_NGB 20000 /*!< defines maximum length of neighbour list */
#define MAXLEN_OUTPUTLIST 500 /*!< maximum number of entries in list of snapshot output times */
#define DRIFT_TABLE_LENGTH 1000 /*!< length of the lookup table used to hold the drift and kick factors */
#ifdef COSMICTIME
#define COSMICTIME_TABLE_LENGTH 1000 /*!< length of the lookup table used for the cosmic time computation */
#endif
#define MAXITER 1000 /*!< maximum number of steps for SPH neighbour iteration */
#ifdef DOUBLEPRECISION /*!< If defined, the variable type FLOAT is set to "double", otherwise to "float" */
#define FLOAT double
#else
#define FLOAT float
#endif
#ifndef TWODIMS
#define NUMDIMS 3 /*!< For 3D-normalized kernel */
#define KERNEL_COEFF_1 2.546479089470 /*!< Coefficients for SPH spline kernel and its derivative */
#define KERNEL_COEFF_2 15.278874536822
#define KERNEL_COEFF_3 45.836623610466
#define KERNEL_COEFF_4 30.557749073644
#define KERNEL_COEFF_5 5.092958178941
#define KERNEL_COEFF_6 (-15.278874536822)
#define NORM_COEFF 4.188790204786 /*!< Coefficient for kernel normalization. Note: 4.0/3 * PI = 4.188790204786 */
#else
#define NUMDIMS 2 /*!< For 2D-normalized kernel */
#define KERNEL_COEFF_1 (5.0/7*2.546479089470) /*!< Coefficients for SPH spline kernel and its derivative */
#define KERNEL_COEFF_2 (5.0/7*15.278874536822)
#define KERNEL_COEFF_3 (5.0/7*45.836623610466)
#define KERNEL_COEFF_4 (5.0/7*30.557749073644)
#define KERNEL_COEFF_5 (5.0/7*5.092958178941)
#define KERNEL_COEFF_6 (5.0/7*(-15.278874536822))
#define NORM_COEFF M_PI /*!< Coefficient for kernel normalization. */
#endif
#ifdef MULTIPHASE
#define GAS_SPH 0
#define GAS_STICKY 1
#define GAS_DARK 2
#endif
#if defined(SFR) || defined(STELLAR_PROP)
#define ST 1
#endif
#ifdef CHIMIE
#define NELEMENTS 5
#define MAXNELEMENTS 64
#define FIRST_ELEMENT "Fe"
#define FE 0
#endif
#ifdef COOLING
#define COOLING_NMETALICITIES 9
#define COOLING_NTEMPERATURES 171
#endif
#ifdef COMPUTE_VELOCITY_DISPERSION
#define VELOCITY_DISPERSION_SIZE 3
#endif
extern int SetMinTimeStepForActives;
extern int ThisTask; /*!< the rank of the local processor */
extern int NTask; /*!< number of processors */
extern int PTask; /*!< smallest integer such that NTask <= 2^PTask */
extern int NumPart; /*!< number of particles on the LOCAL processor */
extern int N_gas; /*!< number of gas particles on the LOCAL processor */
#if defined(SFR) || defined(STELLAR_PROP)
extern int N_stars; /*!< number of star particles on the LOCAL processor */
#endif
#ifdef MULTIPHASE
extern int N_sph;
extern int N_sticky;
extern int N_stickyflaged;
extern int N_dark;
extern int NumColPotLocal; /*!< local number of potentially collisional particles */
extern int NumColPot; /*!< total number of potentially collisional particles */
extern int NumColLocal; /*!< local number of collisions */
extern int NumCol; /*!< total number of collisions */
extern int NumNoColLocal;
extern int NumNoCol;
#endif
extern long long Ntype[6]; /*!< total number of particles of each type */
extern int NtypeLocal[6]; /*!< local number of particles of each type */
extern int NumForceUpdate; /*!< number of active particles on local processor in current timestep */
extern int NumSphUpdate; /*!< number of active SPH particles on local processor in current timestep */
#ifdef CHIMIE
extern int NumStUpdate;
#endif
#ifdef TESSEL
extern int NumPTUpdate;
#endif
extern double CPUThisRun; /*!< Sums the CPU time for the process (current submission only) */
#ifdef SPLIT_DOMAIN_USING_TIME
extern double CPU_Gravity;
#endif
extern int RestartFlag; /*!< taken from command line used to start code. 0 is normal start-up from
initial conditions, 1 is resuming a run from a set of restart files, while 2
marks a restart from a snapshot file. */
extern char *Exportflag; /*!< Buffer used for flagging whether a particle needs to be exported to another process */
extern int *Ngblist; /*!< Buffer to hold indices of neighbours retrieved by the neighbour search routines */
extern int TreeReconstructFlag; /*!< Signals that a new tree needs to be constructed */
#ifdef SFR
extern int RearrangeParticlesFlag;/*!< Signals that particles must be rearranged */
#endif
extern int Flag_FullStep; /*!< This flag signals that the current step involves all particles */
extern gsl_rng *random_generator; /*!< the employed random number generator of the GSL library */
extern double RndTable[RNDTABLE]; /*!< Hold a table with random numbers, refreshed every timestep */
#ifdef SFR
extern double StarFormationRndTable[RNDTABLE]; /*!< Hold a table with random numbers, refreshed every timestep */
#endif
#ifdef FEEDBACK_WIND
extern double FeedbackWindRndTable[RNDTABLE]; /*!< Hold a table with random numbers, refreshed every timestep */
#endif
#ifdef CHIMIE
extern double ChimieRndTable[RNDTABLE]; /*!< Hold a table with random numbers, refreshed every timestep */
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
extern double ChimieKineticFeedbackRndTable[RNDTABLE]; /*!< Hold a table with random numbers, refreshed every timestep */
#endif
#ifdef AB_TURB
//Ornstein-Uhlenbeck variables
extern double StOUVar;
extern double* StOUPhases;
extern gsl_rng* StRng;
//forcing field in Fourier space
extern double* StAmpl;
extern double* StAka; //phases (real part)
extern double* StAkb; //phases (imag part)
extern double* StMode;
extern int StNModes;
//integertime StTPrev; (yr : ask ?)
extern int StTPrev;
extern double StSolWeightNorm;
#endif
#ifdef PY_INTERFACE
extern int NumPartQ;
extern int N_gasQ;
extern long long NtypeQ[6]; /*!< total number of particles of each type */
extern int NtypeLocalQ[6]; /*!< local number of particles of each type */
extern double DomainCornerQ[3]; /*!< gives the lower left corner of simulation volume */
extern double DomainCenterQ[3]; /*!< gives the center of simulation volume */
extern double DomainLenQ; /*!< gives the (maximum) side-length of simulation volume */
extern double DomainFacQ; /*!< factor used for converting particle coordinates to a Peano-Hilbert mesh covering the simulation volume */
extern int DomainMyStartQ; /*!< first domain mesh cell that resides on the local processor */
extern int DomainMyLastQ; /*!< last domain mesh cell that resides on the local processor */
extern int *DomainStartListQ; /*!< a table that lists the first domain mesh cell for all processors */
extern int *DomainEndListQ; /*!< a table that lists the last domain mesh cell for all processors */
extern double *DomainWorkQ; /*!< a table that gives the total "work" due to the particles stored by each processor */
extern int *DomainCountQ; /*!< a table that gives the total number of particles held by each processor */
extern int *DomainCountSphQ; /*!< a table that gives the total number of SPH particles held by each processor */
extern int *DomainTaskQ; /*!< this table gives for each leaf of the top-level tree the processor it was assigned to */
extern peanokey *DomainKeyBufQ; /*!< this points to a buffer used during the exchange of particle data */
extern int NTopnodesQ; /*!< total number of nodes in top-level tree */
extern int NTopleavesQ; /*!< number of leaves in top-level tree. Each leaf can be assigned to a different processor */
extern void *CommBufferQ; /*!< points to communication buffer, which is used in the domain decomposition, the
parallel tree-force computation, the SPH routines, etc. */
#endif
extern double DomainCorner[3]; /*!< gives the lower left corner of simulation volume */
extern double DomainCenter[3]; /*!< gives the center of simulation volume */
extern double DomainLen; /*!< gives the (maximum) side-length of simulation volume */
extern double DomainFac; /*!< factor used for converting particle coordinates to a Peano-Hilbert mesh covering the simulation volume */
extern int DomainMyStart; /*!< first domain mesh cell that resides on the local processor */
extern int DomainMyLast; /*!< last domain mesh cell that resides on the local processor */
extern int *DomainStartList; /*!< a table that lists the first domain mesh cell for all processors */
extern int *DomainEndList; /*!< a table that lists the last domain mesh cell for all processors */
extern double *DomainWork; /*!< a table that gives the total "work" due to the particles stored by each processor */
extern int *DomainCount; /*!< a table that gives the total number of particles held by each processor */
extern int *DomainCountSph; /*!< a table that gives the total number of SPH particles held by each processor */
extern int *DomainTask; /*!< this table gives for each leaf of the top-level tree the processor it was assigned to */
extern int *DomainNodeIndex; /*!< this table gives for each leaf of the top-level tree the corresponding node of the gravitational tree */
extern FLOAT *DomainTreeNodeLen; /*!< this table gives for each leaf of the top-level tree the side-length of the corresponding node of the gravitational tree */
extern FLOAT *DomainHmax; /*!< this table gives for each leaf of the top-level tree the maximum SPH smoothing length among the particles of the corresponding node of the gravitational tree */
extern struct DomainNODE
{
FLOAT s[3]; /*!< center-of-mass coordinates */
FLOAT vs[3]; /*!< center-of-mass velocities */
FLOAT mass; /*!< mass of node */
#ifdef STELLAR_FLUX
FLOAT starlum; /*!< star luminosity of node */
#endif
#ifdef UNEQUALSOFTENINGS
#ifndef ADAPTIVE_GRAVSOFT_FORGAS
int bitflags; /*!< this bit-field encodes the particle type with the largest softening among the particles of the nodes, and whether there are particles with different softening in the node */
#else
FLOAT maxsoft; /*!< hold the maximum gravitational softening of particles in the
node if the ADAPTIVE_GRAVSOFT_FORGAS option is selected */
#endif
#endif
}
*DomainMoment; /*!< this table stores for each node of the top-level tree corresponding node data from the gravitational tree */
extern peanokey *DomainKeyBuf; /*!< this points to a buffer used during the exchange of particle data */
extern peanokey *Key; /*!< a table used for storing Peano-Hilbert keys for particles */
extern peanokey *KeySorted; /*!< holds a sorted table of Peano-Hilbert keys for all particles, used to construct top-level tree */
extern int NTopnodes; /*!< total number of nodes in top-level tree */
extern int NTopleaves; /*!< number of leaves in top-level tree. Each leaf can be assigned to a different processor */
extern struct topnode_data
{
int Daughter; /*!< index of first daughter cell (out of 8) of top-level node */
int Pstart; /*!< for the present top-level node, this gives the index of the first node in the concatenated list of topnodes collected from all processors */
int Blocks; /*!< for the present top-level node, this gives the number of corresponding nodes in the concatenated list of topnodes collected from all processors */
int Leaf; /*!< if the node is a leaf, this gives its number when all leaves are traversed in Peano-Hilbert order */
peanokey Size; /*!< number of Peano-Hilbert mesh-cells represented by top-level node */
peanokey StartKey; /*!< first Peano-Hilbert key in top-level node */
long long Count; /*!< counts the number of particles in this top-level node */
}
#ifdef PY_INTERFACE
*TopNodesQ,
#endif
*TopNodes; /*!< points to the root node of the top-level tree */
extern double TimeOfLastTreeConstruction; /*!< holds what it says, only used in connection with FORCETEST */
/* variables for input/output, usually only used on process 0 */
extern char ParameterFile[MAXLEN_FILENAME]; /*!< file name of parameterfile used for starting the simulation */
extern FILE *FdInfo; /*!< file handle for info.txt log-file. */
extern FILE *FdLog; /*!< file handle for log.txt log-file. */
extern FILE *FdEnergy; /*!< file handle for energy.txt log-file. */
#ifdef SYSTEMSTATISTICS
extern FILE *FdSystem;
#endif
extern FILE *FdTimings; /*!< file handle for timings.txt log-file. */
extern FILE *FdCPU; /*!< file handle for cpu.txt log-file. */
#ifdef FORCETEST
extern FILE *FdForceTest; /*!< file handle for forcetest.txt log-file. */
#endif
#ifdef SFR
extern FILE *FdSfr; /*!< file handle for sfr.txt log-file. */
#endif
#ifdef CHIMIE
extern FILE *FdChimie; /*!< file handle for chimie log-file. */
#endif
#ifdef MULTIPHASE
extern FILE *FdPhase; /*!< file handle for phase.txt log-file. */
extern FILE *FdSticky; /*!< file handle for sticky.txt log-file. */
#endif
#ifdef AGN_ACCRETION
extern FILE *FdAccretion; /*!< file handle for accretion.txt log-file. */
#endif
#ifdef BONDI_ACCRETION
extern FILE *FdBondi; /*!< file handle for bondi.txt log-file. */
#endif
#ifdef BUBBLES
extern FILE *FdBubble; /*!< file handle for bubble.txt log-file. */
#endif
extern double DriftTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological drift factors */
extern double GravKickTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological kick factor for gravitational forces */
extern double HydroKickTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological kick factor for hydrodynmical forces */
#ifdef COSMICTIME
extern double CosmicTimeTable[COSMICTIME_TABLE_LENGTH]; /*!< table for the computation of cosmic time */
#endif
extern void *CommBuffer; /*!< points to communication buffer, which is used in the domain decomposition, the
parallel tree-force computation, the SPH routines, etc. */
/*! This structure contains data which is the SAME for all tasks (mostly code parameters read from the
* parameter file). Holding this data in a structure is convenient for writing/reading the restart file, and
* it allows the introduction of new global variables in a simple way. The only thing to do is to introduce
* them into this structure.
*/
extern struct global_data_all_processes
{
long long TotNumPart; /*!< total particle numbers (global value) */
long long TotN_gas; /*!< total gas particle number (global value) */
#ifdef PY_INTERFACE
long long TotNumPartQ; /*!< total particle numbers (global value) */
long long TotN_gasQ; /*!< total gas particle number (global value) */
int MaxPartQ; /*!< This gives the maximum number of particles that can be stored on one processor. */
int MaxPartSphQ; /*!< This gives the maximum number of SPH particles that can be stored on one processor. */
int BunchSizeSph;
int BunchSizeDensitySph;
double ForceSofteningQ;
#endif
#if defined(SFR) || defined(STELLAR_PROP)
long long TotN_stars; /*!< total star particle number (global value) */
#endif
#ifdef MULTIPHASE
long long TotN_sph; /*!< total sph particle number (global value) */
long long TotN_sticky; /*!< total sticky particle number (global value) */
long long TotN_stickyflaged; /*!< total sticky flagged particle number (global value) */
long long TotN_stickyactive; /*!< total sticky active particle number (global value) */
long long TotN_dark; /*!< total dark particle number (global value) */
#endif
int MaxPart; /*!< This gives the maximum number of particles that can be stored on one processor. */
int MaxPartSph; /*!< This gives the maximum number of SPH particles that can be stored on one processor. */
#ifdef TESSEL
int MaxgPart;
#endif
#ifdef STELLAR_PROP
int MaxPartStars; /*!< This gives the maximum number of Star particles that can be stored on one processor. */
#endif
double BoxSize; /*!< Boxsize in case periodic boundary conditions are used */
int ICFormat; /*!< selects different versions of IC file-format */
int SnapFormat; /*!< selects different versions of snapshot file-formats */
int NumFilesPerSnapshot; /*!< number of files in multi-file snapshot dumps */
int NumFilesWrittenInParallel;/*!< maximum number of files that may be written simultaneously when
writing/reading restart-files, or when writing snapshot files */
int BufferSize; /*!< size of communication buffer in MB */
int BunchSizeForce; /*!< number of particles fitting into the buffer in the parallel tree-force algorithm */
int BunchSizeDensity; /*!< number of particles fitting into the communication buffer in the density computation */
int BunchSizeHydro; /*!< number of particles fitting into the communication buffer in the SPH hydrodynamical force computation */
int BunchSizeDomain; /*!< number of particles fitting into the communication buffer in the domain decomposition */
#ifdef MULTIPHASE
int BunchSizeSticky; /*!< number of particles fitting into the communication buffer in the Chimie computation */
#endif
#ifdef CHIMIE
int BunchSizeChimie; /*!< number of particles fitting into the communication buffer in the Chimie computation */
int BunchSizeStarsDensity; /*!< number of particles fitting into the communication buffer in the star density computation */
#endif
#ifdef SYNCHRONIZE_NGB_TIMESTEP
int BunchSizeSynchronizeNgBTimestep;
#endif
#ifdef TESSEL
int BunchSizeGhost;
#endif
double PartAllocFactor; /*!< in order to maintain work-load balance, the particle load will usually
NOT be balanced. Each processor allocates memory for PartAllocFactor times
the average number of particles to allow for that */
double TreeAllocFactor; /*!< Each processor allocates a number of nodes which is TreeAllocFactor times
the maximum(!) number of particles. Note: A typical local tree for N
particles needs usually about ~0.65*N nodes. */
#ifdef SFR
double StarsAllocFactor; /*!< Estimated fraction of gas particles that will form stars during the simulation
This allow to reduce the memory stored for stellar particles */
#endif
/* some SPH parameters */
double DesNumNgb; /*!< Desired number of SPH neighbours */
double MaxNumNgbDeviation; /*!< Maximum allowed deviation neighbour number */
double ArtBulkViscConst; /*!< Sets the parameter \f$\alpha\f$ of the artificial viscosity */
#ifdef ART_CONDUCTIVITY
double ArtCondConst; /*!< Sets the parameter \f$\alpha\f$ of the artificial conductivity */
double ArtCondThreshold;
#endif
double InitGasTemp; /*!< may be used to set the temperature in the IC's */
double MinGasTemp; /*!< may be used to set a floor for the gas temperature */
double MinEgySpec; /*!< the minimum allowed temperature expressed as energy per unit mass */
/* Useful constants */
double Boltzmann;
double ProtonMass;
double mumh;
#ifdef COOLING
/* Cooling parameters */
double *logT;
double *logL;
gsl_interp_accel *acc_cooling_spline;
gsl_spline *cooling_spline;
double CoolingType;
char CoolingFile[MAXLEN_FILENAME]; /*!< cooling file */
double CutofCoolingTemperature;
/*
new metal dependent cooling
*/
double CoolingParameters_zmin;
double CoolingParameters_zmax;
double CoolingParameters_slz;
double CoolingParameters_tmin;
double CoolingParameters_tmax;
double CoolingParameters_slt;
double CoolingParameters_FeHSolar;
double CoolingParameters_cooling_data_max;
double CoolingParameters_cooling_data[COOLING_NMETALICITIES][COOLING_NTEMPERATURES];
int CoolingParameters_p;
int CoolingParameters_q;
#endif
#ifdef CHIMIE
int ChimieNumberOfParameterFiles;
#ifdef PYCHEM
char * ChimieParameterFile;
#else
char ChimieParameterFile[MAXLEN_FILENAME]; /*!< chimie parameter file */
#endif
double ChimieSupernovaEnergy;
double ChimieKineticFeedbackFraction;
double ChimieWindSpeed;
double ChimieWindTime;
double ChimieSNIaThermalTime;
double ChimieSNIIThermalTime;
double ChimieMaxSizeTimestep;
#ifdef CHIMIE_ONE_SN_ONLY /*!< explode only one SN */
int ChimieOneSN;
#endif
#endif
#if defined (CHIMIE) || defined (COOLING)
double InitGasMetallicity;
#endif
#if !defined (HEATING_PE)
double HeatingPeElectronFraction;
#endif
#if !defined (HEATING_PE) || defined (STELLAR_FLUX) || defined (EXTERNAL_FLUX)
double HeatingPeSolarEnergyDensity;
#endif
#if !defined (HEATING_PE) || defined (STELLAR_FLUX)
double HeatingPeLMRatioGas;
double HeatingPeLMRatioHalo;
double HeatingPeLMRatioDisk;
double HeatingPeLMRatioBulge;
double HeatingPeLMRatioStars;
double HeatingPeLMRatioBndry;
double HeatingPeLMRatio[6];
#endif
#ifdef EXTERNAL_FLUX
double HeatingExternalFLuxEnergyDensity;
#endif
#ifdef MULTIPHASE
double CriticalTemperature;
double CriticalEgySpec;
double CriticalNonCollisionalTemperature;
double CriticalNonCollisionalEgySpec;
#ifdef COLDGAS_CYCLE
double ColdGasCycleTransitionTime;
double ColdGasCycleTransitionParameter;
#endif
#endif
#ifdef MULTIPHASE
/* some STICKY parameters */
int StickyUseGridForCollisions;
double StickyTime; /*!< Cooling time of sticky particle collision */
double StickyCollisionTime;
double StickyLastCollisionTime;
double StickyIdleTime;
double StickyMinVelocity;
double StickyMaxVelocity;
int StickyGridNx;
int StickyGridNy;
int StickyGridNz;
double StickyGridXmin;
double StickyGridXmax;
double StickyGridYmin;
double StickyGridYmax;
double StickyGridZmin;
double StickyGridZmax;
double StickyLambda;
double StickyDensity;
double StickyDensityPower;
double StickyBetaR;
double StickyBetaT;
double StickyRsphFact; /*!< Fraction of the sph radius used in sticky particle */
#endif
#ifdef OUTERPOTENTIAL
#ifdef NFW
double HaloConcentration;
double HaloMass;
double GasMassFraction;
double NFWPotentialCte;
double Rs;
#endif
#ifdef PLUMMER
double PlummerMass;
double PlummerSoftenning;
double PlummerPotentialCte;
#endif
#ifdef MIYAMOTONAGAI
double MiyamotoNagaiMass;
double MiyamotoNagaiHr;
double MiyamotoNagaiHz;
double MiyamotoNagaiPotentialCte;
#endif
#ifdef PISOTHERM
double Rho0;
double Rc;
double PisothermPotentialCte;
double GasMassFraction;
double PotentialInf;
gsl_function PotentialF;
gsl_integration_workspace *Potentialw;
#endif
#ifdef CORIOLIS
double CoriolisOmegaX;
double CoriolisOmegaY;
double CoriolisOmegaZ;
double CoriolisOmegaX0;
double CoriolisOmegaY0;
double CoriolisOmegaZ0;
#endif
#endif
#ifdef SFR
int StarFormationNStarsFromGas;
double StarFormationStarMass;
double StarFormationMgMsFraction;
int StarFormationType;
double StarFormationCstar;
double StarFormationTime;
double StarFormationDensity;
double StarFormationTemperature;
double ThresholdDensity;
#endif
#ifdef FEEDBACK
double SupernovaTime;
#endif
#ifdef FEEDBACK_WIND
double SupernovaWindEgySpecPerMassUnit;
double SupernovaWindFractionInEgyKin;
double SupernovaWindParameter;
double SupernovaWindSpeed;
double SupernovaWindIntAccuracy;
#endif
#ifdef AGN_ACCRETION
double TimeBetAccretion;
double AccretionRadius;
double AGNFactor;
double MinMTotInRa;
double TimeLastAccretion;
double LastMTotInRa;
double MTotInRa;
double dMTotInRa;
#endif
#ifdef BUBBLES
char BubblesInitFile[MAXLEN_FILENAME]; /*!< bubble file */
double *BubblesTime;
double *BubblesD;
double *BubblesR;
double *BubblesE;
double *BubblesA;
double *BubblesB;
int BubblesIndex;
double BubblesAlpha;
double BubblesBeta;
double BubblesDelta;
double BubblesRadiusFactor;
double EnergyBubbles;
#endif
#ifdef AGN_HEATING
double AGNHeatingPower;
double AGNHeatingRmax;
#endif
#ifdef BONDI_ACCRETION
double BondiEfficiency;
double BondiBlackHoleMass;
double BondiHsmlFactor;
double BondiPower;
double BondiTimeBet;
double BondiTimeLast;
#endif
#if defined (AGN_ACCRETION) || defined (BONDI_ACCRETION)
double LightSpeed;
#endif
#if defined(ART_VISCO_MM)|| defined(ART_VISCO_RO) || defined(ART_VISCO_CD)
double ArtBulkViscConstMin;
double ArtBulkViscConstMax;
double ArtBulkViscConstL;
#endif
#ifdef AB_TURB
double StDecay;
double StEnergy;
double StDtFreq;
double StKmin;
double StKmax;
double StSolWeight;
double StAmplFac;
int StSpectForm;
int StSeed;
#endif
#ifdef SYNCHRONIZE_NGB_TIMESTEP
int NgbFactorTimestep;
#endif
/* some force counters */
long long TotNumOfForces; /*!< counts total number of force computations */
long long NumForcesSinceLastDomainDecomp; /*!< count particle updates since last domain decomposition */
/* system of units */
double G; /*!< Gravity-constant in internal units */
double UnitTime_in_s; /*!< factor to convert internal time unit to seconds/h */
double UnitMass_in_g; /*!< factor to convert internal mass unit to grams/h */
double UnitVelocity_in_cm_per_s; /*!< factor to convert internal velocity unit to cm/sec */
double UnitLength_in_cm; /*!< factor to convert internal length unit to cm/h */
double UnitPressure_in_cgs; /*!< factor to convert internal pressure unit to cgs units (little 'h' still around!) */
double UnitDensity_in_cgs; /*!< factor to convert internal length unit to g/cm^3*h^2 */
double UnitCoolingRate_in_cgs; /*!< factor to convert internal cooling rate to cgs units */
double UnitEnergy_in_cgs; /*!< factor to convert internal energy to cgs units */
double UnitTime_in_Megayears; /*!< factor to convert internal time to megayears/h */
double GravityConstantInternal; /*!< If set to zero in the parameterfile, the internal value of the
gravitational constant is set to the Newtonian value based on the system of
units specified. Otherwise the value provided is taken as internal gravity constant G. */
/* Cosmological parameters */
double Hubble; /*!< Hubble-constant in internal units */
double Omega0; /*!< matter density in units of the critical density (at z=0)*/
double OmegaLambda; /*!< vacuum energy density relative to critical density (at z=0) */
double OmegaBaryon; /*!< baryon density in units of the critical density (at z=0)*/
double HubbleParam; /*!< little `h', i.e. Hubble constant in units of 100 km/s/Mpc. Only needed to get absolute physical values for cooling physics */
/* Code options */
int ComovingIntegrationOn; /*!< flags that comoving integration is enabled */
int PeriodicBoundariesOn; /*!< flags that periodic boundaries are enabled */
int ResubmitOn; /*!< flags that automatic resubmission of job to queue system is enabled */
int TypeOfOpeningCriterion; /*!< determines tree cell-opening criterion: 0 for Barnes-Hut, 1 for relative criterion */
int TypeOfTimestepCriterion; /*!< gives type of timestep criterion (only 0 supported right now - unlike gadget-1.1) */
int OutputListOn; /*!< flags that output times are listed in a specified file */
/* Parameters determining output frequency */
int SnapshotFileCount; /*!< number of snapshot that is written next */
double TimeBetSnapshot; /*!< simulation time interval between snapshot files */
double TimeOfFirstSnapshot; /*!< simulation time of first snapshot files */
double CpuTimeBetRestartFile; /*!< cpu-time between regularly generated restart files */
double TimeLastRestartFile; /*!< cpu-time when last restart-file was written */
double TimeBetStatistics; /*!< simulation time interval between computations of energy statistics */
double TimeLastStatistics; /*!< simulation time when the energy statistics was computed the last time */
int NumCurrentTiStep; /*!< counts the number of system steps taken up to this point */
/* Current time of the simulation, global step, and end of simulation */
double Time; /*!< current time of the simulation */
double TimeBegin; /*!< time of initial conditions of the simulation */
double TimeStep; /*!< difference between current times of previous and current timestep */
double TimeMax; /*!< marks the point of time until the simulation is to be evolved */
/* variables for organizing discrete timeline */
double Timebase_interval; /*!< factor to convert from floating point time interval to integer timeline */
int Ti_Current; /*!< current time on integer timeline */
int Ti_nextoutput; /*!< next output time on integer timeline */
#ifdef FLEXSTEPS
int PresentMinStep; /*!< If FLEXSTEPS is used, particle timesteps are chosen as multiples of the present minimum timestep. */
int PresentMaxStep; /*!< If FLEXSTEPS is used, this is the maximum timestep in timeline units, rounded down to the next power 2 division */
#endif
#ifdef PMGRID
int PM_Ti_endstep; /*!< end of present long-range timestep */
int PM_Ti_begstep; /*!< begin of present long-range timestep */
#endif
/* Placement of PM grids */
#ifdef PMGRID
double Asmth[2]; /*!< Gives the scale of the long-range/short-range split (in mesh-cells), both for the coarse and the high-res mesh */
double Rcut[2]; /*!< Gives the maximum radius for which the short-range force is evaluated with the tree (in mesh-cells), both for the coarse and the high-res mesh */
double Corner[2][3]; /*!< lower left corner of coarse and high-res PM-mesh */
double UpperCorner[2][3]; /*!< upper right corner of coarse and high-res PM-mesh */
double Xmintot[2][3]; /*!< minimum particle coordinates both for coarse and high-res PM-mesh */
double Xmaxtot[2][3]; /*!< maximum particle coordinates both for coarse and high-res PM-mesh */
double TotalMeshSize[2]; /*!< total extension of coarse and high-res PM-mesh */
#endif
/* Variables that keep track of cumulative CPU consumption */
double TimeLimitCPU; /*!< CPU time limit as defined in parameterfile */
double CPU_TreeConstruction; /*!< time spent for constructing the gravitational tree */
double CPU_TreeWalk; /*!< actual time spent for pure tree-walks */
double CPU_Gravity; /*!< cumulative time used for gravity computation (tree-algorithm only) */
double CPU_Potential; /*!< time used for computing gravitational potentials */
double CPU_Domain; /*!< cumulative time spent for domain decomposition */
double CPU_Snapshot; /*!< time used for writing snapshot files */
double CPU_Total; /*!< total cumulative CPU consumption */
double CPU_CommSum; /*!< accumulated time used for communication, and for collecting partial results, in tree-gravity */
double CPU_Imbalance; /*!< cumulative time lost accross all processors as work-load imbalance in gravitational tree */
double CPU_HydCompWalk; /*!< time used for actual SPH computations, including neighbour search */
double CPU_HydCommSumm; /*!< cumulative time used for communication in SPH, and for collecting partial results */
double CPU_HydImbalance; /*!< cumulative time lost due to work-load imbalance in SPH */
double CPU_Hydro; /*!< cumulative time spent for SPH related computations */
#ifdef SFR
double CPU_StarFormation; /*!< cumulative time spent for star formation computations */
#endif
#ifdef CHIMIE
double CPU_Chimie; /*!< cumulative time spent for chimie computations */
double CPU_ChimieDensCompWalk;
double CPU_ChimieDensCommSumm;
double CPU_ChimieDensImbalance;
double CPU_ChimieDensEnsureNgb;
double CPU_ChimieCompWalk;
double CPU_ChimieCommSumm;
double CPU_ChimieImbalance;
#endif
#ifdef MULTIPHASE
double CPU_Sticky; /*!< cumulative time spent for sticky computations */
#endif
double CPU_EnsureNgb; /*!< time needed to iterate on correct neighbour numbers */
double CPU_Predict; /*!< cumulative time to drift the system forward in time, including dynamic tree updates */
double CPU_TimeLine; /*!< time used for determining new timesteps, and for organizing the timestepping, including kicks of active particles */
double CPU_PM; /*!< time used for long-range gravitational force */
double CPU_Peano; /*!< time required to establish Peano-Hilbert order */
#ifdef DETAILED_CPU_DOMAIN
double CPU_Domain_findExtend;
double CPU_Domain_determineTopTree;
double CPU_Domain_sumCost;
double CPU_Domain_findSplit;
double CPU_Domain_shiftSplit;
double CPU_Domain_countToGo;
double CPU_Domain_exchange;
#endif
#ifdef DETAILED_CPU_GRAVITY
double CPU_Gravity_TreeWalk1;
double CPU_Gravity_TreeWalk2;
double CPU_Gravity_CommSum1;
double CPU_Gravity_CommSum2;
double CPU_Gravity_Imbalance1;
double CPU_Gravity_Imbalance2;
#endif
#ifdef COOLING
double CPU_Cooling;
#endif
#ifdef DETAILED_CPU
double CPU_Leapfrog;
double CPU_Physics;
double CPU_Residual;
double CPU_Accel;
double CPU_Begrun;
#endif
/* tree code opening criterion */
double ErrTolTheta; /*!< BH tree opening angle */
double ErrTolForceAcc; /*!< parameter for relative opening criterion in tree walk */
/* adjusts accuracy of time-integration */
double ErrTolIntAccuracy; /*!< accuracy tolerance parameter \f$ \eta \f$ for timestep criterion. The
timestep is \f$ \Delta t = \sqrt{\frac{2 \eta \epsilon}{a}} \f$ */
double MinSizeTimestep; /*!< minimum allowed timestep. Normally, the simulation terminates if the
timestep determined by the timestep criteria falls below this limit. */
double MaxSizeTimestep; /*!< maximum allowed timestep */
double MaxRMSDisplacementFac; /*!< this determines a global timestep criterion for cosmological simulations
in comoving coordinates. To this end, the code computes the rms velocity
of all particles, and limits the timestep such that the rms displacement
is a fraction of the mean particle separation (determined from the
particle mass and the cosmological parameters). This parameter specifies
this fraction. */
double CourantFac; /*!< SPH-Courant factor */
/* frequency of tree reconstruction/domain decomposition */
double TreeDomainUpdateFrequency; /*!< controls frequency of domain decompositions */
/* Gravitational and hydrodynamical softening lengths (given in terms of an `equivalent' Plummer softening length).
* Five groups of particles are supported 0="gas", 1="halo", 2="disk", 3="bulge", 4="stars", 5="bndry"
*/
double MinGasHsmlFractional; /*!< minimum allowed SPH smoothing length in units of SPH gravitational softening length */
double MinGasHsml; /*!< minimum allowed SPH smoothing length */
double SofteningGas; /*!< comoving gravitational softening lengths for type 0 */
double SofteningHalo; /*!< comoving gravitational softening lengths for type 1 */
double SofteningDisk; /*!< comoving gravitational softening lengths for type 2 */
double SofteningBulge; /*!< comoving gravitational softening lengths for type 3 */
double SofteningStars; /*!< comoving gravitational softening lengths for type 4 */
double SofteningBndry; /*!< comoving gravitational softening lengths for type 5 */
double SofteningGasMaxPhys; /*!< maximum physical softening length for type 0 */
double SofteningHaloMaxPhys; /*!< maximum physical softening length for type 1 */
double SofteningDiskMaxPhys; /*!< maximum physical softening length for type 2 */
double SofteningBulgeMaxPhys; /*!< maximum physical softening length for type 3 */
double SofteningStarsMaxPhys; /*!< maximum physical softening length for type 4 */
double SofteningBndryMaxPhys; /*!< maximum physical softening length for type 5 */
double SofteningTable[6]; /*!< current (comoving) gravitational softening lengths for each particle type */
double ForceSoftening[6]; /*!< the same, but multiplied by a factor 2.8 - at that scale the force is Newtonian */
double MassTable[6]; /*!< Table with particle masses for particle types with equal mass.
If particle masses are all equal for one type, the corresponding entry in MassTable
is set to this value, allowing the size of the snapshot files to be reduced. */
/* some filenames */
char InitCondFile[MAXLEN_FILENAME]; /*!< filename of initial conditions */
char OutputDir[MAXLEN_FILENAME]; /*!< output directory of the code */
char SnapshotFileBase[MAXLEN_FILENAME]; /*!< basename to construct the names of snapshotf files */
char EnergyFile[MAXLEN_FILENAME]; /*!< name of file with energy statistics */
#ifdef SYSTEMSTATISTICS
char SystemFile[MAXLEN_FILENAME];
#endif
char CpuFile[MAXLEN_FILENAME]; /*!< name of file with cpu-time statistics */
char InfoFile[MAXLEN_FILENAME]; /*!< name of log-file with a list of the timesteps taken */
char LogFile[MAXLEN_FILENAME]; /*!< name of log-file with varied info */
#ifdef SFR
char SfrFile[MAXLEN_FILENAME]; /*!< name of file with sfr records */
#endif
#ifdef CHIMIE
char ChimieFile[MAXLEN_FILENAME]; /*!< name of file with chimie records */
#endif
#ifdef MULTIPHASE
char PhaseFile[MAXLEN_FILENAME]; /*!< name of file with phase records */
char StickyFile[MAXLEN_FILENAME]; /*!< name of file with sticky records */
#endif
#ifdef AGN_ACCRETION
char AccretionFile[MAXLEN_FILENAME]; /*!< name of file with accretion records */
#endif
#ifdef BONDI_ACCRETION
char BondiFile[MAXLEN_FILENAME]; /*!< name of file with bondi records */
#endif
#ifdef BUBBLES
char BubbleFile[MAXLEN_FILENAME]; /*!< name of file with bubble records */
#endif
char TimingsFile[MAXLEN_FILENAME]; /*!< name of file with performance metrics of gravitational tree algorithm */
char RestartFile[MAXLEN_FILENAME]; /*!< basename of restart-files */
char ResubmitCommand[MAXLEN_FILENAME]; /*!< name of script-file that will be executed for automatic restart */
char OutputListFilename[MAXLEN_FILENAME]; /*!< name of file with list of desired output times */
double OutputListTimes[MAXLEN_OUTPUTLIST]; /*!< table with desired output times */
int OutputListLength; /*!< number of output times stored in the table of desired output times */
#ifdef RANDOMSEED_AS_PARAMETER
int RandomSeed; /*!< initial random seed */
#endif
}
All; /*!< a container variable for global variables that are equal on all processors */
/*! This structure holds all the information that is
* stored for each particle of the simulation.
*/
extern struct particle_data
{
FLOAT Pos[3]; /*!< particle position at its current time */
FLOAT Mass; /*!< particle mass */
FLOAT Vel[3]; /*!< particle velocity at its current time */
FLOAT GravAccel[3]; /*!< particle acceleration due to gravity */
#ifdef PMGRID
FLOAT GravPM[3]; /*!< particle acceleration due to long-range PM gravity force*/
#endif
#ifdef FORCETEST
FLOAT GravAccelDirect[3]; /*!< particle acceleration when computed with direct summation */
#endif
FLOAT Potential; /*!< gravitational potential */
FLOAT OldAcc; /*!< magnitude of old gravitational force. Used in relative opening criterion */
#ifndef LONGIDS
unsigned int ID; /*!< particle identifier */
#else
unsigned long long ID; /*!< particle identifier */
#endif
int Type; /*!< flags particle type. 0=gas, 1=halo, 2=disk, 3=bulge, 4=stars, 5=bndry */
int Ti_endstep; /*!< marks end of current timestep of particle on integer timeline */
int Ti_begstep; /*!< marks start of current timestep of particle on integer timeline */
#ifdef SYNCHRONIZE_NGB_TIMESTEP
int Old_Ti_endstep; /*!< marks end of old current timestep of particle on integer timeline */
int Old_Ti_begstep; /*!< marks start of old current timestep of particle on integer timeline */
#endif
#ifdef FLEXSTEPS
int FlexStepGrp; /*!< a random 'offset' on the timeline to create a smooth grouping of particles */
#endif
float GravCost; /*!< weight factor used for balancing the work-load */
#ifdef PSEUDOSYMMETRIC
float AphysOld; /*!< magnitude of acceleration in last timestep. Used to make a first order
prediction of the change of acceleration expected in the future, thereby
allowing to guess whether a decrease/increase of the timestep should occur
in the timestep that is started. */
#endif
#ifdef PARTICLE_FLAG
float Flag;
#endif
#ifdef STELLAR_PROP
unsigned int StPIdx; /*!< index to the corresponding StP particle */
#endif
#ifdef TESSEL
int iT; /*!< index of a triangle to which the point belong to */
int IsDone;
int IsAdded; /*!< if the point has already be added in the tesselation */
int ivPoint; /*!< index of first voronoi point */
int nvPoints; /*!< number of voronoi points */
int iMedian;
int nMedians;
double Volume;
double Density;
double Pressure;
double Entropy;
double rSearch; /*!< radius in which particles must search for ngbs */
int iPref; /*!< for a ghost point, index of the reference point */
FLOAT tesselAccel[3];
#endif
# ifdef SYNCHRONIZE_NGB_TIMESTEP
int Ti_step;
#endif
}
*P, /*!< holds particle data on local processor */
#ifdef PY_INTERFACE
*Q,
*DomainPartBufQ, /*!< buffer for particle data used in domain decomposition */
#endif
*DomainPartBuf; /*!< buffer for particle data used in domain decomposition */
/* the following structure holds data that is stored for each SPH particle in addition to the collisionless
* variables.
*/
extern struct sph_particle_data
{
FLOAT Entropy; /*!< current value of entropy (actually entropic function) of particle */
FLOAT Density; /*!< current baryonic mass density of particle */
FLOAT Hsml; /*!< current smoothing length */
FLOAT Left; /*!< lower bound in iterative smoothing length search */
FLOAT Right; /*!< upper bound in iterative smoothing length search */
FLOAT NumNgb; /*!< weighted number of neighbours found */
#ifdef AVOIDNUMNGBPROBLEM
FLOAT OldNumNgb;
#endif
FLOAT Pressure; /*!< current pressure */
FLOAT DtEntropy; /*!< rate of change of entropy */
#ifdef STELLAR_FLUX
FLOAT EnergyFlux; /*!< current value of local energy flux - Sph particles */
#endif
#ifdef AGN_HEATING
FLOAT EgySpecAGNHeat; /*!< current value of specific energy radiated of particle - Sph particles */
FLOAT DtEgySpecAGNHeat; /*!< rate of change of specific radiated energy - Sph particles */
FLOAT DtEntropyAGNHeat;
#endif
#ifdef MULTIPHASE
FLOAT StickyTime;
int StickyFlag;
#ifdef COUNT_COLLISIONS
float StickyCollisionNumber;
#endif
#endif
#ifdef FEEDBACK
FLOAT EgySpecFeedback;
FLOAT DtEgySpecFeedback;
FLOAT EnergySN;
FLOAT EnergySNrem;
FLOAT TimeSN;
FLOAT FeedbackVel[3]; /*!< kick due to feedback force */
#endif
#ifdef FEEDBACK_WIND
FLOAT FeedbackWindVel[3]; /*!< kick due to feedback force */
#endif
FLOAT HydroAccel[3]; /*!< acceleration due to hydrodynamical force */
FLOAT VelPred[3]; /*!< predicted SPH particle velocity at the current time */
FLOAT DivVel; /*!< local velocity divergence */
FLOAT CurlVel; /*!< local velocity curl */
FLOAT Rot[3]; /*!< local velocity curl */
FLOAT DhsmlDensityFactor; /*!< correction factor needed in the equation of motion of the conservative entropy formulation of SPH */
FLOAT MaxSignalVel; /*!< maximum "signal velocity" occurring for this particle */
#ifdef MULTIPHASE
int Phase;
int StickyIndex;
int StickyNgb;
int StickyMaxID;
float StickyMaxFs;
FLOAT StickyNewVel[3];
#endif
#ifdef OUTPUTOPTVAR1
FLOAT OptVar1; /*!< optional variable 1 */
#endif
#ifdef OUTPUTOPTVAR2
FLOAT OptVar2; /*!< optional variable 2 */
#endif
#ifdef COMPUTE_VELOCITY_DISPERSION
FLOAT VelocityDispersion[VELOCITY_DISPERSION_SIZE]; /*!< velocity dispersion */
#endif
#ifdef CHIMIE
FLOAT Metal[NELEMENTS];
FLOAT dMass; /*!< mass variation due to mass transfer */
#ifdef CHIMIE_THERMAL_FEEDBACK
FLOAT DeltaEgySpec;
FLOAT SNIaThermalTime; /*!< flag particles that got energy from SNIa */
FLOAT SNIIThermalTime; /*!< flag particles that got energy from SNII */
double NumberOfSNIa;
double NumberOfSNII;
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
FLOAT WindTime; /*!< flag particles that belong to the wind */
unsigned int WindFlag; /*!< flag particles that will be part of the wind */
#endif
#endif /*CHIMIE*/
#ifdef ENTROPYPRED
FLOAT EntropyPred; /*!< predicted entropy at the current time */
#endif
#ifdef ART_CONDUCTIVITY
FLOAT EnergyIntPred;
FLOAT GradEnergyInt[3];
#endif
#ifdef AB_TURB
FLOAT TurbAccel[3];
#endif
#if defined(ART_VISCO_MM)|| defined(ART_VISCO_RO) || defined(ART_VISCO_CD)
double ArtBulkViscConst;
#ifdef ART_VISCO_CD
double DmatCD[3][3];
double TmatCD[3][3];
double DiVelAccurate;
double DiVelTemp;
double ArtBulkViscConstOld;
double R_CD;
FLOAT MaxSignalVelCD;
#endif
#endif
#ifdef PRESSURE_ENTROPY_FORMULATION
FLOAT nDensity;
FLOAT mPressure;
FLOAT DhsmlnDensityFactor;
FLOAT DhsmlPressureFactor;
#endif
#ifdef PY_INTERFACE
FLOAT Observable;
FLOAT ObsMoment0;
FLOAT ObsMoment1;
FLOAT GradObservable[3];
#endif
# ifdef SYNCHRONIZE_NGB_TIMESTEP
int Ti_minNgbStep;
#endif
+#ifdef TIMESTEP_UPDATE_FOR_FEEDBACK
+ FLOAT FeedbackUpdatedAccel[3]; /*!< acceleration after feedback injection */
+#endif
+
}
*SphP, /*!< holds SPH particle data on local processor */
#ifdef PY_INTERFACE
*SphQ,
*DomainSphBufQ, /*!< buffer for SPH particle data in domain decomposition */
#endif
*DomainSphBuf; /*!< buffer for SPH particle data in domain decomposition */
#ifdef STELLAR_PROP
/* the following structure holds data that is stored for each star particle in addition to the collisionless
* variables.
*/
extern struct st_particle_data
{
#ifdef CHECK_ID_CORRESPONDENCE
unsigned int ID; /*!< particle identifier (must be the same as P[].ID) only used to check ID correspondence */
#endif
FLOAT FormationTime; /*!< star formation time of particle */
FLOAT InitialMass; /*!< initial stellar mass */
#ifndef LONGIDS
unsigned int IDProj; /*!< id of progenitor particle */
#else
unsigned long long IDProj; /*!< id of progenitor particle */
#endif
FLOAT Metal[NELEMENTS];
FLOAT Density; /*!< current baryonic mass density of particle */
FLOAT Volume; /*!< current volume of particle */
FLOAT Hsml; /*!< current smoothing length */
FLOAT Left; /*!< lower bound in iterative smoothing length search */
FLOAT Right; /*!< upper bound in iterative smoothing length search */
FLOAT NumNgb; /*!< weighted number of neighbours found */
unsigned int PIdx; /*!< index to the corresponding particle */
#ifdef AVOIDNUMNGBPROBLEM
FLOAT OldNumNgb;
#endif
FLOAT DhsmlDensityFactor; /*!< correction factor needed in the equation of motion of the conservative entropy formulation of SPH */
double TotalEjectedGasMass;
double TotalEjectedEltMass[NELEMENTS];
double TotalEjectedEgySpec;
double NumberOfSNIa;
double NumberOfSNII;
#ifdef CHIMIE_KINETIC_FEEDBACK
double NgbMass; /*!< mass of neighbours */
#endif
#ifdef CHIMIE
unsigned int Flag;
#endif
}
*StP, /*!< holds ST particle data on local processor */
*DomainStBuf; /*!< buffer for ST particle data in domain decomposition */
#endif
/* Variables for Tree
*/
extern int MaxNodes; /*!< maximum allowed number of internal nodes */
extern int Numnodestree; /*!< number of (internal) nodes in each tree */
extern struct NODE
{
FLOAT len; /*!< sidelength of treenode */
FLOAT center[3]; /*!< geometrical center of node */
#ifdef ADAPTIVE_GRAVSOFT_FORGAS
FLOAT maxsoft; /*!< hold the maximum gravitational softening of particles in the
node if the ADAPTIVE_GRAVSOFT_FORGAS option is selected */
#endif
#ifdef STELLAR_FLUX
FLOAT starlum ; /*!< star luminosity of node */
#endif
union
{
int suns[8]; /*!< temporary pointers to daughter nodes */
struct
{
FLOAT s[3]; /*!< center of mass of node */
FLOAT mass; /*!< mass of node */
int bitflags; /*!< a bit-field with various information on the node */
int sibling; /*!< this gives the next node in the walk in case the current node can be used */
int nextnode; /*!< this gives the next node in case the current node needs to be opened */
int father; /*!< this gives the parent node of each node (or -1 if we have the root node) */
}
d;
}
u;
}
*Nodes_base, /*!< points to the actual memory allocated for the nodes */
*Nodes; /*!< this is a pointer used to access the nodes which is shifted such that Nodes[All.MaxPart]
gives the first allocated node */
extern int *Nextnode; /*!< gives next node in tree walk */
extern int *Father; /*!< gives parent node in tree */
extern struct extNODE /*!< this structure holds additional tree-node information which is not needed in the actual gravity computation */
{
FLOAT hmax; /*!< maximum SPH smoothing length in node. Only used for gas particles */
FLOAT vs[3]; /*!< center-of-mass velocity */
}
*Extnodes_base, /*!< points to the actual memory allocated for the extended node information */
*Extnodes; /*!< provides shifted access to extended node information, parallel to Nodes/Nodes_base */
/*! Header for the standard file format.
*/
extern struct io_header
{
int npart[6]; /*!< number of particles of each type in this file */
double mass[6]; /*!< mass of particles of each type. If 0, then the masses are explicitly
stored in the mass-block of the snapshot file, otherwise they are omitted */
double time; /*!< time of snapshot file */
double redshift; /*!< redshift of snapshot file */
int flag_sfr; /*!< flags whether the simulation was including star formation */
int flag_feedback; /*!< flags whether feedback was included (obsolete) */
unsigned int npartTotal[6]; /*!< total number of particles of each type in this snapshot. This can be
different from npart if one is dealing with a multi-file snapshot. */
int flag_cooling; /*!< flags whether cooling was included */
int num_files; /*!< number of files in multi-file snapshot */
double BoxSize; /*!< box-size of simulation in case periodic boundaries were used */
double Omega0; /*!< matter density in units of critical density */
double OmegaLambda; /*!< cosmological constant parameter */
double HubbleParam; /*!< Hubble parameter in units of 100 km/sec/Mpc */
int flag_stellarage; /*!< flags whether the file contains formation times of star particles */
int flag_metals; /*!< flags whether the file contains metallicity values for gas and star particles */
unsigned int npartTotalHighWord[6]; /*!< High word of the total number of particles of each type */
int flag_entropy_instead_u; /*!< flags that IC-file contains entropy instead of u */
int flag_chimie_extraheader; /*!< flags that IC-file contains extra-header for chimie */
#ifdef MULTIPHASE
double critical_energy_spec;
#ifdef MESOMACHINE
char fill[38];
#else
char fill[48]; /* use 42 with regor... */
#endif
#else
char fill[56]; /*!< fills to 256 Bytes */
#endif
}
header; /*!< holds header for snapshot files */
#ifdef CHIMIE_EXTRAHEADER
/*! Header for the chimie part.
*/
extern struct io_chimie_extraheader
{
int nelts; /*!< number of chemical elements tracked */
float SolarAbundances[NELEMENTS];
char labels[256-4-4*(NELEMENTS)];
}
chimie_extraheader;
#endif
#define IO_NBLOCKS 24 /*!< total number of defined information blocks for snapshot files.
Must be equal to the number of entries in "enum iofields" */
enum iofields /*!< this enumeration lists the defined output blocks in snapshot files. Not all of them need to be present. */
{
IO_POS,
IO_VEL,
IO_ID,
IO_MASS,
IO_U,
IO_RHO,
IO_HSML,
IO_POT,
IO_ACCEL,
IO_DTENTR,
IO_TSTP,
IO_ERADSPH,
IO_ERADSTICKY,
IO_ERADFEEDBACK,
IO_ENERGYFLUX,
IO_METALS,
IO_STAR_FORMATIONTIME,
IO_INITIAL_MASS,
IO_STAR_IDPROJ,
IO_STAR_RHO,
IO_STAR_HSML,
IO_STAR_METALS,
IO_OPTVAR1,
IO_OPTVAR2
};
extern char Tab_IO_Labels[IO_NBLOCKS][4]; /*!< This table holds four-byte character tags used for fileformat 2 */
/* global state of system, used for global statistics
*/
extern struct state_of_system
{
double Mass;
double EnergyKin;
double EnergyPot;
double EnergyInt;
#ifdef COOLING
double EnergyRadSph;
#endif
#ifdef AGN_HEATING
double EnergyAGNHeat;
#endif
#ifdef MULTIPHASE
double EnergyRadSticky;
#endif
#ifdef FEEDBACK_WIND
double EnergyFeedbackWind;
#endif
#ifdef BUBBLES
double EnergyBubbles;
#endif
#ifdef CHIMIE_THERMAL_FEEDBACK
double EnergyThermalFeedback;
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
double EnergyKineticFeedback;
#endif
double EnergyTot;
double Momentum[4];
double AngMomentum[4];
double CenterOfMass[4];
double MassComp[6];
double EnergyKinComp[6];
double EnergyPotComp[6];
double EnergyIntComp[6];
#ifdef COOLING
double EnergyRadSphComp[6];
#endif
#ifdef AGN_HEATING
double EnergyAGNHeatComp[6];
#endif
#ifdef MULTIPHASE
double EnergyRadStickyComp[6];
#endif
#ifdef FEEDBACK_WIND
double EnergyFeedbackWindComp[6];
#endif
#ifdef BUBBLES
double EnergyBubblesComp[6];
#endif
#ifdef CHIMIE_THERMAL_FEEDBACK
double EnergyThermalFeedbackComp[6];
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
double EnergyKineticFeedbackComp[6];
#endif
double EnergyTotComp[6];
double MomentumComp[6][4];
double AngMomentumComp[6][4];
double CenterOfMassComp[6][4];
}
SysState; /*!< Structure for storing some global statistics about the simulation. */
/*! This structure contains data related to the energy budget.
These values are different for each task. They need to be stored
in the restart files.
*/
extern struct local_state_of_system
{
double EnergyTest;
double EnergyInt1;
double EnergyInt2;
double EnergyKin1;
double EnergyKin2;
#ifdef COOLING
double RadiatedEnergy;
#endif
#ifdef SFR
double StarEnergyInt;
#ifdef FEEDBACK
double StarEnergyFeedback;
#endif
#endif
#ifdef CHIMIE_THERMAL_FEEDBACK
double EnergyThermalFeedback;
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
double EnergyKineticFeedback;
#endif
#ifdef MULTIPHASE
double EnergyRadSticky;
#endif
#ifdef FEEDBACK_WIND
double EnergyFeedbackWind;
#endif
}
LocalSysState; /*!< Structure for storing some local statistics about the simulation. */
/* Various structures for communication
*/
extern struct gravdata_in
{
union
{
FLOAT Pos[3];
FLOAT Acc[3];
FLOAT Potential;
}
u;
#if defined(UNEQUALSOFTENINGS) || defined(STELLAR_FLUX)
int Type;
#ifdef ADAPTIVE_GRAVSOFT_FORGAS
FLOAT Soft;
#endif
#endif
#ifdef STELLAR_FLUX
FLOAT EnergyFlux;
#endif
union
{
FLOAT OldAcc;
int Ninteractions;
}
w;
}
*GravDataIn, /*!< holds particle data to be exported to other processors */
*GravDataGet, /*!< holds particle data imported from other processors */
*GravDataResult, /*!< holds the partial results computed for imported particles. Note: We use GravDataResult = GravDataGet, such that the result replaces the imported data */
*GravDataOut; /*!< holds partial results received from other processors. This will overwrite the GravDataIn array */
extern struct gravdata_index
{
int Task;
int Index;
int SortIndex;
}
*GravDataIndexTable; /*!< the particles to be exported are grouped by task-number. This table allows the results to be disentangled again and to be assigned to the correct particle */
extern struct densdata_in
{
FLOAT Pos[3];
FLOAT Vel[3];
FLOAT Hsml;
#ifdef MULTIPHASE
int Phase;
#endif
int Index;
int Task;
#ifdef ART_CONDUCTIVITY
FLOAT EnergyIntPred;
#endif
}
*DensDataIn, /*!< holds particle data for SPH density computation to be exported to other processors */
*DensDataGet; /*!< holds imported particle data for SPH density computation */
extern struct densdata_out
{
FLOAT Rho;
FLOAT Div, Rot[3];
FLOAT DhsmlDensity;
FLOAT Ngb;
#ifdef ART_CONDUCTIVITY
FLOAT GradEnergyInt[3];
#endif
#ifdef PRESSURE_ENTROPY_FORMULATION
FLOAT nDensity;
FLOAT mPressure;
FLOAT DhsmlnDensity;
FLOAT DhsmlPressure;
#endif
}
*DensDataResult, /*!< stores the locally computed SPH density results for imported particles */
*DensDataPartialResult; /*!< imported partial SPH density results from other processors */
extern struct hydrodata_in
{
FLOAT Pos[3];
FLOAT Vel[3];
FLOAT Hsml;
#ifdef FEEDBACK
FLOAT EnergySN;
#endif
#ifdef MULTIPHASE
int Phase;
FLOAT Entropy;
int StickyFlag;
#endif
FLOAT Mass;
FLOAT Density;
FLOAT Pressure;
FLOAT F1;
FLOAT DhsmlDensityFactor;
int Timestep;
int Task;
int Index;
#ifdef WITH_ID_IN_HYDRA
int ID;
#endif
#ifdef ART_CONDUCTIVITY
FLOAT NormGradEnergyInt;
#endif
#if defined(ART_VISCO_MM)|| defined(ART_VISCO_RO) || defined(ART_VISCO_CD)
double ArtBulkViscConst;
#endif
#ifdef PRESSURE_ENTROPY_FORMULATION
FLOAT mPressure;
FLOAT EntropyPred;
FLOAT DhsmlnDensityFactor;
#endif
}
*HydroDataIn, /*!< holds particle data for SPH hydro-force computation to be exported to other processors */
*HydroDataGet; /*!< holds imported particle data for SPH hydro-force computation */
extern struct hydrodata_out
{
FLOAT Acc[3];
FLOAT DtEntropy;
#ifdef FEEDBACK
FLOAT DtEgySpecFeedback;
FLOAT FeedbackAccel[3]; /*!< acceleration due to feedback force */
#endif
FLOAT MaxSignalVel;
#ifdef COMPUTE_VELOCITY_DISPERSION
FLOAT VelocityDispersion[VELOCITY_DISPERSION_SIZE];
#endif
#ifdef MULTIPHASE
FLOAT StickyDVel[3]; /*!< differences in velocities induced by sticky collisions */
#endif
#ifdef OUTPUT_CONDUCTIVITY
FLOAT OptVar2;
#endif
#ifdef ART_VISCO_CD
double DmatCD[3][3];
double TmatCD[3][3];
double R_CD;
FLOAT MaxSignalVelCD;
#endif
}
*HydroDataResult, /*!< stores the locally computed SPH hydro results for imported particles */
*HydroDataPartialResult; /*!< imported partial SPH hydro-force results from other processors */
#ifdef MULTIPHASE
extern struct stickydata_in
{
FLOAT Pos[3];
FLOAT Vel[3];
FLOAT Mass;
FLOAT Hsml;
int ID;
int StickyMaxID;
int StickyNgb;
float StickyMaxFs;
int Task;
int Index;
}
*StickyDataIn, /*!< holds particle data for sticky computation to be exported to other processors */
*StickyDataGet; /*!< holds imported particle data for sticky computation */
extern struct stickydata_out
{
int StickyMaxID;
int StickyNgb;
float StickyMaxFs;
FLOAT StickyNewVel[3];
}
*StickyDataResult, /*!< stores the locally computed sticky results for imported particles */
*StickyDataPartialResult; /*!< imported partial sticky results from other processors */
extern struct Sticky_index
{
int Index;
int CellIndex;
int Flag;
}
*StickyIndex;
#endif
#ifdef CHIMIE
extern struct chimiedata_in
{
FLOAT Pos[3];
FLOAT Vel[3];
#ifndef LONGIDS
unsigned int ID; /*!< particle identifier */
#else
unsigned long long ID; /*!< particle identifier */
#endif
FLOAT Hsml;
#ifdef FEEDBACK
FLOAT EnergySN;
#endif
#ifdef MULTIPHASE
int Phase;
FLOAT Entropy;
int StickyFlag;
#endif
FLOAT Density;
FLOAT Volume;
FLOAT Pressure;
FLOAT F1;
FLOAT DhsmlDensityFactor;
int Timestep;
int Task;
int Index;
#ifdef WITH_ID_IN_HYDRA
int ID;
#endif
double TotalEjectedGasMass;
double TotalEjectedEltMass[NELEMENTS];
double TotalEjectedEgySpec;
double NumberOfSNIa;
double NumberOfSNII;
#ifdef CHIMIE_KINETIC_FEEDBACK
FLOAT NgbMass;
#endif
}
*ChimieDataIn, /*!< holds particle data for Chimie computation to be exported to other processors */
*ChimieDataGet; /*!< holds imported particle data for Chimie computation */
extern struct chimiedata_out
{
FLOAT Acc[3];
FLOAT DtEntropy;
#ifdef FEEDBACK
FLOAT DtEgySpecFeedback;
FLOAT FeedbackAccel[3]; /*!< acceleration due to feedback force */
#endif
FLOAT MaxSignalVel;
#ifdef COMPUTE_VELOCITY_DISPERSION
FLOAT VelocityDispersion[VELOCITY_DISPERSION_SIZE];
#endif
#ifdef MULTIPHASE
FLOAT StickyDVel[3]; /*!< differences in velocities induced by sticky collisions */
#endif
}
*ChimieDataResult, /*!< stores the locally computed Chimie results for imported particles */
*ChimieDataPartialResult; /*!< imported partial Chimie results from other processors */
extern struct starsdensdata_in
{
FLOAT Pos[3];
FLOAT Hsml;
int Index;
int Task;
}
*StarsDensDataIn, /*!< holds particle data for SPH density computation to be exported to other processors */
*StarsDensDataGet; /*!< holds imported particle data for SPH density computation */
extern struct starsdensdata_out
{
FLOAT Rho;
FLOAT Volume;
FLOAT DhsmlDensity;
FLOAT Ngb;
#ifdef CHIMIE_KINETIC_FEEDBACK
FLOAT NgbMass;
#endif
}
*StarsDensDataResult, /*!< stores the locally computed SPH density results for imported particles */
*StarsDensDataPartialResult; /*!< imported partial SPH density results from other processors */
#endif /*CHIMIE*/
#ifdef TESSEL
extern struct ghostdata_in
{
FLOAT Pos[3];
FLOAT rSearch;
int Index;
int Task;
}
*GhostDataIn, /*!< holds particle data for SPH density computation to be exported to other processors */
*GhostDataGet; /*!< holds imported particle data for SPH density computation */
extern struct ghostdata_out
{
FLOAT Value;
}
*GhostDataResult, /*!< stores the locally computed SPH density results for imported particles */
*GhostDataPartialResult; /*!< imported partial SPH density results from other processors */
/* ghost particles */
//extern struct ghost_particle_data
//{
// FLOAT Pos[3]; /*!< particle position at its current time */
// FLOAT Mass; /*!< particle mass */
//}
// *gP;
extern int NumgPart;
#endif /* TESSEL */
#ifdef SYNCHRONIZE_NGB_TIMESTEP
extern struct SynchroinzeNgbTimestepdata_in
{
FLOAT Pos[3];
FLOAT Hsml;
int Ti_step;
int Ti_endstep;
int Index;
int Task;
#ifdef MULTIPHASE
int Phase;
#endif
}
*SynchroinzeNgbTimestepDataIn,
*SynchroinzeNgbTimestepDataGet;
#endif
#ifdef PY_INTERFACE
extern struct denssphdata_in
{
FLOAT Pos[3];
FLOAT Vel[3];
FLOAT Hsml;
FLOAT Density;
FLOAT DhsmlDensityFactor;
int Index;
int Task;
FLOAT Observable;
}
*DensSphDataIn, /*!< holds particle data for SPH density computation to be exported to other processors */
*DensSphDataGet; /*!< holds imported particle data for SPH density computation */
extern struct denssphdata_out
{
FLOAT Rho;
FLOAT Div, Rot[3];
FLOAT DhsmlDensity;
FLOAT Ngb;
FLOAT GradObservable[3];
}
*DensSphDataResult, /*!< stores the locally computed SPH density results for imported particles */
*DensSphDataPartialResult; /*!< imported partial SPH density results from other processors */
extern struct sphdata_in
{
FLOAT Pos[3];
FLOAT Vel[3];
FLOAT Hsml;
FLOAT Density;
FLOAT DhsmlDensityFactor;
FLOAT ObsMoment0;
FLOAT ObsMoment1;
FLOAT Observable;
int Task;
int Index;
}
*SphDataIn, /*!< holds particle data for SPH hydro-force computation to be exported to other processors */
*SphDataGet; /*!< holds imported particle data for SPH hydro-force computation */
extern struct sphdata_out
{
FLOAT ObsMoment0;
FLOAT ObsMoment1;
FLOAT GradObservable[3];
}
*SphDataResult, /*!< stores the locally computed SPH hydro results for imported particles */
*SphDataPartialResult; /*!< imported partial SPH hydro-force results from other processors */
#endif /*PY_INTERFACE*/
#endif
diff --git a/src/chimie.c b/src/chimie.c
index 74eb2e4..d2f1f9e 100644
--- a/src/chimie.c
+++ b/src/chimie.c
@@ -1,5315 +1,5306 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>
#include <gsl/gsl_math.h>
#include "allvars.h"
#include "proto.h"
#ifdef CHIMIE
#ifdef PYCHEM
#include <Python.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <numpy/arrayobject.h>
/*
****************************************************
these variables are already defined in Gadget (or not needed)
****************************************************
*/
#define TO_DOUBLE(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_DOUBLE) ,0) )
#endif /* PYCHEM */
/****************************************************************************************/
/*
/*
/*
/* COMMON CHIMIE PART
/*
/*
/*
/****************************************************************************************/
#define MAXPTS 10
#define MAXDATASIZE 200
#define KPC_IN_CM 3.085e+21
static int verbose=0;
static double *MassFracSNII;
static double *MassFracSNIa;
static double *MassFracDYIN;
static double *SingleMassFracSNII;
static double *SingleMassFracSNIa;
static double *SingleMassFracDYIN;
static double *EjectedMass;
static double *SingleEjectedMass;
static double **MassFracSNIIs;
static double **MassFracSNIas;
static double **MassFracDYINs;
static double **SingleMassFracSNIIs;
static double **SingleMassFracSNIas;
static double **SingleMassFracDYINs;
static double **EjectedMasss;
static double **SingleEjectedMasss;
/* intern global variables */
static struct local_params_chimie
{
float coeff_z[3][3];
float Mmin,Mmax;
int n;
float ms[MAXPTS];
float as[MAXPTS+1];
float bs[MAXPTS+1];
float fs[MAXPTS];
double imf_Ntot;
float SNII_Mmin;
float SNII_Mmax;
float SNII_cte;
float SNII_a;
float SNIa_Mpl;
float SNIa_Mpu;
float SNIa_a;
float SNIa_cte;
float SNIa_Mdl1;
float SNIa_Mdu1;
float SNIa_a1;
float SNIa_b1;
float SNIa_cte1;
float SNIa_bb1;
float SNIa_Mdl2;
float SNIa_Mdu2;
float SNIa_a2;
float SNIa_b2;
float SNIa_cte2;
float SNIa_bb2;
float Mco;
int npts;
int nelts;
}
*Cps,*Cp;
static struct local_elts_chimie
{
float Mmin; /* minimal mass */
float Step; /* log of mass step */
float Array[MAXDATASIZE]; /* data */
float Metal[MAXDATASIZE]; /* data */
float MSNIa;
float SolarAbundance;
char label[72];
}
**Elts,*Elt;
/*! This function allocates all variables related to the chemistry
*/
void allocate_chimie()
{
int j;
/* allocate Cp */
Cps = malloc((All.ChimieNumberOfParameterFiles) * sizeof(struct local_params_chimie));
/* allocate elts */
Elts = malloc((All.ChimieNumberOfParameterFiles) * sizeof(struct local_elts_chimie *)); /* array of per-table pointers */
//for (j=0;j<All.ChimieNumberOfParameterFiles;j++)
// Elt[j] = malloc((nelts) * sizeof(struct local_elts_chimie));
/* these are arrays of per-table pointers, so allocate pointer-sized slots */
MassFracSNIIs = malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
MassFracSNIas = malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
MassFracDYINs = malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
EjectedMasss = malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
SingleMassFracSNIIs= malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
SingleMassFracSNIas= malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
SingleMassFracDYINs= malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
SingleEjectedMasss = malloc((All.ChimieNumberOfParameterFiles) * sizeof(double *));
}
/*! Set the chemistry table to use
*/
void set_table(int i)
{
if (i>=All.ChimieNumberOfParameterFiles)
{
printf("\n set_table : i>= %d !!!\n\n",All.ChimieNumberOfParameterFiles);
endrun(88809);
}
else
{
Cp = &Cps[i];
Elt = Elts[i];
MassFracSNII = MassFracSNIIs[i]; /* all this is useless, no ?*/
MassFracSNIa = MassFracSNIas[i];
MassFracDYIN = MassFracDYINs[i];
SingleMassFracSNII = SingleMassFracSNIIs[i];
SingleMassFracSNIa = SingleMassFracSNIas[i];
SingleMassFracDYIN = SingleMassFracDYINs[i];
EjectedMass = EjectedMasss[i];
SingleEjectedMass = SingleEjectedMasss[i];
}
}
/*! Read the chemistry table
*/
void read_chimie(char * filename,int it)
{
char line[72],buffer[72];
FILE *fd;
int i,j;
if (verbose && ThisTask==0)
printf("reading %s ...\n",filename);
fd = fopen(filename,"r");
/* read Lifetime */
/* #### Livetime #### */
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fscanf(fd, "%g %g %g\n", &Cps[it].coeff_z[0][0],&Cps[it].coeff_z[0][1],&Cps[it].coeff_z[0][2]);
fscanf(fd, "%g %g %g\n", &Cps[it].coeff_z[1][0],&Cps[it].coeff_z[1][1],&Cps[it].coeff_z[1][2]);
fscanf(fd, "%g %g %g\n", &Cps[it].coeff_z[2][0],&Cps[it].coeff_z[2][1],&Cps[it].coeff_z[2][2]);
fgets(line, sizeof(line), fd);
/* IMF Parameters */
/* #### IMF Parameters #### */
fgets(line, sizeof(line), fd);
fscanf(fd, "%g %g\n",&Cps[it].Mmin,&Cps[it].Mmax);
fscanf(fd, "%d\n",&Cps[it].n);
if (Cps[it].n>0)
for (i=0;i<Cps[it].n;i++)
fscanf(fd,"%g",&Cps[it].ms[i]);
else
fgets(line, sizeof(line), fd);
for (i=0;i<Cps[it].n+1;i++)
fscanf(fd,"%g",&Cps[it].as[i]);
fgets(line, sizeof(line), fd);
/* Parameters for SNII Rates */
/* #### SNII Parameters #### */
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fscanf(fd, "%g \n",&Cps[it].SNII_Mmin);
fgets(line, sizeof(line), fd);
/* Parameters for SNIa Rates */
/* #### SNIa Parameters #### */
fgets(line, sizeof(line), fd);
fscanf(fd, "%g %g\n",&Cps[it].SNIa_Mpl,&Cps[it].SNIa_Mpu);
fscanf(fd, "%g \n",&Cps[it].SNIa_a);
fscanf(fd, "%g %g %g\n",&Cps[it].SNIa_Mdl1,&Cps[it].SNIa_Mdu1,&Cps[it].SNIa_bb1);
fscanf(fd, "%g %g %g\n",&Cps[it].SNIa_Mdl2,&Cps[it].SNIa_Mdu2,&Cps[it].SNIa_bb2);
fgets(line, sizeof(line), fd);
/* Metal injection SNII */
/* #### Metal Parameters ####*/
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fscanf(fd, "%d %d\n",&Cps[it].npts,&Cps[it].nelts);
/* allocate memory for elts */
if (Cps[it].npts<=MAXDATASIZE)
{
Elts[it] = malloc((Cps[it].nelts+2) * sizeof(struct local_elts_chimie));
}
else
{
printf("\n Cps[it].npts = %d > MAXDATASIZE = %d !!!\n\n",Cps[it].npts,MAXDATASIZE);
endrun(88800);
}
/* allocate memory */
MassFracSNIIs[it] = malloc((Cps[it].nelts+2) * sizeof(double)); /* really needed ? */
MassFracSNIas[it] = malloc((Cps[it].nelts+2) * sizeof(double));
MassFracDYINs[it] = malloc((Cps[it].nelts+2) * sizeof(double));
EjectedMasss[it] = malloc((Cps[it].nelts+2) * sizeof(double));
SingleMassFracSNIIs[it] = malloc((Cps[it].nelts+2) * sizeof(double));
SingleMassFracSNIas[it] = malloc((Cps[it].nelts+2) * sizeof(double));
SingleMassFracDYINs[it] = malloc((Cps[it].nelts+2) * sizeof(double));
SingleEjectedMasss[it] = malloc((Cps[it].nelts+2) * sizeof(double));
/* injected metals */
for (i=0;i<Cps[it].nelts+2;i++)
{
fgets(line, sizeof(line), fd);
/* strip trailing line */
for (j = 0; j < strlen(line); j++)
if ( line[j] == '\n' || line[j] == '\r' )
line[j] = '\0';
/* copy labels */
strcpy(Elts[it][i].label,line);
/* problem: drop the two leading characters of the label */
strcpy(buffer,&Elts[it][i].label[2]);
strcpy(Elts[it][i].label,buffer);
fgets(line, sizeof(line), fd);
fscanf(fd, "%g %g\n",&Elts[it][i].Mmin,&Elts[it][i].Step);
for (j=0;j<Cps[it].npts;j++)
{
fscanf(fd, "%g\n",&Elts[it][i].Metal[j]);
}
}
/* integrals of injected metals */
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fscanf(fd, "%d %d\n",&Cps[it].npts,&Cps[it].nelts);
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
/* integrals of injected metals */
for (i=0;i<Cps[it].nelts+2;i++)
{
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fscanf(fd, "%g %g\n",&Elts[it][i].Mmin,&Elts[it][i].Step);
for (j=0;j<Cps[it].npts;j++)
{
fscanf(fd, "%g\n",&Elts[it][i].Array[j]);
}
}
/* Metal injection SNIa */
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fscanf(fd, "%g\n",&Cps[it].Mco);
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
int nelts;
char label[72];
fscanf(fd, "%d\n",&nelts);
/* check */
if (nelts != Cps[it].nelts)
{
printf("\nThe number of elements in SNII (=%d) is not identical to the on of SNIa (=%d) !!!\n\n",Cps[it].nelts,nelts);
printf("This is not supported by the current implementation !!!\n");
endrun(88805);
}
for (i=0;i<Cps[it].nelts+2;i++)
{
fgets(line, sizeof(line), fd); /* label */
/* check label */
/* strip trailing line */
for (j = 0; j < strlen(line); j++)
if ( line[j] == '\n' || line[j] == '\r' )
line[j] = '\0';
strcpy(label,line);
strcpy(buffer,&label[2]);
strcpy(label,buffer);
if (strcmp(label,Elts[it][i].label)!=0)
{
printf("\nLabel of SNII element %d (=%s) is different from the SNIa one (=%s) !!!\n\n",i,Elts[it][i].label,label);
endrun(88806);
}
//fgets(line, sizeof(line), fd);
fscanf(fd, "%g\n",&Elts[it][i].MSNIa);
}
/* Solar Abundances */
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fgets(line, sizeof(line), fd);
fscanf(fd, "%d\n",&nelts);
/* check */
if (nelts != Cps[it].nelts)
{
printf("\nThe number of elements in SolarAbundances (=%d) is not identical to the on of SNIa (=%d) !!!\n\n",Cps[it].nelts,nelts);
printf("This is not supported by the current implementation !!!\n");
endrun(88805);
}
for (i=0;i<Cps[it].nelts;i++)
{
fgets(line, sizeof(line), fd); /* label */
/* check label */
/* strip trailing line */
for (j = 0; j < strlen(line); j++)
if ( line[j] == '\n' || line[j] == '\r' )
line[j] = '\0';
strcpy(label,line);
strcpy(buffer,&label[2]);
strcpy(label,buffer);
if (strcmp(label,Elts[it][i+2].label)!=0)
{
printf("\nLabel of SNII element %d (=%s) is different from the SNIa one (=%s) !!!\n\n",i,Elts[it][i+2].label,label);
endrun(88806);
}
//fgets(line, sizeof(line), fd);
fscanf(fd, "%g\n",&Elts[it][i+2].SolarAbundance);
}
fclose(fd);
if (verbose && ThisTask==0)
{
printf("%g %g %g\n", Cps[it].coeff_z[0][0],Cps[it].coeff_z[0][1],Cps[it].coeff_z[0][2]);
printf("%g %g %g\n", Cps[it].coeff_z[1][0],Cps[it].coeff_z[1][1],Cps[it].coeff_z[1][2]);
printf("%g %g %g\n", Cps[it].coeff_z[2][0],Cps[it].coeff_z[2][1],Cps[it].coeff_z[2][2]);
printf("\n");
printf("\nIMF\n");
printf("%g %g\n",Cps[it].Mmin,Cps[it].Mmax);
printf("%d\n",Cps[it].n);
for (i=0;i<Cps[it].n;i++)
printf( "ms : %g ",Cps[it].ms[i]);
printf("\n");
for (i=0;i<Cps[it].n+1;i++)
printf( "as : %g ",Cps[it].as[i]);
printf("\n");
printf("\nRate SNII\n");
printf("%g ",Cps[it].SNII_Mmin);
printf("\n");
printf("\nRate SNIa\n");
printf("%g %g\n",Cps[it].SNIa_Mpl,Cps[it].SNIa_Mpu);
printf("%g \n",Cps[it].SNIa_a);
printf("%g %g %g\n",Cps[it].SNIa_Mdl1,Cps[it].SNIa_Mdu1,Cps[it].SNIa_b1);
printf("%g %g %g\n",Cps[it].SNIa_Mdl2,Cps[it].SNIa_Mdu2,Cps[it].SNIa_b2);
printf("\n");
for (i=0;i<Cps[it].nelts+2;i++)
{
printf("> %g %g\n",Elts[it][i].Mmin,Elts[it][i].Step);
for (j=0;j<Cps[it].npts;j++)
{
printf(" %g\n",Elts[it][i].Array[j]);
}
}
printf("\n");
printf("%g\n",Cps[it].Mco);
for (i=0;i<Cps[it].nelts+2;i++)
printf("%g\n",Elts[it][i].MSNIa);
printf("\n");
}
}
/*! This function returns the mass fraction of a star of mass m
* using the current IMF
*/
static double get_imf(double m)
{
int i;
int n;
n = Cp->n;
/* convert m in msol */
m = m*All.UnitMass_in_g / SOLAR_MASS;
if (n==0)
return Cp->bs[0]* pow(m,Cp->as[0]);
else
{
for (i=0;i<n;i++)
if (m < Cp->ms[i])
return Cp->bs[i]* pow(m,Cp->as[i]);
return Cp->bs[n]* pow(m,Cp->as[n]);
}
}
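/*
 * [Illustrative note added for clarity; not part of the original code.]
 * The IMF used here is a piecewise power law: for ms[i-1] <= m < ms[i]
 * (masses in solar masses) the mass fraction per unit mass is
 *    xi(m) = bs[i] * m^as[i],
 * with a single power law bs[0]*m^as[0] when n == 0.  A hypothetical
 * call for a 1 Msol star, expressed in code mass units, would be
 *    double xi = get_imf(1.0 * SOLAR_MASS / All.UnitMass_in_g);
 */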
/*! This function returns the mass fraction between m1 and m2
* per mass unit, using the current IMF
*/
static double get_imf_M(double m1, double m2)
{
int i;
int n;
double p;
double integral=0;
double mmin,mmax;
n = Cp->n;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
m2 = m2*All.UnitMass_in_g / SOLAR_MASS;
if (n==0)
{
p = Cp->as[0]+1;
integral = (Cp->bs[0]/p) * ( pow(m2,p) - pow(m1,p) );
//printf("--> %g %g %g %g int=%g\n",m1,m2,pow(m2,p), pow(m1,p),integral);
}
else
{
integral = 0;
/* first */
if (m1<Cp->ms[0])
{
mmin = m1;
mmax = dmin(Cp->ms[0],m2);
p = Cp->as[0] + 1;
integral += (Cp->bs[0]/p) * ( pow(mmax,p) - pow(mmin,p) );
}
/* last */
if (m2>Cp->ms[n-1])
{
mmin = dmax(Cp->ms[n-1],m1);
mmax = m2;
p = Cp->as[n] + 1;
integral += (Cp->bs[n]/p) * ( pow(mmax,p) - pow(mmin,p) );
}
/* loop over other segments */
for (i=0;i<n-1;i++)
{
mmin = dmax(Cp->ms[i ],m1);
mmax = dmin(Cp->ms[i+1],m2);
if (mmin<mmax)
{
p = Cp->as[i+1] + 1;
integral += (Cp->bs[i+1]/p) * ( pow(mmax,p) - pow(mmin,p) );
}
}
}
/* convert into mass unit */
/* integral = integral * SOLAR_MASS/All.UnitMass_in_g;*/
return integral;
}
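/*
 * [Illustrative note added for clarity; not part of the original code.]
 * On each power-law segment the accumulated term is the analytic integral
 *    int_{ma}^{mb} bs[i]*m^as[i] dm
 *       = bs[i]/(as[i]+1) * ( mb^(as[i]+1) - ma^(as[i]+1) ),
 * i.e. p = as[i]+1 in the code above.  With the normalisation set in
 * init_imf(), get_imf_M() over the full mass range evaluates to 1.
 */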
/*! This function returns the number fraction between m1 and m2
* per mass unit, using the current IMF
*/
static double get_imf_N(double m1, double m2)
{
int i;
int n;
double p;
double integral=0;
double mmin,mmax;
n = Cp->n;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
m2 = m2*All.UnitMass_in_g / SOLAR_MASS;
if (n==0)
{
p = Cp->as[0];
integral = (Cp->bs[0]/p) * ( pow(m2,p) - pow(m1,p) );
}
else
{
integral = 0;
/* first */
if (m1<Cp->ms[0])
{
mmin = m1;
mmax = dmin(Cp->ms[0],m2);
p = Cp->as[0];
integral += (Cp->bs[0]/p) * ( pow(mmax,p) - pow(mmin,p) );
}
/* last */
if (m2>Cp->ms[n-1])
{
mmin = dmax(Cp->ms[n-1],m1);
mmax = m2;
p = Cp->as[n];
integral += (Cp->bs[n]/p) * ( pow(mmax,p) - pow(mmin,p) );
}
/* loop over other segments */
for (i=0;i<n-1;i++)
{
mmin = dmax(Cp->ms[i ],m1);
mmax = dmin(Cp->ms[i+1],m2);
if (mmin<mmax)
{
p = Cp->as[i+1];
integral += (Cp->bs[i+1]/p) * ( pow(mmax,p) - pow(mmin,p) );
}
}
}
/* convert into mass unit */
integral = integral / SOLAR_MASS*All.UnitMass_in_g;
return integral;
}
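/*
 * [Illustrative note added for clarity; not part of the original code.]
 * Since xi(m) is a mass fraction per unit mass, the number of stars per
 * unit mass between m1 and m2 is the integral of xi(m)/m dm, which on each
 * segment gives  bs[i]/as[i] * ( m2^as[i] - m1^as[i] )  (p = as[i] above).
 * The last line converts the result from "per solar mass" to "per code
 * mass unit" by multiplying with UnitMass_in_g/SOLAR_MASS.
 */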
/*! Sample the imf using monte carlo approach
*/
static double imf_sampling()
{
int i;
int n;
double m;
double f;
double pmin,pmax;
n = Cp->n;
/* init random */
//srandom(irand);
f = (double)random()/(double)RAND_MAX;
if (n==0)
{
pmin = pow(Cp->Mmin,Cp->as[0]);
pmax = pow(Cp->Mmax,Cp->as[0]);
m = pow(f*(pmax - pmin) + pmin ,1./Cp->as[0]);
return m* SOLAR_MASS/All.UnitMass_in_g;
}
else
{
if (f<Cp->fs[0])
{
pmin = pow(Cp->Mmin ,Cp->as[0]);
m = pow(Cp->imf_Ntot*Cp->as[0]/Cp->bs[0]* (f-0) + pmin ,1./Cp->as[0]);
return m* SOLAR_MASS/All.UnitMass_in_g;
}
for (i=0;i<n-1;i++)
{
if (f<Cp->fs[i+1])
{
pmin = pow(Cp->ms[i] ,Cp->as[i+1]);
m = pow(Cp->imf_Ntot*Cp->as[i+1]/Cp->bs[i+1]* (f-Cp->fs[i]) + pmin ,1./Cp->as[i+1]);
return m* SOLAR_MASS/All.UnitMass_in_g;
}
}
/* last portion */
pmin = pow(Cp->ms[n-1] ,Cp->as[n]);
m = pow(Cp->imf_Ntot*Cp->as[n]/Cp->bs[n]* (f-Cp->fs[n-1]) + pmin ,1./Cp->as[n]);
return m* SOLAR_MASS/All.UnitMass_in_g;
}
}
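/*
 * [Illustrative note added for clarity; not part of the original code.]
 * imf_sampling() uses inverse-transform sampling: a uniform deviate f in
 * [0,1] is located among the cumulative number fractions fs[i] computed
 * in init_imf(), and on the selected segment the cumulative distribution
 *    F(m) = fs[i] + bs[i+1]/(as[i+1]*imf_Ntot) * ( m^as[i+1] - ms[i]^as[i+1] )
 * is inverted analytically for m.  The returned mass is in code units.
 */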
/*! This function initializes the imf parameters
defined in the chemistry file
*/
void init_imf(void)
{
float integral = 0;
float p;
float cte;
int i,n;
double mmin,mmax;
n = Cp->n;
if (n==0)
{
p = Cp->as[0]+1;
integral = integral + ( pow(Cp->Mmax,p)-pow(Cp->Mmin,p))/(p) ;
Cp->bs[0] = 1./integral ;
}
else
{
cte = 1.0;
if (Cp->Mmin < Cp->ms[0])
{
p = Cp->as[0]+1;
integral = integral + (pow(Cp->ms[0],p) - pow(Cp->Mmin,p))/p;
}
for (i=0;i<n-1;i++)
{
cte = cte* pow( Cp->ms[i],( Cp->as[i] - Cp->as[i+1] ));
p = Cp->as[i+1]+1;
integral = integral + cte*(pow(Cp->ms[i+1],p) - pow(Cp->ms[i],p))/p;
}
if (Cp->Mmax > Cp->ms[n-1]) /* last elbow mass; ms[-1] would read out of bounds */
{
cte = cte* pow( Cp->ms[n-1] , ( Cp->as[n-1] - Cp->as[n] ) );
p = Cp->as[n]+1;
integral = integral + cte*(pow(Cp->Mmax,p) - pow(Cp->ms[n-1],p))/p;
}
/* compute all b */
Cp->bs[0] = 1./integral;
for (i=0;i<n;i++)
{
Cp->bs[i+1] = Cp->bs[i] * pow( Cp->ms[i],( Cp->as[i] - Cp->as[i+1] ));
}
}
if (verbose && ThisTask==0)
{
printf("-- bs -- \n");
for (i=0;i<n+1;i++)
printf("%g ",Cp->bs[i]);
printf("\n");
}
mmin = Cp->Mmin / All.UnitMass_in_g * SOLAR_MASS; /* in mass unit */
mmax = Cp->Mmax / All.UnitMass_in_g * SOLAR_MASS; /* in mass unit */
Cp->imf_Ntot = get_imf_N(mmin,mmax) *SOLAR_MASS/All.UnitMass_in_g;
/* init fs : mass fraction at ms */
if (n>0)
{
for (i=0;i<n+1;i++)
{
mmax = Cp->ms[i] / All.UnitMass_in_g * SOLAR_MASS; /* in mass unit */
Cp->fs[i] = SOLAR_MASS/All.UnitMass_in_g*get_imf_N(mmin,mmax)/Cp->imf_Ntot;
}
}
}
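/*
 * [Illustrative note added for clarity; not part of the original code.]
 * init_imf() fixes the normalisation constants bs[]: bs[0] follows from
 * requiring the integral of xi(m) over [Mmin,Mmax] to equal 1, and each
 *    bs[i+1] = bs[i] * ms[i]^(as[i]-as[i+1])
 * enforces continuity of xi at the elbow mass ms[i].  It also precomputes
 * imf_Ntot (stars per code mass unit) and the cumulative fractions fs[i]
 * used by imf_sampling().
 */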
/*! This function initializes the chemistry parameters
*/
void init_chimie(void)
{
int i,nf;
double u_lt;
double UnitLength_in_kpc;
double UnitMass_in_Msol;
char filename[500];
char ext[100];
/* check some flags */
#ifndef COSMICTIME
if (All.ComovingIntegrationOn)
{
if(ThisTask == 0)
printf("Code wasn't compiled with COSMICTIME support enabled!\n");
endrun(-88800);
}
#endif
UnitLength_in_kpc = All.UnitLength_in_cm / KPC_IN_CM;
UnitMass_in_Msol = All.UnitMass_in_g / SOLAR_MASS;
//u_lt = -log10( 4.7287e11*sqrt(pow(UnitLength_in_kpc,3)/UnitMass_in_Msol));
/*Sat Dec 25 23:27:10 CET 2010 */
u_lt = -log10(All.UnitTime_in_Megayears*1e6);
allocate_chimie();
for (nf=0;nf<All.ChimieNumberOfParameterFiles;nf++)
{
if (All.ChimieNumberOfParameterFiles==1)
sprintf(filename,"%s",All.ChimieParameterFile);
else
sprintf(filename,"%s.%d",All.ChimieParameterFile,nf);
read_chimie(filename,nf);
/* set the table */
set_table(nf);
/* Conversion into program time unit */
Cp->coeff_z[2][2] = Cp->coeff_z[2][2] + u_lt;
for (i=0;i<3;i++)
Cp->coeff_z[1][i] = Cp->coeff_z[1][i]/2.0;
/* init imf parameters */
init_imf();
/* init SNII parameters */
if (Cp->n==0)
{
//Cp->SNII_cte[0] = Cp->bs[0]/Cp->as[0];
Cp->SNII_cte = Cp->bs[0]/Cp->as[0];
Cp->SNII_a = Cp->as[0];
}
else
{
//for (i=0;i<Cp->n+1;i++) /* if multiple power law in the SNII mass range */
// Cp->SNII_cte[i] = Cp->bs[i]/Cp->as[i];
Cp->SNII_cte = Cp->bs[Cp->n]/Cp->as[Cp->n];
Cp->SNII_a = Cp->as[Cp->n];
}
/* init SNIa parameters */
Cp->SNIa_a1 = Cp->SNIa_a;
Cp->SNIa_b1 = (Cp->SNIa_a1+1)/(pow(Cp->SNIa_Mdu1,Cp->SNIa_a1+1)-pow(Cp->SNIa_Mdl1,Cp->SNIa_a1+1));
Cp->SNIa_cte1 = Cp->SNIa_b1/Cp->SNIa_a1;
Cp->SNIa_a2 = Cp->SNIa_a;
Cp->SNIa_b2 = (Cp->SNIa_a2+1)/(pow(Cp->SNIa_Mdu2,Cp->SNIa_a2+1)-pow(Cp->SNIa_Mdl2,Cp->SNIa_a2+1));
Cp->SNIa_cte2 = Cp->SNIa_b2/Cp->SNIa_a2;
/* init SNII parameters */
if (Cp->n==0)
{
Cp->SNIa_cte = Cp->bs[0]/Cp->as[0];
Cp->SNIa_a = Cp->as[0];
}
else
{
Cp->SNIa_cte = Cp->bs[Cp->n]/Cp->as[Cp->n];
Cp->SNIa_a = Cp->as[Cp->n];
}
Cp->SNII_Mmax = Cp->Mmax;
for (i=0;i<Cp->nelts+2;i++)
Elt[i].Mmin = log10(Elt[i].Mmin);
/* output info */
if (verbose && ThisTask==0)
{
printf("-- SNII_cte -- \n");
//for (i=0;i<Cp->n+1;i++)
// printf("%g ",Cp->SNII_cte[i]);
printf("%g ",Cp->SNII_cte);
printf("\n");
}
/* check that the masses are higher than the last IMF elbow */
if (Cp->n>0)
{
if (Cp->SNIa_Mpl < Cp->ms[Cp->n-1])
{
printf("\nSNIa_Mpl = %g < ms[n-1] = %g !!!\n\n",Cp->SNIa_Mpl,Cp->ms[Cp->n-1]);
printf("This is not supported by the current implementation !!!\n");
endrun(88801);
}
if (Cp->SNIa_Mpu < Cp->ms[Cp->n-1])
{
printf("\nSNIa_Mpu = %g < ms[n-1] = %g !!!\n\n",Cp->SNIa_Mpu,Cp->ms[Cp->n-1]);
printf("This is not supported by the current implementation !!!\n");
endrun(88802);
}
if (Cp->SNII_Mmin < Cp->ms[Cp->n-1])
{
printf("\nSNII_Mmin = %g < ms[n-1] = %g !!!\n\n",Cp->SNII_Mmin,Cp->ms[Cp->n-1]);
printf("This is not supported by the current implementation !!!\n");
endrun(88803);
}
if (Cp->SNII_Mmax < Cp->ms[Cp->n-1])
{
printf("\nSNII_Mmax = %g < ms[n-1] = %g !!!\n\n",Cp->SNII_Mmax,Cp->ms[Cp->n-1]);
printf("This is not supported by the current implementation !!!\n");
endrun(88804);
}
}
}
}
/*! This function performs simple checks
* to validate the chemistry initialization
*/
void check_chimie(void)
{
int i;
printf("(Taks=%d) Number of elts : %d\n",ThisTask,Cp->nelts);
for(i=2;i<Cp->nelts+2;i++)
printf("%s ",&Elt[i].label);
printf("\n");
/* check number of elements */
if (NELEMENTS != Cp->nelts)
{
printf("(Taks=%d) NELEMENTS (=%d) != Cp->nelts (=%d) : please check !!!\n\n",ThisTask,NELEMENTS,Cp->nelts);
endrun(88807);
}
/* check that iron is the first element */
if ((strcmp("Fe",Elt[2].label))!=0)
{
printf("(Taks=%d) first element (=%s) is not %s !!!\n\n",ThisTask,Elt[2].label,FIRST_ELEMENT);
endrun(88808);
}
}
/*! Return the number of elements considered
*/
int get_nelts()
{
return Cp->nelts;
}
/*! Return the solar abundance of elt i
*/
float get_SolarAbundance(int i)
{
return Elt[i+2].SolarAbundance;
}
/*! Return the label of element i
*/
char* get_Element(int i)
{
return Elt[i+2].label;
}
/*! Return the lifetime of a star of mass m and metallicity z
*/
double star_lifetime(double z,double m)
{
/* z is the mass fraction of metals, ie, the metallicity */
/* m is the stellar mass in code unit */
/* Return t in code time unit */
int i;
double a,b,c;
double coeff[3];
double logm,twologm,logm2,time;
/* convert m in msol */
m = m*All.UnitMass_in_g / SOLAR_MASS;
for (i=0;i<3;i++)
coeff[i] = ( Cp->coeff_z[i][0]*z+Cp->coeff_z[i][1] )*z+Cp->coeff_z[i][2];
a = coeff[0];
b = coeff[1];
c = coeff[2];
logm = log10(m);
twologm = 2.0 * logm;
logm2 = logm*logm;
time = pow(10.,(a*logm2+b*twologm+c));
return time;
}
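/*
 * [Illustrative note added for clarity; not part of the original code.]
 * The lifetime fit evaluated above is
 *    log10(t) = a(z)*log10(m)^2 + 2*b(z)*log10(m) + c(z),
 * where each coefficient is a quadratic in the metallicity z evaluated in
 * Horner form, e.g. a(z) = (coeff_z[0][0]*z + coeff_z[0][1])*z + coeff_z[0][2].
 * The division of the second coefficient row by 2 in init_chimie()
 * compensates for the factor 2*log10(m) used here.
 */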
/*! Return the mass of a star having a lifetime t and a metallicity z
*/
double star_mass_from_age(double z,double t)
{
/* z is the mass fraction of metals, ie, the metallicity */
/* t is the star life time */
/* return the stellar mass (in code unit) that has a lifetime equal to t */
/* this is the inverse of star_lifetime */
int i;
double a,b,c;
double coeff[3];
double m;
for (i=0;i<3;i++)
coeff[i] = ( Cp->coeff_z[i][0]*z+Cp->coeff_z[i][1] )*z+Cp->coeff_z[i][2];
a = coeff[0];
b = coeff[1];
c = coeff[2];
m = -(b+sqrt(b*b-a*(c-log10(t))))/a;
m = pow(10,m); /* here, m is in solar mass */
m = m*SOLAR_MASS/All.UnitMass_in_g; /* Msol to mass unit */
return m;
}
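/*
 * [Illustrative note added for clarity; not part of the original code.]
 * Solving  log10(t) = a*x^2 + 2*b*x + c  for x = log10(m) gives
 *    x = ( -b - sqrt( b^2 - a*(c - log10(t)) ) ) / a,
 * which is the root taken above.  Consequently,
 *    star_mass_from_age(z, star_lifetime(z, m))
 * should recover m, up to floating-point rounding.
 */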
/****************************************************************************************/
/*
/* Supernova rate : number of supernova per mass unit
/*
/****************************************************************************************/
double DYIN_rate(double m1,double m2)
{
/*
compute the number of stars between m1 and m2
masses in code unit
*/
double RDYIN;
double md,mu;
/* find md, mu */
md = dmin(m1,Cp->SNII_Mmin/All.UnitMass_in_g * SOLAR_MASS);
mu = dmin(m2,Cp->SNII_Mmin/All.UnitMass_in_g * SOLAR_MASS);
if (mu<=md) /* no dying stars in that mass range */
return 0.0;
RDYIN = get_imf_N(md,mu);
return RDYIN;
}
double SNII_rate(double m1,double m2)
{
/*
compute the number of SNII between m1 and m2
masses in code unit
*/
double RSNII;
double md,mu;
RSNII = 0.0;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
m2 = m2*All.UnitMass_in_g / SOLAR_MASS;
/* (1) find md, mu */
md = dmax(m1,Cp->SNII_Mmin);
mu = dmin(m2,Cp->SNII_Mmax);
if (mu<=md) /* no SNII in that mass range */
return 0.0;
/* !!!!! here we should use get_imf_N !!!! */
/* to ensure the full imf */
RSNII = Cp->SNII_cte * (pow(mu,Cp->SNII_a)-pow(md,Cp->SNII_a)); /* number per solar mass */
/* convert in number per solar mass to number per mass unit */
RSNII = RSNII *All.UnitMass_in_g / SOLAR_MASS;
return RSNII;
}
double SNIa_rate(double m1,double m2)
{
/*
compute the number of SNIa between m1 and m2
masses in code unit
*/
double RSNIa;
double md,mu;
RSNIa = 0.0;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
m2 = m2*All.UnitMass_in_g / SOLAR_MASS;
/* RG contribution */
md = dmax(m1,Cp->SNIa_Mdl1);
mu = dmin(m2,Cp->SNIa_Mdu1);
if (md<mu)
RSNIa = RSNIa + Cp->SNIa_bb1 * Cp->SNIa_cte1 * (pow(mu,Cp->SNIa_a1)-pow(md,Cp->SNIa_a1));
/* MS contribution */
md = dmax(m1,Cp->SNIa_Mdl2);
mu = dmin(m2,Cp->SNIa_Mdu2);
if (md<mu)
RSNIa = RSNIa + Cp->SNIa_bb2 * Cp->SNIa_cte2 * (pow(mu,Cp->SNIa_a2)-pow(md,Cp->SNIa_a2));
/* WD contribution */
md = dmax(m1,Cp->SNIa_Mpl); /* select stars that have finished their life -> WD */
mu = Cp->SNIa_Mpu; /* upper bound is Mpu, not m2 */
if (mu<=md) /* no SNIa in that mass range */
return 0.0;
RSNIa = RSNIa * Cp->SNIa_cte * (pow(mu,Cp->SNIa_a)-pow(md,Cp->SNIa_a)); /* number per solar mass */
/* convert in number per solar mass to number per mass unit */
RSNIa = RSNIa *All.UnitMass_in_g / SOLAR_MASS;
return RSNIa;
}
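/*
 * [Illustrative note added for clarity; not part of the original code.]
 * Structure of SNIa_rate(): the RG and MS terms accumulate the companion
 * contribution of stars in [m1,m2] (two power laws restricted to
 * [Mdl1,Mdu1] and [Mdl2,Mdu2]); the result is then multiplied by the term
 * counting primaries that have already become white dwarfs (masses from
 * SNIa_Mpl up to SNIa_Mpu).  As in SNII_rate(), the return value is a
 * number of events per code mass unit.
 */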
void DYIN_mass_ejection(double m1,double m2)
{
/*
Compute the mass fraction and yields of dying stars with masses between m1 and m2.
Store the result in the global variable ``MassFracDYIN``::
MassFracDYIN[0] = total gas
MassFracDYIN[1] = helium core (i.e. alpha(m))
MassFracDYIN[i] = frac mass elt i.
*/
double l1,l2;
int i1,i2,i1p,i2p,j;
double f1,f2;
double v1,v2;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
m2 = m2*All.UnitMass_in_g / SOLAR_MASS;
/* this was not in Poirier... */
m1 = dmin(m1,Cp->SNII_Mmin);
m2 = dmin(m2,Cp->SNII_Mmin);
if (m1>=m2)
{
for (j=0;j<Cp->nelts+2;j++)
MassFracDYIN[j] = 0;
return;
}
j = 0;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
l2 = ( log10(m2) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
if (l2 < 0.0) l2 = 0.0;
i1 = (int)l1;
i2 = (int)l2;
i1p = i1 + 1;
i2p = i2 + 1;
f1 = l1 - i1;
f2 = l2 - i2;
/* check (yr) */
if (i1<0) i1=0;
if (i2<0) i2=0;
/* --------- TOTAL GAS ---------- */
j = 0;
v1 = f1 * ( Elt[j].Array[i1p] - Elt[j].Array[i1] ) + Elt[j].Array[i1];
v2 = f2 * ( Elt[j].Array[i2p] - Elt[j].Array[i2] ) + Elt[j].Array[i2];
MassFracDYIN[j] = v2-v1;
/* --------- He core therm ---------- */
j = 1;
v1 = f1 * ( Elt[j].Array[i1p] - Elt[j].Array[i1] ) + Elt[j].Array[i1];
v2 = f2 * ( Elt[j].Array[i2p] - Elt[j].Array[i2] ) + Elt[j].Array[i2];
MassFracDYIN[j] = v2-v1;
/* ---------------------------- */
/* --------- Metals ---------- */
/* ---------------------------- */
j = 2;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
l2 = ( log10(m2) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
if (l2 < 0.0) l2 = 0.0;
i1 = (int)l1;
i2 = (int)l2;
i1p = i1 + 1;
i2p = i2 + 1;
f1 = l1 - i1;
f2 = l2 - i2;
/* check (yr) */
if (i1<0) i1=0;
if (i2<0) i2=0;
for (j=2;j<Cp->nelts+2;j++)
{
v1 = f1 * ( Elt[j].Array[i1p] - Elt[j].Array[i1] ) + Elt[j].Array[i1];
v2 = f2 * ( Elt[j].Array[i2p] - Elt[j].Array[i2] ) + Elt[j].Array[i2];
MassFracDYIN[j] = v2-v1;
}
}
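/*
 * [Illustrative note added for clarity; not part of the original code.]
 * Table lookup used above and in the SNII routines below: the cumulative
 * ejecta tables Elt[j].Array[] are indexed in log mass,
 *    l = ( log10(m) - Elt[j].Mmin ) / Elt[j].Step,
 * and linearly interpolated between the two neighbouring entries; the
 * fraction ejected between m1 and m2 is the difference of the two
 * interpolated cumulative values.
 */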
void DYIN_single_mass_ejection(double m1)
{
/*
Compute the mass fraction and yields of a dying star of mass m1.
Store the result in the global variable ``SingleMassFracDYIN``::
SingleMassFracDYIN[0] = total gas
SingleMassFracDYIN[1] = helium core (i.e. alpha(m))
SingleMassFracDYIN[i] = frac mass elt i.
*/
double l1;
int i1,i1p,j;
double f1;
double v1;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
/* this was not in Poirier... */
if ( (m1>=Cp->SNII_Mmin) )
{
for (j=0;j<Cp->nelts+2;j++)
SingleMassFracDYIN[j] = 0;
return;
}
j = 0;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
i1 = (int)l1;
i1p = i1 + 1;
f1 = l1 - i1;
/* check (yr) */
if (i1<0) i1=0;
/* --------- TOTAL GAS ---------- */
j = 0;
v1 = f1 * ( Elt[j].Metal[i1p] - Elt[j].Metal[i1] ) + Elt[j].Metal[i1];
SingleMassFracDYIN[j] = v1;
/* --------- He core therm ---------- */
j = 1;
v1 = f1 * ( Elt[j].Metal[i1p] - Elt[j].Metal[i1] ) + Elt[j].Metal[i1];
SingleMassFracDYIN[j] = v1;
/* ---------------------------- */
/* --------- Metals ---------- */
/* ---------------------------- */
j = 2;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
i1 = (int)l1;
i1p = i1 + 1;
f1 = l1 - i1;
/* check (yr) */
if (i1<0) i1=0;
for (j=2;j<Cp->nelts+2;j++)
{
v1 = f1 * ( Elt[j].Metal[i1p] - Elt[j].Metal[i1] ) + Elt[j].Metal[i1];
SingleMassFracDYIN[j] = v1;
}
}
void SNII_mass_ejection(double m1,double m2)
{
/*
.. warning:: here, we do not limit the computation to SNII !!!
Compute the mass fraction and yields of SNII stars with masses between m1 and m2.
Store the result in the global variable ``MassFracSNII``::
MassFracSNII[0] = total gas
MassFracSNII[1] = 1-helium core (i.e. non processed elts)
MassFracSNII[i] = frac mass elt i.
*/
double l1,l2;
int i1,i2,i1p,i2p,j;
double f1,f2;
double v1,v2;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
m2 = m2*All.UnitMass_in_g / SOLAR_MASS;
/* this was not in Poirier... */
m1 = dmax(m1,Cp->SNII_Mmin);
m2 = dmin(m2,Cp->SNII_Mmax);
if ( m2<=m1 )
{
for (j=0;j<Cp->nelts+2;j++)
MassFracSNII[j] = 0;
return;
}
j = 0;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
l2 = ( log10(m2) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
if (l2 < 0.0) l2 = 0.0;
i1 = (int)l1;
i2 = (int)l2;
i1p = i1 + 1;
i2p = i2 + 1;
f1 = l1 - i1;
f2 = l2 - i2;
/* check (yr) */
if (i1<0) i1=0;
if (i2<0) i2=0;
/* --------- TOTAL GAS ---------- */
j = 0;
v1 = f1 * ( Elt[j].Array[i1p] - Elt[j].Array[i1] ) + Elt[j].Array[i1];
v2 = f2 * ( Elt[j].Array[i2p] - Elt[j].Array[i2] ) + Elt[j].Array[i2];
MassFracSNII[j] = v2-v1;
/* --------- He core therm ---------- */
j = 1;
v1 = f1 * ( Elt[j].Array[i1p] - Elt[j].Array[i1] ) + Elt[j].Array[i1];
v2 = f2 * ( Elt[j].Array[i2p] - Elt[j].Array[i2] ) + Elt[j].Array[i2];
MassFracSNII[j] = v2-v1;
/* ---------------------------- */
/* --------- Metals ---------- */
/* ---------------------------- */
j = 2;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
l2 = ( log10(m2) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
if (l2 < 0.0) l2 = 0.0;
i1 = (int)l1;
i2 = (int)l2;
i1p = i1 + 1;
i2p = i2 + 1;
f1 = l1 - i1;
f2 = l2 - i2;
/* check (yr) */
if (i1<0) i1=0;
if (i2<0) i2=0;
for (j=2;j<Cp->nelts+2;j++)
{
v1 = f1 * ( Elt[j].Array[i1p] - Elt[j].Array[i1] ) + Elt[j].Array[i1];
v2 = f2 * ( Elt[j].Array[i2p] - Elt[j].Array[i2] ) + Elt[j].Array[i2];
MassFracSNII[j] = v2-v1;
}
}
void SNII_single_mass_ejection(double m1)
{
/*
.. warning:: here, we do not limit the computation to SNII !!!
Compute the mass fraction and yields of a SNII star of mass m1.
Store the result in the global variable ``SingleMassFracSNII``::
SingleMassFracSNII[0] = total gas
SingleMassFracSNII[1] = 1-helium core (i.e. non processed elts)
SingleMassFracSNII[i] = frac mass elt i.
*/
double l1;
int i1,i1p,j;
double f1;
double v1;
/* convert m in msol */
m1 = m1*All.UnitMass_in_g / SOLAR_MASS;
/* this was not in Poirier... */
if ( m1<= Cp->SNII_Mmin)
{
for (j=0;j<Cp->nelts+2;j++)
SingleMassFracSNII[j] = 0;
return;
}
j = 0;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
i1 = (int)l1;
i1p = i1 + 1;
f1 = l1 - i1;
/* check (yr) */
if (i1<0) i1=0;
/* --------- TOTAL GAS ---------- */
j = 0;
v1 = f1 * ( Elt[j].Metal[i1p] - Elt[j].Metal[i1] ) + Elt[j].Metal[i1];
SingleMassFracSNII[j] = v1;
/* --------- He core therm ---------- */
j = 1;
v1 = f1 * ( Elt[j].Metal[i1p] - Elt[j].Metal[i1] ) + Elt[j].Metal[i1];
SingleMassFracSNII[j] = v1;
/* ---------------------------- */
/* --------- Metals ---------- */
/* ---------------------------- */
j = 2;
l1 = ( log10(m1) - Elt[j].Mmin) / Elt[j].Step ;
if (l1 < 0.0) l1 = 0.0;
i1 = (int)l1;
i1p = i1 + 1;
f1 = l1 - i1;
/* check (yr) */
if (i1<0) i1=0;
for (j=2;j<Cp->nelts+2;j++)
{
v1 = f1 * ( Elt[j].Metal[i1p] - Elt[j].Metal[i1] ) + Elt[j].Metal[i1];
SingleMassFracSNII[j] = v1;
}
}
void SNIa_mass_ejection(double m1,double m2)
{
/*
Compute the total mass and element mass per mass unit of SNIa stars with masses between m1 and m2.
Store the result in the global variable ``MassFracSNIa``::
MassFracSNIa[0] = total gas
MassFracSNIa[1] = unused
MassFracSNIa[i] = frac mass elt i.
*/
int j;
double NSNIa;
/* number of SNIa per mass unit between time and time+dt */
NSNIa = SNIa_rate(m1,m2);
/* ejected mass in gas per mass unit */
MassFracSNIa[0] = Cp->Mco/All.UnitMass_in_g*SOLAR_MASS * NSNIa;
/* ejected elements in gas per mass unit */
for (j=2;j<Cp->nelts+2;j++)
MassFracSNIa[j] = NSNIa* Elt[j].MSNIa/All.UnitMass_in_g*SOLAR_MASS;
/* unused */
MassFracSNIa[1]=-1;
}
void SNIa_single_mass_ejection(double m1)
{
/*
Compute the total gas mass and element masses ejected by a SNIa star of mass m1.
Store the result in the global variable ``SingleMassFracSNIa``::
SingleMassFracSNIa[0] = total gas
SingleMassFracSNIa[1] = unused
SingleMassFracSNIa[i] = frac mass elt i.
*/
int j;
/* total ejected gas mass */
SingleMassFracSNIa[0] = Cp->Mco/All.UnitMass_in_g*SOLAR_MASS;
/* ejected mass per element */
for (j=2;j<Cp->nelts+2;j++)
SingleMassFracSNIa[j] = Elt[j].MSNIa/All.UnitMass_in_g*SOLAR_MASS;
/* unused */
SingleMassFracSNIa[1] = -1;
}
void Total_mass_ejection(double m1,double m2,double M0,double *z)
{
/*
Sum the contribution in mass and yields of all stars in the mass range m1,m2.
Store the result in the global variable EjectedMass::
EjectedMass[0] = total gas
EjectedMass[1] = UNUSED
EjectedMass[i+2] = frac mass elt i.
FOR THE MOMENT::
- contrib of SNII (= all stars)
- contrib of SNIa
EjectedMass[0] = ejected Mass from SNII + Mco * number of SNIa
EjectedMass[i] = (SNII elts created ) + (SNII elts existing) + (SNIa elts)
*/
int j;
/* compute SNII mass ejection -> MassFracSNII */
SNII_mass_ejection(m1,m2);
/* compute SNIa mass ejection -> MassFracSNIa */ /* not really a mass fraction */
SNIa_mass_ejection(m1,m2);
/* compute DYIN mass ejection -> MassFracDYIN */ /* not really a mass fraction */
DYIN_mass_ejection(m1,m2);
/* total ejected gas mass */
EjectedMass[0] = M0 * ( MassFracDYIN[0] + MassFracSNII[0] + MassFracSNIa[0] );
/* ejected mass per element */
for (j=2;j<Cp->nelts+2;j++)
EjectedMass[j] = M0*( MassFracDYIN[j] +z[j-2]*MassFracDYIN[1] + MassFracSNII[j] +z[j-2]*MassFracSNII[1] + MassFracSNIa[j] );
/* not used */
EjectedMass[1] = -1;
}
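/*
 * [Illustrative note added for clarity; not part of the original code.]
 * For each element j the ejecta combine freshly synthesised material
 * (MassFrac...[j]) and pre-enriched gas returned unprocessed (the initial
 * abundance z[j-2] times the non-processed fraction MassFrac...[1]):
 *    EjectedMass[j] = M0 * ( MassFracDYIN[j] + z[j-2]*MassFracDYIN[1]
 *                          + MassFracSNII[j] + z[j-2]*MassFracSNII[1]
 *                          + MassFracSNIa[j] ).
 */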
void DYIN_Total_single_mass_ejection(double m1,double *z)
{
/*
Mass and elements ejected by a single dying star of mass m1.
This takes into account processed and non-processed gas.
The results are stored in::
SingleEjectedMass[0] = gas mass
SingleEjectedMass[1] = unused
SingleEjectedMass[i+2] = frac mass elt i
*/
int j;
float M0;
M0 = m1;
/* compute dying stars mass ejection -> SingleMassFracDYIN */
DYIN_single_mass_ejection(m1);
/* total ejected gas mass */
SingleEjectedMass[0] = M0 * SingleMassFracDYIN[0];
/* ejected mass per element */
for (j=2;j<Cp->nelts+2;j++)
SingleEjectedMass[j] = M0*(SingleMassFracDYIN[j] +z[j-2]*SingleMassFracDYIN[1]);
/* not used */
SingleEjectedMass[1] = -1;
}
void SNII_Total_single_mass_ejection(double m1,double *z)
{
/*
Mass and elements ejected by a single SNII of mass m1.
This takes into account processed and non-processed gas.
The results are stored in::
SingleEjectedMass[0] = gas mass
SingleEjectedMass[1] = unused
SingleEjectedMass[i+2] = frac mass elt i
*/
int j;
float M0;
M0 = m1;
/* compute SNII mass ejection -> SingleMassFracSNII */
SNII_single_mass_ejection(m1);
/* total ejected gas mass */
SingleEjectedMass[0] = M0 * SingleMassFracSNII[0];
/* ejected mass per element */
for (j=2;j<Cp->nelts+2;j++)
SingleEjectedMass[j] = M0*(SingleMassFracSNII[j] +z[j-2]*SingleMassFracSNII[1]);
/* not used */
SingleEjectedMass[1] = -1;
}
void SNIa_Total_single_mass_ejection(double m1, double *z)
{
int j;
/*
Mass and elements ejected by a single SNIa of mass m1.
The results are stored in::
SingleEjectedMass[0] = gas mass
SingleEjectedMass[1] = unused
SingleEjectedMass[i+2] = frac mass elt i
*/
/* compute SNIa mass ejection -> SingleMassFracSNIa */
SNIa_single_mass_ejection(m1);
/* total ejected gas mass */
SingleEjectedMass[0] = SingleMassFracSNIa[0];
/* ejected mass per element */
for (j=2;j<Cp->nelts+2;j++)
SingleEjectedMass[j] = SingleMassFracSNIa[j];
}
void Total_single_mass_ejection(double m1,double *z,double NSNII,double NSNIa,double NDYIN)
{
/*
Sum the contribution in mass and yields of one star for mass m1.
Store the result in the global variable EjectedMass::
SingleEjectedMass[0] = total gas
SingleEjectedMass[1] = UNUSED
SingleEjectedMass[i+2] = frac mass elt i.
FOR THE MOMENT::
- contrib of SNII (= all stars)
- contrib of SNIa
SingleEjectedMass[0] = ejected Mass from SNII + Mco * number of SNIa
SingleEjectedMass[i] = (SNII elts created ) + (SNII elts existing) + (SNIa elts)
*/
int j;
float M0;
M0 = m1;
/* compute SNII mass ejection -> SingleMassFracSNII */
SNII_single_mass_ejection(m1);
/* compute SNIa mass ejection -> SingleMassFracSNIa */
SNIa_single_mass_ejection(m1);
/* compute DYIN mass ejection -> SingleMassFracDYIN */
DYIN_single_mass_ejection(m1);
/* total ejected gas mass */
SingleEjectedMass[0] = M0 * ( SingleMassFracDYIN[0]*NDYIN + SingleMassFracSNII[0]*NSNII ) + SingleMassFracSNIa[0]*NSNIa;
/* ejected mass per element */
for (j=2;j<Cp->nelts+2;j++)
SingleEjectedMass[j] = M0*( SingleMassFracDYIN[j]*NDYIN +z[j-2]*SingleMassFracDYIN[1]*NDYIN + SingleMassFracSNII[j]*NSNII +z[j-2]*SingleMassFracSNII[1]*NSNII ) + SingleMassFracSNIa[j]*NSNIa;
/* not used */
SingleEjectedMass[1] = -1;
}
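/*
 * [Illustrative note added for clarity; not part of the original code.]
 * Same bookkeeping as Total_mass_ejection(), but per individual event:
 * the single-star fractions are weighted by the (possibly discretised)
 * event counts NDYIN, NSNII and NSNIa.  The SNIa contributions returned
 * by SNIa_single_mass_ejection() are absolute masses rather than
 * fractions of M0, which is why they are added outside the M0*(...)
 * factor.
 */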
/****************************************************************************************/
/*
/*
/*
/* GADGET ONLY PART
/*
/*
/*
/****************************************************************************************/
static double hubble_a, atime, hubble_a2, fac_mu, fac_vsic_fix, a3inv, fac_egy;
#ifdef FEEDBACK
static double fac_pow;
#endif
#ifdef PERIODIC
static double boxSize, boxHalf;
#ifdef LONG_X
static double boxSize_X, boxHalf_X;
#else
#define boxSize_X boxSize
#define boxHalf_X boxHalf
#endif
#ifdef LONG_Y
static double boxSize_Y, boxHalf_Y;
#else
#define boxSize_Y boxSize
#define boxHalf_Y boxHalf
#endif
#ifdef LONG_Z
static double boxSize_Z, boxHalf_Z;
#else
#define boxSize_Z boxSize
#define boxHalf_Z boxHalf
#endif
#endif
#if defined(CHIMIE_THERMAL_FEEDBACK) && defined(CHIMIE_COMPUTE_THERMAL_FEEDBACK_ENERGY)
void chimie_compute_energy_int(int mode)
{
int i;
double DeltaEgyInt;
double Tot_DeltaEgyInt;
DeltaEgyInt = 0;
Tot_DeltaEgyInt = 0;
if (mode==1)
{
LocalSysState.EnergyInt1 = 0;
LocalSysState.EnergyInt2 = 0;
}
for(i = 0; i < N_gas; i++)
{
if (P[i].Type==0)
{
if (mode==1)
LocalSysState.EnergyInt1 += P[i].Mass * SphP[i].EntropyPred / (GAMMA_MINUS1) * pow(SphP[i].Density*a3inv, GAMMA_MINUS1);
else
LocalSysState.EnergyInt2 += P[i].Mass * SphP[i].EntropyPred / (GAMMA_MINUS1) * pow(SphP[i].Density*a3inv, GAMMA_MINUS1);
}
}
if (mode==2)
{
DeltaEgyInt = LocalSysState.EnergyInt2 - LocalSysState.EnergyInt1;
MPI_Reduce(&DeltaEgyInt, &Tot_DeltaEgyInt, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
LocalSysState.EnergyThermalFeedback -= DeltaEgyInt;
}
}
#endif
#if defined(CHIMIE_KINETIC_FEEDBACK) && defined(CHIMIE_COMPUTE_KINETIC_FEEDBACK_ENERGY)
void chimie_compute_energy_kin(int mode)
{
int i;
double DeltaEgyKin;
double Tot_DeltaEgyKin;
DeltaEgyKin = 0;
Tot_DeltaEgyKin = 0;
if (mode==1)
{
LocalSysState.EnergyKin1 = 0;
LocalSysState.EnergyKin2 = 0;
}
for(i = 0; i < N_gas; i++)
{
if (P[i].Type==0)
{
if (mode==1)
LocalSysState.EnergyKin1 += 0.5 * P[i].Mass * (P[i].Vel[0]*P[i].Vel[0]+P[i].Vel[1]*P[i].Vel[1]+P[i].Vel[2]*P[i].Vel[2]);
else
LocalSysState.EnergyKin2 += 0.5 * P[i].Mass * (P[i].Vel[0]*P[i].Vel[0]+P[i].Vel[1]*P[i].Vel[1]+P[i].Vel[2]*P[i].Vel[2]);
}
}
if (mode==2)
{
DeltaEgyKin = LocalSysState.EnergyKin2 - LocalSysState.EnergyKin1;
MPI_Reduce(&DeltaEgyKin, &Tot_DeltaEgyKin, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
LocalSysState.EnergyKineticFeedback -= DeltaEgyKin;
}
}
#endif
#ifdef CHIMIE_THERMAL_FEEDBACK
void chimie_apply_thermal_feedback(void)
{
int i;
double EgySpec,NewEgySpec,DeltaEntropy;
for(i = 0; i < N_gas; i++)
{
if (P[i].Type==0)
{
if (SphP[i].DeltaEgySpec > 0)
{
-
+
+
+ printf("(%d) Step=%d i=%08d particle receive feedback\n",ThisTask,All.NumCurrentTiStep,i);
+
+
/* spec energy at current step */
EgySpec = SphP[i].EntropyPred / GAMMA_MINUS1 * pow(SphP[i].Density*a3inv, GAMMA_MINUS1);
/* new egyspec */
NewEgySpec = EgySpec + SphP[i].DeltaEgySpec;
LocalSysState.EnergyThermalFeedback -= SphP[i].DeltaEgySpec*P[i].Mass;
/* new entropy */
DeltaEntropy = GAMMA_MINUS1*NewEgySpec/pow(SphP[i].Density*a3inv, GAMMA_MINUS1) - SphP[i].EntropyPred;
SphP[i].EntropyPred += DeltaEntropy;
SphP[i].Entropy += DeltaEntropy;
/* set the adiabatic period for SNIa */
if (SphP[i].NumberOfSNIa>0)
SphP[i].SNIaThermalTime = All.Time;
/* set the adiabatic period for SNII */
if (SphP[i].NumberOfSNII>0)
SphP[i].SNIIThermalTime = All.Time;
/* reset variables */
SphP[i].DeltaEgySpec = 0;
SphP[i].NumberOfSNIa = 0;
SphP[i].NumberOfSNII = 0;
- if (P[i].Ti_endstep == All.Ti_Current)
- printf("The particle is +++++ %d %d %g\n",All.Ti_Current,P[i].Ti_endstep,P[i].Ti_endstep* All.Timebase_interval);
- else
- printf("The particle is ----- %d %d %g\n",All.Ti_Current,P[i].Ti_endstep,P[i].Ti_endstep* All.Timebase_interval);
-
-
-
-
+
}
}
}
}
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
void chimie_apply_wind(void)
{
/* apply wind */
int i;
double e1,e2;
double phi,costh,sinth,vx,vy,vz;
for(i = 0; i < N_gas; i++)
{
if (P[i].Type==0)
{
if (SphP[i].WindFlag)
{
phi = get_ChimieKineticFeedback_random_number(P[i].ID)*PI*2.;
costh = 1.-2.*get_ChimieKineticFeedback_random_number(P[i].ID+1);
sinth = sqrt(1.-pow(costh,2));
vx = All.ChimieWindSpeed*sinth*cos(phi);
vy = All.ChimieWindSpeed*sinth*sin(phi);
vz = All.ChimieWindSpeed*costh;
e1 = 0.5*P[i].Mass * ( SphP[i].VelPred[0]*SphP[i].VelPred[0] + SphP[i].VelPred[1]*SphP[i].VelPred[1] + SphP[i].VelPred[2]*SphP[i].VelPred[2]);
P[i].Vel[0] += vx;
P[i].Vel[1] += vy;
P[i].Vel[2] += vz;
SphP[i].VelPred[0] += vx;
SphP[i].VelPred[1] += vy;
SphP[i].VelPred[2] += vz;
e2 = 0.5*P[i].Mass * ( SphP[i].VelPred[0]*SphP[i].VelPred[0] + SphP[i].VelPred[1]*SphP[i].VelPred[1] + SphP[i].VelPred[2]*SphP[i].VelPred[2]);
LocalSysState.EnergyKineticFeedback -= e2-e1;
SphP[i].WindFlag = 0;
}
}
}
}
#endif
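/*
 * [Illustrative note added for clarity; not part of the original code.]
 * chimie_apply_wind() gives each flagged gas particle an isotropic kick
 * of magnitude All.ChimieWindSpeed: phi is drawn uniformly in [0,2*pi)
 * and cos(theta) uniformly in [-1,1], which samples directions uniformly
 * on the sphere.  The resulting kinetic-energy change e2-e1 is charged
 * to LocalSysState.EnergyKineticFeedback.
 */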
/*! This function is the driver routine for the calculation of chemical evolution
*/
void chimie(void)
{
double t0, t1;
t0 = second(); /* measure the time for the full chimie computation */
if (ThisTask==0)
printf("Start Chimie computation.\n");
if(All.ComovingIntegrationOn)
{
/* Factors for comoving integration of hydro */
hubble_a = All.Omega0 / (All.Time * All.Time * All.Time)
+ (1 - All.Omega0 - All.OmegaLambda) / (All.Time * All.Time) + All.OmegaLambda;
hubble_a = All.Hubble * sqrt(hubble_a);
hubble_a2 = All.Time * All.Time * hubble_a;
fac_mu = pow(All.Time, 3 * (GAMMA - 1) / 2) / All.Time;
fac_egy = pow(All.Time, 3 * (GAMMA - 1));
fac_vsic_fix = hubble_a * pow(All.Time, 3 * GAMMA_MINUS1);
a3inv = 1 / (All.Time * All.Time * All.Time);
atime = All.Time;
#ifdef FEEDBACK
fac_pow = fac_egy*atime*atime;
#endif
}
else
{
hubble_a = hubble_a2 = atime = fac_mu = fac_vsic_fix = a3inv = fac_egy = 1.0;
#ifdef FEEDBACK
fac_pow = 1.0;
#endif
}
-
-
-
-/* apply thermal feedback on selected particles */
-#ifdef CHIMIE_THERMAL_FEEDBACK
- chimie_apply_thermal_feedback();
-#endif
-
- /* apply wind on selected particles */
-#ifdef CHIMIE_KINETIC_FEEDBACK
- chimie_apply_wind();
-#endif
-
-
-
stars_density(); /* compute density */
#ifdef CHIMIE_ONE_SN_ONLY
if(All.ChimieOneSN==0) /* explode only if not one sn only*/
#endif
do_chimie(); /* chimie */
- else
- printf("%d before chimie computation done. \n",ThisTask);
-
if (ThisTask==0)
printf("Chimie computation done.\n");
t1 = second();
All.CPU_Chimie += timediff(t0, t1);
}
/*! This function is the driver routine for the calculation of chemical evolution
*/
void do_chimie(void)
{
long long ntot, ntotleft;
int i, j, k, n, m, ngrp, maxfill, source, ndone;
int *nbuffer, *noffset, *nsend_local, *nsend, *numlist, *ndonelist;
int level, sendTask, recvTask, nexport, place;
double tstart, tend, sumt, sumcomm;
double timecomp = 0, timecommsumm = 0, timeimbalance = 0, sumimbalance;
int flag_chimie;
MPI_Status status;
int do_it;
int Ti0,Ti1,Ti2;
double t1,t2,t01,t02;
double tmin,tmax;
double minlivetime,maxlivetime;
double m1,m2,M0;
double NSNIa,NSNII,NDYIN;
double NSNIa_tot,NSNII_tot,NDYIN_tot,NSNIa_totlocal,NSNII_totlocal,NDYIN_totlocal;
double EgySN,EgySNlocal;
double EgySNThermal,EgySNKinetic;
int Nchim,Nchimlocal;
int Nwind,Nwindlocal;
int Nflag,Nflaglocal;
int Noldwind,Noldwindlocal;
double metals[NELEMENTS];
double FeH;
float MinRelMass=1e-3;
#ifdef DETAILED_CPU_OUTPUT_IN_CHIMIE
double *timecomplist;
double *timecommsummlist;
double *timeimbalancelist;
#endif
#ifdef PERIODIC
boxSize = All.BoxSize;
boxHalf = 0.5 * All.BoxSize;
#ifdef LONG_X
boxHalf_X = boxHalf * LONG_X;
boxSize_X = boxSize * LONG_X;
#endif
#ifdef LONG_Y
boxHalf_Y = boxHalf * LONG_Y;
boxSize_Y = boxSize * LONG_Y;
#endif
#ifdef LONG_Z
boxHalf_Z = boxHalf * LONG_Z;
boxSize_Z = boxSize * LONG_Z;
#endif
#endif
#ifdef COMPUTE_VELOCITY_DISPERSION
double v1m,v2m;
#endif
/* `NumStUpdate' gives the number of particles on this processor that want a chimie computation */
for(n = 0, NumStUpdate = 0; n < N_gas+N_stars; n++)
{
if(P[n].Ti_endstep == All.Ti_Current)
if(P[n].Type == ST)
{
m = P[n].StPIdx;
if ( (P[n].Mass/StP[m].InitialMass) > MinRelMass)
NumStUpdate++;
}
if(P[n].Type == 0)
SphP[n].dMass = 0.;
}
numlist = malloc(NTask * sizeof(int) * NTask);
MPI_Allgather(&NumStUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD);
for(i = 0, ntot = 0; i < NTask; i++)
ntot += numlist[i];
free(numlist);
noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */
nbuffer = malloc(sizeof(int) * NTask);
nsend_local = malloc(sizeof(int) * NTask);
nsend = malloc(sizeof(int) * NTask * NTask);
ndonelist = malloc(sizeof(int) * NTask);
i = 0; /* first gas particle, because stars may be hidden among gas particles */
ntotleft = ntot; /* particles left for all tasks together */
NSNIa_tot = 0;
NSNII_tot = 0;
NDYIN_tot = 0;
NSNIa_totlocal = 0;
NSNII_totlocal = 0;
NDYIN_totlocal = 0;
EgySN = 0;
EgySNlocal =0;
Nchimlocal = 0;
Nchim = 0;
Nwindlocal = 0;
Nwind = 0;
Noldwindlocal = 0;
Noldwind = 0;
Nflaglocal = 0;
Nflag = 0;
while(ntotleft > 0)
{
for(j = 0; j < NTask; j++)
nsend_local[j] = 0;
/* do local particles and prepare export list */
tstart = second();
for(nexport = 0, ndone = 0; i < N_gas+N_stars && nexport < All.BunchSizeChimie - NTask; i++)
{
/* only active particles and stars */
if((P[i].Ti_endstep == All.Ti_Current)&&(P[i].Type == ST))
{
if(P[i].Type != ST)
{
printf("P[i].Type != ST, we better stop.\n");
printf("N_gas=%d (type=%d) i=%d (type=%d)\n",N_gas,P[N_gas].Type,i,P[i].Type);
printf("Please, check that you do not use PEANOHILBERT\n");
endrun(777001);
}
m = P[i].StPIdx;
if ( (P[i].Mass/StP[m].InitialMass) > MinRelMass)
{
flag_chimie = 0;
/******************************************/
/* do chimie */
/******************************************/
/*****************************************************/
/* check whether a SN may have exploded during the last step */
/*****************************************************/
/***********************************************/
/***********************************************/
/* set the right table base of the metallicity */
set_table(0);
//FeH = log10( (StP[m].Metal[FE]/get_SolarAbundance(FE)) + 1.e-20 );
//if (FeH<-3)
// set_table(1);
//else
// set_table(0);
//if (P[i].ID==65546)
// {
// printf("(%d) %g the particle 65546 FeH=%g metalFe=%g Mmin=%g Mmax=%g n=%d\n",ThisTask,All.Time,FeH,StP[m].Metal[FE],Cp->Mmin,Cp->Mmax,Cp->n);
// }
/*
Cp->Mmin
Cp->Mmax
Cp->n
Cp->ms[]
Cp->as[]
Cp->SNIa_cte
Cp->SNIa_a
Cp->SNIa_Mdl1
Cp->SNIa_Mdu1
Cp->SNIa_bb1
Cp->SNIa_cte1
Cp->SNIa_a1
Cp->SNIa_Mdl2
Cp->SNIa_Mdu2
Cp->SNIa_bb2
Cp->SNIa_cte2
Cp->SNIa_a2
*/
/***********************************************/
/***********************************************/
/* minimum lifetime for a given metallicity */
minlivetime = star_lifetime(StP[m].Metal[NELEMENTS-1],Cp->Mmax*SOLAR_MASS/All.UnitMass_in_g)*All.HubbleParam;
/* maximum lifetime for a given metallicity */
maxlivetime = star_lifetime(StP[m].Metal[NELEMENTS-1],Cp->Mmin*SOLAR_MASS/All.UnitMass_in_g)*All.HubbleParam;
//if (P[i].ID==65546)
// printf("(%d) %g the particle 65546 has a max livetime of %g (metal=%g Mmin=%g)\n",ThisTask,All.Time,maxlivetime,StP[m].Metal[NELEMENTS-1],Cp->Mmin);
if (All.ComovingIntegrationOn)
{
/* FormationTime on the time line */
Ti0 = log(StP[m].FormationTime/All.TimeBegin) / All.Timebase_interval;
/* Beginning of time step on the time line */
Ti1 = P[i].Ti_begstep;
/* End of time step on the time line */
Ti2 = All.Ti_Current;
#ifdef COSMICTIME
t01 = get_cosmictime_difference(Ti0,Ti1);
t02 = get_cosmictime_difference(Ti0,Ti2);
#endif
}
else
{
t1 = All.TimeBegin + (P[i].Ti_begstep * All.Timebase_interval);
t2 = All.TimeBegin + (All.Ti_Current * All.Timebase_interval);
t01 = t1-StP[m].FormationTime;
t02 = t2-StP[m].FormationTime;
}
/* now treat all cases */
do_it=1;
/* beginning of interval */
if (t01>=minlivetime)
if (t01>=maxlivetime)
do_it=0; /* nothing to do */
else
m2 = star_mass_from_age(StP[m].Metal[NELEMENTS-1],t01/All.HubbleParam)*All.HubbleParam;
else
m2 = Cp->Mmax*SOLAR_MASS/All.UnitMass_in_g*All.HubbleParam;
/* end of interval */
if (t02<=maxlivetime)
if (t02<=minlivetime)
do_it=0; /* nothing to do */
else
m1 = star_mass_from_age(StP[m].Metal[NELEMENTS-1],t02/All.HubbleParam)*All.HubbleParam;
else
m1 = Cp->Mmin*SOLAR_MASS/All.UnitMass_in_g*All.HubbleParam;
//printf("Time=%g t01=%g t02=%g id=%d minlivetime=%g maxlivetime=%g \n",All.Time,t01,t02,P[i].ID,minlivetime,maxlivetime);
/* if some of the stars in the SSP explode between t1 and t2 */
if (do_it)
{
Nchimlocal++;
StP[m].Flag = 1; /* mark it as active */
if (m1>m2)
{
printf("m1=%g (%g Msol) > m2=%g (%g Msol) !!!\n\n",m1,m1*All.UnitMass_in_g/SOLAR_MASS,m2,m2*All.UnitMass_in_g/SOLAR_MASS);
endrun(777002);
}
M0 = StP[m].InitialMass;
for (k=0;k<NELEMENTS;k++)
metals[k] = StP[m].Metal[k];
/* number of SNIa */
NSNIa = SNIa_rate(m1/All.HubbleParam,m2/All.HubbleParam)*M0/All.HubbleParam;
/* number of SNII */
NSNII = SNII_rate(m1/All.HubbleParam,m2/All.HubbleParam)*M0/All.HubbleParam;
/* number of DYIN */
NDYIN = DYIN_rate(m1/All.HubbleParam,m2/All.HubbleParam)*M0/All.HubbleParam;
/* discretize SN */
#ifdef CHIMIE_MC_SUPERNOVAE
double fNSNIa,fNSNII,fNDYIN;
/* discretize SNIa */
fNSNIa = NSNIa-floor(NSNIa);
NSNIa = floor(NSNIa);
if (get_Chimie_random_number(P[i].ID) < fNSNIa)
NSNIa = NSNIa+1;
/* discretize SNII */
fNSNII = NSNII-floor(NSNII);
NSNII = floor(NSNII);
if (get_Chimie_random_number(P[i].ID) < fNSNII)
NSNII = NSNII+1;
/* discretize DYIN */
fNDYIN = NDYIN-floor(NDYIN);
NDYIN = floor(NDYIN);
if (get_Chimie_random_number(P[i].ID) < fNDYIN)
NDYIN = NDYIN+1;
/* compute ejectas */
Total_single_mass_ejection(0.5*(m1+m2)/All.HubbleParam,metals,NSNII,NSNIa,NDYIN);
StP[m].TotalEjectedGasMass = SingleEjectedMass[0]*All.HubbleParam; /* gas mass */
for (k=0;k<NELEMENTS;k++)
StP[m].TotalEjectedEltMass[k] = SingleEjectedMass[k+2]*All.HubbleParam; /* metal mass */
#else
/* compute ejectas */
Total_mass_ejection(m1/All.HubbleParam,m2/All.HubbleParam,M0/All.HubbleParam,metals);
StP[m].TotalEjectedGasMass = EjectedMass[0]*All.HubbleParam; /* gas mass */
for (k=0;k<NELEMENTS;k++)
StP[m].TotalEjectedEltMass[k] = EjectedMass[k+2]*All.HubbleParam; /* metal mass */
#endif /* CHIMIE_MC_SUPERNOVAE */
/* flag the particle if it ejects some mass */
if (StP[m].TotalEjectedGasMass>0)
flag_chimie=1;
/* compute injected energy */
StP[m].TotalEjectedEgySpec = All.ChimieSupernovaEnergy* (NSNIa + NSNII) /StP[m].TotalEjectedGasMass;
StP[m].NumberOfSNIa = NSNIa;
StP[m].NumberOfSNII = NSNII;
EgySNlocal += All.ChimieSupernovaEnergy* (NSNIa + NSNII);
NSNIa_totlocal += NSNIa;
NSNII_totlocal += NSNII;
NDYIN_totlocal += NDYIN;
/* correct mass particle */
if (P[i].Mass-StP[m].TotalEjectedGasMass<0)
{
printf("mass wants to be less than zero...\n");
printf("P[i].Mass=%g StP[m].TotalEjectedGasMass=%g\n",P[i].Mass,StP[m].TotalEjectedGasMass);
endrun(777100);
}
//if (P[i].ID==65546)
// printf("(%d) %g the particle 65546 is here, mass=%g TotalEjectedEltMass=%g m1=%g m2=%g\n",ThisTask,All.Time,P[i].Mass,StP[m].TotalEjectedGasMass,m1,m2);
P[i].Mass = P[i].Mass-StP[m].TotalEjectedGasMass;
if(P[i].Mass<0)
endrun(777023);
//float Fe,Mg;
//Fe = StP[m].TotalEjectedEltMass[0];
//Mg = StP[m].TotalEjectedEltMass[1];
}
/******************************************/
/* end do chimie */
/******************************************/
ndone++;
if (flag_chimie)
{
for(j = 0; j < NTask; j++)
Exportflag[j] = 0;
chimie_evaluate(i, 0);
for(j = 0; j < NTask; j++)
{
if(Exportflag[j])
{
for(k = 0; k < 3; k++)
{
ChimieDataIn[nexport].Pos[k] = P[i].Pos[k];
ChimieDataIn[nexport].Vel[k] = P[i].Vel[k];
}
ChimieDataIn[nexport].ID = P[i].ID;
ChimieDataIn[nexport].Timestep = P[i].Ti_endstep - P[i].Ti_begstep;
ChimieDataIn[nexport].Hsml = StP[m].Hsml;
ChimieDataIn[nexport].Density = StP[m].Density;
ChimieDataIn[nexport].Volume = StP[m].Volume;
#ifdef CHIMIE_KINETIC_FEEDBACK
ChimieDataIn[nexport].NgbMass = StP[m].NgbMass;
#endif
ChimieDataIn[nexport].TotalEjectedGasMass = StP[m].TotalEjectedGasMass;
for(k = 0; k < NELEMENTS; k++)
ChimieDataIn[nexport].TotalEjectedEltMass[k] = StP[m].TotalEjectedEltMass[k];
ChimieDataIn[nexport].TotalEjectedEgySpec = StP[m].TotalEjectedEgySpec;
ChimieDataIn[nexport].NumberOfSNIa = StP[m].NumberOfSNIa;
ChimieDataIn[nexport].NumberOfSNII = StP[m].NumberOfSNII;
#ifdef WITH_ID_IN_HYDRA
ChimieDataIn[nexport].ID = P[i].ID;
#endif
ChimieDataIn[nexport].Index = i;
ChimieDataIn[nexport].Task = j;
nexport++;
nsend_local[j]++;
}
}
}
}
}
}
tend = second();
timecomp += timediff(tstart, tend);
qsort(ChimieDataIn, nexport, sizeof(struct chimiedata_in), chimie_compare_key);
for(j = 1, noffset[0] = 0; j < NTask; j++)
noffset[j] = noffset[j - 1] + nsend_local[j - 1];
tstart = second();
MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD);
tend = second();
timeimbalance += timediff(tstart, tend);
/* now do the particles that need to be exported */
for(level = 1; level < (1 << PTask); level++)
{
tstart = second();
for(j = 0; j < NTask; j++)
nbuffer[j] = 0;
for(ngrp = level; ngrp < (1 << PTask); ngrp++)
{
maxfill = 0;
for(j = 0; j < NTask; j++)
{
if((j ^ ngrp) < NTask)
if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j])
maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j];
}
if(maxfill >= All.BunchSizeChimie)
break;
sendTask = ThisTask;
recvTask = ThisTask ^ ngrp;
if(recvTask < NTask)
{
if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0)
{
/* get the particles */
MPI_Sendrecv(&ChimieDataIn[noffset[recvTask]],
nsend_local[recvTask] * sizeof(struct chimiedata_in), MPI_BYTE,
recvTask, TAG_CHIMIE_A,
&ChimieDataGet[nbuffer[ThisTask]],
nsend[recvTask * NTask + ThisTask] * sizeof(struct chimiedata_in), MPI_BYTE,
recvTask, TAG_CHIMIE_A, MPI_COMM_WORLD, &status);
}
}
for(j = 0; j < NTask; j++)
if((j ^ ngrp) < NTask)
nbuffer[j] += nsend[(j ^ ngrp) * NTask + j];
}
tend = second();
timecommsumm += timediff(tstart, tend);
/* now do the imported particles */
tstart = second();
for(j = 0; j < nbuffer[ThisTask]; j++)
chimie_evaluate(j, 1);
tend = second();
timecomp += timediff(tstart, tend);
/* do a block to measure imbalance */
tstart = second();
MPI_Barrier(MPI_COMM_WORLD);
tend = second();
timeimbalance += timediff(tstart, tend);
/* get the result */
tstart = second();
for(j = 0; j < NTask; j++)
nbuffer[j] = 0;
for(ngrp = level; ngrp < (1 << PTask); ngrp++)
{
maxfill = 0;
for(j = 0; j < NTask; j++)
{
if((j ^ ngrp) < NTask)
if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j])
maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j];
}
if(maxfill >= All.BunchSizeChimie)
break;
sendTask = ThisTask;
recvTask = ThisTask ^ ngrp;
if(recvTask < NTask)
{
if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0)
{
/* send the results */
MPI_Sendrecv(&ChimieDataResult[nbuffer[ThisTask]],
nsend[recvTask * NTask + ThisTask] * sizeof(struct chimiedata_out),
MPI_BYTE, recvTask, TAG_CHIMIE_B,
&ChimieDataPartialResult[noffset[recvTask]],
nsend_local[recvTask] * sizeof(struct chimiedata_out),
MPI_BYTE, recvTask, TAG_CHIMIE_B, MPI_COMM_WORLD, &status);
/* add the result to the particles */
for(j = 0; j < nsend_local[recvTask]; j++)
{
source = j + noffset[recvTask];
place = ChimieDataIn[source].Index;
// for(k = 0; k < 3; k++)
// SphP[place].HydroAccel[k] += HydroDataPartialResult[source].Acc[k];
//
// SphP[place].DtEntropy += HydroDataPartialResult[source].DtEntropy;
//#ifdef FEEDBACK
// SphP[place].DtEgySpecFeedback += HydroDataPartialResult[source].DtEgySpecFeedback;
//#endif
// if(SphP[place].MaxSignalVel < HydroDataPartialResult[source].MaxSignalVel)
// SphP[place].MaxSignalVel = HydroDataPartialResult[source].MaxSignalVel;
//#ifdef COMPUTE_VELOCITY_DISPERSION
// for(k = 0; k < VELOCITY_DISPERSION_SIZE; k++)
// SphP[place].VelocityDispersion[k] += HydroDataPartialResult[source].VelocityDispersion[k];
//#endif
}
}
}
for(j = 0; j < NTask; j++)
if((j ^ ngrp) < NTask)
nbuffer[j] += nsend[(j ^ ngrp) * NTask + j];
}
tend = second();
timecommsumm += timediff(tstart, tend);
level = ngrp - 1;
}
MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD);
for(j = 0; j < NTask; j++)
ntotleft -= ndonelist[j];
}
free(ndonelist);
free(nsend);
free(nsend_local);
free(nbuffer);
free(noffset);
/* do final operations on results */
tstart = second();
for(i = 0; i < N_gas; i++)
{
if (P[i].Type==0)
{
P[i].Mass += SphP[i].dMass;
SphP[i].dMass = 0.;
}
}
tend = second();
timecomp += timediff(tstart, tend);
/* collect some timing information */
MPI_Reduce(&timecomp, &sumt, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&timecommsumm, &sumcomm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if(ThisTask == 0)
{
All.CPU_ChimieCompWalk += sumt / NTask;
All.CPU_ChimieCommSumm += sumcomm / NTask;
All.CPU_ChimieImbalance += sumimbalance / NTask;
}
#ifdef DETAILED_CPU_OUTPUT_IN_CHIMIE
numlist = malloc(sizeof(int) * NTask);
timecomplist = malloc(sizeof(double) * NTask);
timecommsummlist = malloc(sizeof(double) * NTask);
timeimbalancelist = malloc(sizeof(double) * NTask);
MPI_Gather(&NumStUpdate, 1, MPI_INT, numlist, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Gather(&timecomp, 1, MPI_DOUBLE, timecomplist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Gather(&timecommsumm, 1, MPI_DOUBLE, timecommsummlist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Gather(&timeimbalance, 1, MPI_DOUBLE, timeimbalancelist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
if(ThisTask == 0)
{
fprintf(FdTimings, "\n chimie\n\n");
fprintf(FdTimings, "Nupdate ");
for (i=0;i<NTask;i++)
fprintf(FdTimings, "%12d ",numlist[i]); /* nombre de part par proc */
fprintf(FdTimings, "\n");
fprintf(FdTimings, "timecomp ");
for (i=0;i<NTask;i++)
fprintf(FdTimings, "%12g ",timecomplist[i]);
fprintf(FdTimings, "\n");
fprintf(FdTimings, "timecommsumm ");
for (i=0;i<NTask;i++)
fprintf(FdTimings, "%12g ",timecommsummlist[i]);
fprintf(FdTimings, "\n");
fprintf(FdTimings, "timeimbalance ");
for (i=0;i<NTask;i++)
fprintf(FdTimings, "%12g ",timeimbalancelist[i]);
fprintf(FdTimings, "\n");
fprintf(FdTimings, "\n");
}
free(timeimbalancelist);
free(timecommsummlist);
free(timecomplist);
free(numlist);
#endif
/* collect some chimie information */
MPI_Reduce(&NSNIa_totlocal, &NSNIa_tot, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&NSNII_totlocal, &NSNII_tot, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&NDYIN_totlocal, &NDYIN_tot, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&EgySNlocal, &EgySN, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&Nchimlocal, &Nchim, 1, MPI_INT , MPI_SUM, 0, MPI_COMM_WORLD);
#ifdef CHIMIE_THERMAL_FEEDBACK
EgySNThermal = EgySN*(1-All.ChimieKineticFeedbackFraction);
#else
EgySNThermal = 0;
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
EgySNKinetic = EgySN*All.ChimieKineticFeedbackFraction;
/* count number of wind particles */
for(i = 0; i < N_gas; i++)
{
if (P[i].Type==0)
{
if (SphP[i].WindTime >= (All.Time-All.ChimieWindTime))
Nwindlocal++;
//else
// if (SphP[i].WindTime > All.TimeBegin-2*All.ChimieWindTime)
// Noldwindlocal++;
if (SphP[i].WindFlag)
Nflaglocal++;
}
}
MPI_Reduce(&Nwindlocal, &Nwind, 1, MPI_INT , MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&Noldwindlocal, &Noldwind, 1, MPI_INT , MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Allreduce(&Nflaglocal, &Nflag, 1, MPI_INT , MPI_SUM, MPI_COMM_WORLD);
#else
EgySNKinetic = 0;
#endif
/* write some info */
if (ThisTask==0)
{
fprintf(FdChimie, "%15g %10d %15g %15g %15g %15g %15g %10d %10d %10d\n",All.Time,Nchim,NSNIa_tot,NSNII_tot,EgySN,EgySNThermal,EgySNKinetic,Nwind,Noldwind,Nflag);
fflush(FdChimie);
}
- if (Nflag>0)
- {
- SetMinTimeStepForActives=1;
- if (ThisTask==0)
- fprintf(FdLog,"%g : !!! set min timestep for active particles !!!\n",All.Time);
- }
+/* this is no longer used */
+// if (Nflag>0)
+// {
+// SetMinTimeStepForActives=1;
+// if (ThisTask==0)
+// fprintf(FdLog,"%g : !!! set min timestep for active particles !!!\n",All.Time);
+// }
#ifdef CHIMIE_ONE_SN_ONLY
if (EgySN>0)
All.ChimieOneSN=1;
MPI_Bcast(&All.ChimieOneSN, 1, MPI_INT, 0, MPI_COMM_WORLD);
#endif
}
/*! This function is the 'core' of the chimie computation. A target
* particle is specified which may either be local, or reside in the
* communication buffer.
*/
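/* mode==0 : the target is a local star particle (data taken from P/StP); mode==1 : the target was imported and its data are read from ChimieDataGet */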
void chimie_evaluate(int target, int mode)
{
int j, n, startnode, numngb,numngb_inbox,k;
FLOAT *pos,*vel;
//FLOAT *vel;
//FLOAT mass;
double h, h2;
double acc[3];
double dx, dy, dz;
double wk, r, r2, u=0;
double hinv=1, hinv3;
int target_stp;
double density;
double volume;
#ifdef CHIMIE_KINETIC_FEEDBACK
double ngbmass;
double p;
#endif
double aij;
double ejectedGasMass;
double ejectedEltMass[NELEMENTS];
double ejectedEgySpec;
double NumberOfSNIa;
double NumberOfSNII;
double mass_k;
double NewMass;
double fv,vi2,vj2;
double EgySpec,NewEgySpec;
double DeltaEntropy;
double DeltaVel[3];
#ifndef LONGIDS
unsigned int id; /*!< particle identifier */
#else
unsigned long long id; /*!< particle identifier */
#endif
if(mode == 0)
{
pos = P[target].Pos;
vel = P[target].Vel;
id = P[target].ID;
target_stp = P[target].StPIdx;
h = StP[target_stp].Hsml;
density = StP[target_stp].Density;
volume = StP[target_stp].Volume;
#ifdef CHIMIE_KINETIC_FEEDBACK
ngbmass = StP[target_stp].NgbMass;
#endif
ejectedGasMass = StP[target_stp].TotalEjectedGasMass;
for(k=0;k<NELEMENTS;k++)
ejectedEltMass[k] = StP[target_stp].TotalEjectedEltMass[k];
ejectedEgySpec = StP[target_stp].TotalEjectedEgySpec;
NumberOfSNIa = StP[target_stp].NumberOfSNIa;
NumberOfSNII = StP[target_stp].NumberOfSNII;
}
else
{
pos = ChimieDataGet[target].Pos;
vel = ChimieDataGet[target].Vel;
id = ChimieDataGet[target].ID;
h = ChimieDataGet[target].Hsml;
density = ChimieDataGet[target].Density;
volume = ChimieDataGet[target].Volume;
#ifdef CHIMIE_KINETIC_FEEDBACK
ngbmass = ChimieDataGet[target].NgbMass;
#endif
ejectedGasMass = ChimieDataGet[target].TotalEjectedGasMass;
for(k=0;k<NELEMENTS;k++)
ejectedEltMass[k] = ChimieDataGet[target].TotalEjectedEltMass[k];
ejectedEgySpec = ChimieDataGet[target].TotalEjectedEgySpec;
NumberOfSNIa = ChimieDataGet[target].NumberOfSNIa;
NumberOfSNII = ChimieDataGet[target].NumberOfSNII;
}
/* initialize variables before SPH loop is started */
acc[0] = acc[1] = acc[2] = 0;
vi2 = 0;
for(k=0;k<3;k++)
vi2 += vel[k]*vel[k];
h2 = h * h;
hinv = 1.0 / h;
#ifndef TWODIMS
hinv3 = hinv * hinv * hinv;
#else
hinv3 = hinv * hinv / boxSize_Z;
#endif
/* Now start the actual SPH computation for this particle */
startnode = All.MaxPart;
numngb = 0;
do
{
numngb_inbox = ngb_treefind_variable_for_chimie(&pos[0], h, &startnode);
for(n = 0; n < numngb_inbox; n++)
{
j = Ngblist[n];
dx = pos[0] - P[j].Pos[0];
dy = pos[1] - P[j].Pos[1];
dz = pos[2] - P[j].Pos[2];
#ifdef PERIODIC /* now find the closest image in the given box size */
if(dx > boxHalf_X)
dx -= boxSize_X;
if(dx < -boxHalf_X)
dx += boxSize_X;
if(dy > boxHalf_Y)
dy -= boxSize_Y;
if(dy < -boxHalf_Y)
dy += boxSize_Y;
if(dz > boxHalf_Z)
dz -= boxSize_Z;
if(dz < -boxHalf_Z)
dz += boxSize_Z;
#endif
r2 = dx * dx + dy * dy + dz * dz;
if(r2 < h2)
{
numngb++;
r = sqrt(r2);
u = r * hinv;
if(u < 0.5)
{
wk = hinv3 * (KERNEL_COEFF_1 + KERNEL_COEFF_2 * (u - 1) * u * u);
}
else
{
wk = hinv3 * KERNEL_COEFF_5 * (1.0 - u) * (1.0 - u) * (1.0 - u);
}
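/* wk : cubic-spline SPH smoothing kernel evaluated at u = r/h */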
/* normalisation using mass */
aij = P[j].Mass*wk/density;
/* normalisation using volume */
/* !!! if this normalisation is used, a new variable (OldDensity) must be stored, because density is modified further down... */
//aij = P[j].Mass/SphP[j].Density*wk/volume;
/* metal injection */
for(k=0;k<NELEMENTS;k++)
{
mass_k = SphP[j].Metal[k]*P[j].Mass; /* mass of elt k */
SphP[j].Metal[k] = ( mass_k + aij*ejectedEltMass[k] )/( P[j].Mass + aij*ejectedGasMass );
}
/* new mass */
NewMass = P[j].Mass + aij*ejectedGasMass;
/* new velocity */
vj2 = 0;
for(k=0;k<3;k++)
vj2 += SphP[j].VelPred[k]*SphP[j].VelPred[k];
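/* fv rescales the receiver velocity so that its new kinetic energy equals its previous kinetic energy plus that of the received ejecta (which moves with the star velocity) */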
fv = sqrt( (P[j].Mass/NewMass) + aij*(ejectedGasMass/NewMass) * (vi2/vj2) );
for(k=0;k<3;k++)
{
DeltaVel[k] = fv*SphP[j].VelPred[k] - SphP[j].VelPred[k];
SphP[j].VelPred[k] += DeltaVel[k];
P[j].Vel [k] += DeltaVel[k];
}
/* spec energy at current step */
EgySpec = SphP[j].EntropyPred / GAMMA_MINUS1 * pow(SphP[j].Density*a3inv, GAMMA_MINUS1);
/* new egyspec */
NewEgySpec = (EgySpec )*(P[j].Mass/NewMass);
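/* i.e. the total thermal energy of the particle is conserved when its mass increases; the SN energy itself is injected separately via DeltaEgySpec below (when CHIMIE_THERMAL_FEEDBACK is enabled) */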
/* new density */
SphP[j].Density = SphP[j].Density*NewMass/P[j].Mass;
/* new entropy */
DeltaEntropy = GAMMA_MINUS1*NewEgySpec/pow(SphP[j].Density*a3inv, GAMMA_MINUS1) - SphP[j].EntropyPred;
SphP[j].EntropyPred += DeltaEntropy;
SphP[j].Entropy += DeltaEntropy;
#ifdef CHIMIE_THERMAL_FEEDBACK
SphP[j].DeltaEgySpec += (1.-All.ChimieKineticFeedbackFraction)*(ejectedGasMass*ejectedEgySpec)* aij/NewMass;
SphP[j].NumberOfSNII += NumberOfSNII*aij;
SphP[j].NumberOfSNIa += NumberOfSNIa*aij;
+
+#ifdef TIMESTEP_UPDATE_FOR_FEEDBACK
+ if(P[j].Ti_endstep != All.Ti_Current)
+ {
+ make_particle_active(j);
+ printf("(%d) Step=%d i=%08d particle flaged to become active\n",ThisTask,All.NumCurrentTiStep,j);
+ }
+ else
+ printf("(%d) Step=%d i=%08d particle already active\n",ThisTask,All.NumCurrentTiStep,j);
+#endif
+
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
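/* each neighbour becomes a wind particle with probability p, chosen such that, on average, the kinetic energy carried by the new wind particles (launched at ChimieWindSpeed) equals the kinetic fraction of the ejected SN energy */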
p = (All.ChimieKineticFeedbackFraction*ejectedEgySpec*ejectedGasMass)/(0.5*ngbmass*All.ChimieWindSpeed*All.ChimieWindSpeed);
double r;
r = get_Chimie_random_number(P[j].ID+id);
if ( r < p) /* we should maybe have a 2d table here... */
{
if (SphP[j].WindTime < (All.Time-All.ChimieWindTime)) /* not a wind particle */
{
SphP[j].WindFlag = 1;
SphP[j].WindTime = All.Time;
}
}
#endif
#ifdef CHECK_ENTROPY_SIGN
if ((SphP[j].EntropyPred < 0)||(SphP[j].Entropy < 0))
{
printf("\ntask=%d: entropy less than zero in chimie_evaluate !\n", ThisTask);
printf("ID=%d Entropy=%g EntropyPred=%g DeltaEntropy=%g\n",P[j].ID,SphP[j].Entropy,SphP[j].EntropyPred,DeltaEntropy);
fflush(stdout);
endrun(777003);
}
#endif
/* store mass diff. */
SphP[j].dMass += NewMass-P[j].Mass;
}
}
}
while(startnode >= 0);
/* Now collect the result at the right place */
if(mode == 0)
{
// for(k = 0; k < 3; k++)
// SphP[target].HydroAccel[k] = acc[k];
// SphP[target].DtEntropy = dtEntropy;
//#ifdef FEEDBACK
// SphP[target].DtEgySpecFeedback = dtEgySpecFeedback;
//#endif
// SphP[target].MaxSignalVel = maxSignalVel;
//#ifdef COMPUTE_VELOCITY_DISPERSION
// for(k = 0; k < VELOCITY_DISPERSION_SIZE; k++)
// SphP[target].VelocityDispersion[k] = VelocityDispersion[k];
//#endif
}
else
{
// for(k = 0; k < 3; k++)
// HydroDataResult[target].Acc[k] = acc[k];
// HydroDataResult[target].DtEntropy = dtEntropy;
//#ifdef FEEDBACK
// HydroDataResult[target].DtEgySpecFeedback = dtEgySpecFeedback;
//#endif
// HydroDataResult[target].MaxSignalVel = maxSignalVel;
//#ifdef COMPUTE_VELOCITY_DISPERSION
// for(k = 0; k < VELOCITY_DISPERSION_SIZE; k++)
// HydroDataResult[target].VelocityDispersion[k] = VelocityDispersion[k];
//#endif
}
}
/*! This is a comparison kernel for a sort routine, which is used to group
* particles that are going to be exported to the same CPU.
*/
int chimie_compare_key(const void *a, const void *b)
{
if(((struct chimiedata_in *) a)->Task < (((struct chimiedata_in *) b)->Task))
return -1;
if(((struct chimiedata_in *) a)->Task > (((struct chimiedata_in *) b)->Task))
return +1;
return 0;
}
/****************************************************************************************/
/*                                                                                      */
/*                                  PYTHON INTERFACE                                    */
/*                                                                                      */
/****************************************************************************************/
#ifdef PYCHEM
static PyObject *
chemistry_CodeUnits_to_SolarMass_Factor(PyObject *self, PyObject *args)
{
return Py_BuildValue("d",All.UnitMass_in_g/SOLAR_MASS);
}
static PyObject *
chemistry_SolarMass_to_CodeUnits_Factor(PyObject *self, PyObject *args)
{
return Py_BuildValue("d",SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject * chemistry_InitDefaultParameters(void)
{
/* list of Gadget parameters */
/* System of units */
All.UnitLength_in_cm = 3.085e+21; /* 1.0 kpc */
All.UnitMass_in_g = 1.989e+43; /* 1.0e10 solar masses */
All.UnitVelocity_in_cm_per_s = 20725573.785998672; /* 207 km/sec */
All.GravityConstantInternal = 0;
All.UnitTime_in_s = All.UnitLength_in_cm / All.UnitVelocity_in_cm_per_s;
All.UnitTime_in_Megayears=All.UnitTime_in_s / SEC_PER_MEGAYEAR;
return Py_BuildValue("i",1);
}
static PyObject * SetParameters(PyObject *dict)
{
PyObject *key;
PyObject *value;
int ivalue;
float fvalue;
double dvalue;
/* check that it is a PyDictObject */
if(!PyDict_Check(dict))
{
PyErr_SetString(PyExc_AttributeError, "argument is not a dictionary.");
return NULL;
}
if (PyDict_Size(dict)==0)
return Py_BuildValue("i",0);
Py_ssize_t pos=0;
while(PyDict_Next(dict,&pos,&key,&value))
{
if(PyString_Check(key))
{
/* System of units */
if(strcmp(PyString_AsString(key), "UnitLength_in_cm")==0)
{
if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value))
All.UnitLength_in_cm = PyFloat_AsDouble(value);
}
if(strcmp(PyString_AsString(key), "UnitMass_in_g")==0)
{
if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value))
All.UnitMass_in_g = PyFloat_AsDouble(value);
}
if(strcmp(PyString_AsString(key), "UnitVelocity_in_cm_per_s")==0)
{
if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value))
All.UnitVelocity_in_cm_per_s = PyFloat_AsDouble(value);
}
if(strcmp(PyString_AsString(key), "GravityConstantInternal")==0)
{
if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value))
All.GravityConstantInternal = PyFloat_AsDouble(value);
}
}
}
return Py_BuildValue("i",1);
}
static PyObject * chemistry_SetParameters(PyObject *self, PyObject *args)
{
PyObject *dict;
/* here, we can have either arguments or dict directly */
if(PyDict_Check(args))
{
dict = args;
}
else
{
if (! PyArg_ParseTuple(args, "O",&dict))
return NULL;
}
SetParameters(dict);
return Py_BuildValue("i",1);
}
static PyObject * chemistry_GetParameters(void)
{
PyObject *dict;
PyObject *key;
PyObject *value;
dict = PyDict_New();
/* System of units */
key = PyString_FromString("UnitLength_in_cm");
value = PyFloat_FromDouble(All.UnitLength_in_cm);
PyDict_SetItem(dict,key,value);
key = PyString_FromString("UnitMass_in_g");
value = PyFloat_FromDouble(All.UnitMass_in_g);
PyDict_SetItem(dict,key,value);
key = PyString_FromString("UnitVelocity_in_cm_per_s");
value = PyFloat_FromDouble(All.UnitVelocity_in_cm_per_s);
PyDict_SetItem(dict,key,value);
key = PyString_FromString("GravityConstantInternal");
value = PyFloat_FromDouble(All.GravityConstantInternal);
PyDict_SetItem(dict,key,value);
return Py_BuildValue("O",dict);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_init_chimie(PyObject *self, PyObject *args, PyObject *kwds)
{
int NumberOfTables=1;
int DefaultTable=0;
PyObject *paramsDict=NULL;
paramsDict= PyDict_New();
//PyObject *filename;
//if (! PyArg_ParseTuple(args, "Oii",&filename,&NumberOfTables,&DefaultTable))
// {
// PyErr_SetString(PyExc_ValueError,"init_chimie, error in parsing.");
// return NULL;
// }
static char *kwlist[] = {"filename","NumberOfTables","DefaultTable","params", NULL};
PyObject *filename=PyString_FromString("chimie.yr.dat");
/* this fails with python2.6, I do not know why ??? */
if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OiiO",kwlist,&filename,&NumberOfTables,&DefaultTable,&paramsDict))
{
PyErr_SetString(PyExc_ValueError,"init_chimie, error in parsing arguments.");
return NULL;
}
if (!PyString_Check(filename))
{
PyErr_SetString(PyExc_ValueError,"Argument must be a string.");
return NULL;
}
/* copy filename */
All.ChimieParameterFile = PyString_AsString(filename);
/* set number of tables */
All.ChimieNumberOfParameterFiles = NumberOfTables;
/* check if the file exists */
if(!(fopen(All.ChimieParameterFile, "r")))
{
PyErr_SetString(PyExc_ValueError,"The parameter file does not exists.");
return NULL;
}
/* use default parameters */
chemistry_InitDefaultParameters();
/* check if units are given */
/* check that it is a PyDictObject */
if(!PyDict_Check(paramsDict))
{
PyErr_SetString(PyExc_AttributeError, "argument is not a dictionary.");
return NULL;
}
else
{
SetParameters(paramsDict);
}
init_chimie();
/* by default, set the first one */
set_table(DefaultTable);
return Py_BuildValue("O",Py_None);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_set_table(PyObject *self, PyObject *args, PyObject *kwds)
{
int i;
if (! PyArg_ParseTuple(args, "i",&i))
return PyString_FromString("error");
/* set the table */
set_table(i);
return Py_BuildValue("d",0);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_get_imf(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *m,*imf;
int i;
if (! PyArg_ParseTuple(args, "O",&m))
return PyString_FromString("error");
m = TO_DOUBLE(m);
/* create an output */
imf = (PyArrayObject *) PyArray_SimpleNew(m->nd,m->dimensions,PyArray_DOUBLE);
//printf("--> %g\n",Cp->bs[0]);
//for (i=0;i<Cp->n;i++)
// printf("%g %g\n",Cp->ms[i],Cp->as[i]);
for(i = 0; i < m->dimensions[0]; i++)
{
*(double *)(imf->data + i*(imf->strides[0])) = get_imf(*(double *)(m->data + i*(m->strides[0])));
}
return PyArray_Return(imf);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_get_imf_M(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *m1,*m2,*imf;
int i;
if (! PyArg_ParseTuple(args, "OO",&m1,&m2))
return PyString_FromString("error");
m1 = TO_DOUBLE(m1);
m2 = TO_DOUBLE(m2);
/* create an output */
imf = (PyArrayObject *) PyArray_SimpleNew(m1->nd,m1->dimensions,PyArray_DOUBLE);
for(i = 0; i < imf->dimensions[0]; i++)
{
*(double *)(imf->data + i*(imf->strides[0])) = get_imf_M( *(double *)(m1->data + i*(m1->strides[0])), *(double *)(m2->data + i*(m2->strides[0])) );
}
return PyArray_Return(imf);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_get_imf_N(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *m1,*m2,*imf;
int i;
if (! PyArg_ParseTuple(args, "OO",&m1,&m2))
return PyString_FromString("error");
m1 = TO_DOUBLE(m1);
m2 = TO_DOUBLE(m2);
/* create an output */
imf = (PyArrayObject *) PyArray_SimpleNew(m1->nd,m1->dimensions,PyArray_DOUBLE);
for(i = 0; i < imf->dimensions[0]; i++)
{
*(double *)(imf->data + i*(imf->strides[0])) = get_imf_N( *(double *)(m1->data + i*(m1->strides[0])), *(double *)(m2->data + i*(m2->strides[0])) );
}
return PyArray_Return(imf);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_star_lifetime(self, args)
PyObject *self;
PyObject *args;
{
/* z is the mass fraction of metals, ie, the metallicity */
/* m is the star mass in code unit */
/* Return t in time unit */
double time,z,m;
if (!PyArg_ParseTuple(args, "dd", &z, &m))
return NULL;
time = star_lifetime(z,m);
return Py_BuildValue("d",time);
}
static PyObject *
chemistry_star_mass_from_age(self, args)
PyObject *self;
PyObject *args;
{
/* t : life time (in code unit) */
/* return the stellar mass (in code unit) that has a lifetime equal to t */
double time,z,m;
if (!PyArg_ParseTuple(args, "dd", &z, &time))
return NULL;
m = star_mass_from_age(z,time);
return Py_BuildValue("d",m);
}
static PyObject *
chemistry_DYIN_rate(self, args)
PyObject *self;
PyObject *args;
{
double m1,m2;
double RDYIN;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dd", &m1,&m2))
return NULL;
RDYIN = DYIN_rate(m1,m2);
return Py_BuildValue("d",RDYIN);
}
static PyObject *
chemistry_SNII_rate(self, args)
PyObject *self;
PyObject *args;
{
double m1,m2;
double RSNII;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dd", &m1,&m2))
return NULL;
RSNII = SNII_rate(m1,m2);
return Py_BuildValue("d",RSNII);
}
static PyObject *
chemistry_SNIa_rate(self, args)
PyObject *self;
PyObject *args;
{
double m1,m2;
double RSNIa;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dd", &m1,&m2))
return NULL;
RSNIa = SNIa_rate(m1,m2);
return Py_BuildValue("d",RSNIa);
}
static PyObject *
chemistry_DYIN_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1,m2;
PyArrayObject *ArrMassDYIN;
npy_intp ld[1];
int i;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dd", &m1,&m2))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
ArrMassDYIN = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* compute dying stars ejection */
DYIN_mass_ejection(m1,m2);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(ArrMassDYIN->data + (i)*(ArrMassDYIN->strides[0])) = MassFracDYIN[i];
/* convert in array */
return Py_BuildValue("O",ArrMassDYIN);
}
static PyObject *
chemistry_DYIN_single_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1;
PyArrayObject *ArrMassDYIN;
npy_intp ld[1];
int i;
/* parse arguments */
if (!PyArg_ParseTuple(args, "d", &m1))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
ArrMassDYIN = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* compute SN ejection */
DYIN_single_mass_ejection(m1);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(ArrMassDYIN->data + (i)*(ArrMassDYIN->strides[0])) = SingleMassFracDYIN[i];
/* convert in array */
return Py_BuildValue("O",ArrMassDYIN);
}
static PyObject *
chemistry_SNII_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1,m2;
PyArrayObject *ArrMassSNII;
npy_intp ld[1];
int i;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dd", &m1,&m2))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
ArrMassSNII = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* compute SN ejection */
SNII_mass_ejection(m1,m2);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(ArrMassSNII->data + (i)*(ArrMassSNII->strides[0])) = MassFracSNII[i];
/* convert in array */
return Py_BuildValue("O",ArrMassSNII);
}
static PyObject *
chemistry_SNII_single_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1;
PyArrayObject *ArrMassSNII;
npy_intp ld[1];
int i;
/* parse arguments */
if (!PyArg_ParseTuple(args, "d", &m1))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
ArrMassSNII = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* compute SN ejection */
SNII_single_mass_ejection(m1);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(ArrMassSNII->data + (i)*(ArrMassSNII->strides[0])) = SingleMassFracSNII[i];
/* convert in array */
return Py_BuildValue("O",ArrMassSNII);
}
static PyObject *
chemistry_SNIa_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1,m2;
PyArrayObject *ArrMassSNIa;
npy_intp ld[1];
int i;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dd", &m1,&m2))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
ArrMassSNIa = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* compute SN ejection */
SNIa_mass_ejection(m1,m2);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(ArrMassSNIa->data + (i)*(ArrMassSNIa->strides[0])) = MassFracSNIa[i];
/* convert in array */
return Py_BuildValue("O",ArrMassSNIa);
}
static PyObject *
chemistry_SNIa_single_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1;
PyArrayObject *ArrMassSNIa;
npy_intp ld[1];
int i;
/* parse arguments */
if (!PyArg_ParseTuple(args, "d", &m1))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
ArrMassSNIa = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* compute SN ejection */
SNIa_single_mass_ejection(m1);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(ArrMassSNIa->data + (i)*(ArrMassSNIa->strides[0])) = SingleMassFracSNIa[i];
/* convert in array */
return Py_BuildValue("O",ArrMassSNIa);
}
static PyObject *
chemistry_Total_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1,m2,M;
PyArrayObject *zs;
PyArrayObject *EMass;
npy_intp ld[1];
int i;
double *z;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dddO", &m1,&m2,&M,&zs))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
EMass = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* allocate memory for the metallicity array */
z = malloc((Cp->nelts) * sizeof(double));
/* export values */
for (i=0;i<Cp->nelts;i++)
z[i]= *(double *)(zs->data + (i)*(zs->strides[0]));
/* compute SN ejection */
Total_mass_ejection(m1,m2,M,z);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(EMass->data + (i)*(EMass->strides[0])) = EjectedMass[i];
/* convert in array */
return Py_BuildValue("O",EMass);
}
static PyObject *
chemistry_DYIN_Total_single_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1;
PyArrayObject *zs;
PyArrayObject *EMass;
npy_intp ld[1];
int i;
double *z;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dO", &m1,&zs))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
EMass = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* allocate memory for the metallicity array */
z = malloc((Cp->nelts) * sizeof(double));
/* export values */
for (i=0;i<Cp->nelts;i++)
z[i]= *(double *)(zs->data + (i)*(zs->strides[0]));
/* compute dying stars ejection */
DYIN_Total_single_mass_ejection(m1,z);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(EMass->data + (i)*(EMass->strides[0])) = SingleEjectedMass[i];
/* convert in array */
return Py_BuildValue("O",EMass);
}
static PyObject *
chemistry_SNII_Total_single_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1;
PyArrayObject *zs;
PyArrayObject *EMass;
npy_intp ld[1];
int i;
double *z;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dO", &m1,&zs))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
EMass = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* allocate memory for the metallicity array */
z = malloc((Cp->nelts) * sizeof(double));
/* export values */
for (i=0;i<Cp->nelts;i++)
z[i]= *(double *)(zs->data + (i)*(zs->strides[0]));
/* compute SN ejection */
SNII_Total_single_mass_ejection(m1,z);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(EMass->data + (i)*(EMass->strides[0])) = SingleEjectedMass[i];
/* convert in array */
return Py_BuildValue("O",EMass);
}
static PyObject *
chemistry_SNIa_Total_single_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1;
PyArrayObject *zs;
PyArrayObject *EMass;
npy_intp ld[1];
int i;
double *z;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dO", &m1,&zs))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
EMass = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* allocate memory for the metallicity array */
z = malloc((Cp->nelts) * sizeof(double));
/* export values */
for (i=0;i<Cp->nelts;i++)
z[i]= *(double *)(zs->data + (i)*(zs->strides[0]));
/* compute SN ejection */
SNIa_Total_single_mass_ejection(m1,z);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(EMass->data + (i)*(EMass->strides[0])) = SingleEjectedMass[i];
/* convert in array */
return Py_BuildValue("O",EMass);
}
static PyObject *
chemistry_Total_single_mass_ejection(self, args)
PyObject *self;
PyObject *args;
{
double m1;
double NSNII,NSNIa,NDYIN;
PyArrayObject *zs;
PyArrayObject *EMass;
npy_intp ld[1];
int i;
double *z;
/* parse arguments */
if (!PyArg_ParseTuple(args, "dOddd", &m1,&zs,&NSNII,&NSNIa,&NDYIN))
return NULL;
/* create output array */
ld[0]= Cp->nelts+2;
EMass = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* allocate memory for the metallicity array */
z = malloc((Cp->nelts) * sizeof(double));
/* export values */
for (i=0;i<Cp->nelts;i++)
z[i]= *(double *)(zs->data + (i)*(zs->strides[0]));
/* compute SN ejection */
Total_single_mass_ejection(m1,z,NSNII,NSNIa,NDYIN);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(EMass->data + (i)*(EMass->strides[0])) = SingleEjectedMass[i];
/* convert in array */
return Py_BuildValue("O",EMass);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_cooling_function(self, args)
PyObject *self;
PyObject *args;
{
/*
inputs:
u_energy
metal = metal(i,2)
parameters
t_const,zmin,zmax,slz,tmin,tmax,slt,FeHSolar,cooling_data_max
*/
PyArrayObject *cooling_data;
double u_energy,metal;
double t_const,zmin,zmax,slz,tmin,tmax,slt,FeHSolar,cooling_data_max;
double cooling,u_cutoff,T,Z;
double rt, rz, ft, fz, v1, v2, v;
int it,iz,itp,izp;
/* parse arguments */
if (!PyArg_ParseTuple(args, "ddOddddddddd", &u_energy, &metal, &cooling_data,&t_const,&zmin,&zmax,&slz,&tmin,&tmax,&slt,&FeHSolar,&cooling_data_max))
return NULL;
u_cutoff=(100)/t_const;
cooling = 0.0;
if (u_energy > u_cutoff)
{
T = log10( t_const*u_energy );
Z = log10( metal/FeHSolar + 1.e-10 );
if (Z>zmax)
{
/*print *,'Warning: Z>Zmax for',i*/
Z=zmax;
}
if (Z < zmin)
{
rt = (T-tmin)/slt;
it = (int)rt;
if (it < cooling_data_max )
it = (int)rt;
else
it = cooling_data_max;
itp = it+1;
ft = rt - it;
fz = ( 10. + Z )/( 10. + zmin);
//v1 = ft*(cooling_data( 1, itp)-cooling_data( 1,it) ) + cooling_data( 1,it );
v1 = ft * (*(double *) (cooling_data->data + 1*(cooling_data->strides[0]) + itp*cooling_data->strides[1])
- *(double *) (cooling_data->data + 1*(cooling_data->strides[0]) + it *cooling_data->strides[1]))
+ *(double *) (cooling_data->data + 1*(cooling_data->strides[0]) + it *cooling_data->strides[1]);
//v2 = ft*(cooling_data( 0,itp )-cooling_data( 0, it ) ) + cooling_data( 0, it );
v2 = ft * (*(double *) (cooling_data->data + 0*(cooling_data->strides[0]) + itp*cooling_data->strides[1])
- *(double *) (cooling_data->data + 0*(cooling_data->strides[0]) + it *cooling_data->strides[1]))
+ *(double *) (cooling_data->data + 0*(cooling_data->strides[0]) + it *cooling_data->strides[1]);
v = v2 + fz*(v1-v2);
}
else
{
rt = (T-tmin)/slt;
rz = (Z-zmin)/slz+1.0;
it = (int)rt;
if (it < cooling_data_max )
it = (int)rt;
else
it = cooling_data_max;
iz = (int)rz;
itp = it+1;
izp = iz+1;
ft = rt - it;
fz = rz - iz;
//v1 = ft*(cooling_data( izp, itp)-cooling_data(izp,it)) + cooling_data( izp, it );
v1 = ft * (*(double *) (cooling_data->data + izp*(cooling_data->strides[0]) + itp*cooling_data->strides[1])
- *(double *) (cooling_data->data + izp*(cooling_data->strides[0]) + it *cooling_data->strides[1]))
+ *(double *) (cooling_data->data + izp*(cooling_data->strides[0]) + it *cooling_data->strides[1]);
//v2 = ft*(cooling_data( iz, itp )-cooling_data(iz,it )) + cooling_data( iz, it );
v2 = ft * (*(double *) (cooling_data->data + iz *(cooling_data->strides[0]) + itp*cooling_data->strides[1])
- *(double *) (cooling_data->data + iz *(cooling_data->strides[0]) + it *cooling_data->strides[1]))
+ *(double *) (cooling_data->data + iz *(cooling_data->strides[0]) + it *cooling_data->strides[1]);
v = v2 + fz*(v1-v2);
}
cooling = pow(10,v);
}
return Py_BuildValue("d",cooling);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_get_Mmax(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->Mmax * SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject *
chemistry_get_Mmin(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->Mmin * SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject *
chemistry_get_Mco(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->Mco * SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject *
chemistry_get_SNIa_Mpl(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->SNIa_Mpl * SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject *
chemistry_get_SNIa_Mpu(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->SNIa_Mpu * SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject *
chemistry_get_SNII_Mmin(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->SNII_Mmin * SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject *
chemistry_get_SNII_Mmax(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->SNII_Mmax * SOLAR_MASS/All.UnitMass_in_g);
}
static PyObject *
chemistry_get_imf_Ntot(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("d",(double)Cp->imf_Ntot/SOLAR_MASS*All.UnitMass_in_g); /* in code mass unit */
}
static PyObject *
chemistry_get_as(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *as;
npy_intp ld[1];
int i;
/* create output array */
ld[0]= Cp->n+1;
as = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* import values */
for (i=0;i<Cp->n+1;i++)
*(double *)(as->data + (i)*(as->strides[0])) = Cp->as[i];
return Py_BuildValue("O",as);
}
static PyObject *
chemistry_get_bs(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *bs;
npy_intp ld[1];
int i;
/* create output array */
ld[0]= Cp->n+1;
bs = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* import values */
for (i=0;i<Cp->n+1;i++)
*(double *)(bs->data + (i)*(bs->strides[0])) = Cp->bs[i];
return Py_BuildValue("O",bs);
}
static PyObject *
chemistry_get_fs(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *fs;
npy_intp ld[1];
int i;
/* create output array */
ld[0]= Cp->n;
fs = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* import values */
for (i=0;i<Cp->n;i++)
*(double *)(fs->data + (i)*(fs->strides[0])) = Cp->fs[i];
return Py_BuildValue("O",fs);
}
static PyObject *
chemistry_get_allnelts(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("i",(int)Cp->nelts+2);
}
static PyObject *
chemistry_get_nelts(self, args)
PyObject *self;
PyObject *args;
{
return Py_BuildValue("i",(int)Cp->nelts);
}
static PyObject *
chemistry_get_allelts_labels(self, args)
PyObject *self;
PyObject *args;
{
int i;
PyObject *LabelList,*LabelString;
LabelList = PyList_New((Py_ssize_t)Cp->nelts+2);
for(i=0;i<Cp->nelts+2;i++)
{
LabelString = PyString_FromString(Elt[i].label);
PyList_SetItem(LabelList, (Py_ssize_t)i,LabelString);
}
return Py_BuildValue("O",LabelList);
}
static PyObject *
chemistry_get_elts_labels(self, args)
PyObject *self;
PyObject *args;
{
int i;
PyObject *LabelList,*LabelString;
LabelList = PyList_New((Py_ssize_t)Cp->nelts);
for(i=2;i<Cp->nelts+2;i++)
{
LabelString = PyString_FromString(Elt[i].label);
PyList_SetItem(LabelList, (Py_ssize_t)i-2,LabelString);
}
return Py_BuildValue("O",LabelList);
}
/* static PyObject *
chemistry_get_elts_SolarAbundances(self, args)
PyObject *self;
PyObject *args;
{
int i;
npy_intp ld[1];
PyArrayObject *AbList;
ld[0] = Cp->nelts;
AbList = (PyArrayObject *) PyArray_SimpleNew(1,ld,NPY_FLOAT);
for(i=2;i<Cp->nelts+2;i++)
*(float*)(AbList->data + (i-2)*(AbList->strides[0])) = (float) Elt[i].SolarAbundance;
return PyArray_Return(AbList);
} */
static PyObject *
chemistry_get_elts_SolarAbundances(self, args)
PyObject *self;
PyObject *args;
{
int i;
PyObject *AbDict,*LabelString,*AbVal;
AbDict = PyDict_New();
for(i=2;i<Cp->nelts+2;i++)
{
AbVal = PyFloat_FromDouble(Elt[i].SolarAbundance);
LabelString = PyString_FromString(Elt[i].label);
PyDict_SetItem(AbDict,LabelString, AbVal);
}
return Py_BuildValue("O",AbDict);
}
static PyObject *
chemistry_get_MSNIa(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *MSNIa;
npy_intp ld[1];
int i;
/* create output array */
ld[0]= Cp->nelts;
MSNIa = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* import values */
for (i=0;i<Cp->nelts;i++)
*(double *)(MSNIa->data + (i)*(MSNIa->strides[0])) = Elt[i+2].MSNIa/All.UnitMass_in_g*SOLAR_MASS;
return Py_BuildValue("O",MSNIa);
}
static PyObject *
chemistry_get_MassFracSNII(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *MassFrac;
npy_intp ld[1];
int i;
/* create output array */
ld[0]= Cp->nelts+2;
MassFrac = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(MassFrac->data + (i)*(MassFrac->strides[0])) = MassFracSNII[i];
return Py_BuildValue("O",MassFrac);
}
static PyObject *
chemistry_get_SingleMassFracSNII(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *MassFrac;
npy_intp ld[1];
int i;
/* create output array */
ld[0]= Cp->nelts+2;
MassFrac = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
/* import values */
for (i=0;i<Cp->nelts+2;i++)
*(double *)(MassFrac->data + (i)*(MassFrac->strides[0])) = SingleMassFracSNII[i];
return Py_BuildValue("O",MassFrac);
}
static PyObject *
chemistry_imf_sampling(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *ms;
npy_intp ld[1];
int i;
int n,seed;
/* parse arguments */
if (!PyArg_ParseTuple(args, "ii", &n,&seed))
return NULL;
/* create output array */
ld[0]= n;
ms = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
srandom(seed);
/* import values */
for (i=0;i<n;i++)
*(double *)(ms->data + (i)*(ms->strides[0])) = imf_sampling();
return Py_BuildValue("O",ms);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_SNII_rate_P(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *ConstSN,*Msn;
double m1,m2,md;
double powSN1,powSN2;
double RSNII;
RSNII = 0.0;
/* parse arguments */
if (!PyArg_ParseTuple(args, "ddddOO", &m1,&m2,&powSN1,&powSN2,&ConstSN,&Msn))
return NULL;
if ( m1 < *(double *) (Msn->data + 2*(Msn->strides[0]) + 1*(Msn->strides[1])) )
md = *(double *) (Msn->data + 2*(Msn->strides[0]) + 1*(Msn->strides[1]));
else
md = m1;
if (md >= m2)
RSNII = 0;
else
RSNII = *(double *) (ConstSN->data + 2*ConstSN->strides[0]) *(pow(m2,powSN1)-pow(md,powSN1));
return Py_BuildValue("d",RSNII);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_SNIa_rate_P(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *ConstSN,*Msn;
double m1,m2,md,mu;
double powSN1,powSN2;
double RSNIa;
double rate;
int i;
/* parse arguments */
if (!PyArg_ParseTuple(args, "ddddOO", &m1,&m2,&powSN1,&powSN2,&ConstSN,&Msn))
return NULL;
RSNIa = 0.0;
for (i=0;i<2;i++)
{
if ( m1 < *(double *) (Msn->data + i*(Msn->strides[0]) + 0*(Msn->strides[1])) )
md = *(double *) (Msn->data + i*(Msn->strides[0]) + 0*(Msn->strides[1]));
else
md = m1;
if ( m2 > *(double *) (Msn->data + i*(Msn->strides[0]) + 1*(Msn->strides[1])) )
mu = *(double *) (Msn->data + i*(Msn->strides[0]) + 1*(Msn->strides[1]));
else
mu = m2;
if (md<mu)
RSNIa = RSNIa+ *(double *) (ConstSN->data + i*ConstSN->strides[0])*(pow(mu,powSN2)-pow(md,powSN2));
}
if ( m1 < *(double *) (Msn->data + 2*(Msn->strides[0]) + 0*(Msn->strides[1])) )
md = *(double *) (Msn->data + 2*(Msn->strides[0]) + 0*(Msn->strides[1]));
else
md = m1;
mu = *(double *) (Msn->data + 2*(Msn->strides[0]) + 1*(Msn->strides[1]));
if (md >= mu)
RSNIa = 0.0;
else
RSNIa = RSNIa*(pow(mu,powSN1)-pow(md,powSN1));
if (RSNIa<0)
RSNIa = 0;
return Py_BuildValue("d",RSNIa);
}
/*********************************/
/* */
/*********************************/
static PyObject *
chemistry_SNII_mass_ejection_P(self, args)
PyObject *self;
PyObject *args;
{
PyArrayObject *ArrayOrigin,*ArrayStep,*ChemArray;
double m1,m2;
int NbElement;
double l1,l2;
int i1,i2,i1p,i2p,j;
double f1,f2;
double v1,v2;
PyArrayObject *ArrMassSNII;
npy_intp ld[1];
/* parse arguments */
if (!PyArg_ParseTuple(args, "ddOOOi", &m1,&m2,&ArrayOrigin,&ArrayStep,&ChemArray,&NbElement))
return NULL;
/* create output array */
ld[0]= NbElement+2;
ArrMassSNII = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
l1 = ( log10(m1) - *(double *)(ArrayOrigin->data + 0*(ArrayOrigin->strides[0])) ) / *(double *)(ArrayStep->data + 0*(ArrayStep->strides[0])) ;
l2 = ( log10(m2) - *(double *)(ArrayOrigin->data + 0*(ArrayOrigin->strides[0])) ) / *(double *)(ArrayStep->data + 0*(ArrayStep->strides[0])) ;
if (l1 < 0.0) l1 = 0.0;
if (l2 < 0.0) l2 = 0.0;
i1 = (int)l1;
i2 = (int)l2;
i1p = i1 + 1;
i2p = i2 + 1;
f1 = l1 - i1;
f2 = l2 - i2;
/* check (yr) */
if (i1<1) i1=1;
if (i2<1) i2=1;
/* --------- TOTAL GAS ---------- */
j = NbElement;
v1=f1* (*(double *)(ChemArray->data + (i1p)*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]))
- *(double *)(ChemArray->data + (i1 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1])))
+ *(double *)(ChemArray->data + (i1 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]));
v2=f2* (*(double *)(ChemArray->data + (i2p)*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]))
- *(double *)(ChemArray->data + (i2 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1])))
+ *(double *)(ChemArray->data + (i2 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]));
*(double *)(ArrMassSNII->data + (j)*(ArrMassSNII->strides[0])) = v2-v1;
/* --------- He core therm ---------- */
j = NbElement+1;
v1=f1* (*(double *)(ChemArray->data + (i1p)*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]))
- *(double *)(ChemArray->data + (i1 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1])))
+ *(double *)(ChemArray->data + (i1 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]));
v2=f2* (*(double *)(ChemArray->data + (i2p)*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]))
- *(double *)(ChemArray->data + (i2 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1])))
+ *(double *)(ChemArray->data + (i2 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]));
*(double *)(ArrMassSNII->data + (j)*(ArrMassSNII->strides[0])) = v2-v1;
/* --------- Metals ---------- */
l1 = ( log10(m1) - *(double *)(ArrayOrigin->data + 1*(ArrayOrigin->strides[0])) ) / *(double *)(ArrayStep->data + 1*(ArrayStep->strides[0])) ;
l2 = ( log10(m2) - *(double *)(ArrayOrigin->data + 1*(ArrayOrigin->strides[0])) ) / *(double *)(ArrayStep->data + 1*(ArrayStep->strides[0])) ;
if (l1 < 0.0) l1 = 0.0;
if (l2 < 0.0) l2 = 0.0;
i1 = (int)l1;
i2 = (int)l2;
i1p = i1 + 1;
i2p = i2 + 1;
f1 = l1 - i1;
f2 = l2 - i2;
/* check (yr) */
if (i1<1) i1=1;
if (i2<1) i2=1;
for (j=0;j<NbElement;j++)
{
v1=f1* (*(double *)(ChemArray->data + (i1p)*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]))
- *(double *)(ChemArray->data + (i1 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1])))
+ *(double *)(ChemArray->data + (i1 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]));
v2=f2* (*(double *)(ChemArray->data + (i2p)*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]))
- *(double *)(ChemArray->data + (i2 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1])))
+ *(double *)(ChemArray->data + (i2 )*(ChemArray->strides[0]) + (j)*(ChemArray->strides[1]));
*(double *)(ArrMassSNII->data + (j)*(ArrMassSNII->strides[0])) = v2-v1;
}
return Py_BuildValue("O",ArrMassSNII);
}
/* definition of the method table */
static PyMethodDef chemistryMethods[] = {
{"CodeUnits_to_SolarMass_Factor", chemistry_CodeUnits_to_SolarMass_Factor, METH_VARARGS,
"convertion factor : CodeUnits -> SolarMass"},
{"SolarMass_to_CodeUnits_Factor", chemistry_SolarMass_to_CodeUnits_Factor, METH_VARARGS,
"convertion factor : SolarMass -> CodeUnits"},
{"InitDefaultParameters", (PyCFunction)chemistry_InitDefaultParameters, METH_VARARGS,
"Init default parameters"},
{"SetParameters", (PyCFunction)chemistry_SetParameters, METH_VARARGS,
"Set gadget parameters"},
{"GetParameters", (PyCFunction)chemistry_GetParameters, METH_VARARGS,
"get some gadget parameters"},
{"init_chimie", chemistry_init_chimie, METH_VARARGS| METH_KEYWORDS,
"Init chimie."},
{"set_table", chemistry_set_table, METH_VARARGS,
"Set the chimie table."},
{"get_imf", chemistry_get_imf, METH_VARARGS,
"Compute corresponding imf value."},
{"get_imf_M", chemistry_get_imf_M, METH_VARARGS,
"Compute the mass fraction between m1 and m2."},
{"get_imf_N", chemistry_get_imf_N, METH_VARARGS,
"Compute the fraction number between m1 and m2."},
{"star_lifetime", chemistry_star_lifetime, METH_VARARGS,
"Compute star life time."},
{"star_mass_from_age", chemistry_star_mass_from_age, METH_VARARGS,
"Return the stellar mass that has a lifetime equal to t."},
{"DYIN_rate", chemistry_DYIN_rate, METH_VARARGS,
"Return the number of dying stars per unit mass with masses between m1 and m2."},
{"SNII_rate", chemistry_SNII_rate, METH_VARARGS,
"Return the number of SNII per unit mass with masses between m1 and m2."},
{"SNIa_rate", chemistry_SNIa_rate, METH_VARARGS,
"Return the number of SNIa per unit mass with masses between m1 and m2."},
{"DYIN_mass_ejection", chemistry_DYIN_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements, per unit mass due to the explotion of dying stars with masses between m1 and m2."},
{"DYIN_single_mass_ejection", chemistry_DYIN_single_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements due to the explotion of one dying star of mass m."},
{"SNII_mass_ejection", chemistry_SNII_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements, per unit mass due to the explotion of SNII with masses between m1 and m2."},
{"SNII_single_mass_ejection", chemistry_SNII_single_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements due to the explotion of one SNII of mass m."},
{"SNIa_mass_ejection", chemistry_SNIa_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements, per unit mass due to the explotion of SNIa with masses between m1 and m2."},
{"SNIa_single_mass_ejection", chemistry_SNIa_single_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements due to the explotion of one SNIa of mass m."},
{"Total_mass_ejection", chemistry_Total_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements, per unit mass due to the explotion of SNIa and SNII with masses between m1 and m2."},
{"DYIN_Total_single_mass_ejection", chemistry_DYIN_Total_single_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements (including processed and non processed elements) due to the explotion of one dying star of mass m."},
{"SNII_Total_single_mass_ejection", chemistry_SNII_Total_single_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements (including processed and non processed elements) due to the explotion of one SNII of mass m."},
{"SNIa_Total_single_mass_ejection", chemistry_SNIa_Total_single_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements (including processed and non processed elements) due to the explotion of one SNIa of mass m."},
{"Total_single_mass_ejection", chemistry_Total_single_mass_ejection, METH_VARARGS,
"Mass fraction of ejected elements, per unit mass due to the explotion of one dying star of mass m1."},
{"get_Mmax", chemistry_get_Mmax, METH_VARARGS,
"Get max star mass of the IMF, in code unit."},
{"get_Mmin", chemistry_get_Mmin, METH_VARARGS,
"Get min star mass of the IMF, in code unit."},
{"get_Mco", chemistry_get_Mco, METH_VARARGS,
"Get mean WD mass, in code unit."},
{"get_SNIa_Mpl", chemistry_get_SNIa_Mpl, METH_VARARGS,
"Get min mass of SNIa, in code unit."},
{"get_SNIa_Mpu", chemistry_get_SNIa_Mpu, METH_VARARGS,
"Get max mass of SNIa, in code unit."},
{"get_SNII_Mmin", chemistry_get_SNII_Mmin, METH_VARARGS,
"Get min mass of SNII, in code unit."},
{"get_SNII_Mmax", chemistry_get_SNII_Mmax, METH_VARARGS,
"Get max mass of SNII, in code unit."},
{"get_imf_Ntot", chemistry_get_imf_Ntot, METH_VARARGS,
"Get number of stars in the imf, per unit mass."},
{"get_as", chemistry_get_as, METH_VARARGS,
"Get power coefficients."},
{"get_bs", chemistry_get_bs, METH_VARARGS,
"Get normalisation coefficients."},
{"get_fs", chemistry_get_fs, METH_VARARGS,
"Get fs, mass fraction at ms."},
{"get_allnelts", chemistry_get_allnelts, METH_VARARGS,
"Get the number of element considered, including ejected mass (Ej) and non processed ejected mass (Ejnp).."},
{"get_nelts", chemistry_get_nelts, METH_VARARGS,
"Get the number of element considered."},
{"get_allelts_labels", chemistry_get_allelts_labels, METH_VARARGS,
"Get the labels of elements, including ejected mass (Ej) and non processed ejected mass (Ejnp)."},
{"get_elts_labels", chemistry_get_elts_labels, METH_VARARGS,
"Get the labels of elements."},
{"get_elts_SolarAbundances", chemistry_get_elts_SolarAbundances, METH_VARARGS,
"Get the solar abundance of elements."},
{"get_MassFracSNII", chemistry_get_MassFracSNII, METH_VARARGS,
"Get the mass fraction per element ejected by a set of SNII."},
{"get_SingleMassFracSNII", chemistry_get_SingleMassFracSNII, METH_VARARGS,
"Get the mass fraction per element ejected by a SNII."},
{"get_MSNIa", chemistry_get_MSNIa, METH_VARARGS,
"Get the mass per element ejected by a SNIa."},
{"cooling_function", chemistry_cooling_function, METH_VARARGS,
"Compute cooling."},
{"imf_sampling", chemistry_imf_sampling, METH_VARARGS,
"Sample imf with n points."},
/* old poirier */
{"SNIa_rate_P", chemistry_SNIa_rate_P, METH_VARARGS,
"Return the number of SNIa per unit mass and time. (Poirier version)"},
{"SNII_rate_P", chemistry_SNII_rate_P, METH_VARARGS,
"Return the number of SNII per unit mass and time. (Poirier version)"},
{"SNII_mass_ejection_P", chemistry_SNII_mass_ejection_P, METH_VARARGS,
"Mass ejection due to SNII per unit mass and time. (Poirier version)"},
{NULL, NULL, 0, NULL} /* Sentinel */
};
void initchemistry(void)
{
(void) Py_InitModule("chemistry", chemistryMethods);
import_array();
}
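/*
A minimal usage sketch of this Python interface (a non-authoritative example: it assumes
the extension builds and imports as `chemistry`, that numpy is available, and that the
default parameter file `chimie.yr.dat` referenced above exists):

import numpy
import chemistry

chemistry.init_chimie("chimie.yr.dat", 1, 0)   # filename, NumberOfTables, DefaultTable
chemistry.set_table(0)

mmin = chemistry.get_Mmin()                    # in code mass units
mmax = chemistry.get_Mmax()
imf  = chemistry.get_imf(numpy.linspace(mmin, mmax, 100))
tmin = chemistry.star_lifetime(0.02, mmax)     # lifetime of the most massive star, code units
*/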
#endif /* PYCHEM */
#endif /* CHIMIE */
diff --git a/src/init.c b/src/init.c
index 92634e0..1a54fb8 100644
--- a/src/init.c
+++ b/src/init.c
@@ -1,666 +1,676 @@
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>
#include "allvars.h"
#include "proto.h"
/*! \file init.c
* \brief Code for initialisation of a simulation from initial conditions
*/
/*! This function reads the initial conditions, and allocates storage for the
* tree. Various variables of the particle data are initialised and an initial
* domain decomposition is performed. If SPH particles are present, the initial
* SPH smoothing lengths are determined.
*/
void init(void)
{
int i, j;
double a3;
#ifdef SFR
double Mgas=0,sum_Mgas=0;
int nstars=0;
int *numlist;
#endif
All.Time = All.TimeBegin;
switch (All.ICFormat)
{
case 1:
#if (MAKEGLASS > 1)
seed_glass();
#else
read_ic(All.InitCondFile);
#endif
break;
case 2:
case 3:
read_ic(All.InitCondFile);
break;
default:
if(ThisTask == 0)
printf("ICFormat=%d not supported.\n", All.ICFormat);
endrun(0);
}
All.Time = All.TimeBegin;
All.Ti_Current = 0;
if(All.ComovingIntegrationOn)
{
All.Timebase_interval = (log(All.TimeMax) - log(All.TimeBegin)) / TIMEBASE;
a3 = All.Time * All.Time * All.Time;
}
else
{
All.Timebase_interval = (All.TimeMax - All.TimeBegin) / TIMEBASE;
a3 = 1;
}
if (ThisTask==0)
printf("\nMinimum Time Step (Timebase_interval) = %g \n\n", All.Timebase_interval);
set_softenings();
All.NumCurrentTiStep = 0; /* setup some counters */
All.SnapshotFileCount = 0;
if(RestartFlag == 2)
All.SnapshotFileCount = atoi(All.InitCondFile + strlen(All.InitCondFile) - 3) + 1;
All.TotNumOfForces = 0;
All.NumForcesSinceLastDomainDecomp = 0;
if(All.ComovingIntegrationOn)
if(All.PeriodicBoundariesOn == 1)
check_omega();
All.TimeLastStatistics = All.TimeBegin - All.TimeBetStatistics;
#ifdef AGN_ACCRETION
All.TimeLastAccretion = All.TimeBegin - All.TimeBetAccretion;
All.LastMTotInRa = 0;
#endif
#ifdef BONDI_ACCRETION
All.BondiTimeLast = All.TimeBegin - All.BondiTimeBet;
#endif
#ifdef BUBBLES
All.EnergyBubbles=0;
#endif
if(All.ComovingIntegrationOn) /* change to new velocity variable */
{
for(i = 0; i < NumPart; i++)
for(j = 0; j < 3; j++)
P[i].Vel[j] *= sqrt(All.Time) * All.Time;
}
for(i = 0; i < NumPart; i++) /* start-up initialization */
{
for(j = 0; j < 3; j++)
P[i].GravAccel[j] = 0;
#ifdef PMGRID
for(j = 0; j < 3; j++)
P[i].GravPM[j] = 0;
#endif
P[i].Ti_endstep = 0;
P[i].Ti_begstep = 0;
P[i].OldAcc = 0;
P[i].GravCost = 1;
P[i].Potential = 0;
#ifdef PARTICLE_FLAG
P[i].Flag = 0;
#endif
#ifdef TESSEL
P[i].iPref = -1; /* index of the reference particle : -1 for normal particles */
#endif
}
#ifdef PMGRID
All.PM_Ti_endstep = All.PM_Ti_begstep = 0;
#endif
#ifdef FLEXSTEPS
All.PresentMinStep = TIMEBASE;
for(i = 0; i < NumPart; i++) /* start-up initialization */
{
P[i].FlexStepGrp = (int) (TIMEBASE * get_random_number(P[i].ID));
}
#endif
for(i = 0; i < N_gas; i++) /* initialize sph_properties */
{
for(j = 0; j < 3; j++)
{
SphP[i].VelPred[j] = P[i].Vel[j];
SphP[i].HydroAccel[j] = 0;
}
SphP[i].DtEntropy = 0;
#ifdef STELLAR_FLUX
SphP[i].EnergyFlux = 0.;
#endif
#ifdef AGN_HEATING
SphP[i].EgySpecAGNHeat = 0.;
SphP[i].DtEgySpecAGNHeat = 0.;
#endif
#ifdef MULTIPHASE
#ifdef COUNT_COLLISIONS
SphP[i].StickyCollisionNumber = 0;
#endif
#endif
#ifdef FEEDBACK
SphP[i].EgySpecFeedback = 0.;
SphP[i].DtEgySpecFeedback = 0.;
SphP[i].EnergySN = 0.;
SphP[i].EnergySNrem = 0.;
SphP[i].TimeSN = 0.;
for(j = 0; j < 3; j++)
{
SphP[i].FeedbackVel[j] = 0;
}
#endif
#ifdef FEEDBACK_WIND
for(j = 0; j < 3; j++)
{
SphP[i].FeedbackWindVel[j] = 0;
}
#endif
#if defined(ART_VISCO_MM)|| defined(ART_VISCO_RO) || defined(ART_VISCO_CD)
SphP[i].ArtBulkViscConst = All.ArtBulkViscConstMin;
#ifdef ART_VISCO_CD
SphP[i].ArtBulkViscConst = 0.;
SphP[i].DiVelAccurate = 0.;
SphP[i].DiVelTemp = 0.;
#endif
#endif
#ifdef OUTPUTOPTVAR1
SphP[i].OptVar1 = 0.;
#endif
#ifdef OUTPUTOPTVAR2
SphP[i].OptVar2 = 0.;
#endif
#ifdef COMPUTE_VELOCITY_DISPERSION
for(j = 0; j < VELOCITY_DISPERSION_SIZE; j++)
SphP[i].VelocityDispersion[j] = 0.;
#endif
if(RestartFlag == 0)
{
SphP[i].Hsml = 0;
SphP[i].Density = -1;
}
#ifdef MULTIPHASE
/* here, we set the Phase, according to the SpecificEnergy and not Entropy */
if (SphP[i].Entropy > All.CriticalEgySpec)
SphP[i].Phase = GAS_SPH; /* warmer phase */
else
{
if (SphP[i].Entropy >= All.CriticalNonCollisionalEgySpec)
SphP[i].Phase = GAS_STICKY;
else
SphP[i].Phase = GAS_DARK;
}
SphP[i].StickyFlag = 0;
SphP[i].StickyTime = All.Time;
//SphP[i].StickyTime = All.Time + All.StickyIdleTime*get_random_number(P[i].ID);
#endif
#ifdef SFR
Mgas += P[i].Mass;
#endif
}
#ifdef SFR
RearrangeParticlesFlag=0;
if (All.StarFormationStarMass==0)
{
/* compute the mean gas mass */
MPI_Allreduce(&Mgas, &sum_Mgas, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
All.StarFormationStarMass = (sum_Mgas/All.TotN_gas) / All.StarFormationNStarsFromGas;
}
for(i = 0; i < NumPart; i++) /* initialize st_properties */
{
if (P[i].Type==ST)
nstars++;
#ifdef STELLAR_PROP
if (P[i].Type==ST)
{
if (RestartFlag==0) /* only if starting from scratch */
{
#ifndef CHIMIE_INPUT_ALL
P[i].StPIdx = i-N_gas;
StP[P[i].StPIdx].FormationTime = 0; /* bad */
StP[P[i].StPIdx].InitialMass = P[i].Mass; /* bad */
StP[P[i].StPIdx].IDProj = P[i].ID;
StP[P[i].StPIdx].Hsml = 0;
StP[P[i].StPIdx].Density = -1;
for(j = 0; j < NELEMENTS; j++)
StP[P[i].StPIdx].Metal[j] = 0.;
StP[P[i].StPIdx].Flag = 0; /*obsolete*/
#else /* here, we restart from a file already processed by gadget */
P[i].StPIdx = i-N_gas;
StP[P[i].StPIdx].Flag = 0; /*obsolete*/
#endif
}
if (RestartFlag==2) /* start from snapshot */
{
P[i].StPIdx = i-N_gas;
StP[P[i].StPIdx].Flag = 0; /*obsolete*/
}
StP[P[i].StPIdx].PIdx = i;
#ifdef CHECK_ID_CORRESPONDENCE
StP[P[i].StPIdx].ID = P[i].ID;
#endif
}
//else
// P[i].StPIdx = -1; /* should be set; however, this may cause a problem in domain.c --> must be corrected */
#endif
#ifdef CHIMIE
if (P[i].Type==0)
{
if (RestartFlag==0 && header.flag_metals==0) /* only if starting from scratch and metal block not present */
{
for(j = 0; j < NELEMENTS; j++)
{
SphP[i].Metal[j] = (pow(10,All.InitGasMetallicity)-1e-10)*get_SolarAbundance(j);
//if (j==FE)
// SphP[i].Metal[j] = (pow(10,All.InitGasMetallicity)-1e-10)*All.CoolingParameters_FeHSolar;
//else
// SphP[i].Metal[j] = 0;
}
}
#ifdef CHIMIE_THERMAL_FEEDBACK
SphP[i].DeltaEgySpec = 0;
SphP[i].NumberOfSNIa = 0;
SphP[i].NumberOfSNII = 0;
SphP[i].SNIaThermalTime = -1;
- SphP[i].SNIIThermalTime = -1;
-
-
+ SphP[i].SNIIThermalTime = -1;
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
SphP[i].WindTime = All.TimeBegin-2*All.ChimieWindTime;
SphP[i].WindFlag = 0;
#endif
}
#endif /* chimie */
+
+#ifdef TIMESTEP_UPDATE_FOR_FEEDBACK
+ if (P[i].Type==0)
+ {
+ for (j=0;j<3;j++)
+ SphP[i].FeedbackUpdatedAccel[j] = 0;
+ }
+#endif
+
+
+
+
}
#ifdef CHECK_ID_CORRESPONDENCE
#ifdef CHIMIE
for(i = N_gas; i < N_gas+N_stars; i++)
{
if( StP[P[i].StPIdx].PIdx != i )
{
printf("\nP/StP correspondance error\n");
printf("(%d) (in domain before) N_stars=%d N_gas=%d i=%d id=%d P[i].StPIdx=%d StP[P[i].StPIdx].PIdx=%d\n\n",ThisTask,N_stars,N_gas,i,P[i].ID,P[i].StPIdx,StP[P[i].StPIdx].PIdx);
endrun(333001);
}
if(StP[P[i].StPIdx].ID != P[i].ID)
{
printf("\nP/StP correspondance error\n");
printf("(%d) (in domain before) N_gas=%d N_stars=%d i=%d Type=%d P.Id=%d P[i].StPIdx=%d StP[P[i].StPIdx].ID=%d \n\n",ThisTask,N_gas,N_stars,i,P[i].Type,P[i].ID, P[i].StPIdx, StP[P[i].StPIdx].ID);
endrun(333002);
}
}
if (ThisTask==0)
printf("Check id correspondence before decomposition done...\n");
#endif
#endif
/* here, we would like to reduce N_stars to TotN_stars */
/* MPI_Allreduce(&N_stars, &All.TotN_stars, 1, MPI_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); does not works */
numlist = malloc(NTask * sizeof(int) * NTask);
MPI_Allgather(&N_stars, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD);
for(i = 0, All.TotN_stars = 0; i < NTask; i++)
All.TotN_stars += numlist[i];
free(numlist);
if(ThisTask == 0)
{
printf("Total number of star particles : %d%09d\n\n",(int) (All.TotN_stars / 1000000000), (int) (All.TotN_stars % 1000000000));
fflush(stdout);
}
#endif /*SFR*/
ngb_treeallocate(MAX_NGB);
force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart);
All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency;
Flag_FullStep = 1; /* to ensure that Peano-Hilbert order is done */
domain_Decomposition(); /* do initial domain decomposition (gives equal numbers of particles) */
ngb_treebuild(); /* will build tree */
setup_smoothinglengths();
#ifdef CHIMIE
#ifndef CHIMIE_INPUT_ALL
stars_setup_smoothinglengths();
#endif
#endif
#ifdef TESSEL
setup_searching_radius();
#endif
TreeReconstructFlag = 1;
/* at this point, the entropy variable normally contains the
* internal energy, read in from the initial conditions file, unless the file
* explicitly signals that the initial conditions contain the entropy directly.
* Once the density has been computed, we can convert thermal energy to entropy.
*/
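/* For an ideal gas this conversion reads A = (GAMMA-1) * u / rho^(GAMMA-1),
 * where u is the specific internal energy and rho the physical density
 * (Density/a^3 when comoving integration is on); this is exactly the
 * expression applied in the loop below.
 */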
#ifndef ISOTHERM_EQS
if(header.flag_entropy_instead_u == 0)
{
for(i = 0; i < N_gas; i++)
#ifdef MULTIPHASE
{
switch(SphP[i].Phase)
{
case GAS_SPH:
SphP[i].Entropy = GAMMA_MINUS1 * SphP[i].Entropy / pow(SphP[i].Density / a3, GAMMA_MINUS1);
break;
case GAS_STICKY:
break;
case GAS_DARK:
SphP[i].Entropy = -SphP[i].Entropy;
break;
}
}
#else
SphP[i].Entropy = GAMMA_MINUS1 * SphP[i].Entropy / pow(SphP[i].Density / a3, GAMMA_MINUS1);
#endif
}
#endif
#ifdef ENTROPYPRED
for(i = 0; i < N_gas; i++)
SphP[i].EntropyPred = SphP[i].Entropy;
#endif
}
/*! This routine computes the mass content of the box and compares it to the
* specified value of Omega-matter. If discrepant, the run is terminated.
*/
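/* Explicitly, omega = rho_mean / rho_crit = (masstot / BoxSize^3) / (3 H^2 / (8 pi G)),
 * which is the expression evaluated below.
 */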
void check_omega(void)
{
double mass = 0, masstot, omega;
int i;
for(i = 0; i < NumPart; i++)
mass += P[i].Mass;
MPI_Allreduce(&mass, &masstot, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
omega =
masstot / (All.BoxSize * All.BoxSize * All.BoxSize) / (3 * All.Hubble * All.Hubble / (8 * M_PI * All.G));
if(fabs(omega - All.Omega0) > 1.0e-3)
{
if(ThisTask == 0)
{
printf("\n\nI've found something odd!\n");
printf
("The mass content accounts only for Omega=%g,\nbut you specified Omega=%g in the parameterfile.\n",
omega, All.Omega0);
printf("\nI better stop.\n");
fflush(stdout);
}
endrun(1);
}
}
/*! This function is used to find an initial smoothing length for each SPH
* particle. It guarantees that the number of neighbours will be between
* desired_ngb-MAXDEV and desired_ngb+MAXDEV. For simplicity, a first guess
* of the smoothing length is provided to the function density(), which will
* then iterate if needed to find the right smoothing length.
*/
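/* The first guess below assumes a roughly uniform density inside the tree node
 * enclosing the particle: a sphere containing about DesNumNgb particle masses
 * then has radius h = (3/(4 pi) * DesNumNgb * m_part / m_node)^(1/3) * node_length
 * (with the analogous 2D expression when TWODIMS is set).
 */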
void setup_smoothinglengths(void)
{
int i, no, p;
if(RestartFlag == 0)
{
for(i = 0; i < N_gas; i++)
{
no = Father[i];
while(10 * All.DesNumNgb * P[i].Mass > Nodes[no].u.d.mass)
{
p = Nodes[no].u.d.father;
if(p < 0)
break;
no = p;
}
#ifndef TWODIMS
SphP[i].Hsml =
pow(3.0 / (4 * M_PI) * All.DesNumNgb * P[i].Mass / Nodes[no].u.d.mass, 1.0 / 3) * Nodes[no].len;
#else
SphP[i].Hsml =
pow(1.0 / (M_PI) * All.DesNumNgb * P[i].Mass / Nodes[no].u.d.mass, 1.0 / 2) * Nodes[no].len;
#endif
}
}
density(0);
}
#ifdef CHIMIE
/*! This function is used to find an initial smoothing length for each SPH
* particle. It guarantees that the number of neighbours will be between
* desired_ngb-MAXDEV and desired_ngb+MAXDEV. For simplicity, a first guess
* of the smoothing length is provided to the function density(), which will
* then iterate if needed to find the right smoothing length.
*/
void stars_setup_smoothinglengths(void)
{
int i, no, p;
if(RestartFlag == 0)
{
for(i = 0; i < NumPart; i++)
{
if(P[i].Type == ST)
{
no = Father[i];
while(10 * All.DesNumNgb * P[i].Mass > Nodes[no].u.d.mass)
{
p = Nodes[no].u.d.father;
if(p < 0)
break;
no = p;
}
#ifndef TWODIMS
StP[P[i].StPIdx].Hsml =
pow(3.0 / (4 * M_PI) * All.DesNumNgb * P[i].Mass / Nodes[no].u.d.mass, 1.0 / 3) * Nodes[no].len;
#else
StP[P[i].StPIdx].Hsml =
pow(1.0 / (M_PI) * All.DesNumNgb * P[i].Mass / Nodes[no].u.d.mass, 1.0 / 2) * Nodes[no].len;
#endif
}
}
}
stars_density();
}
#endif
/*! If the code is run in glass-making mode, this function populates the
* simulation box with a Poisson sample of particles.
*/
#if (MAKEGLASS > 1)
void seed_glass(void)
{
int i, k, n_for_this_task;
double Range[3], LowerBound[3];
double drandom, partmass;
long long IDstart;
All.TotNumPart = MAKEGLASS;
partmass = All.Omega0 * (3 * All.Hubble * All.Hubble / (8 * M_PI * All.G))
* (All.BoxSize * All.BoxSize * All.BoxSize) / All.TotNumPart;
All.MaxPart = All.PartAllocFactor * (All.TotNumPart / NTask); /* sets the maximum number of particles that may */
allocate_memory();
header.npartTotal[1] = All.TotNumPart;
header.mass[1] = partmass;
if(ThisTask == 0)
{
printf("\nGlass initialising\nPartMass= %g\n", partmass);
printf("TotNumPart= %d%09d\n\n",
(int) (All.TotNumPart / 1000000000), (int) (All.TotNumPart % 1000000000));
}
/* set the number of particles assigned locally to this task */
n_for_this_task = All.TotNumPart / NTask;
if(ThisTask == NTask - 1)
n_for_this_task = All.TotNumPart - (NTask - 1) * n_for_this_task;
NumPart = 0;
IDstart = 1 + (All.TotNumPart / NTask) * ThisTask;
/* split the spatial domain into Ntask slabs in z-direction */
Range[0] = Range[1] = All.BoxSize;
Range[2] = All.BoxSize / NTask;
LowerBound[0] = LowerBound[1] = 0;
LowerBound[2] = ThisTask * Range[2];
srand48(ThisTask);
for(i = 0; i < n_for_this_task; i++)
{
for(k = 0; k < 3; k++)
{
drandom = drand48();
P[i].Pos[k] = LowerBound[k] + Range[k] * drandom;
P[i].Vel[k] = 0;
}
P[i].Mass = partmass;
P[i].Type = 1;
P[i].ID = IDstart + i;
NumPart++;
}
}
#endif
diff --git a/src/params/params.chemistry3 b/src/params/params.chemistry3
new file mode 100644
index 0000000..c6733dc
--- /dev/null
+++ b/src/params/params.chemistry3
@@ -0,0 +1,228 @@
+% Relevant files
+
+InitCondFile snap.dat
+OutputDir snap
+
+EnergyFile energy.txt
+SystemFile system.txt
+InfoFile info.txt
+TimingsFile timings.txt
+CpuFile cpu.txt
+LogFile log.txt
+
+RestartFile restart
+SnapshotFileBase snapshot
+
+OutputListFilename snaplist.txt
+
+% CPU-time limit
+
+TimeLimitCPU 100000000
+ResubmitOn 0
+ResubmitCommand xyz
+
+
+% Code options
+
+ICFormat 1
+SnapFormat 1
+ComovingIntegrationOn 0
+
+TypeOfTimestepCriterion 0
+OutputListOn 0
+PeriodicBoundariesOn 0
+
+
+% Characteristics of run
+
+TimeBegin 0.0
+TimeMax 3000
+
+RandomSeed 77
+
+Omega0 0
+OmegaLambda 0
+OmegaBaryon 0
+HubbleParam 1.0
+BoxSize 0
+
+
+% Output frequency
+
+TimeBetSnapshot 10.0
+TimeOfFirstSnapshot 0
+
+CpuTimeBetRestartFile 3600.0 ; here in seconds
+TimeBetStatistics 50.
+
+NumFilesPerSnapshot 1
+NumFilesWrittenInParallel 1
+
+
+% Accuracy of time integration
+
+ErrTolIntAccuracy 0.05 % used for TypeOfTimestepCriterion==0
+
+CourantFac 0.1 % for SPH
+MaxSizeTimestep 1.0
+MinSizeTimestep 0.0
+
+
+
+
+% Tree algorithm, force accuracy, domain update frequency
+
+ErrTolTheta 0.7
+TypeOfOpeningCriterion 0
+ErrTolForceAcc 0.05
+
+NgbFactorTimestep 4 % must be a power of 2
+
+
+TreeDomainUpdateFrequency 0.1 # 0.1
+MaxRMSDisplacementFac 0.25
+
+
+% Further parameters of SPH
+
+DesNumNgb 50
+MaxNumNgbDeviation 0.1
+ArtBulkViscConst 0.8
+InitGasTemp 0 % always ignored if set to 0
+MinGasTemp 10
+
+
+% cooling parameters
+CoolingType 2 % 0=Sutherland 1=with heating 2=with metals
+CoolingFile cooling.dat % cooling file
+CutofCoolingTemperature 1e1 % below this value, the cooling is zero
+InitGasMetallicity -10 % gas metallicity
+
+
+
+
+
+% multiphase parameters
+
+%PhaseFile phase.txt
+%CriticalTemperature 1e4
+%CriticalEgyFactor 1.00
+%StickyLambda 1e-4 % sticky parameter
+%StickyDensity 1e-24 % sticky density parameter
+%StickyRsphFact 1.0 % 1 or less
+
+
+% star formation parameters
+SfrFile sfr.txt
+StarFormationType 2 % 0=Rasera 1=Springel 2=use cstar
+StarFormationCstar 0.1
+StarFormationTime 190.0 % in Gyr (3:rasera,2.1:springel) not used if 2
+StarFormationDensity 1.67e-25 % !!!!! reset to -25 (treeasph) !!! in g/cm3 (5.7e-27:rasera)
+StarFormationTemperature 3e4 % in Kelvin (not used when sticky)
+StarFormationNStarsFromGas 4
+StarFormationMgMsFraction 0.5 % min Gas-Star ratio allowed Mgas >= 0.5 Mstar
+StarFormationStarMass 0.0 % mass of a star particle (useful for restart=2)
+ % if set to 0, it is taken as the
+ % mean mass of gas particles divided by NStarsFromGas
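+ % (for instance, a mean gas particle mass of 1e5 Msol with
+ % StarFormationNStarsFromGas=4 would give star particles of 2.5e4 Msol)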
+
+
+% chimie parameters
+ChimieFile chimie.txt
+ChimieNumberOfParameterFiles 1
+ChimieParameterFile chimie.yr.dat
+ChimieMaxSizeTimestep 0.2 % Myr
+ChimieKineticFeedbackFraction 0 % fraction of energy released in kinetic form
+ChimieWindSpeed 1 % km/s
+ChimieWindTime 15 % Myr
+ChimieSNIaThermalTime 5 % Myr
+ChimieSNIIThermalTime 5 % Myr
+ChimieSupernovaEnergy 3e49 % supernova energy in erg
+
+% feedback parameters
+
+%SupernovaEgySpecPerMassUnit 5e14 % in cgs : (erg/g) (5e14=1e48erg/Msol)
+%SupernovaFractionInEgyKin 0.9
+%SupernovaTime 5 % in code unit
+
+
+
+
+
+
+
+
+% bubbles
+%BubbleFile bubble.txt
+%BubblesInitFile bubbles.dat
+%BubblesDelta 0.5
+%BubblesAlpha 1.0
+%BubblesRadiusFactor 0.25
+
+
+% agn_feedback
+%AccretionFile accretion.txt
+%TimeBetAccretion 2
+%AccretionRadius 10 % in code unit
+%AGNFactor 0.00015
+%MinMTotInRa 0.08 % in code unit
+
+
+% outer potential
+
+% NFW
+%HaloMass 1e15 % M200 (gas+dm) in Msolar
+%HaloConcentration 5.
+%GasMassFraction 0.15
+
+% Pseudo-isothermal potential
+%Rho0 6.2e-05 % in user unit (with gas)
+%Rc 40 % in user unit
+%GasMassFraction 0.15
+
+
+% Memory allocation
+
+PartAllocFactor 10.0
+StarsAllocFactor 0.25
+TreeAllocFactor 10.
+BufferSize 50 % in MByte
+
+% System of units
+
+UnitLength_in_cm 3.085e+21
+UnitMass_in_g 1.989e+43
+UnitVelocity_in_cm_per_s 20725573.785998672
+
+GravityConstantInternal 0.0 % if set to zero, the physical value 6.672e-8
+ % is taken
+
+% Softening lengths
+
+MinGasHsmlFractional 0.1 % minimum softening in terms of the gravitational
+ % softening length
+
+SofteningGas 0.05
+SofteningHalo 0.05
+SofteningDisk 0.125
+SofteningBulge 0.125
+SofteningStars 0.125
+SofteningBndry 0.125
+
+SofteningGasMaxPhys 0.05
+SofteningHaloMaxPhys 0.05
+SofteningDiskMaxPhys 0.125
+SofteningBulgeMaxPhys 0.125
+SofteningStarsMaxPhys 0.125
+SofteningBndryMaxPhys 0.125
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/predict.c b/src/predict.c
index f5d5cc1..5fa8870 100644
--- a/src/predict.c
+++ b/src/predict.c
@@ -1,178 +1,178 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>
#include <gsl/gsl_math.h>
#include "allvars.h"
#include "proto.h"
/*! \file predict.c
* \brief drift particles by a small time interval
*
* This function contains code to implement a drift operation on all the
* particles, which represents one part of the leapfrog integration scheme.
*/
/*! This function drifts all particles from the current time to the future:
 * time0 -> time1
*
* If there is no explicit tree construction in the following timestep, the
* tree nodes are also drifted and updated accordingly. Note: For periodic
* boundary conditions, the mapping of coordinates onto the interval
* [0,All.BoxSize] is only done before the domain decomposition, or for
* outputs to snapshot files. This simplifies dynamic tree updates, and
* allows the domain decomposition to be carried out only every once in a
* while.
*/
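/* Concretely, positions are advanced by Vel*dt_drift; for gas particles the
 * density and smoothing length are drifted using the velocity divergence,
 * Density *= exp(-DivVel*dt_drift) and Hsml *= exp(DivVel*dt_drift/3),
 * and the pressure is recomputed from the extrapolated entropy.
 */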
void move_particles(int time0, int time1)
{
int i, j;
double dt_drift, dt_gravkick, dt_hydrokick, dt_entr;
double t0, t1;
t0 = second();
if(All.ComovingIntegrationOn)
{
dt_drift = get_drift_factor(time0, time1);
dt_gravkick = get_gravkick_factor(time0, time1);
dt_hydrokick = get_hydrokick_factor(time0, time1);
}
else
{
dt_drift = dt_gravkick = dt_hydrokick = (time1 - time0) * All.Timebase_interval;
}
for(i = 0; i < NumPart; i++)
{
for(j = 0; j < 3; j++)
P[i].Pos[j] += P[i].Vel[j] * dt_drift;
if(P[i].Type == 0)
{
#ifdef PMGRID
for(j = 0; j < 3; j++)
{
SphP[i].VelPred[j] += (P[i].GravAccel[j] + P[i].GravPM[j]) * dt_gravkick + SphP[i].HydroAccel[j] * dt_hydrokick;
}
#else
for(j = 0; j < 3; j++)
{
SphP[i].VelPred[j] += P[i].GravAccel[j] * dt_gravkick + SphP[i].HydroAccel[j] * dt_hydrokick;
}
#endif
SphP[i].Density *= exp(-SphP[i].DivVel * dt_drift);
SphP[i].Hsml *= exp(0.333333333333 * SphP[i].DivVel * dt_drift);
if(SphP[i].Hsml < All.MinGasHsml)
SphP[i].Hsml = All.MinGasHsml;
dt_entr = (time1 - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval;
SphP[i].Pressure = (SphP[i].Entropy + SphP[i].DtEntropy * dt_entr) * pow(SphP[i].Density, GAMMA);
#ifdef ENTROPYPRED
SphP[i].EntropyPred = (SphP[i].Entropy + SphP[i].DtEntropy * dt_entr);
#endif
#ifdef CHECK_ENTROPY_SIGN
if ((SphP[i].EntropyPred < 0)||(SphP[i].Entropy < 0))
{
printf("\ntask=%d: entropy less than zero in move_particles !\n", ThisTask);
printf("ID=%d Entropy=%g EntropyPred=%g DtEntropy=%g dt_entr=%g\n",P[i].ID,SphP[i].Entropy,SphP[i].EntropyPred,SphP[i].DtEntropy,dt_entr);
fflush(stdout);
- endrun(333001);
+ endrun(333021);
}
#endif
#ifdef NO_NEGATIVE_PRESSURE
if (SphP[i].Pressure<0)
{
printf("\ntask=%d: pressure less than zero in move_particles !\n", ThisTask);
printf("ID=%d Entropy=%g DtEntropy*dt=%g Density=%g DtEntropy=%g dt=%g\n",P[i].ID,SphP[i].Entropy,SphP[i].DtEntropy*dt_entr,SphP[i].Density,SphP[i].DtEntropy,dt_entr);
fflush(stdout);
- endrun(333002);
+ endrun(333022);
}
#endif
/***********************************************************/
/* compute art visc coeff */
/***********************************************************/
#if defined(ART_VISCO_MM)|| defined(ART_VISCO_RO) || defined(ART_VISCO_CD)
move_art_visc(i,dt_drift);
#endif
}
}
/* if domain-decomp and tree are not going to be reconstructed, update dynamically. */
if(All.NumForcesSinceLastDomainDecomp < All.TotNumPart * All.TreeDomainUpdateFrequency)
{
for(i = 0; i < Numnodestree; i++)
for(j = 0; j < 3; j++)
Nodes[All.MaxPart + i].u.d.s[j] += Extnodes[All.MaxPart + i].vs[j] * dt_drift;
force_update_len();
force_update_pseudoparticles();
}
t1 = second();
All.CPU_Predict += timediff(t0, t1);
}
/*! This function makes sure that all particle coordinates (Pos) are
* periodically mapped onto the interval [0, BoxSize]. After this function
* has been called, a new domain decomposition should be done, which will
* also force a new tree construction.
*/
#ifdef PERIODIC
void do_box_wrapping(void)
{
int i, j;
double boxsize[3];
for(j = 0; j < 3; j++)
boxsize[j] = All.BoxSize;
#ifdef LONG_X
boxsize[0] *= LONG_X;
#endif
#ifdef LONG_Y
boxsize[1] *= LONG_Y;
#endif
#ifdef LONG_Z
boxsize[2] *= LONG_Z;
#endif
for(i = 0; i < NumPart; i++)
for(j = 0; j < 3; j++)
{
while(P[i].Pos[j] < 0)
P[i].Pos[j] += boxsize[j];
while(P[i].Pos[j] >= boxsize[j])
P[i].Pos[j] -= boxsize[j];
}
}
#endif
diff --git a/src/proto.h b/src/proto.h
index 6bb78bb..57590af 100644
--- a/src/proto.h
+++ b/src/proto.h
@@ -1,537 +1,547 @@
/*! \file proto.h
* \brief this file contains all function prototypes of the code
*/
#ifndef ALLVARS_H
#include "allvars.h"
#endif
#ifdef HAVE_HDF5
#include <hdf5.h>
#endif
void advance_and_find_timesteps(void);
void allocate_commbuffers(void);
void allocate_memory(void);
void begrun(void);
int blockpresent(enum iofields blocknr);
#ifdef BLOCK_SKIPPING
int blockabsent(enum iofields blocknr);
#endif
void catch_abort(int sig);
void catch_fatal(int sig);
void check_omega(void);
void close_outputfiles(void);
int compare_key(const void *a, const void *b);
void compute_accelerations(int mode);
void compute_global_quantities_of_system(void);
void compute_potential(void);
int dens_compare_key(const void *a, const void *b);
void density(int mode);
void density_decouple(void);
void density_evaluate(int i, int mode);
#ifdef CHIMIE
int stars_dens_compare_key(const void *a, const void *b);
void stars_density(void);
void stars_density_evaluate(int i, int mode);
#endif
void distribute_file(int nfiles, int firstfile, int firsttask, int lasttask, int *filenr, int *master, int *last);
double dmax(double, double);
double dmin(double, double);
void do_box_wrapping(void);
void domain_Decomposition(void);
int domain_compare_key(const void *a, const void *b);
int domain_compare_key(const void *a, const void *b);
int domain_compare_toplist(const void *a, const void *b);
void domain_countToGo(void);
void domain_decompose(void);
void domain_determineTopTree(void);
void domain_exchangeParticles(int partner, int sphflag, int send_count, int recv_count);
void domain_findExchangeNumbers(int task, int partner, int sphflag, int *send, int *recv);
void domain_findExtent(void);
int domain_findSplit(int cpustart, int ncpu, int first, int last);
int domain_findSplityr(int cpustart, int ncpu, int first, int last);
void domain_shiftSplit(void);
void domain_shiftSplityr(void);
void domain_sumCost(void);
void domain_topsplit(int node, peanokey startkey);
void domain_topsplit_local(int node, peanokey startkey);
double drift_integ(double a, void *param);
void dump_particles(void);
void empty_read_buffer(enum iofields blocknr, int offset, int pc, int type);
void endrun(int);
void energy_statistics(void);
#ifdef ADVANCEDSTATISTICS
void advanced_energy_statistics(void);
#endif
void every_timestep_stuff(void);
void ewald_corr(double dx, double dy, double dz, double *fper);
void ewald_force(int ii, int jj, int kk, double x[3], double force[3]);
void ewald_init(void);
double ewald_pot_corr(double dx, double dy, double dz);
double ewald_psi(double x[3]);
void fill_Tab_IO_Labels(void);
void fill_write_buffer(enum iofields blocknr, int *pindex, int pc, int type);
void find_dt_displacement_constraint(double hfac);
int find_files(char *fname);
int find_next_outputtime(int time);
void find_next_sync_point_and_drift(void);
void force_create_empty_nodes(int no, int topnode, int bits, int x, int y, int z, int *nodecount, int *nextfree);
void force_exchange_pseudodata(void);
void force_flag_localnodes(void);
void force_insert_pseudo_particles(void);
void force_setupnonrecursive(int no);
void force_treeallocate(int maxnodes, int maxpart);
int force_treebuild(int npart);
int force_treebuild_single(int npart);
int force_treeevaluate(int target, int mode, double *ewaldcountsum);
int force_treeevaluate_direct(int target, int mode);
int force_treeevaluate_ewald_correction(int target, int mode, double pos_x, double pos_y, double pos_z, double aold);
void force_treeevaluate_potential(int target, int type);
void force_treeevaluate_potential_shortrange(int target, int mode);
int force_treeevaluate_shortrange(int target, int mode);
void force_treefree(void);
void force_treeupdate_pseudos(void);
void force_update_hmax(void);
void force_update_len(void);
void force_update_node(int no, int flag);
void force_update_node_hmax_local(void);
void force_update_node_hmax_toptree(void);
void force_update_node_len_local(void);
void force_update_node_len_toptree(void);
void force_update_node_recursive(int no, int sib, int father);
void force_update_pseudoparticles(void);
void force_update_size_of_parent_node(int no);
void free_memory(void);
int get_bytes_per_blockelement(enum iofields blocknr);
void get_dataset_name(enum iofields blocknr, char *buf);
int get_datatype_in_block(enum iofields blocknr);
double get_drift_factor(int time0, int time1);
double get_gravkick_factor(int time0, int time1);
double get_hydrokick_factor(int time0, int time1);
int get_particles_in_block(enum iofields blocknr, int *typelist);
double get_random_number(int id);
#ifdef SFR
double get_StarFormation_random_number(int id);
#endif
#ifdef FEEDBACK_WIND
double get_FeedbackWind_random_number(int id);
#endif
#ifdef CHIMIE
double get_Chimie_random_number(int id);
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
double get_ChimieKineticFeedback_random_number(int id);
#endif
int get_timestep(int p, double *a, int flag);
int get_values_per_blockelement(enum iofields blocknr);
#ifdef SYNCHRONIZE_NGB_TIMESTEP
void synchronize_ngb_timestep();
int synchronize_ngb_timestep_evaluate(int target, int mode);
int synchronize_ngb_timestep_compare_key(const void *a, const void *b);
#endif
int grav_tree_compare_key(const void *a, const void *b);
void gravity_forcetest(void);
void gravity_tree(void);
void gravity_tree_shortrange(void);
double gravkick_integ(double a, void *param);
int hydro_compare_key(const void *a, const void *b);
void hydro_evaluate(int target, int mode);
void hydro_force(void);
double hydrokick_integ(double a, void *param);
int imax(int, int);
int imin(int, int);
void init(void);
void init_drift_table(void);
void init_peano_map(void);
#ifdef COSMICTIME
void init_cosmictime_table(void);
double get_cosmictime_difference(int time0, int time1);
#endif
void long_range_force(void);
void long_range_init(void);
void long_range_init_regionsize(void);
void move_particles(int time0, int time1);
size_t my_fread(void *ptr, size_t size, size_t nmemb, FILE * stream);
size_t my_fwrite(void *ptr, size_t size, size_t nmemb, FILE * stream);
int ngb_clear_buf(FLOAT searchcenter[3], FLOAT hguess, int numngb);
void ngb_treeallocate(int npart);
void ngb_treebuild(void);
int ngb_treefind_pairs(FLOAT searchcenter[3], FLOAT hsml, int phase, int *startnode);
#ifdef MULTIPHASE
int ngb_treefind_phase_pairs(FLOAT searchcenter[3], FLOAT hsml, int phase, int *startnode);
int ngb_treefind_sticky_collisions(FLOAT searchcenter[3], FLOAT hguess, int phase, int *startnode);
#endif
int ngb_treefind_variable(FLOAT searchcenter[3], FLOAT hguess, int phase, int *startnode);
#ifdef CHIMIE
int ngb_treefind_variable_for_chimie(FLOAT searchcenter[3], FLOAT hguess, int *startnode);
#endif
void ngb_treefree(void);
void ngb_treesearch(int);
void ngb_treesearch_pairs(int);
void ngb_update_nodes(void);
void open_outputfiles(void);
peanokey peano_hilbert_key(int x, int y, int z, int bits);
void peano_hilbert_order(void);
void pm_init_nonperiodic(void);
void pm_init_nonperiodic_allocate(int dimprod);
void pm_init_nonperiodic_free(void);
void pm_init_periodic(void);
void pm_init_periodic_allocate(int dimprod);
void pm_init_periodic_free(void);
void pm_init_regionsize(void);
void pm_setup_nonperiodic_kernel(void);
int pmforce_nonperiodic(int grnr);
void pmforce_periodic(void);
int pmpotential_nonperiodic(int grnr);
void pmpotential_periodic(void);
double pow(double, double); /* on some old DEC Alphas, the correct prototype for pow() is missing, even when math.h is included */
void read_file(char *fname, int readTask, int lastTask);
void read_header_attributes_in_hdf5(char *fname);
void read_ic(char *fname);
int read_outputlist(char *fname);
void read_parameter_file(char *fname);
void readjust_timebase(double TimeMax_old, double TimeMax_new);
void reorder_gas(void);
void reorder_particles(void);
#ifdef STELLAR_PROP
void reorder_stars(void);
void reorder_st(void);
#endif
void restart(int mod);
void run(void);
void savepositions(int num);
double second(void);
void seed_glass(void);
void set_random_numbers(void);
void set_softenings(void);
void set_units(void);
void init_local_sys_state(void);
void setup_smoothinglengths(void);
#ifdef CHIMIE
void stars_setup_smoothinglengths(void);
#endif
void statistics(void);
void terminate_processes(void);
double timediff(double t0, double t1);
#ifdef HAVE_HDF5
void write_header_attributes_in_hdf5(hid_t handle);
#endif
void write_file(char *fname, int readTask, int lastTask);
void write_pid_file(void);
#ifdef COOLING
int init_cooling(FLOAT metallicity);
int init_cooling_with_metals();
double cooling_function(double temperature);
double cooling_function_with_metals(double temperature,double metal);
void init_from_new_redshift(double Redshift);
double J_0();
double J_nu(double e);
double sigma_rad_HI(double e);
double sigma_rad_HeI(double e);
double sigma_rad_HeII(double e);
double cooling_bremstrahlung_HI(double T);
double cooling_bremstrahlung_HeI(double T);
double cooling_bremstrahlung_HeII(double T);
double cooling_ionization_HI(double T);
double cooling_ionization_HeI(double T);
double cooling_ionization_HeII(double T);
double cooling_recombination_HI(double T);
double cooling_recombination_HeI(double T);
double cooling_recombination_HeII(double T);
double cooling_dielectric_recombination(double T);
double cooling_excitation_HI(double T);
double cooling_excitation_HII(double T);
double cooling_compton(double T);
double A_HII(double T);
double A_HeIId(double T);
double A_HeII(double T);
double A_HeIII(double T);
double G_HI(double T);
double G_HeI(double T);
double G_HeII(double T);
double G_gHI();
double G_gHeI();
double G_gHeII();
double G_gHI_t(double J0);
double G_gHeI_t(double J0);
double G_gHeII_t(double J0);
double G_gHI_w();
double G_gHeI_w();
double G_gHeII_w();
double heating_radiative_HI();
double heating_radiative_HeI();
double heating_radiative_HeII();
double heating_radiative_HI_t(double J0);
double heating_radiative_HeI_t(double J0);
double heating_radiative_HeII_t(double J0);
double heating_radiative_HI_w();
double heating_radiative_HeI_w();
double heating_radiative_HeII_w();
double heating_compton();
void print_cooling(double T,double c1,double c2,double c3,double c4,double c5,double c6,double c7,double c8,double c9,double c10,double c11,double c12,double c13,double h1, double h2, double h3, double h4);
void compute_densities(double T,double X,double* n_H, double* n_HI,double* n_HII,double* n_HEI,double* n_HEII,double* n_HEIII,double* n_E,double* mu);
void compute_cooling_from_T_and_Nh(double T,double X,double n_H,double *c1,double *c2,double *c3,double *c4,double *c5,double *c6,double *c7,double *c8,double *c9,double *c10,double *c11,double *c12,double *c13,double *h1, double *h2, double *h3, double *h4);
double compute_cooling_from_Egyspec_and_Density(double Egyspec,double Density, double *MeanWeight);
double DoCooling(FLOAT Density,FLOAT Entropy,int Phase,int i,FLOAT DtEntropyVisc, double dt, double hubble_a);
void CoolingForOne(int i,int t0,int t1,double a3inv,double hubble_a);
void cooling();
double lambda(FLOAT density,FLOAT egyspec, int phase, int i);
#endif
#ifdef HEATING
void heating();
double gamma_fct(FLOAT Density,FLOAT Entropy,int i);
#endif
#ifdef AGN_HEATING
void agn_heating();
double gamma_fct(FLOAT density,double r, double SpecPower);
double HeatingRadialDependency(double r);
#endif
#ifdef MULTIPHASE
void update_phase(void);
void init_sticky(void);
void sticky(void);
void sticky_compute_energy_kin(int mode);
void sticky_collisions(void);
void sticky_collisions2(int loop);
void sticky_evaluate(int target, int mode, int loop);
int sticky_compare_key(const void *a, const void *b);
#endif
#ifdef FEEDBACK_WIND
void feedbackwind_compute_energy_kin(int mode);
#endif
#ifdef CHIMIE
void init_chimie(void);
void check_chimie(void);
void chimie(void);
void do_chimie(void);
void chimie_evaluate(int target, int mode);
int chimie_compare_key(const void *a, const void *b);
int get_nelts();
char* get_Element(int i);
float get_SolarAbundance(int i);
#if defined(CHIMIE_THERMAL_FEEDBACK) && defined(CHIMIE_COMPUTE_THERMAL_FEEDBACK_ENERGY)
void chimie_compute_energy_int(int mode);
#endif
#if defined(CHIMIE_KINETIC_FEEDBACK) && defined(CHIMIE_COMPUTE_KINETIC_FEEDBACK_ENERGY)
void chimie_compute_energy_kin(int mode);
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
void chimie_apply_wind(void);
#endif
#endif
#ifdef OUTERPOTENTIAL
void init_outer_potential(void);
void outer_forces(void);
void outer_potential(void);
#ifdef NFW
void init_outer_potential_nfw(void);
void outer_forces_nfw(void);
void outer_potential_nfw(void);
#endif
#ifdef PLUMMER
void init_outer_potential_plummer(void);
void outer_forces_plummer(void);
void outer_potential_plummer(void);
#endif
#ifdef PISOTHERM
void init_outer_potential_pisotherm(void);
void outer_forces_pisotherm(void);
void outer_potential_pisotherm(void);
double potential_f(double r, void * params);
double get_potential(double r);
#endif
#ifdef CORIOLIS
void init_outer_potential_coriolis(void);
void set_outer_potential_coriolis(void);
void outer_forces_coriolis(void);
void outer_potential_coriolis(void);
#endif
#endif
#ifdef SFR
void star_formation(void);
void rearrange_particle_sequence(void);
void sfr_compute_energy_int(int mode);
void sfr_check_number_of_stars(int mode);
#endif
#ifdef AGN_ACCRETION
void compute_agn_accretion(void);
#endif
#ifdef BUBBLES
void init_bubble(void);
void make_bubble(void);
void create_bubble(int sign);
#endif
#ifdef BONDI_ACCRETION
void bondi_accretion(void);
#endif
#ifdef PNBODY
void init_pnbody();
void finalize_pnbody();
void compute_pnbody();
#endif
#ifdef AB_TURB
void init_turb();
#endif
#if defined(ART_VISCO_MM)|| defined(ART_VISCO_RO) || defined(ART_VISCO_CD)
void move_art_visc(int i,double dt_drift);
#ifdef ART_VISCO_CD
void art_visc_allocate();
void art_visc_free();
void compute_art_visc(int i);
#endif
#endif
+#ifdef TIMESTEP_UPDATE_FOR_FEEDBACK
+void get_sigvel(void);
+void get_sigvel_evaluate(int target, int mode);
+FLOAT updated_pressure(FLOAT EntropyPred,FLOAT Density,FLOAT DeltaEgySpec);
+void make_particle_active(int target);
+void kickback(int i,int tstart,int tend);
+#endif
+
+
+
#ifdef TESSEL
void ConstructDelaunay();
void ComputeVoronoi();
void setup_searching_radius();
int ngb_treefind_variable_for_tessel(FLOAT searchcenter[3], FLOAT hsml, int phase, int *startnode);
void ghost();
void tessel_compute_accelerations();
void tessel_convert_energy_to_entropy();
void tessel_kick(float dt_kick);
void tessel_drift(float dt_drift);
double tessel_get_timestep();
int CheckCompletenessForThisPoint(int i);
int ghost_compare_key(const void *a, const void *b);
void CheckTriangles();
void AddGhostPoints(int istart,int nadd);
void dump_triangles(char *filename);
void dump_voronoi(char *filename);
#ifdef PY_INTERFACE
#include <Python.h>
PyObject *gadget_GetAllDelaunayTriangles(PyObject *self, PyObject *args);
PyObject *gadget_GetAllvPoints(PyObject *self, PyObject *args);
PyObject *gadget_GetAllvDensities(PyObject* self);
PyObject *gadget_GetAllvVolumes(PyObject* self);
PyObject *gadget_GetAllvPressures(PyObject* self);
PyObject *gadget_GetAllvEnergySpec(PyObject* self);
PyObject *gadget_GetAllvAccelerations(PyObject* self);
PyObject *gadget_GetvPointsForOnePoint(PyObject *self, PyObject *args);
PyObject *gadget_GetNgbPointsForOnePoint(PyObject *self, PyObject *args);
PyObject *gadget_GetNgbPointsAndFacesForOnePoint(PyObject *self, PyObject *args);
PyObject *gadget_GetAllGhostPositions(PyObject* self);
PyObject *gadget_GetAllGhostvDensities(PyObject* self);
PyObject *gadget_GetAllGhostvVolumes(PyObject* self);
#endif
#endif
#ifdef PY_INTERFACE
void allocate_commbuffersQ(void);
void density_sub(void);
void density_evaluate_sub(int i, int mode);
void do_box_wrappingQ(void);
void domain_DecompositionQ(void);
void domain_decomposeQ(void);
int domain_findSplitQ(int cpustart, int ncpu, int first, int last);
void domain_shiftSplitQ(void);
void domain_findExchangeNumbersQ(int task, int partner, int sphflag, int *send, int *recv);
void domain_exchangeParticlesQ(int partner, int sphflag, int send_count, int recv_count);
void domain_countToGoQ(void);
void domain_walktoptreeQ(int no);
void domain_sumCostQ(void);
void domain_findExtentQ(void);
void domain_determineTopTreeQ(void);
void domain_topsplit_localQ(int node, peanokey startkey);
void domain_topsplitQ(int node, peanokey startkey);
int force_treeevaluate_sub(int target, int mode, double *ewaldcountsum);
void force_treeevaluate_potential_sub(int target, int type);
void force_treeevaluate_potential_shortrange_sub(int target, int mode);
int force_treeevaluate_shortrange_sub(int target, int mode);
void gravity_tree_sub(void);
void sph(void);
void sph_evaluate(int target, int mode);
void sph_sub(void);
void sph_evaluate_sub(int target, int mode);
void sph_thermal_conductivity(void);
void sph_evaluate_thermal_conductivity(int target, int mode);
int sph_compare_key(const void *a, const void *b);
void peano_hilbert_orderQ(void);
void reorder_gasQ(void);
void reorder_particlesQ(void);
void setup_smoothinglengths_sub(void);
#endif
diff --git a/src/sigvel.c b/src/sigvel.c
new file mode 100644
index 0000000..a59f594
--- /dev/null
+++ b/src/sigvel.c
@@ -0,0 +1,788 @@
+/* ################################################################################## */
+/* ### ### */
+/* ### sigvel.c ### */
+/* ### ### */
+/* ### Original: hydra.c (public version of Gadget 2) ### */
+/* ### Author: Volker Springel ### */
+/* ### ### */
+/* ### Modified: February 2011 ### */
+/* ### Author: Fabrice Durier ### */
+/* ### ### */
+/* ################################################################################## */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <mpi.h>
+#include <gsl/gsl_math.h>
+#include "allvars.h"
+#include "proto.h"
+
+/*! \file sigvel.c
+ * Re-computation of the SPH acceleration and maximum signal velocity
+ * for 'feedback' particles only.
+ *
+ * This file is a modified version of hydra.c, in which the SPH forces
+ * and the rate of change of entropy due to shock heating
+ * (via artificial viscosity) are computed.
+ */
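+/* As in hydra.c, the pairwise signal velocity used below is
+ * v_sig = c_i + c_j - 3 mu_ij for approaching pairs (vdotr2 < 0), with
+ * mu_ij = fac_mu * vdotr2 / r, and c_i + c_j otherwise; the maximum over
+ * all neighbours is stored in MaxSignalVel.
+ */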
+
+#ifdef TIMESTEP_UPDATE_FOR_FEEDBACK
+
+static double hubble_a, atime, hubble_a2, fac_mu, fac_vsic_fix, a3inv, fac_egy;
+
+#ifdef PERIODIC
+static double boxSize, boxHalf;
+
+#ifdef LONG_X
+static double boxSize_X, boxHalf_X;
+#else
+#define boxSize_X boxSize
+#define boxHalf_X boxHalf
+#endif
+#ifdef LONG_Y
+static double boxSize_Y, boxHalf_Y;
+#else
+#define boxSize_Y boxSize
+#define boxHalf_Y boxHalf
+#endif
+#ifdef LONG_Z
+static double boxSize_Z, boxHalf_Z;
+#else
+#define boxSize_Z boxSize
+#define boxHalf_Z boxHalf
+#endif
+#endif
+
+
+
+
+
+
+
+/*! This function is the driver routine that updates the calculation of the
+ * hydrodynamical force for active gas particles affected by feedback
+ * from either SNe or BHs.
+ */
+void get_sigvel(void)
+{
+ long long ntot, ntotleft;
+ int i, j, k, n, ngrp, maxfill, source, ndone;
+ int *nbuffer, *noffset, *nsend_local, *nsend, *numlist, *ndonelist;
+ int level, sendTask, recvTask, nexport, place;
+ double soundspeed_i;
+ double tstart, tend, sumt, sumcomm;
+ double timecomp = 0, timecommsumm = 0, timeimbalance = 0, sumimbalance;
+ MPI_Status status;
+
+#ifdef PERIODIC
+ boxSize = All.BoxSize;
+ boxHalf = 0.5 * All.BoxSize;
+#ifdef LONG_X
+ boxHalf_X = boxHalf * LONG_X;
+ boxSize_X = boxSize * LONG_X;
+#endif
+#ifdef LONG_Y
+ boxHalf_Y = boxHalf * LONG_Y;
+ boxSize_Y = boxSize * LONG_Y;
+#endif
+#ifdef LONG_Z
+ boxHalf_Z = boxHalf * LONG_Z;
+ boxSize_Z = boxSize * LONG_Z;
+#endif
+#endif
+
+ if(All.ComovingIntegrationOn)
+ {
+ /* Factors for comoving integration of hydro */
+ hubble_a = All.Omega0 / (All.Time * All.Time * All.Time)
+ + (1 - All.Omega0 - All.OmegaLambda) / (All.Time * All.Time) + All.OmegaLambda;
+
+ hubble_a = All.Hubble * sqrt(hubble_a);
+ hubble_a2 = All.Time * All.Time * hubble_a;
+
+ fac_mu = pow(All.Time, 3 * (GAMMA - 1) / 2) / All.Time;
+
+ fac_egy = pow(All.Time, 3 * (GAMMA - 1));
+
+ fac_vsic_fix = hubble_a * pow(All.Time, 3 * GAMMA_MINUS1);
+
+ a3inv = 1 / (All.Time * All.Time * All.Time);
+ atime = All.Time;
+ }
+ else
+ hubble_a = hubble_a2 = atime = fac_mu = fac_vsic_fix = a3inv = fac_egy = 1.0;
+
+
+ /* `NumSphUpdate' gives the number of feedback particles on this processor that want a force update */
+ for(n = 0, NumSphUpdate = 0; n < N_gas; n++)
+ {
+ if(P[n].Ti_endstep == All.Ti_Current && SphP[n].DeltaEgySpec > 0)
+ NumSphUpdate++;
+
+ for(j = 0; j < 3; j++)
+ SphP[n].FeedbackUpdatedAccel[j] = 0;
+ }
+
+ numlist = malloc(NTask * sizeof(int) * NTask);
+ MPI_Allgather(&NumSphUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD);
+ for(i = 0, ntot = 0; i < NTask; i++)
+ ntot += numlist[i];
+ free(numlist);
+
+ if(ThisTask == 0 && ntot)
+ printf("\t ---> Number of feedback particles = %ld \n\n", ntot);
+
+
+ noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */
+ nbuffer = malloc(sizeof(int) * NTask);
+ nsend_local = malloc(sizeof(int) * NTask);
+ nsend = malloc(sizeof(int) * NTask * NTask);
+ ndonelist = malloc(sizeof(int) * NTask);
+
+
+ i = 0; /* first particle for this task */
+ ntotleft = ntot; /* particles left for all tasks together */
+
+ while(ntotleft > 0)
+ {
+ for(j = 0; j < NTask; j++)
+ nsend_local[j] = 0;
+
+ /* do local particles and prepare export list */
+ tstart = second();
+ for(nexport = 0, ndone = 0; i < N_gas && nexport < All.BunchSizeHydro - NTask; i++)
+ if(P[i].Ti_endstep == All.Ti_Current && SphP[i].DeltaEgySpec > 0)
+ {
+ ndone++;
+
+ for(j = 0; j < NTask; j++)
+ Exportflag[j] = 0;
+
+ get_sigvel_evaluate(i, 0);
+
+ for(j = 0; j < NTask; j++)
+ {
+ if(Exportflag[j])
+ {
+ for(k = 0; k < 3; k++)
+ {
+ HydroDataIn[nexport].Pos[k] = P[i].Pos[k];
+ HydroDataIn[nexport].Vel[k] = SphP[i].VelPred[k];
+ }
+ HydroDataIn[nexport].Hsml = SphP[i].Hsml;
+ HydroDataIn[nexport].Mass = P[i].Mass;
+ HydroDataIn[nexport].DhsmlDensityFactor = SphP[i].DhsmlDensityFactor;
+ HydroDataIn[nexport].Density = SphP[i].Density;
+ HydroDataIn[nexport].Pressure = updated_pressure(SphP[i].EntropyPred,SphP[i].Density,SphP[i].DeltaEgySpec);
+ HydroDataIn[nexport].Timestep = P[i].Ti_endstep - P[i].Ti_begstep;
+
+ /* calculation of F1 */
+ soundspeed_i = sqrt(GAMMA * updated_pressure(SphP[i].EntropyPred,SphP[i].Density,SphP[i].DeltaEgySpec) / SphP[i].Density);
+ HydroDataIn[nexport].F1 = fabs(SphP[i].DivVel) /
+ (fabs(SphP[i].DivVel) + SphP[i].CurlVel +
+ 0.0001 * soundspeed_i / SphP[i].Hsml / fac_mu);
+
+ HydroDataIn[nexport].Index = i;
+ HydroDataIn[nexport].Task = j;
+ nexport++;
+ nsend_local[j]++;
+ }
+ }
+ }
+ tend = second();
+ timecomp += timediff(tstart, tend);
+
+ qsort(HydroDataIn, nexport, sizeof(struct hydrodata_in), hydro_compare_key);
+
+ for(j = 1, noffset[0] = 0; j < NTask; j++)
+ noffset[j] = noffset[j - 1] + nsend_local[j - 1];
+
+ tstart = second();
+
+ MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD);
+
+ tend = second();
+ timeimbalance += timediff(tstart, tend);
+
+
+
+ /* now do the particles that need to be exported */
+
+ for(level = 1; level < (1 << PTask); level++)
+ {
+ tstart = second();
+ for(j = 0; j < NTask; j++)
+ nbuffer[j] = 0;
+ for(ngrp = level; ngrp < (1 << PTask); ngrp++)
+ {
+ maxfill = 0;
+ for(j = 0; j < NTask; j++)
+ {
+ if((j ^ ngrp) < NTask)
+ if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j])
+ maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j];
+ }
+ if(maxfill >= All.BunchSizeHydro)
+ break;
+
+ sendTask = ThisTask;
+ recvTask = ThisTask ^ ngrp;
+
+ if(recvTask < NTask)
+ {
+ if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0)
+ {
+ /* get the particles */
+ MPI_Sendrecv(&HydroDataIn[noffset[recvTask]],
+ nsend_local[recvTask] * sizeof(struct hydrodata_in), MPI_BYTE,
+ recvTask, TAG_HYDRO_A,
+ &HydroDataGet[nbuffer[ThisTask]],
+ nsend[recvTask * NTask + ThisTask] * sizeof(struct hydrodata_in), MPI_BYTE,
+ recvTask, TAG_HYDRO_A, MPI_COMM_WORLD, &status);
+ }
+ }
+
+ for(j = 0; j < NTask; j++)
+ if((j ^ ngrp) < NTask)
+ nbuffer[j] += nsend[(j ^ ngrp) * NTask + j];
+ }
+ tend = second();
+ timecommsumm += timediff(tstart, tend);
+
+ /* now do the imported particles */
+ tstart = second();
+ for(j = 0; j < nbuffer[ThisTask]; j++)
+ get_sigvel_evaluate(j, 1);
+ tend = second();
+ timecomp += timediff(tstart, tend);
+
+ /* do a block to measure imbalance */
+ tstart = second();
+ MPI_Barrier(MPI_COMM_WORLD);
+ tend = second();
+ timeimbalance += timediff(tstart, tend);
+
+ /* get the result */
+ tstart = second();
+ for(j = 0; j < NTask; j++)
+ nbuffer[j] = 0;
+ for(ngrp = level; ngrp < (1 << PTask); ngrp++)
+ {
+ maxfill = 0;
+ for(j = 0; j < NTask; j++)
+ {
+ if((j ^ ngrp) < NTask)
+ if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j])
+ maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j];
+ }
+ if(maxfill >= All.BunchSizeHydro)
+ break;
+
+ sendTask = ThisTask;
+ recvTask = ThisTask ^ ngrp;
+
+ if(recvTask < NTask)
+ {
+ if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0)
+ {
+ /* send the results */
+ MPI_Sendrecv(&HydroDataResult[nbuffer[ThisTask]],
+ nsend[recvTask * NTask + ThisTask] * sizeof(struct hydrodata_out),
+ MPI_BYTE, recvTask, TAG_HYDRO_B,
+ &HydroDataPartialResult[noffset[recvTask]],
+ nsend_local[recvTask] * sizeof(struct hydrodata_out),
+ MPI_BYTE, recvTask, TAG_HYDRO_B, MPI_COMM_WORLD, &status);
+
+ /* add the result to the particles */
+ for(j = 0; j < nsend_local[recvTask]; j++)
+ {
+ source = j + noffset[recvTask];
+ place = HydroDataIn[source].Index;
+
+ for(k = 0; k < 3; k++)
+ SphP[place].FeedbackUpdatedAccel[k] += HydroDataPartialResult[source].Acc[k];
+
+ if(SphP[place].MaxSignalVel < HydroDataPartialResult[source].MaxSignalVel)
+ SphP[place].MaxSignalVel = HydroDataPartialResult[source].MaxSignalVel;
+ }
+ }
+ }
+
+ for(j = 0; j < NTask; j++)
+ if((j ^ ngrp) < NTask)
+ nbuffer[j] += nsend[(j ^ ngrp) * NTask + j];
+ }
+ tend = second();
+ timecommsumm += timediff(tstart, tend);
+
+ level = ngrp - 1;
+ }
+
+ MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD);
+ for(j = 0; j < NTask; j++)
+ ntotleft -= ndonelist[j];
+ }
+
+ free(ndonelist);
+ free(nsend);
+ free(nsend_local);
+ free(nbuffer);
+ free(noffset);
+
+
+ /* collect some timing information */
+
+ MPI_Reduce(&timecomp, &sumt, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+ MPI_Reduce(&timecommsumm, &sumcomm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+ MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+
+ //if(ThisTask == 0)
+ // {
+ // All.CPU_HydCompWalk += sumt / NTask;
+ // All.CPU_HydCommSumm += sumcomm / NTask;
+ // All.CPU_HydImbalance += sumimbalance / NTask;
+ // }
+
+
+}
+
+
+/*! This function is the 'core' of the SPH force update. A target
+ * particle is specified, which may either be local or reside in the
+ * communication buffer.
+ * Note that only the acceleration due to feedback and the maximum
+ * signal velocity are updated, not the rate of change of the entropy.
+ */
+void get_sigvel_evaluate(int target, int mode)
+{
+ int j, k, n, timestep, startnode, numngb;
+ FLOAT *pos, *vel;
+ FLOAT mass, h_i, dhsmlDensityFactor, rho, pressure, f1, f2;
+ double acc[3], dtEntropy, maxSignalVel;
+ double dx, dy, dz, dvx, dvy, dvz;
+ double h_i2, hinv, hinv4;
+ double p_over_rho2_i, p_over_rho2_j, soundspeed_i, soundspeed_j;
+ double hfc, dwk_i, vdotr, vdotr2, visc, mu_ij, rho_ij, vsig;
+ double h_j, dwk_j, r, r2, u, hfc_visc;
+
+ int phase=0;
+#ifndef NOVISCOSITYLIMITER
+ double dt;
+#endif
+
+
+#ifdef ART_CONDUCTIVITY
+ printf("get_sigvel_evaluate is not implemented to run with the flag ART_CONDUCTIVITY");
+ endrun(674321);
+#endif
+
+#if defined(ART_VISCO_MM) || defined(ART_VISCO_RO) || defined(ART_VISCO_CD)
+ printf("get_sigvel_evaluate is not implemented to run with the flags ART_VISCO_MM, ART_VISCO_RO, ART_VISCO_CD");
+ endrun(674322);
+#endif
+
+ if(mode == 0)
+ {
+ pos = P[target].Pos;
+ vel = SphP[target].VelPred;
+ h_i = SphP[target].Hsml;
+ mass = P[target].Mass;
+ dhsmlDensityFactor = SphP[target].DhsmlDensityFactor;
+ rho = SphP[target].Density;
+ pressure = updated_pressure(SphP[target].EntropyPred,SphP[target].Density,SphP[target].DeltaEgySpec);
+ timestep = P[target].Ti_endstep - P[target].Ti_begstep;
+ soundspeed_i = sqrt(GAMMA * pressure / rho);
+ f1 = fabs(SphP[target].DivVel) /
+ (fabs(SphP[target].DivVel) + SphP[target].CurlVel +
+ 0.0001 * soundspeed_i / SphP[target].Hsml / fac_mu);
+ }
+ else
+ {
+ pos = HydroDataGet[target].Pos;
+ vel = HydroDataGet[target].Vel;
+ h_i = HydroDataGet[target].Hsml;
+ mass = HydroDataGet[target].Mass;
+ dhsmlDensityFactor = HydroDataGet[target].DhsmlDensityFactor;
+ rho = HydroDataGet[target].Density;
+ pressure = HydroDataGet[target].Pressure;
+ timestep = HydroDataGet[target].Timestep;
+ soundspeed_i = sqrt(GAMMA * pressure / rho);
+ f1 = HydroDataGet[target].F1;
+ }
+
+
+ /* initialize variables before SPH loop is started */
+ acc[0] = acc[1] = acc[2] = dtEntropy = 0;
+ maxSignalVel = 0;
+
+ p_over_rho2_i = pressure / (rho * rho) * dhsmlDensityFactor;
+ h_i2 = h_i * h_i;
+
+ /* Now start the actual SPH computation for this particle */
+ startnode = All.MaxPart;
+ do
+ {
+ numngb = ngb_treefind_pairs(&pos[0], h_i, phase, &startnode);
+
+
+ for(n = 0; n < numngb; n++)
+ {
+ j = Ngblist[n];
+
+ dx = pos[0] - P[j].Pos[0];
+ dy = pos[1] - P[j].Pos[1];
+ dz = pos[2] - P[j].Pos[2];
+
+#ifdef PERIODIC /* find the closest image in the given box size */
+ if(dx > boxHalf_X)
+ dx -= boxSize_X;
+ if(dx < -boxHalf_X)
+ dx += boxSize_X;
+ if(dy > boxHalf_Y)
+ dy -= boxSize_Y;
+ if(dy < -boxHalf_Y)
+ dy += boxSize_Y;
+ if(dz > boxHalf_Z)
+ dz -= boxSize_Z;
+ if(dz < -boxHalf_Z)
+ dz += boxSize_Z;
+#endif
+ r2 = dx * dx + dy * dy + dz * dz;
+ h_j = SphP[j].Hsml;
+ if(r2 < h_i2 || r2 < h_j * h_j)
+ {
+ r = sqrt(r2);
+ if(r > 0)
+ {
+
+ /*
+ here, we need to differentiate two cases, because DeltaEgySpec
+ is also used as a flag (see below) and may be equal to -1
+ */
+
+ if (SphP[j].DeltaEgySpec>0) /* the particle is touched by feedback */
+ p_over_rho2_j = updated_pressure(SphP[j].EntropyPred,SphP[j].Density,SphP[j].DeltaEgySpec) / (SphP[j].Density * SphP[j].Density);
+ else
+ p_over_rho2_j = SphP[j].Pressure / (SphP[j].Density * SphP[j].Density);
+
+
+
+
+ soundspeed_j = sqrt(GAMMA * p_over_rho2_j * SphP[j].Density);
+ dvx = vel[0] - SphP[j].VelPred[0];
+ dvy = vel[1] - SphP[j].VelPred[1];
+ dvz = vel[2] - SphP[j].VelPred[2];
+ vdotr = dx * dvx + dy * dvy + dz * dvz;
+
+ if(All.ComovingIntegrationOn)
+ vdotr2 = vdotr + hubble_a2 * r2;
+ else
+ vdotr2 = vdotr;
+
+ if(r2 < h_i2)
+ {
+ hinv = 1.0 / h_i;
+#ifndef TWODIMS
+ hinv4 = hinv * hinv * hinv * hinv;
+#else
+ hinv4 = hinv * hinv * hinv / boxSize_Z;
+#endif
+ u = r * hinv;
+ if(u < 0.5)
+ dwk_i = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4);
+ else
+ dwk_i = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u);
+ }
+ else
+ {
+ dwk_i = 0;
+ }
+
+ if(r2 < h_j * h_j)
+ {
+ hinv = 1.0 / h_j;
+#ifndef TWODIMS
+ hinv4 = hinv * hinv * hinv * hinv;
+#else
+ hinv4 = hinv * hinv * hinv / boxSize_Z;
+#endif
+ u = r * hinv;
+ if(u < 0.5)
+ dwk_j = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4);
+ else
+ dwk_j = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u);
+ }
+ else
+ {
+ dwk_j = 0;
+ }
+
+ if(soundspeed_i + soundspeed_j > maxSignalVel)
+ maxSignalVel = soundspeed_i + soundspeed_j;
+
+ if(vdotr2 < 0) /* ... artificial viscosity */
+ {
+ mu_ij = fac_mu * vdotr2 / r; /* note: this is negative! */
+
+ vsig = soundspeed_i + soundspeed_j - 3 * mu_ij;
+
+ if(vsig > maxSignalVel)
+ maxSignalVel = vsig;
+
+ if(P[j].Ti_endstep == All.Ti_Current)
+ if(vsig > SphP[j].MaxSignalVel)
+ SphP[j].MaxSignalVel = vsig;
+
+ rho_ij = 0.5 * (rho + SphP[j].Density);
+ f2 =
+ fabs(SphP[j].DivVel) / (fabs(SphP[j].DivVel) + SphP[j].CurlVel +
+ 0.0001 * soundspeed_j / fac_mu / SphP[j].Hsml);
+
+ visc = 0.25 * All.ArtBulkViscConst * vsig * (-mu_ij) / rho_ij * (f1 + f2);
+
+ /* .... end artificial viscosity evaluation */
+#ifndef NOVISCOSITYLIMITER
+ /* make sure that viscous acceleration is not too large */
+ dt = imax(timestep, (P[j].Ti_endstep - P[j].Ti_begstep)) * All.Timebase_interval;
+ if(dt > 0 && (dwk_i + dwk_j) < 0)
+ {
+ visc = dmin(visc, 0.5 * fac_vsic_fix * vdotr2 /
+ (0.5 * (mass + P[j].Mass) * (dwk_i + dwk_j) * r * dt));
+ }
+#endif
+ }
+ else
+ visc = 0;
+
+ p_over_rho2_j *= SphP[j].DhsmlDensityFactor;
+
+ hfc_visc = 0.5 * P[j].Mass * visc * (dwk_i + dwk_j) / r;
+
+ hfc = hfc_visc + P[j].Mass * (p_over_rho2_i * dwk_i + p_over_rho2_j * dwk_j) / r;
+
+ acc[0] -= hfc * dx;
+ acc[1] -= hfc * dy;
+ acc[2] -= hfc * dz;
+
+ if(P[j].Ti_endstep == All.Ti_Current)
+ {
+ if(SphP[j].DeltaEgySpec == 0) /* the particle is active but not affected by feedback */
+ SphP[j].DeltaEgySpec = -1;
+
+ if(maxSignalVel > SphP[j].MaxSignalVel)
+ SphP[j].MaxSignalVel = maxSignalVel;
+
+ SphP[j].FeedbackUpdatedAccel[0] -= hfc * dx;
+ SphP[j].FeedbackUpdatedAccel[1] -= hfc * dy;
+ SphP[j].FeedbackUpdatedAccel[2] -= hfc * dz;
+ }
+
+ }
+ }
+ }
+ fflush(stdout);
+ }
+ while(startnode >= 0);
+
+ /* Now collect the result at the right place */
+ if(mode == 0)
+ {
+ SphP[target].MaxSignalVel = maxSignalVel;
+
+ for(k = 0; k < 3; k++)
+ SphP[target].FeedbackUpdatedAccel[k] = acc[k];
+ }
+ else
+ {
+ HydroDataResult[target].MaxSignalVel = maxSignalVel;
+
+ for(k = 0; k < 3; k++)
+ HydroDataResult[target].Acc[k] = acc[k];
+ }
+}
+
+
+
+/*! This function returns the pressure as a function
+ * of entropy and density, adding the contribution
+ * of the feedback energy.
+ */
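+/* In formulas: u = EntropyPred / (GAMMA-1) * (Density*a3inv)^(GAMMA-1) and
+ * P = (GAMMA-1) * (Density*a3inv) * (u + DeltaEgySpec), so the feedback
+ * energy DeltaEgySpec simply raises the effective specific energy entering
+ * the equation of state.
+ */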
+FLOAT updated_pressure(FLOAT EntropyPred,FLOAT Density,FLOAT DeltaEgySpec)
+{
+ FLOAT pressure;
+ FLOAT EgySpec,EgySpecUpdated;
+
+ /* energy from entropy */
+ EgySpec = EntropyPred / GAMMA_MINUS1 * pow(Density*a3inv, GAMMA_MINUS1);
+ EgySpecUpdated = EgySpec + DeltaEgySpec;
+
+ /* pressure */
+ pressure = GAMMA_MINUS1 * (Density*a3inv) * EgySpecUpdated;
+ return pressure;
+}
+
+
+
+/*! This function forces the particle to be active.
+ */
+void make_particle_active(int i)
+{
+ int j;
+ int tstart, tend;
+
+ double dt_entr;
+ double dt_gravkick;
+ double dt_hydrokick;
+
+
+ printf("(%d) make particle %d active.\n",ThisTask,i);
+
+
+#ifdef PMGRID
+ double dt_gravkickA, dt_gravkickB;
+ #endif
+
+
+ tstart = (P[i].Ti_begstep + P[i].Ti_endstep) / 2; /* midpoint of old step */
+ tend = (P[i].Ti_begstep + All.Ti_Current) / 2; /* midpoint of the shortened step */
+
+
+ /* kick the particle back */
+ kickback(i,tstart,tend);
+
+
+
+ /* flag it as active */
+ P[i].Ti_endstep = All.Ti_Current;
+ NumForceUpdate++;
+
+
+
+
+#ifdef PMGRID
+printf("make_particle_active is not implemented to run with the flag PMGRID\n");
+printf("here we need to add the last part of advance_and_find_timesteps()\n");
+endrun(674323);
+#endif
+
+#ifdef MULTIPHASE
+printf("make_particle_active is not implemented to run with the flag MULTIPHASE\n");
+endrun(674324);
+#endif
+
+
+}
+
+#endif
+
+
+
+/*! In case we want to reduce the timestep of a particle,
+ * this function performs a negative kick on the particle.
+ * Positions, densities, predicted velocities and predicted
+ * entropies are not affected, as they are already defined
+ * at the present step.
+ */
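+/* Since tend (midpoint of the shortened step) lies before tstart (midpoint of
+ * the old step), the factors computed below are negative and the kick partially
+ * undoes the velocity and entropy updates of the previous half-step kick.
+ */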
+void kickback(int i,int tstart,int tend)
+{
+
+ int j;
+
+ double dt_entr;
+ double dt_gravkick;
+ double dt_hydrokick;
+
+#ifdef PMGRID
+
+ double dt_gravkickA, dt_gravkickB;
+
+ /* !!! this is probably bad, please check !!! */
+
+ if(All.ComovingIntegrationOn)
+ dt_gravkickB = get_gravkick_factor(All.PM_Ti_begstep, All.Ti_Current) -
+ get_gravkick_factor(All.PM_Ti_begstep, (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2);
+ else
+ dt_gravkickB = (All.Ti_Current - (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2) * All.Timebase_interval;
+#endif
+
+
+
+ if(All.ComovingIntegrationOn)
+ {
+ dt_entr = (tend - tstart) * All.Timebase_interval;
+ dt_gravkick = get_gravkick_factor(tstart, tend);
+ dt_hydrokick = get_hydrokick_factor(tstart, tend);
+ }
+ else
+ {
+ dt_entr = dt_gravkick = dt_hydrokick = (tend - tstart) * All.Timebase_interval;
+ }
+
+
+
+ for(j = 0; j < 3; j++)
+ {
+ P[i].Vel[j] += P[i].GravAccel[j] * dt_gravkick;
+
+#ifdef PMGRID
+ P[i].Vel[j] += P[i].GravPM[j] * dt_gravkickB;
+#endif
+
+
+#ifdef AB_TURB
+ P[i].Vel[j] += SphP[i].TurbAccel[j] * dt_hydrokick;
+#endif
+
+ }
+
+
+ if(P[i].Type == 0) /* SPH stuff */
+ {
+ for(j = 0; j < 3; j++)
+ P[i].Vel[j] += SphP[i].HydroAccel[j] * dt_hydrokick;
+
+ SphP[i].Entropy += SphP[i].DtEntropy * dt_entr;
+
+ }
+
+
+
+
+#ifdef PMGRID
+ printf("kickback is not implemented to run with the flag PMGRID\n");
+ printf("here we need to add the last part of advance_and_find_timesteps()\n");
+ endrun(674324);
+#endif
+
+#ifdef MULTIPHASE
+printf("kickback is not implemented to run with the flag MULTIPHASE\n");
+endrun(674326);
+#endif
+
+
+#ifdef LIMIT_DVEL
+ printf("kickback is not implemented to run with the flag LIMIT_DVEL\n");
+  endrun(674327);
+#endif
+
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/timestep.c b/src/timestep.c
index ba71f56..f73d841 100644
--- a/src/timestep.c
+++ b/src/timestep.c
@@ -1,1417 +1,1461 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>
#include "allvars.h"
#include "proto.h"
/*! \file timestep.c
* \brief routines for 'kicking' particles in momentum space and assigning new timesteps
*/
static double fac1, fac2, fac3, hubble_a, atime, a3inv;
static double dt_displacement = 0;
/*! This function advances the system in momentum space, i.e. it does apply
* the 'kick' operation after the forces have been computed. Additionally, it
* assigns new timesteps to particles. At start-up, a half-timestep is
* carried out, as well as at the end of the simulation. In between, the
* half-step kick that ends the previous timestep and the half-step kick for
* the new timestep are combined into one operation.
*/
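/* Schematically, for a particle with timestep dt the combined operation
 * amounts to the leapfrog kick
 *    v(t + dt/2) = v(t - dt/2) + a(t) * dt ,
 * i.e. the half-kick closing the previous step and the half-kick opening the
 * new one are applied together at the synchronization point; with comoving
 * integration, dt is replaced by the corresponding gravkick/hydrokick factors.
 */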
void advance_and_find_timesteps(void)
{
int i, j, no, ti_step, ti_min, tend, tstart;
double dt_entr, dt_entr2, dt_gravkick, dt_hydrokick, dt_gravkick2, dt_hydrokick2, t0, t1;
double minentropy, aphys;
FLOAT dv[3];
#ifdef COOLING
double t2,t3;
#endif
#ifdef FLEXSTEPS
int ti_grp;
#endif
#if defined(PSEUDOSYMMETRIC) && !defined(FLEXSTEPS)
double apred, prob;
int ti_step2;
#endif
#ifdef PMGRID
double dt_gravkickA, dt_gravkickB;
#endif
#ifdef MAKEGLASS
double disp, dispmax, globmax, dmean, fac, disp2sum, globdisp2sum;
#endif
t0 = second();
if(All.ComovingIntegrationOn)
{
fac1 = 1 / (All.Time * All.Time);
fac2 = 1 / pow(All.Time, 3 * GAMMA - 2);
fac3 = pow(All.Time, 3 * (1 - GAMMA) / 2.0);
hubble_a = All.Omega0 / (All.Time * All.Time * All.Time)
+ (1 - All.Omega0 - All.OmegaLambda) / (All.Time * All.Time) + All.OmegaLambda;
hubble_a = All.Hubble * sqrt(hubble_a);
a3inv = 1 / (All.Time * All.Time * All.Time);
atime = All.Time;
}
else
fac1 = fac2 = fac3 = hubble_a = a3inv = atime = 1;
#ifdef NOPMSTEPADJUSTMENT
dt_displacement = All.MaxSizeTimestep;
#else
if(Flag_FullStep || dt_displacement == 0)
find_dt_displacement_constraint(hubble_a * atime * atime);
#endif
#ifdef PMGRID
if(All.ComovingIntegrationOn)
dt_gravkickB = get_gravkick_factor(All.PM_Ti_begstep, All.Ti_Current) -
get_gravkick_factor(All.PM_Ti_begstep, (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2);
else
dt_gravkickB = (All.Ti_Current - (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2) * All.Timebase_interval;
if(All.PM_Ti_endstep == All.Ti_Current) /* need to do long-range kick */
{
/* make sure that we reconstruct the domain/tree next time because we don't kick the tree nodes in this case */
All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency;
}
#endif
#ifdef MAKEGLASS
for(i = 0, dispmax = 0, disp2sum = 0; i < NumPart; i++)
{
for(j = 0; j < 3; j++)
{
P[i].GravPM[j] *= -1;
P[i].GravAccel[j] *= -1;
P[i].GravAccel[j] += P[i].GravPM[j];
P[i].GravPM[j] = 0;
}
disp = sqrt(P[i].GravAccel[0] * P[i].GravAccel[0] +
P[i].GravAccel[1] * P[i].GravAccel[1] + P[i].GravAccel[2] * P[i].GravAccel[2]);
disp *= 2.0 / (3 * All.Hubble * All.Hubble);
disp2sum += disp * disp;
if(disp > dispmax)
dispmax = disp;
}
MPI_Allreduce(&dispmax, &globmax, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
MPI_Allreduce(&disp2sum, &globdisp2sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
dmean = pow(P[0].Mass / (All.Omega0 * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)), 1.0 / 3);
if(globmax > dmean)
fac = dmean / globmax;
else
fac = 1.0;
if(ThisTask == 0)
{
printf("\nglass-making: dmean= %g global disp-maximum= %g rms= %g\n\n",
dmean, globmax, sqrt(globdisp2sum / All.TotNumPart));
fflush(stdout);
}
for(i = 0, dispmax = 0; i < NumPart; i++)
{
for(j = 0; j < 3; j++)
{
P[i].Vel[j] = 0;
P[i].Pos[j] += fac * P[i].GravAccel[j] * 2.0 / (3 * All.Hubble * All.Hubble);
P[i].GravAccel[j] = 0;
}
}
#endif
/* Now assign new timesteps and kick */
#ifdef FLEXSTEPS
if((All.Ti_Current % (4 * All.PresentMinStep)) == 0)
if(All.PresentMinStep < TIMEBASE)
All.PresentMinStep *= 2;
for(i = 0; i < NumPart; i++)
{
if(P[i].Ti_endstep == All.Ti_Current)
{
ti_step = get_timestep(i, &aphys, 0);
/* make it a power 2 subdivision */
ti_min = TIMEBASE;
while(ti_min > ti_step)
ti_min >>= 1;
ti_step = ti_min;
if(ti_step < All.PresentMinStep)
All.PresentMinStep = ti_step;
}
}
ti_step = All.PresentMinStep;
MPI_Allreduce(&ti_step, &All.PresentMinStep, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
if(dt_displacement < All.MaxSizeTimestep)
ti_step = (int) (dt_displacement / All.Timebase_interval);
else
ti_step = (int) (All.MaxSizeTimestep / All.Timebase_interval);
/* make it a power 2 subdivision */
ti_min = TIMEBASE;
while(ti_min > ti_step)
ti_min >>= 1;
All.PresentMaxStep = ti_min;
if(ThisTask == 0)
printf("Syn Range = %g PresentMinStep = %d PresentMaxStep = %d \n",
(double) All.PresentMaxStep / All.PresentMinStep, All.PresentMinStep, All.PresentMaxStep);
#endif
#ifdef SYNCHRONIZE_NGB_TIMESTEP
for(i = 0; i < NumPart; i++)
{
P[i].Old_Ti_begstep = P[i].Ti_begstep;
P[i].Old_Ti_endstep = P[i].Ti_endstep;
}
#endif
for(i = 0; i < NumPart; i++)
{
if(P[i].Ti_endstep == All.Ti_Current)
{
ti_step = get_timestep(i, &aphys, 0);
/* make it a power 2 subdivision */
ti_min = TIMEBASE;
while(ti_min > ti_step)
ti_min >>= 1;
ti_step = ti_min;
#ifdef FLEXSTEPS
ti_grp = P[i].FlexStepGrp % All.PresentMaxStep;
ti_grp = (ti_grp / All.PresentMinStep) * All.PresentMinStep;
ti_step = ((P[i].Ti_endstep + ti_grp + ti_step) / ti_step) * ti_step - (P[i].Ti_endstep + ti_grp);
#else
#ifdef PSEUDOSYMMETRIC
if(P[i].Type != 0)
{
if(P[i].Ti_endstep > P[i].Ti_begstep)
{
apred = aphys + ((aphys - P[i].AphysOld) / (P[i].Ti_endstep - P[i].Ti_begstep)) * ti_step;
if(fabs(apred - aphys) < 0.5 * aphys)
{
ti_step2 = get_timestep(i, &apred, -1);
ti_min = TIMEBASE;
while(ti_min > ti_step2)
ti_min >>= 1;
ti_step2 = ti_min;
if(ti_step2 < ti_step)
{
get_timestep(i, &apred, ti_step);
prob =
((apred - aphys) / (aphys - P[i].AphysOld) * (P[i].Ti_endstep -
P[i].Ti_begstep)) / ti_step;
if(prob < get_random_number(P[i].ID))
ti_step /= 2;
}
else if(ti_step2 > ti_step)
{
get_timestep(i, &apred, 2 * ti_step);
prob =
((apred - aphys) / (aphys - P[i].AphysOld) * (P[i].Ti_endstep -
P[i].Ti_begstep)) / ti_step;
if(prob < get_random_number(P[i].ID + 1))
ti_step *= 2;
}
}
}
P[i].AphysOld = aphys;
}
#endif
#ifdef SYNCHRONIZATION
if(ti_step > (P[i].Ti_endstep - P[i].Ti_begstep)) /* timestep wants to increase */
{
//if(((TIMEBASE - P[i].Ti_endstep) % ti_step) > 0)
// ti_step = P[i].Ti_endstep - P[i].Ti_begstep; /* leave at old step */
while(((TIMEBASE - P[i].Ti_endstep) % ti_step) > 0) /* yr : allow to increase */
ti_step = ti_step/2;
}
#endif
#endif /* end of FLEXSTEPS */
if(All.Ti_Current == TIMEBASE) /* we here finish the last timestep. */
ti_step = 0;
if((TIMEBASE - All.Ti_Current) < ti_step) /* check that we don't run beyond the end */
ti_step = TIMEBASE - All.Ti_Current;
#ifdef SYNCHRONIZE_NGB_TIMESTEP
 /* Here, in order to perform the synchronization of the time steps
 * for neighbor particles, we need to interrupt the loop.
*/
P[i].Ti_step = ti_step; /* save estimated time step */
}
}
synchronize_ngb_timestep();
for(i = 0; i < NumPart; i++)
{
if(P[i].Old_Ti_endstep == All.Ti_Current) // here we use the old value, avoid problem due to the update of the timesteps
{
ti_step = P[i].Ti_step; /* recover from the estimated time step */
#endif
-
+
+
tstart = (P[i].Ti_begstep + P[i].Ti_endstep) / 2; /* midpoint of old step */
tend = P[i].Ti_endstep + ti_step / 2; /* midpoint of new step */
if(All.ComovingIntegrationOn)
{
dt_entr = (tend - tstart) * All.Timebase_interval;
dt_entr2 = (tend - P[i].Ti_endstep) * All.Timebase_interval;
dt_gravkick = get_gravkick_factor(tstart, tend);
dt_hydrokick = get_hydrokick_factor(tstart, tend);
dt_gravkick2 = get_gravkick_factor(P[i].Ti_endstep, tend);
dt_hydrokick2 = get_hydrokick_factor(P[i].Ti_endstep, tend);
}
else
{
dt_entr = dt_gravkick = dt_hydrokick = (tend - tstart) * All.Timebase_interval;
dt_gravkick2 = dt_hydrokick2 = dt_entr2 = (tend - P[i].Ti_endstep) * All.Timebase_interval;
}
P[i].Ti_begstep = P[i].Ti_endstep;
P[i].Ti_endstep = P[i].Ti_begstep + ti_step;
#ifdef CYLINDRICAL_SYMMETRY
double r,factor;
r = sqrt( P[i].Pos[0]*P[i].Pos[0] + P[i].Pos[1]*P[i].Pos[1] + P[i].Pos[2]*P[i].Pos[2] );
factor = 1/(r*r) * (P[i].Pos[0]*P[i].GravAccel[0] + P[i].Pos[1]*P[i].GravAccel[1]);
P[i].GravAccel[0] = factor * P[i].Pos[0];
P[i].GravAccel[1] = factor * P[i].Pos[1];
#endif
/* do the kick */
for(j = 0; j < 3; j++)
{
dv[j] = 0.0;
#ifdef LIMIT_DVEL
if (fabs(P[i].GravAccel[j] * dt_gravkick)>LIMIT_DVEL)
{
#ifdef MULTIPHASE
printf("Warning(LIMIT_DVEL): ID=%d j=%d dv[j]=%g Phase=%d(setting GravAccel[j] to 0.0)\n",P[i].ID,j,P[i].GravAccel[j]*dt_hydrokick,SphP[i].Phase);
#else
printf("Warning(LIMIT_DVEL): ID=%d j=%d dv[j]=%g Phase=-(setting GravAccel[j] to 0.0)\n",P[i].ID,j,P[i].GravAccel[j]*dt_hydrokick);
#endif
P[i].GravAccel[j] = 0.0;
}
#endif
dv[j] += P[i].GravAccel[j] * dt_gravkick;
P[i].Vel[j] += P[i].GravAccel[j] * dt_gravkick;
}
if(P[i].Type == 0) /* SPH stuff */
{
for(j = 0; j < 3; j++)
{
#ifdef LIMIT_DVEL /* begin LIMIT_DVEL */
if (fabs(SphP[i].HydroAccel[j] * dt_hydrokick)>LIMIT_DVEL)
{
#ifdef MULTIPHASE
printf("Warning(LIMIT_DVEL): ID=%d j=%d dv[j]=%g Phase=%d(setting HydroAccel[j] to 0.0)\n",P[i].ID,j,SphP[i].HydroAccel[j] *dt_hydrokick,SphP[i].Phase);
#else
printf("Warning(LIMIT_DVEL): ID=%d j=%d dv[j]=%g Phase=-(setting HydroAccel[j] to 0.0)\n",P[i].ID,j,SphP[i].HydroAccel[j] *dt_hydrokick);
#endif
SphP[i].HydroAccel[j] = 0.0;
}
#endif /* end LIMIT_DVEL */
dv[j] += SphP[i].HydroAccel[j] * dt_hydrokick;
P[i].Vel[j] += SphP[i].HydroAccel[j] * dt_hydrokick;
SphP[i].VelPred[j] =
P[i].Vel[j] - dt_gravkick2 * P[i].GravAccel[j] - dt_hydrokick2 * SphP[i].HydroAccel[j];
#ifdef PMGRID
SphP[i].VelPred[j] += P[i].GravPM[j] * dt_gravkickB;
#endif
#ifdef AB_TURB
dv[j] += SphP[i].TurbAccel[j] * dt_hydrokick;
P[i].Vel[j] += SphP[i].TurbAccel[j] * dt_hydrokick;
SphP[i].VelPred[j] += - dt_hydrokick2 * SphP[i].TurbAccel[j];
#endif
}
/***********************************************************/
/* compute spec energy lost/win by different other process */
/***********************************************************/
/***********************************************************/
/* compute entropy variation */
/***********************************************************/
/*******************************/
/* compute cooling */
/*******************************/
#ifdef COOLING
t2 = second();
CoolingForOne(i,tstart,tend,a3inv,hubble_a);
t3 = second();
All.CPU_Cooling += timediff(t2, t3);
#else
-
 /* In case of cooling, we prevent the entropy (and
 hence the temperature) from decreasing by more than a factor 0.5 */
if(SphP[i].DtEntropy * dt_entr > -0.5 * SphP[i].Entropy)
SphP[i].Entropy += SphP[i].DtEntropy * dt_entr;
else
SphP[i].Entropy *= 0.5;
#ifdef MULTIPHASE
if (SphP[i].Phase==GAS_SPH)
{
#endif
if(All.MinEgySpec)
{
minentropy = All.MinEgySpec * GAMMA_MINUS1 / pow(SphP[i].Density * a3inv, GAMMA_MINUS1);
if(SphP[i].Entropy < minentropy)
{
SphP[i].Entropy = minentropy;
SphP[i].DtEntropy = 0;
}
}
#ifdef MULTIPHASE
}
#endif
-#endif /* COOLING */
+#endif /* COOLING */
-
+#ifndef COOLING
/* In case the timestep increases in the new step, we
make sure that we do not 'overcool' when deriving
predicted temperatures. The maximum timespan over
which prediction can occur is ti_step/2, i.e. from
the middle to the end of the current step */
//dt_entr = ti_step / 2 * All.Timebase_interval;
 dt_entr = imax(ti_step / 2,1) * All.Timebase_interval;	/* yr : prevent dt_entr from being zero if ti_step=1 */
if(SphP[i].Entropy + SphP[i].DtEntropy * dt_entr < 0.5 * SphP[i].Entropy)
SphP[i].DtEntropy = -0.5 * SphP[i].Entropy / dt_entr;
-
+#endif
+
#ifdef ENTROPYPRED
+
/* now, we correct the predicted Entropy */
- SphP[i].EntropyPred = SphP[i].Entropy - dt_entr2 * SphP[i].DtEntropy ;
-#ifdef CHECK_ENTROPY_SIGN
- if ((SphP[i].EntropyPred < 0))
- {
- printf("\ntask=%d: EntropyPred less than zero in advance_and_find_timesteps !\n", ThisTask);
- printf("ID=%d Entropy=%g EntropyPred=%g DtEntropy=%g dt_entr=%g\n",P[i].ID,SphP[i].Entropy,SphP[i].EntropyPred,SphP[i].DtEntropy,dt_entr);
- fflush(stdout);
- endrun(1010101000);
-
- }
+ SphP[i].EntropyPred = SphP[i].Entropy - dt_entr2 * SphP[i].DtEntropy ;
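+
+	  /* Entropy has just been kicked to the midpoint of the new step
+	     (tend), while the prediction is needed at the current
+	     synchronization time All.Ti_Current; subtracting
+	     dt_entr2 * DtEntropy moves it back by exactly that interval. */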
+
+
+#ifdef COOLING
+
+ if(All.MinEgySpec)
+ minentropy = All.MinEgySpec * GAMMA_MINUS1 / pow(SphP[i].Density * a3inv, GAMMA_MINUS1);
+ else
+ minentropy=0;
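+
+	  /* minentropy is the entropy floor corresponding to All.MinEgySpec at
+	     the particle's current physical density,
+	     A_min = u_min * (GAMMA-1) / (rho*a3inv)^(GAMMA-1),
+	     i.e. the same conversion used in the non-cooling branch above. */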
+
+ //dt_entr = imax(ti_step,1) * All.Timebase_interval;
+ //if (SphP[i].EntropyPred + SphP[i].DtEntropy * dt_entr < minentropy) /* if during the next step prediction entropy may be less than zero */
+ // SphP[i].DtEntropy = -(SphP[i].EntropyPred-minentropy) / dt_entr; /* modify SphP[i].DtEntropy in order to avoid problems */
+
+ dt_entr = imax(ti_step / 2,1) * All.Timebase_interval;
+ if(SphP[i].Entropy + SphP[i].DtEntropy * dt_entr < 0.5 * SphP[i].Entropy)
+ {
+	    printf("(%d) particle id=%d reduces its DtEntropy (Entropy=%g DtEntropy * dt_entr=%g)\n",ThisTask,P[i].ID,SphP[i].Entropy,SphP[i].DtEntropy * dt_entr);
+ SphP[i].DtEntropy = -0.5 * SphP[i].Entropy / dt_entr;
+ }
+
+
+
+#else
+
+#endif /* COOLING */
+
+
+ #ifdef CHECK_ENTROPY_SIGN
+ if ((SphP[i].EntropyPred < 0))
+ {
+ printf("\ntask=%d: EntropyPred less than zero in advance_and_find_timesteps !\n", ThisTask);
+ printf("ID=%d Entropy=%g EntropyPred=%g DtEntropy=%g dt_entr=%g\n",P[i].ID,SphP[i].Entropy,SphP[i].EntropyPred,SphP[i].DtEntropy,dt_entr);
+ fflush(stdout);
+ endrun(1010101000);
+ }
#endif
#endif
+
+
+
}
/* if tree is not going to be reconstructed, kick parent nodes dynamically.
*/
if(All.NumForcesSinceLastDomainDecomp < All.TotNumPart * All.TreeDomainUpdateFrequency)
{
no = Father[i];
while(no >= 0)
{
for(j = 0; j < 3; j++)
Extnodes[no].vs[j] += dv[j] * P[i].Mass / Nodes[no].u.d.mass;
no = Nodes[no].u.d.father;
}
}
}
}
+
#ifdef PMGRID
if(All.PM_Ti_endstep == All.Ti_Current) /* need to do long-range kick */
{
ti_step = TIMEBASE;
while(ti_step > (dt_displacement / All.Timebase_interval))
ti_step >>= 1;
if(ti_step > (All.PM_Ti_endstep - All.PM_Ti_begstep)) /* PM-timestep wants to increase */
{
/* we only increase if an integer number of steps will bring us to the end */
if(((TIMEBASE - All.PM_Ti_endstep) % ti_step) > 0)
ti_step = All.PM_Ti_endstep - All.PM_Ti_begstep; /* leave at old step */
}
if(All.Ti_Current == TIMEBASE) /* we here finish the last timestep. */
ti_step = 0;
tstart = (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2;
tend = All.PM_Ti_endstep + ti_step / 2;
if(All.ComovingIntegrationOn)
dt_gravkick = get_gravkick_factor(tstart, tend);
else
dt_gravkick = (tend - tstart) * All.Timebase_interval;
All.PM_Ti_begstep = All.PM_Ti_endstep;
All.PM_Ti_endstep = All.PM_Ti_begstep + ti_step;
if(All.ComovingIntegrationOn)
dt_gravkickB = -get_gravkick_factor(All.PM_Ti_begstep, (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2);
else
dt_gravkickB =
-((All.PM_Ti_begstep + All.PM_Ti_endstep) / 2 - All.PM_Ti_begstep) * All.Timebase_interval;
for(i = 0; i < NumPart; i++)
{
for(j = 0; j < 3; j++) /* do the kick */
P[i].Vel[j] += P[i].GravPM[j] * dt_gravkick;
if(P[i].Type == 0)
{
if(All.ComovingIntegrationOn)
{
dt_gravkickA = get_gravkick_factor(P[i].Ti_begstep, All.Ti_Current) -
get_gravkick_factor(P[i].Ti_begstep, (P[i].Ti_begstep + P[i].Ti_endstep) / 2);
dt_hydrokick = get_hydrokick_factor(P[i].Ti_begstep, All.Ti_Current) -
get_hydrokick_factor(P[i].Ti_begstep, (P[i].Ti_begstep + P[i].Ti_endstep) / 2);
}
else
dt_gravkickA = dt_hydrokick =
(All.Ti_Current - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval;
for(j = 0; j < 3; j++)
SphP[i].VelPred[j] = P[i].Vel[j]
+ P[i].GravAccel[j] * dt_gravkickA
+ SphP[i].HydroAccel[j] * dt_hydrokick
+ P[i].GravPM[j] * dt_gravkickB;
}
}
}
#endif
+
+#ifdef CHIMIE_THERMAL_FEEDBACK
+chimie_apply_thermal_feedback();
+#endif
+
+
t1 = second();
All.CPU_TimeLine += timediff(t0, t1);
#ifdef DETAILED_CPU
All.CPU_Leapfrog += timediff(t0, t1);
#endif
#ifdef COOLING
//All.CPU_TimeLine -= All.CPU_Cooling;
#endif
#ifdef CHIMIE_KINETIC_FEEDBACK
if(SetMinTimeStepForActives)
SetMinTimeStepForActives=0;
#endif
#ifdef SYNCHRONIZE_NGB_TIMESTEP
#ifdef OUTPUTOPTVAR1
for(i = 0; i < N_gas; i++)
SphP[i].OptVar1 = (float) (P[i].Ti_endstep - P[i].Ti_begstep);
#endif
#endif
}
/*! This function normally (for flag==0) returns the maximum allowed timestep
* of a particle, expressed in terms of the integer mapping that is used to
* represent the total simulated timespan. The physical acceleration is
* returned in `aphys'. The latter is used in conjunction with the
 * PSEUDOSYMMETRIC integration option, which also makes use of the second
* function of get_timestep. When it is called with a finite timestep for
* flag, it returns the physical acceleration that would lead to this
* timestep, assuming timestep criterion 0.
*/
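/* For timestep criterion 0 the relation used below is
 *    dt = sqrt(2 * ErrTolIntAccuracy * a * eps / |acc|),
 * so calling get_timestep with a finite 'flag' simply inverts it,
 *    |acc| = 2 * ErrTolIntAccuracy * a * eps / dt^2 ,
 * with eps the gravitational softening of the particle type.
 */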
int get_timestep(int p, /*!< particle index */
double *aphys, /*!< acceleration (physical units) */
int flag /*!< either 0 for normal operation, or finite timestep to get corresponding
aphys */ )
{
double ax, ay, az, ac, csnd;
double dt = 0, dt_courant = 0, dt_accel;
int ti_step;
#ifdef CONDUCTION
double dt_cond;
#endif
if(flag == 0)
{
ax = fac1 * P[p].GravAccel[0];
ay = fac1 * P[p].GravAccel[1];
az = fac1 * P[p].GravAccel[2];
#ifdef PMGRID
ax += fac1 * P[p].GravPM[0];
ay += fac1 * P[p].GravPM[1];
az += fac1 * P[p].GravPM[2];
#endif
if(P[p].Type == 0)
{
+
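+	  /* With TIMESTEP_UPDATE_FOR_FEEDBACK, a particle that was flagged by
+	     the feedback routine (DeltaEgySpec == -1) or that received feedback
+	     energy (DeltaEgySpec > 0) uses the feedback-updated hydro
+	     acceleration in the timestep criterion, so that its new step
+	     already accounts for the kick it is about to receive; the -1 flag
+	     is cleared once it has been used. */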
+#ifndef TIMESTEP_UPDATE_FOR_FEEDBACK
ax += fac2 * SphP[p].HydroAccel[0];
ay += fac2 * SphP[p].HydroAccel[1];
- az += fac2 * SphP[p].HydroAccel[2];
+ az += fac2 * SphP[p].HydroAccel[2];
+#else
+ if((SphP[p].DeltaEgySpec==-1) || (SphP[p].DeltaEgySpec>0))
+ {
+ ax += fac2 * SphP[p].FeedbackUpdatedAccel[0];
+ ay += fac2 * SphP[p].FeedbackUpdatedAccel[1];
+ az += fac2 * SphP[p].FeedbackUpdatedAccel[2];
+ if(SphP[p].DeltaEgySpec==-1)
+ SphP[p].DeltaEgySpec=0; /* unflag */
+ }
+ else
+ {
+ ax += fac2 * SphP[p].HydroAccel[0];
+ ay += fac2 * SphP[p].HydroAccel[1];
+ az += fac2 * SphP[p].HydroAccel[2];
+ }
+#endif
+
+
+
#ifdef AB_TURB
ax += fac2 * SphP[p].TurbAccel[0];
ay += fac2 * SphP[p].TurbAccel[1];
az += fac2 * SphP[p].TurbAccel[2];
#endif
}
ac = sqrt(ax * ax + ay * ay + az * az); /* this is now the physical acceleration */
*aphys = ac;
}
else
ac = *aphys;
if(ac == 0)
ac = 1.0e-30;
switch (All.TypeOfTimestepCriterion)
{
case 0:
if(flag > 0)
{
dt = flag * All.Timebase_interval;
dt /= hubble_a; /* convert dloga to physical timestep */
ac = 2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / (dt * dt);
*aphys = ac;
return flag;
}
- dt = dt_accel = sqrt(2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / ac);
-
-
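+      /* Acceleration criterion: dt = sqrt(2 * ErrTolIntAccuracy * a * l / |acc|),
+         with l the gravitational softening of the particle type; with
+         IMPROVED_TIMESTEP_CRITERION_FORGAS, gas particles instead use
+         min(Hsml, softening) as the length scale. */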
+#ifdef IMPROVED_TIMESTEP_CRITERION_FORGAS
+ if(P[p].Type == 0)
+ dt = dt_accel = sqrt(2 * All.ErrTolIntAccuracy * atime * dmin(SphP[p].Hsml, All.SofteningTable[P[p].Type]) / ac);
+ else
+ dt = dt_accel = sqrt(2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / ac);
+#else
+ dt = dt_accel = sqrt(2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / ac);
+#endif
+
+
#ifdef ADAPTIVE_GRAVSOFT_FORGAS
if(P[p].Type == 0)
dt = dt_accel = sqrt(2 * All.ErrTolIntAccuracy * atime * SphP[p].Hsml / 2.8 / ac);
#endif
break;
default:
endrun(888);
break;
}
if(P[p].Type == 0)
{
csnd = sqrt(GAMMA * SphP[p].Pressure / SphP[p].Density);
if(All.ComovingIntegrationOn)
dt_courant = 2 * All.CourantFac * All.Time * SphP[p].Hsml / (fac3 * SphP[p].MaxSignalVel);
else
dt_courant = 2 * All.CourantFac * SphP[p].Hsml / SphP[p].MaxSignalVel;
if(dt_courant < dt)
#ifndef MULTIPHASE
dt = dt_courant;
#else
{
	  if (SphP[p].MaxSignalVel != 0)
dt = dt_courant;
}
#endif
-#ifdef CHIMIE_THERMAL_FEEDBACK
-
- float f;
- double EgySpec,NewEgySpec;
-
- if (SphP[p].DeltaEgySpec > 0)
- {
-
- /* spec energy at current step */
- EgySpec = SphP[p].EntropyPred / GAMMA_MINUS1 * pow(SphP[p].Density*a3inv, GAMMA_MINUS1);
-
- /* new egyspec */
- NewEgySpec = EgySpec + SphP[p].DeltaEgySpec;
-
- f = NewEgySpec/EgySpec;
-
- //if (f>1)
- // dt = dt / f;
- }
-
-#endif
+// #ifdef CHIMIE_THERMAL_FEEDBACK
+//
+// float f;
+// double EgySpec,NewEgySpec;
+//
+// if (SphP[p].DeltaEgySpec > 0)
+// {
+//
+// /* spec energy at current step */
+// EgySpec = SphP[p].EntropyPred / GAMMA_MINUS1 * pow(SphP[p].Density*a3inv, GAMMA_MINUS1);
+//
+// /* new egyspec */
+// NewEgySpec = EgySpec + SphP[p].DeltaEgySpec;
+//
+// f = NewEgySpec/EgySpec;
+//
+// //if (f>1)
+// // dt = dt / f;
+// }
+//
+// #endif
#ifdef CHIMIE_KINETIC_FEEDBACK
double dt_kinetic_feedback;
double SupernovaKieticFeedbackIntAccuracy=0.1;
dt_kinetic_feedback = SupernovaKieticFeedbackIntAccuracy * All.SofteningTable[P[p].Type] / All.ChimieWindSpeed;
if(dt_kinetic_feedback < dt)
dt = dt_kinetic_feedback;
#endif
#ifdef FEEDBACK_WIND
double dt_feedback_wind;
double vwind;
vwind = sqrt( SphP[p].FeedbackWindVel[0]*SphP[p].FeedbackWindVel[0] + SphP[p].FeedbackWindVel[1]*SphP[p].FeedbackWindVel[1] + SphP[p].FeedbackWindVel[2]*SphP[p].FeedbackWindVel[2] );
if (vwind > 0)
{
dt_feedback_wind = All.SupernovaWindIntAccuracy * All.SofteningTable[P[p].Type] / vwind;
SphP[p].FeedbackWindVel[0]=0;
SphP[p].FeedbackWindVel[1]=0;
SphP[p].FeedbackWindVel[2]=0;
if(dt_feedback_wind < dt)
dt = dt_feedback_wind;
}
#endif
}
#ifdef CHIMIE
int m;
double dt_chimie;
if(P[p].Type == ST)
{
//m = P[p].StPIdx;
//if (StP[m].Flag)
{
dt_chimie = All.ChimieMaxSizeTimestep;
}
if(dt_chimie < dt)
dt = dt_chimie;
}
#endif
/* convert the physical timestep to dloga if needed. Note: If comoving integration has not been selected,
hubble_a=1.
*/
dt *= hubble_a;
if(dt >= All.MaxSizeTimestep)
dt = All.MaxSizeTimestep;
if(dt >= dt_displacement)
dt = dt_displacement;
if(dt < All.MinSizeTimestep)
{
#ifndef NOSTOP_WHEN_BELOW_MINTIMESTEP
printf("warning: Timestep wants to be below the limit `MinSizeTimestep'\n");
if(P[p].Type == 0)
{
printf
("Part-ID=%d dt=%g dtc=%g ac=%g xyz=(%g|%g|%g) hsml=%g maxsignalvel=%g dt0=%g eps=%g\n",
(int) P[p].ID, dt, dt_courant * hubble_a, ac, P[p].Pos[0], P[p].Pos[1], P[p].Pos[2],
SphP[p].Hsml, SphP[p].MaxSignalVel,
sqrt(2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / ac) * hubble_a,
All.SofteningTable[P[p].Type]);
}
else
{
printf("Part-ID=%d dt=%g ac=%g xyz=(%g|%g|%g)\n", (int) P[p].ID, dt, ac, P[p].Pos[0], P[p].Pos[1],
P[p].Pos[2]);
}
fflush(stdout);
endrun(888);
#endif
dt = All.MinSizeTimestep;
}
ti_step = dt / All.Timebase_interval;
#ifdef CHIMIE_KINETIC_FEEDBACK
//if(SetMinTimeStepForActives)
// ti_step=1;
#endif
if(!(ti_step > 0 && ti_step < TIMEBASE))
{
printf("\nError: A timestep of size zero was assigned on the integer timeline!\n"
"We better stop.\n"
"Task=%d Part-ID=%d dt=%g tibase=%g ti_step=%d ac=%g xyz=(%g|%g|%g) tree=(%g|%g%g)\n\n",
ThisTask, (int) P[p].ID, dt, All.Timebase_interval, ti_step, ac,
P[p].Pos[0], P[p].Pos[1], P[p].Pos[2], P[p].GravAccel[0], P[p].GravAccel[1], P[p].GravAccel[2]);
#ifdef PMGRID
printf("pm_force=(%g|%g|%g)\n", P[p].GravPM[0], P[p].GravPM[1], P[p].GravPM[2]);
#endif
if(P[p].Type == 0)
printf("hydro-frc=(%g|%g|%g)\n", SphP[p].HydroAccel[0], SphP[p].HydroAccel[1], SphP[p].HydroAccel[2]);
#ifdef FEEDBACK_WIND
if(P[p].Type == 0)
printf("feedback-vel=(%g|%g|%g)\n", SphP[p].FeedbackWindVel[0], SphP[p].FeedbackWindVel[1], SphP[p].FeedbackWindVel[2]);
#endif
fflush(stdout);
endrun(818);
}
return ti_step;
}
/*! This function computes an upper limit ('dt_displacement') to the global
* timestep of the system based on the rms velocities of particles. For
* cosmological simulations, the criterion used is that the rms displacement
* should be at most a fraction MaxRMSDisplacementFac of the mean particle
* separation. Note that the latter is estimated using the assigned particle
* masses, separately for each particle type. If comoving integration is not
* used, the function imposes no constraint on the timestep.
*/
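/* Schematically, for each particle type the constraint applied below is
 *    dt <= MaxRMSDisplacementFac * a^2 H(a) * min(d_mean, a_smth) / v_rms ,
 * where d_mean is the mean interparticle separation estimated from the
 * minimum particle mass of that type, a_smth is the PM smoothing scale (only
 * with PMGRID), and the hfac argument carries the a^2 H(a) factor.
 */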
void find_dt_displacement_constraint(double hfac /*!< should be a^2*H(a) */ )
{
int i, j, type, *temp;
int count[6];
long long count_sum[6];
double v[6], v_sum[6], mim[6], min_mass[6];
double dt, dmean, asmth = 0;
dt_displacement = All.MaxSizeTimestep;
if(All.ComovingIntegrationOn)
{
for(type = 0; type < 6; type++)
{
count[type] = 0;
v[type] = 0;
mim[type] = 1.0e30;
}
for(i = 0; i < NumPart; i++)
{
v[P[i].Type] += P[i].Vel[0] * P[i].Vel[0] + P[i].Vel[1] * P[i].Vel[1] + P[i].Vel[2] * P[i].Vel[2];
if(mim[P[i].Type] > P[i].Mass)
mim[P[i].Type] = P[i].Mass;
count[P[i].Type]++;
}
MPI_Allreduce(v, v_sum, 6, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(mim, min_mass, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
temp = malloc(NTask * 6 * sizeof(int));
MPI_Allgather(count, 6, MPI_INT, temp, 6, MPI_INT, MPI_COMM_WORLD);
for(i = 0; i < 6; i++)
{
count_sum[i] = 0;
for(j = 0; j < NTask; j++)
count_sum[i] += temp[j * 6 + i];
}
free(temp);
for(type = 0; type < 6; type++)
{
if(count_sum[type] > 0)
{
if(type == 0)
dmean =
pow(min_mass[type] / (All.OmegaBaryon * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)),
1.0 / 3);
else
dmean =
pow(min_mass[type] /
((All.Omega0 - All.OmegaBaryon) * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)),
1.0 / 3);
dt = All.MaxRMSDisplacementFac * hfac * dmean / sqrt(v_sum[type] / count_sum[type]);
#ifdef PMGRID
asmth = All.Asmth[0];
#ifdef PLACEHIGHRESREGION
if(((1 << type) & (PLACEHIGHRESREGION)))
asmth = All.Asmth[1];
#endif
if(asmth < dmean)
dt = All.MaxRMSDisplacementFac * hfac * asmth / sqrt(v_sum[type] / count_sum[type]);
#endif
if(ThisTask == 0)
printf("type=%d dmean=%g asmth=%g minmass=%g a=%g sqrt(<p^2>)=%g dlogmax=%g\n",
type, dmean, asmth, min_mass[type], All.Time, sqrt(v_sum[type] / count_sum[type]), dt);
if(dt < dt_displacement)
dt_displacement = dt;
}
}
if(ThisTask == 0)
printf("displacement time constraint: %g (%g)\n", dt_displacement, All.MaxSizeTimestep);
}
}
#ifdef SYNCHRONIZE_NGB_TIMESTEP
#ifdef PERIODIC
static double boxSize, boxHalf;
#ifdef LONG_X
static double boxSize_X, boxHalf_X;
#else
#define boxSize_X boxSize
#define boxHalf_X boxHalf
#endif
#ifdef LONG_Y
static double boxSize_Y, boxHalf_Y;
#else
#define boxSize_Y boxSize
#define boxHalf_Y boxHalf
#endif
#ifdef LONG_Z
static double boxSize_Z, boxHalf_Z;
#else
#define boxSize_Z boxSize
#define boxHalf_Z boxHalf
#endif
#endif
static int NDone;
static long long NTotDone;
/*! This function shares the timesteps between particles
 * according to the Saitoh and Makino rule.
*/
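/* Summary of the rule as implemented below: an SPH neighbour lying inside the
 * smoothing sphere of an active particle i may not keep a timestep larger
 * than All.NgbFactorTimestep times the step just estimated for i. Offending
 * active neighbours simply get a smaller Ti_step; inactive ones are
 * re-aligned onto the shorter step and later receive a corrective (negative)
 * kick via kickback(), so that their kinematic quantities correspond to the
 * midpoint of the new, shorter step.
 */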
void synchronize_ngb_timestep()
{
long long ntot, ntotleft;
int i, j, k, n, ngrp, maxfill, source, ndone;
int *nbuffer, *noffset, *nsend_local, *nsend, *numlist, *ndonelist;
int level, sendTask, recvTask, nexport, place;
double t0, t1;
double timecomp = 0, timeimbalance = 0, timecommsumm = 0;
MPI_Status status;
int CptLimit = 0;
int shrinkcount = 0, shrinktot = 0;
int tstart,tend;
double dt_entr;
double dt_gravkick;
double dt_hydrokick;
int counter;
#ifdef PERIODIC
boxSize = All.BoxSize;
boxHalf = 0.5 * All.BoxSize;
#ifdef LONG_X
boxHalf_X = boxHalf * LONG_X;
boxSize_X = boxSize * LONG_X;
#endif
#ifdef LONG_Y
boxHalf_Y = boxHalf * LONG_Y;
boxSize_Y = boxSize * LONG_Y;
#endif
#ifdef LONG_Z
boxHalf_Z = boxHalf * LONG_Z;
boxSize_Z = boxSize * LONG_Z;
#endif
#endif
/* `NumSphUpdate' gives the number of particles on this processor that want a force update */
for(n = 0, NumSphUpdate = 0; n < N_gas; n++)
{
#ifdef SFR
if((P[n].Ti_endstep == All.Ti_Current) && (P[n].Type == 0))
#else
if(P[n].Ti_endstep == All.Ti_Current)
#endif
#ifdef MULTIPHASE
if(SphP[n].Phase == GAS_SPH)
#endif
NumSphUpdate++;
}
numlist = malloc(NTask * sizeof(int) * NTask);
MPI_Allgather(&NumSphUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD);
for(i = 0, ntot = 0; i < NTask; i++)
ntot += numlist[i];
free(numlist);
noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */
nbuffer = malloc(sizeof(int) * NTask);
nsend_local = malloc(sizeof(int) * NTask);
nsend = malloc(sizeof(int) * NTask * NTask);
ndonelist = malloc(sizeof(int) * NTask);
if (ThisTask==0)
printf("start synchronize ngb timestep\n");
NTotDone=1;
//NTotDone=0; /* if we want to disable the multi loop */
/* loop for time-step limiter */
while(NTotDone > 0)
{
NDone = 0;
i = 0; /* first particle for this task */
ntotleft = ntot; /* particles left for all tasks together */
while(ntotleft > 0)
{
for(j = 0; j < NTask; j++)
nsend_local[j] = 0;
/* do local particles and prepare export list */
t0 = second();
for(nexport = 0, ndone = 0; i < N_gas && nexport < All.BunchSizeSynchronizeNgBTimestep - NTask; i++)
#ifdef SFR
if((P[i].Ti_endstep == All.Ti_Current) && (P[i].Type == 0))
#else
if(P[i].Ti_endstep == All.Ti_Current)
#endif
{
{
ndone++;
for(j = 0; j < NTask; j++)
Exportflag[j] = 0;
NDone += synchronize_ngb_timestep_evaluate(i, 0);
for(j = 0; j < NTask; j++)
{
if(Exportflag[j])
{
SynchroinzeNgbTimestepDataIn[nexport].Pos[0] = P[i].Pos[0];
SynchroinzeNgbTimestepDataIn[nexport].Pos[1] = P[i].Pos[1];
SynchroinzeNgbTimestepDataIn[nexport].Pos[2] = P[i].Pos[2];
SynchroinzeNgbTimestepDataIn[nexport].Hsml = SphP[i].Hsml;
SynchroinzeNgbTimestepDataIn[nexport].Ti_step = P[i].Ti_step;
SynchroinzeNgbTimestepDataIn[nexport].Ti_endstep = P[i].Ti_endstep;
SynchroinzeNgbTimestepDataIn[nexport].Index = i;
SynchroinzeNgbTimestepDataIn[nexport].Task = j;
#ifdef MULTIPHASE
SynchroinzeNgbTimestepDataIn[nexport].Phase = SphP[i].Phase;
#endif
nexport++;
nsend_local[j]++;
}
}
}
}
t1 = second();
timecomp += timediff(t0, t1);
qsort(SynchroinzeNgbTimestepDataIn, nexport, sizeof(struct SynchroinzeNgbTimestepdata_in), synchronize_ngb_timestep_compare_key);
for(j = 1, noffset[0] = 0; j < NTask; j++)
noffset[j] = noffset[j - 1] + nsend_local[j - 1];
t0 = second();
MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD);
t1 = second();
timeimbalance += timediff(t0, t1);
/* now do the particles that need to be exported */
for(level = 1; level < (1 << PTask); level++)
{
t0 = second();
for(j = 0; j < NTask; j++)
nbuffer[j] = 0;
for(ngrp = level; ngrp < (1 << PTask); ngrp++)
{
maxfill = 0;
for(j = 0; j < NTask; j++)
{
if((j ^ ngrp) < NTask)
if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j])
maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j];
}
if(maxfill >= All.BunchSizeSynchronizeNgBTimestep)
break;
sendTask = ThisTask;
recvTask = ThisTask ^ ngrp;
if(recvTask < NTask)
{
if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0)
{
/* get the particles */
MPI_Sendrecv(&SynchroinzeNgbTimestepDataIn[noffset[recvTask]],
nsend_local[recvTask] * sizeof(struct SynchroinzeNgbTimestepdata_in), MPI_BYTE,
recvTask, 0,
&SynchroinzeNgbTimestepDataGet[nbuffer[ThisTask]],
nsend[recvTask * NTask + ThisTask] * sizeof(struct SynchroinzeNgbTimestepdata_in),
MPI_BYTE, recvTask, 0, MPI_COMM_WORLD, &status);
}
}
for(j = 0; j < NTask; j++)
if((j ^ ngrp) < NTask)
nbuffer[j] += nsend[(j ^ ngrp) * NTask + j];
}
t1 = second();
timecommsumm += timediff(t0, t1);
t0 = second();
for(j = 0; j < nbuffer[ThisTask]; j++)
NDone += synchronize_ngb_timestep_evaluate(j, 1);
t1 = second();
timecomp += timediff(t0, t1);
/* do a block to explicitly measure imbalance */
t0 = second();
MPI_Barrier(MPI_COMM_WORLD);
t1 = second();
timeimbalance += timediff(t0, t1);
level = ngrp - 1;
}
t0 = second();
MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD);
for(j = 0; j < NTask; j++)
ntotleft -= ndonelist[j];
t1 = second();
timeimbalance += timediff(t0, t1);
}
t0 = second();
numlist = (int*)malloc(NTask * sizeof(int) * NTask);
MPI_Allgather(&NDone, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD);
for(i = 0, NTotDone = 0; i < NTask; i++)
NTotDone += numlist[i];
free(numlist);
t1 = second();
timeimbalance += timediff(t0, t1);
if(ThisTask == 0)
{
fprintf(stdout," %3d) ---> number of timestep shrinked gas neighbors: %6lld \n", CptLimit++, NTotDone);
fflush(stdout);
}
}
//
/* do final operations on results */
counter=0;
for(i = 0; i < N_gas; i++)
#ifdef SFR
if((P[i].Type == 0))
#endif
{
if (P[i].Old_Ti_endstep != All.Ti_Current) /* the particle is inactive */
{
if ( (P[i].Old_Ti_endstep != P[i].Ti_endstep) || (P[i].Old_Ti_begstep != P[i].Ti_begstep) ) /* its timestep has been updated */
{
//printf("---------> %d %d %d %d\n",P[i].Old_Ti_endstep,P[i].Ti_endstep,P[i].Old_Ti_begstep,P[i].Ti_begstep);
/* need to extrapolate mid-step quantities */
counter++;
/* old mid step */
tstart = (P[i].Old_Ti_begstep + P[i].Old_Ti_endstep) / 2; /* midpoint of old step */
tend = (P[i].Ti_begstep + P[i].Ti_endstep) / 2; /* midpoint of new step */
-
/* now, do the kick */
-
- if(All.ComovingIntegrationOn)
- {
- dt_entr = (tend - tstart) * All.Timebase_interval;
- dt_gravkick = get_gravkick_factor(tstart, tend);
- dt_hydrokick = get_hydrokick_factor(tstart, tend);
- }
- else
- {
- dt_entr = dt_gravkick = dt_hydrokick = (tend - tstart) * All.Timebase_interval;
- }
-
-
- /* note that VelPred is already at the right time */
- for(k = 0; k < 3; k++)
- P[i].Vel[k] += SphP[i].HydroAccel[k] * dt_hydrokick;
-
-#ifdef AB_TURB
- for(k = 0; k < 3; k++)
- P[i].Vel[k] += SphP[i].TurbAccel[k] * dt_hydrokick;
-#endif
-
- if(SphP[i].DtEntropy * dt_entr > -0.5 * SphP[i].Entropy)
- SphP[i].Entropy += SphP[i].DtEntropy * dt_entr;
- else
- SphP[i].Entropy *= 0.5;
-
-
-#ifdef LIMIT_DVEL
- printf("LIMIT_DVEL : not implemented here.");
- endrun(88998877)
-#endif
-
+ kickback(i,tstart,tend);
+
+
}
}
}
if (counter!=0)
printf("(%d) %d passive particles have been updated \n",ThisTask,counter);
if (ThisTask==0)
printf("synchronize ngb timestep done.\n");
}
int synchronize_ngb_timestep_evaluate(int target, int mode)
{
int j, k, n, startnode, numngb_inbox;
double h, h2;
double r2, dx, dy, dz;
int phase=0;
FLOAT *pos;
int ti_step_i;
int endstep_i;
int CptShrink = 0;
if(mode == 0)
{
pos = P[target].Pos;
h = SphP[target].Hsml;
ti_step_i = P[target].Ti_step;
endstep_i = P[target].Ti_endstep; /* !!! */
#ifdef MULTIPHASE
phase = SphP[target].Phase;
#endif
}
else
{
pos = SynchroinzeNgbTimestepDataGet[target].Pos;
h = SynchroinzeNgbTimestepDataGet[target].Hsml;
ti_step_i = SynchroinzeNgbTimestepDataGet[target].Ti_step;
endstep_i = SynchroinzeNgbTimestepDataGet[target].Ti_endstep; /* !!! */
#ifdef MULTIPHASE
phase = SynchroinzeNgbTimestepDataGet[target].Phase;
#endif
}
h2 = h * h;
startnode = All.MaxPart;
do
{
numngb_inbox = ngb_treefind_variable(&pos[0], h, phase, &startnode);
for(n = 0; n < numngb_inbox; n++)
{
j = Ngblist[n];
dx = P[j].Pos[0] - pos[0];
dy = P[j].Pos[1] - pos[1];
dz = P[j].Pos[2] - pos[2];
#ifdef PERIODIC /* now find the closest image in the given box size */
if(dx > boxHalf_X)
dx -= boxSize_X;
if(dx < -boxHalf_X)
dx += boxSize_X;
if(dy > boxHalf_Y)
dy -= boxSize_Y;
if(dy < -boxHalf_Y)
dy += boxSize_Y;
if(dz > boxHalf_Z)
dz -= boxSize_Z;
if(dz < -boxHalf_Z)
dz += boxSize_Z;
#endif
r2 = dx * dx + dy * dy + dz * dz;
if(r2 < h2)
{
if( P[j].Ti_endstep == All.Ti_Current ) /* the particle is active */
{
if(P[j].Ti_step > All.NgbFactorTimestep*ti_step_i )
{
CptShrink++;
P[j].Ti_step = All.NgbFactorTimestep*ti_step_i;
}
}
else /* the particle is not active */
{
-#ifdef SYNCHRONIZATION
+
if( P[j].Ti_endstep > All.Ti_Current + All.NgbFactorTimestep*ti_step_i )
{
CptShrink++;
P[j].Old_Ti_begstep = P[j].Ti_begstep;
P[j].Old_Ti_endstep = P[j].Ti_endstep;
P[j].Ti_step = All.NgbFactorTimestep*ti_step_i;
+
+
+#ifdef SYNCHRONIZATION
+
+	      /* find the new Ti_endstep: the smallest Ti_begstep + k*Ti_step lying beyond the current time */
+ for(k = 0; P[j].Ti_begstep + k * P[j].Ti_step <= All.Ti_Current; k++);
+
+ P[j].Ti_endstep = P[j].Ti_begstep + k * P[j].Ti_step ;
+ P[j].Ti_begstep = P[j].Ti_endstep - P[j].Ti_step;
+#else
P[j].Ti_endstep = All.Ti_Current + P[j].Ti_step;
- P[j].Ti_begstep = P[j].Ti_endstep - P[j].Ti_step;
+ P[j].Ti_begstep = P[j].Ti_endstep - P[j].Ti_step;
+#endif
+
+
}
-
-#else
- if (ThisTask==0)
- {
- printf("\n\nThis case is not taken into consideration now !\n");
- printf("When usingSYNCHRONIZE_NGB_TIMESTEP you need to use SYNCHRONIZATION\n\n");
- endrun(1010101001);
- }
-#endif
+
}
}
}
}
while(startnode >= 0);
return CptShrink;
}
/*! This is a comparison kernel for a sort routine, which is used to group
* particles that are going to be exported to the same CPU.
*/
int synchronize_ngb_timestep_compare_key(const void *a, const void *b)
{
if(((struct SynchroinzeNgbTimestepdata_in *) a)->Task < (((struct SynchroinzeNgbTimestepdata_in *) b)->Task))
return -1;
if(((struct SynchroinzeNgbTimestepdata_in *) a)->Task > (((struct SynchroinzeNgbTimestepdata_in *) b)->Task))
return +1;
return 0;
}
-#endif
\ No newline at end of file
+#endif
+
+
+
+