import numpy as np
from scipy import special
from scipy.integrate import quad
a = 1.859e9 # power-law amplitude, fit to Sawala et al. 2017
b = -1.915  # power-law slope, fit to Sawala et al. 2017
def CDM_DistributionFunction(Mh):
    '''
    dN/dM : differential subhalo mass function, power-law fit to Sawala et al. 2017
    '''
dNdM = a *(Mh)**b
return dNdM
def CDM_CumulativeMass(Mh):
    '''
    N(>Mh) : cumulative number of haloes above Mh (analytic integral of dN/dM)
    '''
    NM = -a*(Mh)**(b+1)/(b+1)
    return NM
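# Worked integral behind CDM_CumulativeMass (power law with b < -1):
#   N(>M) = int_M^inf a M'^b dM' = -a M^(b+1)/(b+1),
# the contribution of the upper limit vanishes because b+1 < 0.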
def WDMCDMRatio(Mhs):
"""
Ratio of the halo mass function of the WDM to CDM (Forouhar 2023)
"""
lnMhs = np.log10(Mhs)
f = np.array([0,0.004,0.008,0.025,0.092,0.253,0.396,0.485,0.620,0.746,0.827,0.869,0.873,0.936,1])
lnMh = np.array([7,7.176,7.549,7.934,8.367,8.900,9.244,9.457,9.742,10.00,10.35,10.55,11.10,11.38,11.63])
fs = np.interp(lnMhs,lnMh,f)
return fs
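# The tabulated points give the suppression fraction f as a function of
# log10(Mh); np.interp clamps outside the tabulated range, so f -> 0 below
# 10^7 Msun and f -> 1 above ~10^11.6 Msun.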
# CDM cumulative mass function N(>M), from numerical integration of CDM_DistributionFunction
fctCDM_CumulativeMass = np.vectorize(lambda x: quad( lambda M: CDM_DistributionFunction(M) ,x, 1e12)[0])
# WDM cumulative mass function N(>M): the CDM distribution function times the WDM/CDM suppression ratio
fctWDM_CumulativeMass = np.vectorize(lambda x: quad( lambda M: CDM_DistributionFunction(M)*WDMCDMRatio(M) ,x, 1e12)[0])
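# Note: 1e12 Msun is used as the upper integration bound, i.e. the assumed
# maximum halo mass; for the steep power law above, the choice of this cutoff
# matters little for masses well below 1e12 Msun.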
def GetStellarMassHaloMassRelation(model='rj2018'):
    '''
    Return (Mhbins, lower, upper): halo-mass bins together with the lower and
    upper envelopes of the stellar mass - halo mass relation for the selected model.
    '''
if model=="rj2018":
import pickle
# read Mh vs Lv
f = open("MsvsMh_2018.pkl","rb")
Mhbins = pickle.load(f,encoding="latin1")
Lvmins = pickle.load(f,encoding="latin1")
Lvmaxs = pickle.load(f,encoding="latin1")
f.close()
return Mhbins,Lvmins,Lvmaxs
    elif model in ("model1","model2","model3","model4"):
        # the four models only differ by the index of the sales2022 data files
        idx = model[-1]
        Mhbins = np.linspace(7,11,20)
        data = np.loadtxt("sales2022/MsMh_low_%s.txt"%idx)
        Lvmins = np.interp(Mhbins,data[:,0],data[:,1])
        data = np.loadtxt("sales2022/MsMh_high_%s.txt"%idx)
        Lvmaxs = np.interp(Mhbins,data[:,0],data[:,1])
        return 10**Mhbins,10**Lvmins,10**Lvmaxs
class DM_Model():
def __init__(self,Mmin,Mmax,N):
        self.Mmin = Mmin # minimum halo mass
        self.Mmax = Mmax # maximum halo mass
        self.N = N       # number of mass bins
# halo mass bins
self.Mh = 10**np.linspace(np.log10(self.Mmin),np.log10(self.Mmax),self.N)
def DistributionFunction(self,Mh):
'''
return the dark matter distribution function
i.e., dN/dM
'''
pass
    def CumulativeMass(self,Mh):
        '''
        return the cumulative halo mass function N(>Mh),
        i.e. the number of haloes with mass above Mh
        '''
        pass
    def NumberOfHaloes(self):
        '''
        return the total number of haloes above Mmin
        '''
        return int(self.CumulativeMass(self.Mmin))
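# Concrete models below subclass DM_Model: they provide CumulativeMass (and,
# where analytic, DistributionFunction) and build fctSample, which maps uniform
# random numbers in [0,1] to halo masses by inverting the normalised cumulative
# function.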
##############################################
# CDM from Sawala 2017 fit
##############################################
class CDM_Sawala_Model(DM_Model):
def __init__(self,Mmin,Mmax,N):
# do the initialisation
super().__init__(Mmin,Mmax,N)
        self.a = 1.859e9 # power-law amplitude, fit to Sawala et al. 2017
        self.b = -1.915  # power-law slope, fit to Sawala et al. 2017
# cumulative number of haloes
NM = self.CumulativeMass()
# compute the sampling function
self.fctSample = np.vectorize( lambda x: 10**np.interp(x, NM[::-1]/NM.max(), np.log10(self.Mh[::-1])) )
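        # Inverse-transform sampling: NM[::-1]/NM.max() is the normalised
        # cumulative function (monotonically increasing), and the interpolation
        # inverts it in log10(Mh), so a uniform deviate in [0,1] is mapped to a
        # halo mass distributed according to dN/dM between Mmin and Mmax.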
def DistributionFunction(self,Mh=None):
'''
return the dark matter distribution function
i.e., dN/dM
'''
if Mh is None:
Mh = self.Mh
dNdM = self.a *(Mh)**self.b
return dNdM
def CumulativeMass(self,Mh=None):
'''
        return the cumulative halo mass function N(>Mh), i.e. the number of haloes above Mh
'''
if Mh is None:
Mh = self.Mh
NM = -self.a*(Mh)**(self.b+1)/(self.b+1)
return NM
##############################################
# WDM from Sawala 2017 + Forouhar 2023
##############################################
class CDM_Forouhar_Model(DM_Model):
def __init__(self,Mmin,Mmax,N):
# do the initialisation
super().__init__(Mmin,Mmax,N)
self.CDM = CDM_Sawala_Model(Mmin,Mmax,N)
        # cumulative number of haloes N(>M), from numerical integration of the WDM distribution function
self.fctCumulativeMass = np.vectorize(lambda x: quad( lambda M: self.DistributionFunction(M),x, 1e12)[0])
# cumulative number of haloes
NM = self.CumulativeMass()
# compute the sampling function
self.fctSample = np.vectorize( lambda x: 10**np.interp(x, NM[::-1]/NM.max(), np.log10(self.Mh[::-1])) )
def DistributionFunction(self,Mh=None):
'''
return the dark matter distribution function
i.e., dN/dM
'''
if Mh is None:
Mh = self.Mh
dNdM = self.CDM.DistributionFunction(Mh)*self.WDMCDMRatio(Mh)
return dNdM
def CumulativeMass(self,Mh=None):
'''
        return the cumulative halo mass function N(>Mh), i.e. the number of haloes above Mh
'''
if Mh is None:
Mh = self.Mh
return self.fctCumulativeMass(Mh)
def WDMCDMRatio(self,Mh=None):
"""
Ratio of the halo mass function of the WDM to CDM (Forouhar 2023)
"""
if Mh is None:
Mh = self.Mh
lnMhs = np.log10(Mh)
f = np.array([0,0.004,0.008,0.025,0.092,0.253,0.396,0.485,0.620,0.746,0.827,0.869,0.873,0.936,1])
lnMh = np.array([7,7.176,7.549,7.934,8.367,8.900,9.244,9.457,9.742,10.00,10.35,10.55,11.10,11.38,11.63])
fs = np.interp(lnMhs,lnMh,f)
return fs
##############################################
# WDM from Bode, Ostriker & Turok (2001)
# Viel 2005 (nu updated)
##############################################
# Note : here we avoid using classy to compute the power spectrum;
# a precomputed CDM spectrum is read from CDM_PS.dat instead
from scipy.integrate import cumtrapz, trapz
import genmassfct as gmf
class GEN_Model(DM_Model):
def __init__(self,Mmin,Mmax,N,MDM=2,fWDM=0,M0=1e12,Om=0.315,h0=0.674):
# do the initialisation
super().__init__(Mmin,Mmax,N)
        # filename of the input (unsuppressed CDM) power spectrum
filename = './CDM_PS.dat'
# temporary file
self.filename = '/tmp/powerspectrum_pk.dat'
k, pk = np.loadtxt(filename, unpack=True, usecols = (0,1))
if MDM is not None:
Pk = self.Transfer(k,pk,MDM,Om,h0)
else:
Pk = pk
        # save to file (read back by genmassfct via par.file.psfct)
body = np.transpose([k, Pk])
np.savetxt(self.filename,body,delimiter='\t')
# initialise parameters
par = gmf.par()
par.window.window = "sharpk"
par.code.rmin = 0.008
par.mf.q = 1.0
par.mf.p = 0.3
par.mf.c = 2.5
# redshift
z0 = 0
        # calculate the satellite mass function
par.file.psfct = self.filename
self.mbin, dNsatdlnM = gmf.dNsatdlnm(M0,z0,par)
self.Nsat = Nsat_integral(self.mbin,dNsatdlnM)
# cumulative number of haloes
NM = self.CumulativeMass()
# compute the sampling function
self.fctSample = np.vectorize( lambda x: 10**np.interp(x, NM[::-1]/NM.max(), np.log10(self.Mh[::-1])) )
def CumulativeMass(self,Mh=None):
'''
        return the cumulative halo mass function N(>Mh), i.e. the number of haloes above Mh
'''
if Mh is None:
Mh = self.Mh
return np.interp(Mh,self.mbin,self.Nsat)
def Transfer(self,k,pk,DMmass,OmegaWDM,h):
"""
initially from Bode, Ostriker & Turok (2001)
but values from Viel 2005 (nu updated)
"""
OmegaWDM = OmegaWDM/0.25
h = h/0.7
nu = 1.12
alpha = 0.049*DMmass**(-1.11) * (OmegaWDM)**(0.11) * (h)**(1.22)
T = (1+(alpha*k)**(2*nu))**(-5/nu)
pk = pk*T**2
return pk
##############################################
# Generic model (Schneider 2014)
##############################################
# Note : deprecated (kept for reference; requires classy)
from scipy.integrate import cumtrapz, trapz
from classy import Class
import genmassfct as gmf
def ps_class(cosmopars):
"""
Calculate power spectrum with CLASS
"""
Om, Ob, As, h0, ns, mWDM, fWDM = cosmopars
if (fWDM>0):
m_nu = 4.43*mWDM**(4./3)*((Om-Ob)*h0**2/0.1225)**(-1./3)
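        # m_nu above maps the thermal-relic WDM mass (keV) to the corresponding
        # sterile-neutrino mass (4.43 keV scaling as in Viel et al. 2005);
        # CLASS expects m_ncdm in eV, hence the factor 1000 below.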
CLASSparams = {
'h': h0,
'T_cmb': 2.726,
'Omega_b': Ob,
'Omega_cdm': (1-fWDM)*(Om-Ob),
'Omega_ncdm': fWDM*(Om-Ob),
'N_ur': 3.04,
'N_ncdm': 1,
'm_ncdm': 1000*m_nu,
'T_ncdm': 0.715985,
'n_s': ns,
'A_s': As,
'P_k_max_h/Mpc': 200,
'k_per_decade_for_pk': 10,
'output': 'mPk',
'z_pk': 0.0,
'ncdm_fluid_approximation': 3,
}
else:
CLASSparams = {
'h': h0,
'T_cmb': 2.726,
'Omega_b': Ob,
'Omega_cdm': (Om-Ob),
'N_ur': 3.04,
'n_s': ns,
'A_s': As,
'P_k_max_h/Mpc': 200,
'k_per_decade_for_pk': 10,
'output': 'mPk',
'z_pk': 0.0,
'ncdm_fluid_approximation': 1,
}
CLASScosmo = Class()
CLASScosmo.set(CLASSparams)
CLASScosmo.compute()
s8 = CLASScosmo.sigma8()
print("s8 = ",s8)
k_bin = np.logspace(np.log10(1e-3), np.log10(200), 500) #in [h/Mpc]
z_bin = np.array([0])
pk_bin = []
for i in range(len(k_bin)):
pk_bin += [CLASScosmo.pk_lin(k_bin[i]*h0,0)]
pk_bin = np.array(pk_bin)*h0**3 # [Mpc/h]^3
CLASScosmo.struct_cleanup()
CLASScosmo.empty()
return k_bin, pk_bin
def Nsat_integral(mbin,dNsatdlnm):
"""
Cumulative mass function Nsat(>M)
"""
Nsat = trapz(dNsatdlnm/mbin,mbin) - cumtrapz(dNsatdlnm/mbin,mbin,initial=dNsatdlnm[0]/mbin[0])
return Nsat
class oldGEN_Model(DM_Model):
def __init__(self,Mmin,Mmax,N,MDM=2,fWDM=0,M0=1e12):
# do the initialisation
super().__init__(Mmin,Mmax,N)
# temporary file
self.filename = '/tmp/powerspectrum_pk.dat'
        # Cosmological parameters
Om = 0.315
Ob = 0.048
h0 = 0.681
As = 2.07e-9
ns = 0.963
# redshift
z0 = 0
self.MDM = MDM # dark matter mass in keV
self.fWDM = fWDM # warm dark matter fraction
self.M0 = M0 # Host halo mass (Msun/h)
cosmopars = Om, Ob, As, h0, ns, self.MDM, fWDM
# calculate power spectrum
kbin, PSbin = ps_class(cosmopars)
body = np.transpose([kbin, PSbin])
np.savetxt(self.filename,body,delimiter='\t')
# initialise parameters
par = gmf.par()
par.window.window = "sharpk"
par.code.rmin = 0.008
par.mf.q = 1.0
par.mf.p = 0.3
par.mf.c = 2.5
        # calculate the satellite mass function
par.file.psfct = self.filename
self.mbin, dNsatdlnM = gmf.dNsatdlnm(M0,z0,par)
self.Nsat = Nsat_integral(self.mbin,dNsatdlnM)
# cumulative number of haloes
NM = self.CumulativeMass()
# compute the sampling function
self.fctSample = np.vectorize( lambda x: 10**np.interp(x, NM[::-1]/NM.max(), np.log10(self.Mh[::-1])) )
def CumulativeMass(self,Mh=None):
'''
        return the cumulative halo mass function N(>Mh), i.e. the number of haloes above Mh
'''
if Mh is None:
Mh = self.Mh
return np.interp(Mh,self.mbin,self.Nsat)
def Msol2Mag(mass,Age=12,MH=-2,filter="F475X"):
    '''
    mass   : stellar mass in Msol
    Age    : age in Gyr
    MH     : metallicity in [M/H]
    filter : one of "F475X", "VISeuclid", "Yeuclid", "Jeuclid", "V"
    '''
import stars_class
mass = np.array([mass])
Age = np.array([Age])
MH = np.array([MH])
###################################
# compute Magnitudes
###################################
# get the number of stars in each mass bin
Nstars = stars_class.Stars_fun(mass,None,None, 'normed_3slope')
    ##############################
    # HST F475X and Euclid VIS/Y/J magnitudes
    # (the four filters share the same flux summation; only the magnitude
    #  function taken from stars_class changes)
    filter_funcs = {"F475X"     : stars_class.HST475X_fun,
                    "VISeuclid" : stars_class.VISeuclid_fun,
                    "Yeuclid"   : stars_class.Yeuclid_fun,
                    "Jeuclid"   : stars_class.Jeuclid_fun}
    if filter in filter_funcs:
        M = filter_funcs[filter](None,Age,MH)
        # convert to flux (ignore the zero point)
        F = 10**(-M/2.5)
        # sum the contribution of the mass bins
        F = np.sum(F*Nstars, axis=0)
        # compute the absolute magnitude (as before we ignore the zero point)
        M = - 2.5*np.log10(F)
        return M[0]
##############################
    # Vazdekis SSP V-band luminosities (pNbody)
if filter=="V":
from pNbody.SSP import libvazdekis
LObj = libvazdekis.VazdekisLuminosities("/home/revaz/.pNbody/opt/SSP/vazdekis_kb_mu1.3.txt",band="V")
LObj.ExtrapolateMatrix(order=1,s=0)
LObj.CreateInterpolator()
LObj.Extrapolate2DMatrix()
# luminosity
Lv = LObj.Luminosities(MH,Age)*mass
        # Magnitude (4.81 = absolute V-band magnitude of the Sun, Vega system)
        Mag_Vega = 4.81
Mv = Mag_Vega - 2.5*np.log10(Lv)
return Mv
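
if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original pipeline).
    # It exercises the two analytic/tabulated models that need no external data
    # files; it still assumes the module-level imports (genmassfct, classy) resolve.
    np.random.seed(0)
    cdm = CDM_Sawala_Model(Mmin=1e8, Mmax=1e11, N=100)
    wdm = CDM_Forouhar_Model(Mmin=1e8, Mmax=1e11, N=100)
    print("CDM haloes above Mmin :", cdm.NumberOfHaloes())
    print("WDM haloes above Mmin :", wdm.NumberOfHaloes())
    # draw ten halo masses by inverse-transform sampling
    print("sampled CDM halo masses:", cdm.fctSample(np.random.random(10)))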
