diff --git a/pNbody/Mkgmov.py b/pNbody/Mkgmov.py index 0788a43..cefd336 100644 --- a/pNbody/Mkgmov.py +++ b/pNbody/Mkgmov.py @@ -1,809 +1,821 @@ import Mtools import Mtools as mt import os import glob from pNbody import * from pNbody.param import Params import copy import types import gzip ######################################################################################################## # # Some info on the structure : # # 1) nb : the full nbody object (main loop) # # 2) nbf : normally set the display parameters, position of the observer # but could be used to set the component # # 3) nbfc : normally set the component to display and/or the value to dispplay # # component['id'][0] == '@' # this is used to open simple nbody object (to draw a box, for example) # component['id'][0] == '#' # no selection # component['id'] == 'gas' # selection gas # # # # # a script may be called at level 2) # # frame['ext_cmd'] = [] # frame['ext_cmd'].append("""from plot_P import MakePlot""") # frame['ext_cmd'].append("""MakePlot([nbf],output)""") # # it must save an output output, the latter is opened as an img # and appened to imgs # # # a script may be called at level 3) # # 1) # component['ext_cmd'] = [] # component['ext_cmd'].append("""from plot_P import MakePlot""") # component['ext_cmd'].append("""MakePlot([nbfc],output)""") # # it must save an output output, the latter is opened as an img # and appened to imgs # # # 2) using # component['to_img']='script_name' # # ######################################################################################################## ####################################################################### # some usefull functions ####################################################################### def ReadNbodyParameters(paramname): ''' read param from a parameter Nbody file ''' gparams = Params(paramname,None) param = {} # create new params for p in gparams.params: param[p[0]]=p[3] return param def gzip_compress(file): f = open(file,'r') content = f.read() f.close() f = gzip.open(file+'.gz', 'wb') f.write(content) f.close() ####################################################################### # # C L A S S D E F I N I T I O N # ####################################################################### class Movie(): - def __init__(self,parameterfile='filmparam.py',format=None,imdir=None,timesteps=None,pio=False,compress=True): + def __init__(self,parameterfile='filmparam.py',format=None,imdir=None,timesteps=None,pio=False,compress=True,ifile=0): self.DEFAULT_TIMESTEPS = None self.DEFAULT_FORMAT = "fits" self.DEFAULT_IMDIR = "fits" self.DEFAULT_PIO = False self.DEFAULT_COMPRESS = True self.DEFAULT_SCALE = "log" self.DEFAULT_CD = 0. self.DEFAULT_MN = 0. self.DEFAULT_MX = 0. self.DEFAULT_PALETTE = "light" self.parameterfile=parameterfile # read the parameter file self.read_parameterfile() # use options if format != None: self.film['format'] = format if imdir != None: self.film['imdir'] = imdir if timesteps != None: self.film['timesteps'] = timesteps if pio != None: self.film['pio'] = pio if compress != None: self.film['compress'] = compress self.imdir = self.film['imdir'] self.pio = self.film['pio'] self.compress = self.film['compress'] # deal with timesteps self.set_timesteps() - self.ifile=-1 + self.ifile=ifile-1 self.file_offset=0 if mpi.mpi_IsMaster(): # whith gmkgmov, the init is only runned by the master, this line is not needed... 
if self.pio: self.pio = "yes" else: self.pio = "no" if self.parameterfile==None: print "you must specify a parameter file" sys.exit() if not os.path.exists(self.parameterfile): print "file %s does not exists"%self.parameterfile sys.exit() if not os.path.exists(self.imdir): os.mkdir(self.imdir) else: print "directory %s exists !!!"%self.imdir files = os.listdir(self.imdir) print "the directory contains %d files"%(len(files)) # png files png_files = glob.glob(os.path.join(self.imdir,"*.png")) n_png_files = len(png_files) print "the directory contains %d png files"%(n_png_files) # fits files fits_files = glob.glob(os.path.join(self.imdir,"*.fits")) n_fits_files = len(fits_files) print "the directory contains %d fits files"%(n_fits_files) if n_png_files > 0: self.file_offset = n_png_files def info(self): print "INFO INFO INFO" #print self.film print self.parameterfile print self.getftype() print "INFO INFO INFO" def read_parameterfile(self): if not os.path.isfile(self.parameterfile): raise IOError(915,'file %s not found ! Pease check the file name.'%(self.parameterfile)) # import the parameter file as a module module_name = os.path.basename(os.path.splitext(self.parameterfile)[0]) module_dir = os.path.dirname(self.parameterfile) if sys.path.count(module_dir) == 0: sys.path.append(module_dir) filmparam = __import__(module_name,globals(), locals(), [], -1) self.film = filmparam.film # set some defaults if not self.film.has_key('timesteps'): self.film['timesteps'] = self.DEFAULT_TIMESTEPS if not self.film.has_key('imdir'): self.film['imdir'] = self.DEFAULT_IMDIR if not self.film.has_key('format'): self.film['format'] = self.DEFAULT_FORMAT if not self.film.has_key('pio'): self.film['pio'] = self.DEFAULT_PIO if not self.film.has_key('compress'): self.film['compress'] = self.DEFAULT_COMPRESS self.setftype(self.film['ftype']) # post process for i,frame in enumerate(self.film['frames']): frame['id'] = i # check #for frame in self.film['frames']: # print frame['id'] # for component in frame['components']: # print " ",component['id'] ################################## # time steps stuffs ################################## def set_timesteps(self): """ define self.times (which is a list) based on the value contained in self.film['timesteps'] """ # self.times if self.film['timesteps']=='every': self.times="every" elif type(self.film['timesteps']) == types.StringType: fname = self.film['timesteps'] if not os.path.isfile(fname): raise IOError(916,'file %s not found ! 
Pease check the file name.'%(fname)) times = io.read_ascii(fname,[0])[0] times = take( times,len(times)-1-arange(len(times))) # invert order times = times.tolist() self.times = times elif type(self.film['timesteps']) == types.ListType: self.times = self.film['timesteps'] elif type(self.film['timesteps']) == types.TupleType: t0 = self.film['timesteps'][0] t1 = self.film['timesteps'][1] dt = self.film['timesteps'][2] times = arange(t0,t1,dt) times = take( times,len(times)-1-arange(len(times))) # invert order times = times.tolist() self.times = times else: self.times=[] def set_next_time(self): if self.times!="every": if len(self.times)>0: self.times.pop() def get_next_time(self): if self.times=="every": return 0.0 if len(self.times)==0: return None else: return self.times[-1] def getftype(self): return self.ftype def setftype(self,ftype): self.ftype = ftype def ApplyFilmParam(self,nb,film): # set time reference for this file exec("nb.tnow = %s"%film['time']) # exec1 if film.has_key('exec'): if film['exec'] != None: exec(film['exec']) # macro if film.has_key('macro'): if film['macro'] != None: execfile(film['macro']) return nb def ApplyFrameParam(self,nb,frame): nbf = nb # exec if frame.has_key('exec'): if frame['exec'] != None: exec(frame['exec']) # macro if frame.has_key('macro'): if frame['macro'] != None: execfile(frame['macro']) return nbf def ApplyComponentParam(self,nbf,component): if component['id'][0] == '@': # here, all tasks must have an object containing all particles # ok, but not in the right order !!! nbfc = Nbody(componentid,self.getftype()) nbfc = nbfc.SendAllToAll() nbfc.sort() nbfc.componentid=component['id']#[1:] elif component['id'][0] == '#': nbfc = nbf nbfc.componentid = component['id']#[1:] else: nbfc = nbf.select(component['id']) nbfc.componentid = component['id'] # exec if component.has_key('exec'): if component['exec'] != None: exec(component['exec']) # macro if component.has_key('macro'): if component['macro'] != None: execfile(component['macro']) #print "------------------------" #print min(nbfc.u),max(nbfc.u) #print min(nbfc.rho),max(nbfc.rho) #print min(nbfc.tpe),max(nbfc.tpe) #print "temperature",min(nbfc.T()),max(nbfc.T()) #print nbfc.nbody #print min(nbfc.rsp),max(nbfc.rsp) #print "------------------------" return nbfc def dump(self,dict): # exctract dict atime = dict['atime'] pos = dict['pos'] # create nbody object nb = Nbody(pos=pos,ftype='gadget') nb.atime = atime # add other arrays if dict.has_key('vel'): nb.vel = dict['vel'] if dict.has_key('num'): nb.num = dict['num'] if dict.has_key('mass'): nb.mass = dict['mass'] if dict.has_key('tpe'): nb.tpe = dict['tpe'] if dict.has_key('u'): nb.u = dict['u'] if dict.has_key('rho'): nb.rho = dict['rho'] if dict.has_key('rsp'): nb.rsp = dict['rsp'] if dict.has_key('metals'): nb.metals = dict['metals'] #!!! 
nb.flag_chimie_extraheader=1 nb.ChimieNelements = 5 nb.flag_metals = 5 nb.ChimieSolarMassAbundances={} nb.ChimieSolarMassAbundances['Fe']=0.00176604 nb.init() #print "################################" #print "writing qq.dat" #print "################################" #nb.rename('qq.dat') #nb.write() self.dumpimage(nb=nb) def dumpimage(self,nb=None,file=None): # increment counter self.ifile+=1 # skip file if needed if self.ifile0: ################################################# # 1) use an outer script to create an img (this is a bit redundant with 2.2, see below ) ################################################# output= "/tmp/%015d.png"%(int(random.random()*1e17)) for cmd in frame['ext_cmd']: exec(cmd) if mpi.mpi_IsMaster(): img = Image.open(output) imgs.append(img) if os.path.exists(output): os.remove(output) continue # composition parameters if frame.has_key('cargs'): if len(frame['cargs'])!=0: frame['compose']=True datas = [] else: frame['compose']=False else: frame['cargs']=[] frame['compose']=False for component in frame['components']: if mpi.mpi_IsMaster(): print "------------------------" print "component",component['id'] - print "------------------------" + print "------------------------" nbfc = self.ApplyComponentParam(nbf,component) # find the observer position # 1) from params # 2) from pfile # 3) from tdir # and transform into parameter if frame['tdir']!=None: tfiles = glob.glob(os.path.join(frame['tdir'],"*")) tfiles.sort() bname = os.path.basename(file) - + + tfiles_for_this_file = [] for j in xrange(len(tfiles)): - tfile = "%s.%05d"%(os.path.basename(file),j) + tfile = "%s.%05d"%(bname,j) + #tfile = "%s.0.%05d"%(bname,j) # old or new version ? tmp_tfile = os.path.join(frame['tdir'],tfile) + if os.path.exists(tmp_tfile): tfiles_for_this_file.append(tmp_tfile) elif frame['pfile']!=None: if not os.path.isfile(frame['pfile']): print "parameter file %s does not exists(1)..."%(frame['pfile']) # read from pfile defined in frame param = ReadNbodyParameters(frame['pfile']) tfiles_for_this_file = [None] else: # take frame as parameter param = copy.copy(frame) tfiles_for_this_file = [None] # loop over different oberver positions for this file for iobs,tfile in enumerate(tfiles_for_this_file): if tfile!=None: + if mpi.mpi_IsMaster(): + print " using tfile : %s"%(tfile) param = ReadNbodyParameters(tfile) # add parameters defined by user in the parameter file for key in component.keys(): param[key] = component[key] # set image shape using frame param['shape'] = (frame['width'],frame['height']) # compute map mat = nbfc.CombiMap(param) if mpi.mpi_IsMaster(): if frame['compose']: datas.append(mat) if component.has_key('ext_cmd'): ################################################# # 1) use an outer script to create an img ################################################# if len(component['ext_cmd'])>0: output= "/tmp/%015d.png"%(int(random.random()*1e17)) for cmd in component['ext_cmd']: exec(cmd) if mpi.mpi_IsMaster(): img = Image.open(output) imgs.append(img) if os.path.exists(output): os.remove(output) elif self.film["format"]=="fits": ################################# # 1) save fits file ################################# output = '%04d_%04d-%s-%06d.fits'%(self.ifile,frame['id'],component['id'],iobs) output = os.path.join(self.imdir,output) print nb.atime,output if os.path.exists(output): os.remove(output) header = [('TIME',nb.tnow,'snapshot time')] io.WriteFits(transpose(mat), output, extraHeader = header) # compress if self.compress: gzip_compress(output) os.remove(output) elif 
self.film["format"]=="png": ################################# # 2) output png file or ... ################################# output = '%04d_%04d-%s-%06d.png'%(self.ifile,frame['id'],nbfc.componentid,iobs) output = os.path.join(self.imdir,output) print nb.atime,output # here, we should use component['scale'] ... not frame['scale'], no ? if not frame.has_key('scale'): frame['scale']=self.DEFAULT_SCALE if not frame.has_key('cd'): frame['cd']=self.DEFAULT_CD if not frame.has_key('mn'): frame['mn']=self.DEFAULT_MN if not frame.has_key('mx'): frame['mx']=self.DEFAULT_MX if not frame.has_key('palette'): frame['palette']=self.DEFAULT_PALETTE matint,mn_opt,mx_opt,cd_opt = set_ranges(mat,scale=frame['scale'],cd=frame['cd'],mn=frame['mn'],mx=frame['mx']) frame['mn'] = mn_opt frame['mx'] = mx_opt frame['cd'] = cd_opt img = get_image(matint,palette_name=frame['palette']) img.save(output) print frame['mn'],frame['mx'],frame['cd'] # need to create an img if component.has_key('to_img'): if component['to_img']==True: ########################################## # 2.1) create an img and apply commmands ########################################## # get params if not component.has_key('scale'): component['scale']=self.DEFAULT_SCALE if not component.has_key('cd'): component['cd']=self.DEFAULT_CD if not component.has_key('mn'): component['mn']=self.DEFAULT_MN if not component.has_key('mx'): component['mx']=self.DEFAULT_MX if not component.has_key('palette'): component['palette']=self.DEFAULT_PALETTE matint,mn_opt,mx_opt,cd_opt = set_ranges(mat,scale=component['scale'],cd=component['cd'],mn=component['mn'],mx=component['mx']) img = get_image(matint,palette_name=component['palette']) print mn_opt,mx_opt,cd_opt # here we can add img commands.... if component.has_key('img_cmd'): if len(component['img_cmd'])>0: for cmd in component['img_cmd']: exec(cmd) # append img to list img.atime = nb.atime imgs.append(img) elif type(component['to_img'])==types.StringType: ########################################## # 2.2) use an outer script to create an img from mat ########################################## output= "/tmp/%015d.png"%(int(random.random()*1e17)) # get params if not component.has_key('scale'): component['scale']=self.DEFAULT_SCALE if not component.has_key('cd'): component['cd']=self.DEFAULT_CD if not component.has_key('mn'): component['mn']=self.DEFAULT_MN if not component.has_key('mx'): component['mx']=self.DEFAULT_MX if not component.has_key('palette'): component['palette']=self.DEFAULT_PALETTE component['atime']=nbfc.atime mk = __import__(component['to_img'],globals(), locals(), [], -1) mk.MkImage(mat,output,component) img = Image.open(output) imgs.append(img) os.remove(output) del nbf ####################### # compose components ####################### if frame['compose']: if mpi.mpi_IsMaster(): img,cargs = Mtools.fits_compose_colors_img(datas,frame['cargs']) # save #output = '%04d_%04d.png'%(self.ifile,frame['id']) #output = os.path.join(self.imdir,output) #img.save(output) # append img to list img.atime = nb.atime imgs.append(img) del nb ####################### # compose frames ####################### if mpi.mpi_IsMaster(): if film.has_key('img_cmd'): if len(film['img_cmd'])>0: for cmd in film['img_cmd']: exec(cmd) output = '%04d.png'%(self.ifile) output = os.path.join(self.imdir,output) img.save(output) img.i=0 diff --git a/pNbody/ic.py b/pNbody/ic.py index 02cf2a5..a645c0c 100644 --- a/pNbody/ic.py +++ b/pNbody/ic.py @@ -1,1459 +1,1481 @@ # -*- coding: iso-8859-1 -*- from pNbody import * from numpy import * 
import iclib import profiles import sys try: from scipy.integrate import quadrature from scipy import special from scipy import optimize is_scipy = True except ImportError: is_scipy = False ''' # isotropic velocities random2 = random.random([n]) random3 = random.random([n]) p = 2*pi*random2 costh = 1.-2.*random3 sinth = sqrt(1.-costh**2) vx = v*sinth*cos(p) vy = v*sinth*sin(p) vz = v*costh # radial velocities x = nb.pos[:,0] y = nb.pos[:,1] z = nb.pos[:,2] vx = v*x/r vy = v*y/r vz = v*z/r Somes notes on the teneration of initial conditions: Spherical case -------------- 1) if M(r) is known analitically : 1.1) if it is invertible analitically --> for a given x between 0 and 1, find r from M(r) 1.2) if it si not invertible anatitically 1.2.1) use a Monte-Carlo approach (warning : may take time if the model is cuspy) 1.2.2) invert M(r) numerically and create a vector of r and Mr then, use generic_Mr 1.2.3) for a given x between 0 and 1, find r from M(r), by inverting M(r). May take time. ''' def get_local_n(n): ''' This function set the global number of particle each node must hand. ''' # set the number of particules per procs n0 = n npercpu = n/mpi.mpi_NTask() if mpi.mpi_IsMaster(): npercpu = npercpu + (n-npercpu*mpi.mpi_NTask()) n = npercpu ntot = mpi.mpi_allreduce(n) if ntot!=n0: print "ntot=%d while n0=%d"%(ntot,n0) sys.exit() return n,ntot def ComputeGridParameters(n,args,rmax,M,pr_fct,mr_fct,Neps_des,rc,ng): ''' This function computes dR, the appropriate grid used to approximate Mr. The grid is set in order to have "Neps_des" particles in the first division of the grid. Then, the radius of the grid follows an exponnential distribution up to rmax. 1) from the density distribution, the total mass and the number of particles, using a newton algorithm, it computes eps, the radius that will contains "Neps_des" particles 2) once eps is set, we determine rc (the grid scale length) from eps and ng, in order to have a grid with the a first cell equal to eps. if the computation of rc fails, we use the default value of rc The function takes the following arguments n : number of particles M : total mass rmax : max radius args : list of args for the profile pr_fct : profile function mr_fct : mass-radius function Neps_des : desired number of point in the first beam rc : default size of the first beam ng : number of grid divisions it returns : Rs : grid points eps : radius containing about Neps_des particles Neps : number of particles in eps ''' if not is_scipy: raise "module scipy needed for function ComputeModelParameters !" ########################### # some considerations ########################### # central density rho0 = M/apply(mr_fct,(rmax,)+args) # mass of particles m = M/float(n) args = args + (rho0,) rs = args[0] ########################################################################################################################### # find eps in order to have Neps_des particles in eps def RfromN(r,args,m,N): return apply(mr_fct,(r,)+args)/m - N try: eps = optimize.newton(RfromN, x0=rs, args = (args,m,Neps_des), fprime = None, tol = 1e-10, maxiter = 500) except: print "fail to get eps from newton, trying bisection." try: eps = optimize.bisection(RfromN, a=1e-10, b=rs, args = (args,m,Neps_des), xtol = 1e-5, maxiter = 500) print "ok" except: print "fail to get eps from bisection." 
print "quit" sys.exit() ########################################################################################################################### ########################################################################################################################### # compute the number of particles that will fall in eps Meps = apply(mr_fct,(eps,)+args) Neps = Meps/m ########################################################################################################################### ########################################################################################################################### # parameters for the adaptative grid # find eps in order to have Neps_des particles in eps def GetRc(rc,n,rmax,eps): return (exp((1./(ng-1))/rc)-1)/(exp(1./rc)-1) * rmax - eps try: #rc = optimize.newton(GetRc, x0=0.1, args = (n,rmax,eps), fprime = None, tol = 1e-20, maxiter = 500) rc = optimize.bisection(GetRc, a=1e-4, b=rmax, args = (n,rmax,eps), xtol = 1e-3, maxiter = 500) except: print "fail to get rc, using rc=%g."%rc gm = lambda i:(exp((i/float(ng-1))/rc)-1)/(exp(1./rc)-1) * rmax g = lambda r: float(ng-1)*rc*log(r/rmax*(exp(1./rc)-1.)+1.) Rs = gm(arange(ng)) return Rs,rc,eps,Neps,g,gm def ComputeGridParameters2(eps,nmax,args,rmax,M,pr_fct,mr_fct,Neps_des,rc,ng): ''' This function computes dR, the appropriate grid used to approximate Mr. The number of particle of the model is set in order to have "Neps_des" particles in the first division of the grid. Then, the radius of the grid follows an exponnential distribution up to rmax. 1) n is set from the total mass and Neps_des 2) once n is set, we determine rc (the grid scale length) from eps and ng, in order to have a grid with the a first cell equal to eps. if the computation of rc fails, we use the default value of rc The function takes the following arguments eps : the desired grid resolution nmax : max number of particles M : total mass rmax : max radius args : list of args for the profile pr_fct : profile function mr_fct : mass-radius function Neps_des : desired number of point in the first beam rc : default size of the first beam ng : number of grid divisions it returns : n : number of particles Rs : grid points rc : parameter of the scaling fct g : scaling fct gm : inverse of scaling fct ''' if not is_scipy: raise "module scipy needed for function ComputeModelParameters !" ########################### # some considerations ########################### # central density rho0 = M/apply(mr_fct,(rmax,)+args) args = args + (rho0,) rs = args[0] # number of particles n = int(Neps_des*M/apply(mr_fct,(eps,)+args)) # if n> nmax, find eps containing Neps_des particles if n>nmax: n = nmax # mass of particles m = M/float(n) ########################################################################################################################### # find eps in order to have Neps_des particles in eps def RfromN(r,args,m,N): return apply(mr_fct,(r,)+args)/m - N try: eps = optimize.newton(RfromN, x0=rs, args = (args,m,Neps_des), fprime = None, tol = 1e-10, maxiter = 500) except: print "fail to get eps from newton, trying bisection." try: eps = optimize.bisection(RfromN, a=1e-10, b=rs, args = (args,m,Neps_des), xtol = 1e-5, maxiter = 500) print "ok" except: print "fail to get eps from bisection." 
print "quit" sys.exit() ########################################################################################################################### ########################################################################################################################### # compute the number of particles that will fall in eps Meps = apply(mr_fct,(eps,)+args) Neps = Meps/m ########################################################################################################################### # mass of particles m = M/float(n) ########################################################################################################################### # parameters for the adaptative grid # find eps in order to have Neps_des particles in eps def GetRc(rc,ng,rmax,eps): return (exp((1./(ng-1))/rc)-1)/(exp(1./rc)-1) * rmax - eps try: #rc = optimize.newton(GetRc, x0=0.1, args = (n,rmax,eps), fprime = None, tol = 1e-20, maxiter = 500) rc = optimize.bisection(GetRc, a=1e-4, b=rmax, args = (ng,rmax,eps), xtol = 1e-3, maxiter = 500) except: print "fail to get rc, using rc=%g."%rc gm = lambda i:(exp((i/float(ng-1))/rc)-1)/(exp(1./rc)-1) * rmax g = lambda r: float(ng-1)*rc*log(r/rmax*(exp(1./rc)-1.)+1.) Rs = gm(arange(ng)) return n,eps,Rs,rc,g,gm def invert(x,rmin,rmax,fct,args,eps=1e-10): ''' return vector r that corresponds to fct(r,args)=x This routine uses a simple bissector algorithm ''' n = len(x) rrmin = rmin*ones(n) rrmax = rmax*ones(n) xxmin = fct(rrmin,args) - x xxmax = fct(rrmax,args) - x if sum((xxmin*xxmax>=0))!=0: print "No max between rmin and rmax ! for some points" sys.exit() k = 0 while max(abs(rrmax-rrmin)) > eps: print "it = %3d err = %8.1e"%(k,max(abs(rrmax-rrmin))) k = k +1 rr = (rrmax+rrmin)/2. xx = fct(rr,args)-x rrmax = where(xxmin*xx <= 0,rr,rrmax) rrmin = where(xxmin*xx > 0,rr,rrmin) xxmin = where(xxmin*xx > 0,xx,xxmin) return rr def box(n,a,b,c,irand=1,name='box.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules distributed in an homogeneous box of dimension a,b,c, centred at the origin radius rmax. ''' if type(n) == ndarray: rand_vec = n n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) # generate random numbers rand_vec = random.random([n,3]) pos = rand_vec-[0.5,0.5,0.5] pos = pos*array([2*a,2*b,2*c]) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def homodisk(n,a,b,dz,irand=1,name='homodisk.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules distributed in an homogeneous oval of radius a and b, and of thickness dz. ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) xx = random1**(1./2.) theta = 2.* random2*pi x = a* xx * cos(theta) y = b* xx * sin(theta) z = dz*random3 - dz/2. pos = transpose(array([x,y,z])) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def homosphere(n,a,b,c,irand=1,name='homosphere.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules distributed in an homogeneous triaxial sphere of axis a,b,c. 
''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) xm = (random1)**(1./3.) phi = random2*pi*2. costh = 1.-2.*random3 sinth = sqrt(1.-costh**2) axm = a*xm*sinth bxm = b*xm*sinth x = axm*cos(phi) y = bxm*sin(phi) z = c*xm*costh pos = transpose(array([x,y,z])) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def shell(n,r,irand=1,name='cell.dat',ftype='binary',verbose=False): ''' Shell of radius r ''' if type(n) == ndarray: random2 = n[:,0] random3 = n[:,1] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random2 = random.random(n) random3 = random.random(n) phi = random2*pi*2. costh = 1.-2.*random3 sinth = sqrt(1.-costh**2) x = r*sinth*cos(phi) y = r*sinth*sin(phi) z = r*costh pos = transpose(array([x,y,z])) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def plummer(n,a,b,c,eps,rmax,M=1.,irand=1,vel='no',name='plummer.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules distributed in a triaxial plummer model of axis a,b,c and core radius eps and max radius of rmax. rho = (1.+(r/eps)**2)**(-5/2) ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) # positions rmax = float(rmax) eps = float(eps) eps = eps/rmax xm = 1./(1.+(eps)**2)*random1**(2./3.) xm = eps*sqrt(xm/(1.-xm)) phi = 2*pi*random2 costh = 1.-2.*random3 sinth = sqrt(1.-costh**2) axm = rmax*a*xm*sinth bxm = rmax*b*xm*sinth x = axm*cos(phi) y = bxm*sin(phi) z = rmax*c*xm*costh pos = transpose(array([x,y,z])) # velocities if vel == 'yes': R = sqrt(x**2+y**2) rho = (3.*M/(4.*pi*eps**3))*(1+(R**2+z**2)/eps**2)**(-5./2.) C2 = z**2+eps**2 C = sqrt(C2) TD = M*C/(R**2+C2)**(3./2.) sz = sqrt(eps**2/(8.*pi*C2)/rho) *TD vx = sz*random.standard_normal([n]) vy = sz*random.standard_normal([n]) vz = sz*random.standard_normal([n]) vel = transpose(array([vx,vy,vz])) else: vel = ones([n,3])*0.0 # masses mass = ones([n])*M/ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def kuzmin(n,eps,dz,irand=1,name='kuzmin.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules distributed in a kuzmin (infinitely thin) disk rho = eps*M/(2*pi*(R**2+eps**2)**(3/2)) ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) rmax = 1 xx = random1 xx = sqrt((eps/(1-xx))**2-eps**2) theta = 2.* random2*pi x = rmax* xx * cos(theta) y = rmax* xx * sin(theta) z = dz*random3 - dz/2. 
pos = transpose(array([x,y,z])) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def expd(n,Hr,Hz,Rmax,Zmax,irand=1,name='expd.dat',ftype='binary',verbose=False): ''' Exonential disk rho = 1/(1+(r/rc)**2) ''' # set random seed irand=irand+mpi.mpi_Rank() # set the number of particules per procs n,ntot = get_local_n(n) pos = iclib.exponential_disk(n,Hr,Hz,Rmax,Zmax,irand) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def miyamoto_nagai(n,a,b,Rmax,Zmax,irand=1,fct=None,fRmax=0,name='miyamoto.dat',ftype='binary',verbose=False): ''' Miyamoto Nagai distribution ''' # set random seed irand = irand+mpi.mpi_Rank() # set the number of particules per procs n,ntot = get_local_n(n) if fct==None: pos = iclib.miyamoto_nagai(n,a,b,Rmax,Zmax,irand) else: pos = iclib.miyamoto_nagai_f(n,a,b,Rmax,Zmax,irand,fct,fRmax) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def generic_alpha(n,a,e,rmax,irand=1,fct=None,name='generic_alpha.dat',ftype='binary',verbose=False): ''' Generic alpha distribution : rho ~ (r+e)^a ''' # set random seed irand = irand+mpi.mpi_Rank() # set the number of particules per procs n,ntot = get_local_n(n) pos = iclib.generic_alpha(n,a,e,rmax,irand) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def dl2_mr(r,args): ''' Mass in the radius r for the distribution rho = (1.-eps*(r/rmax)**2) ''' eps = args[0] rmax = args[1] return ((4./3.)*r**3 - (4./5.)*eps*r**5/rmax**2)/(((4./3.) - (4./5.)*eps)*rmax**3) def dl2(n,a,b,c,eps,rmax,irand=1,name='dl2.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules distributed as rho = (1.-eps*(r/rmax)**2) ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) x = random1 xm = invert(x,0,rmax,dl2_mr,[eps,rmax]) phi = 2*pi*random2 costh = 1.-2.*random3 sinth = sqrt(1.-costh**2) axm = a*xm*sinth bxm = b*xm*sinth x = axm*cos(phi) y = bxm*sin(phi) z = c*xm*costh pos = transpose(array([x,y,z])) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def isothm_mr(r,args): ''' Mass in the radius r for the distribution rho = 1/(1+r/rc)**2 ''' rc = args[0] rm = args[1] cte = 2*rc**3*log(rc) + rc**3 Mr = r *rc**2 - 2*rc**3*log(rc+r ) - rc**4/(rc+r ) + cte Mx = rm*rc**2 - 2*rc**3*log(rc+rm) - rc**4/(rc+rm) + cte return Mr/Mx def isothm(n,rc,rmax,irand=1,name='isothm.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules distributed as rho = 1/(1+r/rc)**2 ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) x = random1 xm = invert(x,0,rmax,isothm_mr,[rc,rmax]) phi = 2*pi*random2 costh = 1.-2.*random3 sinth = sqrt(1.-costh**2) axm = xm*sinth bxm = xm*sinth x = 
axm*cos(phi) y = bxm*sin(phi) z = xm*costh pos = transpose(array([x,y,z])) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def pisothm_mr(r,args): ''' Mass in the radius r for the distribution rho = 1/(1+(r/rc)**2) ''' rc = args[0] rmn = args[1] rmx = args[2] Mr = rc**3 *( r/rc - arctan( r/rc) ) Mmn = rc**3 *( rmn/rc - arctan(rmn/rc) ) Mmx = rc**3 *( rmx/rc - arctan(rmx/rc) ) return (Mr-Mmn)/(Mmx-Mmn) def pisothm(n,rc,rmax,rmin=0,irand=1,name='pisothm.dat',ftype='binary',verbose=False): ''' Pseudo-isothermal sphere Mass in the radius r for the distribution rho = 1/(1+(r/rc)**2) ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) x = random1 xm = invert(x,rmin,rmax,pisothm_mr,[rc,rmin,rmax]) phi = 2*pi*random2 costh = 1.-2.*random3 sinth = sqrt(1.-costh**2) axm = xm*sinth bxm = xm*sinth x = axm*cos(phi) y = bxm*sin(phi) z = xm*costh pos = transpose(array([x,y,z])) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def nfw(n,rs,Rmax,dR,Rs=None,irand=1,name='nfw.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules following an nfw profile. rho = 1/[ (r/rs)(1+r/rs)^2 ] ''' def Mr(r,rs): return 4*pi*rs**3*( log(1.+r/rs) - r/(rs+r) ) if Rs==None: Rs = arange(0,Rmax+dR,dR) # should use a non linear grid Mrs = zeros(len(Rs)) ntot = len(Rs) for i in xrange(len(Rs)): Mrs[i] = Mr(Rs[i],rs) if verbose: print Rs[i],Mrs[i],i,'/',ntot # normalisation Mrs = Mrs/Mrs[-1] Mrs[0] = 0 # now use Mr nb = generic_Mr(n,rmax=Rmax,R=Rs,Mr=Mrs,irand=irand,name=name,ftype=ftype,verbose=verbose) return nb def hernquist(n,rs,Rmax,dR,Rs=None,irand=1,name='hernquist.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules following a hernquist modifed profile. rho = 1/( (r/rs) * (1+r/rs)**3 ) ''' def Mr(r,rs): return rs**3 * 0.5*(r/rs)**2/(1+r/rs)**2 if Rs==None: Rs = arange(0,Rmax+dR,dR) # should use a non linear grid Mrs = zeros(len(Rs)) ntot = len(Rs) for i in xrange(len(Rs)): Mrs[i] = Mr(Rs[i],rs) if verbose: print Rs[i],Mrs[i],i,'/',ntot # normalisation Mrs = Mrs/Mrs[-1] Mrs[0] = 0 # now use Mr nb = generic_Mr(n,rmax=Rmax,R=Rs,Mr=Mrs,irand=irand,name=name,ftype=ftype,verbose=verbose) return nb def burkert(n,rs,Rmax,dR,Rs=None,irand=1,name='burkert.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules following a burkert profile. rhob = 1 / ( ( 1 + r/rs ) * ( 1 + (r/rs)**2 ) ) ''' def Mr(r,rs): return 4*pi*rs**3*( 0.25*log((r/rs)**2+1) - 0.5*arctan(r/rs) + 0.5*log((r/rs)+1) ) if Rs==None: Rs = arange(0,Rmax+dR,dR) # should use a non linear grid Mrs = zeros(len(Rs)) ntot = len(Rs) for i in xrange(len(Rs)): Mrs[i] = Mr(Rs[i],rs) if verbose: print Rs[i],Mrs[i],i,'/',ntot # normalisation Mrs = Mrs/Mrs[-1] Mrs[0] = 0 # now use Mr nb = generic_Mr(n,rmax=Rmax,R=Rs,Mr=Mrs,irand=irand,name=name,ftype=ftype,verbose=verbose) return nb def nfwg(n,rs,gamma,Rmax,dR,Rs=None,irand=1,name='nfwg.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules following an nfw modifed profile. 
rho = 1/[ ((r/rs))**(gamma)(1+r/rs)^2 ]**(0.5*(3-gamma)) ''' if not is_scipy: raise "module scipy needed for function nfwg !" def Mr(r,rs,gamma): aa = 1.5-0.5*gamma cc = 2.5-0.5*gamma z = -r**2/rs**2 return 2*pi*(r/rs)**-gamma * r**3 * special.hyp2f1(aa,aa,cc,z)/aa if Rs==None: Rs = arange(0,Rmax+dR,dR) # should use a non linear grid Mrs = zeros(len(Rs)) ntot = len(Rs) for i in xrange(len(Rs)): Mrs[i] = Mr(Rs[i],rs,gamma) if verbose: print Rs[i],Mrs[i],i,'/',ntot # normalisation Mrs = Mrs/Mrs[-1] Mrs[0] = 0 # now use Mr nb = generic_Mr(n,rmax=Rmax,R=Rs,Mr=Mrs,irand=irand,name=name,ftype=ftype,verbose=verbose) return nb def generic2c(n,rs,a,b,Rmax,dR,Rs=None,irand=1,name='nfwg.dat',ftype='binary',verbose=False): ''' Return an Nbody object that contains n particules following an nfw modifed profile. rho = 1/( (r/rs)**a * (1+r/rs)**(b-a) ) ''' if not is_scipy: raise "module scipy needed for function generic2c !" def Mr(r,rs,a,b): a = float(a) b = float(b) aa = b-a bb = -a + 3 cc = 4 - a z = -r/rs return 4*pi*(r/rs)**(-a) * r**3 * special.hyp2f1(aa,bb,cc,z)/bb if Rs==None: Rs = arange(0,Rmax+dR,dR) # should use a non linear grid Mrs = zeros(len(Rs)) ntot = len(Rs) for i in xrange(len(Rs)): Mrs[i] = Mr(Rs[i],rs,a,b) if verbose: print Rs[i],Mrs[i],i,'/',ntot # normalisation Mrs = Mrs/Mrs[-1] Mrs[0] = 0 # now use Mr nb = generic_Mr(n,rmax=Rmax,R=Rs,Mr=Mrs,irand=irand,name=name,ftype=ftype,verbose=verbose) return nb #def generic_MxHyHz(n,xmax,ymax,zmax,x=None,Mx=None,name='box_Mx.dat',ftype='binary',verbose=False): # ''' # Distribute particles in a box. The density in x is defined in order to reproduce M(x) given by Mx. # Here, contrary to generic_Mx, the size of the box is defined. # ''' # # if type(nx) == ndarray: # random1 = nx # random2 = ny # random3 = nz # n = len(n) # else: # random1 = random.random([nx]) # random2 = random.random([ny]) # random3 = random.random([nz]) # # pos = iclib.generic_MxHyHz(n,xmax,ymax,zmax,x.astype(float32),Mx.astype(float32),random1.astype(float32),random2.astype(float32),random3.astype(float32),verbose) # # vel = ones([n,3])*0.0 # mass = ones([n])*1./n # # nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) # # return nb ################################# # geometric distributions ################################# def generic_Mx(n,xmax,x=None,Mx=None,irand=1,name='box_Mx.dat',ftype='binary',verbose=False): ''' Distribute particles in a box. 
The density in x is defined in order to reproduce M(x) given by Mx ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) pos = iclib.generic_Mx(n,xmax,x.astype(float32),Mx.astype(float32),random1.astype(float32),random2.astype(float32),random3.astype(float32),verbose) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb def generic_Mr(n,rmax,R=None,Mr=None,irand=1,name='sphere_Mr.dat',ftype='binary',verbose=False): ''' Distribute particles in order to reproduce M(R) given by Mr ''' if type(n) == ndarray: random1 = n[:,0] random2 = n[:,1] random3 = n[:,2] n = len(n) ntot = mpi.mpi_allreduce(n) else: # set random seed random.seed(irand+mpi.mpi_Rank()) # set the number of particules per procs n,ntot = get_local_n(n) random1 = random.random([n]) random2 = random.random([n]) random3 = random.random([n]) pos = iclib.generic_Mr(n,rmax,R.astype(float32),Mr.astype(float32),random1.astype(float32),random2.astype(float32),random3.astype(float32),verbose) vel = ones([n,3])*0.0 mass = ones([n])*1./ntot nb = Nbody(status='new',p_name=name,pos=pos,vel=vel,mass=mass,ftype=ftype) return nb ################################# # geometric primitives ################################# def line(M=1.,name='line.dat',ftype='binary'): x = array([-0.5,0.5]) y = array([0,0]) z = array([0,0]) n = len(x) pos = transpose((x,y,z)) mass= M*ones(n) nb = Nbody(status='new',pos=pos,mass=mass,p_name=name,ftype=ftype) return nb def square(M=1.,name='square.dat',ftype='binary'): x = array([-0.5,+0.5,+0.5,-0.5]) y = array([-0.5,-0.5,+0.5,+0.5]) z = array([0,0,0,0]) n = len(x) pos = transpose((x,y,z)) mass= M*ones(n) nb = Nbody(status='new',pos=pos,mass=mass,p_name=name,ftype=ftype) return nb def circle(n=10,M=1.,name='circle.dat',ftype='binary'): t = arange(0,2*pi,2*pi/n) x = cos(t) y = sin(t) z = zeros(n) n = len(x) pos = transpose((x,y,z)) mass= M*ones(n) nb = Nbody(status='new',pos=pos,mass=mass,p_name=name,ftype=ftype) return nb def grid(n=10,m=10,M=1.,name='grid.dat',ftype='binary'): dx = 1/float(n) dy = 1/float(m) xx = arange(0,1+dx,dx) yy = arange(0,1+dy,dy) x = zeros(4*n*m) y = zeros(4*n*m) z = zeros(4*n*m) k = 0 for i in xrange(n): for j in xrange(m): x[k+0] = xx[i+0] - 0.5 y[k+0] = yy[j+0] - 0.5 x[k+1] = xx[i+1] - 0.5 y[k+1] = yy[j+0] - 0.5 x[k+2] = xx[i+1] - 0.5 y[k+2] = yy[j+1] - 0.5 x[k+3] = xx[i+0] - 0.5 y[k+3] = yy[j+1] - 0.5 k = k + 4 n = len(x) pos = transpose((x,y,z)) mass= M*ones(n) nb = Nbody(status='new',pos=pos,mass=mass,p_name=name,ftype=ftype) return nb def cube(M=1.,name='cube.dat',ftype='binary'): x = array([0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0, 0,0, 1,1, 1,1, 0,0,])-0.5 y = array([0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0, 0,0, 0,0, 1,1, 1,1,])-0.5 z = array([0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1, 0,1, 0,1, 0,1, 0,1,])-0.5 n = len(x) pos = transpose((x,y,z)) mass= M*ones(n) nb = Nbody(status='new',pos=pos,mass=mass,p_name=name,ftype=ftype) return nb def sphere(n=10,m=10,M=1.,name='sphere.dat',ftype='binary'): pos = zeros((2*n*m,3),float32) ts = arange(0,2*pi,2*pi/n) zs = 2*arange(m)/float(m-1) - 1 k = 0 # parallels for i in range(m): for j in range(n): r = sin(arccos(zs[i])) x = r*cos(ts[j]) y = r*sin(ts[j]) z = zs[i] pos[k] = [x,y,z] k = k + 1 # meridians for j in range(n): for 
i in range(m): r = sin(arccos(zs[i])) x = r*cos(ts[j]) y = r*sin(ts[j]) z = zs[i] pos[k] = [x,y,z] k = k + 1 nb = Nbody(status='new',pos=pos,p_name=name,ftype=ftype) nb.mass = M*nb.mass return nb +def arrow(M=1.,name='arrow.dat',ftype='binary'): + + q = (1+sqrt(5)) / 2. # golden number + + lx = 1/q + x1 = lx/3. + x2 = 2*lx/3. + y1 = 1./q + + x = array([x1,x2,x2,lx,0.5*lx ,0,x1,x1 ]) + y = array([0,0,y1,y1,1,y1,y1,0]) + z = zeros(len(x)) + + n = len(x) + pos = transpose((x,y,z)) + mass= M*ones(n) + nb = Nbody(status='new',pos=pos,mass=mass,p_name=name,ftype=ftype) + + nb.translate([-lx/2, -1 , 0]) + nb.rotate(axis='z',angle=pi) + + return nb diff --git a/pNbody/io.py b/pNbody/io.py index e681a7f..9ca9840 100644 --- a/pNbody/io.py +++ b/pNbody/io.py @@ -1,765 +1,774 @@ # -*- coding: iso-8859-1 -*- # standard modules import os,sys,string,types import pickle # array module from numpy import * import pyfits import mpi ################################# def checkfile(name): ################################# ''' Check if a file exists. An error is generated if the file does not exists. Parameters ---------- name : the path to a filename Examples -------- >>> io.checkfile('an_existing_file') >>> >>> io.checkfile('a_non_existing_file') Traceback (most recent call last): File "", line 1, in File "/home/epfl/revaz/local/lib64/python2.6/site-packages/pNbody/io.py", line 33, in checkfile raise IOError(915,'file %s not found ! Pease check the file name.'%(name)) IOError: [Errno 915] file nofile not found ! Pease check the file name. ''' if name == None: raise Exception("file name set to None ! Please check the file name.") if not os.path.isfile(name): raise IOError(915,'file %s not found ! Pease check the file name.'%(name)) ################################# def end_of_file(f,pio='no',MPI=None): ################################# ''' Return True if we have reached the end of the file f, False instead Parameters ---------- f : ndarray or matrix object an open file pio : 'yes' or 'no' if the file is read in parallel or not MPI : MPI communicator Returns ------- status : Bool True if the we reached the end of the file False if not ''' if pio=='no': # here, the master decide for all slaves if mpi.ThisTask == 0: p1 = f.tell() f.seek(0,2) p2 = f.tell() f.seek(p1) if p1 == p2: status = True else: status = False else: status = None status = mpi.mpi_bcast(status,0) return status else: # each processus decide for himself p1 = f.tell() f.seek(0,2) p2 = f.tell() f.seek(p1) if p1 == p2: status = True else: status = False return status ##################################################### def write_array(file,vec): ##################################################### ''' Write an array to a file, in a very simple ascii format. Parameters ---------- file : the path to a file vec : an ndarray object Examples -------- >>> from numpy import * >>> x = array([1,2,3]) >>> io.write_array('/tmp/array.dat',x) ''' f = open(file,'w') for i in range(len(vec)): f.write("%f\n"%vec[i]) f.close() ##################################################### def read_ascii(file,columns=None,lines=None,dtype=float,skipheader=False,cchar='#'): ##################################################### ''' Read an ascii file. The function allows to set the number of columns or line to read. If it contains a header, the header is used to label all column. In this case, a dictionary is returned. 
Parameters ---------- file : the path to a file or an open file columns : list the list of the columns to read if none, all columns are read lines : list the list of the lines to read if none, all lines are read dtype : dtype the ndtype of the objects to read skipheader : bool if true, do not read the header if there is one cchar : char lines begining with cchar are skiped the first line is considered as the header Returns ------- data : Dict or ndarray A python dictionary or an ndarray object Examples -------- >>> from numpy import * >>> x = arange(10) >>> y = x*x >>> f = open('afile.txt','w') >>> f.write("# x y") >>> for i in xrange(len(x)): ... f.write('%g %g'%(x[i],y[i])) ... >>> f.close() >>> from pNbody import io >>> data = io.read_ascii("afile.txt") >>> data['x'] array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) >>> data['y'] array([ 0., 1., 4., 9., 16., 25., 36., 49., 64., 81.]) ''' def RemoveComments(l): if l[0]==cchar: return None else: return l def toNumList(l): return map(dtype,l) if type(file) != types.FileType: f = open(file,'r') else: f = file # read header while there is one while 1: fpos = f.tell() header = f.readline() if header[0] != cchar: f.seek(fpos) header = None break else: if skipheader: header = None else: # create dict from header header = string.strip(header[2:]) elts = string.split(header) break ''' # read header if there is one header = f.readline() if header[0] != cchar: f.seek(0) header = None else: if skipheader: header = None else: # create dict from header header = string.strip(header[2:]) elts = string.split(header) ''' # now, read the file content lines = f.readlines() # remove trailing lines = map(string.strip, lines) # remove comments #lines = map(RemoveComments, lines) # split lines = map(string.split, lines) # convert into float lines = map(toNumList, lines) # convert into array lines = array(map(array, lines)) # transpose lines = transpose(lines) if header != None: iobs = {} i = 0 for elt in elts: iobs[elt]=i i = i + 1 vals = {} for key in iobs.keys(): vals[key] = lines[iobs[key]] return vals # return if columns == None: return lines else: return lines.take(axis=0,indices=columns) ##################################################### def write_dmp(file,data): ##################################################### ''' Write a dmp (pickle) file. In other word, dump the data object. Parameters ---------- file : the path to a file data : a pickable python object Examples -------- >>> x = {'a':1,'b':2} >>> io.write_dmp('/tmp/afile.dmp',x) ''' f = open(file,'w') pickle.dump(data, f) f.close() ##################################################### def read_dmp(file): ##################################################### ''' Read a dmp (pickle) file. 
Parameters ---------- file : the path to a file Returns ------- data : a python object Examples -------- >>> x = {'a':1,'b':2} >>> io.write_dmp('/tmp/afile.dmp',x) >>> y = io.read_dmp('/tmp/afile.dmp') >>> y {'a': 1, 'b': 2} ''' f = open(file,'r') data = pickle.load(f) f.close() return data ##################################################### def WriteFits(data, filename, extraHeader = None) : ##################################################### ''' Write a fits file ''' # image creation fitsimg = pyfits.HDUList() # add data hdu = pyfits.PrimaryHDU() hdu.data = data fitsimg.append(hdu) # add keys keys = [] if extraHeader != None: #keys.append(('INSTRUME','st4 SBIG ccd camera','Instrument name')) #keys.append(('LOCATION',"175 OFXB St-Luc (VS)",'Location')) keys = extraHeader hdr = fitsimg[0].header for key in keys: hdr.update(key[0],key[1],comment=key[2]) fitsimg.writeto(filename) ##################################################### def ReadFits(filename) : ##################################################### ''' Read a fits file. ''' # read image fitsimg = pyfits.open(filename) data = fitsimg[0].data return data ################################# -def readblock(f,data_type,shape=None,byteorder=sys.byteorder): +def readblock(f,data_type,shape=None,byteorder=sys.byteorder,skip=False): ################################# ''' data_type = int,float32,float or data_type = array shape = tuple ''' # compute the number of bytes that should be read nbytes_to_read=None if shape!=None: shape_a = array(shape) nelts_to_read = shape_a[0] for n in shape_a[1:]: nelts_to_read = nelts_to_read*n nbytes_to_read = nelts_to_read*dtype(data_type).itemsize try: nb1 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb1.byteswap(True) nb1 = nb1[0] nbytes=nb1 # check if nbytes_to_read: if nbytes_to_read!=nbytes: print "inconsistent block header, using nbytes=%d instead"%nbytes_to_read nbytes=nbytes_to_read - - - - + except IndexError: raise "ReadBlockError" - - if type(data_type) == types.TupleType: - - data = [] - for tpe in data_type: - - if type(tpe) == int: - val = f.read(tpe) - else: - bytes = dtype(tpe).itemsize - val = fromstring(f.read(bytes),tpe) - if sys.byteorder != byteorder: - val.byteswap(True) - - val = val[0] - - data.append(val) - + + + if skip: + f.seek(nbytes,1) + data = None + shape = None + print " skipping %d bytes... 
"%(nbytes) + + else: - data = fromstring(f.read(nbytes),data_type) - if sys.byteorder != byteorder: - data.byteswap(True) + + if type(data_type) == types.TupleType: + + data = [] + for tpe in data_type: + + if type(tpe) == int: + val = f.read(tpe) + else: + bytes = dtype(tpe).itemsize + val = fromstring(f.read(bytes),tpe) + if sys.byteorder != byteorder: + val.byteswap(True) + + val = val[0] + + data.append(val) + + else: + data = fromstring(f.read(nbytes),data_type) + if sys.byteorder != byteorder: + data.byteswap(True) + + nb2 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb2.byteswap(True) nb2 = nb2[0] if nb1 != nb2: print "ReadBlockError","nb1=%d nb2=%d"%(nb1,nb2) raise "ReadBlockError","nb1=%d nb2=%d"%(nb1,nb2) # reshape if needed if shape != None: data.shape=shape return data ################################# def ReadBlock(f,data_type,shape=None,byteorder=sys.byteorder,pio='no'): ################################# ''' data_type = int,float32,float or data_type = array shape = tuple pio : parallel io, 'yes' or 'no' if 'yes', each proc read each file if 'no', proc 0 read and send to each other ''' if mpi.NTask==1: data = readblock(f,data_type=data_type,shape=shape,byteorder=byteorder) return data if pio == 'yes': data = readblock(f,data_type=data_type,shape=shape,byteorder=byteorder) return data else: data = mpi.mpi_ReadAndSendBlock(f,data_type=data_type,shape=shape,byteorder=byteorder) return data ################################# def ReadArray(f,data_type,shape=None,byteorder=sys.byteorder,pio='no',nlocal=None): ################################# ''' data_type = int,float32,float or data_type = array shape = tuple ''' if mpi.NTask==1: data = readblock(f,data_type=data_type,shape=shape,byteorder=byteorder) return data if pio == 'yes': data = readblock(f,data_type=data_type,shape=shape,byteorder=byteorder) return data else: data = mpi.mpi_OldReadAndSendArray(f,data_type,shape=shape,byteorder=byteorder,nlocal=nlocal) return data ################################# -def ReadDataBlock(f,data_type,shape=None,byteorder=sys.byteorder,pio='no',npart=None): +def ReadDataBlock(f,data_type,shape=None,byteorder=sys.byteorder,pio='no',npart=None,skip=False): ################################# ''' Read a block containg data. If NTask = 1 or pio = 'yes', the block is read normally. If NTask > 1 and pio = 'no', the master reads the block and send the data to the slaves. In the second case : a) the master send N/Ntask element to each task. b) if the var npart is present, he send Np/Ntask to each task, for each Np of npart. 
data_type = array shape = tuple ''' if mpi.NTask==1 or pio == 'yes': - data = readblock(f,data_type=data_type,shape=shape,byteorder=byteorder) + data = readblock(f,data_type=data_type,shape=shape,byteorder=byteorder,skip=skip) return data else: - data = mpi.mpi_ReadAndSendArray(f,data_type,shape=shape,byteorder=byteorder,npart=npart) + data = mpi.mpi_ReadAndSendArray(f,data_type,shape=shape,byteorder=byteorder,npart=npart,skip=skip) return data ################################# def writeblock(f,data,byteorder=sys.byteorder): ################################# ''' data = array or data = ((x,float32),(y,int),(z,float32),(label,40)) shape = tuple ''' if type(data) == types.TupleType: # first, compute nbytes nbytes = 0 for dat in data: if type(dat[0])==types.StringType: nbytes = nbytes + dat[1] else: nbytes = nbytes + array([dat[0]],dat[1]).type().bytes* array([dat[0]],dat[1]).size() nbytes = array([nbytes],int32) # write block if sys.byteorder != byteorder: nbytes.byteswap(True) f.write(nbytes.tostring()) for dat in data: if type(dat[0])==types.StringType: f.write(string.ljust(dat[0],dat[1])[:dat[1]]) else: ar = array([dat[0]],dat[1]) if sys.byteorder != byteorder: ar.byteswap(True) f.write(ar.tostring()) f.write(nbytes.tostring()) else: # write block #nbytes = array([data.type().bytes*data.size()],int) nbytes = array([data.nbytes],int32) if sys.byteorder != byteorder: nbytes.byteswap(True) data.byteswap(True) f.write(nbytes.tostring()) f.write(data.tostring()) f.write(nbytes.tostring()) ################################# def WriteBlock(f,data,byteorder=sys.byteorder): ################################# ''' data = ((x,float32),(y,int),(z,float32),(label,40)) shape = tuple ''' if f!=None: if type(data) == types.TupleType: # first, compute nbytes nbytes = 0 for dat in data: if type(dat[0])==types.StringType: nbytes = nbytes + dat[1] elif type(dat[0]) == string_: nbytes = nbytes + dat[1] else: #nbytes = nbytes + array([dat[0]],dat[1]).type().bytes* array([dat[0]],dat[1]).size() nbytes = nbytes + array([dat[0]],dat[1]).nbytes nbytes = array([nbytes],int32) # write block if sys.byteorder != byteorder: nbytes.byteswap(True) f.write(nbytes.tostring()) for dat in data: if type(dat[0])==types.StringType: f.write(string.ljust(dat[0],dat[1])[:dat[1]]) elif type(dat[0]) == string_: f.write(string.ljust(dat[0],dat[1])[:dat[1]]) else: ar = array([dat[0]],dat[1]) if sys.byteorder != byteorder: ar.byteswap(True) f.write(ar.tostring()) f.write(nbytes.tostring()) ################################# def WriteArray(f,data,byteorder=sys.byteorder,pio='no',npart=None): ################################# ''' data = array shape = tuple ''' if mpi.NTask==1 or pio == 'yes': writeblock(f,data,byteorder=byteorder) else: mpi.mpi_GatherAndWriteArray(f,data,byteorder=byteorder,npart=npart) ################################# def WriteDataBlock(f,data,byteorder=sys.byteorder,pio='no',npart=None): ################################# ''' Write a block containg data. If NTask = 1 or pio = 'yes', the block is written normally. If NTask > 1 and pio = 'no', the master get the block from the slaves and write it. In the second case : a) the master get N/Ntask element from each task. b) if the var npart is present, he get Np/Ntask from each task, for each Np of npart. 
data = array shape = tuple ''' if mpi.NTask==1 or pio == 'yes': writeblock(f,data,byteorder=byteorder) else: mpi.mpi_GatherAndWriteArray(f,data,byteorder=byteorder,npart=npart) ############################################################### # # some special function reading gadget related files # ############################################################### ################################# def read_cooling(file): ################################# ''' Read cooling file ''' f = open(file,'r') f.readline() f.readline() lines = f.readlines() f.close() lines = map(string.strip,lines) elts = map(string.split,lines) logT = array(map(lambda x:float(x[0]),elts)) logL0 = array(map(lambda x:float(x[1]),elts)) logL1 = array(map(lambda x:float(x[2]),elts)) logL2 = array(map(lambda x:float(x[3]),elts)) logL3 = array(map(lambda x:float(x[4]),elts)) logL4 = array(map(lambda x:float(x[5]),elts)) logL5 = array(map(lambda x:float(x[6]),elts)) logL6 = array(map(lambda x:float(x[7]),elts)) return logT,logL0,logL1,logL2,logL3,logL4,logL5,logL6 ################################# def read_params(file): ################################# ''' Read params Gadget file and return the content in a dictionary ''' f = open(file) lines = f.readlines() f.close() # remove empty lines lines = filter(lambda l:l!='\n', lines) # remove trailing lines = map(string.strip, lines) # remove comments lines = filter(lambda x:x[0]!='%', lines) # split lines elts = map(string.split,lines) # make dictionary params = {} for e in elts: try : params[e[0]]=float(e[1]) except ValueError: params[e[0]]= e[1] return params diff --git a/pNbody/main.py b/pNbody/main.py index ef90432..a246a98 100644 --- a/pNbody/main.py +++ b/pNbody/main.py @@ -1,5817 +1,5831 @@ # -*- coding: utf-8 -*- # some standard modules import os,sys,string,types,glob from copy import deepcopy import warnings # array module from numpy import * from numpy import clip as numclip from numpy import random as RandomArray # module that init parameters from parameters import * # nbody python modules import io from libutil import * from palette import * import geometry as geo import fourier import param import liblog import libgrid import libdisk import libutil import nbdrklib # nbody C modules from myNumeric import * from mapping import * from nbodymodule import * # Gtools module (now integrated in nbody) #import Gtools as Gt import units import ctes import thermodyn import coolinglib import cosmo import treelib import asciilib try: import ptreelib except: pass try: import libqt except: pass try: import SM except: pass try: # all this is usefull to read files from mpi4py import MPI except: MPI = None import mpi # maybe we should send mpi instead of MPI FLOAT = float #################################################################################################################################### # # DEFAULT CLASS NBODY # #################################################################################################################################### class NbodyDefault: ''' This is the reference Nbody class. This is the constructor for the **Nbody** object. 
Optional arguments are: p_name : name of the file in case of multiple files, files must be included in a list ["file1","file2"] pos : positions (3xN array) vel : positions (3xN array) mass : positions (1x array) num : id of particles (1xN array) tpe : type of particles (1xN array) ftype : type of input file (binary,ascii) status : 'old' : open an old file 'new' : create a new object byteorder : 'little' or 'big' pio : parallel io : 'yes' or 'no' local : True=local object, False=global object (paralellized) Not implemeted Yet log : log file unitsfile : define the type of units by default this class initialize the following variables : self.p_name : name of the file(s) to read or write self.pos : array of positions self.vel : array of velocities self.mass : array of masses self.num : array of id self.tpe : array of types self.ftype : type of the file self.status : object status ('old' or 'new') self.byteorder : byter order ('little' or 'big') self.pio : parallel io ('yes' or 'no') self.log : log object # new variables self.nbody : local number of particles self.nbody_tot : total number of particles self.mass_tot : total mass self.npart : number of particles of each type self.npart_tot : total number of particles of each type self.spec_vars : dictionary of variables specific for the format used self.spec_vect : dictionary of vector specific for the format used ''' - def __init__(self,p_name=None,pos=None,vel=None,mass=None,num=None,tpe=None,ftype=None,status='old',byteorder=sys.byteorder,pio='no',local=False,log=None,unitsfile=None): + def __init__(self,p_name=None,pos=None,vel=None,mass=None,num=None,tpe=None,ftype=None,status='old',byteorder=sys.byteorder,pio='no',local=False,log=None,unitsfile=None,skipped_io_blocks=[]): ################################# # init vars - ################################# /home/leo/.pNbody/formats/gadget.py + ################################# if p_name == None: status = 'new' if status=="new": self.verbose=False else: self.verbose=True self.set_filenames(p_name,pio=pio) self.pos = pos self.vel = vel self.mass = mass self.num = num self.tpe = tpe self.ftype = self.__class__.__name__ self.status = status self.byteorder = byteorder self.pio = pio self.log = log self.nbody = None self.nbody_tot = None self.mass_tot = None self.npart = None self.npart_tot = None self.unitsfile = unitsfile self.localsystem_of_units=None + self.skipped_io_blocks = skipped_io_blocks + ################################# # init units ################################# self.init_units() ################################# # init other parameters ################################# self.parameters = param.Params(PARAMETERFILE,None) self.defaultparameters = self.parameters.get_dic() # log if self.log == None: self.log = liblog.Log(os.path.join(HOME,'.nbodylog'),show='yes') ################################################### # in case of an old file, open and read the file(s) ################################################### if status=='old': self.read() ################################################### # in case of a new file ################################################### elif status=='new': for i in range(len(self.p_name)): if self.p_name[i] == None: self.p_name[i] = 'file.dat' ################################################### # final initialisation ################################################### self.init() ################################################### # check consistency ################################################### # to be done ################################# # # 
init functions # ################################# ################################# def init(self): ################################# ''' Initialize normal and specific class variables ''' # 1) find the number of particles self.nbody = self.get_nbody() # 2) define undefined vectors if self.pos == None: self.pos = zeros((self.nbody,3),float32) self.pos = self.pos.astype(float32) else: self.pos = self.pos.astype(float32) if self.vel == None: self.vel = zeros((self.nbody,3),float32) self.vel = self.vel.astype(float32) else: self.vel = self.vel.astype(float32) if self.mass == None: self.mass = ones((self.nbody, ),float32)/self.nbody self.mass = self.mass.astype(float32) else: self.mass = self.mass.astype(float32) if self.tpe == None: self.tpe = zeros(self.nbody,int) self.tpe = self.tpe.astype(int) else: self.tpe = self.tpe.astype(int) if self.num == None: self.num = self.get_num() self.num = self.num.astype(int) else: self.num = self.num.astype(int) # 3) other variables self.nbody_tot = self.get_nbody_tot() self.mass_tot = self.get_mass_tot() self.npart = self.get_npart() self.npart_tot = self.get_npart_tot() # Init specific class variables # (may be redundant with make_specific_variables_global) self.spec_vars = self.get_default_spec_vars() list_of_vars = self.get_list_of_vars() for name in self.spec_vars.keys(): try: list_of_vars.index(name) except ValueError: setattr(self, name, self.spec_vars[name]) # Init specific class vectors self.spec_vect = self.get_default_spec_array() list_of_vect = self.get_list_of_array() for name in self.spec_vect.keys(): try: list_of_vect.index(name) except ValueError: setattr(self, name, ones(self.nbody,self.spec_vect[name][1])*self.spec_vect[name][0]) # init specific parameters self.InitSpec() # sph parameters/variables self.InitSphParameters() ################################# def InitSpec(self): ################################# """ This function allows to initialize specific parameters. It must be defined in format files. """ pass ################################# def get_format_file(self): ################################# "return the format file" return self._formatfile ################################# def get_ftype(self,ftype='binary'): ################################# """ get the current used format """ return self.ftype ################################# def set_ftype(self,ftype='binary'): ################################# """ Change the type of the file ftype : type of the file """ if mpi.NTask > 1: raise "Warning","set_ftype function is currently not suported with multi proc." 
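# Hedged sketch, not part of this patch: creating a new object from raw arrays
# with the constructor documented above.  Vectors left undefined (vel, num,
# tpe) get the defaults from init(), and mass defaults to 1/nbody per particle.
# The ftype 'gadget' and the file name 'new.dat' are assumptions for the
# example, not fixed API.
from numpy import random, float32
from pNbody import Nbody

pos = random.random((1000, 3)).astype(float32)
nb = Nbody(status='new', pos=pos, ftype='gadget')
nb.rename('new.dat')
# nb.write()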
new = Nbody(status='new',ftype=ftype) # now, copy all var linked to the model for name in self.get_list_of_vars(): if name != 'ftype': setattr(new, name, getattr(self,name)) # now, copy all array linked to the model for name in self.get_list_of_array(): vec = getattr(self,name) setattr(new, name, vec) # other vars new.init() return new ################################# def get_num(self): ################################# """ Compute the num variable in order to be consistent with particles types """ # compute npart_all if self.npart == None: npart = self.get_npart() else: npart = self.npart npart_all = array(mpi.mpi_allgather(npart)) return mpi.mpi_sarange(npart_all) # + 1 ################################# def get_default_spec_vars(self): ################################# ''' return specific variables default values for the class ''' return {} ################################# def get_default_spec_array(self): ################################# ''' return specific array default values for the class ''' return {} ################################# def set_pio(self,pio): ################################# """ Set parallel input/output or not io pio : 'yes' or 'no' """ self.pio = pio self.set_filenames(self.p_name_global,pio=pio) if pio=='yes': self.num_files = mpi.NTask else: self.num_files = 1 ################################# def rename(self,p_name=None): ################################# """ Rename the files p_name : new name(s) """ if p_name != None: self.set_filenames(p_name,pio=self.pio) ################################# def set_filenames(self,p_name,pio=None): ################################# """ Set the local and global names p_name : new name(s) pio : 'yes' or 'no' """ if type(p_name) == types.ListType: self.p_name_global = [] self.p_name = [] for name in p_name: if pio == 'yes': self.p_name_global.append(name) self.p_name.append("%s.%d"%(name,mpi.mpi_ThisTask())) else: self.p_name_global.append(name) self.p_name.append(name) else: if pio == 'yes': self.p_name_global = [p_name] self.p_name = ["%s.%d"%(p_name,mpi.mpi_ThisTask())] else: self.p_name_global = [p_name] self.p_name = [p_name] ################################# def get_ntype(self): ################################# """ return the number of paticles types """ return len(self.npart) ################################# def get_nbody(self): ################################# """ Return the local number of particles. """ if self.pos != None: nbody = len(self.pos) elif self.vel != None: nbody = len(self.vel) elif self.mass != None: nbody = len(self.mass) elif self.num != None: nbody = len(self.num) elif self.tpe != None: nbody = len(self.tpe) else: nbody = 0 return nbody ################################# def get_nbody_tot(self): ################################# """ Return the total number of particles. 
""" nbody_tot = mpi.mpi_allreduce(self.nbody) return nbody_tot ################################# def get_npart(self): ################################# """ Return the local number of particles of each types, based on the variable tpe """ npart = array([],int) n = 0 if self.tpe==None: return npart.tolist() for tpe in range(self.get_mxntpe()): np = sum( (self.tpe==tpe).astype(int) ) npart = concatenate((npart,array([np]))) n = n + np if n != self.nbody: print "get_npart : n (=%d) is different from self.nbody (=%d)"%(n,self.nbody) raise "get_npart : n is different from self.nbody" return npart.tolist() ################################# def get_npart_tot(self): ################################# """ Return the total number of particles of each types. """ npart = array(self.npart) npart_tot = mpi.mpi_allreduce(npart) npart_tot = npart_tot.tolist() return npart_tot ################################# def get_npart_all(self,npart_tot,NTask): ################################# ''' From npart_tot, the total number of particles per type, return npart_per_proc, an array where each element corresponds to the value of npart of each process. ''' if (type(npart_tot) != types.ListType) and (type(npart_tot) !=ndarray): npart_tot = array([npart_tot]) ntype = len(npart_tot) npart_all = zeros((NTask,ntype)) for i in range(len(npart_tot)): for Task in range(NTask-1,-1,-1): npart_all[Task,i] = npart_tot[i]/NTask + npart_tot[i]%NTask*(Task==0) return npart_all ################################# def get_npart_and_npart_all(self,npart): ################################# ''' From npart (usually read for the header of a file), compute : npart : number of particles in each type npart_tot : total number of particles in each type npart_all : npart for each process. ''' ################################# def get_mxntpe(self): ################################# ''' Return the max number of type for this format ''' return 6 ################################# def make_default_vars_global(self): ################################# ''' Make specific variables global ''' self.spec_vars = self.get_default_spec_vars() for name in self.spec_vars.keys(): if not self.has_var(name): setattr(self, name, self.spec_vars[name]) ################################# def set_npart(self,npart): ################################# """ Set the local number of particles of each types. This function modifies the variable self.tpe """ if sum(array(npart)) > self.nbody: raise "Error (set_npart)","sum(npart) is greater than nbody" i = 0 n0 = 0 for n in npart: self.tpe[n0:n0+n] = ones(n)*i i = i + 1 n0 = n0+n self.tpe[n0:self.nbody] = ones(self.nbody-n0)*i self.npart = self.get_npart() self.npart_tot = self.get_npart_tot() ################################# def set_tpe(self,tpe): ################################# """ Set all particles to the type tpe """ self.tpe = ones(self.nbody)*tpe self.npart = self.get_npart() self.npart_tot = self.get_npart_tot() ################################# # # parameters functions # ################################# ''' Warning, these routines are a bit bad... 
''' def set_parameters(self,params): ''' Set parameters for the class ''' self.parameters = param.Params(PARAMETERFILE,None) self.parameters.params = params.params self.defaultparameters = self.parameters.get_dic() ################################# # # units functions # ################################# ''' There is several ways to set the units in pNbody In an object, the units are stored in self.localsystem_of_units which is a UnitSystem object defined in units.py We define a unit system by giving Unit_lenght, Unit_mass, Unit_time, Unit_K, Unit_mol, and Unit_C Actually only Unit_lenght, Unit_mass, Unit_time are used, all are Units object (units.py) Following Gadget2, easy ways to definde units is to give three floats, UnitVelocity_in_cm_per_s UnitMass_in_g UnitLength_in_cm This is done using the method self.set_local_system_of_units() which uses UnitVelocity_in_cm_per_s,UnitMass_in_g,UnitLength_in_cm if they are given, or read a gadget parameter file or read a pNbody unitsparameter file or use the default unitsparameter file. ''' def init_units(self): ''' This function is responsible for the units initialization. It will create : self.unitsparameters that contains parameters like - the hydrogen mass fraction, - the metalicity ionisation flag - the adiabatic index - ... and self.localsystem_of_units a UnitSystem object that really defines the system of units in the Nbody object. It uses the values : UnitLength_in_cm UnitMass_in_g UnitVelocity_in_cm_per_s All physical values computed in pNbody should use self.localsystem_of_units to be converted in other units. self.unitsparameters is usefull if other parameters needs to be known, like the adiabatic index, etc. ''' # do not init the system of unit if it already exists if self.localsystem_of_units!=None: return self.unitsparameters = param.Params(UNITSPARAMETERFILE,None) if self.unitsfile!=None: ############################################################## # 1) this part should be only in the gadget.py format file, no ? BOF, non # 2) we could simplify using self.set_local_system_of_units() # 3) and some options -> but this needs to update self.unitsparameters ############################################################## # if it is a gadget parameter file try: gparams = io.read_params(self.unitsfile) self.unitsparameters.set('HubbleParam', gparams['HubbleParam']) self.unitsparameters.set('UnitLength_in_cm', gparams['UnitLength_in_cm']) self.unitsparameters.set('UnitMass_in_g', gparams['UnitMass_in_g']) self.unitsparameters.set('UnitVelocity_in_cm_per_s',gparams['UnitVelocity_in_cm_per_s']) # those parameters may be in the header of the file self.unitsparameters.set('Omega0', gparams['Omega0']) self.unitsparameters.set('OmegaLambda', gparams['OmegaLambda']) self.unitsparameters.set('OmegaBaryon', gparams['OmegaBaryon']) self.unitsparameters.set('BoxSize', gparams['BoxSize']) self.unitsparameters.set('ComovingIntegrationOn', gparams['ComovingIntegrationOn']) #self.set_local_system_of_units(gadgetparameterfile=self.unitsfile) except: # try to read a pNbody units file try: self.unitsparameters = param.Params(self.unitsfile,None) #self.set_local_system_of_units(unitparameterfile=self.unitsfile) except: raise IOError(015,'format of unitsfile %s unknown ! Pease check.'%(self.unitsfile)) # define local system of units it it does not exists #if not self.has_var("localsystem_of_units"): self.set_local_system_of_units() # print info #self.localsystem_of_units.info() def set_unitsparameters(self,unitsparams): ''' Set units parameters for the class. 
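# Hedged usage sketch, not part of this patch: the equivalent ways of fixing
# the unit system described above via set_local_system_of_units(), on an
# existing object nb.  File names and numerical values are hypothetical.
#
# 1) explicit values
# nb.set_local_system_of_units(UnitLength_in_cm=3.085e21,
#                              UnitVelocity_in_cm_per_s=1e5,
#                              UnitMass_in_g=1.989e43)
#
# 2) from a Gadget parameter file
# nb.set_local_system_of_units(gadgetparameterfile='params.txt')
#
# 3) from a pNbody units parameter file
# nb.set_local_system_of_units(unitparameterfile='unitsparameters')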
''' print "!!!!!! in set_unitsparameters !!!!" print "!!!!!! this is bad !!!! we should never use UNITSPARAMETERFILE" print "!!!!!! this is bad !!!! we should never use UNITSPARAMETERFILE" self.unitsparameters = param.Params(UNITSPARAMETERFILE,None) self.unitsparameters.params = unitsparams.params self.set_local_system_of_units() def set_local_system_of_units(self,params=None,UnitLength_in_cm=None,UnitVelocity_in_cm_per_s=None,UnitMass_in_g=None,unitparameterfile=None,gadgetparameterfile=None): ''' Set local system of units using UnitLength_in_cm,UnitVelocity_in_cm_per_s,UnitMass_in_g 1) if nothing is given, we use self.unitsparameters to obtain these values 2) if UnitLength_in_cm UnitVelocity_in_cm_per_s UnitMass_in_g are given, we use them 2b) if UnitLength_in_cm,UnitVelocity_in_cm_per_s,UnitMass_in_g are given in a dictionary 3) if unitparameterfile is given we read the parameters from the file (units parameter format) 4) if gadgetparameterfile is given we read the parameters from the file (gadget param format) ''' if gadgetparameterfile!=None: params = io.read_params(gadgetparameterfile) #print "Units Set From %s"%gadgetparameterfile elif unitparameterfile!=None: unitsparameters = param.Params(unitparameterfile,None) params = {} params['UnitLength_in_cm'] = unitsparameters.get('UnitLength_in_cm') params['UnitVelocity_in_cm_per_s'] = unitsparameters.get('UnitVelocity_in_cm_per_s') params['UnitMass_in_g'] = unitsparameters.get('UnitMass_in_g') #print "Units Set From %s"%unitparameterfile elif params!=None: pass #print "Units Set From %s"%params elif UnitLength_in_cm!=None and UnitVelocity_in_cm_per_s!=None and UnitMass_in_g!=None: params = {} params['UnitLength_in_cm'] = UnitLength_in_cm params['UnitVelocity_in_cm_per_s'] = UnitVelocity_in_cm_per_s params['UnitMass_in_g'] = UnitMass_in_g #print "Units Set From UnitLength_in_cm,UnitVelocity_in_cm_per_s,UnitMass_in_g" else: params = {} params['UnitLength_in_cm'] = self.unitsparameters.get('UnitLength_in_cm') params['UnitVelocity_in_cm_per_s'] = self.unitsparameters.get('UnitVelocity_in_cm_per_s') params['UnitMass_in_g'] = self.unitsparameters.get('UnitMass_in_g') #print "Units Set From %s (%s)"%("self.unitsparameters",self.unitsparameters.filename) # now, create the self.localsystem_of_units = units.Set_SystemUnits_From_Params(params) ################################# # # info functions # ################################# ################################# def info(self): ################################# """ Write info """ infolist = [] infolist.append("-----------------------------------") if mpi.NTask>1: infolist.append("") infolist.append("ThisTask : %s"%mpi.ThisTask.__repr__()) infolist.append("NTask : %s"%mpi.NTask.__repr__()) infolist.append("") infolist.append("particle file : %s"%self.p_name.__repr__()) infolist.append("ftype : %s"%self.ftype.__repr__()) infolist.append("mxntpe : %s"%self.get_mxntpe().__repr__()) infolist.append("nbody : %s"%self.nbody.__repr__()) infolist.append("nbody_tot : %s"%self.nbody_tot.__repr__()) infolist.append("npart : %s"%self.npart.__repr__()) infolist.append("npart_tot : %s"%self.npart_tot.__repr__()) infolist.append("mass_tot : %s"%self.mass_tot.__repr__()) infolist.append("byteorder : %s"%self.byteorder.__repr__()) infolist.append("pio : %s"%self.pio.__repr__()) if self.nbody != 0: infolist.append("") infolist.append("len pos : %s"%len(self.pos).__repr__()) infolist.append("pos[0] : %s"%self.pos[0].__repr__()) infolist.append("pos[-1] : %s"%self.pos[-1].__repr__()) infolist.append("len vel : 
%s"%len(self.vel).__repr__()) infolist.append("vel[0] : %s"%self.vel[0].__repr__()) infolist.append("vel[-1] : %s"%self.vel[-1].__repr__()) infolist.append("len mass : %s"%len(self.mass).__repr__()) infolist.append("mass[0] : %s"%self.mass[0].__repr__()) infolist.append("mass[-1] : %s"%self.mass[-1].__repr__()) infolist.append("len num : %s"%len(self.num).__repr__()) infolist.append("num[0] : %s"%self.num[0].__repr__()) infolist.append("num[-1] : %s"%self.num[-1].__repr__()) infolist.append("len tpe : %s"%len(self.tpe).__repr__()) infolist.append("tpe[0] : %s"%self.tpe[0].__repr__()) infolist.append("tpe[-1] : %s"%self.tpe[-1].__repr__()) if self.spec_info()!=None: infolist = infolist + self.spec_info() all_infolist = mpi.mpi_allgather(infolist) if mpi.mpi_IsMaster(): for infolist in all_infolist: for line in infolist: #print line self.log.write(line) ################################# def spec_info(self): ################################# """ Write specific info """ return None ################################# def object_info(self): ################################# """ Write class(object) info """ list_of_vars = self.get_list_of_vars() list_of_array = self.get_list_of_array() self.log.write("#############################") self.log.write("list of vars") self.log.write("#############################") for name in list_of_vars: self.log.write("%s %s"%( name,str(type(getattr(self,name))))) self.log.write("#############################") self.log.write("list of arrays") self.log.write("#############################") for name in list_of_array: self.log.write("%s %s"%(name,str(type(getattr(self,name))))) ################################# def nodes_info(self): ################################# """ Write info on nodes """ all_npart = mpi.mpi_allgather(self.npart) all_nbody = mpi.mpi_allgather(self.nbody) if mpi.mpi_IsMaster(): for Task in range(mpi.NTask): line = "Task=%4d nbody=%10d"%(Task,all_nbody[Task]) line = line + " npart= " for npart in all_npart[Task]: line = line + "%10d "%npart self.log.write(line) ################################# def memory_info(self): ################################# """ Write info on memory size of the current object (only counting arrays size) """ total_size = 0 array_size = 0 elts = self.get_list_of_array() for elt in elts: #num_of_elts = getattr(self,elt).size #byte_per_elts = getattr(self,elt).itemsize #bytes = num_of_elts*byte_per_elts bytes = getattr(self,elt).nbytes total_size = total_size + bytes array_size = array_size + bytes print "(%d) %10s %14d"%(mpi.ThisTask,elt,bytes) #elts = self.get_list_of_vars() #for elt in elts: array_size = mpi.mpi_reduce(array_size) # only the master return the info total_size = mpi.mpi_reduce(total_size) if mpi.mpi_IsMaster(): print "total size = %d octets"%(total_size) if array_size < 1024: print "total arrays size = %d octets"%(array_size) else: array_size = array_size/1024.0 if array_size < 1024: print "total arrays size = %dK"%(array_size) else: array_size = array_size/1024.0 if array_size < 1024: print "total arrays size = %dM"%(array_size) else: array_size = array_size/1024.0 if array_size < 1024: print "total arrays size = %dG"%(array_size) ################################# def print_filenames(self): ################################# """ Print files names """ self.log.write("p_name_global = %s"%str(self.p_name_global)) self.log.write("p_name = %s"%str(self.p_name)) ################################# # # list of variables functions # ################################# def get_list_of_array(self): """ Return the list of 
numpy vectors of size nbody. """ list_of_arrays = [] for name in dir(self): if type(getattr(self,name)) == ndarray: if len(getattr(self,name)) == self.nbody: #if (name!="nall") and (name!="nallhw") and (name!="massarr") and (name!="npart") and (name!="npart_tot"): list_of_arrays.append(name) return list_of_arrays def get_list_of_method(self): """ Return the list of instance methods (functions). """ list_of_instancemethod = [] for name in dir(self): if type(getattr(self,name)) == types.MethodType: list_of_instancemethod.append(name) return list_of_instancemethod def get_list_of_vars(self): """ Get the list of vars that are linked to the model """ list_of_allvars = dir(self) list_of_arrays = self.get_list_of_array() list_of_method = self.get_list_of_method() for name in list_of_arrays: list_of_allvars.remove(name) for name in list_of_method: list_of_allvars.remove(name) #list_of_allvars.remove('log') #list_of_allvars.remove('read_fcts') # becose these vars are linked to fcts #list_of_allvars.remove('write_fcts') # should be definitely removed return list_of_allvars def has_var(self,name): ''' Return true if the object pNbody has a variable called self.name ''' get_list_of_vars = self.get_list_of_vars() try: getattr(self,name) return True except AttributeError: return False def has_array(self,name): ''' Return true if the object pNbody has an array called self.name ''' list_of_array = self.get_list_of_array() try: list_of_array.index(name) return True except ValueError: return False def find_vars(self): ''' This function return a list of variables defined in the current object ''' elts = dir(self) lst = [] for elt in elts: exec("obj = self.%s"%(elt)) if type(obj) != types.MethodType: lst.append(elt) return lst ################################# # # check special values # ################################# def check_arrays(self): ''' check if the array contains special values like NaN or Inf ''' status = 0 for name in self.get_list_of_array(): vec = getattr(self,name) # check nan if isnan(vec).any(): msg = "array %s contains Nan !!!"%name warnings.warn(msg) status = 1 # check nan if isinf(vec).any(): msg = "array %s contains Inf !!!"%name warnings.warn(msg) status = 1 return status ################################# # # read/write functions # ################################# def read(self): """ Read the particle file(s) """ for i in range(len(self.p_name)): self.open_and_read(self.p_name[i],self.get_read_fcts()[i]) self.make_default_vars_global() def open_and_read(self,name,readfct): ''' open and read file name name : name of the input readfct : function used to read the file ''' # check p_name if self.pio=='yes' or mpi.mpi_IsMaster(): io.checkfile(name) # get size if self.pio=='yes' or mpi.mpi_IsMaster(): isize = os.path.getsize(name) # open file if self.pio=='yes' or mpi.mpi_IsMaster(): f = open(name,'r') else: f = None # read the file readfct(f) if self.pio=='yes' or mpi.mpi_IsMaster(): fsize = f.tell() else: fsize = None if self.pio=='yes' or mpi.mpi_IsMaster(): if fsize != isize: raise "ReadError","file %s not red completely"%(name) # close file if self.pio=='yes' or mpi.mpi_IsMaster(): f.close() def get_read_fcts(self): """ returns the functions needed to read a snapshot file. 
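# Illustrative sketch, not part of this patch: the NaN/Inf scan performed by
# check_arrays() above, applied to a plain dictionary of named arrays.
import warnings
from numpy import array, isnan, isinf, float32

def check_special_values(arrays):
    status = 0
    for name, vec in arrays.items():
        if isnan(vec).any():
            warnings.warn("array %s contains Nan !!!" % name)
            status = 1
        if isinf(vec).any():
            warnings.warn("array %s contains Inf !!!" % name)
            status = 1
    return status

# check_special_values({'mass': array([1.0, float('nan')], float32)})  ->  1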
""" return [] def write(self): """ Write the particle file(s) """ for i in range(len(self.p_name)): self.open_and_write(self.p_name[i],self.get_write_fcts()[i]) def open_and_write(self,name,writefct): """ Open and write file name : name of the output writefct : function used to write the file """ if self.pio=='yes' or mpi.mpi_IsMaster(): f = open(name,'w') else: f = None writefct(f) if self.pio=='yes' or mpi.mpi_IsMaster(): f.close() def get_write_fcts(self): """ returns the functions needed to write a snapshot file. """ return [] def write_num(self,name): """ Write a num file name : name of the output """ if self.pio =='yes': f = open("%s.%d"%(name,mpi.ThisTask),'w') for n in self.num: f.write('%8i\n'%(n)) f.close() else: if mpi.mpi_IsMaster(): f = open(name,'w') for Task in range(mpi.NTask-1,-1,-1): if Task != 0: num = mpi.mpi_recv(source = Task) for n in num: f.write('%8i\n'%(n)) else: for n in self.num: f.write('%8i\n'%(n)) else: mpi.mpi_send(self.num, dest = 0) def read_num(self,name): """ Read a num file name : name of the input """ + + def skip_io_block(self,s): + + #self.skipped_io_blocks = ['vel','num','u','rho','metals','tstar','minit','idp'] + + c = self.skipped_io_blocks.count(s) + if c==0: + return False + else: + return True + + ################################# # # coordinate transformation # ################################# ################################# # positions ################################# def x(self): """ Return a 1xn float array containing x coordinate """ return self.pos[:,0] def y(self): """ Return a 1xn float array containing y coordinate """ return self.pos[:,1] def z(self): """ Return a 1xn float array containing z coordinate """ return self.pos[:,2] def rxyz(self,center=None): """ Return a 1xn float array that corresponds to the distance from the center of each particle. """ if center!=None: r = sqrt( (self.pos[:,0]-center[0])**2 + (self.pos[:,1]-center[1])**2 + (self.pos[:,2]-center[2])**2 ) else: r = sqrt( self.pos[:,0]**2 + self.pos[:,1]**2 + self.pos[:,2]**2 ) return r def phi_xyz(self): """ Return a 1xn float array that corresponds to the azimuth in spherical coordinate of each particle. """ r = self.rxyz() rxy = self.rxy() xp = self.pos[:,0]*r/rxy # x projection in the plane yp = self.pos[:,1]*r/rxy # y projection in the plane p = arctan2(yp,xp) return p def theta_xyz(self): """ Return a 1xn float array that corresponds to the elevation angle in spherical coordinate of each particle. """ r = self.rxyz() t = arcsin(self.pos[:,2]/r) return t def rxy(self): """ Return a 1xn float array that corresponds to the projected distance from the center of each particle. """ r = sqrt( self.pos[:,0]**2 + self.pos[:,1]**2) return r def phi_xy(self): """ Return a 1xn float array that corresponds to the azimuth in cylindrical coordinate of each particle. """ p = arctan2(self.pos[:,1],self.pos[:,0]) return p r = rxyz R = rxy ###################### # spherical coord ###################### def cart2sph(self,pos=None): """ Transform carthesian coodinates x,y,z into spherical coordinates r,p,t Return a 3xn float array. 
""" if pos!=None: x = pos[:,0] y = pos[:,1] z = pos[:,2] else: x = self.pos[:,0] y = self.pos[:,1] z = self.pos[:,2] r = self.rxyz() rxy = self.rxy() #xp = x*r/rxy # x projection in the plane #yp = y*r/rxy # y projection in the plane #p = arctan2(yp,xp) #t = arcsin(z/r) p = arctan2(y,x) t = arctan2(rxy,z) return transpose(array([r,p,t])).astype(float32) def sph2cart(self,pos=None): """ Transform spherical coordinates r,p,t into carthesian coodinates x,y,z Return a 3xn float array. """ if pos!=None: r = pos[:,0] p = pos[:,1] t = pos[:,2] else: r = self.pos[:,0] p = self.pos[:,1] t = self.pos[:,2] x = r*sin(t)*cos(p) y = r*sin(t)*sin(p) z = r*cos(t) return transpose(array([x,y,z])).astype(float32) ################################# # velocities ################################# def vx(self): """ Return a 1xn float array containing x velocity """ return self.vel[:,0] def vy(self): """ Return a 1xn float array containing y velocity """ return self.vel[:,1] def vz(self): """ Return a 1xn float array containing z velocity """ return self.vel[:,2] def vn(self): """ Return a 1xn float array that corresponds to the norm of velocities """ return sqrt(self.vel[:,0]*self.vel[:,0] + self.vel[:,1]*self.vel[:,1] + self.vel[:,2]*self.vel[:,2]) def vrxyz(self): """ Return a 1xn float array that corresponds to the radial velocity in spherical system """ r = self.rxyz() return (self.pos[:,0]*self.vel[:,0] + self.pos[:,1]*self.vel[:,1] + self.pos[:,2]*self.vel[:,2])/r def Vr(self): """ Return the radial velocies of particles The output is an 3xn float array. """ xr = sqrt(self.pos[:,0]**2+self.pos[:,1]**2) vr = (self.pos[:,0]*self.vel[:,0] + self.pos[:,1]*self.vel[:,1]) / xr return vr def Vt(self): """ Return the tangential velocies of particles The output is an 3xn float array. """ xr = sqrt(self.pos[:,0]**2+self.pos[:,1]**2) vt = (self.pos[:,0]*self.vel[:,1] - self.pos[:,1]*self.vel[:,0]) / xr return vt def Vz(self): """ Return a 1xn float array containing z velocity """ return self.vel[:,2] ###################### # cylindrical coord ###################### def vel_cyl2cart(self,pos=None,vel=None): """ Transform velocities in cylindrical coordinates vr,vt,vz into carthesian coodinates vx,vy,vz. Pos is the position of particles in cart. coord. Vel is the velocity in cylindrical coord. Return a 3xn float array. """ return vel_cyl2cart(self.pos,self.vel) def vel_cart2cyl(self): """ Transform velocities in carthesian coordinates vx,vy,vz into cylindrical coodinates vr,vz,vz. Pos is the position of particles in cart. coord. Vel is the velocity in cart. coord. Return a 3xn float array. """ return vel_cart2cyl(self.pos,self.vel) ################################# # # physical values # ################################# def get_ns(self): """ Return in an array the number of particles of each node. """ ns = mpi.mpi_allgather(self.nbody) return ns def get_mass_tot(self): """ Return the total mass of system. """ mass_tot = mpi.mpi_sum(self.mass) return mass_tot def size(self): """ Estimate the model size, using the inertial momentum """ return max(self.minert()) def cm(self): """ Return the mass center of the model. The output is an 3x1 float array. 
""" mtot = mpi.mpi_sum(self.mass.astype(FLOAT)) cmx = mpi.mpi_sum(self.pos[:,0].astype(float64)*self.mass.astype(FLOAT)) / mtot cmy = mpi.mpi_sum(self.pos[:,1].astype(float64)*self.mass.astype(FLOAT)) / mtot cmz = mpi.mpi_sum(self.pos[:,2].astype(float64)*self.mass.astype(FLOAT)) / mtot return array([cmx,cmy,cmz]) def get_histocenter(self,rbox=50,nb=500): """ Return the position of the higher density region in x,y,z (not good) found by the function "histocenter". rbox : size of the box nb : number of bins in each dimension """ rm = rbox/2. bins = arange(-rm,rm,float(2*rm)/float(nb)) # histograms in x,y,z (cut the tail) hx = mpi.mpi_histogram(self.pos[:,0],bins)[:-1] hy = mpi.mpi_histogram(self.pos[:,1],bins)[:-1] hz = mpi.mpi_histogram(self.pos[:,2],bins)[:-1] # max in each dim dx = bins[argmax(hx)] dy = bins[argmax(hy)] dz = bins[argmax(hz)] return array([dx,dy,dz]) def get_histocenter2(self,rbox=50,nb=64): """ Return the position of the higher density region in x,y,z (not good) found by the function "histocenter". rbox : size of the box nb : number of bins in each dimension """ # transformation -rbox->0, 0->nb/2, rbox->nb # y = (x+rbox)/(2*rbox)*nb pos = ( self.pos + [rbox,rbox,rbox] )/(2*rbox) # 0 to 1 pos = pos*[nb,nb,nb] # 0 to nb pos = pos.astype(float32) mass = self.mass.astype(float32) mat = mkmap3d(pos/nb,mass,mass,(nb,nb,nb)) # find max m = ravel(mat) arg = argmax(m) i = indices((nb,nb,nb)) # not that good ix = ravel(i[0]) # not that good iy = ravel(i[1]) # not that good iz = ravel(i[2]) # not that good ix = ix[arg] iy = iy[arg] iz = iz[arg] # transformation inverse # x = 2*rbox*(y/nb)-rbox dx = 2*rbox*(float(ix)/nb)-rbox dy = 2*rbox*(float(iy)/nb)-rbox dz = 2*rbox*(float(iz)/nb)-rbox return array([dx,dy,dz]) def cv(self): """ Return the center of the velocities of the model. The output is an 3x1 float array. """ cmx = mpi.mpi_sum(self.vel[:,0]*self.mass) / self.mass_tot cmy = mpi.mpi_sum(self.vel[:,1]*self.mass) / self.mass_tot cmz = mpi.mpi_sum(self.vel[:,2]*self.mass) / self.mass_tot return array([cmx,cmy,cmz]) def minert(self): """ Return the diagonal of the intertial momentum. """ mx = mpi.mpi_sum(self.pos[:,0]**2 *self.mass) / self.mass_tot my = mpi.mpi_sum(self.pos[:,1]**2 *self.mass) / self.mass_tot mz = mpi.mpi_sum(self.pos[:,2]**2 *self.mass) / self.mass_tot mx = sqrt(mx) my = sqrt(my) mz = sqrt(mz) return array([mx,my,mz]) def inertial_tensor(self): """ Return the inertial tensor. """ Ixx = mpi.mpi_sum(self.mass * (self.y()**2 + self.z()**2)) Iyy = mpi.mpi_sum(self.mass * (self.x()**2 + self.z()**2)) Izz = mpi.mpi_sum(self.mass * (self.x()**2 + self.y()**2)) Ixy = -mpi.mpi_sum(self.mass * self.x() * self.y()) Ixz = -mpi.mpi_sum(self.mass * self.x() * self.z()) Iyz = -mpi.mpi_sum(self.mass * self.y() * self.z()) I = array( [[Ixx,Ixy,Ixz],[Ixy,Iyy,Iyz],[Ixz,Iyz,Izz]] ) return I def x_sigma(self): """ Return the norm of the position dispersions. """ x = (self.pos - self.cm()) x2 = x[:,0]**2 + x[:,1]**2 + x[:,2]**2 x_s2 = mpi.mpi_sum( x2 *self.mass )/self.mass_tot x_s = sqrt(x_s2) return x_s def v_sigma(self): """ Return the norm of the velocity dispersions. """ v = (self.vel - self.cv()) v2 = v[:,0]**2 + v[:,1]**2 + v[:,2]**2 v_s2 = mpi.mpi_sum( v2 *self.mass )/self.mass_tot v_s = sqrt(v_s2) return v_s def dx_mean(self): """ Return the average distance between particles. """ # 1) estimate the size of the system D = self.x_sigma() # 2) estimate the # of particules per unit volume n = self.nbody_tot/D # 3) estimate the average distance between particules l = 1./n**(1./3.) 
return l def dv_mean(self): """ Return the average relative speed between particles. """ # 1) estimate the size of the system D = self.v_sigma() # 2) estimate the # of particules per unit volume n = self.nbody_tot/D # 3) estimate the average distance between particules l = 1./n**(1./3.) return l def Ekin(self): """ Return the total kinetic energy """ E = 0.5 * mpi.mpi_sum (self.mass * (self.vel[:,0]**2 + self.vel[:,1]**2 + self.vel[:,2]**2) ) return E def ekin(self): """ Return the total specific kinetic energy """ E = 0.5 * mpi.mpi_sum ( (self.vel[:,0]**2 + self.vel[:,1]**2 + self.vel[:,2]**2) ) return E def Epot(self,eps): """ Return the total potential energy using the softening lenght eps. eps : softening WARNING : THIS FUNCTION DO NOT WORK IN MPI MODE """ E = epot(self.pos,self.mass,eps) return E def epot(self,eps): """ Return the total specific potential energy using the softening lenght eps. eps : softening WARNING : THIS FUNCTION DO NOT WORK IN MPI MODE """ e = epot(self.pos,self.mass,eps)/self.mass_tot return e def L(self): """ Return the angular momentum in x,y,z of all particles. The output is an 3xn float array. """ l = amxyz(self.pos,self.vel,self.mass) return l def l(self): """ Return the specific angular momentum in x,y,z of all particles. The output is an 3xn float array. """ l = samxyz(self.pos,self.vel,self.mass) return l def Ltot(self): """ Return the total angular momentum. The output is an 3x1 float array. """ l = mpi.mpi_allreduce (am(self.pos,self.vel,self.mass)) #l = mpi.mpi_sum(self.L()) return l def ltot(self): """ Return the specific total angular momentum. The output is an 3x1 float array. """ l = mpi.mpi_allreduce (am(self.pos,self.vel,self.mass))/self.mass_tot #l = self.Ltot()/self.mass_tot return l def Pot(self,x,eps): """ Return the potential at a given position, using the softening lenght eps. x : position (array vector) eps : softening """ if type(x) == ndarray: p = zeros(len(x),float32) for i in range(len(x)): p[i] = mpi.mpi_allreduce ( potential(self.pos,self.mass,array(x[i],float32),eps) ) else: p = mpi.mpi_allreduce ( potential(self.pos,self.mass,array(x,float32),eps) ) return p def TreePot(self,pos,eps,Tree=None): """ Return the potential at a given position, using the softening lenght eps and using a tree. pos : position (array vector) eps : softening Tree: gravitational tree if already computed WARNING : this function do not work in parallel """ if Tree==None: self.Tree = Tree = self.getTree() pot = Tree.Potential(pos,eps) return pot def Accel(self,x,eps): """ Return the acceleration at a given position, using the softening lenght eps. x : position (array vector) eps : softening """ if type(x) == ndarray: ax = zeros(len(x),float32) ay = zeros(len(x),float32) az = zeros(len(x),float32) for i in range(len(x)): ax[i],ay[i],az[i] = acceleration(self.pos,self.mass,array(x[i],float32),eps) a = transpose(array([ax,ay,az],float32)) else: ax,ay,az = acceleration(self.pos,self.mass,array(x,float32),eps) ax = mpi.mpi_allreduce ( ax ) ay = mpi.mpi_allreduce ( ay ) az = mpi.mpi_allreduce ( az ) a = array([ax,ay,az],float32) return a def TreeAccel(self,pos,eps,Tree=None): """ Return the acceleration at a given position, using the softening lenght eps and using a tree. 
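# Illustrative sketch, not part of this patch: Ekin() above is the mass-weighted
# sum 0.5*sum(m*v^2), reduced over all MPI tasks; a standalone single-process
# equivalent on random data:
from numpy import random, sum, float32

mass = random.random(1000).astype(float32)
vel  = random.standard_normal((1000, 3)).astype(float32)
Ekin = 0.5 * sum(mass * (vel[:, 0]**2 + vel[:, 1]**2 + vel[:, 2]**2))
print "total kinetic energy =", Ekin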
pos : position (array vector) eps : softening Tree: gravitational tree if already computed WARNING : this function do not work in parallel """ if Tree==None: self.Tree = Tree = self.getTree() acc = Tree.Acceleration(pos,eps) return acc def tork(self,acc): """ Return the total tork on the system due to the force acting on each particle (acc). The output is an 3xn float array. acc : 3xn float array """ trk = mpi.mpi_allreduce ( am(self.pos,array(acc,float32),self.mass) ) return trk def dens(self,r=None,nb=25,rm=50): """ Return the number density at radius r (supposing a spherical density distribution). If r is not specified, it is computed with nb and rm. The output is an n x 1 float array. !!! This routine do not use masses !!! r : radius nb : number of bins (size of the output) rm : maximal radius """ if r!= None: r = array(r,float) else: rmax = rm dr = rm/float(nb) r = arange(0.,rm,dr) xr = sqrt(self.pos[:,0]**2 + self.pos[:,1]**2 + self.pos[:,2]**2) dens,r = histogram(xr,r) r1 = r[:-1] r2 = r[1:] dv = (4./3.)*pi*(r2**3-r1**3) dens = dens/dv # surface density # take the mean r = (r1+r2)/2 return r,mpi.mpi_allreduce(dens) def mdens(self,r=None,nb=25,rm=50): """ Return the density at radius r (supposing a spherical density distribution). If r is not specified, it is computed with nb and rm. The output is an n x 1 float array. r : radius nb : number of bins (size of the output) rm : maximal radius """ if r!= None: r = array(r,float) else: rmax = rm dr = rm/float(nb) r = arange(0.,rm,dr) xr = sqrt(self.pos[:,0]**2 + self.pos[:,1]**2 + self.pos[:,2]**2) dens = whistogram(xr.astype(float),self.mass.astype(float),r.astype(float)) r1 = r[:-1] r2 = r[1:] dv = (4./3.)*pi*(r2**3-r1**3) dens = dens[:-1]/dv # surface density # take the mean r = (r1+r2)/2 return r,mpi.mpi_allreduce(dens) def mr(self,r=None,nb=25,rm=50): """ Return the mass inside radius r (supposing a spherical density distribution). If r is not specified, it is computed with nb and rm. The output is an n x 1 float array. r : radius nb : number of bins (size of the output) rm : maximal radius """ if r!= None: r = array(r,float) else: rmax = rm dr = rm/float(nb) r = arange(0.,rm,dr) xr = self.rxyz() mr = whistogram(xr.astype(float),self.mass.astype(float),r.astype(float)) mr = add.accumulate(mr) return r,mpi.mpi_allreduce(mr) def Mr_Spherical(self,nr=25,rmin=0,rmax=50): """ Return the mass inside radius r (supposing a spherical density distribution). The output is 2 n x 1 float arrays. nr : number of bins (size of the output) rmin : minimal radius (this must be zero, instead it is wrong...) rmax : maximal radius """ rmin = float(rmin) rmax = float(rmax) shape = (nr,) val = ones(self.pos.shape).astype(float32) mass = self.mass.astype(float32) r = self.rxyz() r = (r-rmin)/(rmax-rmin) r = r.astype(float32) # compute the map mr = mkmap1d(r,mass,val,shape).astype(float) # compute the radii rs = arange(0.,rmax,(rmax-rmin)/nr) # sum mr = add.accumulate(mr) return rs,mpi.mpi_allreduce(mr) def sdens(self,r=None,nb=25,rm=50): """ Return the surface density at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array. !!! This routine do not uses masses !!! r : radius nb : number of bins (size of the output) rm : maximal radius """ if r!= None: r = array(r,float) else: rmax = rm dr = rm/float(nb) r = arange(0.,rm,dr) xr = sqrt(self.pos[:,0]**2 + self.pos[:,1]**2) sdens,r = histogram(xr,r) r1 = r[:-1] r2 = r[1:] ds = pi*(r2**2-r1**2) sdens = sdens/ds # surface density # take the mean r = (r1+r2)/2. 
return r,mpi.mpi_allreduce(sdens) def msdens(self,r=None,nb=25,rm=50): """ Return the mass surface density at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array. r : radius nb : number of bins (size of the output) rm : maximal radius """ if r!= None: r = array(r,float) else: rmax = rm dr = rm/float(nb) r = arange(0.,rm,dr) xr = sqrt(self.pos[:,0]**2 + self.pos[:,1]**2) sdens = whistogram(xr.astype(float),self.mass.astype(float),r.astype(float)) r1 = r[:-1] r2 = r[1:] ds = pi*(r2**2-r1**2) sdens = sdens[:-1]/ds # surface density # take the mean r = (r1+r2)/2. return r,mpi.mpi_allreduce(sdens) def sigma_z(self,r=None,nb=25,rm=50): """ Return the vertical dispertion in z at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array. r : radius nb : number of bins (size of the output) rm : maximal radius """ if r!= None: r = array(r,float) else: rmax = rm dr = rm/float(nb) r = arange(0.,rm,dr) r,h = self.Histo(r,mode='sz') return r,h def sigma_vz(self,r=None,nb=25,rm=50): """ Return the vertical dispertion in z at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array. r : radius nb : number of bins (size of the output) rm : maximal radius """ if r!= None: r = array(r,float) else: rmax = rm dr = rm/float(nb) r = arange(0.,rm,dr) r,h = self.Histo(r,mode='svz') return r,h def zprof(self,z=None,r=2.5,dr=0.5,nb=25,zm=5.): """ Return the z-profile in a vector for a given radius !!! This routine works only if particles have equal masses !!! z : bins in z (optional) r : radius of the cut dr : width in r of the cut nb : number of bins (size of the output) zm : maximal height """ if z!= None: pass else: zmax = zm dz = 2.*zm/float(nb) z = arange(-zm,zm,dz) # select r1 = r-dr/2. r2 = r+dr/2. ann = self.selectc((self.rxy()>r1)*((self.rxy()r1[i])*((self.rxy() 1: sr.append(vr.std()) st.append(vt.std()) sz.append(vz.std()) mt.append(vt.mean()) else: sr.append(0.) st.append(0.) sz.append(0.) mt.append(0.) sr = array(sr,float) st = array(st,float) sz = array(sz,float) mt = array(mt,float) return r,sr,st,sz,mt def histovel(self,nb=100,vmin=None,vmax=None,mode='n'): """ Return or plot the histrogram of the norm of velocities or of the radial velocities. The output is a list (r,h) of 2 nx1 float arrays, where r is the radius and h the values of the histogram. 
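# Illustrative sketch, not part of this patch: mdens()/msdens() above build a
# mass-weighted histogram in radius and divide by the shell volume (or annulus
# area).  A standalone 3D version of the same technique, assuming a numpy
# recent enough for histogram(..., weights=...):
from numpy import arange, sqrt, pi, histogram, random, float32

pos  = random.standard_normal((10000, 3)).astype(float32)
mass = random.random(10000).astype(float32)

r_edges = arange(0., 5., 0.25)
r = sqrt(pos[:, 0]**2 + pos[:, 1]**2 + pos[:, 2]**2)
m_bin, r_edges = histogram(r, r_edges, weights=mass)
r1, r2 = r_edges[:-1], r_edges[1:]
dv  = (4. / 3.) * pi * (r2**3 - r1**3)     # shell volumes
rho = m_bin / dv                           # mean mass density in each shell
r_mid = 0.5 * (r1 + r2)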
nb : number of bins (size of the output) vmax : maximum velocity vmin : minimum velocity mode : 'n' (norme of the velocities) 'r' (radial velocities) """ if mode == 'r': v = (self.pos[:,0]*self.vel[:,0] + self.pos[:,1]*self.vel[:,1]) / sqrt(self.pos[:,0]**2+self.pos[:,1]**2) elif mode == 'n': v = sqrt(self.vel[:,0]**2 + self.vel[:,1]**2 + self.vel[:,2]**2) if vmax == None : vmax = mpi.mpi_max(v) if vmin == None : vmin = mpi.mpi_min(v) bins = arange(vmin,vmax,(vmax-vmin)/float(nb)) h = mpi.mpi_histogram(v,bins) return h,bins def zmodes(self,nr=32,nm=16,rm=32): """ Compute the vertical modes of a model nm = 16 : number of modes nr = 32 : number of radius rm = 50 : max radius return r : the radius used m : the modes computed m1 : the matrix of the amplitude m2 : the matrix of the phases """ ps = arange(-pi,pi+pi/(2.*nm),2*pi/(2.*nm))+pi # phases R = arange(0,nr+1,1)*float(rm)/nr # radius Rs = array([],float) r = self.rxy() m1 = array([],float) m2 = array([],float) # loop over all radius for i in range(len(R)-1): c = (r>=R[i])*(r<=R[i+1]) an = self.selectc(c) if sum(c.astype(int))<=1: #print "less than 1 particle in the coupe",R[i] amp = zeros(len(ps)/2).astype(float) m1 = concatenate((m1,amp)) m2 = concatenate((m2,amp)) continue x = an.pos[:,0] y = an.pos[:,1] z = an.pos[:,2] t = arctan2(y,x)+pi zm = [] ok = 0 for j in range(len(ps)-1): c = (t>=ps[j])*(t=R[i])*(r<=R[i+1]) an = self.selectc(c) if sum(c.astype(int))<=1: #print "less than 1 particle in the coupe",R[i] amp = zeros(len(ps)/2).astype(float) m1 = concatenate((m1,amp)) m2 = concatenate((m2,amp)) continue x = an.pos[:,0] y = an.pos[:,1] z = an.pos[:,2] t = arctan2(y,x)+pi dm = [] ok = 0 for j in range(len(ps)-1): c = (t>=ps[j])*(t [0,boxsize] centred : -> [-boxsize/2,boxsize/2] [x,y,z] : """ if boxsize==None: if self.has_var('boxsize'): boxsize = self.boxsize if boxsize != None: if mode == None: if string.find(axis,'x')!=-1: self.pos[:,0] = where((self.pos[:,0]<0.0 ),self.pos[:,0]+boxsize,self.pos[:,0]) if string.find(axis,'y')!=-1: self.pos[:,1] = where((self.pos[:,1]<0.0 ),self.pos[:,1]+boxsize,self.pos[:,1]) if string.find(axis,'z')!=-1: self.pos[:,2] = where((self.pos[:,2]<0.0 ),self.pos[:,2]+boxsize,self.pos[:,2]) if string.find(axis,'x')!=-1: self.pos[:,0] = where((self.pos[:,0]>boxsize),self.pos[:,0]-boxsize,self.pos[:,0]) if string.find(axis,'y')!=-1: self.pos[:,1] = where((self.pos[:,1]>boxsize),self.pos[:,1]-boxsize,self.pos[:,1]) if string.find(axis,'z')!=-1: self.pos[:,2] = where((self.pos[:,2]>boxsize),self.pos[:,2]-boxsize,self.pos[:,2]) elif mode=='centred': if string.find(axis,'x')!=-1: self.pos[:,0] = where((self.pos[:,0]<=-boxsize/2.),self.pos[:,0]+boxsize,self.pos[:,0]) if string.find(axis,'y')!=-1: self.pos[:,1] = where((self.pos[:,1]<=-boxsize/2.),self.pos[:,1]+boxsize,self.pos[:,1]) if string.find(axis,'z')!=-1: self.pos[:,2] = where((self.pos[:,2]<=-boxsize/2.),self.pos[:,2]+boxsize,self.pos[:,2]) if string.find(axis,'x')!=-1: self.pos[:,0] = where((self.pos[:,0]> boxsize/2.),self.pos[:,0]-boxsize,self.pos[:,0]) if string.find(axis,'y')!=-1: self.pos[:,1] = where((self.pos[:,1]> boxsize/2.),self.pos[:,1]-boxsize,self.pos[:,1]) if string.find(axis,'z')!=-1: self.pos[:,2] = where((self.pos[:,2]> boxsize/2.),self.pos[:,2]-boxsize,self.pos[:,2]) elif (type(mode)==ndarray) or (type(mode)==types.ListType): if string.find(axis,'x')!=-1: self.pos[:,0] = where((self.pos[:,0]<=mode[0]-boxsize/2.),self.pos[:,0]+boxsize,self.pos[:,0]) if string.find(axis,'y')!=-1: self.pos[:,1] = 
where((self.pos[:,1]<=mode[1]-boxsize/2.),self.pos[:,1]+boxsize,self.pos[:,1]) if string.find(axis,'z')!=-1: self.pos[:,2] = where((self.pos[:,2]<=mode[2]-boxsize/2.),self.pos[:,2]+boxsize,self.pos[:,2]) if string.find(axis,'x')!=-1: self.pos[:,0] = where((self.pos[:,0]> mode[0]+boxsize/2.),self.pos[:,0]-boxsize,self.pos[:,0]) if string.find(axis,'y')!=-1: self.pos[:,1] = where((self.pos[:,1]> mode[1]+boxsize/2.),self.pos[:,1]-boxsize,self.pos[:,1]) if string.find(axis,'z')!=-1: self.pos[:,2] = where((self.pos[:,2]> mode[2]+boxsize/2.),self.pos[:,2]-boxsize,self.pos[:,2]) def rotate_old(self,angle=0,mode='a',axis='x'): """ Rotate the positions and/or the velocities of the object around a specific axis. angle : rotation angle in radian axis : 'x' : around x 'y' : around y 'z' : around z : [x,y,z] : around this axis mode : 'p' : rotate only position 'v' : rotate only velocities 'a' : rotate both (default) """ if type(axis) == type('a'): # rotate around x,y or z if axis =='x': if mode=='p' or mode=='a': self.pos=rotx(angle,self.pos) if mode=='v' or mode=='a': self.vel=rotx(angle,self.vel) elif axis =='y': if mode=='p' or mode=='a': self.pos=roty(angle,self.pos) if mode=='v' or mode=='a': self.vel=roty(angle,self.vel) elif axis =='z': if mode=='p' or mode=='a': self.pos=rotz(angle,self.pos) if mode=='v' or mode=='a': self.vel=rotz(angle,self.vel) else: # rotate around a given axis # construction of the rotation matrix nxy = sqrt(axis[0]**2+axis[1]**2) theta_x = angle theta_z = 2.*pi - arctan2(axis[1],axis[0]) theta_y = arctan2(axis[2],nxy) if mode=='p' or mode=='a': # rot in z self.pos=rotz(theta_z,self.pos) # rot in y self.pos=roty(theta_y,self.pos) # rot in x self.pos=rotx(theta_x,self.pos) # rot in -y self.pos=roty(-theta_y,self.pos) # rot in -z self.pos=rotz(-theta_z,self.pos) if mode=='v' or mode=='a': # rot in z self.vel=rotz(theta_z,self.vel) # rot in y self.vel=roty(theta_y,self.vel) # rot in x self.vel=rotx(theta_x,self.vel) # rot in -y self.vel=roty(-theta_y,self.vel) # rot in -z self.vel=rotz(-theta_z,self.vel) def rotate(self,angle=0,axis=[1,0,0],point=[0,0,0],mode='a'): """ Rotate the positions and/or the velocities of the object around a specific axis defined by a vector and an point. angle : rotation angle in radian axis : direction of the axis point : center of the rotation mode : 'p' : rotate only position 'v' : rotate only velocities 'a' : rotate both (default) """ if axis=='x': axis = array([1,0,0],float) elif axis=='y': axis = array([0,1,0],float) elif axis=='z': axis = array([0,0,1],float) x = self.pos v = self.vel # center point x = x-point # construction of the rotation matrix norm = sqrt(axis[0]**2 + axis[1]**2 + axis[2]**2) if norm == 0: return x sn = sin(-angle/2.) e0 = cos(-angle/2.) 
e1 = axis[0]*sn/norm e2 = axis[1]*sn/norm e3 = axis[2]*sn/norm a = zeros((3,3),float) a[0,0] = e0**2 + e1**2 - e2**2 - e3**2 a[1,0] = 2.*(e1*e2 + e0*e3) a[2,0] = 2.*(e1*e3 - e0*e2) a[0,1] = 2.*(e1*e2 - e0*e3) a[1,1] = e0**2 - e1**2 + e2**2 - e3**2 a[2,1] = 2.*(e2*e3 + e0*e1) a[0,2] = 2.*(e1*e3 + e0*e2) a[1,2] = 2.*(e2*e3 - e0*e1) a[2,2] = e0**2 - e1**2 - e2**2 + e3**2 a = a.astype(float) # multiply x and v if mode=='p': x = dot(x,a) elif mode=='v': v = dot(v,a) else: x = dot(x,a) v = dot(v,a) # decenter point x = x+point self.pos = x.astype(float32) self.vel = v.astype(float32) def rotateR(self,R,mode='a'): """ Rotate the model using the matrix R mode : 'p' : only position 'v' : only velocities 'a' : both (default) """ # multiply x and v if mode=='p': self.pos = dot(self.pos,R) elif mode=='v': self.vel = dot(self.vel,R) else: self.pos = dot(self.pos,R) self.vel = dot(self.vel,R) def get_rotation_matrix_to_align_with_main_axis(self): """ Get the rotation matrix used to rotate the object in order to align it's main axis with the axis of its inertial tensor. """ # compute inertial tensor I = self.inertial_tensor() # find eigen values and vectors val,vec =linalg.eig(I) l1 = val[0] l2 = val[1] l3 = val[2] a1 = vec[:,0] a2 = vec[:,1] a3 = vec[:,2] # find Rm such that Rm*1,0,0 = a1 # that Rm*0,1,0 = a2 # that Rm*0,0,1 = a3 Rm = transpose(array([a1,a2,a3])) return Rm def align_with_main_axis(self,mode='a'): """ Rotate the object in order to align it's major axis with the axis of its inertial tensor. mode : 'p' : only position 'v' : only velocities 'a' : both (default) """ # find rotation matrix R = self.get_rotation_matrix_to_align_with_main_axis() # apply it self.rotateR(R,mode) def align(self,axis,mode='a',sgn='+',fact=None): """ Rotate the object in order to align the axis 'axis' with the z axis. axis : [x,y,z] mode : 'p' : only position 'v' : only velocities 'a' : both (default) sgn : '+' : normal rotation '-' : reverse sense of rotation fact : int : factor to increase the angle """ n = [axis[1], -axis[0],0.] theta = arccos(axis[2]/sqrt(axis[0]**2+axis[1]**2+axis[2]**2)) if sgn =='-': theta = -theta if fact != None: theta = theta*fact self.rotate(angle=theta,mode=mode,axis=n) def align2(self,axis1=[1,0,0],axis2=[0,0,1],point=[0,0,0]): ''' Rotate the object in order to align the axis 'axis' with the z axis. axis1 : [x,y,z] axis2 : [x,y,z] point : [x,y,z] ''' a1 = array(axis1,float) a2 = array(axis2,float) a3 = array([0,0,0],float) a3[0] = a1[1]*a2[2] - a1[2]*a2[1] a3[1] = a1[2]*a2[0] - a1[0]*a2[2] a3[2] = a1[0]*a2[1] - a1[1]*a2[0] n1 = sqrt(a1[0]**2 + a1[1]**2 + a1[2]**2) n2 = sqrt(a2[0]**2 + a2[1]**2 + a2[2]**2) angle = arccos(inner(a1,a2)/(n1*n2)) self.rotate(angle=angle,axis=a3,point=point) def spin(self,omega=None,L=None,j=None,E=None): """ Spin the object with angular velocity "omega" (rigid rotation). Omega is a 1 x 3 array object If L (total angular momentum) is explicitely given, compute Omega from L (1 x 3 array object). 
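# Illustrative sketch, not part of this patch: rotate() above builds its
# rotation matrix from the quaternion components (e0..e3) of an axis/angle pair
# and applies it with dot(pos, a).  The same construction as a standalone
# helper, checked on a 90-degree rotation around z:
from numpy import array, zeros, sqrt, sin, cos, dot, pi

def rotation_matrix(axis, angle):
    axis = array(axis, float)
    norm = sqrt((axis**2).sum())
    sn, e0 = sin(-angle/2.), cos(-angle/2.)
    e1, e2, e3 = axis*sn/norm
    a = zeros((3, 3), float)
    a[0, 0] = e0**2 + e1**2 - e2**2 - e3**2
    a[1, 0] = 2.*(e1*e2 + e0*e3)
    a[2, 0] = 2.*(e1*e3 - e0*e2)
    a[0, 1] = 2.*(e1*e2 - e0*e3)
    a[1, 1] = e0**2 - e1**2 + e2**2 - e3**2
    a[2, 1] = 2.*(e2*e3 + e0*e1)
    a[0, 2] = 2.*(e1*e3 + e0*e2)
    a[1, 2] = 2.*(e2*e3 - e0*e1)
    a[2, 2] = e0**2 - e1**2 - e2**2 + e3**2
    return a

print dot(array([1., 0., 0.]), rotation_matrix([0, 0, 1], pi/2.))   # ~ [0. 1. 0.]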
omega : angular speed (array vector) L : desired angular momentum j : desired energy fraction in rotation E : Total energy (without rotation) """ # do nothing if L==None and omega==None and j==None: pass # use j and E (spin around z axis) if j!=None: if E==None: "spin : print you must give E" else: if (j > 1): self.log.write("spin : j must be less than 1") sys.exit() Erot = j*E/(1-j) omega = sqrt(2*Erot/mpi_sum(self.mass*self.rxy()**2) ) omega = array([0,0,omega],float32) self.vel=spin(self.pos,self.vel,omega) # use omega elif L==None and omega!=None: omega = array(omega,float32) self.vel=spin(self.pos,self.vel,omega) # use L # Pfenniger 93 elif L!=None: L = array(L,float32) aamx = L[0] aamy = L[1] aamz = L[2] x = self.pos[:,0] y = self.pos[:,1] z = self.pos[:,2] vx = self.vel[:,0] vy = self.vel[:,1] vz = self.vel[:,2] m = self.mass Ixy = sum(m*x*y) Iyz = sum(m*y*z) Izx = sum(m*z*x) Ixx = sum(m*x*x) Iyy = sum(m*y*y) Izz = sum(m*z*z) Axx = Iyy+Izz Ayy = Izz+Ixx Azz = Ixx+Iyy Axy = -Ixy Ayz = -Iyz Azx = -Izx D = Axx*Ayy*Azz + 2*Axy*Ayz*Azx - Axx*Ayz**2 - Ayy*Azx**2 - Azz*Axy**2 DLX = sum(m*(y*vz-z*vy))-aamx DLY = sum(m*(z*vx-x*vz))-aamy DLZ = sum(m*(x*vy-y*vx))-aamz Bxx = Ayy*Azz - Ayz**2 Byy = Azz*Axx - Azx**2 Bzz = Axx*Ayy - Axy**2 Bxy = Azx*Ayz - Axy*Azz Byz = Axy*Azx - Ayz*Axx Bzx = Ayz*Axy - Azx*Ayy omega = array([0,0,0],float32) omega[0] = -(Bxx*DLX + Bxy*DLY + Bzx*DLZ)/D omega[1] = -(Bxy*DLX + Byy*DLY + Byz*DLZ)/D omega[2] = -(Bzx*DLX + Byz*DLY + Bzz*DLZ)/D self.vel=spin(self.pos,self.vel,omega) ################################# # # selection of particles # ################################# def selectc(self,c,local=False): """ Return an N-body object that contain only particles where the corresponding value in c is not zero. c is a nx1 Nbody array. c : the condition vector local : local selection (True) or global selection (False) """ new = Nbody(status='new',ftype=self.ftype[6:],local=local) # now, copy all var linked to the model for name in self.get_list_of_vars(): setattr(new, name, getattr(self,name)) # here, we create ptype on the fly (used to create new.npart) #self.ptype = array([],int) #for i in range(len(self.npart)): # self.ptype = concatenate( (self.ptype,ones(self.npart[i])*i) ) # now, copy and compress all array linked to the model for name in self.get_list_of_array(): vec = getattr(self,name) setattr(new, name, compress(c,vec,axis=0)) # now, compute new.npart #new.npart = array([],int) #for i in range(len(self.npart)): # c = (new.tpe==i) # npart_i = sum(c.astype(int)) # new.npart = concatenate( (new.npart, npart_i ) ) # check #if len(new.pos)!= sum(new.npart): # pass # other vars new.init() return new def selecti(self,i,local=False): """ Return an N-body object that contain only particles having their index (not id) in i. 
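# Hedged usage sketch, not part of this patch: selectc() above keeps the
# particles for which the condition array is non-zero, on an existing Nbody
# object nb:
#
# inner = nb.selectc(nb.rxyz() < 10.0)      # particles inside r < 10
# fast  = nb.selectc(nb.vn()   > 100.0)     # particles faster than 100
# gas   = nb.selectc(nb.tpe   == 0)         # particles of type 0
# print inner.nbody, fast.nbody, gas.nbody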
i : vector containing indexes local : local selection (True) or global selection (False) """ new = Nbody(status='new',ftype=self.ftype[6:],local=local) # now, copy all var linked to the model for name in self.get_list_of_vars(): setattr(new, name, getattr(self,name)) # here, we create ptype on the fly (used to create new.npart) #self.ptype = array([],int) #for i in range(len(self.npart)): # self.ptype = concatenate( (self.ptype,ones(self.npart[i])*i) ) # now, copy and compress all array linked to the model for name in self.get_list_of_array(): vec = getattr(self,name) setattr(new, name, vec[i]) # now, compute new.npart #new.npart = array([],int) #for i in range(len(self.npart)): # c = (new.tpe==i) # npart_i = sum(c.astype(int)) # new.npart = concatenate( (new.npart, npart_i ) ) # check #if len(new.pos)!= sum(new.npart): # pass # other vars new.init() return new def select(self,i=0): """ Return an N-body object that contain only particles of type i """ import types if type(i)== int: return self.selectc(self.tpe==i) elif type(i)==types.ListType: types = i for j in types: c = c * (j==self.tpe) return self.selectc(c) else: return self def sub(self,n1=0,n2=None): """ Return an N-body object that have particles whith indicies in the range [n1:n2]. n1 : number of the first particule n2 : number of the last particule Note : the first particle is 0 """ if n1 == None: n1 = 0 if n2 == None: n2 = self.nbody if n2 <= n1: n2 = n1+1 num = arange(self.nbody) return self.selectc((num>=n1)*(num<=n2)) def reduc(self,n,mass=False): """ Return an N-body object that contain a fraction 1/n of particles. n : inverse of the fraction of particule to be returned """ c = where(fmod(arange(self.nbody),n).astype(int)==0,1,0) nb = self.selectc(c) if mass: nb.mass = nb.mass * n return nb def selectp(self,lst=None,file=None,reject=False,local=False,from_num=True): """ Return an N-body object that contain only particles with specific number id. The list of id's is given either by lst (nx1 int array) or by the name ("file") of a file containing the list of id's. lst : vector list (integer) reject : True/False : if True, reject particles in lst (default = False) local : local selection (True) or global selection (False) frum_num : if True, use self.num to select particules if False, use arange(self.nbody) """ if lst != None: lst = array(lst,int) if file != None: lst = [] f = open(file) while 1: try: lst.append(int(string.split(f.readline())[0])) except: break f.close() lst = array(lst,int) # 1) sort the list ys = sort(lst) # 2) sort index in current file if from_num: xs = sort(self.num) zs = take(arange(self.nbody),argsort(self.num)) # sort 0,1,2,n following xs (or self.num) else: xs = arange(self.nbody) # 3) apply mask on sorted lists (here, getmask need xs and ys to be sorted) m = getmask(xs.astype(int),ys.astype(int)) if reject: m = logical_not(m) # 4) revert mask, following zs inverse transformation if from_num: c = take(m,argsort(zs)) else: c = m new = self.selectc(c,local=local) return new def getindex(self,num): """ Return an array of index of a particle from its specific number id. The array is empty if no particle corresponds to the specific number id. num : Id of the particle """ idx = compress((self.num == num),arange(self.nbody)) if len(idx)==1: return idx[0] else: return idx ################################# # # add particles # ################################# def append(self,solf,do_not_sort=False,do_init_num=True): """ Add to the current N-body object, particles form the N-body object "new". 
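Example (illustrative; nb1 and nb2 are assumed to be two Nbody objects of the same ftype):

>>> nb1.append(nb2)     # nb1 now also contains the particles of nb2
>>> nb3 = nb1 + nb2     # same operation, but performed on a copy (uses __add__)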
solf : Nbody object """ if solf.ftype != self.ftype: raise "append Error","files have different type" return if solf.get_list_of_array() != self.get_list_of_array(): raise "append Error","files have different arrays" return # loop over all types self_npart = self.npart solf_npart = solf.npart if len(self_npart) != len(self_npart): print "append : files have different mxnpart !" sys.exit() # add array linked to the model names = self.get_list_of_array() for name in names: vec1 = getattr(self,name) vec2 = getattr(solf,name) ''' vec = array([],float32) if vec1.ndim == 1: vec.shape = (0,) else: vec.shape = (0,3) # here, we guarantee the order of particles according to npart for i in arange(len(self_npart)): e11 = sum((arange(len(self_npart)) < i) * self_npart,0) e21 = sum((arange(len(solf_npart)) < i) * solf_npart,0) vec = concatenate((vec,vec1[e11:e11+self_npart[i]],vec2[e21:e21+solf_npart[i]])) ''' vec = concatenate((vec1,vec2)) setattr(self, name, vec) # here, we sort the particles, according to tpe if do_not_sort: pass else: sequence = self.tpe.argsort() for name in names: vec = getattr(self,name) vec = take(vec,sequence,axis=0) setattr(self, name, vec) self.nbody = self.nbody + solf.nbody self.npart = self.get_npart() # needed by self.get_num() self.npart_tot = self.get_npart_tot() # needed by self.get_num() if do_init_num: self.num = self.get_num() self.init() def __add__(self,solf,do_not_sort=False): # first copy self new = deepcopy(self) # now, add solf new.append(solf,do_not_sort) return new ################################# # # sort particles # ################################# def sort(self,vec=None): ''' sort particles according to the vector vec vec : vector on which to sort (default=self.num) ''' new = Nbody(status='new',ftype=self.ftype[6:]) if vec==None: vec = self.num sequence = argsort(vec) # now, copy all var linked to the model for name in self.get_list_of_vars(): setattr(new, name, getattr(self,name)) # add array linked to the model for name in self.get_list_of_array(): setattr(new, name, take(getattr(self,name),sequence,axis=0)) #new.num = new.get_num() new.init() return new def sort_type(self): ''' Contrary to sort, this fonction sort particles respecting their type. 
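Example (illustrative; nb is assumed to be an existing Nbody object):

>>> nbs = nb.sort()        # new object, particles ordered by their id (self.num)
>>> nbt = nb.sort_type()   # idem, but the ordering is applied within each particle type block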
''' new = Nbody(status='new',ftype=self.ftype[6:]) # now, copy all var linked to the model for name in self.get_list_of_vars(): setattr(new, name, getattr(self,name)) # add array linked to the model for name in self.get_list_of_array(): #vec = take(getattr(self,name),sequence,axis=0) vec = array([],float32) vec1 = getattr(self,name) if vec1.ndim == 1: vec.shape = (0,) else: vec.shape = (0,3) # loop over all types npart = self.npart for i in arange(len(npart)): e11 = sum((arange(len(npart)) < i) * npart) sequence = argsort(self.num[e11:e11+npart[i]]) vec = concatenate((vec,take(vec1[e11:e11+npart[i]],sequence,axis=0))) setattr(new, name, vec) new.num = new.get_num() new.init() return new ################################# # # Tree and SPH functions # ################################# def InitSphParameters(self,DesNumNgb=33,MaxNumNgbDeviation=3): self.DesNumNgb = DesNumNgb self.MaxNumNgbDeviation = MaxNumNgbDeviation self.Density = None self.Hsml = None if not self.has_var('Tree'): self.Tree = None def setTreeParameters(self,Tree,DesNumNgb,MaxNumNgbDeviation): if Tree==None: self.Tree = Tree = self.getTree() if DesNumNgb==None: DesNumNgb = self.DesNumNgb else: self.DesNumNgb = DesNumNgb if MaxNumNgbDeviation==None: MaxNumNgbDeviation = self.MaxNumNgbDeviation else: self.MaxNumNgbDeviation = MaxNumNgbDeviation return Tree,DesNumNgb,MaxNumNgbDeviation def getTree(self,force_computation=False,ErrTolTheta=0.8): ''' Return a Tree object ''' if self.Tree!=None and force_computation==False: return self.Tree else: print "create the tree : ErrTolTheta=",ErrTolTheta # decide if we use tree or ptree npart = array(self.npart) if mpi.mpi_NTask()>1: print "%d : use ptree"%(mpi.mpi_ThisTask()) self.Tree = ptreelib.Tree(npart=npart,pos=self.pos,vel=self.vel,mass=self.mass,num=self.num,tpe=self.tpe) else: self.Tree = treelib.Tree(npart=npart,pos=self.pos,vel=self.vel,mass=self.mass,ErrTolTheta=ErrTolTheta) return self.Tree def get_rsp_approximation(self,DesNumNgb=None,MaxNumNgbDeviation=None,Tree=None): ''' Return an aproximation of rsp, based on the tree. 
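Example (illustrative sketch; assumes the sph parameters have been initialised, e.g. via InitSphParameters):

>>> hsml = nb.get_rsp_approximation(DesNumNgb=32)   # one approximate smoothing length per particle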
''' Tree,DesNumNgb,MaxNumNgbDeviation = self.setTreeParameters(Tree,DesNumNgb,MaxNumNgbDeviation) return Tree.InitHsml(DesNumNgb,MaxNumNgbDeviation) def ComputeSph(self,DesNumNgb=None,MaxNumNgbDeviation=None,Tree=None): ''' Compute self.Density and self.Hsml using sph approximation ''' Tree,DesNumNgb,MaxNumNgbDeviation = self.setTreeParameters(Tree,DesNumNgb,MaxNumNgbDeviation) if self.Hsml==None: if not self.has_array('rsp'): self.Hsml = self.get_rsp_approximation(DesNumNgb,MaxNumNgbDeviation,Tree) else: self.Hsml=self.rsp self.Density,self.Hsml = Tree.Density(self.pos,self.Hsml,DesNumNgb,MaxNumNgbDeviation) def ComputeDensityAndHsml(self,pos=None,Hsml=None,DesNumNgb=None,MaxNumNgbDeviation=None,Tree=None): ''' Compute Density and Hsml (for a specific place) ''' Tree,DesNumNgb,MaxNumNgbDeviation = self.setTreeParameters(Tree,DesNumNgb,MaxNumNgbDeviation) if pos==None: pos = self.pos if Hsml==None: Hsml = ones(len(pos)).astype(float32) Density,Hsml = Tree.Density(pos,Hsml,DesNumNgb,MaxNumNgbDeviation) return Density,Hsml def SphEvaluate(self,val,pos=None,vel=None,hsml=None,DesNumNgb=None,MaxNumNgbDeviation=None,Tree=None): ''' Return an sph evaluation of the variable var ''' Tree,DesNumNgb,MaxNumNgbDeviation = self.setTreeParameters(Tree,DesNumNgb,MaxNumNgbDeviation) if pos == None: pos = self.pos if vel == None: vel = self.vel if hsml == None: if self.Hsml==None: if not self.has_array('rsp'): self.Hsml = self.get_rsp_approximation(DesNumNgb,MaxNumNgbDeviation,Tree) else: self.Hsml=self.rsp hsml = self.Hsml if self.Density==None: if not self.has_array('rho'): self.Density = self.SphDensity(DesNumNgb,MaxNumNgbDeviation,Tree) else: self.Density=self.rho if type(val) == ndarray: val = Tree.SphEvaluate(pos,hsml,self.Density,val,DesNumNgb,MaxNumNgbDeviation) else: if val =='div': val = Tree.SphEvaluateDiv(pos,vel,hsml,self.Density,DesNumNgb,MaxNumNgbDeviation) elif val =='rot': val = Tree.SphEvaluateRot(pos,vel,hsml,self.Density,DesNumNgb,MaxNumNgbDeviation) elif val =='ngb': val = Tree.SphEvaluateNgb(pos,hsml,DesNumNgb,MaxNumNgbDeviation) return val ################################# # # sph functions # ################################# def weighted_numngb(self,num): ''' num = particle where to compute weighted_numngb see Springel 05 ''' def wk1(hinv3,u): KERNEL_COEFF_1=2.546479089470 KERNEL_COEFF_2=15.278874536822 wk = hinv3 * (KERNEL_COEFF_1 + KERNEL_COEFF_2 * (u - 1) * u * u) return wk def wk2(hinv3,u): KERNEL_COEFF_5=5.092958178941 wk = hinv3 * KERNEL_COEFF_5 * (1.0 - u) * (1.0 - u) * (1.0 - u) return wk def getwk(r,h): # we do not exclude the particle itself u = r/h hinv3 = 1./h**3 wk = where((u<0.5),wk1(hinv3,u),wk2(hinv3,u)) wk = where((r1: list_of_array = self.get_list_of_array() # loop over all particles type npart = self.npart new_npart = npart for i in range(len(npart)): #if i==0: nparts = mpi.mpi_allgather(npart[i]) nparts = array(nparts) if mpi.mpi_IsMaster(): ex_table = mpi.mpi_GetExchangeTable(nparts) ex_table = mpi.mpi_bcast(ex_table,0) else: ex_table = None ex_table = mpi.mpi_bcast(ex_table,0) # send particles for toTask in range(mpi.NTask): if ex_table[mpi.ThisTask,toTask] > 0: n_elt = ex_table[mpi.ThisTask,toTask] #print "%d send %d to %d"%(mpi.ThisTask,n_elt,toTask) # first_elt = first elt of the current block first_elt = sum((arange(len(new_npart)) < i) * new_npart) # update npart new_npart[i] = new_npart[i] - n_elt # loop over all vect erd,mass,num,pos,rho,rsp,u,vel for name in list_of_array: vec = getattr(self,name) sub_vec = vec[first_elt:first_elt+n_elt] if len(sub_vec) 
!= n_elt: print "redistribute error :" print "node %d should send len=%d got len=%d"%(mpi.ThisTask,n_elt,len(sub_vec)) sys.exit() mpi.mpi_send(name,toTask) mpi.mpi_send(sub_vec,toTask) #self.pos = concatenate( (self.pos[:first_elt],self.pos[first_elt+n_elt:]) ) setattr(self, name, concatenate( (vec[:first_elt],vec[first_elt+n_elt:]) ) ) # recieve particles for fromTask in range(mpi.NTask): if ex_table[fromTask,mpi.ThisTask] > 0: n_elt = ex_table[fromTask,mpi.ThisTask] #print "%d get %d from %d"%(mpi.ThisTask,n_elt,fromTask) # first_elt = first elt of the current block first_elt = sum((arange(len(new_npart)) < i) * new_npart) # update npart new_npart[i] = new_npart[i] + n_elt # loop over all vect for name in list_of_array: # first, check name send_name = mpi.mpi_recv(fromTask) if send_name != name: raise "Task %d FromTask %d, %s != %s"%(mpi.mpi_ThisTask(),fromTask,send_name,name) vec = getattr(self,name) sub_vec = mpi.mpi_recv(fromTask) if len(sub_vec) != n_elt: print "redistribute error :" print "node %d should recive len=%d got len=%d"%(mpi.ThisTask,n_elt,len(sub_vec)) sys.exit() #self.pos = concatenate( (vec[:first_elt],sub_vec,vec[first_elt:]) ) setattr(self, name, concatenate( (vec[:first_elt],sub_vec,vec[first_elt:]) ) ) self.init() def ExchangeParticles(self): ''' Exchange particles betwee procs, using peano-hilbert decomposition computed in ptree ''' if self.Tree==None: self.Tree = self.getTree() # get num and procs from the Tree num,procs = self.Tree.GetExchanges() # compute the transition table T H,bins = histogram(procs,arange(mpi.mpi_NTask())) T = mpi.mpi_AllgatherAndConcatArray(H) T.shape = (mpi.mpi_NTask(),mpi.mpi_NTask()) # loop over all numpy vectors list_of_array = self.get_list_of_array() # loop over all vect for name in list_of_array: if name != "num": setattr(self, name, mpi.mpi_ExchangeFromTable(T,procs,num,getattr(self,name),copy(self.num)) ) # do num at the end self.num = mpi.mpi_ExchangeFromTable(T,procs,num,self.num,copy(self.num)) self.init() def SendAllToAll(self): ''' Send all particles to all nodes at the end of the day, all nodes have the same nbody object ''' nbs = [] for i in xrange(mpi.NTask-1): prev = (mpi.ThisTask-i-1)%mpi.NTask next = (mpi.ThisTask+i+1)%mpi.NTask nbs.append(mpi.mpi_sendrecv(self,dest=next,source=prev)) for nbi in nbs: self = self + nbi return self ################################# # # specific parallel functions # ################################# def gather_pos(self): ''' Gather in a unique array all positions of all nodes. ''' return self.gather_vec(self.pos) def gather_vel(self): ''' Gather in a unique array all velocites of all nodes. ''' return self.gather_vec(self.vel) def gather_mass(self): ''' Gather in a unique array all mass of all nodes. ''' return self.gather_vec(self.mass) def gather_num(self): ''' Gather in a unique array all num of all nodes. ''' return self.gather_vec(self.num) def gather_vec(self,vec): ''' Gather in a unique array all vectors vec of all nodes. 
''' # here, we assume that we have a vector npart # giving the number of particles per type vec_all = array([],vec.dtype) if vec.ndim==1: vec_all.shape = (0,) else: vec_all.shape = (0,vec.shape[1]) i1 = 0 npart = self.npart for i in range(len(npart)): i2 = i1 + npart[i] if (i1!=i2): vec_all = concatenate((vec_all,mpi.mpi_AllgatherAndConcatArray(vec[i1:i2]))) i1 = i1 + npart[i] return vec_all ################################# # # graphical operations # ################################# def display(self,*arg,**kw): ''' Display the model ''' if kw.has_key('palette'): palette = kw['palette'] else: palette = None if kw.has_key('save'): save = kw['save'] else: save = None if kw.has_key('marker'): marker = kw['marker'] else: marker = None params = extract_parameters(arg,kw,self.defaultparameters) mat,matint,mn_opts,mx_opts,cd_opts = self.Map(params) if mpi.mpi_IsMaster(): if save != None: if os.path.splitext(save)[1] == ".fits": io.WriteFits(transpose(mat).astype(float32),save, None) return if palette!=None: mplot(matint,palette=palette,save=save,marker=marker) else: mplot(matint,save=save,marker=marker) def show(self,*arg,**kw): ''' Display the model this is an alias to display ''' self.display(*arg,**kw) def Map(self,*arg,**kw): ''' Return 2 final images (float and int) ''' params = extract_parameters(arg,kw,self.defaultparameters) mn_opts = [] mx_opts = [] cd_opts = [] if self.nbody==0 and mpi.mpi_NTask()==1: mat = zeros(params['shape'],float32) matint = mat.astype(int) mn_opts.append(params['mn']) mx_opts.append(params['mx']) cd_opts.append(params['cd']) return mat,matint,mn_opts,mx_opts,cd_opts # compute map mat = self.CombiMap(params) # set ranges matint,mn_opt,mx_opt,cd_opt = set_ranges(mat,scale=params['scale'],cd=params['cd'],mn=params['mn'],mx=params['mx']) mn_opts.append(mn_opt) mx_opts.append(mx_opt) cd_opts.append(cd_opt) # add contour if params['l_color'] != 0: matint = contours(mat,matint,params['l_n'],params['l_min'],params['l_max'],params['l_kx'],params['l_ky'],params['l_color'],params['l_crush']) # add box and ticks if params['b_weight'] != 0: matint = add_box(matint,shape=params['shape'],size=params['size'],center=None,box_opts=(params['b_weight'],params['b_xopts'],params['b_yopts'],params['b_color'])) return mat,matint,mn_opts,mx_opts,cd_opts def CombiMap(self,*arg,**kw): ''' Return an image in form of a matrix (nx x ny float array). Contrary to ComputeMap, CombiMap compose different output of ComputeMap. 
pos : position of particles (moment 0) sr : dispertion in r (with respect to xp) svr : dispertion in vr vxyr : mean velocity in the plane svxyr: dispertion in vxy vtr : mean tangential velocity in the plane svtr : dispertion in vt szr : ratio sigma z/sigma r ''' params = extract_parameters(arg,kw,self.defaultparameters) mode = params['mode'] #if mode == 'pos': # mat = self.ComputeMap(params) if mode == 'm': mat = self.ComputeMap(params) elif mode == 'sr': mat = self.ComputeSigmaMap(params,mode1='r',mode2='r2') elif mode == 'svr': mat = self.ComputeSigmaMap(params,mode1='vr',mode2='vr2') elif mode == 'svxyr': mat = self.ComputeSigmaMap(params,mode1='vxyr',mode2='vxyr2') elif mode == 'svtr': mat = self.ComputeSigmaMap(params,mode1='vtr',mode2='vtr2') elif mode == 'szr': # could be simplified m0 = self.ComputeMap(params,mode='m') m1 = self.ComputeMap(params,mode='vr') m2 = self.ComputeMap(params,mode='vr2') m1 = where(m0==0,0,m1) m2 = where(m0==0,0,m2) m0 = where(m0==0,1,m0) mat = m2/m0 - (m1/m0)**2 mat_sz = sqrt(numclip(mat,0,1e10)) m0 = self.ComputeMap(params,mode='m') m1 = self.ComputeMap(params,mode='vxyr') m2 = self.ComputeMap(params,mode='vxyr2') m1 = where(m0==0,0,m1) m2 = where(m0==0,0,m2) m0 = where(m0==0,1,m0) mat = m2/m0 - (m1/m0)**2 mat_sr = sqrt(numclip(mat,0,1e10)) mat_sz = where(mat_sr==0,0,mat_sz) mat_sr = where(mat_sr==0,1,mat_sr) mat = mat_sz/mat_sr elif mode == 'lum': mat = self.ComputeMap(params,mode='lum') else: mat = self.ComputeMeanMap(params,mode1=mode) return mat def ComputeMeanMap(self,*arg,**kw): """ Compute the mean map of an observable. """ params = extract_parameters(arg,kw,self.defaultparameters) if kw.has_key('mode1'): mode1 = kw['mode1'] else: raise "ComputeMeanMap :","you must give parameter mode1" m0 = self.ComputeMap(params,mode='0') m1 = self.ComputeMap(params,mode=mode1) m1 = where(m0==0,0,m1) m0 = where(m0==0,1,m0) mat = m1/m0 return mat def ComputeSigmaMap(self,*arg,**kw): """ Compute the sigma map of an observable. 
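The dispersion is built from three mass-weighted maps: with m0 the map of mode '0', m1 the map of mode1 and m2 the map of mode2, the result is sqrt(m2/m0 - (m1/m0)**2), clipped to non-negative values; cells with m0 == 0 are left at zero.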
""" params = extract_parameters(arg,kw,self.defaultparameters) if kw.has_key('mode1'): mode1 = kw['mode1'] else: raise "ComputeMeanMap","you must give parameter mode1" if kw.has_key('mode2'): mode2 = kw['mode2'] else: raise "ComputeMeanMap","you must give parameter mode2" m0 = self.ComputeMap(params,mode='0') m1 = self.ComputeMap(params,mode=mode1) m2 = self.ComputeMap(params,mode=mode2) m1 = where(m0==0,0,m1) m2 = where(m0==0,0,m2) m0 = where(m0==0,1,m0) mat = m2/m0 - (m1/m0)**2 mat = sqrt(numclip(mat,0,1e10)) return mat def ComputeMap(self,*arg,**kw): ''' Return an image in form of a matrix (nx x ny float array) obs : position of observer x0 : eye position xp : focal position alpha : angle of the head view : 'xy' 'xz' 'yz' eye : 'right' 'left' dist_eye : distance between eyes mode : mode of map space : pos or vel persp : 'on' 'off' clip : (near,far) size : (maxx,maxy) cut : 'yes' 'no' frsp : factor for rsp shape : shape of the map ''' params = extract_parameters(arg,kw,self.defaultparameters) obs = params['obs'] x0 = params['x0'] xp = params['xp'] alpha = params['alpha'] mode = params['mode'] view = params['view'] r_obs = params['r_obs'] eye = params['eye'] dist_eye = params['dist_eye'] foc = params['foc'] space = params['space'] persp = params['persp'] clip = params['clip'] size = params['size'] shape = params['shape'] cut = params['cut'] frsp = params['frsp'] filter_name = params['filter_name'] filter_opts = params['filter_opts'] # 0) if getvaltype(mode)=='normal': val = getval(self,mode=mode,obs=obs) # 1) get observer position if obs==None: obs = geo.get_obs(x0=x0,xp=xp,alpha=alpha,view=view,r_obs=r_obs) # 2) expose the model # !!! as in self.expose we use Nbody() this must be called by each Task nb,obs = self.expose(obs,eye,dist_eye,foc=foc,space=space) if self.nbody > 0: # 3) compute val if getvaltype(mode)=='in projection': val = getval(nb,mode=mode,obs=obs) # 4) projection transformation if persp == 'on': zp = - nb.pos[:,2] # save dist obs-point pos = geo.frustum(nb.pos,clip,size) else: pos = geo.ortho(nb.pos,clip,size) # 5) keep only particles in 1:1:1 if not self.has_array('rsp'): # bad !!! self.rsp = None if cut=='yes': if self.rsp!= None: if params['rendering']=='map': pos,(mass,rsp,val,zp) = geo.boxcut(pos,[self.mass,self.rsp,val,zp]) else: pos,(mass,rsp,val,zp) = geo.boxcut_segments(pos,[self.mass,self.rsp,val,zp]) else: if params['rendering']=='map': pos,(mass,val,zp) = geo.boxcut(pos,[self.mass,val,zp]) else: pos,(mass,val,zp) = geo.boxcut_segments(pos,[self.mass,val,zp]) rsp = None else: mass = self.mass rsp = self.rsp if len(pos)!=0: # 6) scale rsp and scale mass if frsp!= 0: if (rsp==None) or (sum(rsp)==0): rsp = ones(len(pos),float32) if persp == 'on': fact = 1/(zp+clip[0]) # rsp is distance dependant... rsp = rsp * fact rsp = rsp.astype(float32) # mass is distance dependant... 
mass = mass*fact**2 mass = mass.astype(float32) rsp = rsp*frsp # multiply with the factor self.log.write( "rsp : min = %10.5f max = %10.5f mean = %10.5f"%(min(rsp),max(rsp),rsp.mean()) ) rsp = numclip(rsp,0,100) self.log.write( "rsp : min = %10.5f max = %10.5f mean = %10.5f"%(min(rsp),max(rsp),rsp.mean()) ) rsp = rsp.astype(float32) else: rsp = None # 7) viewport transformation : (x,y) -> ((0,1),(0,1)) pos = geo.viewport(pos,shape=None) pos = pos.astype(float32) # 8) render : map or lines if params['rendering']=='map': if rsp != None: mat = mkmap2dsph(pos,mass,val,rsp,shape) else: mat = mkmap2d(pos,mass,val,shape) elif params['rendering']=='mapcubic': if rsp != None: mat = mkmap2dncub(pos,mass,val,rsp,shape) else: raise "rsp need to be defined" elif params['rendering']=='polygon': pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_polygon(mat,pos[:,0],pos[:,1],1) elif params['rendering']=='lines': pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_lines(mat,pos[:,0],pos[:,1],1) elif params['rendering']=='segments': pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_segments(mat,pos[:,0],pos[:,1],1,zp) elif params['rendering']=='points': pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_points(mat,pos[:,0],pos[:,1],1) elif params['rendering']=='polygon2': pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_polygonN(mat,pos[:,0],pos[:,1],1,2) elif params['rendering']=='polygon4': pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_polygonN(mat,pos[:,0],pos[:,1],1,4) elif params['rendering']=='polygon10': pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_polygonN(mat,pos[:,0],pos[:,1],1,10) elif params['rendering'][:8]=='polygon#': n = int(params['rendering'][8:]) pos = pos * array([params['shape'][0],params['shape'][1] ,0]) mat = zeros(params['shape'],float32) mat = draw_polygonN(mat,pos[:,0],pos[:,1],1,n) else: # compute a map if rsp != None: mat = mkmap2dsph(pos,mass,val,rsp,shape) else: mat = mkmap2d(pos,mass,val,shape) # there is no particles (after 5) else: mat = zeros(params['shape'],float32) # there is no particles else: mat = zeros(params['shape'],float32) # 9) sum mat over all proc #mat = mpi.mpi_allreduce(mat) # could be more efficient if only the master get the final mat mat = mpi.mpi_allreduce(mat) # 10) filter matrix if mpi.mpi_IsMaster(): if params['filter_name'] != None: mat = apply_filter(mat,name=filter_name,opt=filter_opts) return mat def ComputeObjectMap(self,*arg,**kw): ''' * * * IN DEVELOPPEMENT : allow to draw an object like a box, a grid... 
* * * Return an image in form of a matrix (nx x ny float array) obs : position of observer x0 : eye position xp : focal position alpha : angle of the head view : 'xy' 'xz' 'yz' eye : 'right' 'left' dist_eye : distance between eyes mode : mode of map space : pos or vel persp : 'on' 'off' clip : (near,far) size : (maxx,maxy) cut : 'yes' 'no' frsp : factor for rsp shape : shape of the map ''' # here, nb must represent a geometric object # ob # expose : -> must use ob instead of self # --> we can give explicitely pos and vel # ensuite, le nb est le bon params = extract_parameters(arg,kw,self.defaultparameters) obs = params['obs'] x0 = params['x0'] xp = params['xp'] alpha = params['alpha'] mode = params['mode'] view = params['view'] r_obs = params['r_obs'] eye = params['eye'] dist_eye = params['dist_eye'] foc = params['foc'] space = params['space'] persp = params['persp'] clip = params['clip'] size = params['size'] shape = params['shape'] cut = params['cut'] frsp = params['frsp'] filter_name = params['filter_name'] filter_opts = params['filter_opts'] # 0) if getvaltype(mode)=='normal': val = getval(self,mode=mode,obs=obs) # 1) get observer position if obs==None: obs = geo.get_obs(x0=x0,xp=xp,alpha=alpha,view=view,r_obs=r_obs) # 2) expose the model # !!! as in self.expose we use Nbody() this must be called by each Task nb,obs = self.expose(obs,eye,dist_eye,foc=foc,space=space) if self.nbody > 0: # 3) compute val if getvaltype(mode)=='in projection': val = getval(nb,mode=mode,obs=obs) # 4) projection transformation if persp == 'on': zp = - nb.pos[:,2] # save dist obs-point pos = geo.frustum(nb.pos,clip,size) else: pos = geo.ortho(nb.pos,clip,size) # 5) keep only particles in 1:1:1 if not self.has_array('rsp'): # bad !!! self.rsp = None if cut=='yes': if self.rsp!= None: pos,(mass,rsp,val,zp) = geo.boxcut(pos,[self.mass,self.rsp,val,zp]) else: pos,(mass,val,zp) = geo.boxcut(pos,[self.mass,val,zp]) rsp = None else: mass = self.mass rsp = self.rsp if len(pos)!=0: # 6) scale rsp and scale mass if frsp!= 0: if (rsp==None) or (sum(rsp)==0): rsp = ones(len(pos),float32) if persp == 'on': fact = 1/((zp-clip[0])+ 2*clip[0]) # rsp is distance dependant... rsp = rsp * fact rsp = rsp.astype(float32) # mass is distance dependant... mass = mass*fact**2 mass = mass.astype(float32) rsp = rsp*frsp # multiply with the factor self.log.write( "rsp : min = %10.5f max = %10.5f mean = %10.5f"%(min(rsp),max(rsp),rsp.mean()) ) rsp = numclip(rsp,0,100) self.log.write( "rsp : min = %10.5f max = %10.5f mean = %10.5f"%(min(rsp),max(rsp),rsp.mean()) ) rsp = rsp.astype(float32) else: rsp = None # 7) viewport transformation : (x,y) -> ((0,1),(0,1)) pos = geo.viewport(pos,shape=None) pos = pos.astype(float32) # 8) get the map #if rsp != None: # mat = mkmap2dsph(pos,mass,val,rsp,shape) #else: # mat = mkmap2d(pos,mass,val,shape) # # empty matrix mat = zeros(params['shape'],float32) #for po in pos: # i = int(po[0]*params['shape'][0]) # j = int(po[1]*params['shape'][1]) # mat[i,j]=255 pos = pos * [ params['shape'][0],params['shape'][1],1 ] x0 = pos[0][0] y0 = pos[0][1] x1 = pos[1][0] y1 = pos[1][1] mat = libutil.draw_line(mat,x0,x1,y0,y1,255) #mat = libutil.draw_cube(mat,pos,255) # there is no particles (after 5) else: mat = zeros(params['shape'],float32) # there is no particles else: mat = zeros(params['shape'],float32) # 9) sum mat over all proc mat = mpi.mpi_allreduce(mat) # may be inefficient, better use reduce ? 
# 10) filter matrix if mpi.mpi_IsMaster(): if params['filter_name'] != None: mat = apply_filter(mat,name=filter_name,opt=filter_opts) return mat def expose(self,obs,eye=None,dist_eye=None,foc=None,space='pos',pos=None,vel=None): """ Rotate and translate the object in order to be seen as if the observer was in x0, looking at a point in xp. obs : observer matrix eye : 'right' or 'left' dist_eye : distance between eyes (separation = angle) space : pos or vel foc : focal """ # create a minimal copy of self if pos!=None and vel!=None: obj = Nbody(status='new',p_name='none',pos=pos,vel=vel,mass=None,ftype='default') else: obj = Nbody(status='new',p_name='none',pos=self.pos,vel=self.vel,mass=None,ftype='default') if space=='vel': obj.pos = self.vel # first : put x0 at the origin obj.translate(- obs[0]) obs = obs - obs[0] # second : anti-align e1 with z obj.align2( axis1=obs[1],axis2=[0,0,-1]) obs = geo.align(obs,axis1=obs[1],axis2=[0,0,-1]) # third : align e3 with y obj.align2( axis1=obs[3],axis2=[0,1,0]) obs = geo.align(obs,axis1=obs[3],axis2=[0,1,0]) # fourth if eye is defined if eye=='right': if foc==None or foc==0: # simple translation (wee look at infini) obj.translate([-dist_eye,0,0]) # not /2 for compatibility with glups else: Robs = foc phi = -arctan(dist_eye) obj.rotate( angle=-phi,axis=[0,1,0],point=[0,0,-Robs]) elif eye=='left': if foc==None or foc==0: # simple translation (wee look at infini) obj.translate([+dist_eye,0,0]) # not /2 for compatibility with glups else: Robs = foc phi = -arctan(dist_eye) obj.rotate( angle=+phi,axis=[0,1,0],point=[0,0,-Robs]) return obj,obs ''' def getvxy(self,shape=(256,256),size=(30.,30.),center=(0.,0.,0.),view='xz',vn=8.,vmax=0.1,color=1): self.log.write( "the result may be incertain (in development)" ) # choice of the view if view=='xz': view=1 elif view=='xy': view=2 elif view=='yz': view=3 elif view!='xz'and view!='xy'and view!='yz': view=1 dx = mapone(self.pos,self.mass,self.vel[:,0],shape,size,center,view) * vn/vmax dy = - mapone(self.pos,self.mass,self.vel[:,2],shape,size,center,view) * vn/vmax # mask mask = fromfunction(lambda x,y: (fmod(x,vn) + fmod(y,vn))==0 ,shape) # points de depart x0 = indices(shape)[0] + int(vn/2.) y0 = indices(shape)[1] + int(vn/2.) # points d'arrivee x1 = x0 + dx.astype(int) y1 = y0 + dy.astype(int) # truncation x1 = numclip(x1,0,shape[0]) y1 = numclip(y1,0,shape[1]) # compress mask = mask*(x1!=x0)*(y1!=y0) mask = ravel(mask) x0 = compress(mask,ravel(x0)) x1 = compress(mask,ravel(x1)) y0 = compress(mask,ravel(y0)) y1 = compress(mask,ravel(y1)) # trace lines mat = zeros(shape,float32) color = array(color,int8)[0] for i in range(len(x0)): create_line(mat,x0[i],y0[i],x1[i],y1[i],color) create_line(mat,x0[i],y0[i],x0[i]+1,y0[i]+1,color) create_line(mat,x0[i],y0[i],x0[i]+1,y0[i] ,color) create_line(mat,x0[i],y0[i],x0[i] ,y0[i]+1,color) return mat.astype(int8) ''' ################################# # # 1d histograms routines # ################################# ########################### def Histo(self,bins,mode='m',space='R'): ########################### histo = self.CombiHisto(bins,mode=mode,space=space) # take the mean bins1 = bins[:-1] bins2 = bins[1:] bins = (bins1+bins2)/2. 
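# return the bin centres together with the histogram reduced over all MPI tasks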
return bins,mpi.mpi_allreduce(histo) ########################### def CombiHisto(self,bins,mode='m',space='R'): ########################### if mode == 'm': histo = self.ComputeHisto(bins,mode='0',space=space) elif mode == 'sz': histo = self.ComputeSigmaHisto(bins,mode1='z',mode2='z2',space=space) elif mode == 'svz': histo = self.ComputeSigmaHisto(bins,mode1='vz',mode2='vz2',space=space) elif mode == 'svt': histo = self.ComputeSigmaHisto(bins,mode1='vt',mode2='vt2',space=space) elif mode == 'svr': histo = self.ComputeSigmaHisto(bins,mode1='vr',mode2='vr2',space=space) elif mode == 'vt': histo = self.ComputeMeanHisto(bins,mode1='vt',space=space) elif mode == 'vr': histo = self.ComputeMeanHisto(bins,mode1='vr',space=space) elif mode == 'vz': histo = self.ComputeMeanHisto(bins,mode1='vz',space=space) else: print "unknown mode %s"%(mode) return histo ################################# def ComputeMeanHisto(self,bins,mode1,space): ################################# """ Compute the mean map of an observable. """ h0 = self.ComputeHisto(bins,mode='0',space=space) h1 = self.ComputeHisto(bins,mode=mode1,space=space) h1 = where(h0==0,0,h1) h0 = where(h0==0,1,h0) h = h1/h0 return h ################################# def ComputeSigmaHisto(self,bins,mode1,mode2,space): ################################# """ Compute the histogram of an observable. """ h0 = self.ComputeHisto(bins,mode='0',space=space) h1 = self.ComputeHisto(bins,mode=mode1,space=space) h2 = self.ComputeHisto(bins,mode=mode2,space=space) h1 = where(h0==0,0,h1) h2 = where(h0==0,0,h2) h0 = where(h0==0,1,h0) h = h2/h0 - (h1/h0)**2 h = sqrt(numclip(h,0,1e10)) return h ################################# def ComputeHisto(self,bins,mode,space): ################################# ''' Compute and histogram ''' # set space if space == 'R': x = self.rxy() elif space == 'r': x = self.rxyz() # set mode if mode == 'm' or mode=='0': v = self.mass elif mode == 'z': v = self.mass*self.z() elif mode == 'z2': v = self.mass*self.z()**2 elif mode == 'vz': v = self.mass*self.vz() elif mode == 'vz2': v = self.mass*self.vz()**2 elif mode == 'vt': v = self.mass*self.Vt() elif mode == 'vt2': v = self.mass*self.Vt()**2 elif mode == 'vr': v = self.mass*self.Vr() elif mode == 'vr2': v = self.mass*self.Vr()**2 else: print "unknown mode %s"%(mode) histo = whistogram(x.astype(float),v.astype(float),bins.astype(float)) return histo ############################################ # # Routines to get velocities from positions # ############################################ def Get_Velocities_From_Virial_Approximation(self,select=None,vf=1.,eps=0.1,UseTree=True,Tree=None,ErrTolTheta=0.5): ''' This routine does not work ! Do not use it, or check ! 
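(Sketch of the intended idea: a one-dimensional dispersion is estimated from the local potential assuming isotropy and virial equilibrium, sigma ~ sqrt(-vf*phi/3), and the velocities are then drawn from a Gaussian of that width.)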
''' if select!=None: nb_sph = self.select(select) else: nb_sph = self # build the Tree for nb self.getTree(force_computation=True,ErrTolTheta=ErrTolTheta) # compute potential pot = 0.5*self.TreePot(nb_sph.pos,eps) # virial approximation to get the velocities sigmasp = sqrt(-pot/3*vf) # compute accel acc = self.TreeAccel(nb_sph.pos,eps) pot = (acc[:,0]*nb_sph.pos[:,0] + acc[:,1]*nb_sph.pos[:,1] + acc[:,2]*nb_sph.pos[:,2]) # virial approximation to get the velocities sigmas = sqrt(-pot/3*vf) # avoid negative values sigmas = where( (pot>0),sigmasp, sigmas ) # generate velocities vx = sigmas*RandomArray.standard_normal([nb_sph.nbody]) vy = sigmas*RandomArray.standard_normal([nb_sph.nbody]) vz = sigmas*RandomArray.standard_normal([nb_sph.nbody]) nb_sph.vel = transpose(array([vx,vy,vz])).astype(float32) return nb_sph def Get_Velocities_From_AdaptativeSpherical_Grid(self,select=None,eps=0.1,n=1000,UseTree=True,Tree=None,phi=None,ErrTolTheta=0.5): ''' Computes velocities using the jeans equation in spherical coordinates. An adaptative grid is set automatically. ''' if select!=None: nb_sph = self.select(select) else: nb_sph = self # create the adaptiative grid and compute rho r = nb_sph.rxyz() a = r.argsort() x = take(nb_sph.pos[:,0],a) y = take(nb_sph.pos[:,1],a) z = take(nb_sph.pos[:,2],a) mass = take(nb_sph.mass,a) r = sqrt( x**2 + y**2 + z**2 ) n_bins = int((nb_sph.nbody+1)/n + 1) rs = [] rsmin = [] rsmax = [] rhos = [] ns = [] for i in xrange(n_bins): jmin = i*n jmax = i*n + n jmin = min(jmin,nb_sph.nbody-1) jmax = min(jmax,nb_sph.nbody-1) if jmin!=jmax: rr = r[jmin:jmax] mm = mass[jmin:jmax] rmean = rr.mean() rmin = rr.min() rmax = rr.max() rs.append(rmean) rsmin.append(rmin) rsmax.append(rmax) # density rho = sum(mm)/(4/3.*pi*(rmax**3-rmin**3)) rhos.append(rho) # number ns.append(len(rr)) r = array(rs) rsmin = array(rsmin) rsmax = array(rsmax) rho = array(rhos) dr = rsmax-rsmin nn = array(ns) # build the Tree for nb self.getTree(force_computation=True,ErrTolTheta=ErrTolTheta) # compute potential x = r y = zeros(len(r)) z = zeros(len(r)) pos = transpose(array([x,y,z])).astype(float32) phi = self.TreePot(pos,eps) # compute sigma sigma = libdisk.get_1d_Sigma_From_Rho_Phi(rho=rho,phi=phi,r=r,dr=dr) # generate velocities for all particles sigmas = lininterp1d(nb_sph.rxyz().astype(float32),r.astype(float32),sigma.astype(float32)) vx = sigmas*RandomArray.standard_normal([nb_sph.nbody]) vy = sigmas*RandomArray.standard_normal([nb_sph.nbody]) vz = sigmas*RandomArray.standard_normal([nb_sph.nbody]) nb_sph.vel = transpose(array([vx,vy,vz])).astype(float32) # here we should limit the speed according to max speed phis = lininterp1d(nb_sph.rxyz().astype(float32),r.astype(float32),phi.astype(float32)) vm = 0.95*sqrt(-2*phis) vn = nb_sph.vn() vf = where(vn>vm,vm/vn,1) vf.shape = (len(vf),1) nb_sph.vel = nb_sph.vel * vf # other info phi dphi = libgrid.get_First_Derivative(phi,r) vcirc = libdisk.Vcirc(r,dphi) stats = {} stats['r'] = r stats['nn'] = nn stats['phi'] = phi stats['rho'] = rho stats['sigma'] = sigma stats['vc'] = vcirc return nb_sph,phi,stats def Get_Velocities_From_Spherical_Grid(self,select=None,eps=0.1,nr=128,rmax=100.0,UseTree=True,Tree=None,phi=None,ErrTolTheta=0.5,g=None,gm=None,NoDispertion=False,omega=None): ''' Computes velocities using the jeans equation in spherical coordinates. 
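Example (illustrative; nb is assumed to be a roughly spherical model centred on the origin):

>>> nb_new, phi, stats = nb.Get_Velocities_From_Spherical_Grid(eps=0.1, nr=64, rmax=50.)
>>> stats['r'], stats['sigma'], stats['vc']   # radial grid, velocity dispersion and circular velocity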
''' if select!=None: nb_sph = self.select(select) else: nb_sph = self # build the Tree for nb self.getTree(force_computation=True,ErrTolTheta=ErrTolTheta) # create the grid G = libgrid.Spherical_1d_Grid(rmin=0,rmax=rmax,nr=nr,g=g,gm=gm) if phi==None: phi = G.get_PotentialMap(self,eps=eps,UseTree=UseTree) r = G.get_r() rho = G.get_DensityMap(nb_sph) nn = G.get_NumberMap(nb_sph) # dr dr = G.get_r(offr=1)-G.get_r(offr=0) # compute sigma sigma = libdisk.get_1d_Sigma_From_Rho_Phi(rho=rho,phi=phi,r=r,dr=dr) # correct sigma in case of rotation (we assume the rotation around z) if omega!=None: print "add rotation" e_jeans = 0.5*sigma*sigma e_rot = 0.5*r**2 * omega**2 e = e_jeans - e_rot if (e<0).any(): print "at some radius the kinetic specifig energy is less than zero\nYou should decrease omega." e = where(e<0,0,e) sigma = sqrt(2*e) # generate velocities for all particles sigmas = G.get_Interpolation(nb_sph.pos,sigma) if NoDispertion: vx = sigmas*ones(nb_sph.nbody) vy = sigmas*ones(nb_sph.nbody) vz = sigmas*ones(nb_sph.nbody) else: vx = sigmas*RandomArray.standard_normal([nb_sph.nbody]) vy = sigmas*RandomArray.standard_normal([nb_sph.nbody]) vz = sigmas*RandomArray.standard_normal([nb_sph.nbody]) nb_sph.vel = transpose(array([vx,vy,vz])).astype(float32) # do not spin # add rotation #if omega!=None: # nb_sph.spin(omega=array([0,0,omega])) # here we should limit the speed according to max speed phis = G.get_Interpolation(nb_sph.pos,phi) vm = 0.95*sqrt(-2*phis) vn = nb_sph.vn() vf = where(vn>vm,vm/vn,1) vf.shape = (len(vf),1) nb_sph.vel = nb_sph.vel * vf # other info phi dphi = libgrid.get_First_Derivative(phi,r) vcirc = libdisk.Vcirc(r,dphi) stats = {} stats['r'] = r stats['nn'] = nn stats['phi'] = phi stats['rho'] = rho stats['sigma'] = sigma stats['vc'] = vcirc return nb_sph,phi,stats def Get_Velocities_From_Cylindrical_Grid(self,select='disk',disk=('gas','disk'),eps=0.1,nR=32,nz=32,nt=2,Rmax=100,zmin=-10,zmax=10,params=[None,None,None],UseTree=True,Tree=None,Phi=None,ErrTolTheta=0.5,AdaptativeSoftenning=False,g=None,gm=None,NoDispertion=False): ''' Computes velocities using the jeans equation in cylindrical coordinates. 
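The velocity dispersions are controlled by the params argument, a list of three dictionaries [mode_sigma_z, mode_sigma_r, mode_sigma_p]; entries left to None fall back to the defaults used below. Example (illustrative; nb is assumed to contain a 'disk' component, and the Toomre parameter 1.5 is an arbitrary choice):

>>> params = [{"name": "jeans", "param": None},
...           {"name": "toomre", "param": 1.5},
...           {"name": "epicyclic_approximation", "param": None}]
>>> nb_d, Phi, stats = nb.Get_Velocities_From_Cylindrical_Grid(select='disk', params=params)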
''' mode_sigma_z = params[0] mode_sigma_r = params[1] mode_sigma_p = params[2] if params[0]==None: mode_sigma_z = {"name":"jeans","param":None} if params[1]==None: mode_sigma_r = {"name":"toomre","param":1.0} if params[2]==None: mode_sigma_p = {"name":"epicyclic_approximation","param":None} nb_cyl = self.select(select) # current component nb_dis = self.select(disk) # disk component, for Q computation # build the Tree for nb self.getTree(force_computation=True,ErrTolTheta=ErrTolTheta) # create the grid G = libgrid.Cylindrical_2drz_Grid(rmin=0,rmax=Rmax,nr=nR,zmin=zmin,zmax=zmax,nz=nz,g=g,gm=gm) R,z = G.get_rz() #################################### # compute Phi in a 2d rz grid #################################### # here, we could use Acc instead Phi = G.get_PotentialMap(self,eps=eps,UseTree=UseTree,AdaptativeSoftenning=AdaptativeSoftenning) Phi = libgrid.get_Symetrisation_Along_Axis(Phi,axis=1) #Accx,Accy,Accz = libgrid.get_AccelerationMap_On_Cylindrical_2dv_Grid(self,nR,nz,Rmax,zmin,zmax,eps=eps) #Ar = sqrt(Accx**2+Accy**2) #################################### # compute Phi (z=0) in a 2d rt grid #################################### Grt = libgrid.Cylindrical_2drt_Grid(rmin=0,rmax=Rmax,nr=nR,nt=nt,z=0,g=g,gm=gm) Accx,Accy,Accz = Grt.get_AccelerationMap(self,eps=eps,UseTree=UseTree,AdaptativeSoftenning=AdaptativeSoftenning) Ar = sqrt(Accx**2+Accy**2) Ar = sum(Ar,axis=1)/nt Rp,tp = Grt.get_rt() Phi0 = Phi[:,nz/2] # not used dPhi0 = Ar d2Phi0 = libgrid.get_First_Derivative(dPhi0,R) # density rho = G.get_DensityMap(nb_cyl,offz=-0.5) rho = libgrid.get_Symetrisation_Along_Axis(rho,axis=1) # number per bin nn = G.get_NumberMap(nb_cyl,offz=-0.5) Sden = G.get_SurfaceDensityMap(nb_cyl,offz=-0.5) Sdend = G.get_SurfaceDensityMap(nb_dis,offz=-0.5) # compute frequencies (in the plane) kappa = libdisk.Kappa(R,dPhi0,d2Phi0) omega = libdisk.Omega(R,dPhi0) vcirc = libdisk.Vcirc(R,dPhi0) nu = libdisk.Nu(z,Phi) # compute sigma_z if mode_sigma_z['name']=='jeans': R1,z1 = G.get_rz(offz=0) R2,z2 = G.get_rz(offz=1) dz = z2-z1 sigma_z = libdisk.get_2d_Sigma_From_Rho_Phi(rho=rho,Phi=Phi,z=z,dz=dz) sigma_z2 = sigma_z**2 elif mode_sigma_z['name']=='surface density': """sigma_z2 = pi*G*Sden*Hz""" print "mode sigma z : 'surface density', not implemented yet" sys.exit() # compute sigma_r if mode_sigma_r['name']=='epicyclic_approximation': beta2 = mode_sigma_r['param'] f = where( kappa**2>0 , (1./beta2) * (nu**2/kappa**2) , 1.0 ) f.shape = (nR,1) sigma_r2 = sigma_z2 * f elif mode_sigma_r['name']=='isothropic': sigma_r2 = sigma_z2 elif mode_sigma_r['name']=='toomre': Q = mode_sigma_r['param'] Gg = 1.0 sr = where(kappa>0,Q*3.36*Gg*Sdend/kappa,sigma_z[:,nz/2]) sr.shape = (nR,1) sigma_r2 = ones((nR,nz)) * sr sigma_r2 = sigma_r2**2 elif mode_sigma_r['name']=='constant': sr = mode_sigma_r['param'] sigma_r2 = ones((nR,nz)) * sr sigma_r2 = sigma_r2**2 # compute sigma_p if mode_sigma_p['name']=='epicyclic_approximation': f = where( omega**2>0 , (1/4.0) * (kappa**2/omega**2) , 1.0 ) f.shape = (nR,1) sigma_p2 = sigma_r2 * f elif mode_sigma_p['name']=='isothropic': sigma_p2 = sigma_z2 notok = True count = 0 while notok: count = count + 1 print "compute vm" # compute vm sr2 = sigma_r2[:,nz/2] # should not be only in the plane sp2 = sigma_p2[:,nz/2] # should not be only in the plane vc = vcirc # should not be only in the plane T1 = vc**2 T2 = + sr2 - sp2 T3 = where(Sden>0,R/Sden,0)* libgrid.get_First_Derivative(Sden*sr2,R) vm2 = T1 + T2 + T3 # if vm2 < 0 c = (vm2<0) if sum(c)>0: print "Get_Velocities_From_Cylindrical_Grid : vm2 < 0 for 
%d elements"%(sum(c)) ''' vm2 = where(c,0,vm2) dsr2 = where(c,(T1+T2+T3)/2.,0) # energie qu'il faut retirer a sr dsp2 = where(c,(T1+T2+T3)/2.,0) # energie qu'il faut retirer a sp # take energy from sigma_r and sigma_p sigma_r2 = transpose(transpose(sigma_r2) + dsr2) sigma_p2 = transpose(transpose(sigma_p2) + dsp2) ''' E = sr2 + sp2 + vm2 if sum(E<0) != 0: print "-----------------------------------------------------" for i in range(len(R)): print R[i],vc[i]**2,sr2[i],sp2[i],vm2[i],sigma_z[i,nz/2]**2 print "-----------------------------------------------------" print "Get_Velocities_From_Cylindrical_Grid : we are in trouble here..." raise "E<0" vm2 = where(c,E/3.,vm2) sr2 = where(c,E/3.,sr2) sp2 = where(c,E/3.,sp2) sigma_r2 = transpose(ones((nz,nR)) * sr2) sigma_p2 = transpose(ones((nz,nR)) * sp2) if count > 0: notok = False else: notok = False # old implementation #vm2 = where(c,T1,vm2) #dsr2 = where(c,-(T2+T3)/2.,0) #dsp2 = where(c,-(T2+T3)/2.,0) # check again c = (vm2<0).astype(int) nvm = sum(c) if sum(nvm)>0: print "WARNING : %d cells still have vm<0 !!!"%(nvm) print "Vc^2 < 0 !!!" vm2 = where(c,0,vm2) vm = where(vm2>0,sqrt(vm2),0) # generate velocities for all particles sigma_r2s = G.get_Interpolation(nb_cyl.pos,sigma_r2) sigma_p2s = G.get_Interpolation(nb_cyl.pos,sigma_p2) sigma_z2s = G.get_Interpolation(nb_cyl.pos,sigma_z2) sigma_rs = where(sigma_r2s>0,sqrt(sigma_r2s),0) sigma_ps = where(sigma_p2s>0,sqrt(sigma_p2s),0) sigma_zs = where(sigma_z2s>0,sqrt(sigma_z2s),0) vcircs = G.get_r_Interpolation(nb_cyl.pos,vcirc) vms = G.get_r_Interpolation(nb_cyl.pos,vm) if NoDispertion: vr = sigma_rs vp = sigma_ps*0 + vms vz = sigma_zs else: vr = sigma_rs*RandomArray.standard_normal([nb_cyl.nbody]) vp = sigma_ps*RandomArray.standard_normal([nb_cyl.nbody]) + vms vz = sigma_zs*RandomArray.standard_normal([nb_cyl.nbody]) vel = transpose(array([vr,vp,vz])).astype(float32) nb_cyl.vel = libutil.vel_cyl2cart(nb_cyl.pos,vel) # here we should limit the speed according to max speed phis = G.get_Interpolation(nb_cyl.pos,Phi) vmax = 0.95*sqrt(-2*phis) vn = nb_cyl.vn() vf = where(vn>vmax,vmax/vn,1) vf.shape = (len(vf),1) nb_cyl.vel = nb_cyl.vel * vf # some output sr = sigma_r = sqrt(sigma_r2[:,nz/2]) sp = sigma_p = sqrt(sigma_p2[:,nz/2]) sz = sigma_z = sqrt(sigma_z2[:,nz/2]) Q = where((Sden>0),sr*kappa/(3.36*Sdend),0) stats = {} stats['R'] = R stats['z'] = z stats['vc'] = vc stats['vm'] = vm stats['sr'] = sr stats['sp'] = sp stats['sz'] = sz stats['kappa'] = kappa stats['omega'] = omega stats['nu'] = nu stats['Sden'] = Sden stats['Sdend'] = Sdend stats['Q'] = Q #stats['Ar'] = Ar stats['rho'] = rho stats['phi'] = Phi stats['nn'] = nn stats['sigma_z']= sqrt(sigma_z2) stats['Phi0'] = Phi0 stats['dPhi0'] = dPhi0 stats['d2Phi0'] = d2Phi0 return nb_cyl,Phi,stats ############################################ # # evolution routines # ############################################ def IntegrateUsingRK(self,tstart=0,dt=1,dt0=1e-5,epsx=1.e-13,epsv=1.e-13): """ Integrate the equation of motion using RK78 integrator. 
tstart : initial time dt : interval time dt0 : inital dt epsx : position precision epsv : velocity precision tmin,tmax,dt,dtout,epsx,epsv,filename """ tend = tstart + dt self.pos,self.vel,self.atime,self.dt = nbdrklib.IntegrateOverDt(self.pos.astype(float),self.vel.astype(float),self.mass.astype(float),tstart,tend,dt,epsx,epsv) self.pos = self.pos.astype(float32) self.vel = self.vel.astype(float32) ################################# # # Thermodynamic functions # ################################# def U(self): ''' Return the gas specific energy of the model. The output is an nx1 float array. ''' return self.u def Rho(self): ''' Return the gas density of the model. The output is an nx1 float array. ''' try: a3inv = 1./self.atime**3 except: a3inv = 1. if self.unitsparameters.get('HubbleParam') == 1: self.log.write("assuming non cosmological simulation") a3inv = 1. try: rho = self.rho*a3inv return rho except: return self.rho def T(self): ''' Return the gas temperature of the model. The output is an nx1 float array. ''' gamma = self.unitsparameters.get('gamma') xi = self.unitsparameters.get('xi') ionisation = self.unitsparameters.get('ionisation') mu = thermodyn.MeanWeight(xi,ionisation) mh = ctes.PROTONMASS.into(self.localsystem_of_units) k = ctes.BOLTZMANN.into(self.localsystem_of_units) thermopars = {"k":k,"mh":mh,"mu":mu,"gamma":gamma} T = where((self.u>0),thermodyn.Tru(self.Rho(),self.u,thermopars),0) return T def MeanWeight(self): ''' Return the mean weight of a model, taking into account heating by UV source. The output is an nx1 float array. ''' xi = self.unitsparameters.get('xi') Redshift = 1./self.atime - 1. UnitDensity_in_cgs = self.localsystem_of_units.get_UnitDensity_in_cgs() UnitEnergy_in_cgs = self.localsystem_of_units.get_UnitEnergy_in_cgs() UnitMass_in_g = self.localsystem_of_units.get_UnitMass_in_g() HubbleParam = self.hubbleparam # 0) convert into cgs Density = self.rho.astype(float) *UnitDensity_in_cgs * (HubbleParam*HubbleParam) / self.atime**3 Egyspec = self.u.astype(float) *UnitEnergy_in_cgs/UnitMass_in_g # 1) compute mu MeanWeight,Lambda = coolinglib.cooling(Egyspec,Density,xi,Redshift) return MeanWeight.astype(float32) def Tmu(self): ''' Return the gas temperature of the model. The output is an nx1 float array. ''' gamma = self.unitsparameters.get('gamma') mh = ctes.PROTONMASS.into(self.localsystem_of_units) k = ctes.BOLTZMANN.into(self.localsystem_of_units) T = (gamma-1) *self.MeanWeight().astype(float) *mh/k * self.u return T.astype(float32) def A(self): ''' Return the gas entropy of the model. The output is an nx1 float array. ''' gamma = self.unitsparameters.get('gamma') xi = self.unitsparameters.get('xi') ionisation = self.unitsparameters.get('ionisation') mu = thermodyn.MeanWeight(xi,ionisation) mh = ctes.PROTONMASS.into(self.localsystem_of_units) k = ctes.BOLTZMANN.into(self.localsystem_of_units) thermopars = {"k":k,"mh":mh,"mu":mu,"gamma":gamma} A = where((self.u>0),thermodyn.Aru(self.Rho(),self.u,thermopars),0) return A def P(self): ''' Return the gas pressure of the model. The output is an nx1 float array. 
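The pressure is derived from Rho() and u through thermodyn.Pru, with gamma and the mean molecular weight taken from self.unitsparameters; particles with u <= 0 get P = 0.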
''' gamma = self.unitsparameters.get('gamma') xi = self.unitsparameters.get('xi') ionisation = self.unitsparameters.get('ionisation') mu = thermodyn.MeanWeight(xi,ionisation) mh = ctes.PROTONMASS.into(self.localsystem_of_units) k = ctes.BOLTZMANN.into(self.localsystem_of_units) thermopars = {"k":k,"mh":mh,"mu":mu,"gamma":gamma} P = where((self.u>0),thermodyn.Pru(self.Rho(),self.u,thermopars),0) return P def Tcool(self,coolingfile=None): ''' Return the cooling time of the model. The output is an nx1 float array. ''' gamma = self.unitsparameters.get('gamma') xi = self.unitsparameters.get('xi') ionisation = self.unitsparameters.get('ionisation') metalicity = self.unitsparameters.get('metalicity') hubbleparam= self.unitsparameters.get('HubbleParam') if coolingfile==None: coolingfile = self.unitsparameters.get('coolingfile') mu = thermodyn.MeanWeight(xi,ionisation) mh = ctes.PROTONMASS.into(self.localsystem_of_units) k = ctes.BOLTZMANN.into(self.localsystem_of_units) try: if self.hubbleparam != hubbleparam: self.log.write("Warning (Tcool): using hubbleparam=%f, but self.hubbleparam=%f"%(hubbleparam,self.hubbleparam)) except: pass thermopars = {"k":k,"mh":mh,"mu":mu,"gamma":gamma,"Xi":xi,"metalicity":metalicity,"hubbleparam":hubbleparam} tc,c = thermodyn.CoolingTime(self.Rho(),self.u,self.localsystem_of_units,thermopars,coolingfile) tc = where(c,tc,0) return tc def Ne(self): ''' Return the electron density of the model. The output is an nx1 float array. ''' xi = self.unitsparameters.get('xi') ionisation = self.unitsparameters.get('ionisation') mh = ctes.PROTONMASS.into(self.localsystem_of_units) thermopars = {"mh":mh,"Xi":xi,"ionisation":ionisation} ne = thermodyn.ElectronDensity(self.Rho(),thermopars) return ne def S(self): ''' Return the `entropy` of the model, defined as S = T * Ne^(1-gamma) The output is an nx1 float array. ''' gamma = self.unitsparameters.get('gamma') s = self.T()*self.Ne()**(1.-gamma) return s def Lum(self): ''' Return the luminosty of the model, defined as Lum = m*u/Tcool = m*Lambda/rho The output is an nx1 float array. 
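Example (illustrative; only meaningful for gas particles, and requires the cooling file set in unitsparameters, since Tcool() is used):

>>> lum = nb.Lum()   # one value per particle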
''' Lum = self.mass*self.u/self.Tcool() return Lum #################################################################################################################################### # # NBODY REDIRECTOR # #################################################################################################################################### if FORMATSDIR != None: formatsfiles = glob.glob(os.path.join(FORMATSDIR,'*.py')) def Nbody(*arg,**kw): """ The aim of this function is simply to return to the right class """ # default value nb = None if not kw.has_key('ftype'): kw['ftype']='default' # find the right file formatfile = os.path.join(FORMATSDIR,"%s.py"%kw['ftype']) try: formatsfiles.index(formatfile) except ValueError: print "format %s is unknown !"%kw['ftype'] print "%s does not exists"%formatfile sys.exit() # this does not work, because NbodyDefault is unknown #sys.path.append(FORMATSDIR) #__import__(kw['ftype'],) # instead, we use execfile(formatfile) # check class name class_name = "Nbody_%s"%kw['ftype'] try: dir().index(class_name) except ValueError: print "format %s is unknown !"%kw['ftype'] sys.exit() ## not very good class_name = eval(class_name) nb = class_name(*arg,**kw) nb._formatfile = formatfile return nb diff --git a/pNbody/mpi.py b/pNbody/mpi.py index 1a54f5a..283e397 100644 --- a/pNbody/mpi.py +++ b/pNbody/mpi.py @@ -1,1154 +1,1165 @@ # -*- coding: iso-8859-1 -*- from numpy import * import types import libutil import sys class myMPI: def __init__(self): pass try: from mpi4py import MPI comm = MPI.COMM_WORLD comm_self = MPI.COMM_SELF ThisTask = comm.Get_rank() NTask = comm.Get_size() Procnm = MPI.Get_processor_name() Rank = ThisTask except: MPI = myMPI() MPI.SUM = sum ThisTask = 0 NTask = 1 Rank = ThisTask Procnm = "" def mpi_IsMaster(): return ThisTask==0 def mpi_Rank(): return ThisTask def mpi_ThisTask(): return ThisTask def mpi_NTask(): return NTask def mpi_barrier(): if NTask>1: comm.barrier() else: pass ##################################### # # Output functions # ##################################### def mpi_iprint(msg,mode=None): ''' Synchronized print, including info on node. ''' msg = "[%d] : %s"%(ThisTask,msg) pprint(msg) def mpi_pprint(msg): ''' Synchronized print. ''' if NTask>1: MPI.pprint(msg) else: print msg def mpi_rprint(msg): ''' Rooted print. ''' if NTask>1: MPI.rprint(msg) else: print msg ##################################### # # communication functions # ##################################### def mpi_send(x,dest): ''' Send x to node dest. When there is only one is defined, it does nothing. ''' if NTask>1: return comm.send(x, dest = dest) else: pass def mpi_recv(source): ''' Return a variable sent by node \var{source}. When there is only one is defined, it does nothing. ''' if NTask>1: return comm.recv(source = source) else: pass def mpi_sendrecv(x,dest,source): ''' ''' if NTask>1: return comm.sendrecv(x,dest=dest,source = source) else: pass def mpi_reduce(x,root=0,op=MPI.SUM): """ Reduce x from all node only for root. When there is only one is defined, the function return x. """ if NTask>1: sx = comm.reduce(x,op=op,root=root) else: sx = x return sx def mpi_allreduce(x,op=MPI.SUM): """ Reduce x from all node for all nodes. When there is only one is defined, the function return x. """ if NTask>1: sx = comm.allreduce(x,op=op) else: sx = x return sx def mpi_bcast(x,root=0): ''' Broadcast from node root the variable x. When there is only one is defined, it simplay returns x. 
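Example (illustrative):

>>> x = mpi_bcast(x, root=0)   # after the call every task holds the value x had on task root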
''' if NTask>1: x = comm.bcast(x,root=root) else: x = x return x def mpi_gather(x,root=0): """ Gather x from all nodes to node dest. Returns a list. """ if NTask>1: x = comm.gather(x,root=root) else: x = [x] return x def mpi_allgather(x): """ Gather x from all to all. Returns a list. """ if NTask>1: x = comm.allgather(x) else: x = [x] return x def mpi_AllgatherAndConcatArray(vec): ''' AllGather array vec and concatenate it in a unique array (concatenation order is reversed). ''' if NTask>1: vec_all = array([],vec.dtype) if vec.ndim==1: vec_all.shape = (0,) else: vec_all.shape = (0,vec.shape[1]) if ThisTask==0: for Task in range(NTask-1,-1,-1): if Task != 0: v = comm.recv(source=Task) vec_all = concatenate((vec_all,v)) else: vec_all = concatenate((vec_all,vec)) else: comm.send(vec, dest = 0) # send to everybody xi = comm.bcast(vec_all) return xi else: return vec ##################################### # # array functions # ##################################### def mpi_sum(x): """ Sum elements of array x. """ if NTask>1: sx = comm.allreduce(sum(x),op=MPI.SUM) else: sx = sum(x) return sx def mpi_min(x): """ Minimum element of array x. """ if NTask>1: mx = comm.allreduce(min(x),op=MPI.MIN) else: mx = min(x) return mx def mpi_max(x): """ Maximum element of array x. """ if NTask>1: mx = comm.allreduce(max(x),op=MPI.MAX) else: mx = max(x) return mx def mpi_mean(x): """ Mean of elements of array x. """ if NTask>1: sm = comm.allreduce(sum(x),op=MPI.SUM) sn = comm.allreduce(len(x),op=MPI.SUM) mn = sm/sn else: mn = x.mean() return mn def mpi_len(x): """ Lenght of array x. """ if NTask>1: ln = comm.allreduce(len(x),op=MPI.SUM) else: ln = len(x) return ln def mpi_arange(n): """ Create an integer array containing elements from 0 to n spreaded over all nodes. """ if NTask>1: ns = array(comm.allgather(n)) c = ((NTask-1-ThisTask) > arange(len(ns)-1,-1,-1) ) i = sum(ns*c) v = arange(i,i+ns[ThisTask]) else: v = arange(n) return v def mpi_sarange(npart_all): """ Create an integer array containing elements from 0 to n, spreaded over all nodes. The repartition of elements and type of elements over nodes is given by the array npart_all """ npart = npart_all[ThisTask] if NTask>1: v = array([],int) o = 0 for i in arange(len(npart)): # number of particles of this type for this proc n = npart_all[ThisTask,i] n_tot = sum(npart_all[:,i]) ns = array(comm.allgather(n)) c = ((NTask-1-ThisTask) > arange(len(ns)-1,-1,-1) ) j = sum(ns*c) vj = arange(o+j,o+j+ns[ThisTask]) v = concatenate((v,vj)) o = o + n_tot else: n = sum(ravel(npart)) v = arange(n) return v def mpi_argmax(x): """ Find the arument of the amximum value in x. idx = (p,i) : where i = index in proc p """ if NTask>1: # 1) get all max and all argmax maxs = array(comm.allgather(max(x))) args = array(comm.allgather(argmax(x))) # 2) choose the right one p = argmax(maxs) # proc number i = args[p] # index in proc p else: p = 0 i = argmax(x) return p,i def mpi_argmin(x): """ Find the arument of the maximum value in x. idx = (p,i) : where i = index in proc p """ if NTask>1: # 1) get all mins and all argmins mins = array(comm.allgather(min(x))) args = array(comm.allgather(argmin(x))) # 2) choose the right one p = argmin(mins) # proc number i = args[p] # index in proc p else: p = 0 i = argmin(x) return p,i def mpi_getval(x,idx): """ Return the value of array x corresponding to the index idx. 
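Example (illustrative; x is the local part of an array distributed over the tasks):

>>> idx = mpi_argmax(x)          # (task, local index) of the global maximum
>>> xmax = mpi_getval(x, idx)    # the value x[i] from task p, returned on every task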
idx = (p,i) : where i = index in proc p equivalent to x[i] from proc p """ p = idx[0] i = idx[1] if NTask>1: # send to everybody xi = comm.bcast(x[i]) else: xi = x[i] return xi def mpi_histogram(x,bins): """ Return an histogram of vector x binned using binx. """ # histogram n = searchsorted(sort(x),bins) n = concatenate([n,[len(x)]]) hx = n[1:]-n[:-1] if NTask>1: hx = comm.allreduce(hx,op=MPI.SUM) return hx ##################################### # # exchange function # ##################################### ################################# def mpi_find_a_toTask(begTask,fromTask,ex_table,delta_n): ################################# ''' This function is used to find recursively an exange table ''' for toTask in range(begTask,NTask): if delta_n[toTask] > 0: # find a node who accept particles if delta_n[toTask] + delta_n[fromTask] >= 0: # can give all delta = -delta_n[fromTask] else: # can give only a fiew delta = +delta_n[toTask] ex_table[fromTask,toTask] = +delta ex_table[toTask,fromTask] = -delta delta_n[fromTask] = delta_n[fromTask] +delta delta_n[toTask] = delta_n[toTask] -delta if delta_n[fromTask] == 0: # everything has been distributed return ex_table else: return mpi_find_a_toTask(begTask+1,fromTask,ex_table,delta_n) ################################# def mpi_GetExchangeTable(n_i): ################################# ''' This function returns the exchange table ''' # n_i : initial repartition ntot = sum(n_i) if ntot==0: return zeros((NTask,NTask)) # final repartition n_f = zeros(NTask) for Task in range(NTask-1,-1,-1): n_f[Task] = ntot/NTask + ntot%NTask*(Task==0) # delta delta_n = n_f - n_i # find who gives what to who ex_table = zeros((NTask,NTask)) for fromTask in range(NTask): if delta_n[fromTask] < 0: # the node gives particles ex_table = mpi_find_a_toTask(0,fromTask,ex_table,delta_n) return ex_table ################################# def mpi_ExchangeFromTable(T,procs,ids,vec,num): ################################# ''' Exchange an array according to a transfer array T T : exchange table procs : list of processor (from Tree.GetExchanges()) ids : list of id (from Tree.GetExchanges()) vec : vector to exchange num : id correspondings to particles ''' # now, we have to send / recv SendPart = T[NTask-1-ThisTask,:] RecvPart = T[:,ThisTask] new_procs = array([]) new_vec = array([]) if vec.ndim == 1: #new_vec.shape = (0,3) pass elif vec.ndim == 2: new_vec.shape = (0,3) # send/recv (1) for i in range(NTask): if i > ThisTask: # here, we send to i N = T[NTask-1-ThisTask,i] comm.send(N,i) if N>0: to_procs = compress((procs==i),procs,axis=0) to_ids = compress((procs==i),ids,axis=0) to_vec = libutil.compress_from_lst(vec,num,to_ids) comm.send(to_vec,i) vec = libutil.compress_from_lst(vec,num,to_ids,reject=True) num = libutil.compress_from_lst(num,num,to_ids,reject=True) elif i < ThisTask: N = comm.recv(i) if N > 0: new_vec = concatenate((new_vec,comm.recv(i))) # send/recv (1) for i in range(NTask): if i < ThisTask: # here, we send to i N = T[NTask-1-ThisTask,i] comm.send(N,i) if N>0: to_procs = compress((procs==i),procs,axis=0) to_ids = compress((procs==i),ids,axis=0) to_vec = libutil.compress_from_lst(vec,num,to_ids) comm.send(to_vec,i) vec = libutil.compress_from_lst(vec,num,to_ids,reject=True) num = libutil.compress_from_lst(num,num,to_ids,reject=True) elif i > ThisTask: N = comm.recv(i) if N > 0: new_vec = concatenate((new_vec,comm.recv(i))) # check c = (new_procs!=ThisTask).astype(int) if sum(c)>0: print "here we are in trouble" sys.exit() return concatenate((vec,new_vec)) # 
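As an illustration of what the exchange table encodes (a hedged sketch, assuming a run on three tasks): with an initial repartition n_i = [10, 1, 1] the target repartition is [4, 4, 4], so task 0 hands 3 particles to task 1 and 3 to task 2.

        from pNbody import mpi

        T = mpi.mpi_GetExchangeTable([10, 1, 1])
        # T[fromTask, toTask] = +n means fromTask sends n particles to toTask,
        # and the mirror entry T[toTask, fromTask] holds -n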
!!!!!!!!!!!!!!!!!!!!!!! this has to be changed .... ##################################### # # io functions # ##################################### ################################# def mpi_ReadAndSendBlock(f,data_type,shape=None,byteorder=sys.byteorder,split=None): ################################# ''' Read and brodcast a binary block. data_type = int,float32,float or data_type = array shape = tuple ''' ##################### # read first header ##################### if ThisTask==0: try: nb1 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb1.byteswap(True) nb1 = nb1[0] except IndexError: raise "ReadBlockError" ##################### # read a tuple ##################### if type(data_type) == types.TupleType: if ThisTask==0: data = [] for tpe in data_type: if type(tpe) == int: val = f.read(tpe) else: bytes = dtype(tpe).itemsize val = fromstring(f.read(bytes),tpe) if sys.byteorder != byteorder: val.byteswap(True) val = val[0] data.append(val) # send the data if NTask > 1: if ThisTask==0: for Task in range(1,NTask): comm.send(data, dest = Task) else: data = comm.recv(source = 0) ##################### # read an array ##################### else: if split: if ThisTask==0: bytes_per_elt = data_type.bytes if shape != None: ndim = shape[1] else: ndim = 1 nelt = nb1/bytes_per_elt/ndim nleft = nelt nread = nelt/NTask for Task in range(NTask-1,-1,-1): if nleft < 2*nread and nleft > nread: nread = nleft nleft = nleft-nread data = f.read(nread*bytes_per_elt*ndim) shape = (nread,ndim) if Task==ThisTask: # this should be last data = fromstring(data,data_type) if sys.byteorder != byteorder: data.byteswap(True) data.shape=shape else: comm.send(data, dest = Task) comm.send(shape, dest = Task) else : data = comm.recv(source = 0) shape = comm.recv(source = 0) data = fromstring(data,data_type) if sys.byteorder != byteorder: data.byteswap(True) data.shape=shape else: if ThisTask==0: data = fromstring(f.read(nb1),data_type) if sys.byteorder != byteorder: data.byteswap(True) # send the data if NTask > 1: if ThisTask==0: for Task in range(1,NTask): comm.send(data, dest = Task) else: data = comm.recv(source = 0) ##################### # read second header ##################### if ThisTask==0: nb2 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb2.byteswap(True) nb2 = nb2[0] if nb1 != nb2: raise "ReadBlockError" # reshape if needed if split: # already done before pass else: if shape != None: data.shape=shape return data ################################# -def mpi_ReadAndSendArray(f,data_type,shape=None,byteorder=sys.byteorder,npart=None): +def mpi_ReadAndSendArray(f,data_type,shape=None,byteorder=sys.byteorder,npart=None,skip=False): ################################# ''' Read and Brodcast a binary block assuming it contains an array. - ''' - + ''' + oneD = False if len(shape)==1: shape = (shape[0],1) oneD = True if shape != None: n_dim = shape[1] data = array([],data_type) data.shape = (0,shape[1]) n_elts = shape[0] else: raise "mpi_ReadAndSendArray : var shape must be defined here." 
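For reference, the binary layout these io helpers parse is the usual Fortran/Gadget record format: a leading int32 byte count, the payload, and the same count repeated, which is why both headers are read and compared. A minimal serial sketch of one such read (hedged; plain numpy, no MPI):

        import numpy as np

        def read_record(f, data_type=np.float32):
            nb1 = np.fromstring(f.read(4), np.int32)[0]    # leading byte count
            data = np.fromstring(f.read(nb1), data_type)   # payload
            nb2 = np.fromstring(f.read(4), np.int32)[0]    # trailing byte count
            if nb1 != nb2:
                raise IOError('corrupted block')
            return data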
#n_dim = 1 #data = array([],data_type) #data.shape = (0,) nbytes_per_elt = dtype(data_type).itemsize * n_dim n_left = nbytes_per_elt * n_elts # check npart if npart != None: ntype = len(npart) else: ntype = 1 npart = array([n_elts]) # this may be wrong in 64b, as nb1 is not equal to the number of bytes in block #if sum(npart,0) != n_elts: # raise "We are in trouble here : sum(npart)=%d n_elts=%d"%(sum(npart,0),n_elts) ##################### # read first header ##################### if ThisTask==0: try: nb1 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb1.byteswap(True) nb1 = nb1[0] # this may be wrong in 64b, as nb1 is not equal to the number of bytes in block #if nb1 != n_left: # raise "We are in trouble here : nb1=%d n_left=%d"%(nb1,n_left) except IndexError: raise "ReadBlockError" ##################### # read data ##################### - if ThisTask == 0: - - for i in range(ntype): + if ThisTask == 0: + - n_write = libutil.get_n_per_task(npart[i],NTask) - - for Task in range(NTask-1,-1,-1): - - sdata = f.read(nbytes_per_elt*n_write[Task]) - - # send block for a slave - if Task != 0: - comm.send(sdata, dest = Task) - # keep block for the master - else: - sdata = fromstring(sdata,data_type) - if shape!=None: - sdata.shape = (n_write[Task],shape[1]) - data = concatenate((data,sdata)) + if skip: + nbytes = sum(npart)*nbytes_per_elt + print " skipping %d bytes... "%(nbytes) + f.seek(nbytes,1) + data = None + + else: + for i in range(ntype): + + n_write = libutil.get_n_per_task(npart[i],NTask) + + for Task in range(NTask-1,-1,-1): + + sdata = f.read(nbytes_per_elt*n_write[Task]) + + # send block for a slave + if Task != 0: + comm.send(sdata, dest = Task) + # keep block for the master + else: + sdata = fromstring(sdata,data_type) + if shape!=None: + sdata.shape = (n_write[Task],shape[1]) + data = concatenate((data,sdata)) + + else: - - for i in range(ntype): - sdata = comm.recv(source = 0) - sdata = fromstring(sdata,data_type) - if shape!=None: - ne = len(sdata)/shape[1] - sdata.shape = (ne,shape[1]) - - data = concatenate((data,sdata)) - - - if oneD: + if skip: + data=None + else: + for i in range(ntype): + sdata = comm.recv(source = 0) + sdata = fromstring(sdata,data_type) + if shape!=None: + ne = len(sdata)/shape[1] + sdata.shape = (ne,shape[1]) + + data = concatenate((data,sdata)) + + if oneD and data!=None: data.shape = (data.shape[0],) ##################### # read second header ##################### if ThisTask==0: nb2 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb2.byteswap(True) nb2 = nb2[0] if nb1 != nb2: raise "ReadBlockError" return data ################################# def mpi_GatherAndWriteArray(f,data,byteorder=sys.byteorder,npart=None): ################################# ''' Gather and array and write it in a binary block. 
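A hedged round-trip sketch of the pair mpi_GatherAndWriteArray / mpi_ReadAndSendArray (the file name, pos and the particle counts are illustrative):

        from pNbody import mpi
        import numpy as np

        pos = np.zeros((npart_local, 3), np.float32)   # local slice on each task

        # only the master touches the file; the other tasks may pass f=None
        if mpi.mpi_IsMaster():
            f = open('block.dat', 'wb')
        else:
            f = None
        mpi.mpi_GatherAndWriteArray(f, pos)            # master writes one framed block
        if mpi.mpi_IsMaster():
            f.close()
            f = open('block.dat', 'rb')
        pos = mpi.mpi_ReadAndSendArray(f, np.float32, shape=(ntot, 3))
        # with skip=True the block is seeked over and None is returned instead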
data = array shape = tuple ''' # check npart if npart != None: pass else: npart = array([len(data)]) data_nbytes = mpi_allreduce(data.nbytes) if ThisTask==0: nbytes = array([data_nbytes],int32) if sys.byteorder != byteorder: nbytes.byteswap(True) # write header 1 f.write(nbytes.tostring()) # loop over particles type for i in range(len(npart)): n_write = libutil.get_n_per_task(npart[i],NTask) if ThisTask==0: for Task in range(NTask-1,-1,-1): if Task != 0: sdata = comm.recv(source = Task) else: i1 = sum(npart[:i]) i2 = i1+npart[i] sdata = data[i1:i2] if sys.byteorder != byteorder: sdata.byteswap(True) f.write(sdata.tostring()) else: i1 = sum(npart[:i]) i2 = i1+npart[i] comm.send(data[i1:i2], dest = 0) if ThisTask==0: # write header 2 f.write(nbytes.tostring()) ################################# def mpi_OldReadAndSendArray(f,data_type,shape=None,skip=None,byteorder=sys.byteorder,nlocal=None): ################################# ''' Read and Brodcast a binary block assuming it contains an array. The array is splitted acroding to the variable nlocal. data_type = array type shape = tuple nlocal : array NTask x Npart array NTask ''' ##################### # read first header ##################### if ThisTask==0: try: nb1 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb1.byteswap(True) nb1 = nb1[0] except IndexError: raise "ReadBlockError" ##################### # read an array ##################### if nlocal.ndim != 2: raise "ReadAndSendArray error","nlocal must be of rank 2" else: ntype = nlocal.shape[1] if shape != None: ndim = shape[1] data = array([],data_type) data.shape = (0,shape[1]) else: ndim = 1 data = array([],data_type) data.shape = (0,) nbytes_per_elt = data_type.bytes * ndim if ThisTask == 0: for i in range(ntype): for Task in range(NTask-1,-1,-1): sdata = f.read(nbytes_per_elt*nlocal[Task,i]) # send block for a slave if Task != 0: comm.send(sdata, dest = Task) # keep block for the master else: sdata = fromstring(sdata,data_type) if shape!=None: sdata.shape = (nlocal[ThisTask,i],shape[1]) data = concatenate((data,sdata)) else: for i in range(ntype): sdata = comm.recv(source = 0) sdata = fromstring(sdata,data_type) if shape!=None: sdata.shape = (nlocal[ThisTask,i],shape[1]) data = concatenate((data,sdata)) ##################### # read second header ##################### if ThisTask==0: nb2 = fromstring(f.read(4),int32) if sys.byteorder != byteorder: nb2.byteswap(True) nb2 = nb2[0] if nb1 != nb2: raise "ReadBlockError" return data ################################# def mpi_OldGatherAndWriteArray(f,data,byteorder=sys.byteorder,nlocal=None): ################################# ''' Gather and array and write it in a binary block. 
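These Old* variants split the block with an explicit nlocal array instead of npart; a hedged sketch of its expected shape (values illustrative):

        import numpy as np

        # nlocal[Task, type] = number of particles of each type held by each task
        nlocal = np.array([[100, 50],      # task 0
                           [100, 50]])     # task 1
        # the block is then written type by type, gathering from task NTask-1 down to task 0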
data = array shape = tuple ''' if nlocal.ndim != 2: raise "ReadAndSendArray error","nlocal must be of rank 2" else: ntype = nlocal.shape[1] data_size = mpi_allreduce(data.size()) if ThisTask==0: nbytes = array([data.type().bytes*data_size],int32) if sys.byteorder != byteorder: nbytes.byteswap(True) # write header 1 f.write(nbytes.tostring()) # loop over particles type i1 = 0 for i in range(ntype): if ThisTask==0: for Task in range(NTask-1,-1,-1): if Task != 0: sdata = comm.recv(source = Task) else: i2 = i1 + nlocal[Task,i] sdata = data[i1:i2] i1 = i2 if sys.byteorder != byteorder: sdata.byteswap(True) f.write(sdata.tostring()) else: i2 = i1 + nlocal[ThisTask,i] comm.send(data[i1:i2], dest = 0) i1 = i2 if ThisTask==0: # write header 2 f.write(nbytes.tostring()) diff --git a/scripts/gmkgmov b/scripts/gmkgmov index 9860418..e3f7733 100755 --- a/scripts/gmkgmov +++ b/scripts/gmkgmov @@ -1,113 +1,120 @@ #!/usr/bin/env python # -*- coding: iso-8859-1 -*- import matplotlib matplotlib.use('Agg') from optparse import OptionParser import os,sys from pNbody import Mkgmov from pNbody import mpi def parse_options(): usage = "usage: %prog [options] file" parser = OptionParser(usage=usage) parser.add_option("-p", action="store", dest="parameterfile", type="string", default = None, help="parameterfile file", metavar=" FILE") parser.add_option("--format", action="store", dest="format", type="string", default = None, help="output file format", metavar=" FILE") parser.add_option("--imdir", action="store", dest="imdir", type="string", default = None, help="outputdirectory for fits files", metavar=" DIRECTORY") + parser.add_option("-i", + action="store", + dest="ifile", + type="int", + default = 0, + help="number of first file", + metavar=" INT") parser.add_option("--pio", action="store_true", dest="pio", default = False, help="parallele io", metavar=" BOOL") parser.add_option("--compress", action="store_true", dest="compress", default = True, help="compress output files", metavar=" BOOL") (options, args) = parser.parse_args() files = args return files,options ####################################################################### # # M A I N # ####################################################################### files = None movie = None opt = None if mpi.mpi_IsMaster(): ############################## # parse options and check dirs ############################## files,opt = parse_options() - movie = Mkgmov.Movie(parameterfile=opt.parameterfile,format=opt.format,imdir=opt.imdir,timesteps=None,pio=opt.pio,compress=opt.compress) + movie = Mkgmov.Movie(parameterfile=opt.parameterfile,format=opt.format,imdir=opt.imdir,timesteps=None,pio=opt.pio,compress=opt.compress,ifile=opt.ifile) # broadcast files and parameters files = mpi.mpi_bcast(files,root=0) opt = mpi.mpi_bcast(opt,root=0) movie = mpi.mpi_bcast(movie,root=0) sys.path = mpi.mpi_bcast(sys.path,root=0) ################################################################################### ################################################################################### ## ## main loop over all files ## ################################################################################### ################################################################################### for ifile,file in enumerate(files): movie.dumpimage(file=file) diff --git a/src/iclib/iclib.c b/src/iclib/iclib.c index 7b46d55..abe3560 100644 --- a/src/iclib/iclib.c +++ b/src/iclib/iclib.c @@ -1,1110 +1,1110 @@ #include #include #include #define TWOPI 6.2831853071795862 float f_fct(float r, 
PyArrayObject * fct, float Rmax) { float f,y1,y2,ix; int x1,x2; int n; /* interpolate */ n = fct->dimensions[0]; ix = (r/Rmax*n); if (ix <= 0) f = *(float*)(fct->data + 0*(fct->strides[0])); else { if (ix >= (n-1)) { f = *(float*)(fct->data + (n-1)*(fct->strides[0])); } else { x1 = (int)ix; x2 = x1+1; y1 = *(float*)(fct->data + (x1)*(fct->strides[0])); y2 = *(float*)(fct->data + (x2)*(fct->strides[0])); f = (ix - x1)/(x2-x1)*(y2-y1) + y1; } } return f; } float ValFromVect(float x, PyArrayObject * xs, PyArrayObject * ys) { /* for a given x, return the interpolated corresponding y (from ys) x is assumed to be linear */ float f,y1,y2; int x1,x2,nx; float xmin,xmax; float ix; /* interpolate */ nx = xs->dimensions[0]; xmin = *(float*)(xs->data); xmax = *(float*)(xs->data + (xs->dimensions[0]-1)*(xs->strides[0])); ix = (x-xmin)/(xmax-xmin) * (nx-1); x1 = (int)ix; x2 = x1+1; if ((ix < 0) || (x1<0)) { f = *(float*)(ys->data + 0*(ys->strides[0])); return f; } if ((ix > (nx-1)) || (x2>(nx-1))) { f = *(float*)(ys->data + (ys->dimensions[0]-1)*(ys->strides[0])); return f; } /* here, we can interpolate */ y1 = *(float*)(ys->data + (x1)*(ys->strides[0])); y2 = *(float*)(ys->data + (x2)*(ys->strides[0])); f = (ix - x1)/(x2-x1)*(y2-y1) + y1; printf("%g %g %d %g - %g %g %d\n",x,ix,x1,y1,xmin,xmax,nx); return f; } float ValFromVect2(float x, PyArrayObject * xs, PyArrayObject * ys) { /* !!! here, xs is not linear !!! */ float f,y1,y2; float v1,v2; int x1,x2,nx,i; float xmin,xmax; float ix; /* interpolate */ nx = xs->dimensions[0]; xmin = *(float*)(xs->data); xmax = *(float*)(xs->data + (xs->dimensions[0]-1)*(xs->strides[0])); if (x < xmin) { f = *(float*)(ys->data + 0*(ys->strides[0])); return f; } if (x > xmax) { f = *(float*)(ys->data + (ys->dimensions[0]-1)*(ys->strides[0])); return f; } /* here, we need to loop in order to find x1,x2*/ for (i=0;i<(xs->dimensions[0]-1);i++) { x1 = i; x2 = i+1; v1 = *(float*)(xs->data + (x1)*(xs->strides[0])); v2 = *(float*)(xs->data + (x2)*(xs->strides[0])); if ((v1<=x) && (xdata + (x1)*(ys->strides[0])); y2 = *(float*)(ys->data + (x2)*(ys->strides[0])); f = (float)(x - v1)/(float)(v2-v1)*(y2-y1) + y1; } } return f; } /*********************************/ /* generic_Mx1D */ /*********************************/ static PyObject * iclib_generic_Mx1D(self, args) PyObject *self; PyObject *args; { PyArrayObject *x,*xs,*Mx; PyArrayObject *rand1; PyObject *verbose; float xmax; int n,irand; int i; npy_intp ld[1]; float XX; float MxMax; float rnd; /* parse arguments */ if (!PyArg_ParseTuple(args, "ifOOOOOO", &n,&xmax,&xs,&Mx,&rand1,&verbose)) return NULL; /* create output */ ld[0]=n; x = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_FLOAT); MxMax = ValFromVect(xmax,xs,Mx); for (i = 0; i < n; i++) { /* number between 0 and 1 */ //rnd = (float)random()/(float)RAND_MAX; rnd = *(float*)(rand1->data + i*(rand1->strides[0])); /* find the corresponding radius (x may be nonlinear) */ XX = ValFromVect2(rnd*MxMax,Mx,xs); if(verbose==Py_True) if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); *(float*)(x->data + i*(x->strides[0])) = XX ; } return PyArray_Return(x); } /*********************************/ /* generic_Mx */ /*********************************/ static PyObject * iclib_generic_Mx(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos,*xs,*Mx; PyArrayObject *rand1,*rand2,*rand3; PyObject *verbose; float xmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; float XX,YY,ZZ; float MxMax; float rnd; /* parse arguments */ if (!PyArg_ParseTuple(args, "ifOOOOOO", 
&n,&xmax,&xs,&Mx,&rand1,&rand2,&rand3,&verbose)) return NULL; /* create output */ ld[0]=n; ld[1]=3; pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ //srandom(irand); MxMax = ValFromVect(xmax,xs,Mx); /* normalize Mr */ //for (i = 0; i < Mx->dimensions[0]; i++) // { // *(float*)(Mx->data + i*(Mx->strides[0])) = *(float*)(Mx->data + i*(Mx->strides[0]))/MxMax ; // } for (i = 0; i < n; i++) { /* number between 0 and 1 */ //rnd = (float)random()/(float)RAND_MAX; rnd = *(float*)(rand1->data + i*(rand1->strides[0])); /* find the corresponding radius (x may be nonlinear) */ - XX = ValFromVect2(rnd*MxMax,Mx,xs); + XX = ValFromVect2(rnd*MxMax,Mx,xs); YY = *(float*)(rand2->data + i*(rand2->strides[0])); ZZ = *(float*)(rand3->data + i*(rand2->strides[0])); if(verbose==Py_True) if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); *(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = XX ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = YY ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = ZZ ; } return PyArray_Return(pos); } /*********************************/ /* generic_alpha */ /*********************************/ float generic_alpha_density(float a, float e, float r) { return pow((r + e),a); } static PyObject * iclib_generic_alpha(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos; float a,e,rmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; float URHOD0,CTHE,STHE,PHI,CX,CY,RR,RHO,R; float XX,YY,ZZ; float EPS; float DPI; /* parse arguments */ if (!PyArg_ParseTuple(args, "ifffi", &n,&a,&e,&rmax,&irand)) return NULL; /* create output */ ld[0]=n; ld[1]=3; //pos = (PyArrayObject *) PyArray_FromDims(2,ld,PyArray_FLOAT); pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ srandom(irand); EPS = 1e-30; EPS = 0; DPI = 8.*atan(1.); URHOD0 = 1./(generic_alpha_density(a,e,0.)+EPS); for (i = 0; i < n; i++) { do { RR = pow( (float)random()/(float)RAND_MAX ,1.0/3.0); PHI = DPI*(float)random()/(float)RAND_MAX; CTHE = 1.-2.*(float)random()/(float)RAND_MAX ; STHE = sqrt(1.-CTHE*CTHE); XX = RR*cos(PHI)*STHE; YY = RR*sin(PHI)*STHE; ZZ = RR*CTHE; R = sqrt( XX*XX + YY*YY + ZZ*ZZ ); RHO = URHOD0*generic_alpha_density(a,e,rmax*R); } while(RHO < (float)random()/(float)RAND_MAX); if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); *(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = rmax*XX ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = rmax*YY ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = rmax*ZZ ; } return PyArray_Return(pos); } /*********************************/ /* generic_Mr */ /*********************************/ static PyObject * iclib_generic_Mr(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos,*rs,*Mr; PyArrayObject *rand1,*rand2,*rand3; PyObject *verbose; float rmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; float CTHE,STHE,PHI,RR; float XX,YY,ZZ; float DPI,MrMax; float rnd; /* parse arguments */ if (!PyArg_ParseTuple(args, "ifOOOOOO", &n,&rmax,&rs,&Mr,&rand1,&rand2,&rand3,&verbose)) return NULL; /* create output */ ld[0]=n; ld[1]=3; pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ //srandom(irand); DPI = 8.*atan(1.); MrMax = ValFromVect(rmax,rs,Mr); /* normalize Mr */ //for (i = 0; i < Mr->dimensions[0]; i++) // { // *(float*)(Mr->data + i*(Mr->strides[0])) = *(float*)(Mr->data + i*(Mr->strides[0]))/MrMax ; // } for (i = 0; i < n; i++) { /* number between 0 and 1 */ //rnd = 
(float)random()/(float)RAND_MAX; rnd = *(float*)(rand1->data + i*(rand1->strides[0])); /* find the corresponding radius (x may be nonlinear) */ RR = ValFromVect2(rnd*MrMax,Mr,rs); rnd = *(float*)(rand2->data + i*(rand2->strides[0])); PHI = DPI*rnd; rnd = *(float*)(rand3->data + i*(rand3->strides[0])); CTHE = 1.-2.*rnd ; STHE = sqrt(1.-CTHE*CTHE); XX = RR*cos(PHI)*STHE; YY = RR*sin(PHI)*STHE; ZZ = RR*CTHE; if(verbose==Py_True) if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); *(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = XX ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = YY ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = ZZ ; } return PyArray_Return(pos); } /*********************************/ /* exponential_disk */ /*********************************/ float RHO1(float X) { /* first deriv of TM1 */ return X*exp(-X); } float TM1(float X) { return 1.0 - (1.0+X)*exp(-X); } static PyObject * iclib_exponential_disk(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos; float Hr,Hz,Rmax,Zmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; int k; float XM,xx,D; float R,PHI; float x0; /* parse arguments */ if (!PyArg_ParseTuple(args, "iffffi", &n,&Hr,&Hz,&Rmax,&Zmax,&irand)) return NULL; /* create output */ //pos = (PyArrayObject *) PyArray_FromDims(as->nd,as->dimensions,as->descr->type_num); ld[0]=n; ld[1]=3; //pos = (PyArrayObject *) PyArray_FromDims(2,ld,PyArray_FLOAT); pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ srandom(irand); for (i = 0; i < n; i++) { /* radial distribution */ R = Rmax+1; while (R>Rmax) { k = 0; XM = (float)random()/(float)RAND_MAX; //xx = 2.*Hr*XM; /* initial point (bad choice... Pfen ?) */ xx = 2.*XM; /* initial point */ x0 = xx; k = 0; D = 1; while (fabs(D)> 1E-12) { if (k>32) { //printf("x0=%g xx=%g D=%g\n",x0,xx,D); break; } D = (XM - TM1(xx))/RHO1(xx); xx = xx + D; k = k + 1; } R = xx*Hr; } /* angular distribution */ PHI = TWOPI*(float)random()/(float)RAND_MAX; x = R*cos(PHI); y = R*sin(PHI); /* verticale distribution */ z = Zmax+1; while(z>Zmax) { z = -Hz*log((float)random()/(float)RAND_MAX); } if ((float)random()/(float)RAND_MAX < 0.5) z = -z; *(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = x ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = y ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = z ; } return PyArray_Return(pos); } /*********************************/ /* miyamoto_nagai */ /*********************************/ float rhod1(float a, float b2, float r2, float z2) { float c,c2,d,d2; //float cte = 0.079577471636878186; c2= b2 + z2; c = sqrt(c2); d = a + c; d2 = d*d; //return cte * b2*(a*r2 + (d+c+c)*d2) / ( c*c2*sqrt( pow((r2+d2),5) ) ); return b2*(a*r2 + (d+c+c)*d2) / ( c*c2*sqrt( pow((r2+d2),5) ) ); } static PyObject * iclib_miyamoto_nagai(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos; float a,b,Rmax,Zmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; float URHOD0,CTHE,STHE,PHI,CX,CY,RR,R2,Z2,RHO; float XX,YY,ZZ; float EPS; float Rmax2,Zmax2,b2; float DPI; /* parse arguments */ if (!PyArg_ParseTuple(args, "iffffi", &n,&a,&b,&Rmax,&Zmax,&irand)) return NULL; /* create output */ //pos = (PyArrayObject *) PyArray_FromDims(as->nd,as->dimensions,as->descr->type_num); ld[0]=n; ld[1]=3; //pos = (PyArrayObject *) PyArray_FromDims(2,ld,PyArray_FLOAT); pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ srandom(irand); b2 = b*b; Rmax2 = Rmax*Rmax; Zmax2 = 
Zmax*Zmax; EPS = 1e-30; EPS = 0; DPI = 8.*atan(1.); URHOD0 = 1./(rhod1(a,b2,0.,0.)+EPS); for (i = 0; i < n; i++) { do { RR = pow( (float)random()/(float)RAND_MAX ,1.0/3.0); PHI = DPI*(float)random()/(float)RAND_MAX; CTHE = 1.-2.*(float)random()/(float)RAND_MAX ; STHE = sqrt(1.-CTHE*CTHE); XX = RR*cos(PHI)*STHE; YY = RR*sin(PHI)*STHE; ZZ = RR*CTHE; R2 = XX*XX + YY*YY; Z2 = ZZ*ZZ; RHO = URHOD0*rhod1(a,b2,Rmax2*R2,Zmax2*Z2); } while(RHO < (float)random()/(float)RAND_MAX); if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); *(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = Rmax*XX ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = Rmax*YY ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = Zmax*ZZ ; } return PyArray_Return(pos); } static PyObject * iclib_miyamoto_nagai_f(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos, *fct; float a,b,Rmax,Zmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; float URHOD0,URHOD1,CTHE,STHE,PHI,CX,CY,RR,R2,Z2,RHO,R; float XX,YY,ZZ; float EPS; float Rmax2,Zmax2,b2; float DPI; float fRmax; /* parse arguments */ if (!PyArg_ParseTuple(args, "iffffiOf", &n,&a,&b,&Rmax,&Zmax,&irand,&fct,&fRmax)) return NULL; /* create output */ ld[0]=n; ld[1]=3; //pos = (PyArrayObject *) PyArray_FromDims(2,ld,PyArray_FLOAT); pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ srandom(irand); b2 = b*b; Rmax2 = Rmax*Rmax; Zmax2 = Zmax*Zmax; EPS = 1e-30; EPS = 0; DPI = 8.*atan(1.); /* find the max density along the disk */ URHOD0 = -1; for (i=0;iURHOD0) { URHOD0 = URHOD1; } } URHOD0 = 1/URHOD0; for (i = 0; i < n; i++) { do { RR = pow( (float)random()/(float)RAND_MAX ,1.0/3.0); PHI = DPI*(float)random()/(float)RAND_MAX; CTHE = 1.-2.*(float)random()/(float)RAND_MAX ; STHE = sqrt(1.-CTHE*CTHE); XX = RR*cos(PHI)*STHE; YY = RR*sin(PHI)*STHE; ZZ = RR*CTHE; R2 = XX*XX + YY*YY; Z2 = ZZ*ZZ; RHO = URHOD0* rhod1(a,b2,Rmax2*R2,Zmax2*Z2)/f_fct(sqrt(Rmax2*R2),fct,fRmax); } while(RHO < (float)random()/(float)RAND_MAX); if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); *(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = Rmax*XX ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = Rmax*YY ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = Zmax*ZZ ; } return PyArray_Return(pos); } /*********************************/ /* Burkert */ /*********************************/ float burkert_density(float rs,float r) { return 1 / ( ( 1 + r/rs ) * ( 1 + pow((r/rs),2) ) ); } static PyObject * iclib_burkert(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos; float rs,Rmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; float URHOD0,CTHE,STHE,PHI,CX,CY,RR,R2,RHO; float XX,YY,ZZ; float EPS; float Rmax2; float DPI; /* parse arguments */ if (!PyArg_ParseTuple(args, "iffi", &n,&rs,&Rmax,&irand)) return NULL; /* create output */ ld[0]=n; ld[1]=3; pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ srandom(irand); Rmax2 = Rmax*Rmax; EPS = 1e-30; EPS = 0; DPI = 8.*atan(1.); URHOD0 = 1/burkert_density(rs,0); for (i = 0; i < n; i++) { do { RR = pow( (float)random()/(float)RAND_MAX ,1.0/3.0); PHI = DPI*(float)random()/(float)RAND_MAX; CTHE = 1.-2.*(float)random()/(float)RAND_MAX ; STHE = sqrt(1.-CTHE*CTHE); XX = RR*cos(PHI)*STHE; YY = RR*sin(PHI)*STHE; ZZ = RR*CTHE; R2 = XX*XX + YY*YY + ZZ*ZZ; RHO = URHOD0*burkert_density(rs,sqrt(Rmax2*R2)); } while(RHO < (float)random()/(float)RAND_MAX); if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); 
*(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = Rmax*XX ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = Rmax*YY ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = Rmax*ZZ ; } return PyArray_Return(pos); } /*********************************/ /* NFWg */ /*********************************/ float nfwg_density(float rs,float gamma, float e, float r) { return 1/ ( pow(((r+e)/rs),gamma) * pow( 1+pow(r/rs,2) ,0.5*(3-gamma)) ); //return 1/ ( (((r+e)/rs) ) * pow(1+r/rs,2) ); } static PyObject * iclib_nfwg(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos; float rs,gamma,e,Rmax; float x,y,z; int n,irand; int i; npy_intp ld[2]; float URHOD0,CTHE,STHE,PHI,CX,CY,RR,R2,RHO; float XX,YY,ZZ; float EPS; float Rmax2; float DPI; /* parse arguments */ if (!PyArg_ParseTuple(args, "iffffi", &n,&rs,&gamma,&e,&Rmax,&irand)) return NULL; /* create output */ ld[0]=n; ld[1]=3; pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); /* init random */ srandom(irand); Rmax2 = Rmax*Rmax; EPS = 1e-30; EPS = 0; DPI = 8.*atan(1.); URHOD0 = 1/nfwg_density(rs,gamma,e,0); for (i = 0; i < n; i++) { do { RR = pow( (float)random()/(float)RAND_MAX ,1.0/3.0); PHI = DPI*(float)random()/(float)RAND_MAX; CTHE = 1.-2.*(float)random()/(float)RAND_MAX ; STHE = sqrt(1.-CTHE*CTHE); XX = RR*cos(PHI)*STHE; YY = RR*sin(PHI)*STHE; ZZ = RR*CTHE; R2 = XX*XX + YY*YY + ZZ*ZZ; RHO = URHOD0*nfwg_density(rs,gamma,e,sqrt(Rmax2*R2)); } while(RHO < (float)random()/(float)RAND_MAX); if (fmod(i,10000)==0 && i!=0) printf("i=%8d/%d\n",i,n); *(float*)(pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = Rmax*XX ; *(float*)(pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = Rmax*YY ; *(float*)(pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = Rmax*ZZ ; } return PyArray_Return(pos); } /* definition of the method table */ static PyMethodDef iclibMethods[] = { {"generic_Mx1D", iclib_generic_Mx1D, METH_VARARGS, "Return position following the density given by M(x)=Mx. Return only x."}, {"generic_Mx", iclib_generic_Mx, METH_VARARGS, "Return position following the density given by M(x)=Mx. We assume an homogeneous distribution in y and z."}, {"generic_alpha", iclib_generic_alpha, METH_VARARGS, "Return position following the density (r+eps)^a."}, {"generic_Mr", iclib_generic_Mr, METH_VARARGS, "Return position following the density given by M(r)=Mr."}, {"exponential_disk", iclib_exponential_disk, METH_VARARGS, "Return position of an exponential disk."}, {"miyamoto_nagai", iclib_miyamoto_nagai, METH_VARARGS, "Return position of a miyamoto_nagai model."}, {"miyamoto_nagai_f", iclib_miyamoto_nagai_f, METH_VARARGS, "Return position of a miyamoto_nagai model divided by f(R)."}, {"miyamoto_nagai_f", iclib_miyamoto_nagai_f, METH_VARARGS, "Return position of a miyamoto_nagai model divided by f(R)."}, {"burkert", iclib_burkert, METH_VARARGS, "Return position of a burkert model."}, {"nfwg", iclib_nfwg, METH_VARARGS, "Return position of a nfwg model."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; void initiclib(void) { (void) Py_InitModule("iclib", iclibMethods); import_array(); }
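The generic_Mx1D, generic_Mx and generic_Mr generators above all draw coordinates by inverse-transform sampling of a tabulated cumulative mass profile: a uniform deviate is scaled by M(xmax) and the table is inverted with ValFromVect2. A hedged numpy sketch of the same idea (profile, array names and the use of np.interp are illustrative):

        import numpy as np

        def sample_from_Mx(xs, Mx, n, xmax):
            # xs : coordinate grid, Mx : cumulative mass M(<x) tabulated on xs (increasing)
            MxMax = np.interp(xmax, xs, Mx)
            u = np.random.random(n) * MxMax
            return np.interp(u, Mx, xs)        # invert M(x); valid because Mx is increasing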
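generic_alpha, miyamoto_nagai, burkert and nfwg share the same rejection scheme: a point is drawn uniformly inside the unit sphere (radius u**(1./3.), isotropic direction) and kept with probability rho(point)/rho(0), rho(0) being taken as the density maximum. A hedged sketch for a spherical profile (the Burkert form is only an example):

        import numpy as np

        def sample_rejection(density, n, Rmax):
            rho0 = density(0.0)                                  # assumed to be the maximum
            pts = []
            while len(pts) < n:
                r = np.random.random() ** (1.0 / 3.0)            # uniform radius in the unit ball
                phi = 2.0 * np.pi * np.random.random()
                cthe = 1.0 - 2.0 * np.random.random()
                sthe = np.sqrt(1.0 - cthe * cthe)
                x, y, z = r * np.cos(phi) * sthe, r * np.sin(phi) * sthe, r * cthe
                if density(Rmax * r) / rho0 > np.random.random():    # accept / reject
                    pts.append((Rmax * x, Rmax * y, Rmax * z))
            return np.array(pts, np.float32)

        burkert = lambda r, rs=1.0: 1.0 / ((1.0 + r / rs) * (1.0 + (r / rs) ** 2))
        pos = sample_rejection(burkert, 1000, Rmax=10.0)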
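iclib_exponential_disk instead inverts the cumulative radial mass of an exponential disc, M(<R) proportional to 1 - (1 + R/Hr)*exp(-R/Hr), with the Newton iteration implemented by the TM1/RHO1 pair. A hedged sketch of a single radial draw:

        import math, random

        def draw_disk_radius(Hr, Rmax):
            while True:
                XM = random.random()                 # target cumulative mass fraction
                if XM == 0.0:                        # avoid a zero initial guess
                    continue
                x = 2.0 * XM                         # same initial guess as the C code
                for _ in range(32):
                    D = (XM - (1.0 - (1.0 + x) * math.exp(-x))) / (x * math.exp(-x))
                    x = x + D
                    if abs(D) <= 1e-12:
                        break
                R = x * Hr
                if R <= Rmax:                        # redraw when outside the truncation radius
                    return R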