diff --git a/Doc/newdoc/_build/doctrees/environment.pickle b/Doc/newdoc/_build/doctrees/environment.pickle index d5d399f..23a0718 100644 Binary files a/Doc/newdoc/_build/doctrees/environment.pickle and b/Doc/newdoc/_build/doctrees/environment.pickle differ diff --git a/Doc/newdoc/_build/doctrees/index.doctree b/Doc/newdoc/_build/doctrees/index.doctree index ff4d6f5..8d2ffd9 100644 Binary files a/Doc/newdoc/_build/doctrees/index.doctree and b/Doc/newdoc/_build/doctrees/index.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_PyGadget.doctree b/Doc/newdoc/_build/doctrees/rst/C_PyGadget.doctree new file mode 100644 index 0000000..7cb3940 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_PyGadget.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_asciilib.doctree b/Doc/newdoc/_build/doctrees/rst/C_asciilib.doctree new file mode 100644 index 0000000..740c0b9 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_asciilib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_cooling_with_metals.doctree b/Doc/newdoc/_build/doctrees/rst/C_cooling_with_metals.doctree new file mode 100644 index 0000000..d26e9bf Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_cooling_with_metals.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_coolinglib.doctree b/Doc/newdoc/_build/doctrees/rst/C_coolinglib.doctree new file mode 100644 index 0000000..cdfded0 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_coolinglib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_cosmolib.doctree b/Doc/newdoc/_build/doctrees/rst/C_cosmolib.doctree new file mode 100644 index 0000000..bb22980 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_cosmolib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_iclib.doctree b/Doc/newdoc/_build/doctrees/rst/C_iclib.doctree new file mode 100644 index 0000000..ef6a580 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_iclib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_mapping-omp.doctree b/Doc/newdoc/_build/doctrees/rst/C_mapping-omp.doctree new file mode 100644 index 0000000..4ca6686 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_mapping-omp.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_mapping.doctree b/Doc/newdoc/_build/doctrees/rst/C_mapping.doctree new file mode 100644 index 0000000..bcfb4fd Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_mapping.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_montecarlolib.doctree b/Doc/newdoc/_build/doctrees/rst/C_montecarlolib.doctree new file mode 100644 index 0000000..f1650c1 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_montecarlolib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_myNumeric.doctree b/Doc/newdoc/_build/doctrees/rst/C_myNumeric.doctree new file mode 100644 index 0000000..f24590a Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_myNumeric.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_nbdrklib.doctree b/Doc/newdoc/_build/doctrees/rst/C_nbdrklib.doctree new file mode 100644 index 0000000..58b99d1 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_nbdrklib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_nbodymodule.doctree b/Doc/newdoc/_build/doctrees/rst/C_nbodymodule.doctree new file mode 100644 index 0000000..96934df Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_nbodymodule.doctree differ diff --git 
a/Doc/newdoc/_build/doctrees/rst/C_peanolib.doctree b/Doc/newdoc/_build/doctrees/rst/C_peanolib.doctree new file mode 100644 index 0000000..0a193cb Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_peanolib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_pmlib.doctree b/Doc/newdoc/_build/doctrees/rst/C_pmlib.doctree new file mode 100644 index 0000000..1852d20 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_pmlib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_ptreelib.doctree b/Doc/newdoc/_build/doctrees/rst/C_ptreelib.doctree new file mode 100644 index 0000000..99a81e4 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_ptreelib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_pygsl.doctree b/Doc/newdoc/_build/doctrees/rst/C_pygsl.doctree new file mode 100644 index 0000000..f50102b Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_pygsl.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_streelib.doctree b/Doc/newdoc/_build/doctrees/rst/C_streelib.doctree new file mode 100644 index 0000000..cd86e2c Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_streelib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_tessel.doctree b/Doc/newdoc/_build/doctrees/rst/C_tessel.doctree new file mode 100644 index 0000000..511106d Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_tessel.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/C_treelib.doctree b/Doc/newdoc/_build/doctrees/rst/C_treelib.doctree new file mode 100644 index 0000000..5352be8 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/C_treelib.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/CoolingModule.doctree b/Doc/newdoc/_build/doctrees/rst/CoolingModule.doctree new file mode 100644 index 0000000..096d819 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/CoolingModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/CosmoModule.doctree b/Doc/newdoc/_build/doctrees/rst/CosmoModule.doctree new file mode 100644 index 0000000..a7b6abd Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/CosmoModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/CtesModule.doctree b/Doc/newdoc/_build/doctrees/rst/CtesModule.doctree new file mode 100644 index 0000000..cab2584 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/CtesModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Default_configurations.doctree b/Doc/newdoc/_build/doctrees/rst/Default_configurations.doctree new file mode 100644 index 0000000..07e412d Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Default_configurations.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Default_parameters.doctree b/Doc/newdoc/_build/doctrees/rst/Default_parameters.doctree new file mode 100644 index 0000000..315ed41 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Default_parameters.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Display.doctree b/Doc/newdoc/_build/doctrees/rst/Display.doctree new file mode 100644 index 0000000..ced5751 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Display.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Documentation_and_examples.doctree b/Doc/newdoc/_build/doctrees/rst/Documentation_and_examples.doctree new file mode 100644 index 0000000..8496118 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Documentation_and_examples.doctree differ diff --git 
a/Doc/newdoc/_build/doctrees/rst/Examples.doctree b/Doc/newdoc/_build/doctrees/rst/Examples.doctree new file mode 100644 index 0000000..d525143 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Examples.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Formats.doctree b/Doc/newdoc/_build/doctrees/rst/Formats.doctree new file mode 100644 index 0000000..5c19b51 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Formats.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/FortranfileModule.doctree b/Doc/newdoc/_build/doctrees/rst/FortranfileModule.doctree new file mode 100644 index 0000000..e773faa Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/FortranfileModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/FourierModule.doctree b/Doc/newdoc/_build/doctrees/rst/FourierModule.doctree new file mode 100644 index 0000000..2c21b13 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/FourierModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/GeometryModule.doctree b/Doc/newdoc/_build/doctrees/rst/GeometryModule.doctree new file mode 100644 index 0000000..0aeb31a Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/GeometryModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Grids.doctree b/Doc/newdoc/_build/doctrees/rst/Grids.doctree new file mode 100644 index 0000000..d2ffe52 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Grids.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/IcModule.doctree b/Doc/newdoc/_build/doctrees/rst/IcModule.doctree new file mode 100644 index 0000000..0276a78 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/IcModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/InitialConditions.doctree b/Doc/newdoc/_build/doctrees/rst/InitialConditions.doctree new file mode 100644 index 0000000..313d003 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/InitialConditions.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Initialconditions.doctree b/Doc/newdoc/_build/doctrees/rst/Initialconditions.doctree new file mode 100644 index 0000000..c9cff9a Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Initialconditions.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Installation.doctree b/Doc/newdoc/_build/doctrees/rst/Installation.doctree new file mode 100644 index 0000000..1bc144b Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Installation.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Installing_from_tarball.doctree b/Doc/newdoc/_build/doctrees/rst/Installing_from_tarball.doctree new file mode 100644 index 0000000..63e8914 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Installing_from_tarball.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/IoModule.doctree b/Doc/newdoc/_build/doctrees/rst/IoModule.doctree new file mode 100644 index 0000000..4c580d4 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/IoModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/LibdiskModule.doctree b/Doc/newdoc/_build/doctrees/rst/LibdiskModule.doctree new file mode 100644 index 0000000..e42a35a Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/LibdiskModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/LibgridModule.doctree b/Doc/newdoc/_build/doctrees/rst/LibgridModule.doctree new file mode 100644 index 0000000..79e6527 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/LibgridModule.doctree differ diff --git 
a/Doc/newdoc/_build/doctrees/rst/LiblogModule.doctree b/Doc/newdoc/_build/doctrees/rst/LiblogModule.doctree new file mode 100644 index 0000000..c3467d0 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/LiblogModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/LibmiyamotoModule.doctree b/Doc/newdoc/_build/doctrees/rst/LibmiyamotoModule.doctree new file mode 100644 index 0000000..58f6c6a Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/LibmiyamotoModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/LibqtModule.doctree b/Doc/newdoc/_build/doctrees/rst/LibqtModule.doctree new file mode 100644 index 0000000..909a491 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/LibqtModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/LibutilModule.doctree b/Doc/newdoc/_build/doctrees/rst/LibutilModule.doctree new file mode 100644 index 0000000..2bc6258 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/LibutilModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Main.doctree b/Doc/newdoc/_build/doctrees/rst/Main.doctree new file mode 100644 index 0000000..6584e74 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Main.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/MainModule.doctree b/Doc/newdoc/_build/doctrees/rst/MainModule.doctree new file mode 100644 index 0000000..21283b0 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/MainModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/MontecarloModule.doctree b/Doc/newdoc/_build/doctrees/rst/MontecarloModule.doctree new file mode 100644 index 0000000..85d1e37 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/MontecarloModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/MovieModule.doctree b/Doc/newdoc/_build/doctrees/rst/MovieModule.doctree new file mode 100644 index 0000000..7fc6280 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/MovieModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/MpiModule.doctree b/Doc/newdoc/_build/doctrees/rst/MpiModule.doctree new file mode 100644 index 0000000..513f357 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/MpiModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Overview.doctree b/Doc/newdoc/_build/doctrees/rst/Overview.doctree index e719161..411ad4c 100644 Binary files a/Doc/newdoc/_build/doctrees/rst/Overview.doctree and b/Doc/newdoc/_build/doctrees/rst/Overview.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/PaletteModule.doctree b/Doc/newdoc/_build/doctrees/rst/PaletteModule.doctree new file mode 100644 index 0000000..70abe61 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/PaletteModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/ParamModule.doctree b/Doc/newdoc/_build/doctrees/rst/ParamModule.doctree new file mode 100644 index 0000000..21231ab Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/ParamModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/ParameterModule.doctree b/Doc/newdoc/_build/doctrees/rst/ParameterModule.doctree new file mode 100644 index 0000000..10fb19a Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/ParameterModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/PhotModule.doctree b/Doc/newdoc/_build/doctrees/rst/PhotModule.doctree new file mode 100644 index 0000000..4314d2e Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/PhotModule.doctree differ diff --git 
a/Doc/newdoc/_build/doctrees/rst/PlummerModule.doctree b/Doc/newdoc/_build/doctrees/rst/PlummerModule.doctree new file mode 100644 index 0000000..d122fb2 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/PlummerModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Prerequiste.doctree b/Doc/newdoc/_build/doctrees/rst/Prerequiste.doctree new file mode 100644 index 0000000..45471e2 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Prerequiste.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/ProfilesModule.doctree b/Doc/newdoc/_build/doctrees/rst/ProfilesModule.doctree new file mode 100644 index 0000000..d358eb2 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/ProfilesModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/PyfitsModule.doctree b/Doc/newdoc/_build/doctrees/rst/PyfitsModule.doctree new file mode 100644 index 0000000..23254b1 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/PyfitsModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/RecModule.doctree b/Doc/newdoc/_build/doctrees/rst/RecModule.doctree new file mode 100644 index 0000000..f2e64b1 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/RecModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Reference.doctree b/Doc/newdoc/_build/doctrees/rst/Reference.doctree new file mode 100644 index 0000000..4dd4f55 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Reference.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/TalkgdispModule.doctree b/Doc/newdoc/_build/doctrees/rst/TalkgdispModule.doctree new file mode 100644 index 0000000..6a576e1 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/TalkgdispModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Test_the_installation.doctree b/Doc/newdoc/_build/doctrees/rst/Test_the_installation.doctree new file mode 100644 index 0000000..bad6a58 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Test_the_installation.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/ThermodynModule.doctree b/Doc/newdoc/_build/doctrees/rst/ThermodynModule.doctree new file mode 100644 index 0000000..91e141b Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/ThermodynModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Tutorial.doctree b/Doc/newdoc/_build/doctrees/rst/Tutorial.doctree new file mode 100644 index 0000000..a5a2239 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Tutorial.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Tutorial_interpreter.doctree b/Doc/newdoc/_build/doctrees/rst/Tutorial_interpreter.doctree new file mode 100644 index 0000000..4e57db0 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Tutorial_interpreter.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Tutorial_parallel.doctree b/Doc/newdoc/_build/doctrees/rst/Tutorial_parallel.doctree new file mode 100644 index 0000000..fc4a705 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Tutorial_parallel.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Tutorial_scripts.doctree b/Doc/newdoc/_build/doctrees/rst/Tutorial_scripts.doctree new file mode 100644 index 0000000..c3fde1b Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Tutorial_scripts.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/Units.doctree b/Doc/newdoc/_build/doctrees/rst/Units.doctree new file mode 100644 index 0000000..3c53bc2 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/Units.doctree differ 
diff --git a/Doc/newdoc/_build/doctrees/rst/UnitsModule.doctree b/Doc/newdoc/_build/doctrees/rst/UnitsModule.doctree new file mode 100644 index 0000000..bcc2b31 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/UnitsModule.doctree differ diff --git a/Doc/newdoc/_build/doctrees/rst/nbodymodule.doctree b/Doc/newdoc/_build/doctrees/rst/nbodymodule.doctree new file mode 100644 index 0000000..3b7b726 Binary files /dev/null and b/Doc/newdoc/_build/doctrees/rst/nbodymodule.doctree differ diff --git a/Doc/newdoc/_build/html/_images/cosmo.png b/Doc/newdoc/_build/html/_images/cosmo.png new file mode 100644 index 0000000..9717228 Binary files /dev/null and b/Doc/newdoc/_build/html/_images/cosmo.png differ diff --git a/Doc/newdoc/_build/html/_images/cosmo1.png b/Doc/newdoc/_build/html/_images/cosmo1.png new file mode 100644 index 0000000..9717228 Binary files /dev/null and b/Doc/newdoc/_build/html/_images/cosmo1.png differ diff --git a/Doc/newdoc/_build/html/_images/edge-on-disk.png b/Doc/newdoc/_build/html/_images/edge-on-disk.png new file mode 100644 index 0000000..b71edc6 Binary files /dev/null and b/Doc/newdoc/_build/html/_images/edge-on-disk.png differ diff --git a/Doc/newdoc/_build/html/_images/edge-on-disk1.png b/Doc/newdoc/_build/html/_images/edge-on-disk1.png new file mode 100644 index 0000000..7fd5fe5 Binary files /dev/null and b/Doc/newdoc/_build/html/_images/edge-on-disk1.png differ diff --git a/Doc/newdoc/_build/html/_images/edge-on-disk2.png b/Doc/newdoc/_build/html/_images/edge-on-disk2.png new file mode 100644 index 0000000..7fd5fe5 Binary files /dev/null and b/Doc/newdoc/_build/html/_images/edge-on-disk2.png differ diff --git a/Doc/newdoc/_build/html/_images/edge-on-disk3.png b/Doc/newdoc/_build/html/_images/edge-on-disk3.png new file mode 100644 index 0000000..7fd5fe5 Binary files /dev/null and b/Doc/newdoc/_build/html/_images/edge-on-disk3.png differ diff --git a/Doc/newdoc/_build/html/_images/edge-on-disk4.png b/Doc/newdoc/_build/html/_images/edge-on-disk4.png new file mode 100644 index 0000000..7fd5fe5 Binary files /dev/null and b/Doc/newdoc/_build/html/_images/edge-on-disk4.png differ diff --git a/Doc/newdoc/_build/html/_images/math/c154565d113d42c101983e52d19bf2ade8c95424.png b/Doc/newdoc/_build/html/_images/math/c154565d113d42c101983e52d19bf2ade8c95424.png new file mode 100644 index 0000000..40cdc6d Binary files /dev/null and b/Doc/newdoc/_build/html/_images/math/c154565d113d42c101983e52d19bf2ade8c95424.png differ diff --git a/Doc/newdoc/_build/html/_sources/index.txt b/Doc/newdoc/_build/html/_sources/index.txt index b8c7773..6e9ef7b 100644 --- a/Doc/newdoc/_build/html/_sources/index.txt +++ b/Doc/newdoc/_build/html/_sources/index.txt @@ -1,24 +1,30 @@ .. pNbody documentation master file, created by sphinx-quickstart on Wed Aug 24 16:29:02 2011. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to pNbody's documentation! ================================== Contents: .. 
toctree:: - :maxdepth: 2 + :maxdepth: 5 rst/Overview rst/Installation - rst/Io + rst/Tutorial + rst/Formats + rst/Display + rst/InitialConditions + rst/Units + rst/Grids + rst/Reference Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` diff --git a/Doc/newdoc/_build/html/_sources/rst/C_PyGadget.txt b/Doc/newdoc/_build/html/_sources/rst/C_PyGadget.txt new file mode 100644 index 0000000..4298d45 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_PyGadget.txt @@ -0,0 +1,16 @@ +the C PyGadget module +********************** + +This mpdule is currently not completely integrated to **pNbody**. +It is part of the **pNbody** package but must be compiled +separately. +For mpi, use:: + + export CC=mpirun + + + +.. currentmodule:: PyGadget.gadget + +.. automodule:: PyGadget.gadget + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_asciilib.txt b/Doc/newdoc/_build/html/_sources/rst/C_asciilib.txt new file mode 100644 index 0000000..b6c257b --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_asciilib.txt @@ -0,0 +1,7 @@ +the C asciilib module +********************** + +.. currentmodule:: pNbody.asciilib + +.. automodule:: pNbody.asciilib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_cooling_with_metals.txt b/Doc/newdoc/_build/html/_sources/rst/C_cooling_with_metals.txt new file mode 100644 index 0000000..155e8dc --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_cooling_with_metals.txt @@ -0,0 +1,7 @@ +the C cooling_with_metals module +********************** + +.. currentmodule:: pNbody.cooling_with_metals + +.. automodule:: pNbody.cooling_with_metals + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_coolinglib.txt b/Doc/newdoc/_build/html/_sources/rst/C_coolinglib.txt new file mode 100644 index 0000000..7c4f60e --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_coolinglib.txt @@ -0,0 +1,7 @@ +the C coolinglib module +********************** + +.. currentmodule:: pNbody.coolinglib + +.. automodule:: pNbody.coolinglib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_cosmolib.txt b/Doc/newdoc/_build/html/_sources/rst/C_cosmolib.txt new file mode 100644 index 0000000..e5608b2 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_cosmolib.txt @@ -0,0 +1,7 @@ +the C cosmolib module +********************** + +.. currentmodule:: pNbody.cosmolib + +.. automodule:: pNbody.cosmolib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_iclib.txt b/Doc/newdoc/_build/html/_sources/rst/C_iclib.txt new file mode 100644 index 0000000..4555a26 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_iclib.txt @@ -0,0 +1,7 @@ +the C iclib module +********************** + +.. currentmodule:: pNbody.iclib + +.. automodule:: pNbody.iclib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_mapping-omp.txt b/Doc/newdoc/_build/html/_sources/rst/C_mapping-omp.txt new file mode 100644 index 0000000..4a6b814 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_mapping-omp.txt @@ -0,0 +1,7 @@ +the C mapping-omp module (under construction) +********************** + + +currently not available, +still under construction. + diff --git a/Doc/newdoc/_build/html/_sources/rst/C_mapping.txt b/Doc/newdoc/_build/html/_sources/rst/C_mapping.txt new file mode 100644 index 0000000..5092972 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_mapping.txt @@ -0,0 +1,7 @@ +the C mapping module +********************** + +.. currentmodule:: pNbody.mapping + +.. 
automodule:: pNbody.mapping + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_montecarlolib.txt b/Doc/newdoc/_build/html/_sources/rst/C_montecarlolib.txt new file mode 100644 index 0000000..4e9ebe2 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_montecarlolib.txt @@ -0,0 +1,7 @@ +the C montecarlolib module +********************** + +.. currentmodule:: pNbody.montecarlolib + +.. automodule:: pNbody.montecarlolib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_myNumeric.txt b/Doc/newdoc/_build/html/_sources/rst/C_myNumeric.txt new file mode 100644 index 0000000..bf722bb --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_myNumeric.txt @@ -0,0 +1,7 @@ +the C myNumeric module +********************** + +.. currentmodule:: pNbody.myNumeric + +.. automodule:: pNbody.myNumeric + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_nbdrklib.txt b/Doc/newdoc/_build/html/_sources/rst/C_nbdrklib.txt new file mode 100644 index 0000000..8630470 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_nbdrklib.txt @@ -0,0 +1,7 @@ +the C nbdrklib module +********************** + +.. currentmodule:: pNbody.nbdrklib + +.. automodule:: pNbody.nbdrklib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_nbodymodule.txt b/Doc/newdoc/_build/html/_sources/rst/C_nbodymodule.txt new file mode 100644 index 0000000..23a835e --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_nbodymodule.txt @@ -0,0 +1,7 @@ +the C nbodymodule module +********************** + +.. currentmodule:: pNbody.nbodymodule + +.. automodule:: pNbody.nbodymodule + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_peanolib.txt b/Doc/newdoc/_build/html/_sources/rst/C_peanolib.txt new file mode 100644 index 0000000..3c97f65 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_peanolib.txt @@ -0,0 +1,7 @@ +the C peanolib module +********************** + +.. currentmodule:: pNbody.peanolib + +.. automodule:: pNbody.peanolib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_pmlib.txt b/Doc/newdoc/_build/html/_sources/rst/C_pmlib.txt new file mode 100644 index 0000000..60f3d6b --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_pmlib.txt @@ -0,0 +1,6 @@ +the C pmlib module (never developped) +********************** + +currently not available, +still under construction. + diff --git a/Doc/newdoc/_build/html/_sources/rst/C_ptreelib.txt b/Doc/newdoc/_build/html/_sources/rst/C_ptreelib.txt new file mode 100644 index 0000000..c58edd8 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_ptreelib.txt @@ -0,0 +1,6 @@ +the C ptreelib module (obsolete) +********************** + +currently not available, +still under construction. + diff --git a/Doc/newdoc/_build/html/_sources/rst/C_pygsl.txt b/Doc/newdoc/_build/html/_sources/rst/C_pygsl.txt new file mode 100644 index 0000000..c7df240 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_pygsl.txt @@ -0,0 +1,7 @@ +the C pygsl module +********************** + +.. currentmodule:: pNbody.pygsl + +.. automodule:: pNbody.pygsl + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_streelib.txt b/Doc/newdoc/_build/html/_sources/rst/C_streelib.txt new file mode 100644 index 0000000..532cf11 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_streelib.txt @@ -0,0 +1,5 @@ +the C streelib module (under construction) +********************** + +currently not available, +still under construction. 
diff --git a/Doc/newdoc/_build/html/_sources/rst/C_tessel.txt b/Doc/newdoc/_build/html/_sources/rst/C_tessel.txt new file mode 100644 index 0000000..594d86e --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_tessel.txt @@ -0,0 +1,7 @@ +the C tessel module +********************** + +.. currentmodule:: pNbody.tessel + +.. automodule:: pNbody.tessel + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/C_treelib.txt b/Doc/newdoc/_build/html/_sources/rst/C_treelib.txt new file mode 100644 index 0000000..7dc7909 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/C_treelib.txt @@ -0,0 +1,7 @@ +the C treelib module +********************** + +.. currentmodule:: pNbody.treelib + +.. automodule:: pNbody.treelib + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/CoolingModule.txt b/Doc/newdoc/_build/html/_sources/rst/CoolingModule.txt new file mode 100644 index 0000000..683567f --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/CoolingModule.txt @@ -0,0 +1,2 @@ +the cooling module +********************** diff --git a/Doc/newdoc/_build/html/_sources/rst/CosmoModule.txt b/Doc/newdoc/_build/html/_sources/rst/CosmoModule.txt new file mode 100644 index 0000000..76d7303 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/CosmoModule.txt @@ -0,0 +1,7 @@ +the cosmo module +********************** + +.. currentmodule:: pNbody.cosmo + +.. automodule:: pNbody.cosmo + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/CtesModule.txt b/Doc/newdoc/_build/html/_sources/rst/CtesModule.txt new file mode 100644 index 0000000..3b591ef --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/CtesModule.txt @@ -0,0 +1,7 @@ +the ctes module +********************** + +.. currentmodule:: pNbody.ctes + +.. automodule:: pNbody.ctes + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Default_configurations.txt b/Doc/newdoc/_build/html/_sources/rst/Default_configurations.txt new file mode 100644 index 0000000..37000bd --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Default_configurations.txt @@ -0,0 +1,41 @@ +Default configuration +********************** + +**pNbody** uses a set of parameter files, color tables and format files. +These files are provided by the installation and are by default stored in +the directory ``site-packages/pNbody/config``. +To display where these files are taken from, you can use the command:: + + pNbody_show-path + +It is recommended that users use their own configuration files. To be automatically +recognized by **pNbody**, these files must be placed in the user's ``~/.pNbody`` directory. +**pNbody** provides a simple command to copy all default configuration files into this directory. Simply +type:: + + pNbody_copy-defaultconfig + +and check the values of the new paths:: + + pNbody_show-path + +You can now freely modify the files contained in the configuration directory.
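If you prefer to resolve these locations from a script rather than from the shell, the precedence described above (user ``~/.pNbody`` first, otherwise the ``config`` directory shipped with the package) can be reproduced in a few lines of plain Python. This is only an illustrative sketch of that rule, not a call into a pNbody configuration API::

    import os
    import pNbody

    # user configuration takes precedence over the files shipped with the package
    user_config = os.path.expanduser("~/.pNbody")
    package_config = os.path.join(os.path.dirname(pNbody.__file__), "config")

    config_dir = user_config if os.path.isdir(user_config) else package_config
    print(config_dir)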
+ +By default, the content of the configuration directory is: + ++------------------------+------------+--------------------------------------------------------------------------+ +| name | type | Content | ++========================+============+==========================================================================+ +| defaultparameters | file | the default graphical parameters used by **pNbody** | ++------------------------+------------+--------------------------------------------------------------------------+ +| unitsparameters | file | the default units parameters used by **pNbody** | ++------------------------+------------+--------------------------------------------------------------------------+ +| formats | directory | specific class definition files used to read different file formats | ++------------------------+------------+--------------------------------------------------------------------------+ +| rgb_tables | directory | color tables | ++------------------------+------------+--------------------------------------------------------------------------+ +| plugins | directory | optional plugins | ++------------------------+------------+--------------------------------------------------------------------------+ +| opt | directory | optional files | ++------------------------+------------+--------------------------------------------------------------------------+ + diff --git a/Doc/newdoc/_build/html/_sources/rst/Default_parameters.txt b/Doc/newdoc/_build/html/_sources/rst/Default_parameters.txt new file mode 100644 index 0000000..16687c0 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Default_parameters.txt @@ -0,0 +1,67 @@ +Default parameters +********************** + +To see what default parameters **pNbody** uses, type:: + + pNbody_show-parameters + +The script returns the parameters taken from the files +*defaultparameters* and *unitsparameters*. +Their current values are displayed:: + + parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters + + ---------------------------------------------------------------------------------------------------- + name meaning value (type) + ---------------------------------------------------------------------------------------------------- + obs : observer = None (ArrayObs) + xp : observing position = None (List) + x0 : position of observer = None (List) + alpha : angle of the head = None (Float) + view : view = xz (String) + r_obs : dist. 
to the observer = 201732.223771 (Float) + clip : clip planes = (100866.11188556443, 403464.44754225772) (Tuple) + cut : cut clip planes = no (String) + eye : name of the eye = None (String) + dist_eye : distance between eyes = -0.0005 (Float) + foc : focal = 300.0 (Float) + persp : perspective = off (String) + shape : shape of the image = (512, 512) (Tuple) + size : pysical size = (6000, 6000) (Tuple) + frsp : frsp = 0.0 (Float) + space : space = pos (String) + mode : mode = m (String) + rendering : rendering mode = map (String) + filter_name : name of the filter = None (String) + filter_opts : filter options = [10, 10, 2, 2] (List) + scale : scale = log (String) + cd : cd = 0.0 (Float) + mn : mn = 0.0 (Float) + mx : mx = 0.0 (Float) + l_n : number of levels = 15 (Int) + l_min : min level = 0.0 (Float) + l_max : max level = 0.0 (Float) + l_kx : l_kx = 10 (Int) + l_ky : l_ky = 10 (Int) + l_color : level color = 0 (Int) + l_crush : crush background = no (String) + b_weight : box line weight = 0 (Int) + b_xopts : x axis options = None (Tuple) + b_yopts : y axis options = None (Tuple) + b_color : line color = 255 (Int) + + parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters + + ---------------------------------------------------------------------------------------------------- + name meaning value (type) + ---------------------------------------------------------------------------------------------------- + xi : hydrogen mass fraction = 0.76 (Float) + ionisation : ionisation flag = 1 (Int) + metalicity : metalicity index = 4 (Int) + Nsph : number of sph neighbors = 50 (Int) + gamma : adiabatic index = 1.66666666667 (Float) + coolingfile : Cooling file = ~/.Nbody/cooling.dat (String) + HubbleParam : HubbleParam = 1.0 (Float) + UnitLength_in_cm : UnitLength in cm = 3.085e+21 (Float) + UnitMass_in_g : UnitMass in g = 4.435693e+44 (Float) + UnitVelocity_in_cm_per_s : UnitVelocity in cm per s = 97824708.2699 (Float) diff --git a/Doc/newdoc/_build/html/_sources/rst/Display.txt b/Doc/newdoc/_build/html/_sources/rst/Display.txt new file mode 100644 index 0000000..955ced3 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Display.txt @@ -0,0 +1,3 @@ +Display Models +********************** + diff --git a/Doc/newdoc/_build/html/_sources/rst/Documentation_and_examples.txt b/Doc/newdoc/_build/html/_sources/rst/Documentation_and_examples.txt new file mode 100644 index 0000000..8d0f809 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Documentation_and_examples.txt @@ -0,0 +1,2 @@ +Examples +********************** diff --git a/Doc/newdoc/_build/html/_sources/rst/Examples.txt b/Doc/newdoc/_build/html/_sources/rst/Examples.txt new file mode 100644 index 0000000..1e91547 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Examples.txt @@ -0,0 +1,9 @@ +Examples +********************** + +A series of examples is provided by **pNbody** in the +``PNBODYPATH/examples``, where NBODYPATH is obtained +with the command:: + + pNbody_show-path + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/_sources/rst/Formats.txt b/Doc/newdoc/_build/html/_sources/rst/Formats.txt new file mode 100644 index 0000000..bcfb7f9 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Formats.txt @@ -0,0 +1,3 @@ +Setting a format file +********************** + diff --git a/Doc/newdoc/_build/html/_sources/rst/FortranfileModule.txt b/Doc/newdoc/_build/html/_sources/rst/FortranfileModule.txt new file mode 100644 index 0000000..b1c6756 --- /dev/null +++ 
b/Doc/newdoc/_build/html/_sources/rst/FortranfileModule.txt @@ -0,0 +1,7 @@ +the fortranfile module +********************** + +.. currentmodule:: pNbody.fortranfile + +.. automodule:: pNbody.fortranfile + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/FourierModule.txt b/Doc/newdoc/_build/html/_sources/rst/FourierModule.txt new file mode 100644 index 0000000..ce448bd --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/FourierModule.txt @@ -0,0 +1,7 @@ +the fourier module +********************** + +.. currentmodule:: pNbody.fourier + +.. automodule:: pNbody.fourier + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/GeometryModule.txt b/Doc/newdoc/_build/html/_sources/rst/GeometryModule.txt new file mode 100644 index 0000000..a53df9f --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/GeometryModule.txt @@ -0,0 +1,7 @@ +the geometry module +********************** + +.. currentmodule:: pNbody.geometry + +.. automodule:: pNbody.geometry + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Grids.txt b/Doc/newdoc/_build/html/_sources/rst/Grids.txt new file mode 100644 index 0000000..bcb2ce8 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Grids.txt @@ -0,0 +1,3 @@ +Generating grids +********************** + diff --git a/Doc/newdoc/_build/html/_sources/rst/IcModule.txt b/Doc/newdoc/_build/html/_sources/rst/IcModule.txt new file mode 100644 index 0000000..a442db0 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/IcModule.txt @@ -0,0 +1,7 @@ +the ic module +********************** + +.. currentmodule:: pNbody.ic + +.. automodule:: pNbody.ic + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/InitialConditions.txt b/Doc/newdoc/_build/html/_sources/rst/InitialConditions.txt new file mode 100644 index 0000000..002549a --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/InitialConditions.txt @@ -0,0 +1,3 @@ +Generating initial conditions +********************** + diff --git a/Doc/newdoc/_build/html/_sources/rst/Initialconditions.txt b/Doc/newdoc/_build/html/_sources/rst/Initialconditions.txt new file mode 100644 index 0000000..002549a --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Initialconditions.txt @@ -0,0 +1,3 @@ +Generating initial conditions +********************** + diff --git a/Doc/newdoc/rst/Installation.rst b/Doc/newdoc/_build/html/_sources/rst/Installation.txt similarity index 75% copy from Doc/newdoc/rst/Installation.rst copy to Doc/newdoc/_build/html/_sources/rst/Installation.txt index 7ece16d..b251858 100644 --- a/Doc/newdoc/rst/Installation.rst +++ b/Doc/newdoc/_build/html/_sources/rst/Installation.txt @@ -1,15 +1,17 @@ Installation ********************** pNbody is curently only supported by linux. .. 
toctree:: :maxdepth: 2 Prerequiste Installing_from_tarball Test_the_installation - Documentation_and_examples + Default_configurations + Default_parameters + Examples diff --git a/Doc/newdoc/rst/Installing_from_tarball.rst b/Doc/newdoc/_build/html/_sources/rst/Installing_from_tarball.txt similarity index 90% copy from Doc/newdoc/rst/Installing_from_tarball.rst copy to Doc/newdoc/_build/html/_sources/rst/Installing_from_tarball.txt index 978d8d1..b6a1f2c 100644 --- a/Doc/newdoc/rst/Installing_from_tarball.rst +++ b/Doc/newdoc/_build/html/_sources/rst/Installing_from_tarball.txt @@ -1,39 +1,39 @@ Installing from source ********************** Decompress the tarball ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Decompress the tarball file:: tar -xzf pNbody-4.x.tar.gz enter the directory:: cd pNbody-4.x Compile ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The compilation is performed using the standard command:: python setup.py build If one wants to install in another directory than the default -python one, it is possible to use the standard *--prefix* option:: +python one, it is possible to use the standard ``--prefix`` option:: python setup.py build --prefix other_directory Install ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now, depending on your python installation you need to be root. The module is installed with the following command:: python setup.py install diff --git a/Doc/newdoc/_build/html/_sources/rst/IoModule.txt b/Doc/newdoc/_build/html/_sources/rst/IoModule.txt new file mode 100644 index 0000000..d571bba --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/IoModule.txt @@ -0,0 +1,14 @@ +the io module +********************** + +.. currentmodule:: pNbody.io + +.. autofunction:: checkfile +.. autofunction:: end_of_file +.. autofunction:: write_array +.. autofunction:: read_ascii +.. autofunction:: write_dump +.. autofunction:: read_dump + + + diff --git a/Doc/newdoc/_build/html/_sources/rst/LibdiskModule.txt b/Doc/newdoc/_build/html/_sources/rst/LibdiskModule.txt new file mode 100644 index 0000000..c8f18bb --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/LibdiskModule.txt @@ -0,0 +1,7 @@ +the libdisk module +********************** + +.. currentmodule:: pNbody.libdisk + +.. automodule:: pNbody.libdisk + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/LibgridModule.txt b/Doc/newdoc/_build/html/_sources/rst/LibgridModule.txt new file mode 100644 index 0000000..1703590 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/LibgridModule.txt @@ -0,0 +1,7 @@ +the libgrid module +********************** + +.. currentmodule:: pNbody.libgrid + +.. automodule:: pNbody.libgrid + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/LiblogModule.txt b/Doc/newdoc/_build/html/_sources/rst/LiblogModule.txt new file mode 100644 index 0000000..5584e90 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/LiblogModule.txt @@ -0,0 +1,7 @@ +the liblog module +********************** + +.. currentmodule:: pNbody.liblog + +.. automodule:: pNbody.liblog + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/LibmiyamotoModule.txt b/Doc/newdoc/_build/html/_sources/rst/LibmiyamotoModule.txt new file mode 100644 index 0000000..8fd942d --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/LibmiyamotoModule.txt @@ -0,0 +1,7 @@ +the libmiyamoto module +********************** + +.. currentmodule:: pNbody.libmiyamoto + +.. 
automodule:: pNbody.libmiyamoto + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/LibqtModule.txt b/Doc/newdoc/_build/html/_sources/rst/LibqtModule.txt new file mode 100644 index 0000000..ae36fbc --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/LibqtModule.txt @@ -0,0 +1,7 @@ +the libqt module +********************** + +.. currentmodule:: pNbody.libqt + +.. automodule:: pNbody.libqt + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/LibutilModule.txt b/Doc/newdoc/_build/html/_sources/rst/LibutilModule.txt new file mode 100644 index 0000000..a330841 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/LibutilModule.txt @@ -0,0 +1,7 @@ +the libutil module +********************** + +.. currentmodule:: pNbody.libutil + +.. automodule:: pNbody.libutil + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Main.txt b/Doc/newdoc/_build/html/_sources/rst/Main.txt new file mode 100644 index 0000000..69b7d8d --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Main.txt @@ -0,0 +1,7 @@ +the main module +********************** + +.. currentmodule:: pNbody + +.. automodule:: pNbody + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/MainModule.txt b/Doc/newdoc/_build/html/_sources/rst/MainModule.txt new file mode 100644 index 0000000..6f61a81 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/MainModule.txt @@ -0,0 +1,8 @@ +the main module +********************** + +.. currentmodule:: pNbody.main + +.. automodule:: pNbody.main + :members: + diff --git a/Doc/newdoc/_build/html/_sources/rst/MontecarloModule.txt b/Doc/newdoc/_build/html/_sources/rst/MontecarloModule.txt new file mode 100644 index 0000000..6ef45f7 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/MontecarloModule.txt @@ -0,0 +1,7 @@ +the montecarlo module +********************** + +.. currentmodule:: pNbody.montecarlo + +.. automodule:: pNbody.montecarlo + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/MovieModule.txt b/Doc/newdoc/_build/html/_sources/rst/MovieModule.txt new file mode 100644 index 0000000..bb127d9 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/MovieModule.txt @@ -0,0 +1,7 @@ +the Movie module +********************** + +.. currentmodule:: pNbody.Movie + +.. automodule:: pNbody.Movie + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/MpiModule.txt b/Doc/newdoc/_build/html/_sources/rst/MpiModule.txt new file mode 100644 index 0000000..2049388 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/MpiModule.txt @@ -0,0 +1,7 @@ +the mpi module +********************** + +.. currentmodule:: pNbody.mpi + +.. automodule:: pNbody.mpi + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Overview.txt b/Doc/newdoc/_build/html/_sources/rst/Overview.txt index 04c6c96..978b948 100644 --- a/Doc/newdoc/_build/html/_sources/rst/Overview.txt +++ b/Doc/newdoc/_build/html/_sources/rst/Overview.txt @@ -1,26 +1,26 @@ Overview ********************** -pNbody is a parallelized python module toolbox designed to manipulate and display +**pNbody** is a parallelized python module toolbox designed to manipulate and display interactively very large N-body systems. Its object-oriented approach allows the user to perform complicated manipulations with only very few commands. As python is an interpreted language, the user can load an N-body system and explore it interactively using the python interpreter. pNbody may also be used in python scripts.
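A minimal interactive session of this kind looks as follows (the calls are the same ones used in the tutorial later in this documentation; ``gadget_z00.dat`` is simply an example snapshot)::

    >>> from pNbody import *
    >>> nb = Nbody('gadget_z00.dat',ftype='gadget')
    >>> nb.info()
    >>> nb.display(size=(10000,10000),shape=(256,256),palette='light')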
The module also contains graphical facilities designed to create maps of physical values of the system, like density maps, temperature maps, velocity maps, etc. Stereo capabilities are also implemented. pNbody is not limited by file format. Each user may redefine in a parameter file how to read its preferred format. Its new parallel (mpi) facilities make it work on computer clusters without being limited by memory consumption. It has already been tested with several million particles. .. image:: ../images/cosmo.png diff --git a/Doc/newdoc/_build/html/_sources/rst/PaletteModule.txt b/Doc/newdoc/_build/html/_sources/rst/PaletteModule.txt new file mode 100644 index 0000000..51d8a5c --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/PaletteModule.txt @@ -0,0 +1,7 @@ +the palette module +********************** + +.. currentmodule:: pNbody.palette + +.. automodule:: pNbody.palette + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/ParamModule.txt b/Doc/newdoc/_build/html/_sources/rst/ParamModule.txt new file mode 100644 index 0000000..b618d2b --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/ParamModule.txt @@ -0,0 +1,7 @@ +the param module +********************** + +.. currentmodule:: pNbody.param + +.. automodule:: pNbody.param + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/ParameterModule.txt b/Doc/newdoc/_build/html/_sources/rst/ParameterModule.txt new file mode 100644 index 0000000..60c0366 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/ParameterModule.txt @@ -0,0 +1,7 @@ +the parameters module +********************** + +.. currentmodule:: pNbody.parameters + +.. automodule:: pNbody.parameters + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/PhotModule.txt b/Doc/newdoc/_build/html/_sources/rst/PhotModule.txt new file mode 100644 index 0000000..e85fd04 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/PhotModule.txt @@ -0,0 +1,7 @@ +the phot module +********************** + +.. currentmodule:: pNbody.phot + +.. automodule:: pNbody.phot + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/PlummerModule.txt b/Doc/newdoc/_build/html/_sources/rst/PlummerModule.txt new file mode 100644 index 0000000..1cb52c3 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/PlummerModule.txt @@ -0,0 +1,7 @@ +the plummer module +********************** + +.. currentmodule:: pNbody.plummer + +.. automodule:: pNbody.plummer + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Prerequiste.txt b/Doc/newdoc/_build/html/_sources/rst/Prerequiste.txt new file mode 100644 index 0000000..7bb7f08 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Prerequiste.txt @@ -0,0 +1,44 @@ +Prerequisite +********************** + +The basic module of pNbody needs Python and the following additional packages: + +1) Python 2.5.x, 2.6.x, 2.7.x + + http://www.python.org + +2) a C compiler + + gcc is fine http://gcc.gnu.org/ + +3) numpy-1.0.4 or higher + + http://numpy.scipy.org/ + +4) Imaging 1.1.5 or higher + + http://www.pythonware.com/products/pil/ + + +For additional but useful special functions: + +5) scipy 0.7 or higher + + http://www.scipy.org/ + + +For the parallel capabilities, an mpi distribution is needed (e.g.
openmpi) +as well as the additional Python mpi wrapper: + +6) mpi4py + http://cheeseshop.python.org/pypi/mpi4py + +In order to convert movies into a standard format (gif or mpeg), the following two applications are needed: + +1) convert (imagemagick) + + http://www.imagemagick.org/script/index.php + +2) mencoder (mplayer) + + http://www.mplayerhq.hu/design7/news.html diff --git a/Doc/newdoc/_build/html/_sources/rst/ProfilesModule.txt b/Doc/newdoc/_build/html/_sources/rst/ProfilesModule.txt new file mode 100644 index 0000000..f228716 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/ProfilesModule.txt @@ -0,0 +1,7 @@ +the profiles module +********************** + +.. currentmodule:: pNbody.profiles + +.. automodule:: pNbody.profiles + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/PyfitsModule.txt b/Doc/newdoc/_build/html/_sources/rst/PyfitsModule.txt new file mode 100644 index 0000000..f9dcabb --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/PyfitsModule.txt @@ -0,0 +1,7 @@ +the pyfits module +********************** + +.. currentmodule:: pNbody.pyfits + +.. automodule:: pNbody.pyfits + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/RecModule.txt b/Doc/newdoc/_build/html/_sources/rst/RecModule.txt new file mode 100644 index 0000000..f73f6ba --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/RecModule.txt @@ -0,0 +1,7 @@ +the rec module +********************** + +.. currentmodule:: pNbody.rec + +.. automodule:: pNbody.rec + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Reference.txt b/Doc/newdoc/_build/html/_sources/rst/Reference.txt new file mode 100644 index 0000000..27e1f71 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Reference.txt @@ -0,0 +1,69 @@ +Reference +********************** + +Contents: + +.. toctree:: + :maxdepth: 2 + + MainModule + + IcModule + IoModule + UnitsModule + CtesModule + + MpiModule + LibutilModule + ParamModule + ParameterModule + LiblogModule + TalkgdispModule + PyfitsModule + RecModule + LibqtModule + FortranfileModule + PaletteModule + MovieModule + + ProfilesModule + GeometryModule + LibmiyamotoModule + PlummerModule + + + LibgridModule + LibdiskModule + + CosmoModule + ThermodynModule + FourierModule + PhotModule + CoolingModule + + C_asciilib + C_coolinglib + C_cooling_with_metals + C_cosmolib + C_iclib + C_mapping + C_mapping-omp + C_montecarlolib + C_myNumeric + C_nbdrklib + C_nbodymodule + C_peanolib + C_pmlib + C_ptreelib + C_PyGadget + C_pygsl + C_streelib + C_tessel + C_treelib + + + + + + + diff --git a/Doc/newdoc/_build/html/_sources/rst/TalkgdispModule.txt b/Doc/newdoc/_build/html/_sources/rst/TalkgdispModule.txt new file mode 100644 index 0000000..e35093b --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/TalkgdispModule.txt @@ -0,0 +1,7 @@ +the talkgdisp module +********************** + +.. currentmodule:: pNbody.talkgdisp + +.. automodule:: pNbody.talkgdisp + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Test_the_installation.txt b/Doc/newdoc/_build/html/_sources/rst/Test_the_installation.txt new file mode 100644 index 0000000..c0280d1 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Test_the_installation.txt @@ -0,0 +1,48 @@ +Check the installation +********************** + + +You can check the installation by simply running the following +command:: + + pNbody_checkall + +This command must of course be in your path. This will be the case +if you did not specify any ``--prefix``.
If, on the contrary, ``--prefix`` +is set to, for example, *localdir*, your *PATH* environment +variable should contain:: + + localdir/bin + +and your *PYTHONPATH* environment variable should contain:: + + localdir/lib/python2.x/site-packages/ + +to ensure that the **pNbody** package will be found. + +If everything goes well, you should see a lot of output on your screen, +as well as a window displaying an edge-on disk. + +.. image:: ../images/edge-on-disk.png + :width: 200 px + + +Close it when you see it. +The script should finally end with something like:: + + + ######################################################################## + Good News ! pNbody with format gadget is working ! + ######################################################################## + + You are currently using the following paths + + HOME : /home/leo + PNBODYPATH : /home/leo/local/lib/python2.6/site-packages/pNbody + CONFIGDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config + PARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters + UNITSPARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters + PALETTEDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/rgb_tables + PLUGINSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/plugins + OPTDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/opt + FORMATSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/formats diff --git a/Doc/newdoc/_build/html/_sources/rst/ThermodynModule.txt b/Doc/newdoc/_build/html/_sources/rst/ThermodynModule.txt new file mode 100644 index 0000000..fc2d1b7 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/ThermodynModule.txt @@ -0,0 +1,7 @@ +the thermodyn module +********************** + +.. currentmodule:: pNbody.thermodyn + +.. automodule:: pNbody.thermodyn + :members: diff --git a/Doc/newdoc/_build/html/_sources/rst/Tutorial.txt b/Doc/newdoc/_build/html/_sources/rst/Tutorial.txt new file mode 100644 index 0000000..2aad6bb --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Tutorial.txt @@ -0,0 +1,9 @@ +Tutorial +********************** + +.. toctree:: + :maxdepth: 2 + + Tutorial_interpreter + Tutorial_scripts + Tutorial_parallel \ No newline at end of file diff --git a/Doc/newdoc/_build/html/_sources/rst/Tutorial_interpreter.txt b/Doc/newdoc/_build/html/_sources/rst/Tutorial_interpreter.txt new file mode 100644 index 0000000..702ca13 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Tutorial_interpreter.txt @@ -0,0 +1,280 @@ +Using **pNbody** with the Python interpreter +********************** + +In order to use this tutorial, you first need to copy some examples provided +with **pNbody**. This can be done by typing:: + + pNbody_copy-examples + +By default, this creates a directory ``~/pnbody_examples`` in your home directory. +Move to this directory:: + + cd ~/pnbody_examples + +Then you can simply follow the instructions below. +First, start the Python interpreter:: + + leo@obsrevaz:~/pnbody_examples python + Python 2.4.2 (#2, Jul 13 2006, 15:26:48) + [GCC 4.0.1 (4.0.1-5mdk for Mandriva Linux release 2006.0)] on linux2 + Type "help", "copyright", "credits" or "license" for more information.
+ >>> + + +Now, you can load the **pNbody** module:: + + >>> from pNbody import * + + +Creating **pNbody** objects from scratch +======================================== + + +We can first start by creating a default **pNbody** object and getting info about it:: + + >>> nb = Nbody() + >>> nb.info() + ----------------------------------- + particle file : ['file.dat'] + ftype : 'Nbody_default' + mxntpe : 6 + nbody : 0 + nbody_tot : 0 + npart : [0, 0, 0, 0, 0, 0] + npart_tot : [0, 0, 0, 0, 0, 0] + mass_tot : 0.0 + byteorder : 'little' + pio : 'no' + >>> + + +All variables linked to the object nb are accessible by typing ``nb.`` followed by the associated variable name:: + + >>> nb.nbody + 0 + >>> nb.mass_tot + 0.0 + >>> nb.pio + 'no' + +Now, you can create an object by giving the positions of particles:: + + >>> pos = ones((10,3),float32) + >>> nb = Nbody(pos=pos) + >>> nb.info() + ----------------------------------- + particle file : ['file.dat'] + ftype : 'Nbody_default' + mxntpe : 6 + nbody : 10 + nbody_tot : 10 + npart : array([10, 0, 0, 0, 0, 0]) + npart_tot : array([10, 0, 0, 0, 0, 0]) + mass_tot : 1.00000011921 + byteorder : 'little' + pio : 'no' + + len pos : 10 + pos[0] : array([ 1., 1., 1.], dtype=float32) + pos[-1] : array([ 1., 1., 1.], dtype=float32) + len vel : 10 + vel[0] : array([ 0., 0., 0.], dtype=float32) + vel[-1] : array([ 0., 0., 0.], dtype=float32) + len mass : 10 + mass[0] : 0.10000000149 + mass[-1] : 0.10000000149 + len num : 10 + num[0] : 0 + num[-1] : 9 + len tpe : 10 + tpe[0] : 0 + tpe[-1] : 0 + +In this case, you can see that the class automatically initializes the other array variables +(vel, mass, num and rsp) with default values. Only the first and the last element of +each defined vector are displayed by the method info. All defined arrays and array elements +may be easily accessed using the numarray conventions. For example, to display and +change the positions of the three first particles, type:: + + >>> nb.pos[:3] + array([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]], type=float32) + >>> nb.pos[:3]=2*ones((3,3),float32) + >>> nb.pos[:3] + array([[ 2., 2., 2.], + [ 2., 2., 2.], + [ 2., 2., 2.]], type=float32) + +Open from existing file +======================================== + +Now, let's try to open the gadget snapshot gadget_z00.dat.
This is achieved by typing::

    >>> nb = Nbody('gadget_z00.dat',ftype='gadget')

Again, information on this snapshot may be obtained using the ``info()`` method::

    >>> nb.info()
    -----------------------------------
    particle file       : ['gadget_z00.dat']
    ftype               : 'Nbody_gadget'
    mxntpe              : 6
    nbody               : 20560
    nbody_tot           : 20560
    npart               : array([ 9160, 10280, 0, 0, 1120, 0])
    npart_tot           : array([ 9160, 10280, 0, 0, 1120, 0])
    mass_tot            : 79.7066955566
    byteorder           : 'little'
    pio                 : 'no'

    len pos             : 20560
    pos[0]              : array([-1294.48828125, -2217.09765625, -9655.49609375], dtype=float32)
    pos[-1]             : array([ -986.0625 , -2183.83203125, 4017.04296875], dtype=float32)
    len vel             : 20560
    vel[0]              : array([ -69.80491638, 60.56475067, -166.32981873], dtype=float32)
    vel[-1]             : array([-140.59715271, -66.44669342, -37.01613235], dtype=float32)
    len mass            : 20560
    mass[0]             : 0.00108565215487
    mass[-1]            : 0.00108565215487
    len num             : 20560
    num[0]              : 21488
    num[-1]             : 1005192
    len tpe             : 20560
    tpe[0]              : 0
    tpe[-1]             : 4

    atime               : 1.0
    redshift            : 2.22044604925e-16
    flag_sfr            : 1
    flag_feedback       : 1
    nall                : [ 9160 10280 0 0 1120 0]
    flag_cooling        : 1
    num_files           : 1
    boxsize             : 100000.0
    omega0              : 0.3
    omegalambda         : 0.7
    hubbleparam         : 0.7
    flag_age            : 0
    flag_metals         : 0
    nallhw              : [0 0 0 0 0 0]
    flag_entr_ic        : 0
    critical_energy_spec: 0.0

    len u               : 20560
    u[0]                : 6606.63037109
    u[-1]               : 0.0
    len rho             : 20560
    rho[0]              : 7.05811936674e-11
    rho[-1]             : 0.0
    len rsp             : 20560
    rsp[0]              : 909.027587891
    rsp[-1]             : 0.0
    len opt             : 20560
    opt[0]              : 446292.5625
    opt[-1]             : 0.0

You can obtain information on physical quantities, like the center of mass
or the total angular momentum vector, by typing::

    >>> nb.cm()
    array([-1649.92651346, 609.98256428, -1689.04011033])
    >>> nb.Ltot()
    array([-1112078.125 , -755964.1875, -1536667.125 ], dtype=float32)

In order to visualise the model in position space, it is possible to
generate a surface density map of it using the ``display()`` method::

    >>> nb.display(size=(10000,10000),shape=(256,256),palette='light')

You can now perform some operations on the model in order to explore a specific
region. First, translate the model in position space::

    >>> nb.translate([3125,-4690,1720])
    >>> nb.display(size=(10000,10000),shape=(256,256),palette='light')
    >>> nb.display(size=(1000,1000),shape=(256,256),palette='light')

You can now rotate it::

    >>> nb.rotate(angle=pi)
    >>> nb.display(size=(1000,1000),shape=(256,256),palette='light')

You can now display a temperature map of the model. First,
create a new object with only the gas particles::

    >>> nb_gas = nb.select('gas')
    >>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='light')

Now, display the mass-weighted temperature map::

    >>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='rainbow4',mode='T',filter_name='gaussian')


Selection of particles
========================================

You can select only the particles within a radius smaller than 500 (in user units)
with respect to the center::

    >>> nb_sub = nb.selectc((nb.rxyz()<500))
    >>> nb_sub.display(size=(1000,1000),shape=(256,256),palette='light')

Now, rename the new model and save it::

    >>> nb_sub.rename('gadget_z00_sub.dat')
    >>> nb_sub.write()

A new gadget file has been created and saved in the current directory.
We can now select particles as a function of the temperature.
First, display the maximum temperature among all gas particles,
then select these particles and finally save in 'T11.num' the identifiers (variable num) of these particles::

    >>> log10(max(nb_gas.T()))
    12.8707923889
    >>> nb_sub = nb_gas.selectc( (nb_gas.T()>1e11) )
    >>> nb_sub.write_num('T11.num')

Now open a new snapshot from the same simulation, but at a different redshift, and find the
particles that had a temperature higher than :math:`10^{11}` in the previous snapshot::

    >>> nb = Nbody('gadget_z40.dat',ftype='gadget')
    >>> nb.display(size=(10000,10000),shape=(256,256),palette='light')
    >>> nb_sub = nb.selectp(file='T11.num')
    >>> nb_sub.display(size=(10000,10000),shape=(256,256),palette='light')

Now, instead of saving it as a gadget file, save it as a binary file type.
You simply need to call the ``set_ftype()`` method before saving it::

    >>> nb = nb.set_ftype('binary')
    >>> nb.rename('binary.dat')
    >>> nb.write()

Merging two models
===================

As a last example, we show how two **pNbody** models can easily be merged with only 11 lines::

    >>> nb1 = Nbody('disk.dat',ftype='gadget')
    >>> nb2 = Nbody('disk.dat',ftype='gadget')
    >>> nb1.rotate(angle=pi/4,axis=[0,1,0])
    >>> nb1.translate([-150,0,0])
    >>> nb1.vel = nb1.vel + [50,0,0]
    >>> nb2.rotate(angle=pi/4,axis=[1,0,0])
    >>> nb2.translate([+150,0,50])
    >>> nb2.vel = nb2.vel - [50,0,0]
    >>> nb3 = nb1 + nb2
    >>> nb3.rename('merge.dat')
    >>> nb3.write()

Now display the result from different points of view::

    >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2')
    >>> nb3 = nb3.select('disk')
    >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xz')
    >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xy')
    >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='yz')
    >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0])

or save it into a gif file::

    >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0],save='image.gif')

diff --git a/Doc/newdoc/_build/html/_sources/rst/Tutorial_parallel.txt b/Doc/newdoc/_build/html/_sources/rst/Tutorial_parallel.txt new file mode 100644 index 0000000..6837763 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Tutorial_parallel.txt @@ -0,0 +1,162 @@

Using pNbody in parallel
**********************

With **pNbody**, it is possible to run scripts in parallel, using the ``mpi`` library.
Of course, you need to have ``mpi`` and ``mpi4py`` installed.
To check your installation, try::

    mpirun -np 2 pNbody_mpi

you should get::

    This is task 0 over 2
    This is task 1 over 2

but if you get::

    This is task 0 over 1
    This is task 0 over 1

this means that something is not working correctly, and you should check your path or your
``mpi`` and ``mpi4py`` installation before reading further.

The previous script ``scripts/slice.py`` can directly be run in parallel.
This is simply obtained by calling the ``mpirun`` command::

    mpirun -np 2 scripts/slice.py gadget_z*0.dat

In this simple script, only the process of rank 0 (the master) opens the file.
The content of the file (particles) is then distributed among all the other processors.
Each processor receives a fraction of the particles.
Then, the selection of gas particles and the slicing are performed by all processors on
their local particles.
Finally, the ``nb.write()`` command, run by the master, gathers all particles and writes the output file.
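To make this distribution visible, here is a minimal sketch (a hypothetical ``check_distribution.py``,
not shipped with pNbody) that prints how many particles each task holds; it only uses ``mpi.ThisTask``,
as in the ``findmax.py`` example below, and assumes that ``nb.nbody_tot`` gives the total number of
particles over all tasks, as its name suggests::

    #!/usr/bin/env python
    # check_distribution.py -- illustrative sketch only, not part of pNbody

    import sys
    from pNbody import *

    file = sys.argv[1]

    # rank 0 reads the snapshot; the particles are then spread over all tasks
    nb = Nbody(file, ftype='gadget')

    # nb.nbody is the number of particles held locally by this task,
    # nb.nbody_tot is assumed to be the total over all tasks
    print "task %d holds %d particles out of %d" % (mpi.ThisTask, nb.nbody, nb.nbody_tot)

Running it with ``mpirun -np 2`` should show the particle numbers split between the two tasks.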

Parallel output
========================================

With **pNbody**, it is possible to write files in parallel, i.e., each task writes its own file.
We can do this in the previous script simply by adding the line ``nb.set_pio('yes')``. This
tells **pNbody** to write files in parallel when ``nb.write()`` is called.
The content of the new script ``scripts/slice-p1.py`` is::

    #!/usr/bin/env python

    import sys
    from pNbody import *

    files = sys.argv[1:]

    for file in files:
      print "slicing",file
      nb = Nbody(file,ftype='gadget')
      nb = nb.select('gas')
      nb = nb.selectc((fabs(nb.pos[:,1])<1000))
      nb.rename(file+'.slice')
      nb.set_pio('yes')
      nb.write()

We can now run it::

    mpirun -np 2 scripts/slice-p1.py gadget_z00.dat

This creates two new files::

    gadget_z00.dat.slice.1
    gadget_z00.dat.slice.0

The files have the same name as the initial name given to ``Nbody()``, with an extension ``.i`` where ``i``
corresponds to the process rank. Each file contains the particles attributed to the corresponding task.


Parallel input
========================================

Now, it is possible to start by reading these two files in parallel instead of asking only the master to read one file.
In our script, we add the optional argument ``pio='yes'`` when creating the object with ``Nbody()``.
The content of the script ``scripts/slice-p2.py`` is::

    #!/usr/bin/env python

    import sys
    from pNbody import *

    files = sys.argv[1:]

    for file in files:
      print "slicing",file
      nb = Nbody(file,ftype='gadget',pio='yes')
      nb = nb.select('gas')
      nb = nb.selectc((fabs(nb.pos[:,1])<1000))
      nb.rename(file+'.slice.new')
      nb.set_pio('no')
      nb.write()

Note also that we have used ``nb.set_pio('no')``. This forces the final file to be written only by the master.

When we launch it::

    mpirun -np 2 scripts/slice-p2.py gadget_z00.dat.slice

the two files ``gadget_z00.dat.slice.0`` and ``gadget_z00.dat.slice.1`` are each read
by one task and processed, but at the end only the master writes the final output: ``gadget_z00.dat.slice.slice.new``.


More on parallelism
========================================


Let's try two other scripts. The first one (``findmax.py``) tries to find the maximum radial distance between
all particles and the center. It illustrates the difference between using ``max()``,
which gives the local maximum (the maximum among the particles of the node), and ``mpi.mpi_max()``,
which gives the global maximum among all particles::

    #!/usr/bin/env python

    import sys
    from pNbody import *

    file = sys.argv[1]

    nb = Nbody(file,ftype='gadget',pio='yes')
    local_max = max(nb.rxyz())
    global_max = mpi.mpi_max(nb.rxyz())

    print "proc %d local_max = %f global_max = %f"%(mpi.ThisTask,local_max,global_max)


When running it, you should get::

    mpirun -np 2 ./scripts/findmax.py gadget_z00.dat.slice
    proc 1 local_max = 8109.682129 global_max = 8109.682129
    proc 0 local_max = 7733.846680 global_max = 8109.682129


which clearly illustrates the point. Finally, the last script shows that even graphical
functions support parallelism. The script ``showmap.py`` illustrates this point by computing
a map of the model::

    #!/usr/bin/env python

    import sys
    from pNbody import *

    file = sys.argv[1]

    nb = Nbody(file,ftype='gadget',pio='yes')
    nb.display(size=(10000,10000),shape=(256,256),palette='light')

When running::

    mpirun -np 2 ./scripts/showmap.py gadget_z00.dat.slice

you get an image of the model.
The mapping has been performed independently by the two processors.

diff --git a/Doc/newdoc/_build/html/_sources/rst/Tutorial_scripts.txt b/Doc/newdoc/_build/html/_sources/rst/Tutorial_scripts.txt new file mode 100644 index 0000000..594d232 --- /dev/null +++ b/Doc/newdoc/_build/html/_sources/rst/Tutorial_scripts.txt @@ -0,0 +1,45 @@

Using pNbody with scripts
**********************


In addition to using **pNbody** in the python interpreter,
it is very useful to use **pNbody** in python scripts. Usually a python script
begins with the line ``#!/usr/bin/env python`` and must be made executable::

    chmod a+x file.py

In the following example (``slice.py``), we show how to write a script that opens a gadget file,
selects the gas particles and cuts a thin slice

.. math:: -1000 < y < 1000
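A minimal sketch of such a script, reconstructed from the ``slice-p1.py`` variant shown earlier in
the parallel tutorial (the ``slice.py`` actually shipped in ``scripts/`` may differ in detail), is::

    #!/usr/bin/env python
    # slice.py -- sketch reconstructed from slice-p1.py; the script shipped
    # with pNbody may differ in detail

    import sys
    from pNbody import *

    files = sys.argv[1:]

    for file in files:
        print "slicing", file
        nb = Nbody(file, ftype='gadget')                # open the snapshot
        nb = nb.select('gas')                           # keep only the gas particles
        nb = nb.selectc((fabs(nb.pos[:, 1]) < 1000))    # keep the slice |y| < 1000
        nb.rename(file + '.slice')                      # new output name
        nb.write()                                      # write the sliced snapshot

Once made executable, it can be run on one or several snapshots, e.g. ``./slice.py gadget_z00.dat``.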

Index — pNbody v4 documentation

General index (A–Z): alphabetical listing of every documented pNbody and PyGadget.gadget module, class, function and method.
\ No newline at end of file diff --git a/Doc/newdoc/_build/html/index.html b/Doc/newdoc/_build/html/index.html index 443bab2..4dd9b2f 100644 --- a/Doc/newdoc/_build/html/index.html +++ b/Doc/newdoc/_build/html/index.html @@ -1,132 +1,225 @@ Welcome to pNbody’s documentation! — pNbody v4 documentation

Welcome to pNbody’s documentation!¶

Contents:

Indices and tables¶


\ No newline at end of file diff --git a/Doc/newdoc/_build/html/np-modindex.html b/Doc/newdoc/_build/html/np-modindex.html new file mode 100644 index 0000000..d8f65b4 --- /dev/null +++ b/Doc/newdoc/_build/html/np-modindex.html @@ -0,0 +1,328 @@ + + + + + + + + + Python Module Index — pNbody v4 documentation + + + + + + + + + + + + + + +
Python Module Index: table listing the pNbody package, all pNbody.* submodules, and PyGadget.gadget.
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/objects.inv b/Doc/newdoc/_build/html/objects.inv index 0e59276..aa9f9c0 100644 Binary files a/Doc/newdoc/_build/html/objects.inv and b/Doc/newdoc/_build/html/objects.inv differ diff --git a/Doc/newdoc/_build/html/py-modindex.html b/Doc/newdoc/_build/html/py-modindex.html new file mode 100644 index 0000000..45d7e0d --- /dev/null +++ b/Doc/newdoc/_build/html/py-modindex.html @@ -0,0 +1,328 @@ + + + + + + + + + Python Module Index — pNbody v4 documentation + + + + + + + + + + + + + + +
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/C_PyGadget.html b/Doc/newdoc/_build/html/rst/C_PyGadget.html new file mode 100644 index 0000000..83aea44 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/C_PyGadget.html @@ -0,0 +1,319 @@ + + + + + + + + + the C PyGadget module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the C PyGadget module¶

+

This module is currently not completely integrated into pNbody. It is part of the pNbody package but must be compiled separately. For MPI support, use:

+
export CC=mpicc
+
+
+
+PyGadget.gadget.Acceleration()¶
+

get the acceleration for a given sample of points

+
+ +
+
+PyGadget.gadget.AllAcceleration()¶
+

Computes the gravitational acceleration for each particle

+
+ +
+
+PyGadget.gadget.AllPotential()¶
+

Computes the potential for each particle

+
+ +
+
+PyGadget.gadget.Density()¶
+

compute the density based on the tree for a given number of points

+
+ +
+
+PyGadget.gadget.GetAllAcceleration()¶
+

get the gravitational acceleration for each particle

+
+ +
+
+PyGadget.gadget.GetAllDensities()¶
+

get the densities for each particle

+
+ +
+
+PyGadget.gadget.GetAllHsml()¶
+

get the sph smoothing length for each particle

+
+ +
+
+PyGadget.gadget.GetAllID()¶
+

get the ID for each particle

+
+ +
+
+PyGadget.gadget.GetAllIDQ()¶
+

get the ID for each particle Q

+
+ +
+
+PyGadget.gadget.GetAllMasses()¶
+

get the mass for each particle

+
+ +
+
+PyGadget.gadget.GetAllMassesQ()¶
+

get the mass for each particle Q

+
+ +
+
+PyGadget.gadget.GetAllPositions()¶
+

get the position for each particle

+
+ +
+
+PyGadget.gadget.GetAllPositionsQ()¶
+

get the position for each particle Q

+
+ +
+
+PyGadget.gadget.GetAllPotential()¶
+

get the potential for each particle

+
+ +
+
+PyGadget.gadget.GetAllTypes()¶
+

get the type for each particle

+
+ +
+
+PyGadget.gadget.GetAllTypesQ()¶
+

get the type for each particle Q

+
+ +
+
+PyGadget.gadget.GetAllVelocities()¶
+

get the velocities for each particle

+
+ +
+
+PyGadget.gadget.GetAllVelocitiesQ()¶
+

get the velocities for each particle Q

+
+ +
+
+PyGadget.gadget.GetParameters()¶
+

get gadget parameters

+
+ +
+
+PyGadget.gadget.GetPos()¶
+

get the position for each particle (no memory overhead)

+
+ +
+
+PyGadget.gadget.Info()¶
+

give some info

+
+ +
+
+PyGadget.gadget.InitDefaultParameters()¶
+

Init default parameters

+
+ +
+
+PyGadget.gadget.InitHsml()¶
+

Init hsml based on the tree for a given number of points

+
+ +
+
+PyGadget.gadget.InitMPI()¶
+

Init MPI

+
+ +
+
+PyGadget.gadget.LoadParticles()¶
+

Load particles

+
+ +
+
+PyGadget.gadget.LoadParticles2()¶
+

Load particles

+
+ +
+
+PyGadget.gadget.LoadParticlesQ()¶
+

Load particles Q

+
+ +
+
+PyGadget.gadget.Ngbs()¶
+

return the position of the neighbors for a given point

+
+ +
+
+PyGadget.gadget.Potential()¶
+

get the potential for a given sample of points

+
+ +
+
+PyGadget.gadget.SetParameters()¶
+

Set gadget parameters

+
+ +
+
+PyGadget.gadget.SphEvaluate()¶
+

compute mean value based on the sph convolution for a given number of points

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the C ptreelib module

+

Next topic

+

the C pygsl module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_asciilib.html similarity index 60% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_asciilib.html index e0c0581..490782e 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_asciilib.html @@ -1,126 +1,133 @@ - Overview — pNbody v4 documentation + the C asciilib module — pNbody v4 documentation - - + + +
-
-

Overview¶

-

pNbody is a parallelized python module toolbox designed to manipulate and display
interactively very large N-body systems.

Its object-oriented approach allows the user to perform complicated manipulations
with only very few commands.

As python is an interpreted language, the user can load an N-body system and explore it
interactively using the python interpreter. pNbody may also be used in python scripts.

The module also contains graphical facilities designed to create maps of physical values of
the system, like density maps, temperature maps, velocity maps, etc. Stereo capabilities are
also implemented.

pNbody is not limited by file format. Each user may redefine in a parameter file how to read
a preferred format.

Its new parallel (mpi) facilities make it work on computer clusters without being limited by
memory consumption. It has already been tested with several millions of particles.

-../_images/cosmo.png +
+

the C asciilib module¶

+
+
+pNbody.asciilib.read()¶
+

Read ascii file

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the cooling module

Next topic

-

the Io module

+

the C coolinglib module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/C_cooling_with_metals.html b/Doc/newdoc/_build/html/rst/C_cooling_with_metals.html new file mode 100644 index 0000000..67f0d3e --- /dev/null +++ b/Doc/newdoc/_build/html/rst/C_cooling_with_metals.html @@ -0,0 +1,187 @@ + + + + + + + + + the C cooling_with_metals module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the C cooling_with_metals module¶

+
+
+pNbody.cooling_with_metals.PrintParameters()¶
+

Print parameters.

+
+ +
+
+pNbody.cooling_with_metals.get_cooling_time_from_Density_EnergyInt_FeH()¶
+

Get the cooling time in user units.

+
+ +
+
+pNbody.cooling_with_metals.get_cooling_time_from_Density_Temperature_FeH()¶
+

Get the cooling time in user units.

+
+ +
+
+pNbody.cooling_with_metals.get_lambda_from_Density_EnergyInt_FeH()¶
+

Get the lambda function in user units.

+
+ +
+
+pNbody.cooling_with_metals.get_lambda_from_Density_Entropy_FeH()¶
+

Get the lambda function in user units.

+
+ +
+
+pNbody.cooling_with_metals.get_lambda_from_Density_Temperature_FeH()¶
+

Get the lambda function in user units.

+
+ +
+
+pNbody.cooling_with_metals.get_lambda_normalized_from_Temperature_FeH()¶
+

Get the normalized lambda function in mks.

+
+ +
+
+pNbody.cooling_with_metals.init_cooling()¶
+

Init cooling.

+
+ +
+
+pNbody.cooling_with_metals.integrate1()¶
+

Integrate cooling during a timestep using first scheme of integration.

+
+ +
+
+pNbody.cooling_with_metals.integrate2()¶
+

Integrate cooling during a timestep using second scheme of integration.

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the C coolinglib module

+

Next topic

+

the C cosmolib module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_coolinglib.html similarity index 54% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_coolinglib.html index e0c0581..9b6d72e 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_coolinglib.html @@ -1,126 +1,139 @@ - Overview — pNbody v4 documentation + the C coolinglib module — pNbody v4 documentation - - + + +
-
-

+

the C coolinglib module¶

+
+
+pNbody.coolinglib.cooling()¶
+

Return Mu and Lambda. Energy spec and Density must be in cgs.

+
+ +
+
+pNbody.coolinglib.cooling_from_nH_and_T()¶
+

Return Mu and Lambda. Temperature and Hydrogen Density must be in cgs.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the C asciilib module

Next topic

-

the Io module

+

the C cooling_with_metals module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_cosmolib.html similarity index 59% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_cosmolib.html index e0c0581..597900c 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_cosmolib.html @@ -1,126 +1,133 @@ - Overview — pNbody v4 documentation + the C cosmolib module — pNbody v4 documentation - - + + +
-
-

+

the C cosmolib module¶

+
+
+pNbody.cosmolib.Age_a()¶
+

Return the age of the universe as a function of a (expansion factor).

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the C cooling_with_metals module

Next topic

-

the Io module

+

the C iclib module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/C_iclib.html b/Doc/newdoc/_build/html/rst/C_iclib.html new file mode 100644 index 0000000..7fa6aa0 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/C_iclib.html @@ -0,0 +1,181 @@ + + + + + + + + + the C iclib module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the C iclib module¶

+
+
+pNbody.iclib.burkert()¶
+

Return position of a burkert model.

+
+ +
+
+pNbody.iclib.exponential_disk()¶
+

Return position of an exponential disk.

+
+ +
+
+pNbody.iclib.generic_Mr()¶
+

Return position following the density given by M(r)=Mr.

+
+ +
+
+pNbody.iclib.generic_Mx()¶
+

Return position following the density given by M(x)=Mx. We assume a homogeneous distribution in y and z.

+
+ +
+
+pNbody.iclib.generic_Mx1D()¶
+

Return position following the density given by M(x)=Mx. Return only x.

+
+ +
+
+pNbody.iclib.generic_alpha()¶
+

Return position following the density (r+eps)^a.

+
+ +
+
+pNbody.iclib.miyamoto_nagai()¶
+

Return position of a miyamoto_nagai model.

+
+ +
+
+pNbody.iclib.miyamoto_nagai_f()¶
+

Return position of a miyamoto_nagai model divided by f(R).

+
+ +
+
+pNbody.iclib.nfwg()¶
+

Return position of a nfwg model.

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the C cosmolib module

+

Next topic

+

the C mapping module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_mapping-omp.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_mapping-omp.html index e0c0581..d91ad9d 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_mapping-omp.html @@ -1,126 +1,129 @@ - Overview — pNbody v4 documentation + the C mapping-omp module (under construction) — pNbody v4 documentation - - + + +
-
-

+

the C mapping-omp module (under construction)¶

+

currently not available, still under construction.

Previous topic

-

Welcome to pNbody’s documentation!

+

the C mapping module

Next topic

-

the Io module

+

the C montecarlolib module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/C_mapping.html b/Doc/newdoc/_build/html/rst/C_mapping.html new file mode 100644 index 0000000..f146d77 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/C_mapping.html @@ -0,0 +1,223 @@ + + + + + + + + + the C mapping module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the C mapping module¶

+
+
+pNbody.mapping.create_line()¶
+

Add a line to the given matrix using the Bresenham algorithm.

+
+ +
+
+pNbody.mapping.create_line2()¶
+

Add a line to the given matrix using the Bresenham algorithm.

+
+ +
+
+pNbody.mapping.create_line3()¶
+

Add a line to the given matrix using a custom algorithm.

+
+ +
+
+pNbody.mapping.mkmap1d()¶
+

Return a 1d mapping.

+
+ +
+
+pNbody.mapping.mkmap1dn()¶
+

Return a 1d mapping.

+
+ +
+
+pNbody.mapping.mkmap1dw()¶
+

Return a 1d mapping (a particle is distributed over 2 nodes).

+
+ +
+
+pNbody.mapping.mkmap2d()¶
+

Return a 2d mapping.

+
+ +
+
+pNbody.mapping.mkmap2dn()¶
+

Return a 2d mapping.

+
+ +
+
+pNbody.mapping.mkmap2dnsph()¶
+

Return a 2d smoothed mapping.

+
+ +
+
+pNbody.mapping.mkmap2dsph()¶
+

Return a 2d smoothed mapping.

+
+ +
+
+pNbody.mapping.mkmap2dw()¶
+

Return a 2d mapping (a particle is distributed over 4 nodes).

+
+ +
+
+pNbody.mapping.mkmap3d()¶
+

Return a 3d mapping.

+
+ +
+
+pNbody.mapping.mkmap3dn()¶
+

Return a 3d mapping.

+
+ +
+
+pNbody.mapping.mkmap3dslicesph()¶
+

Return a 3d slice (sph).

+
+ +
+
+pNbody.mapping.mkmap3dsortedsph()¶
+

Return a 3d mapping (sph).

+
+ +
+
+pNbody.mapping.mkmap3dw()¶
+

Return a 3d mapping (a particle is distributed over 8 nodes).

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the C iclib module

+

Next topic

+

the C mapping-omp module (under construction)

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_montecarlolib.html similarity index 52% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_montecarlolib.html index e0c0581..bff9198 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_montecarlolib.html @@ -1,126 +1,145 @@ - Overview — pNbody v4 documentation + the C montecarlolib module — pNbody v4 documentation - - + + +
-
-

+

the C montecarlolib module¶

+
+
+pNbody.montecarlolib.mc1d()¶
+

Return a 1d monte carlo distribution.

+
+ +
+
+pNbody.montecarlolib.mc2d()¶
+

Return a 2d monte carlo distribution.

+
+ +
+
+pNbody.montecarlolib.mc3d()¶
+

Return a 3d monte carlo distribution.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the C mapping-omp module (under construction)

Next topic

-

the Io module

+

the C myNumeric module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/C_myNumeric.html b/Doc/newdoc/_build/html/rst/C_myNumeric.html new file mode 100644 index 0000000..e9c1ade --- /dev/null +++ b/Doc/newdoc/_build/html/rst/C_myNumeric.html @@ -0,0 +1,253 @@ + + + + + + + + + the C myNumeric module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the C myNumeric module¶

+
+
+pNbody.myNumeric.Interpolate_From_1d_Array()¶
+

Interpolate values from a given array.

+
+ +
+
+pNbody.myNumeric.Interpolate_From_2d_Array()¶
+

Interpolate values from a given array.

+
+ +
+
+pNbody.myNumeric.expand()¶
+

Expand a matrix.

+
+ +
+
+pNbody.myNumeric.getmask()¶
+

Return a mask of the same type as x, with ones where the elements of x have a counterpart in y and zeros elsewhere.

+
+ +
+
+pNbody.myNumeric.histogram2d()¶
+

Return a 2d matrix corresponding to the histogram of the values of two vectors, within the given ranges.

+
+ +
+
+pNbody.myNumeric.hnd()¶
+

Return a 3d matrix corresponding to the n-dimensional histogram of a 3xn vector.

+
+ +
+
+pNbody.myNumeric.lininterp1d()¶
+

Linear interpolation of 1d function given by two vectors.

+
+ +
+
+pNbody.myNumeric.polint()¶
+

Polynomial interpolation.

+
+ +
+
+pNbody.myNumeric.quaddinterp1d()¶
+

Quadratic interpolation of 1d function given by two vectors (the slope is continuous).

+
+ +
+
+pNbody.myNumeric.quadinterp1d()¶
+

Quadratic interpolation of 1d function given by two vectors.

+
+ +
+
+pNbody.myNumeric.ratint()¶
+

Polynomial interpolation.

+
+ +
+
+pNbody.myNumeric.rotx()¶
+

Rotation around the x axis.

+
+ +
+
+pNbody.myNumeric.roty()¶
+

Rotation around the y axis.

+
+ +
+
+pNbody.myNumeric.rotz()¶
+

Rotation around the z axis.

+
+ +
+
+pNbody.myNumeric.spline()¶
+

spline.

+
+ +
+
+pNbody.myNumeric.spline3d()¶
+

Return a 3d interpolation.

+
+ +
+
+pNbody.myNumeric.splint()¶
+

splint.

+
+ +
+
+pNbody.myNumeric.test()¶
+

Some test on PyArray object.

+
+ +
+
+pNbody.myNumeric.turnup()¶
+

Turn up a matrix.

+
+ +
+
+pNbody.myNumeric.vprod()¶
+

Calculate the vector (cross) product of two vectors.

+
+ +
+
+pNbody.myNumeric.whistogram()¶
+

Return a weighted histogram.

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the C montecarlolib module

+

Next topic

+

the C nbdrklib module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_nbdrklib.html similarity index 56% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_nbdrklib.html index e0c0581..a828809 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_nbdrklib.html @@ -1,126 +1,139 @@ - Overview — pNbody v4 documentation + the C nbdrklib module — pNbody v4 documentation - - + + +
-
-

+

the C nbdrklib module¶

+
+
+pNbody.nbdrklib.Compute()¶
+

Compute all.

+
+ +
+
+pNbody.nbdrklib.IntegrateOverDt()¶
+

Integrate the system over dt.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the C myNumeric module

Next topic

-

the Io module

+

the C nbodymodule module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/C_nbodymodule.html b/Doc/newdoc/_build/html/rst/C_nbodymodule.html new file mode 100644 index 0000000..1f19bf3 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/C_nbodymodule.html @@ -0,0 +1,223 @@ + + + + + + + + + the C nbodymodule module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the C nbodymodule module¶

+
+
+pNbody.nbodymodule.acceleration()¶
+

Calculate the acceleration at a given position, with a given softening.

+
+ +
+
+pNbody.nbodymodule.am()¶
+

Calculate the angular momentum of the model.

+
+ +
+
+pNbody.nbodymodule.ampmap()¶
+

Return a map of amplitude of the given points.

+
+ +
+
+pNbody.nbodymodule.amxyz()¶
+

Calculate the angular momentum in x,y,z for all particles.

+
+ +
+
+pNbody.nbodymodule.convol()¶
+

Return a 2d convolution of a with kernel b.

+
+ +
+
+pNbody.nbodymodule.epot()¶
+

Calculate the total potential energy.

+
+ +
+
+pNbody.nbodymodule.pamap()¶
+

Return a map of the given points.

+
+ +
+
+pNbody.nbodymodule.pdmap()¶
+

Return a weighted map of the given points.

+
+ +
+
+pNbody.nbodymodule.perspective()¶
+

Return a 3d projection of the given points.

+
+ +
+
+pNbody.nbodymodule.potential()¶
+

Calculate the potential at a given position, with a given softening.

+
+ +
+
+pNbody.nbodymodule.rotx()¶
+

Rotation around the x axis.

+
+ +
+
+pNbody.nbodymodule.roty()¶
+

Rotation around the y axis.

+
+ +
+
+pNbody.nbodymodule.rotz()¶
+

Rotation around the z axis.

+
+ +
+
+pNbody.nbodymodule.samxyz()¶
+

Calculate the specific angular momentum in x,y,z for all particles.

+
+ +
+
+pNbody.nbodymodule.sphmap()¶
+

Return a sphmap of the given points.

+
+ +
+
+pNbody.nbodymodule.spin()¶
+

Spin the model around an axis.

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the C nbdrklib module

+

Next topic

+

the C peanolib module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_peanolib.html similarity index 55% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_peanolib.html index e0c0581..ea119c4 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_peanolib.html @@ -1,126 +1,139 @@ - Overview — pNbody v4 documentation + the C peanolib module — pNbody v4 documentation - - + + +
-
-

+

the C peanolib module¶

+
+
+pNbody.peanolib.peano2xyz()¶
+

From a Peano key, return an xyz triplet.

+
+ +
+
+pNbody.peanolib.xyz2peano()¶
+

From an xyz triplet, return a Peano key.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the C nbodymodule module

Next topic

-

the Io module

+

the C pmlib module (never developed)

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_pmlib.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_pmlib.html index e0c0581..6257b99 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_pmlib.html @@ -1,126 +1,129 @@ - Overview — pNbody v4 documentation + the C pmlib module (never developped) — pNbody v4 documentation - - + + +
-
-

+

the C pmlib module (never developed)¶

+

currently not available, still under construction.

Previous topic

-

Welcome to pNbody’s documentation!

+

the C peanolib module

Next topic

-

the Io module

+

the C ptreelib module (obsolete)

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_ptreelib.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_ptreelib.html index e0c0581..26a27b6 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_ptreelib.html @@ -1,126 +1,129 @@ - Overview — pNbody v4 documentation + the C ptreelib module (obsolete) — pNbody v4 documentation - - + + +
-
-

+

the C ptreelib module (obsolete)¶

+

currently not available, still under construction.

Previous topic

-

Welcome to pNbody’s documentation!

+

the C pmlib module (never developed)

Next topic

-

the Io module

+

the C PyGadget module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_pygsl.html similarity index 59% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_pygsl.html index e0c0581..6cb8851 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_pygsl.html @@ -1,126 +1,133 @@ - Overview — pNbody v4 documentation + the C pygsl module — pNbody v4 documentation - - + + +
-
-

+

the C pygsl module¶

+
+
+pNbody.pygsl.sobol_sequence()¶
+

Return a Sobol sequence of length n and dimension d.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the C PyGadget module

Next topic

-

the Io module

+

the C streelib module (under construction)

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_streelib.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_streelib.html index e0c0581..70b0810 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_streelib.html @@ -1,126 +1,129 @@ - Overview — pNbody v4 documentation + the C streelib module (under construction) — pNbody v4 documentation - - + + +
-
-

+

the C streelib module (under construction)¶

+

currently not available, still under construction.

Previous topic

-

Welcome to pNbody’s documentation!

+

the C pygsl module

Next topic

-

the Io module

+

the C tessel module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/C_tessel.html b/Doc/newdoc/_build/html/rst/C_tessel.html new file mode 100644 index 0000000..5a7e958 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/C_tessel.html @@ -0,0 +1,193 @@ + + + + + + + + + the C tessel module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the C tessel module¶

+
+
+pNbody.tessel.CircumCircleProperties()¶
+

Get the circumcircle properties.

+
+ +
+
+pNbody.tessel.ComputeIsoContours()¶
+

Compute iso-contours.

+
+ +
+
+pNbody.tessel.ConstructDelaunay()¶
+

Construct the Delaunay tessellation for a given sample of points.

+
+ +
+
+pNbody.tessel.GetTriangles()¶
+

Get the triangles as a list of 3x3 arrays.

+
+ +
+
+pNbody.tessel.GetVoronoi()¶
+

Get a list of segments corresponding to the Voronoi diagram.

+
+ +
+
+pNbody.tessel.InCircumCircle()¶
+

Return whether the circumcircle of the triangle (P1,P2,P3) contains the point P4.

+
+ +
+
+pNbody.tessel.InTriangle()¶
+

Return whether the triangle (P1,P2,P3) contains the point P4.

+
+ +
+
+pNbody.tessel.InTriangleOrOutside()¶
+

Return whether the triangle (P1,P2,P3) contains the point P4.

+
+ +
+
+pNbody.tessel.TriangleMedians()¶
+

Get Triangle Medians

+
+ +
+
+pNbody.tessel.info()¶
+

Info on the tessellation.

+
+ +
+
+pNbody.tessel.test()¶
+

Simple Test

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the C streelib module (under construction)

+

Next topic

+

the C treelib module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/C_treelib.html similarity index 60% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/C_treelib.html index e0c0581..4cb08d2 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/C_treelib.html @@ -1,126 +1,118 @@ - Overview — pNbody v4 documentation + the C treelib module — pNbody v4 documentation - - + +
-
-

+

the C treelib module¶

+

Provides serial tree routines based on Gadget-2. Should be replaced by PyGadget.

Previous topic

-

Welcome to pNbody’s documentation!

-

Next topic

-

the Io module

+

the C tessel module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/CoolingModule.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/CoolingModule.html index e0c0581..12a8672 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/CoolingModule.html @@ -1,126 +1,127 @@ - Overview — pNbody v4 documentation + the cooling module — pNbody v4 documentation - - + + +
-
-

+

the cooling module¶

Previous topic

-

Welcome to pNbody’s documentation!

+

the geometry module

Next topic

-

the Io module

+

the cosmo module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/CosmoModule.html b/Doc/newdoc/_build/html/rst/CosmoModule.html new file mode 100644 index 0000000..1813267 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/CosmoModule.html @@ -0,0 +1,179 @@ + + + + + + + + + the cosmo module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the cosmo module¶

+
+
+pNbody.cosmo.A_z(z)¶
+
+ +
+
+pNbody.cosmo.Adot_a(a, pars={'Hubble': 0.100000001545942, 'HubbleParam': 0.72999999999999998, 'Omega0': 0.23999999999999999, 'OmegaLambda': 0.76000000000000001})¶
+

da/dt

+
+ +
+
+pNbody.cosmo.Age_a(a, pars={'Hubble': 0.100000001545942, 'HubbleParam': 0.72999999999999998, 'Omega0': 0.23999999999999999, 'OmegaLambda': 0.76000000000000001})¶
+

cosmic age as a function of a

+
+ +
+
+pNbody.cosmo.CosmicTime_a(a, pars={'Hubble': 0.100000001545942, 'HubbleParam': 0.72999999999999998, 'Omega0': 0.23999999999999999, 'OmegaLambda': 0.76000000000000001})¶
+

cosmic time as a function of a in internal units, i.e. (1/h)

+
+ +
+
+pNbody.cosmo.Hubble_a(a)¶
+
+ +
+
+pNbody.cosmo.Rho_c(localsystem_of_units)¶
+

Critical density

+
+ +
+
+pNbody.cosmo.Z_a(a)¶
+
+ +
+
+pNbody.cosmo.a_CosmicTime(t, pars={'Hubble': 0.100000001545942, 'HubbleParam': 0.72999999999999998, 'Omega0': 0.23999999999999999, 'OmegaLambda': 0.76000000000000001}, a0=0.5)¶
+

return a for a given cosmic time

+
+ +
+
+pNbody.cosmo.setdefault()¶
+

set default cosmological parameters

+
+ +
+ + +
+
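As an illustration, a minimal sketch of how the functions above could be chained, using only the signatures listed on this page (the value of a below is an arbitrary example, and importing cosmo as a submodule is an assumption):

>>> from pNbody import cosmo
>>> cosmo.setdefault()                # load the default cosmological parameters
>>> a = 0.5                           # expansion factor (example value)
>>> z = cosmo.Z_a(a)                  # corresponding redshift
>>> age = cosmo.Age_a(a)              # cosmic age as a function of a
>>> t = cosmo.CosmicTime_a(a)         # cosmic time in internal units, i.e. (1/h)
>>> a_back = cosmo.a_CosmicTime(t)    # invert: expansion factor from a cosmic time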
+
+
+
+ +

Previous topic

+

the cooling module

+

Next topic

+

the thermodyn module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/CtesModule.html similarity index 60% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/CtesModule.html index e0c0581..11f10d2 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/CtesModule.html @@ -1,126 +1,123 @@ - Overview — pNbody v4 documentation + the ctes module — pNbody v4 documentation - - + +
-
-

+

the ctes module¶

+
+
+pNbody.ctes.convert_ctes(units)¶
+

Convert a constant into a given unit system.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

-

Next topic

-

the Io module

+

the io module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Default_configurations.html b/Doc/newdoc/_build/html/rst/Default_configurations.html new file mode 100644 index 0000000..96d49bf --- /dev/null +++ b/Doc/newdoc/_build/html/rst/Default_configurations.html @@ -0,0 +1,174 @@ + + + + + + + + + Default configuration — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

Default configuration¶

+

pNbody uses a set of parameter files, color tables and format files. These files are provided by the installation and are by default stored in the directory site-packages/pNbody/config. To display where these files are taken from, you can use the command:

+
pNbody_show-path
+
+
+

It is recommended that users use their own configuration files. To be automatically recognized by pNbody, these files must be placed in the user's ~/.pNbody directory. pNbody provides a simple command to copy all parameters into this directory. Simply type:

+
pNbody_copy-defaultconfig
+
+
+

and check the values of the new paths:

+
pNbody_show-path
+
+
+

You can now freely modify the files contained in the configuration directory.

+

By default, the content of the configuration directory is:

name                type         Content
defaultparameters   file         the default graphical parameters used by pNbody
unitsparameters     file         the default units parameters used by pNbody
formats             directory    specific class definition files used to read different file formats
rgb_tables          directory    color tables
plugins             directory    optional plugins
opt                 directory    optional files
+
+ + +
+
+
+
+
+ +

Previous topic

+

Check the installation

+

Next topic

+

Default parameters

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Default_parameters.html b/Doc/newdoc/_build/html/rst/Default_parameters.html new file mode 100644 index 0000000..470612b --- /dev/null +++ b/Doc/newdoc/_build/html/rst/Default_parameters.html @@ -0,0 +1,179 @@ + + + + + + + + + Default parameters — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

Default parameters¶

+

To see what default parameters pNbody uses, type:

+
pNbody_show-parameters
+
+
+

The script returns the parameters taken from the files +defaultparameters and unitsparameters. +Their current values are displayed:

+
parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters
+
+----------------------------------------------------------------------------------------------------
+                          name                          meaning             value (type)
+----------------------------------------------------------------------------------------------------
+                           obs :                       observer =            None (ArrayObs)
+                            xp :             observing position =            None (List)
+                            x0 :           position of observer =            None (List)
+                         alpha :              angle of the head =            None (Float)
+                          view :                           view =              xz (String)
+                         r_obs :          dist. to the observer =   201732.223771 (Float)
+                          clip :                    clip planes = (100866.11188556443, 403464.44754225772) (Tuple)
+                           cut :                cut clip planes =              no (String)
+                           eye :                name of the eye =            None (String)
+                      dist_eye :          distance between eyes =         -0.0005 (Float)
+                           foc :                          focal =           300.0 (Float)
+                         persp :                    perspective =             off (String)
+                         shape :             shape of the image =      (512, 512) (Tuple)
+                          size :                   pysical size =    (6000, 6000) (Tuple)
+                          frsp :                           frsp =             0.0 (Float)
+                         space :                          space =             pos (String)
+                          mode :                           mode =               m (String)
+                     rendering :                 rendering mode =             map (String)
+                   filter_name :             name of the filter =            None (String)
+                   filter_opts :                 filter options =  [10, 10, 2, 2] (List)
+                         scale :                          scale =             log (String)
+                            cd :                             cd =             0.0 (Float)
+                            mn :                             mn =             0.0 (Float)
+                            mx :                             mx =             0.0 (Float)
+                           l_n :               number of levels =              15 (Int)
+                         l_min :                      min level =             0.0 (Float)
+                         l_max :                      max level =             0.0 (Float)
+                          l_kx :                           l_kx =              10 (Int)
+                          l_ky :                           l_ky =              10 (Int)
+                       l_color :                    level color =               0 (Int)
+                       l_crush :               crush background =              no (String)
+                      b_weight :                box line weight =               0 (Int)
+                       b_xopts :                 x axis options =            None (Tuple)
+                       b_yopts :                 y axis options =            None (Tuple)
+                       b_color :                     line color =             255 (Int)
+
+parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters
+
+----------------------------------------------------------------------------------------------------
+                          name                          meaning             value (type)
+----------------------------------------------------------------------------------------------------
+                            xi :         hydrogen mass fraction =            0.76 (Float)
+                    ionisation :                ionisation flag =               1 (Int)
+                    metalicity :               metalicity index =               4 (Int)
+                          Nsph :        number of sph neighbors =              50 (Int)
+                         gamma :                adiabatic index =   1.66666666667 (Float)
+                   coolingfile :                   Cooling file = ~/.Nbody/cooling.dat (String)
+                   HubbleParam :                    HubbleParam =             1.0 (Float)
+              UnitLength_in_cm :               UnitLength in cm =       3.085e+21 (Float)
+                 UnitMass_in_g :                  UnitMass in g =    4.435693e+44 (Float)
+      UnitVelocity_in_cm_per_s :       UnitVelocity in cm per s =   97824708.2699 (Float)
+
+
+ + +
+
+
+
+
+ +

Previous topic

+

Default configuration

+

Next topic

+

Examples

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Display.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Display.html index e0c0581..d0f0dab 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Display.html @@ -1,126 +1,124 @@ - Overview — pNbody v4 documentation + Display Models — pNbody v4 documentation - - + +
-
-

+

Display Models¶

Previous topic

-

Welcome to pNbody’s documentation!

+

Setting a format file

Next topic

-

the Io module

+

Reference

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Documentation_and_examples.html similarity index 56% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Documentation_and_examples.html index e0c0581..82aaaae 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Documentation_and_examples.html @@ -1,126 +1,92 @@ - Overview — pNbody v4 documentation + Examples — pNbody v4 documentation - - - +
-
-

+

Examples¶

-

Previous topic

-

Welcome to pNbody’s documentation!

-

Next topic

-

the Io module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Examples.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Examples.html index e0c0581..c313431 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Examples.html @@ -1,126 +1,121 @@ - Overview — pNbody v4 documentation + Examples — pNbody v4 documentation - - + + +
-
-

+

Examples¶

+

A series of examples is provided by pNbody in the directory PNBODYPATH/examples, where PNBODYPATH is obtained with the command:

+
pNbody_show-path
+
+
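If the package location is needed from within a script, a hedged equivalent (assuming PNBODYPATH is simply the installed pNbody package directory, which may differ from the paths printed by pNbody_show-path) is:

>>> import os, pNbody
>>> examples_dir = os.path.join(os.path.dirname(pNbody.__file__), 'examples')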

Previous topic

-

Welcome to pNbody’s documentation!

+

Default parameters

Next topic

-

the Io module

+

Tutorial

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Formats.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Formats.html index e0c0581..2b4c4bc 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Formats.html @@ -1,126 +1,124 @@ - Overview — pNbody v4 documentation + Setting a format file — pNbody v4 documentation - - + +
-
-

+

Setting a format file¶

Previous topic

-

Welcome to pNbody’s documentation!

+

Using pNbody in parallel

Next topic

-

the Io module

+

Display Models

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/FortranfileModule.html b/Doc/newdoc/_build/html/rst/FortranfileModule.html new file mode 100644 index 0000000..308ad1f --- /dev/null +++ b/Doc/newdoc/_build/html/rst/FortranfileModule.html @@ -0,0 +1,302 @@ + + + + + + + + + the fortranfile module — pNbody v4 documentation + + + + + + + + + + + + + +
+
+
+
+ +
+

the fortranfile module¶

+

Defines a file-derived class to read/write Fortran unformatted files.

+

The assumption is that a Fortran unformatted file is being written by +the Fortran runtime as a sequence of records. Each record consists of +an integer (of the default size [usually 32 or 64 bits]) giving the +length of the following data in bytes, then the data itself, then the +same integer as before.

+
+

Examples¶

+
+
To use the default endian and precision settings, one can just do::
+
>>> f = FortranFile('filename')
+>>> x = f.readReals()
+
+
+
+
One can read arrays with varying precisions::
+
>>> f = FortranFile('filename')
+>>> x = f.readInts('h')
+>>> y = f.readInts('q')
+>>> z = f.readReals('f')
+
+
+
+
+

Where the format codes are those used by Python’s struct module.

+
+
One can change the default endian-ness and header precision::
+
>>> f = FortranFile('filename', endian='>', header_prec='l')
+
+
+
+
+

for a file with big-endian data whose record headers are long integers.

+
+
+class pNbody.fortranfile.FortranFile(fname, endian='@', header_prec='i', *args, **kwargs)¶
+

File with methods for dealing with fortran unformatted data files

+

Methods

+
+
+ENDIAN¶
+

Possible endian values are ‘<’, ‘>’, ‘@’, ‘=’

+
+ +
+
+HEADER_PREC¶
+

Possible header precisions are ‘h’, ‘i’, ‘l’, ‘q’

+
+ +
+
+readInts(prec='i')¶
+

Read an array of integers.

+ +++ + + + +
Parameters :

prec : character, optional

+
+

Specify the precision of the data to be read using +character codes from Python’s struct module. Possible +values are ‘h’, ‘i’, ‘l’ and ‘q’

+
+
+
+ +
+
+readReals(prec='f')¶
+

Read in an array of real numbers.

+ +++ + + + +
Parameters :

prec : character, optional

+
+

Specify the precision of the array using character codes from +Python’s struct module. Possible values are ‘d’ and ‘f’.

+
+
+
+ +
+
+readRecord()¶
+

Read a single fortran record

+
+ +
+
+readString()¶
+

Read a string.

+
+ +
+
+writeInts(ints, prec='i')¶
+

Write an array of integers in given precision

+ +++ + + + +
Parameters :

reals : array

+
+

Data to write

+
+

prec : string

+
+

Character code for the precision to use in writing

+
+
+
+ +
+
+writeReals(reals, prec='f')¶
+

Write an array of floats in given precision

+ +++ + + + +
Parameters :

reals : array

+
+

Data to write

+
+

prec : string

+
+

Character code for the precision to use in writing

+
+
+
+ +
+
+writeRecord(s)¶
+

Write a record with the given bytes.

+ +++ + + + +
Parameters :s : the string to write
+
+ +
+
+writeString(s)¶
+

Write a string

+ +++ + + + +
Parameters :s : the string to write
+
+ +
+ +
+
+ + +
+
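As a complement to the reading examples above, a hedged sketch of writing a file (passing mode='wb' through the constructor's *args/**kwargs is an assumption, as is passing a numpy array to writeReals):

>>> import numpy as np
>>> f = FortranFile('data.unf', mode='wb')    # extra file() arguments assumed to pass through
>>> f.writeReals(np.linspace(0., 1., 10), prec='d')
>>> f.writeInts([1, 2, 3], prec='i')
>>> f.close()
>>> g = FortranFile('data.unf')
>>> x = g.readReals('d')
>>> i = g.readInts('i')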
+
+
+
+ +

Table Of Contents

+ + +

Previous topic

+

the libqt module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/FourierModule.html similarity index 59% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/FourierModule.html index e0c0581..dac7909 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/FourierModule.html @@ -1,126 +1,133 @@ - Overview — pNbody v4 documentation + the fourier module — pNbody v4 documentation - - + + +
-
-

+

the fourier module¶

+
+
+pNbody.fourier.fourier()¶
+

Fourier decomposition: Sum_(m=1, n) amp_m cos( 2.pi f_m phi + phi_m ) = Sum_(m=1, n) amp_m cos( m phi + phi_m ), with m = 2.pi f_m.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the montecarlo module

Next topic

-

the Io module

+

the phot module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/GeometryModule.html b/Doc/newdoc/_build/html/rst/GeometryModule.html new file mode 100644 index 0000000..fb0c187 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/GeometryModule.html @@ -0,0 +1,229 @@ + + + + + + + + + the geometry module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the geometry module¶

+
+
+pNbody.geometry.align(x, axis1=[1, 0, 0], axis2=[0, 0, 1], point=[0, 0, 0])¶
+

Rotate the object around point in order to align the axis ‘axis1’ with the axis ‘axis2’.

+

axis1 : [x,y,z] +axis2 : [x,y,z]

+
+ +
+
+pNbody.geometry.boxcut(pos, args)¶
+

Return only particles that are inside box 1:1:1

+
+ +
+
+pNbody.geometry.boxcut_segments(pos, args)¶
+

Return only particles that are inside box 1:1:1

+
+ +
+
+pNbody.geometry.expose(obj, obs, eye=None, dist_eye=None, foc=None)¶
+

Rotate and translate the object so that it is seen as if the observer were at x0, looking at a point xp.

+

obj : object array to expose +obs : representation of the observer +eye : ‘right’ or ‘left’ +dist_eye : distance between eyes (separation = angle)) +foc : focal

+
+ +
+
+pNbody.geometry.frustum(pos, clip, size)¶
+

Project using a frustum matrix

+

clip = near and far planes +size = size of the box

+
+ +
+
+pNbody.geometry.get_obs(x0=[0.0, -50.0, 0.0], xp=[0.0, 0.0, 0.0], alpha=0, view='xz', r_obs=50)¶
+

From some parameters, return an obs matrix

+
+
x0,xp,alpha
+
+
or
+
xz
+
+
+ +
+
+pNbody.geometry.inv_viewport(xw, yw, zw, shape)¶
+

viewport transformation

+

xn = position (output from frustum or ortho) +shape = shape of the image

+
+ +
+
+pNbody.geometry.norm(x)¶
+

return the norm of vector x

+
+ +
+
+pNbody.geometry.ortho(pos, clip, size)¶
+

Project using an ortho matrix

+

clip = near and far planes +size = size of the box

+
+ +
+
+pNbody.geometry.perspective(r_obs=100.0, foc=50.0, view='xz')¶
+

Project the N-body model in order to get a perspective view.

+

r_obs = distance of the observer to the looking point +foc = focal +view = ‘xz’ , ‘xy’ , ‘yz’

+
+ +
+
+pNbody.geometry.rotate(x, angle=0, axis=[1, 0, 0], point=[0, 0, 0])¶
+

Rotate the positions and/or the velocities of the object around a specific axis +with respect to a specific point

+

angle : rotation angle in radian +axis : [x,y,z] : around this axis +point : [x,y,z] : rotation origin

+

Uses the Euler rotation matrix.

+
+ +
+
+pNbody.geometry.viewport(xn, shape=None)¶
+

viewport transformation

+

xn = position (output from frustum or ortho) +shape = shape of the image

+
+ +
+ + +
+
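For illustration, a minimal sketch using the signatures documented above (whether rotate accepts a plain (n,3) array, rather than a full Nbody object, is an assumption here):

>>> import numpy as np
>>> from pNbody import geometry
>>> pos = np.array([[1., 0., 0.], [0., 1., 0.]])
>>> geometry.norm(np.array([3., 4., 0.]))     # norm of a single vector
>>> # rotate the positions by pi/2 around the z axis, about the origin
>>> newpos = geometry.rotate(pos, angle=np.pi/2, axis=[0, 0, 1], point=[0, 0, 0])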
+
+
+
+ +

Previous topic

+

the profiles module

+

Next topic

+

the cooling module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Grids.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Grids.html index e0c0581..43b3218 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Grids.html @@ -1,126 +1,124 @@ - Overview — pNbody v4 documentation + Generating grids — pNbody v4 documentation - - + +
-
-

+

Generating grids¶

Previous topic

-

Welcome to pNbody’s documentation!

+

How to deal with units ?

Next topic

-

the Io module

+

Reference

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/IcModule.html b/Doc/newdoc/_build/html/rst/IcModule.html new file mode 100644 index 0000000..386eca8 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/IcModule.html @@ -0,0 +1,375 @@ + + + + + + + + + the ic module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the ic module¶

+
+
+pNbody.ic.ComputeGridParameters(n, args, rmax, M, pr_fct, mr_fct, Neps_des, rc, ng)¶
+

This function computes dR, the appropriate grid used to approximate Mr.

+

The grid is set in order to have “Neps_des” particles in the first division of the grid. Then, the radius of the grid follows an exponential distribution up to rmax.

+
    +
  1. from the density distribution, the total mass and the number of particles, using a Newton algorithm, it computes eps, the radius that will contain “Neps_des” particles

    +
  2. +
  3. once eps is set, we determine rc (the grid scale length) from eps and ng, in order to have a grid with a first cell equal to eps.

    +

    if the computation of rc fails, we use the default value of rc

    +
  4. +
+

The function takes the following arguments

+

n : number of particles +M : total mass +rmax : max radius +args : list of args for the profile +pr_fct : profile function +mr_fct : mass-radius function

+

Neps_des : desired number of point in the first beam +rc : default size of the first beam +ng : number of grid divisions

+

it returns :

+

Rs : grid points +eps : radius containing about Neps_des particles +Neps : number of particles in eps

+
+ +
+
+pNbody.ic.ComputeGridParameters2(eps, nmax, args, rmax, M, pr_fct, mr_fct, Neps_des, rc, ng)¶
+

This function computes dR, the appropriate grid used to approximate Mr.

+

The number of particles of the model is set in order to have “Neps_des” particles in the first division of the grid. Then, the radius of the grid follows an exponential distribution up to rmax.

+
    +
  1. n is set from the total mass and Neps_des

    +
  2. +
  3. once n is set, we determine rc (the grid scale length) from eps and ng, in order to have a grid with a first cell equal to eps.

    +

    if the computation of rc fails, we use the default value of rc

    +
  4. +
+

The function takes the following arguments

+

eps : the desired grid resolution +nmax : max number of particles +M : total mass +rmax : max radius +args : list of args for the profile +pr_fct : profile function +mr_fct : mass-radius function

+

Neps_des : desired number of point in the first beam +rc : default size of the first beam +ng : number of grid divisions

+

it returns :

+

n : number of particles +Rs : grid points +rc : parameter of the scaling fct +g : scaling fct +gm : inverse of scaling fct

+
+ +
+
+pNbody.ic.box(n, a, b, c, name='box.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles distributed in a homogeneous box of dimensions a,b,c, centred at the origin.

+
+ +
+
+pNbody.ic.burkert(n, rs, Rmax, dR, Rs=None, name='burkert.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles following a Burkert profile.

+

rhob = 1 / ( ( 1 + r/rs ) * ( 1 + (r/rs)**2 ) )

+
+ +
+
+pNbody.ic.dl2(n, a, b, c, eps, rmax, name='dl2.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles distributed as

+

rho = (1.-eps*(r/rmax)**2)

+
+ +
+
+pNbody.ic.dl2_mr(r, args)¶
+

Mass in the radius r for the distribution

+

rho = (1.-eps*(r/rmax)**2)

+
+ +
+
+pNbody.ic.expd(n, Hr, Hz, Rmax, Zmax, irand=1, name='expd.dat', ftype='binary', verbose=False)¶
+

Exponential disk

+

rho = 1/(1+(r/rc)**2)

+
+ +
+
+pNbody.ic.expd_mr(r, args)¶
+

Mass in the radius r for the distribution

+

Sigma = exp(-r)

+
+ +
+
+pNbody.ic.generic2c(n, rs, a, b, Rmax, dR, Rs=None, name='nfwg.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles following a modified NFW profile.

+

rho = 1/( (r/rs)**a * (1+r/rs)**(b-a) )

+
+ +
+
+pNbody.ic.generic_Mr(n, rmax, R=None, Mr=None, name='sphere_Mr.dat', ftype='binary', verbose=False)¶
+

Distribute particles in order to reproduce M(R) given by Mr

+
+ +
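A hedged example of how generic_Mr might be called; the normalized cumulative mass profile M(R) of a uniform-density sphere and the array names R and Mr are illustrative assumptions:

>>> from numpy import linspace
>>> from pNbody import ic
>>> R  = linspace(0.0, 1.0, 256)
>>> Mr = R**3                        # M(R) of a uniform-density sphere (normalized)
>>> nb = ic.generic_Mr(10000, rmax=1.0, R=R, Mr=Mr, ftype='binary')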
+
+pNbody.ic.generic_Mx(n, xmax, x=None, Mx=None, name='box_Mx.dat', ftype='binary', verbose=False)¶
+

Distribute particles in a box. The density in x is defined in order to reproduce M(x) given by Mx

+
+ +
+
+pNbody.ic.generic_alpha(n, a, e, rmax, irand=1, fct=None, name='generic_alpha.dat', ftype='binary', verbose=False)¶
+

Generic alpha distribution : rho ~ (r+e)^a

+
+ +
+
+pNbody.ic.hernquist(n, rs, Rmax, dR, Rs=None, name='hernquist.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles following a Hernquist profile.

+

rho = 1/( (r/rs) * (1+r/rs)**3 )

+
+ +
+
+pNbody.ic.homodisk(n, a, b, dz, name='homodisk.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles distributed in a homogeneous oval of radii a and b, and of thickness dz.

+
+ +
+
+pNbody.ic.homosphere(n, a, b, c, name='homosphere.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles distributed in a homogeneous triaxial sphere of axes a,b,c.

+
+ +
+
+pNbody.ic.invert(x, rmin, rmax, fct, args, eps=1e-10)¶
+

Return the vector r that corresponds to fct(r,args)=x. This routine uses a simple bisection algorithm.

+
+ +
+
+pNbody.ic.isothm(n, rc, rmax, name='isothm.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles distributed as

+

rho = 1/(1+r/rc)**2

+
+ +
+
+pNbody.ic.isothm_mr(r, args)¶
+

Mass in the radius r for the distribution

+

rho = 1/(1+r/rc)**2

+
+ +
+
+pNbody.ic.kuzmin(n, eps, dz, name='kuzmin.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles distributed in a Kuzmin (infinitely thin) disk

+

rho = eps*M/(2*pi*(R**2+eps**2)**(3/2))

+
+ +
+
+pNbody.ic.miyamoto_nagai(n, a, b, Rmax, Zmax, irand=1, fct=None, fRmax=0, name='miyamoto.dat', ftype='binary', verbose=False)¶
+

Miyamoto Nagai distribution

+
+ +
+
+pNbody.ic.nfw(n, rs, Rmax, dR, Rs=None, name='nfw.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles following an NFW profile.

+

rho = 1/[ (r/rs)(1+r/rs)^2 ]

+
+ +
+
+pNbody.ic.nfw_mr(r, args)¶
+

Mass in the radius r for the nfw distribution

+

rho = rhos/[ (r/rs)(1+r/rs)^2 ]

+

mr = 4*pi*rhos*rs**3 *(log(1+r/rs)-r/(rs+r))

+
+ +
+
+pNbody.ic.nfwg(n, rs, gamma, Rmax, dR, Rs=None, name='nfwg.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles following a modified NFW profile.

+

rho = 1/[ ((r/rs))**(gamma)(1+r/rs)^2 ]**(0.5*(3-gamma))

+
+ +
+
+pNbody.ic.pisothm(n, rc, rmax, rmin=0, name='pisothm.dat', ftype='binary', verbose=False)¶
+

Pseudo-isothermal sphere. Return an Nbody object that contains n particles distributed as

+

rho = 1/(1+(r/rc)**2)

+
+ +
+
+pNbody.ic.pisothm_mr(r, args)¶
+

Mass in the radius r for the distribution

+

rho = 1/(1+(r/rc)**2)

+
+ +
+
+pNbody.ic.plummer(n, a, b, c, eps, rmax, M=1.0, vel='no', name='plummer.dat', ftype='binary', verbose=False)¶
+

Return an Nbody object that contains n particles distributed in a triaxial Plummer model of axes a,b,c, core radius eps and maximum radius rmax.

+

rho = (1.+(r/eps)**2)**(-5/2)

+
+ +
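A short sketch, assuming the default ‘binary’ format; the values of a, b, c, eps and rmax are illustrative:

>>> from pNbody import ic
>>> nb = ic.plummer(30000, 1.0, 1.0, 1.0, eps=0.1, rmax=10.0, M=1.0, vel='no', ftype='binary')
>>> nb.write()                       # write the model to 'plummer.dat'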
+
+pNbody.ic.shell(n, r, name='cell.dat', ftype='binary', verbose=False)¶
+

Shell of radius r

+
+ +
+ + +
+
+
+
+
+ +


+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/InitialConditions.html similarity index 58% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/InitialConditions.html index e0c0581..9f7c9ba 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/InitialConditions.html @@ -1,126 +1,104 @@ - Overview — pNbody v4 documentation + Generating initial conditions — pNbody v4 documentation - - - +
-
-

Overview¶

-

pNbody is a parallelized python module toolbox designed to manipulate and display interactively very large N-body systems.

Its object-oriented approach allows the user to perform complicated manipulations with only very few commands.

As python is an interpreted language, the user can load an N-body system and explore it interactively using the python interpreter. pNbody may also be used in python scripts.

The module also contains graphical facilities designed to create maps of physical values of the system, like density maps, temperature maps, velocity maps, etc. Stereo capabilities are also implemented.

pNbody is not limited by file format. Each user may redefine in a parameter file how to read their preferred format.

Its new parallel (mpi) facilities make it work on computer clusters without being limited by memory consumption. It has already been tested with several million particles.

-../_images/cosmo.png +
+

Generating initial conditions¶


\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Initialconditions.html similarity index 58% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Initialconditions.html index e0c0581..1477ed6 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Initialconditions.html @@ -1,126 +1,104 @@ - Overview — pNbody v4 documentation + Generating initial conditions — pNbody v4 documentation - - - +

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Installation.html similarity index 61% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Installation.html index e0c0581..fb802a8 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Installation.html @@ -1,126 +1,128 @@ - Overview — pNbody v4 documentation + Installation — pNbody v4 documentation - - + +

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Installing_from_tarball.html similarity index 52% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Installing_from_tarball.html index e0c0581..0b16372 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Installing_from_tarball.html @@ -1,126 +1,151 @@ - Overview — pNbody v4 documentation + Installing from source — pNbody v4 documentation - - + + +
+

Installing from source¶

+
+

Decompress the tarball¶

+

Decompress the tarball file:

+
tar -xzf pNbody-4.x.tar.gz
+
+

enter the directory:

+
cd pNbody-4.x
+
+
+
+

Compile¶

+

The compilation is performed using the standard command:

+
python setup.py build
+
+

If one wants to install in a directory other than the default python one, it is possible to use the standard --prefix option:

+
python setup.py install --prefix=other_directory
+
+
+
+

Install¶

+

Now, depending on your python installation, you may need to be root. The module is installed with the following command:

+
python setup.py install
+
+
+


\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/IoModule.html b/Doc/newdoc/_build/html/rst/IoModule.html new file mode 100644 index 0000000..0895478 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/IoModule.html @@ -0,0 +1,319 @@ + + + + + + + + + the io module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the io module¶

+
+
+pNbody.io.checkfile(name)¶
+

Check if a file exists. An error is generated if the file does not exist.

+ +++ + + + +
Parameters :name : the path to a filename
+

Examples

+
>>> io.checkfile('an_existing_file')
+>>> 
+
+
+
>>> io.checkfile('a_non_existing_file')
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+  File "/home/epfl/revaz/local/lib64/python2.6/site-packages/pNbody/io.py", line 33, in checkfile
+    raise IOError(915,'file %s not found ! Pease check the file name.'%(name))
+IOError: [Errno 915] file nofile not found ! Pease check the file name.  
+
+
+
+ +
+
+pNbody.io.end_of_file(f, pio='no', MPI=None)¶
+

Return True if we have reached the end of the file f, False otherwise

+ +++ + + + + + +
Parameters :

f : file object

+
+

an open file

+
+

pio : ‘yes’ or ‘no’

+
+

if the file is read in parallel or not

+
+

MPI : MPI communicator

+
Returns :

status : Bool

+
+

True if we reached the end of the file, False if not

+
+
+
+ +
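A hedged usage sketch for the serial case (pio='no'); the file name 'snapshot.dat' and the fixed block size are illustrative:

>>> from pNbody import io
>>> f = open('snapshot.dat', 'rb')
>>> while not io.end_of_file(f):
...     block = f.read(4096)         # process the block here
>>> f.close()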
+
+pNbody.io.write_array(file, vec)¶
+

Write an array to a file, in a very simple ascii format.

+ +++ + + + +
Parameters :

file : the path to a file

+

vec : an ndarray object

+
+

Examples

+
>>> from numpy import *
+>>> x = array([1,2,3])
+>>> io.write_array('/tmp/array.dat',x)
+
+
+
+ +
+
+pNbody.io.read_ascii(file, columns=None, lines=None, dtype=<type 'float'>, skipheader=False, cchar='#')¶
+

Read an ascii file. The function allows one to set the columns or lines to read. If the file contains a header, the header is used to label all columns; in this case, a dictionary is returned.

+ +++ + + + + + +
Parameters :

file : the path to a file or an open file

+

columns : list

+
+

the list of the columns to read +if none, all columns are read

+
+

lines : list

+
+

the list of the lines to read +if none, all lines are read

+
+

dtype : dtype

+
+

the ndtype of the objects to read

+
+

skipheader : bool

+
+

if true, do not read the header +if there is one

+
+

cchar : char

+
+

lines beginning with cchar are skipped; the first line is considered as the header

+
+
Returns :

data : Dict or ndarray

+
+

A python dictionary or an ndarray object

+
+
+

Examples

+
>>> from numpy import *
+>>> x = arange(10)
+>>> y = x*x
+>>> f = open('afile.txt','w')
+>>> f.write("# x y")      
+>>> for i in xrange(len(x)):
+...   f.write('%g %g'%(x[i],y[i]))
+... 
+>>> f.close()
+>>> from pNbody import io
+>>> data = io.read_ascii("afile.txt")
+>>> data['x']
+array([ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9.])
+>>> data['y']
+array([  0.,   1.,   4.,   9.,  16.,  25.,  36.,  49.,  64.,  81.])                                    
+
+
+
+ +
+
+pNbody.io.write_dump(file, data)¶
+

Write a dmp (pickle) file. In other words, dump the data object.

+ +++ + + + +
Parameters :

file : the path to a file

+

data : a pickable python object

+
+

Examples

+
>>> x = {'a':1,'b':2}
+>>> io.write_dump('/tmp/afile.dmp',x)
+
+
+
+ +
+
+pNbody.io.read_dump(file)¶
+

Read a dmp (pickle) file.

+ +++ + + + + + +
Parameters :file : the path to a file
Returns :data : a python object
+

Examples

+
>>> x = {'a':1,'b':2}
+>>> io.write_dump('/tmp/afile.dmp',x)
+>>> y = io.read_dump('/tmp/afile.dmp')
+>>> y
+{'a': 1, 'b': 2}                                       
+
+
+
+ +
+ + +
+
+
+
+
+ +


+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/LibdiskModule.html similarity index 53% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/LibdiskModule.html index e0c0581..04a94ce 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/LibdiskModule.html @@ -1,126 +1,143 @@ - Overview — pNbody v4 documentation + the libdisk module — pNbody v4 documentation - - + + +
+

the libdisk module¶

+
+
+pNbody.libdisk.Diff(f, x, s=None, k=2)¶
+

First derivative of f(x)

+
+ +
+
+pNbody.libdisk.get_Integral(v, dr, ia, ib)¶
+

Integrate the vector v between ia and ib, using the trapezoidal rule.

+

v : values of cells (must be 1-dimensional)
dr : corresponding physical size of cells
ia : lower index (WARNING: the indices must now be integers!)
ib : higher index (WARNING: the indices must now be integers!)

+
+
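A small sketch of the trapezoidal integration above; the values are illustrative and the exact index convention for ia and ib is an assumption:

>>> from numpy import ones
>>> from pNbody import libdisk
>>> v  = 2.0*ones(10)                # constant profile of value 2
>>> dr = 0.1*ones(10)                # ten cells of size 0.1
>>> I  = libdisk.get_Integral(v, dr, 0, 9)   # integer indices, result close to 2.0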


\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/LibgridModule.html b/Doc/newdoc/_build/html/rst/LibgridModule.html new file mode 100644 index 0000000..5199a6e --- /dev/null +++ b/Doc/newdoc/_build/html/rst/LibgridModule.html @@ -0,0 +1,542 @@ + + + + + + + + + the libgrid module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the libgrid module¶

+
+
+pNbody.libgrid.get_AccelerationMap_On_Cylindrical_2dv_Grid(nb, nr, nz, rmax, zmin, zmax, eps, Tree=None)¶
+

Return an array of points containing accelerations

+
+ +
+
+pNbody.libgrid.get_AccumulatedMassMap_On_Spherical_1d_Grid(nb, nr, rmax, f=None, fm=None)¶
+

Return an array of points containing M(r) in each cell

+
+ +
+
+pNbody.libgrid.get_Accumulation_Along_Axis(mat, axis=0)¶
+

Accumulate values along an axis

+
+ +
+
+pNbody.libgrid.get_DensityMap_On_Carthesian_3d_Grid(nb, nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax)¶
+

Return an array of points containing density in each cell

+
+ +
+
+pNbody.libgrid.get_DensityMap_On_Cylindrical_2dv_Grid(nb, nr, nz, rmax, zmin, zmax)¶
+

Return an array of points containing density in each cell

+
+ +
+
+pNbody.libgrid.get_DensityMap_On_Cylindrical_3d_Grid(nb, nr, nt, nz, rmax, zmin, zmax)¶
+

Return an array of points containing density in each cell

+
+ +
+
+pNbody.libgrid.get_DensityMap_On_Spherical_1d_Grid(nb, nr, rmax, f=None, fm=None)¶
+

Return an array of points containing density in each cell

+
+ +
+
+pNbody.libgrid.get_DensityMap_On_Spherical_3d_Grid(nb, nr, np, nt, rmax)¶
+

Return an array of points containing density in each cell

+
+ +
+
+pNbody.libgrid.get_First_Derivative(f, x, s=None, k=2)¶
+

First derivative of f(x)

+
+ +
+
+pNbody.libgrid.get_GenericMap_On_Spherical_1d_Grid(nb, nr, rmax, val, f=None, fm=None)¶
+

Return an array of points containing mass*val

+
+ +
+
+pNbody.libgrid.get_Integral(v, dr, ia, ib)¶
+

Integrate the vector v, between ia and ib.

+

v : values of cells (must be 1 dimensional) +dr : corresponding physical size of cells +ia : lower real indice +ib : higher real indice

+
+ +
+
+pNbody.libgrid.get_Interpolation_On_Cylindrical_2dv_Grid(pos, mat, nr, nz, rmax, zmin, zmax, offr=0, offz=0)¶
+

Interpolates continuous value of pos, using matrix mat

+
+ +
+
+pNbody.libgrid.get_Interpolation_On_Spherical_1d_Grid(pos, mat, nr, rmax, offr=0, f=None, fm=None)¶
+

Interpolates continuous value of pos, using matrix mat

+
+ +
+
+pNbody.libgrid.get_LinearDensityMap_On_Spherical_1d_Grid(nb, nr, rmax, f=None, fm=None)¶
+

Return an array of points containing the linear density in each cell

+
+ +
+
+pNbody.libgrid.get_MassMap_On_Carthesian_2d_Grid(nb, nx, ny, xmin, xmax, ymin, ymax)¶
+

Return an array of points containing mass of particles

+
+ +
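A hedged sketch combining the mass map with the corresponding surface map to obtain a surface density map; nb is assumed to be an Nbody object opened beforehand and the grid limits are illustrative:

>>> from pNbody import libgrid
>>> m = libgrid.get_MassMap_On_Carthesian_2d_Grid(nb, 128, 128, -10., 10., -10., 10.)
>>> s = libgrid.get_SurfaceMap_On_Carthesian_2d_Grid(nb, 128, 128, -10., 10., -10., 10.)
>>> sigma = m/s                      # surface density in each cell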
+
+pNbody.libgrid.get_MassMap_On_Carthesian_3d_Grid(nb, nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_MassMap_On_Cylindrical_2dh_Grid(nb, nr, nt, rmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_MassMap_On_Cylindrical_2dv_Grid(nb, nr, nz, rmax, zmin, zmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_MassMap_On_Cylindrical_3d_Grid(nb, nr, nt, nz, rmax, zmin, zmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_MassMap_On_Spherical_1d_Grid(nb, nr, rmax, f=None, fm=None)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_MassMap_On_Spherical_3d_Grid(nb, nr, np, nt, rmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_NumberMap_On_Carthesian_2d_Grid(nb, nx, ny, xmin, xmax, ymin, ymax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_NumberMap_On_Carthesian_3d_Grid(nb, nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_NumberMap_On_Cylindrical_2dh_Grid(nb, nr, nt, rmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_NumberMap_On_Cylindrical_2dv_Grid(nb, nr, nz, rmax, zmin, zmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_NumberMap_On_Cylindrical_3d_Grid(nb, nr, nt, nz, rmax, zmin, zmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_NumberMap_On_Spherical_1d_Grid(nb, nr, rmax, f=None, fm=None)¶
+

Return an array of points containing number of particles

+
+ +
+
+pNbody.libgrid.get_NumberMap_On_Spherical_3d_Grid(nb, nr, np, nt, rmax)¶
+

Return an array of points containing mass of particles

+
+ +
+
+pNbody.libgrid.get_Points_On_Carthesian_2d_Grid(nx, ny, xmin, xmax, ymin, ymax, offx=0, offy=0)¶
+

Return an array of points corresponding to the center of cells of a 2d cartesian grid.

+

To get a nt X nr array from the returned vector (pos), do

+

x = copy(pos[:,0]) +y = copy(pos[:,1]) +z = copy(pos[:,2])

+

x.shape = (nx,ny) +y.shape = (nx,ny) +z.shape = (nx,ny)

+
+ +
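The reshaping described above, written out as a small sketch (nx, ny and the grid limits are illustrative):

>>> from numpy import copy
>>> from pNbody import libgrid
>>> nx, ny = 64, 64
>>> pos = libgrid.get_Points_On_Carthesian_2d_Grid(nx, ny, -1., 1., -1., 1.)
>>> x = copy(pos[:,0]); x.shape = (nx, ny)
>>> y = copy(pos[:,1]); y.shape = (nx, ny)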
+
+pNbody.libgrid.get_Points_On_Carthesian_3d_Grid(nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax, offx=0, offy=0, offz=0)¶
+

Return an array of points corresponding to the center of cells of a 3d cartesian grid.

+

To get a nt X nr array from the returned vector (pos), do

+

x = copy(pos[:,0]) +y = copy(pos[:,1]) +z = copy(pos[:,2])

+

x.shape = (nx,ny,nz) +y.shape = (nx,ny,nz) +z.shape = (nx,ny,nz)

+
+ +
+
+pNbody.libgrid.get_Points_On_Cylindrical_2dh_Grid(nr, nt, rmax, offr=0, offt=0)¶
+

Return an array of points corresponding to the nodes of +a 2d cylindrical grid

+

To get a nt X nr array from the returned vector (pos), do

+

x = copy(pos[:,0]) +y = copy(pos[:,1]) +z = copy(pos[:,2])

+

x.shape = (nr,nt) +y.shape = (nr,nt) +z.shape = (nr,nt)

+

# to get r and theta +r = sqrt(x**2+y**2+z**2) +t = arctan2(y,x)*180/pi

+
+ +
+
+pNbody.libgrid.get_Points_On_Cylindrical_2dv_Grid(nr, nz, rmax, zmin, zmax, offr=0, offz=0)¶
+

Return an array of points corresponding to the nodes of +a 2d cylindrical grid

+

To get a nt X nr array from the returned vector (pos), do

+

x = copy(pos[:,0]) +y = copy(pos[:,1]) +z = copy(pos[:,2])

+

x.shape = (nr,nt) +y.shape = (nr,nt) +z.shape = (nr,nt)

+

# to get r and theta +r = sqrt(x**2+y**2+z**2) +t = arctan2(y,x)*180/pi

+
+ +
+
+pNbody.libgrid.get_Points_On_Cylindrical_3d_Grid(nr, nt, nz, rmax, zmin, zmax, offr=0, offt=0, offz=0)¶
+

Return an array of points corresponding to the nodes of +a 2d cylindrical grid

+

To get a nt X nr array from the returned vector (pos), do

+

x = pos[:,0] +y = pos[:,0] +z = pos[:,0]

+

x.shape = (nr,nt,nz) +y.shape = (nr,nt,nz) +z.shape = (nr,nt,nz)

+

# to get r and theta +r = sqrt(x**2+y**2+z**2) +t = arctan2(y,x)*180/pi

+
+ +
+
+pNbody.libgrid.get_Points_On_Spherical_1d_Grid(nr, rmax, offr=0, f=None, fm=None)¶
+

Return an array of points corresponding to the nodes of +a 1d spherical grid

+

To get a nt X nr array from the returned vector (pos), do

+

x = pos[:,0] +y = pos[:,0] +z = pos[:,0]

+

x.shape = (nr,np,nt) +y.shape = (nr,np,nt) +z.shape = (nr,np,nt)

+
+ +
+
+pNbody.libgrid.get_Points_On_Spherical_3d_Grid(nr, np, nt, rmax, offr=0, offp=0, offt=0)¶
+

Return an array of points corresponding to the nodes of +a 3d spherical grid

+

To get a nt X nr array from the returned vector (pos), do

+

x = pos[:,0] +y = pos[:,0] +z = pos[:,0]

+

x.shape = (nr,np,nt) +y.shape = (nr,np,nt) +z.shape = (nr,np,nt)

+
+ +
+
+pNbody.libgrid.get_PotentialMap_On_Carthesian_2d_Grid(nb, nx, ny, xmin, xmax, ymin, ymax, eps, Tree=None)¶
+

Return an array of points containing potential

+
+ +
+
+pNbody.libgrid.get_PotentialMap_On_Carthesian_3d_Grid(nb, nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax, eps, Tree=None)¶
+

Return an array of points containing potential

+
+ +
+
+pNbody.libgrid.get_PotentialMap_On_Cylindrical_2dh_Grid(nb, nr, nt, rmax, eps, Tree=None)¶
+

Return an array of points containing potential

+
+ +
+
+pNbody.libgrid.get_PotentialMap_On_Cylindrical_2dv_Grid(nb, nr, nz, rmax, zmin, zmax, eps, Tree=None)¶
+

Return an array of points containing potential

+
+ +
+
+pNbody.libgrid.get_PotentialMap_On_Cylindrical_3d_Grid(nb, nr, nt, nz, rmax, zmin, zmax, eps, Tree=None)¶
+

Return an array of points containing potential

+
+ +
+
+pNbody.libgrid.get_PotentialMap_On_Spherical_1d_Grid(nb, nr, rmax, eps, Tree=None, f=None, fm=None)¶
+

Return an array of points containing potential

+
+ +
+
+pNbody.libgrid.get_PotentialMap_On_Spherical_3d_Grid(nb, nr, np, nt, rmax, eps, Tree=None)¶
+

Return an array of points containing potential

+
+ +
+
+pNbody.libgrid.get_SurfaceDensityMap_From_Cylindrical_2dv_Grid(nb, nr, nz, rmax, zmin, zmax)¶
+

Return an array of points containing the surface density along r

+
+ +
+
+pNbody.libgrid.get_SurfaceDensityMap_On_Carthesian_2d_Grid(nb, nx, ny, xmin, xmax, ymin, ymax)¶
+

Return an array of points containing density in each cell

+
+ +
+
+pNbody.libgrid.get_SurfaceDensityMap_On_Cylindrical_2dh_Grid(nb, nr, nt, rmax)¶
+

Return an array of points containing density in each cell

+
+ +
+
+pNbody.libgrid.get_SurfaceMap_On_Carthesian_2d_Grid(nb, nx, ny, xmin, xmax, ymin, ymax)¶
+

Return an array of points containing corresponding physical +volumes of each cell (usefull to compute density)

+
+ +
+
+pNbody.libgrid.get_SurfaceMap_On_Cylindrical_2dh_Grid(nb, nr, nt, rmax)¶
+

Return an array of points containing corresponding physical +volumes of each cell (usefull to compute density)

+
+ +
+
+pNbody.libgrid.get_SurfaceMap_On_Spherical_1d_Grid(nb, nr, rmax, f=None, fm=None)¶
+

Return an array of points containing surface (volume) +of each cell.

+
+ +
+
+pNbody.libgrid.get_Symetrisation_Along_Axis(mat, axis=1)¶
+

Return an array where the two halves are symmetrized

+
+ +
+
+pNbody.libgrid.get_Symetrisation_Along_Axis_Old(mat, axis=1)¶
+

Return an array where the two halves are symmetrized. Old version, but more correct than the new one.

+
+ +
+
+pNbody.libgrid.get_VolumeMap_On_Carthesian_3d_Grid(nb, nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax)¶
+

Return an array of points containing corresponding physical +volumes of each cell (usefull to compute density)

+
+ +
+
+pNbody.libgrid.get_VolumeMap_On_Cylindrical_2dv_Grid(nb, nr, nz, rmax, zmin, zmax)¶
+

Return an array of points containing corresponding physical +volumes of each cell (usefull to compute density)

+
+ +
+
+pNbody.libgrid.get_VolumeMap_On_Cylindrical_3d_Grid(nb, nr, nt, nz, rmax, zmin, zmax)¶
+

Return an array of points containing corresponding physical +volumes of each cell (usefull to compute density)

+
+ +
+
+pNbody.libgrid.get_VolumeMap_On_Spherical_1d_Grid(nb, nr, rmax, f=None, fm=None)¶
+

Return an array of points containing corresponding physical +volumes of each cell (usefull to compute density)

+
+ +
+
+pNbody.libgrid.get_VolumeMap_On_Spherical_3d_Grid(nb, nr, np, nt, rmax)¶
+

Return an array of points containing corresponding physical +volumes of each cell (usefull to compute density)

+
+ +
+
+pNbody.libgrid.get_r_Interpolation_On_Cylindrical_2dv_Grid(pos, mat, nr, nz, rmax, zmin, zmax, offr=0)¶
+

Interpolates continuous value of pos, using matrix mat +only along first axis.

+
+ +
+ + +
+
+
+
+
+ +


+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/LiblogModule.html similarity index 53% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/LiblogModule.html index e0c0581..045ad3c 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/LiblogModule.html @@ -1,126 +1,146 @@ - Overview — pNbody v4 documentation + the liblog module — pNbody v4 documentation - - + + +
+

the liblog module¶

+
+
+class pNbody.liblog.Log(directory, show='yes', append='no', filename=None, logframe=None)¶
+

a log class

+

Methods

+
+
+close()¶
+

close the file

+
+ +
+
+write(line, name=None)¶
+

write a line

+
+ +
+


\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/LibmiyamotoModule.html b/Doc/newdoc/_build/html/rst/LibmiyamotoModule.html new file mode 100644 index 0000000..698e803 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/LibmiyamotoModule.html @@ -0,0 +1,206 @@ + + + + + + + + + the libmiyamoto module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the libmiyamoto module¶

+
+
+pNbody.libmiyamoto.Density(G, M, a, b, R, z)¶
+

Miyamoto-Nagai Density

+
+ +
+
+pNbody.libmiyamoto.Kappa(G, M, a, b, R)¶
+

Miyamoto-Nagai Kappa

+
+ +
+
+pNbody.libmiyamoto.Omega(G, M, a, b, R)¶
+

Miyamoto-Nagai Omega

+
+ +
+
+pNbody.libmiyamoto.Potential(G, M, a, b, R, z)¶
+

Miyamoto-Nagai Potential

+
+ +
+
+pNbody.libmiyamoto.Sigma_t(G, M, a, b, R, z)¶
+

Return sigma_z from Jeans equation : 1/rho Int( rho * dzPhi * dz )

+

sigma_t^2 = R*d/dr(Phi) + R/rho*d/dr(rho*sigma_z^2)

+
+ +
+
+pNbody.libmiyamoto.Sigma_z(G, M, a, b, R, z)¶
+

Return sigma_z from Jeans equation : 1/rho Int( rho * dzPhi * dz )

+
+ +
+
+pNbody.libmiyamoto.Sigma_zbis(G, M, a, b, R, z)¶
+

Same as Sigma_z, but optimized

+
+ +
+
+pNbody.libmiyamoto.SurfaceDensity(G, M, a, b, R)¶
+

Miyamoto-Nagai Surface density

+
+ +
+
+pNbody.libmiyamoto.Vcirc(G, M, a, b, R)¶
+

Miyamoto-Nagai Circular velocity

+
+ +
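A hedged sketch of a rotation curve computed with Vcirc; the values of G, M, a and b are illustrative:

>>> from numpy import linspace
>>> from pNbody import libmiyamoto
>>> G, M, a, b = 1.0, 1.0, 3.0, 0.3
>>> R  = linspace(0.1, 20.0, 100)
>>> vc = libmiyamoto.Vcirc(G, M, a, b, R)    # circular velocity at each radius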
+
+pNbody.libmiyamoto.d2R_Potential(G, M, a, b, R, z)¶
+

second derivative in R

+
+ +
+
+pNbody.libmiyamoto.d2z_Potential(G, M, a, b, R, z)¶
+

second derivative in z

+
+ +
+
+pNbody.libmiyamoto.dR_Potential(G, M, a, b, R, z)¶
+

first derivative in R

+
+ +
+
+pNbody.libmiyamoto.dz_Potential(G, M, a, b, R, z)¶
+

first derivative in z

+
+ +
+ + +
+
+
+
+
+ +


+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/LibqtModule.html similarity index 53% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/LibqtModule.html index e0c0581..ae60e4c 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/LibqtModule.html @@ -1,126 +1,146 @@ - Overview — pNbody v4 documentation + the libqt module — pNbody v4 documentation - - + + +
+

the libqt module¶

+
+
+class pNbody.libqt.QNumarrayImage(data, palette_name)¶
+

QNumarrayImage class

+

Methods

+
+ +
+
+pNbody.libqt.display(imagePIL)¶
+

display a PIL image

+
+ +
+
+pNbody.libqt.qtplot(mat, palette='light')¶
+

plot a matrix using qt

+
+


\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/LibutilModule.html b/Doc/newdoc/_build/html/rst/LibutilModule.html new file mode 100644 index 0000000..8f0beaa --- /dev/null +++ b/Doc/newdoc/_build/html/rst/LibutilModule.html @@ -0,0 +1,421 @@ + + + + + + + + + the libutil module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the libutil module¶

+
+
+pNbody.libutil.Extract1dMeanFrom2dMap(x, y, mass, val, kx, ky, nmin, momentum=0)¶
+

Extract the mean along one axis, from a 2d mean or sigma matrix

+

x : pos in first dim. (between 0 and 1)
y : pos in second dim. (between 0 and 1)
mass : mass of particles
val : value to compute

+

kx : number of bins in x +ky : number of bins in y

+

nmin : min number of particles needed to compute value

+

momentum : 0,1,2 (-1=number)

+
+ +
+
+pNbody.libutil.GetMassMap(pos, mass, shape)¶
+
+ +
+
+pNbody.libutil.GetMeanMap(m0, m1)¶
+

Return a MeanMap using the 0 and 1 momentum

+

m0 : zero momentum +m1 : first momentum

+
+ +
+
+pNbody.libutil.GetMeanValMap(pos, mass, val, shape)¶
+
+ +
+
+pNbody.libutil.GetNumberMap(pos, shape)¶
+
+ +
+
+pNbody.libutil.GetSigmaMap(m0, m1, m2)¶
+

Return a MeanMap using the 0 and 1 and 2 momentum

+

m0 : zero momentum +m1 : first momentum +m2 : second momentum

+
+ +
+
+pNbody.libutil.GetSigmaValMap(pos, mass, val, shape)¶
+
+ +
+
+pNbody.libutil.RotateAround(angle, axis, point, ObsM)¶
+

this should be C

+
+ +
+
+pNbody.libutil.add_box(matint, shape=(256, 256), size=(30.0, 30.0), center=None, box_opts=(1, None, None, 255))¶
+

add a box on the frame

+
+ +
+
+pNbody.libutil.apply_filter(mat, name=None, opt=None)¶
+

Apply a filter to an image

+
+ +
+
+pNbody.libutil.compress_from_lst(x, num, lst, reject=False)¶
+

Return the compression of x

+
+ +
+
+pNbody.libutil.contours(m, matint, nl, mn, mx, kx, ky, color, crush='no')¶
+

Compute iso-contours on an n x m float array. If “l_min” equals “l_max”, levels are automatically set between the minimum and maximum values of the matrix “mat”.

+

m = matrix (real values)
matint = matrix (integer values)
kx = num of sub-boxes
ky = num of sub-boxes
nl = number of levels
mn = min
mx = max
color = color of contours

+
+ +
+
+pNbody.libutil.drawxticks(matint, m0, d0, n0, h0, shape, size, center, color)¶
+

draw x ticks in a matrix

+
+ +
+
+pNbody.libutil.drawyticks(matint, m0, d0, n0, h0, shape, size, center, color)¶
+

draw x ticks in a matrix

+
+ +
+
+pNbody.libutil.extract_parameters(arg, kw, defaultparams)¶
+

This function extracts the parameters given to a function and returns a dictionary of parameters with their respective values.

+

defaultparams : dictionary of default parameters

+
+ +
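A hedged sketch of how extract_parameters is typically used inside a plotting function; the dictionary keys are illustrative and the exact precedence of arg over kw is an assumption:

>>> from pNbody import libutil
>>> defaults = {'palette_name': 'light', 'mode': 'RGB', 'shape': (256, 256)}
>>> params = libutil.extract_parameters((), {'mode': 'L'}, defaults)   # kw overrides defaults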
+
+pNbody.libutil.get_eyes(x0, xp, alpha, dr)¶
+

Return the position of two eyes.

+

x0 : position of the head +xp : looking point +theta : rotation of the head +dr : distance of the eyes

+
+ +
+
+pNbody.libutil.get_image(mat, name=None, palette_name='light', mode='RGB')¶
+

Return an image (PIL object).

+

data : numpy 2x2 object +name : name of the output +palette_name : name of a palette

+
+ +
+
+pNbody.libutil.geter(n, rmin, rmax, g, gm)¶
+

Generate a one dimentional non linear array of r

+
+ +
+
+pNbody.libutil.geter2(n, rmin, rmax, g, gm)¶
+

Generate a one dimentional non linear array of r

+
+ +
+
+pNbody.libutil.getr(nr=31, nt=32, rm=100.0)¶
+

Return a sequence of number (n x 1 array), +where n=nr+1 defined by: Pfenniger & Friedli (1994)

+
+ +
+
+pNbody.libutil.getval(nb, mode='m', obs=None)¶
+

For each point, return a specific value linked to this point

+

0 : moment 0 +m : moment 0

+

x : first moment in x +y : first moment in y +z : first moment in z +y2 : second moment in x +y2 : second moment in y +z2 : second moment in z

+

vx : first velocity moment in x +vy : first velocity moment in y +vz : first velocity moment in z +vx2 : second velocity moment in x +vy2 : second velocity moment in y +vz2 : second velocity moment in z

+

Lx : kinetic momentum in x
Ly : kinetic momentum in y
Lz : kinetic momentum in z
lx : specific kinetic momentum in x
ly : specific kinetic momentum in y
lz : specific kinetic momentum in z

+

u : specific energy
rho : density
T : temperature
A : entropy
P : pressure
Tcool : cooling time
Lum : luminosity
Ne : local electron density

+

# depends on projection

+

r : first moment of radial distance
r2 : second moment of radial distance

vr : first moment of radial velocity
vr2 : second moment of radial velocity

vxyr : first moment of radial velocity in the plane
vxyr2 : second moment of radial velocity in the plane

vtr : first moment of tangential velocity in the plane
vtr2 : second moment of tangential velocity in the plane

+
+ +
+
+pNbody.libutil.getvaltype(mode='m')¶
+

list values that depends on projection

+
+ +
+
+pNbody.libutil.invgetr(r, nr=31, nt=32, rm=100.0)¶
+

From r, return the corresponding indexes. +Inverse of getr function.

+
+ +
+
+pNbody.libutil.log_filter(x, xmin, xmax, xc, kx=1.0)¶
+

map a value between 0 and kx

+
+ +
+
+pNbody.libutil.log_filter_inv(k, xmin, xmax, xc, kx=1.0)¶
+

map a value betwen xmin and xmax

+
+ +
+
+pNbody.libutil.mplot(mat, palette='light', save=None, marker=None, header=None)¶
+

plot a 2d array

+
+ +
+
+pNbody.libutil.myhistogram(a, bins)¶
+

Return the histogram (n x 1 float array) of the +n x 1 array “a”. +“bins” (m x 1 array) specify the bins of the histogram.

+
+ +
+
+pNbody.libutil.phys2img(shape, size, center, x, y)¶
+

convert physical position into the image pixel

+
+ +
+
+pNbody.libutil.sbox(shape, size, lweight=1, xticks=None, yticks=None, color=255)¶
+

simple box

+

return a matrix of integer, containing a box with labels

+

xticks = (m0,d0,h0,m1,d1,h1)

+

0 = big +1 = small

+

m0,m1 = dist between ticks +d0,d1 = first tick +h0,h1 = height of the ticks

+
+ +
+
+pNbody.libutil.set_ranges(mat, scale='log', mn=None, mx=None, cd=None)¶
+

Transform an n x m float array into an n x m int array that will be used to create an image. The float values are rescaled and clipped in order to range between 0 and 255.

+

mat : the matrix
scale : lin or log
mn : lower value for the cutoff
mx : higher value for the cutoff
cd : parameter

+
+ +
+
+pNbody.libutil.tranfert_functions(rmin, rmax, g=None, gm=None)¶
+

This function computes the normalized transfer function from g and gm. It is very useful to transform a linear vector into a non-linear one.

+

example of g:

+
+
g = lambda r:log(r/rc+1) +gm = lambda r:rc*(exp(r)-1)
+
+ +
+
+pNbody.libutil.vel_cart2cyl(pos, vel)¶
+

Transform velocities in cartesian coordinates vx,vy,vz into cylindrical coordinates vr,vt,vz. Pos is the position of particles in cartesian coordinates. Vel is the velocity in cartesian coordinates. Return a 3xn float array.

+
+ +
+
+pNbody.libutil.vel_cyl2cart(pos=None, vel=None)¶
+

Transform velocities in cylindrical coordinates vr,vt,vz into cartesian coordinates vx,vy,vz. Pos is the position of particles in cartesian coordinates. Vel is the velocity in cylindrical coordinates. Return a 3xn float array.

+
+ +
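A round-trip sketch between cartesian and cylindrical velocities; the random test data and the float32 dtype are illustrative assumptions:

>>> from numpy import random, float32
>>> from pNbody import libutil
>>> pos = random.randn(100, 3).astype(float32)
>>> vel = random.randn(100, 3).astype(float32)
>>> vcyl  = libutil.vel_cart2cyl(pos, vel)              # (vr, vt, vz)
>>> vcart = libutil.vel_cyl2cart(pos=pos, vel=vcyl)     # back to (vx, vy, vz)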
+ + +
+
+
+
+
+ +


+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Main.html similarity index 58% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Main.html index e0c0581..6d268b1 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Main.html @@ -1,126 +1,113 @@ - Overview — pNbody v4 documentation + the main module — pNbody v4 documentation - - - +
+

the main module¶

+

This python module is useful to manipulate N-body data. It allows one to compute simple physical values like energy, kinetic momentum, inertial momentum, centre of mass, etc. It also allows one to modify the data with rotations and translations, to select some particles, add particles, etc. Associated scripts like “gdisp”, “mkmovie” or “movie” allow one to visualise the N-body data in different ways: surface density, velocity map, velocity dispersion map, etc.

+

Yves Revaz 14.05.05


\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/MainModule.html b/Doc/newdoc/_build/html/rst/MainModule.html new file mode 100644 index 0000000..27b39fc --- /dev/null +++ b/Doc/newdoc/_build/html/rst/MainModule.html @@ -0,0 +1,1642 @@ + + + + + + + + + the main module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the main module¶

+
+
+pNbody.main.Nbody(*arg, **kw)¶
+

The aim of this function is simply to return an instance of the right class

+
+ +
+
+class pNbody.main.NbodyDefault(p_name=None, pos=None, vel=None, mass=None, num=None, tpe=None, ftype=None, status='old', byteorder='little', pio='no', local=False, log=None, unitsfile=None)¶
+

This is the reference Nbody class.

+

This is the constructor for the Nbody object. Optional arguments are:

+
+
p_name : name of the file
+
in case of multiple files, files must be included in a list [“file1”,”file2”]
+
+

pos : positions (3xN array)
vel : velocities (3xN array)
mass : masses (1xN array)
num : id of particles (1xN array)
tpe : type of particles (1xN array)

+

ftype : type of input file (binary,ascii)

+
+
status : ‘old’ : open an old file
+
‘new’ : create a new object
+
+

byteorder : ‘little’ or ‘big’ +pio : parallel io : ‘yes’ or ‘no’

+

local : True = local object, False = global object (parallelized). Not implemented yet.

+

log : log file

+

unitsfile : define the type of units

+

By default this class initializes the following variables:

+
+

self.p_name : name of the file(s) to read or write

+

self.pos : array of positions +self.vel : array of velocities +self.mass : array of masses +self.num : array of id +self.tpe : array of types

+

self.ftype : type of the file
self.status : object status (‘old’ or ‘new’)
self.byteorder : byte order (‘little’ or ‘big’)
self.pio : parallel io (‘yes’ or ‘no’)
self.log : log object

+

# new variables

+

self.nbody : local number of particles +self.nbody_tot : total number of particles +self.mass_tot : total mass +self.npart : number of particles of each type +self.npart_tot : total number of particles of each type +self.spec_vars : dictionary of variables specific for the format used +self.spec_vect : dictionary of vector specific for the format used

+
+
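A minimal sketch of creating a new object from arrays; the ‘binary’ format and the chosen file name are illustrative assumptions:

>>> from numpy import ones, zeros, random, float32
>>> from pNbody import Nbody
>>> pos  = random.randn(1000, 3).astype(float32)
>>> vel  = zeros((1000, 3), dtype=float32)
>>> mass = ones(1000, dtype=float32)/1000.
>>> nb = Nbody(status='new', p_name='model.dat', pos=pos, vel=vel, mass=mass, ftype='binary')
>>> nb.info()                        # print a short summary of the new object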

Methods

+
+
+A()¶
+

Return the gas entropy of the model. +The output is an nx1 float array.

+
+ +
+
+Accel(x, eps)¶
+

Return the acceleration at a given position, using the softening length eps.

+

x : position (array vector) +eps : softening

+
+ +
+
+CombiMap(*arg, **kw)¶
+

Return an image in form of a matrix (nx x ny float array). Contrary to ComputeMap, CombiMap composes different outputs of ComputeMap.

+

pos : position of particles (moment 0)

+

sr : dispersion in r (with respect to xp)
svr : dispersion in vr

vxyr : mean velocity in the plane
svxyr : dispersion in vxy

vtr : mean tangential velocity in the plane
svtr : dispersion in vt

+

szr : ratio sigma z/sigma r

+
+ +
+
+ComputeDensityAndHsml(pos=None, Hsml=None, DesNumNgb=None, MaxNumNgbDeviation=None, Tree=None)¶
+

Compute Density and Hsml (for a specific place)

+
+ +
+
+ComputeHisto(bins, mode, space)¶
+

Compute and histogram

+
+ +
+
+ComputeMap(*arg, **kw)¶
+

Return an image in form of a matrix (nx x ny float array)

+

obs : position of observer +x0 : eye position +xp : focal position +alpha : angle of the head +view : ‘xy’ ‘xz’ ‘yz’

+

eye : ‘right’ ‘left’ +dist_eye : distance between eyes

+

mode : mode of map +space : pos or vel

+

persp : ‘on’ ‘off’ +clip : (near,far) +size : (maxx,maxy)

+

cut : ‘yes’ ‘no’

+

frsp : factor for rsp +shape : shape of the map

+
+ +
+
+ComputeMeanHisto(bins, mode1, space)¶
+

Compute the mean map of an observable.

+
+ +
+
+ComputeMeanMap(*arg, **kw)¶
+

Compute the mean map of an observable.

+
+ +
+
+ComputeObjectMap(*arg, **kw)¶
+
IN DEVELOPMENT: allows one to draw an object like a box, a grid, etc.

Return an image in form of a matrix (nx x ny float array)

+

obs : position of observer +x0 : eye position +xp : focal position +alpha : angle of the head +view : ‘xy’ ‘xz’ ‘yz’

+

eye : ‘right’ ‘left’ +dist_eye : distance between eyes

+

mode : mode of map +space : pos or vel

+

persp : ‘on’ ‘off’ +clip : (near,far) +size : (maxx,maxy)

+

cut : ‘yes’ ‘no’

+

frsp : factor for rsp +shape : shape of the map

+
+ +
+
+ComputeSigmaHisto(bins, mode1, mode2, space)¶
+

Compute the histogram of an observable.

+
+ +
+
+ComputeSigmaMap(*arg, **kw)¶
+

Compute the sigma map of an observable.

+
+ +
+
+ComputeSph(DesNumNgb=None, MaxNumNgbDeviation=None, Tree=None)¶
+

Compute self.Density and self.Hsml using sph approximation

+
+ +
+
+Ekin()¶
+

Return the total kinetic energy

+
+ +
+
+Epot(eps)¶
+

Return the total potential energy using the softening lenght eps.

+

eps : softening

+

WARNING: THIS FUNCTION DOES NOT WORK IN MPI MODE

+
+ +
+
+ExchangeParticles()¶
+

Exchange particles between processes, using the Peano-Hilbert decomposition computed in ptree

+
+ +
+
+Get_Velocities_From_Virial_Approximation(select=None, vf=1.0, eps=0.10000000000000001, UseTree=True, Tree=None, ErrTolTheta=0.5)¶
+

does not work well

+
+ +
+
+InitSpec()¶
+

This function allows to initialize specific parameters. +It must be defined in format files.

+
+ +
+
+IntegrateUsingRK(tstart=0, dt=1, dt0=1.0000000000000001e-05, epsx=1e-13, epsv=1e-13)¶
+

Integrate the equation of motion using RK78 integrator.

+

tstart : initial time +dt : interval time +dt0 : inital dt +epsx : position precision +epsv : velocity precision

+

tmin,tmax,dt,dtout,epsx,epsv,filename

+
+ +
+
+L()¶
+

Return the angular momentum in x,y,z of all particles. +The output is an 3xn float array.

+
+ +
+
+Ltot()¶
+

Return the total angular momentum. +The output is an 3x1 float array.

+
+ +
+
+Lum()¶
+

Return the luminosty of the model, defined as +Lum = m*u/Tcool = m*Lambda/rho

+

The output is an nx1 float array.

+
+ +
+
+Map(*arg, **kw)¶
+

Return 2 final images (float and int)

+
+ +
+
+MeanWeight()¶
+

Return the mean weight of a model, taking into account +heating by UV source. +The output is an nx1 float array.

+
+ +
+
+Mr_Spherical(nr=25, rmin=0, rmax=50)¶
+

Return the mass inside radius r (supposing a spherical density distribution). +The output is 2 n x 1 float arrays.

+

nr : number of bins (size of the output)
rmin : minimal radius (this must be zero, otherwise the result is wrong)
rmax : maximal radius

+
+ +
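A hedged sketch; the assumption that the two returned arrays are the radii and the enclosed mass, in that order, follows the description above (nb is an Nbody object opened beforehand):

>>> r, Mr = nb.Mr_Spherical(nr=50, rmin=0, rmax=30.0)   # radii and enclosed mass M(<r)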
+
+Ne()¶
+

Return the electron density of the model. +The output is an nx1 float array.

+
+ +
+
+P()¶
+

Return the gas pressure of the model. +The output is an nx1 float array.

+
+ +
+
+Pot(x, eps)¶
+

Return the potential at a given position, using the softening lenght eps.

+

x : position (array vector) +eps : softening

+
+ +
+
+R()¶
+

Return a 1xn float array that corresponds to +the projected distance from the center of each particle.

+
+ +
+
+Rho()¶
+

Return the gas density of the model. +The output is an nx1 float array.

+
+ +
+
+S()¶
+

Return the entropy of the model, defined as +S = T * Ne^(1-gamma) +The output is an nx1 float array.

+
+ +
+
+SendAllToAll()¶
+

Send all particles to all nodes +at the end of the day, all nodes have the same nbody object

+
+ +
+
+SphEvaluate(val, pos=None, vel=None, hsml=None, DesNumNgb=None, MaxNumNgbDeviation=None, Tree=None)¶
+

Return an sph evaluation of the variable var

+
+ +
+
+T()¶
+

Return the gas temperature of the model. +The output is an nx1 float array.

+
+ +
+
+Tcool(coolingfile=None)¶
+

Return the cooling time of the model. +The output is an nx1 float array.

+
+ +
+
+Tmu()¶
+

Return the gas temperature of the model. +The output is an nx1 float array.

+
+ +
+
+TreeAccel(pos, eps, Tree=None)¶
+

Return the acceleration at a given position, using the softening length eps and using a tree.

+

pos : position (array vector) +eps : softening +Tree: gravitational tree if already computed

+

WARNING: this function does not work in parallel

+
+ +
+
+TreePot(pos, eps, Tree=None)¶
+

Return the potential at a given position, using the softening lenght eps +and using a tree.

+

pos : position (array vector) +eps : softening +Tree: gravitational tree if already computed

+

WARNING : this function do not work in parallel

+
+ +
+
+U()¶
+

Return the gas specific energy of the model. +The output is an nx1 float array.

+
+ +
+
+Vr()¶
+

Return the radial velocities of particles. The output is a 3xn float array.

+
+ +
+
+Vt()¶
+

Return the tangential velocies of particles +The output is an 3xn float array.

+
+ +
+
+Vz()¶
+

Return a 1xn float array containing z velocity

+
+ +
+
+align(axis, mode='a', sgn='+', fact=None)¶
+

Rotate the object in order to align the axis ‘axis’ with the z axis.

+

axis : [x,y,z] +mode : ‘p’ : only position

+
+
‘v’ : only velocities +‘a’ : both (default)
+
+
sgn : ‘+’ : normal rotation
+
‘-‘ : reverse sense of rotation
+
+

fact : int : factor to increase the angle

+
+ +
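A hedged sketch aligning the total angular momentum of the model with the z axis, using Ltot() documented above (passing the resulting array as ‘axis’ is an assumption):

>>> L = nb.Ltot()                    # total angular momentum (3x1 array)
>>> nb.align(L, mode='a', sgn='+')   # rotate positions and velocities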
+
+align2(axis1=[1, 0, 0], axis2=[0, 0, 1], point=[0, 0, 0])¶
+

Rotate the object in order to align the axis ‘axis’ with the z axis.

+

axis1 : [x,y,z] +axis2 : [x,y,z] +point : [x,y,z]

+
+ +
+
+align_with_main_axis(mode='a')¶
+

Rotate the object in order to align its major axis with the axis of its inertial tensor.

+
+
mode : ‘p’ : only position
+
‘v’ : only velocities +‘a’ : both (default)
+
+
+ +
+
+append(solf, do_not_sort=False)¶
+

Add to the current N-body object the particles from the N-body object “solf”.

+

solf : Nbody object

+
+ +
+
+cart2sph(pos=None)¶
+

Transform cartesian coordinates x,y,z into spherical coordinates r,p,t. Return a 3xn float array.

+
+ +
+
+check_arrays()¶
+

check if the array contains special values like NaN or Inf

+
+ +
+
+cm()¶
+

Return the mass center of the model. +The output is an 3x1 float array.

+
+ +
+
+cmcenter()¶
+

Move the N-body object in order +to center the mass center at the origin.

+
+ +
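A short sketch of the usual centring sequence (cvcenter and cm are documented nearby; nb is an Nbody object):

>>> nb.cmcenter()                    # put the centre of mass at the origin
>>> nb.cvcenter()                    # put the centre of velocities at the origin
>>> nb.cm()                          # should now be close to [0, 0, 0]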
+
+cv()¶
+

Return the center of the velocities of the model. +The output is an 3x1 float array.

+
+ +
+
+cvcenter()¶
+

Center the center of velocities at the origin.

+
+ +
+
+dens(r=None, nb=25, rm=50)¶
+

Return the number density at radius r (supposing a spherical density distribution). +If r is not specified, it is computed with nb and rm. +The output is an n x 1 float array.

+

!!! This routine does not use masses !!!

+

r : radius +nb : number of bins (size of the output) +rm : maximal radius

+
+ +
+
+display(*arg, **kw)¶
+

Display the model

+
+ +
+
+dmodes(nr=32, nm=16, rm=32)¶
+

Compute the density modes of a model

+

nm = 16 : number of modes +nr = 32 : number of radius +rm = 50 : max radius

+

return

+

r : the radius used +m : the modes computed +m1 : the matrix of the amplitude +m2 : the matrix of the phases

+
+ +
+
+dv_mean()¶
+

Return the average relative speed between particles.

+
+ +
+
+dx_mean()¶
+

Return the average distance between particles.

+
+ +
+
+ekin()¶
+

Return the total specific kinetic energy

+
+ +
+
+epot(eps)¶
+

Return the total specific potential energy using the softening lenght eps.

+

eps : softening

+

WARNING : THIS FUNCTION DO NOT WORK IN MPI MODE

+
+ +
+
+expose(obs, eye=None, dist_eye=None, foc=None, space='pos', pos=None, vel=None)¶
+

Rotate and translate the object in order to be seen as if the +observer was in x0, looking at a point in xp.

+

obs : observer matrix +eye : ‘right’ or ‘left’ +dist_eye : distance between eyes (separation = angle) +space : pos or vel +foc : focal

+
+ +
+
+find_vars()¶
+

This function return a list of variables defined in the current object

+
+ +
+
+gather_mass()¶
+

Gather in a unique array all mass of all nodes.

+
+ +
+
+gather_num()¶
+

Gather in a unique array all num of all nodes.

+
+ +
+
+gather_pos()¶
+

Gather in a unique array all positions of all nodes.

+
+ +
+
+gather_vec(vec)¶
+

Gather in a unique array all vectors vec of all nodes.

+
+ +
+
+gather_vel()¶
+

Gather in a unique array all velocites of all nodes.

+
+ +
+
+getAccelerationInCylindricalGrid(eps, z, Rmax, nr=32, nt=32, UseTree=False)¶
+

Compute the Acceleration in cells of a cylindrical grid

+
+ +
+
+getNumberParticlesInCylindricalGrid(Rmax, nr=32, nt=32)¶
+

Compute the number of particles in cells of a cylindrical grid

+
+ +
+
+getPotentialInCylindricalGrid(eps, z, Rmax, nr=32, nt=32, UseTree=False)¶
+

Compute the potential in cells of a cylindrical grid

+
+ +
+
+getRadialVelocityDispersionInCylindricalGrid(Rmax, nr=32, nt=32)¶
+

Compute the radial velocity dispersion in cells of a cylindrical grid

+
+ +
+
+getRadiusInCylindricalGrid(z, Rmax, nr=32, nt=32)¶
+

Compute the radius in cells of a cylindrical grid

+
+ +
+
+getSurfaceDensityInCylindricalGrid(Rmax, nr=32, nt=32)¶
+

Compute the surface density in cells of a cylindrical grid

+
+ +
+
+getTree(force_computation=False, ErrTolTheta=0.80000000000000004)¶
+

Return a Tree object

+
+ +
+
+get_default_spec_vars()¶
+

return specific variables default values for the class

+
+ +
+
+get_default_spec_vect()¶
+

return specific vector default values for the class

+
+ +
+
+get_histocenter(rbox=50, nb=500)¶
+

Return the position of the higher density region +in x,y,z (not good) +found by the function “histocenter”.

+

rbox : size of the box +nb : number of bins in each dimension

+
+ +
+
+get_histocenter2(rbox=50, nb=64)¶
+

Return the position of the higher density region +in x,y,z (not good) +found by the function “histocenter”.

+

rbox : size of the box +nb : number of bins in each dimension

+
+ +
+
+get_list_of_array()¶
+

Return the list of numpy vectors of size nbody.

+
+ +
+
+get_list_of_method()¶
+

Return the list of instance methods (functions).

+
+ +
+
+get_list_of_vars()¶
+

Get the list of vars that are linked to the model

+
+ +
+
+get_mass_tot()¶
+

Return the total mass of the system.

+
+ +
+
+get_mxntpe()¶
+

Return the maximum number of particle types for this format.

+
+ +
+
+get_nbody()¶
+

Return the local number of particles.

+
+ +
+
+get_nbody_tot()¶
+

Return the total number of particles.

+
+ +
+
+get_npart()¶
+

Return the local number of particles of each type, based on the variable tpe.

+
+ +
+
+get_npart_all(npart_tot, NTask)¶
+

From npart_tot, the total number of particles per type, +return npart_per_proc, an array where each element corresponds +to the value of npart of each process.

+
+ +
+
+get_npart_and_npart_all(npart)¶
+

From npart (usually read from the header of a file), compute :

+

npart : number of particles in each type
npart_tot : total number of particles in each type
npart_all : npart for each process

+
+ +
+
+get_npart_tot()¶
+

Return the total number of particles of each type.

+
+ +
+
+get_ns()¶
+

Return in an array the number of particles of each node.

+
+ +
+
+get_ntype()¶
+

Return the number of particle types.

+
+ +
+
+get_num()¶
+

Compute the num variable in order to be consistent with particle types.

+
+ +
+
+get_rotation_matrix_to_align_with_main_axis()¶
+

Get the rotation matrix used to rotate the object in order to align its main axis with the axis of its inertial tensor.

+
+ +
+
+get_rsp_approximation(DesNumNgb=None, MaxNumNgbDeviation=None, Tree=None)¶
+

Return an approximation of rsp, based on the tree.

+
+ +
+
+getindex(num)¶
+

Return an array of indices of a particle from its specific number id. The array is empty if no particle corresponds to the specific number id.

+

num : Id of the particle

+
+ +
+
+has_array(name)¶
+

Return True if the object pNbody has an array called self.name.

+
+ +
+
+has_var(name)¶
+

Return True if the object pNbody has a variable called self.name.

+
+ +
+
+hdcenter()¶
+

Move the N-body object in order to center the highest density region found.

+
+ +
+
+histocenter(rbox=50, nb=500)¶
+

Move the N-body object in order to center the highest density region found near the mass center. The highest density region is determined with density histograms.

+

rbox : box dimension, where to compute the histograms
nb : number of bins for the histograms

+
+ +
+
+histocenter2(rbox=50, nb=64)¶
+

Move the N-body object in order to center the highest density region found near the mass center. The highest density region is determined with density histograms.

+

rbox : box dimension, where to compute the histograms
nb : number of bins for the histograms

+
+ +
+
+histovel(nb=100, vmin=None, vmax=None, mode='n')¶
+

Return or plot the histogram of the norm of the velocities or of the radial velocities.

+

The output is a list (r,h) of 2 nx1 float arrays, where r is the radius and h the values of the histogram.

+

nb : number of bins (size of the output)
vmax : maximum velocity
vmin : minimum velocity
mode : ‘n’ (norm of the velocities)
       ‘r’ (radial velocities)
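For example, assuming snap is an Nbody object as in the sketch further above (the bin count is arbitrary):

>>> r, h = snap.histovel(nb=100, mode='r')    # histogram of the radial velocities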
+
+ +
+
+inertial_tensor()¶
+

Return the inertial tensor.

+
+ +
+
+info()¶
+

Write info

+
+ +
+
+init()¶
+

Initialize normal and specific class variables

+
+ +
+
+init_units()¶
+

This function is responsible for the units initialization.

+

It will create :

+
+

self.unitsparameters

+
+
+
that contains parameters like
• the hydrogen mass fraction
• the metallicity ionisation flag
• the adiabatic index
• ...
+
+
+
+
+

and

+
+

self.localsystem_of_units

+
+

a UnitSystem object that really defines the system of units in the Nbody object. It uses the values :

+
+
UnitLength_in_cm
UnitMass_in_g
UnitVelocity_in_cm_per_s
+
+
+

All physical values computed in pNbody should use self.localsystem_of_units to be converted into other units. self.unitsparameters is useful if other parameters need to be known, like the adiabatic index, etc.
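As an illustration, the two attributes described above can be inspected directly on an Nbody object (snap is assumed to be an Nbody object; only the attribute names are taken from this entry):

>>> units  = snap.localsystem_of_units    # UnitSystem object created by init_units
>>> params = snap.unitsparameters         # parameters such as the adiabatic index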

+
+ +
+
+l()¶
+

Return the specific angular momentum in x,y,z of all particles. The output is a 3xn float array.

+
+ +
+
+ltot()¶
+

Return the specific total angular momentum. The output is a 3x1 float array.

+
+ +
+
+make_default_vars_global()¶
+

Make specific variables global

+
+ +
+
+mdens(r=None, nb=25, rm=50)¶
+

Return the density at radius r (assuming a spherical density distribution). If r is not specified, it is computed with nb and rm. The output is an n x 1 float array.

+

r : radius
nb : number of bins (size of the output)
rm : maximal radius

+
+ +
+
+memory_info()¶
+

Write info on the memory size of the current object (only counting array sizes).

+
+ +
+
+minert()¶
+

Return the diagonal of the inertial momentum.

+
+ +
+
+mr(r=None, nb=25, rm=50)¶
+

Return the mass inside radius r (assuming a spherical density distribution). If r is not specified, it is computed with nb and rm. The output is an n x 1 float array.

+

r : radius
nb : number of bins (size of the output)
rm : maximal radius

+
+ +
+
+msdens(r=None, nb=25, rm=50)¶
+

Return the mass surface density at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array.

+

r : radius
nb : number of bins (size of the output)
rm : maximal radius

+
+ +
+
+nodes_info()¶
+

Write info on nodes

+
+ +
+
+object_info()¶
+

Write class(object) info

+
+ +
+
+open_and_read(name, readfct)¶
+

Open and read the file name.

+

name : name of the input
readfct : function used to read the file

+
+ +
+
+open_and_write(name, writefct)¶
+

Open and write file

+

name : name of the output
writefct : function used to write the file

+
+ +
+
+phi_xy()¶
+

Return a 1xn float array that corresponds to the azimuth in cylindrical coordinates of each particle.

+
+ +
+
+phi_xyz()¶
+

Return a 1xn float array that corresponds to the azimuth in spherical coordinates of each particle.

+
+ +
+
+print_filenames()¶
+

Print files names

+
+ +
+
+r(center=None)¶
+

Return a 1xn float array that corresponds to +the distance from the center of each particle.

+
+ +
+
+read()¶
+

Read the particle file(s)

+
+ +
+
+read_num(name)¶
+

Read a num file

+

name : name of the input

+
+ +
+
+real_numngb(num)¶
+

Number of particles for which r<h.

+
+ +
+
+rebox(boxsize=None, mode=None)¶
+

Translate the positions of the object so that all particles are contained in a box of size boxsize.

+
+
boxsize : size of the box
          if boxsize is not defined, we first try to see if self.boxsize is defined.
mode : type of reboxing
       None : -> [0,boxsize]
       centred : -> [-boxsize/2,boxsize/2]
       [x,y,z] :
+
+
+ +
+
+redistribute()¶
+

This function redistributes particles among all nodes in order to have a similar number of particles per node.

+
+ +
+
+reduc(n, mass=False)¶
+

Return an N-body object that contains a fraction 1/n of the particles.

+

n : inverse of the fraction of particles to be returned

+
+ +
+
+rename(p_name=None)¶
+

Rename the files

+

p_name : new name(s)

+
+ +
+
+rotate(angle=0, axis=[1, 0, 0], point=[0, 0, 0], mode='a')¶
+

Rotate the positions and/or the velocities of the object around a specific axis defined by a vector and a point.

angle : rotation angle in radians
axis : direction of the axis
point : center of the rotation

mode : ‘p’ : rotate only positions
       ‘v’ : rotate only velocities
       ‘a’ : rotate both (default)
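A short sketch, assuming snap is an Nbody object (the angle values are arbitrary):

>>> import numpy as np
>>> snap.rotate(angle=np.pi/2., axis=[0, 0, 1])        # rotate positions and velocities by 90 degrees around z
>>> snap.rotate(angle=0.1, axis=[1, 0, 0], mode='p')   # rotate only the positions around x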
+
+
+ +
+
+rotateR(R, mode='a')¶
+

Rotate the model using the matrix R

+
+
mode : ‘p’ : only positions
       ‘v’ : only velocities
       ‘a’ : both (default)
+
+
+ +
+
+rotate_old(angle=0, mode='a', axis='x')¶
+

Rotate the positions and/or the velocities of the object around a specific axis.

+

angle : rotation angle in radians
axis : ‘x’ : around x
       ‘y’ : around y
       ‘z’ : around z
       [x,y,z] : around this axis

mode : ‘p’ : rotate only positions
       ‘v’ : rotate only velocities
       ‘a’ : rotate both (default)
+
+
+ +
+
+rxy()¶
+

Return a 1xn float array that corresponds to the projected distance from the center of each particle.

+
+ +
+
+rxyz(center=None)¶
+

Return a 1xn float array that corresponds to the distance from the center of each particle.

+
+ +
+
+sdens(r=None, nb=25, rm=50)¶
+

Return the surface density at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array.

+

!!! This routine does not use masses !!!

+

r : radius
nb : number of bins (size of the output)
rm : maximal radius

+
+ +
+
+select(i=0)¶
+

Return an N-body object that contains only particles of type i.

+
+ +
+
+selectc(c, local=False)¶
+

Return an N-body object that contains only particles where the corresponding value in c is not zero. c is an nx1 Nbody array.

+

c : the condition vector
local : local selection (True) or global selection (False)

+
+ +
+
+selecti(i, local=False)¶
+

Return an N-body object that contains only particles having their index (not id) in i.

+

i : vector containing indexes
local : local selection (True) or global selection (False)

+
+ +
+
+selectp(lst=None, file=None, reject=False, local=False, from_num=True)¶
+

Return an N-body object that contains only particles with specific number ids.

+

The list of ids is given either by lst (nx1 int array) or by the name (“file”) of a file containing the list of ids.

+

lst : vector list (integer)

+

reject : True/False : if True, reject particles in lst (default = False)
local : local selection (True) or global selection (False)

+
+
from_num : if True, use self.num to select particles
           if False, use arange(self.nbody)
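A possible call, assuming snap is an Nbody object (the id values are made up):

>>> sub  = snap.selectp(lst=[1, 2, 3, 42])                 # keep only particles with these ids
>>> rest = snap.selectp(lst=[1, 2, 3, 42], reject=True)    # keep all the other particles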
+
+
+ +
+
+set_filenames(p_name, pio=None)¶
+

Set the local and global names

+

p_name : new name(s) +pio : ‘yes’ or ‘no’

+
+ +
+
+set_ftype(ftype='binary')¶
+

Change the type of the file

+

ftype : type of the file

+
+ +
+
+set_local_system_of_units(params=None, UnitLength_in_cm=None, UnitVelocity_in_cm_per_s=None, UnitMass_in_g=None, unitparameterfile=None, gadgetparameterfile=None)¶
+

Set the local system of units using UnitLength_in_cm, UnitVelocity_in_cm_per_s and UnitMass_in_g.

+
1. if nothing is given, we use self.unitsparameters to obtain these values

2. if UnitLength_in_cm, UnitVelocity_in_cm_per_s and UnitMass_in_g are given, we use them

2b. if UnitLength_in_cm, UnitVelocity_in_cm_per_s and UnitMass_in_g are given in a dictionary

3. if unitparameterfile is given, we read the parameters from the file (units parameter format)

4. if gadgetparameterfile is given, we read the parameters from the file (gadget param format)
+
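For example, to impose a kpc / 1e10 Msol / km/s system explicitly (the cgs conversion factors below are standard values, not taken from this entry):

>>> snap.set_local_system_of_units(UnitLength_in_cm=3.085678e21,
...                                UnitVelocity_in_cm_per_s=1.0e5,
...                                UnitMass_in_g=1.989e43)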
+ +
+
+set_npart(npart)¶
+

Set the local number of particles of each type. This function modifies the variable self.tpe.

+
+ +
+
+set_parameters(params)¶
+

Set parameters for the class

+
+ +
+
+set_pio(pio)¶
+

Set whether parallel input/output (pio) is used or not.

+

pio : ‘yes’ or ‘no’

+
+ +
+
+set_tpe(tpe)¶
+

Set all particles to the type tpe

+
+ +
+
+set_unitsparameters(unitsparams)¶
+

Set units parameters for the class.

+
+ +
+
+show(*arg, **kw)¶
+

Display the model. This is an alias for display.

+
+ +
+
+sigma(r=None, nb=25.0, rm=50.0)¶
+

Return the 3 velocity dispersions (in cylindrical coordinates) and the mean azimuthal velocity curve. If r is not specified, it is computed with nb and rm.

+

The output is a list (r,sr,st,sz,mt) of 5 n x 1 float arrays, where r is the radius, sr the radial velocity dispersion, st the azimuthal velocity dispersion, sz the vertical velocity dispersion and mt the mean azimuthal velocity curve.

+

!!! This routine works only if particles have equal masses !!!

+

r : radius where to compute the values
nb : number of bins (size of the output)
rm : maximal radius

+

return : r,sr,st,sz,mt
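A sketch of a typical call, assuming snap is an Nbody object with equal-mass particles (bin numbers arbitrary):

>>> r, sr, st, sz, mt = snap.sigma(nb=25, rm=50.)
>>> # sr, st, sz : radial, azimuthal and vertical dispersions; mt : mean azimuthal velocity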

+
+ +
+
+sigma_vz(r=None, nb=25, rm=50)¶
+

Return the vertical dispersion in z at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array.

+

r : radius
nb : number of bins (size of the output)
rm : maximal radius

+
+ +
+
+sigma_z(r=None, nb=25, rm=50)¶
+

Return the vertical dispersion in z at radius r. If r is not specified, it is computed with nb and rm. The output is an nx1 float array.

+

r : radius
nb : number of bins (size of the output)
rm : maximal radius

+
+ +
+
+size()¶
+

Estimate the model size, using the inertial momentum

+
+ +
+
+sort()¶
+

sort particles according to their num variable

+
+ +
+
+sort_type()¶
+

Contrary to sort, this function sorts particles respecting their type.

+
+ +
+
+spec_info()¶
+

Write specific info

+
+ +
+
+sph2cart(pos=None)¶
+

Transform spherical coordinates r,p,t into cartesian coordinates x,y,z. Return a 3xn float array.

+
+ +
+
+spin(omega=None, L=None, j=None, E=None)¶
+

Spin the object with angular velocity “omega” (rigid rotation). Omega is a 1 x 3 array object.

+

If L (total angular momentum) is explicitly given, compute Omega from L (1 x 3 array object).

+

omega : angular speed (array vector)
L : desired angular momentum
j : desired energy fraction in rotation
E : total energy (without rotation)
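For instance, assuming snap is an Nbody object (the angular velocity is arbitrary):

>>> snap.spin(omega=[0., 0., 0.1])    # rigid rotation around the z axis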

+
+ +
+
+sub(n1=0, n2=None)¶
+

Return an N-body object that has particles with indices in the range [n1:n2].

+

n1 : number of the first particle
n2 : number of the last particle

+

Note : the first particle is 0

+
+ +
+
+theta_xyz()¶
+

Return a 1xn float array that corresponds to the elevation angle in spherical coordinates of each particle.

+
+ +
+
+tork(acc)¶
+

Return the total torque on the system due to the force acting on each particle (acc). The output is a 3xn float array.

+

acc : 3xn float array

+
+ +
+
+translate(dx, mode='p')¶
+

Translate the positions or the velocities of the object.

+

dx : shift (array vector)
mode : ‘p’ : translate positions
       ‘v’ : translate velocities
+
+ +
+
+usual_numngb(num)¶
+

usual way to compute the number of neighbors

+
+ +
+
+v_sigma()¶
+

Return the norm of the velocity dispersions.

+
+ +
+
+vel_cart2cyl()¶
+

Transform velocities in cartesian coordinates vx,vy,vz into cylindrical coordinates vr,vt,vz. Pos is the position of particles in cart. coord. Vel is the velocity in cart. coord. Return a 3xn float array.

+
+ +
+
+vel_cyl2cart(pos=None, vel=None)¶
+

Transform velocities in cylindrical coordinates vr,vt,vz into cartesian coordinates vx,vy,vz. Pos is the position of particles in cart. coord. Vel is the velocity in cylindrical coord. Return a 3xn float array.

+
+ +
+
+vn()¶
+

Return a 1xn float array that corresponds to the norm of the velocities.

+
+ +
+
+vrxyz()¶
+

Return a 1xn float array that corresponds to the radial velocity in the spherical system.

+
+ +
+
+vx()¶
+

Return a 1xn float array containing x velocity

+
+ +
+
+vy()¶
+

Return a 1xn float array containing y velocity

+
+ +
+
+vz()¶
+

Return a 1xn float array containing z velocity

+
+ +
+
+weighted_numngb(num)¶
+

num = particle for which to compute weighted_numngb (see Springel 05).

+
+ +
+
+write()¶
+

Write the particle file(s)

+
+ +
+
+write_num(name)¶
+

Write a num file

+

name : name of the output

+
+ +
+
+x()¶
+

Return a 1xn float array containing x coordinate

+
+ +
+
+x_sigma()¶
+

Return the norm of the position dispersions.

+
+ +
+
+y()¶
+

Return a 1xn float array containing y coordinate

+
+ +
+
+z()¶
+

Return a 1xn float array containing z coordinate

+
+ +
+
+zmodes(nr=32, nm=16, rm=32)¶
+

Compute the vertical modes of a model

+

nm = 16 : number of modes
nr = 32 : number of radii
rm = 32 : max radius

+

return

+

r : the radius used
m : the modes computed
m1 : the matrix of the amplitudes
m2 : the matrix of the phases

+
+ +
+
+zprof(z=None, r=2.5, dr=0.5, nb=25, zm=5.0)¶
+

Return the z-profile in a vector for a given radius

+

!!! This routine works only if particles have equal masses !!!

+

z : bins in z (optional)
r : radius of the cut
dr : width in r of the cut
nb : number of bins (size of the output)
zm : maximal height

+
+ +
+ +
+
+class pNbody.main.Nbody_default(p_name=None, pos=None, vel=None, mass=None, num=None, tpe=None, ftype=None, status='old', byteorder='little', pio='no', local=False, log=None, unitsfile=None)¶
+

This class is useful to create an empty Nbody object.

+

Methods

+
+ +
+
+pNbody.main.get_known_formats()¶
+

Return the names of the known Nbody formats.

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

Reference

+

Next topic

+

the ic module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/MontecarloModule.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/MontecarloModule.html index e0c0581..a432986 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/MontecarloModule.html @@ -1,126 +1,127 @@ - Overview — pNbody v4 documentation + the montecarlo module — pNbody v4 documentation - - + + +
-
-

+

the montecarlo module¶

Previous topic

-

Welcome to pNbody’s documentation!

+

the thermodyn module

Next topic

-

the Io module

+

the fourier module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/MovieModule.html b/Doc/newdoc/_build/html/rst/MovieModule.html new file mode 100644 index 0000000..d2339da --- /dev/null +++ b/Doc/newdoc/_build/html/rst/MovieModule.html @@ -0,0 +1,161 @@ + + + + + + + + + the Movie module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the Movie module¶

+
+
+class pNbody.Movie.Movie(name, mode=None)¶
+

a Movie class

+

Methods

+
+
+get_img(data)¶
+

can be replaced by read_one with option “image”

+
+ +
+
+info()¶
+

give info

+
+ +
+
+open(mode='r', readall=0)¶
+

open a file

+
+ +
+
+read(skip=0, mode='array')¶
+

skip = 0 : read image at the current position
skip = 1 : skip an image
skip = -1 : read the image before (go back)
skip = -2 : skip an image before (go back)

+
+ +
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the palette module

+

Next topic

+

the profiles module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/MpiModule.html b/Doc/newdoc/_build/html/rst/MpiModule.html new file mode 100644 index 0000000..9b08704 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/MpiModule.html @@ -0,0 +1,347 @@ + + + + + + + + + the mpi module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the mpi module¶

+
+
+pNbody.mpi.mpi_AllgatherAndConcatArray(vec)¶
+

AllGather array vec and concatenate it in a unique array (concatenation order is reversed).

+
+ +
+
+pNbody.mpi.mpi_ExchangeFromTable(T, procs, ids, vec, num)¶
+

Exchange an array according to a transfer array T

+

T : exchange table

+

procs : list of processors (from Tree.GetExchanges())
ids : list of ids (from Tree.GetExchanges())

+

vec : vector to exchange
num : ids corresponding to particles

+
+ +
+
+pNbody.mpi.mpi_GatherAndWriteArray(f, data, byteorder='little', npart=None)¶
+

Gather an array and write it in a binary block.

+

data = array

+

shape = tuple

+
+ +
+
+pNbody.mpi.mpi_GetExchangeTable(n_i)¶
+

This function returns the exchange table

+
+ +
+
+pNbody.mpi.mpi_OldGatherAndWriteArray(f, data, byteorder='little', nlocal=None)¶
+

Gather an array and write it in a binary block.

+

data = array

+

shape = tuple

+
+ +
+
+pNbody.mpi.mpi_OldReadAndSendArray(f, data_type, shape=None, skip=None, byteorder='little', nlocal=None)¶
+

Read and broadcast a binary block assuming it contains an array. The array is split according to the variable nlocal.

+

data_type = array type +shape = tuple

+
+
nlocal : array NTask x Npart
+
array NTask
+
+
+ +
+
+pNbody.mpi.mpi_ReadAndSendArray(f, data_type, shape=None, byteorder='little', npart=None)¶
+

Read and broadcast a binary block assuming it contains an array.

+
+ +
+
+pNbody.mpi.mpi_ReadAndSendBlock(f, data_type, shape=None, byteorder='little', split=None)¶
+

Read and broadcast a binary block.

+

data_type = int, float32, float
or
data_type = array

+

shape = tuple

+
+ +
+
+pNbody.mpi.mpi_allgather(x)¶
+

Gather x from all to all. +Returns a list.

+
+ +
+
+pNbody.mpi.mpi_allreduce(x, op=<mpi4py.MPI.Op object at 0x3850690>)¶
+

Reduce x from all nodes for all nodes. When only one node is defined, the function returns x.

+
+ +
+
+pNbody.mpi.mpi_arange(n)¶
+

Create an integer array containing elements from 0 to n, spread over all nodes.

+
+ +
+
+pNbody.mpi.mpi_argmax(x)¶
+

Find the argument of the maximum value in x.

+

idx = (p,i) : where i = index in proc p

+
+ +
+
+pNbody.mpi.mpi_argmin(x)¶
+

Find the argument of the minimum value in x.
idx = (p,i) : where i = index in proc p

+
+ +
+
+pNbody.mpi.mpi_bcast(x, root=0)¶
+

Broadcast the variable x from node root. When only one node is defined, it simply returns x.

+
+ +
+
+pNbody.mpi.mpi_find_a_toTask(begTask, fromTask, ex_table, delta_n)¶
+

This function is used to recursively find an exchange table.

+
+ +
+
+pNbody.mpi.mpi_gather(x, root=0)¶
+

Gather x from all nodes to node dest. +Returns a list.

+
+ +
+
+pNbody.mpi.mpi_getval(x, idx)¶
+

Return the value of array x corresponding to the index idx.

+

idx = (p,i) : where i = index in proc p +equivalent to x[i] from proc p

+
+ +
+
+pNbody.mpi.mpi_histogram(x, bins)¶
+

Return a histogram of vector x binned using bins.

+
+ +
+
+pNbody.mpi.mpi_iprint(msg, mode=None)¶
+

Synchronized print, including info on node.

+
+ +
+
+pNbody.mpi.mpi_len(x)¶
+

Length of array x.

+
+ +
+
+pNbody.mpi.mpi_max(x)¶
+

Maximum element of array x.

+
+ +
+
+pNbody.mpi.mpi_mean(x)¶
+

Mean of elements of array x.

+
+ +
+
+pNbody.mpi.mpi_min(x)¶
+

Minimum element of array x.

+
+ +
+
+pNbody.mpi.mpi_pprint(msg)¶
+

Synchronized print.

+
+ +
+
+pNbody.mpi.mpi_recv(source)¶
+

Return a variable sent by node source. When only one node is defined, it does nothing.

+
+ +
+
+pNbody.mpi.mpi_reduce(x, root=0, op=<mpi4py.MPI.Op object at 0x3850690>)¶
+

Reduce x from all nodes only for root. When only one node is defined, the function returns x.

+
+ +
+
+pNbody.mpi.mpi_rprint(msg)¶
+

Rooted print.

+
+ +
+
+pNbody.mpi.mpi_sarange(npart_all)¶
+

Create an integer array containing elements from 0 to n, spread over all nodes. The repartition of elements and the type of elements over nodes is given by the array npart_all.

+
+ +
+
+pNbody.mpi.mpi_send(x, dest)¶
+

Send x to node dest. When only one node is defined, it does nothing.

+
+ +
+
+pNbody.mpi.mpi_sendrecv(x, dest, source)¶
+
+ +
+
+pNbody.mpi.mpi_sum(x)¶
+

Sum elements of array x.
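As an illustration of how these helpers combine (to be run under mpirun; the local array is just a test array):

>>> import numpy as np
>>> from pNbody import mpi
>>> x = np.arange(10)                        # local array on each task
>>> total = mpi.mpi_sum(x)                   # sum over the elements of all tasks
>>> mpi.mpi_pprint("total = %s" % total)     # synchronized print on every task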

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the ctes module

+

Next topic

+

the mpi module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Overview.html index e0c0581..f4cbee8 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Overview.html @@ -1,126 +1,126 @@ Overview — pNbody v4 documentation - +

Overview¶

-

pNbody is a parallelized python module toolbox designed to manipulate and display +

pNbody is a parallelized python module toolbox designed to manipulate and display interactively very large N-body systems.

Its object-oriented approach allows the user to perform complicated manipulations with only a few commands.

As python is an interpreted language, the user can load an N-body system and explore it interactively using the python interpreter. pNbody may also be used in python scripts.

The module also contains graphical facilities designed to create maps of physical values of the system, like density maps, temperature maps, velocity maps, etc. Stereo capabilities are also implemented.

pNbody is not limited by file format. Each user may redefine in a parameter file how to read their preferred format.

Its new parallel (mpi) facilities make it work on computer clusters without being limited by memory consumption. It has already been tested with several millions of particles.

-../_images/cosmo.png +../_images/cosmo1.png

Previous topic

Welcome to pNbody’s documentation!

Next topic

-

the Io module

+

Installation

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/PaletteModule.html similarity index 59% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/PaletteModule.html index e0c0581..b4dd051 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/PaletteModule.html @@ -1,126 +1,134 @@ - Overview — pNbody v4 documentation + the palette module — pNbody v4 documentation - - + + +
-
-

+

the palette module¶

+

this module is used to deal with color palettes.

+
+
+pNbody.palette.readlut(filename='/home/revaz/.pNbody/rgb_tables/light')¶
+

Read a lut file.

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the libdisk module

Next topic

-

the Io module

+

the Movie module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/ParamModule.html b/Doc/newdoc/_build/html/rst/ParamModule.html new file mode 100644 index 0000000..e1f9441 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/ParamModule.html @@ -0,0 +1,195 @@ + + + + + + + + + the param module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the param module¶

+
+
+class pNbody.param.Params(filename, master)¶
+
+
params = [[‘imwidth’,’image width’,’i’,512],
          [‘imheight’,’image height’,’i’,384],
          [‘winwidth’,’window width’,’f’,50.],
          [‘winheight’,’window height’,’f’,37.5]]
+
+

Methods

+
+
+get(name)¶
+

return the value of a parameter

+
+ +
+
+get_dic()¶
+

return values of parameters in a dictionary

+
+ +
+
+get_string(name)¶
+

return the value of a parameter in a string

+
+ +
+
+get_type(name)¶
+

return the type of a parameter

+
+ +
+
+lists()¶
+

print the list of the content of the class

+
+ +
+
+save(filename=None)¶
+

[‘cd’, ‘cd’, ‘Float’, 0.0]

+
+ +
+
+set(name, value)¶
+

set the value of a parameter

+
+ +
+ +
+
+pNbody.param.read_ascii_value(value, tpe, name)¶
+

From a name and type, return an object corresponding to the value given.

+
+ +
+
+pNbody.param.write_ascii_value(value, tpe, name)¶
+

From a name, type and value, return an ascii representation of the object.

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the mpi module

+

Next topic

+

the parameter module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/ParameterModule.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/ParameterModule.html index e0c0581..7bfb166 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/ParameterModule.html @@ -1,126 +1,127 @@ - Overview — pNbody v4 documentation + the parameters module — pNbody v4 documentation - - + + +
-
-

+

the parameters module¶

Previous topic

-

Welcome to pNbody’s documentation!

+

the param module

Next topic

-

the Io module

+

the liblog module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/PhotModule.html similarity index 53% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/PhotModule.html index e0c0581..aec9f50 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/PhotModule.html @@ -1,126 +1,145 @@ - Overview — pNbody v4 documentation + the phot module — pNbody v4 documentation - - + + +
-
-

+

the phot module¶

+
+
+pNbody.phot.LvtoMv(Lv, Mvsun=4.8300000000000001, Lvsun=1.0)¶
+

Lv : V-band luminosity in solar luminosity unit
Mvsun : magnitude of the sun in V-band
Lvsun : V-band solar luminosity in solar luminosity unit

+

Return the magnitude in V-band

+
+ +
+
+pNbody.phot.MvtoLv(Mv, Mvsun=4.8300000000000001, Lvsun=1.0)¶
+

Mv : magnitude in V-band
Mvsun : magnitude of the sun in V-band
Lvsun : V-band solar luminosity in solar luminosity unit

+

Return the corresponding V-band luminosity in solar luminosity unit.
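For example, converting both ways (the luminosity value is arbitrary):

>>> from pNbody import phot
>>> Mv = phot.LvtoMv(1.0e6)    # V-band magnitude of a 1e6 Lsun object
>>> Lv = phot.MvtoLv(Mv)       # back to ~1e6 in solar luminosity units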

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the fourier module

Next topic

-

the Io module

+

the libgrid module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/PlummerModule.html b/Doc/newdoc/_build/html/rst/PlummerModule.html new file mode 100644 index 0000000..b4ffc11 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/PlummerModule.html @@ -0,0 +1,154 @@ + + + + + + + + + the plummer module — pNbody v4 documentation + + + + + + + + + + + + + +
+
+
+
+ +
+

the plummer module¶

+

plummer model

+
+
+pNbody.plummer.Density(G, M, a, r)¶
+

Plummer Density

+
+ +
+
+pNbody.plummer.LDensity(G, M, a, r)¶
+

Plummer Linear Density

+
+ +
+
+pNbody.plummer.Potential(G, M, a, r)¶
+

Plummer Potential

+
+ +
+
+pNbody.plummer.Sigma(G, M, a, r)¶
+

Return sigma (radial) from Jeans equation : 1/rho Int( rho * drPhi * dr )

+
+ +
+
+pNbody.plummer.Vcirc(G, M, a, r)¶
+

Plummer circular velocity
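A small sketch evaluating the circular velocity curve (G, M and a are arbitrary model units):

>>> import numpy as np
>>> from pNbody import plummer
>>> r  = np.linspace(0.1, 10., 100)
>>> vc = plummer.Vcirc(G=1.0, M=1.0, a=0.5, r=r)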

+
+ +
+
+pNbody.plummer.dPotential(G, M, a, r)¶
+

Plummer first derivative of Potential

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the libmiyamoto module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Prerequiste.html similarity index 51% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Prerequiste.html index e0c0581..fdd6026 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Prerequiste.html @@ -1,126 +1,151 @@ - Overview — pNbody v4 documentation + Prerequiste — pNbody v4 documentation - - + + +
-
-

+

Prerequisite¶

+

The basic module of pNbody needs python and additional packages:

1. Python 2.5.x, 2.6.x or 2.7.x (http://www.python.org)
2. a C compiler; gcc is fine (http://gcc.gnu.org/)
3. numpy-1.0.4 or higher (http://numpy.scipy.org/)
4. Imaging 1.1.5 or higher (http://www.pythonware.com/products/pil/)

For additional but useful special functions:

1. scipy 0.7 or higher (http://www.scipy.org/)

For the parallel capabilities, an mpi distribution is needed (e.g. openmpi) as well as the additional python mpi wrapper:

1. mpi4py (http://cheeseshop.python.org/pypi/mpi4py)

In order to convert movies into a standard format (gif or mpeg), the following two applications are needed:

1. convert (imagemagick) (http://www.imagemagick.org/script/index.php)
2. mencoder (mplayer) (http://www.mplayerhq.hu/design7/news.html)
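A quick way to check that the python-side prerequisites are importable (a sketch; the optional packages simply fail to import if absent):

>>> import numpy; numpy.__version__
>>> import Image                      # PIL / Imaging
>>> import scipy                      # optional
>>> import mpi4py                     # optional, for the parallel capabilities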

Previous topic

-

Welcome to pNbody’s documentation!

+

Installation

Next topic

-

the Io module

+

Installing from tarball

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/ProfilesModule.html b/Doc/newdoc/_build/html/rst/ProfilesModule.html new file mode 100644 index 0000000..be45b75 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/ProfilesModule.html @@ -0,0 +1,304 @@ + + + + + + + + + the profiles module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the profiles module¶

+
+
+pNbody.profiles.burkert_mr(r, rs, rho0=1.0)¶
+

Burkert profile
rhob = rho0 / ( ( 1 + r/rs ) * ( 1 + (r/rs)**2 ) )
Burkert, Astrophys. J. 447 (1995) L25.
+
+ +
+
+pNbody.profiles.burkert_profile(r, rs, rho0=1.0)¶
+

Burkert profile
rhob = rho0 / ( ( 1 + r/rs ) * ( 1 + (r/rs)**2 ) )
Burkert, Astrophys. J. 447 (1995) L25.
+
+ +
+
+pNbody.profiles.generic2c_mr(r, rs, a, b, rho0=1.0)¶
+

Mass in the radius r for the distribution +rho = 1/( (r/rs)**a * (1+r/rs)**(b-a) )

+
+ +
+
+pNbody.profiles.generic2c_profile(r, rs, a, b, rho0=1.0)¶
+

generic2c profile +rho = 1/( (r/rs)**a * (1+r/rs)**(b-a) )

+
+ +
+
+pNbody.profiles.hernquist_mR(R, rs, rho0=1)¶
+

Mass in the projected radius R for the distribution
rho = 1/( (r/rs) * (1+r/rs)**3 )

+

(Hernquist 90, Eq. 37)

+

Warning : the function diverges at r=0 and r/rs=1.
Warning : it is badly implemented for arrays.

+
+ +
+
+pNbody.profiles.hernquist_mr(r, rs, rho0=1.0)¶
+

Mass in the radius r for the distribution +rho = 1/( (r/rs) * (1+r/rs)**3 )

+
+ +
+
+pNbody.profiles.hernquist_profile(r, rs, rho0=1.0)¶
+

hernquist profile +rho = 1/( (r/rs) * (1+r/rs)**3 )

+
+ +
+
+pNbody.profiles.jaffe_mr(r, rs, rho0=1.0)¶
+

Mass in the radius r for the distribution +rho = 1/( (r/rs)**2 * (1+r/rs)**2 )

+
+ +
+
+pNbody.profiles.jaffe_profile(r, rs, rho0=1.0)¶
+

jaffe profile +rho = 1/( (r/rs)**2 * (1+r/rs)**2 )

+
+ +
+
+pNbody.profiles.king_Rc(rs, rt)¶
+

Core radius
Find R such that
Sigma(Rc) = Sigma(0)/2.

+
+ +
+
+pNbody.profiles.king_profile(r, rs, rt)¶
+

King profile +(see King 62)

+
+ +
+
+pNbody.profiles.king_profile_Rz(R, z, rs, rt)¶
+

King profile in cyclindrical coord (needed for surface density computation) +(see King 62)

+
+ +
+
+pNbody.profiles.king_surface_density(R, rs, rt)¶
+

Surface density of King profile +(see King 62)

+
+ +
+
+pNbody.profiles.king_surface_density_old(R, rs, rt)¶
+

Obsolete implementation

+
+ +
+
+pNbody.profiles.nfw_mr(r, rs, rho0=1.0)¶
+

Mass in the radius r for the distribution
rho = rho0/((r/rs)*(1+r/rs)**2)

+
+ +
+
+pNbody.profiles.nfw_profile(r, rs, rho0=1.0)¶
+

NFW profile
rho = rho0/((r/rs)*(1+r/rs)**2)
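For example (scale radius and normalisation are arbitrary):

>>> import numpy as np
>>> from pNbody import profiles
>>> r   = np.logspace(-2, 2, 50)
>>> rho = profiles.nfw_profile(r, rs=10., rho0=1.)
>>> M   = profiles.nfw_mr(r, rs=10., rho0=1.)     # enclosed mass at the same radii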

+
+ +
+
+pNbody.profiles.nfwg_mr(r, rs, gamma, rho0=1.0)¶
+

Mass in the radius r for the distribution +rho = rho0/((r/rs)**(gamma)*(1+(r/rs)**2)**(0.5*(3.-gamma)))

+
+ +
+
+pNbody.profiles.nfwg_profile(r, rs, gamma, rho0=1.0)¶
+

NFW modified profile
rho = rho0/((r/rs)**(gamma)*(1+(r/rs)**2)**(0.5*(3.-gamma)))

+
+ +
+
+pNbody.profiles.nfws_mr(r, rhos, rs, r0)¶
+

Mass in the radius r for the distribution +rho = rhos/((r/rs)*(1+r/rs)**2)

+
+ +
+
+pNbody.profiles.nfws_profile(r, rhos, rs, r0)¶
+

NFW softened profile
rho = rhos/(((r+r0)/rs)*(1+r/rs)**2)

+
+ +
+
+pNbody.profiles.pisothm_mr(r, rs, rho0=1.0)¶
+

Mass in the radius r for the distribution +rho = 1/(1+(r/rs)**2)

+
+ +
+
+pNbody.profiles.pisothm_profile(r, rs, rho0=1.0)¶
+

Pseudo-isothermal profile +rho = 1/(1+(r/rs)**2)

+
+ +
+
+pNbody.profiles.plummer_mr(r, rc, rho0=1.0)¶
+

Mass in the radius r for the distribution +rho = 1/(1+(r/rc)**2)**(5/2)

+
+ +
+
+pNbody.profiles.plummer_profile(r, rc, rho0=1.0)¶
+

Plummer profile +rho = 1/(1+(r/rc)**2)**(5/2)

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the fortranfile module

+

Next topic

+

the geometry module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/PyfitsModule.html b/Doc/newdoc/_build/html/rst/PyfitsModule.html new file mode 100644 index 0000000..86948e9 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/PyfitsModule.html @@ -0,0 +1,891 @@ + + + + + + + + + the pyfits module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the pyfits module¶

+

A module for reading and writing FITS files and manipulating their contents.

+

A module for reading and writing Flexible Image Transport System +(FITS) files. This file format was endorsed by the International +Astronomical Union in 1999 and mandated by NASA as the standard format +for storing high energy astrophysics data. For details of the FITS +standard, see the NASA/Science Office of Standards and Technology +publication, NOST 100-2.0.

+

License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE

+

For detailed examples of usage, see the I{PyFITS User’s Manual} available from +U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf}

+

Epydoc markup used for all docstrings in this module.

+
+
@group Header-related Classes: Card, CardList, _Card_with_continue, Header, _Hierarch
@group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU, GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU, _TableBaseHDU, _TempHDU, _ValidHDU
@group Table-related Classes: ColDefs, Column, FITS_rec, _FormatP, _FormatX, _VLF
+
+
+
+class pNbody.pyfits.BinTableHDU(data=None, header=None, name=None)¶
+

Binary table HDU class.

+

Methods

+
+ +
+
+class pNbody.pyfits.CardList(cards=[], keylist=None)¶
+

FITS header card list class.

+

Methods

+
+
+append(card, useblanks=1, bottom=0)¶
+

Append a Card to the CardList.

+

card: The Card to be appended. +useblanks: Use any extra blank cards? default=1.

+
+
If useblanks != 0, and if there are blank cards directly +before END, it will use this space first, instead of +appending after these blank cards, so the total space +will not increase (default). When useblanks == 0, the +card will be appended at the end, even if there are +blank cards in front of END.
+
+
bottom: If =0 (default) the card will be appended after the last
+
non-commentary card. If =1, the card will be appended +after the last non-blank card.
+
+
+ +
+
+copy()¶
+

Make a (deep)copy of the CardList.

+
+ +
+
+count_blanks()¶
+

Find out how many blank cards are directly before the END card.

+
+ +
+
+index_of(key, backward=0)¶
+

Get the index of a keyword in the CardList.

+

key: the keyword name (a string) or the index (an integer). +backward: search the index from the END, i.e. backward? default=0.

+
+
If backward = 1, search from the end.
+
+ +
+
+insert(pos, card, useblanks=1)¶
+

Insert a Card to the CardList.

+
+
pos: The position (index, keyword name will not be allowed) to
+
insert. The new card will be inserted before it.
+
+

card: The Card to be inserted. +useblanks: Use any extra blank cards? default=1.

+
+
If useblanks != 0, and if there are blank cards directly +before END, it will use this space first, instead of +appending after these blank cards, so the total space +will not increase (default). When useblanks == 0, the +card will be appended at the end, even if there are +blank cards in front of END.
+
+ +
+
+keys()¶
+

Return a list of all keywords from the CardList.

+
+ +
+ +
+
+class pNbody.pyfits.ColDefs(input, tbtype='BinTableHDU')¶
+

Column definitions class. It has attributes corresponding to the Column attributes (e.g. ColDefs has the attribute .names while Column has .name). Each attribute in ColDefs is a list of corresponding attribute values from all Columns.

+

Methods

+
+
+add_col(column)¶
+

Append one Column to the column definition.

+
+ +
+
+change_attrib(col_name, attrib, new_value)¶
+

Change an attribute (in the commonName list) of a Column.

+
+ +
+
+change_name(col_name, new_name)¶
+

Change a Column’s name.

+
+ +
+
+change_unit(col_name, new_unit)¶
+

Change a Column’s unit.

+
+ +
+
+del_col(col_name)¶
+

Delete (the definition of) one Column.

+
+ +
+
+info(attrib='all')¶
+

Get attribute(s) information of the column definition.

+
+ +
+ +
+
+class pNbody.pyfits.Column(name=None, format=None, unit=None, null=None, bscale=None, bzero=None, disp=None, start=None, dim=None, array=None)¶
+

Column class which contains the definition of one column, e.g. +ttype, tform, etc. and the array. Does not support theap yet.

+

Methods

+
+ +
+
+class pNbody.pyfits.Delayed(hdu=None, field=None)¶
+

Delayed file-reading data.

+
+ +
+
+class pNbody.pyfits.ErrorURLopener(*args, **kwargs)¶
+

A class to use with urlretrieve to allow IOError exceptions to be +raised when a file specified by a URL cannot be accessed

+

Methods

+
+ +
+
+class pNbody.pyfits.FITS_rec¶
+

FITS record array class. FITS record array is the data part of a +table HDU’s data part. This is a layer over the recarray, so we +can deal with scaled columns.

+

Methods

+
+
+field(key)¶
+

A view of a Column’s data as an array.

+
+ +
+ +
+
+class pNbody.pyfits.FITS_record(input, row=0)¶
+

FITS record class. FITS record class is used to access records of +the FITS_rec object. This will allow us to deal with scaled columns. +The FITS_record class expects a FITS_rec object as input

+

Methods

+
+
+field(fieldName)¶
+

Get the field data of the record.

+
+ +
+
+setfield(fieldName, value)¶
+

Set the field data of the record.

+
+ +
+ +
+
+class pNbody.pyfits.GroupData¶
+

Random groups data object.

+

Allows structured access to FITS Group data in a manner analogous to tables

+

Methods

+
+
+par(parName)¶
+

Get the group parameter values.

+
+ +
+
+setpar(parName, value)¶
+

Set the group parameter values.

+
+ +
+ +
+
+class pNbody.pyfits.GroupsHDU(data=None, header=None, name=None)¶
+

FITS Random Groups HDU class.

+

Methods

+
+
+size()¶
+

Returns the size (in bytes) of the HDU’s data part.

+
+ +
+ +
+
+class pNbody.pyfits.HDUList(hdus=[], file=None)¶
+

HDU list class. This is the top-level FITS object. When a FITS +file is opened, a HDUList object is returned.

+

Methods

+
+
+append(hdu)¶
+

Append a new HDU to the HDUList.

+
+ +
+
+close(output_verify='exception', verbose=0)¶
+

Close the associated FITS file and memmap object, if any.

+

output_verify: output verification option, default = ‘exception’. +verbose: print out verbose messages? default = 0.

+

This simply calls the close method of the _File class. It has this two-tier call because _File has its own private attribute __file.

+
+ +
+
+flush(output_verify='exception', verbose=0, classExtensions={})¶
+

Force a write of the HDUList back to the file (for append and +update modes only).

+

output_verify: output verification option, default = ‘exception’. +verbose: print out verbose messages? default = 0. +classExtensions: A dictionary that maps pyfits classes to extensions

+
+
of those classes. When present in the dictionary, +the extension class will be constructed in place of +the pyfits class.
+
+ +
+
+index_of(key)¶
+

Get the index of an HDU from the HDUList. The key can be an +integer, a string, or a tuple of (string, integer).

+
+ +
+
+info()¶
+

Summarize the info of the HDU’s in this HDUList.

+
+ +
+
+readall()¶
+

Read data of all HDU’s into memory.

+
+ +
+
+update_extend()¶
+

Make sure if the primary header needs the keyword EXTEND or if +it has the proper value.

+
+ +
+
+update_tbhdu()¶
+

Update all table HDU’s for scaled fields.

+
+ +
+
+writeto(name, output_verify='exception', clobber=False, classExtensions={})¶
+

Write the HDUList to a new file.

+

name: output FITS file name to be written to. +output_verify: output verification option, default = ‘exception’. +clobber: Overwrite the output file if exists, default = False. +classExtensions: A dictionary that maps pyfits classes to extensions

+
+
of those classes. When present in the dictionary, +the extension class will be constructed in place of +the pyfits class.
+
+ +
+ +
+
+class pNbody.pyfits.Header(cards=[])¶
+

FITS header class.

+

Methods

+
+
+add_blank(value='', before=None, after=None)¶
+

Add a blank card.

+

value: Text to be added. +before: [same as in update()] +after: [same as in update()]

+
+ +
+
+add_comment(value, before=None, after=None)¶
+

Add a COMMENT card.

+

value: Comment text to be added. +before: [same as in update()] +after: [same as in update()]

+
+ +
+
+add_history(value, before=None, after=None)¶
+

Add a HISTORY card.

+

value: History text to be added. +before: [same as in update()] +after: [same as in update()]

+
+ +
+
+ascardlist()¶
+

Returns a CardList.

+
+ +
+
+copy()¶
+

Make a copy of the Header.

+
+ +
+
+get(key, default=None)¶
+

Get a keyword value from the CardList. +If no keyword is found, return the default value.

+

key: keyword name or index +default: if no keyword is found, the value to be returned.

+
+ +
+
+get_comment()¶
+

Get all comments as a list of string texts.

+
+ +
+
+get_history()¶
+

Get all histories as a list of string texts.

+
+ +
+
+has_key(key)¶
+

Check for existence of a keyword. Returns 1 if found, otherwise, 0.

+

key: keyword name. If given an index, always returns 0.

+
+ +
+
+items()¶
+

Return a list of all keyword-value pairs from the CardList.

+
+ +
+
+rename_key(oldkey, newkey, force=0)¶
+

Rename a card’s keyword in the header.

+

oldkey: old keyword, can be a name or index. +newkey: new keyword, must be a string. +force: if new key name already exist, force to have duplicate name.

+
+ +
+
+update(key, value, comment=None, before=None, after=None)¶
+

Update one header card.

+
+ +
+ +
+
+class pNbody.pyfits.ImageHDU(data=None, header=None, name=None)¶
+

FITS image extension HDU class.

+

Methods

+
+ +
+
+class pNbody.pyfits.PrimaryHDU(data=None, header=None)¶
+

FITS primary HDU class.

+

Methods

+
+ +
+
+class pNbody.pyfits.Section(hdu)¶
+

Image section.

+
+ +
+
+class pNbody.pyfits.StreamingHDU(name, header)¶
+

A class that provides the capability to stream data to a FITS file +instead of requiring data to all be written at once.

+

The following pseudo code illustrates its use:

+

header = pyfits.Header()
for all the cards you need in the header:
    header.update(key, value, comment)
shdu = pyfits.StreamingHDU('filename.fits', header)
for each piece of data:
    shdu.write(data)
shdu.close()

+

Methods

+
+
+close()¶
+

Close the ‘physical’ FITS file.

+ +++ + + + + + +
Parameters :None
Returns :None
+
+ +
+
+size()¶
+

Return the size (in bytes) of the data portion of the HDU.

+ +++ + + + + + +
Parameters :

None

+
Returns :
+
size : integer
+

The number of bytes of data required to fill the stream +per the header provided in the constructor.

+
+
+
+
+ +
+
+write(data)¶
+

Write the given data to the stream.

+ +++ + + + + + +
Parameters :
+
data : ndarray
+

Data to stream to the file.

+
+
+
Returns :
+
writeComplete : integer
+

Flag that when true indicates that all of the required data +has been written to the stream.

+
+
+
+

Notes

+

Only the amount of data specified in the header provided to the +class constructor may be written to the stream. If the provided +data would cause the stream to overflow, an IOError exception is +raised and the data is not written. Once sufficient data has been +written to the stream to satisfy the amount specified in the header, +the stream is padded to fill a complete FITS block and no more data +will be accepted. An attempt to write more data after the stream +has been filled will raise an IOError exception. If the dtype of +the input data does not match what is expected by the header, a +TypeError exception is raised.

+
+ +
+ +
+
+class pNbody.pyfits.TableHDU(data=None, header=None, name=None)¶
+

FITS ASCII table extension HDU class.

+

Methods

+
+ +
+
+class pNbody.pyfits.Undefined¶
+

Undefined value.

+
+ +
+
+exception pNbody.pyfits.VerifyError¶
+

Verify exception class.

+
+ +
+
+pNbody.pyfits.append(filename, data, header=None, classExtensions={})¶
+

Append the header/data to FITS file if filename exists, create if not.

+

If only data is supplied, a minimal header is created

+
+

@type filename: string +@param filename: name of the file to append to +@type data: array, table, or group data object +@param data: the new data used for appending +@type header: L{Header} object or None +@param header: the header associated with ‘data’, if None,

+
+
an appropriate header will be created for the data object +supplied.
+

@type classExtensions: dictionary +@param classExtensions: A dictionary that maps pyfits classes to

+
+
extensions of those classes. When present in +the dictionary, the extension class will be +constructed in place of the pyfits class.
+
+
+ +
+
+pNbody.pyfits.fitsopen(name, mode='copyonwrite', memmap=0, classExtensions={})¶
+

Factory function to open a FITS file and return an HDUList object.

+

name: Name of the FITS file to be opened or already opened file object.
mode: Open mode, ‘readonly’ (default), ‘update’, or ‘append’.
memmap: Is memory mapping to be used? default=0.
classExtensions: A dictionary that maps pyfits classes to extensions of those classes. When present in the dictionary, the extension class will be constructed in place of the pyfits class.
+
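A typical read pattern (the file name is hypothetical; indexing an HDUList and the .data attribute are standard pyfits usage, not documented in this entry):

>>> import pNbody.pyfits as pyfits
>>> hdulist = pyfits.fitsopen('example.fits')   # or pyfits.open(...)
>>> hdulist.info()                              # summary of the HDUs
>>> data = hdulist[0].data                      # data of the primary HDU
>>> hdulist.close()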
+ +
+
+pNbody.pyfits.getdata(filename, *ext, **extkeys)¶
+

Get the data from an extension of a FITS file (and optionally the header).

+

@type filename: string +@param filename: input FITS file name

+
+
@keyword classExtensions: (optional) A dictionary that maps pyfits
+
classes to extensions of those classes. When present in the +dictionary, the extension class will be constructed in place +of the pyfits class.
+
+

@param ext: The rest of the arguments are for extension specification. They are +flexible and are best illustrated by examples:

+

No extra arguments implies the primary header

+
>>> getdata('in.fits')
+
+
+

By extension number:

+
>>> getdata('in.fits', 0)    # the primary header      
+>>> getdata('in.fits', 2)    # the second extension
+>>> getdata('in.fits', ext=2) # the second extension
+
+
+

By name, i.e., EXTNAME value (if unique):

+
>>> getdata('in.fits', 'sci')
+>>> getdata('in.fits', extname='sci') # equivalent
+
+
+

Note EXTNAMEs are not case sensitive

+

By combination of EXTNAME and EXTVER, as separate arguments or as a tuple:

+
>>> getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
+>>> getdata('in.fits', extname='sci', extver=2) # equivalent
+>>> getdata('in.fits', ('sci', 2)) # equivalent
+
+
+

Ambiguous or conflicting specifications will raise an exception, e.g.,

+
>>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) 
+
+
+

@return: an array, record array (i.e. table), or groups data object +depending on the type of the extension being referenced +If the optional keyword ‘header’ is set to True, this function will +return a (data, header) tuple.

+
+ +
+
+pNbody.pyfits.getheader(filename, *ext, **extkeys)¶
+

Get the header from an extension of a FITS file.

+

@param filename: input FITS file name +@type: string +@keyword classExtensions: (optional) A dictionary that maps pyfits

+
+
classes to extensions of those classes. When present in the +dictionary, the extension class will be constructed in place +of the pyfits class.
+
+
@param ext: The rest of the arguments are for extension specification.
+
See L{getdata} for explanations/examples.
+
+

@rtype: L{Header} object +@return: header

+
+ +
+
+pNbody.pyfits.getval(filename, key, *ext, **extkeys)¶
+

Get a keyword’s value from a header in a FITS file.

+

@type filename: string +@param filename: input FITS file name +@type key: string +@param key: keyword name +@keyword classExtensions: (optional) A dictionary that maps pyfits

+
+
classes to extensions of those classes. When present in the +dictionary, the extension class will be constructed in place +of the pyfits class.
+
+
@param ext: The rest of the arguments are for extension specification.
+
See L{getdata} for explanations/examples.
+
+

@return: keyword value +@rtype: string, integer, or float

+
+ +
+
+pNbody.pyfits.info(filename, classExtensions={})¶
+

Print the summary information on a FITS file.

+

This includes the name, type, length of header, data shape and type +for each extension.

+

@type filename: string +@param filename: input FITS file name +@type classExtensions: dictionary +@param classExtensions: A dictionary that maps pyfits classes to

+
+
extensions of those classes. When present in +the dictionary, the extension class will be +constructed in place of the pyfits class.
+
+ +
+
+pNbody.pyfits.new_table(input, header=None, nrows=0, fill=0, tbtype='BinTableHDU')¶
+

Create a new table from the input column definitions.

+
+ +
+
+pNbody.pyfits.open(name, mode='copyonwrite', memmap=0, classExtensions={})¶
+

Factory function to open a FITS file and return an HDUList object.

+

name: Name of the FITS file to be opened or already opened file object.
mode: Open mode, 'readonly' (default), 'update', or 'append'.
memmap: Is memory mapping to be used? default=0.
classExtensions: A dictionary that maps pyfits classes to extensions of those classes. When present in the dictionary, the extension class will be constructed in place of the pyfits class.
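A minimal usage sketch, assuming in.fits exists; open() returns an HDUList which should be closed when done:

>>> hdulist = pNbody.pyfits.open('in.fits')
>>> hdulist.info()
>>> data = hdulist[0].data
>>> hdulist.close()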
+
+ +
+
+pNbody.pyfits.update(filename, data, *ext, **extkeys)¶
+

Update the specified extension with the input data/header.

+

@type filename: string
@param filename: name of the file to be updated
data: the new data used for updating
@keyword classExtensions: (optional) A dictionary that maps pyfits classes to extensions of those classes. When present in the dictionary, the extension class will be constructed in place of the pyfits class.
+

The rest of the arguments are flexible: the 3rd argument can be the header associated with the data. If the 3rd argument is not a header, it (and other positional arguments) are assumed to be the extension specification(s). Header and extension specs can also be keyword arguments. For example:

+
>>> update(file, dat, hdr, 'sci')  # update the 'sci' extension
+>>> update(file, dat, 3)  # update the 3rd extension
+>>> update(file, dat, hdr, 3)  # update the 3rd extension
+>>> update(file, dat, 'sci', 2)  # update the 2nd SCI extension
+>>> update(file, dat, 3, header=hdr)  # update the 3rd extension
+>>> update(file, dat, header=hdr, ext=5)  # update the 5th extension
+
+
+
+ +
+
+pNbody.pyfits.writeto(filename, data, header=None, **keys)¶
+

Create a new FITS file using the supplied data/header.

+

@type filename: string
@param filename: name of the new FITS file to write to
@type data: array, record array, or groups data object
@param data: data to write to the new file
@type header: L{Header} object or None
@param header: the header associated with 'data'; if None, a header of the appropriate type is created for the supplied data. This argument is optional.
+
+
@keyword classExtensions: (optional) A dictionary that maps pyfits
+
classes to extensions of those classes. When present in the dictionary, the extension class will be constructed in place of the pyfits class.
+
@keyword clobber: (optional) if True and if filename already exists, it
+
will overwrite the file. Default is False.
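A minimal usage sketch; out.fits is a placeholder name and clobber=True allows an existing file to be overwritten, as described above:

>>> writeto('out.fits', data, header=hdr, clobber=True)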
+
+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the talkgdisp module

+

Next topic

+

the rec module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/RecModule.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/RecModule.html index e0c0581..cf74d6e 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/RecModule.html @@ -1,126 +1,127 @@ - Overview — pNbody v4 documentation + the rec module — pNbody v4 documentation - - + + +
-
-

Overview¶

-

pNbody is a parallelized python module toolbox designed to manipulate and display interactively very large N-body systems.

-

Its object-oriented approach allows the user to perform complicated manipulations with only very few commands.

-

As python is an interpreted language, the user can load an N-body system and explore it interactively using the python interpreter. pNbody may also be used in python scripts.

-

The module also contains graphical facilities designed to create maps of physical values of the system, like density maps, temperature maps, velocity maps, etc. Stereo capabilities are also implemented.

-

pNbody is not limited by file format. Each user may redefine in a parameter file how to read its preferred format.

-

Its new parallel (mpi) facilities make it work on computer clusters without being limited by memory consumption. It has already been tested with several millions of particles.

-../_images/cosmo.png +
+

the rec module¶

Previous topic

-

Welcome to pNbody’s documentation!

+

the pyfits module

Next topic

-

the Io module

+

the libqt module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Reference.html b/Doc/newdoc/_build/html/rst/Reference.html new file mode 100644 index 0000000..3b25825 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/Reference.html @@ -0,0 +1,179 @@ + + + + + + + + + Reference — pNbody v4 documentation + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/TalkgdispModule.html similarity index 59% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/TalkgdispModule.html index e0c0581..8ce8a0b 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/TalkgdispModule.html @@ -1,126 +1,134 @@ - Overview — pNbody v4 documentation + the talkgdisp module — pNbody v4 documentation - - + + +
-
-

+

the talkgdisp module¶

+
+
+class pNbody.talkgdisp.TalkServer(address=None, module=None, cl=None)¶
+

talk server class

+

Methods

+
+

Previous topic

-

Welcome to pNbody’s documentation!

+

the liblog module

Next topic

-

the Io module

+

the pyfits module

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Test_the_installation.html b/Doc/newdoc/_build/html/rst/Test_the_installation.html new file mode 100644 index 0000000..b27f1fc --- /dev/null +++ b/Doc/newdoc/_build/html/rst/Test_the_installation.html @@ -0,0 +1,152 @@ + + + + + + + + + Check the installation — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

Check the installation¶

+

You can check the installation by simply running the following command:

+
pNbody_checkall
+
+
+

This command must of course be in your path. This will be the case if you did not specify any --prefix. If, on the contrary, --prefix is set to, for example, localdir, your PATH environment variable should contain:

+
localdir/bin
+
+
+

and your PYTHONPATH environment variable should contain:

+
localdir/lib/python2.x/site-packages/
+
+

to ensure that the pNbody package will be found.
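If you prefer to verify these paths from within python itself, a minimal check (the prefix localdir is only illustrative) is to look at where the package is imported from:

>>> import pNbody
>>> pNbody.__file__   # should point below localdir/lib/python2.x/site-packages/pNbody/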

+

If everything goes well, you should see a lot of output on your screen, as well as a window displaying an edge-on disk.

+../_images/edge-on-disk4.png +

Close it when you see it. The script should finally end with something like:

+
########################################################################
+Good News ! pNbody with format gadget is working !
+########################################################################
+
+You are currently using the following paths
+
+HOME               : /home/leo
+PNBODYPATH         : /home/leo/local/lib/python2.6/site-packages/pNbody
+CONFIGDIR          : /home/leo/local/lib/python2.6/site-packages/pNbody/config
+PARAMETERFILE      : /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters
+UNITSPARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters
+PALETTEDIR         : /home/leo/local/lib/python2.6/site-packages/pNbody/config/rgb_tables
+PLUGINSDIR         : /home/leo/local/lib/python2.6/site-packages/pNbody/config/plugins
+OPTDIR             : /home/leo/local/lib/python2.6/site-packages/pNbody/config/opt
+FORMATSDIR         : /home/leo/local/lib/python2.6/site-packages/pNbody/config/formats
+
+
+ + +
+
+
+
+
+ +

Previous topic

+

Installing from source

+

Next topic

+

Default configuration

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/ThermodynModule.html b/Doc/newdoc/_build/html/rst/ThermodynModule.html new file mode 100644 index 0000000..9658893 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/ThermodynModule.html @@ -0,0 +1,201 @@ + + + + + + + + + the thermodyn module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the thermodyn module¶
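The argument conventions of the conversion helpers below are not spelled out on this page; they appear to follow a rho-plus-one-quantity naming scheme (the first letter is the returned quantity, the remaining letters are the inputs, e.g. Tru gives T from rho and U). A minimal, hypothetical sketch under that assumption:

>>> from pNbody import thermodyn
>>> # rho and u are assumed to be existing arrays (or scalars) in the model's units
>>> T = thermodyn.Tru(rho, u)   # temperature from density and specific internal energy (assumed meaning)
>>> P = thermodyn.Pru(rho, u)   # pressure from density and specific internal energy (assumed meaning)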

+
+
+pNbody.thermodyn.Arp(rho, P)¶
+
+ +
+
+pNbody.thermodyn.Art(rho, T)¶
+
+ +
+
+pNbody.thermodyn.Aru(rho, U)¶
+
+ +
+
+pNbody.thermodyn.ElectronDensity(rho, pars)¶
+

Electron density for a mixture of H + He

+
+ +
+
+pNbody.thermodyn.Lambda(rho, u, localsystem, thermopars, coolingfile)¶
+

This corresponds to Lambda normalized

+

Ln = L / nh^2

+

nh = (xi*rho/mh)

+
+ +
+
+pNbody.thermodyn.Pra(rho, A)¶
+
+ +
+
+pNbody.thermodyn.Prt(rho, T)¶
+
+ +
+
+pNbody.thermodyn.Pru(rho, U)¶
+
+ +
+
+pNbody.thermodyn.Tra(rho, A)¶
+
+ +
+
+pNbody.thermodyn.Trp(rho, P)¶
+
+ +
+
+pNbody.thermodyn.Tru(rho, U)¶
+
+ +
+
+pNbody.thermodyn.Ura(rho, A)¶
+
+ +
+
+pNbody.thermodyn.Urp(rho, P)¶
+
+ +
+
+pNbody.thermodyn.Urt(rho, T)¶
+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the cosmo module

+

Next topic

+

the montecarlo module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Tutorial.html similarity index 57% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Tutorial.html index e0c0581..c077eec 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Tutorial.html @@ -1,126 +1,130 @@ - Overview — pNbody v4 documentation + Tutorial — pNbody v4 documentation - - + +
-
-


Previous topic

-

Welcome to pNbody’s documentation!

+

Examples

Next topic

-

the Io module

+

Using pNbody with the python interpreter

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Tutorial_interpreter.html b/Doc/newdoc/_build/html/rst/Tutorial_interpreter.html new file mode 100644 index 0000000..40e2661 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/Tutorial_interpreter.html @@ -0,0 +1,397 @@ + + + + + + + + + Using pNbody with the python interpreter — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

Using pNbody with the python interpreter¶

+

In order to use this tutorial, you first need to copy some examples provided with pNbody. This can be done by typing:

+
pNbody_copy-examples
+
+
+

By default, this creates a directory in your home, ~/pnbody_examples. Move to this directory:

+
cd ~/pnbody_examples
+
+

Then you can simply follow the instructions below. First, start the python interpreter:

+
leo@obsrevaz:~/pnbody_examples python
+Python 2.4.2 (#2, Jul 13 2006, 15:26:48)
+[GCC 4.0.1 (4.0.1-5mdk for Mandriva Linux release 2006.0)] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>>
+
+

Now, you can load the pNbody module:

+
>>> from pNbody import *
+
+
+
+

Creating pNbody objects from scratch¶

+

We can first start by creating a default pNbody object and getting info about it:

+
>>> nb = Nbody()
+>>> nb.info()
+-----------------------------------
+particle file       : ['file.dat']
+ftype               : 'Nbody_default'
+mxntpe              : 6
+nbody               : 0
+nbody_tot           : 0
+npart               : [0, 0, 0, 0, 0, 0]
+npart_tot           : [0, 0, 0, 0, 0, 0]
+mass_tot            : 0.0
+byteorder           : 'little'
+pio                 : 'no'
+>>>
+
+
+

All variables linked to the object nb are accessible by typing nb. followed by the associated variable name:

+
>>> nb.nbody
+0
+>>> nb.mass_tot
+0.0
+>>> nb.pio
+'no'
+
+
+

Now, you can create an object by giving the positions of particles:

+
>>> pos = ones((10,3),float32)
+>>> nb = Nbody(pos=pos)
+>>> nb.info()
+-----------------------------------
+particle file       : ['file.dat']
+ftype               : 'Nbody_default'
+mxntpe              : 6
+nbody               : 10
+nbody_tot           : 10
+npart               : array([10,  0,  0,  0,  0,  0])
+npart_tot           : array([10,  0,  0,  0,  0,  0])
+mass_tot            : 1.00000011921
+byteorder           : 'little'
+pio                 : 'no'
+
+len pos             : 10
+pos[0]              : array([ 1.,  1.,  1.], dtype=float32)
+pos[-1]             : array([ 1.,  1.,  1.], dtype=float32)
+len vel             : 10
+vel[0]              : array([ 0.,  0.,  0.], dtype=float32)
+vel[-1]             : array([ 0.,  0.,  0.], dtype=float32)
+len mass            : 10
+mass[0]             : 0.10000000149
+mass[-1]            : 0.10000000149
+len num             : 10
+num[0]              : 0
+num[-1]             : 9
+len tpe             : 10
+tpe[0]              : 0
+tpe[-1]             : 0
+
+
+

In this case, you can see that the class automatically initializes the other array variables (vel, mass, num and rsp) with default values. Only the first and the last element of each defined vector are displayed by the info method. All defined arrays and array elements are easily accessible using the numarray conventions. For example, to display and change the positions of the first three particles, type:

+
>>> nb.pos[:3]
+array([[ 1.,  1.,  1.],
+       [ 1.,  1.,  1.],
+       [ 1.,  1.,  1.]], type=float32)
+>>> nb.pos[:3]=2*ones((3,3),float32)
+>>> nb.pos[:3]
+array([[ 2.,  2.,  2.],
+       [ 2.,  2.,  2.],
+       [ 2.,  2.,  2.]], type=float32)
+
+
+
+
+

Open from existing file¶

+

Now, let's try to open the gadget snapshot gadget_z00.dat. This is achieved by typing:

+
>>> nb = Nbody('gadget_z00.dat',ftype='gadget')
+
+
+

Again, information on this snapshot may be obtained using the info() method:

+
>>> nb.info()
+-----------------------------------
+particle file       : ['gadget_z00.dat']
+ftype               : 'Nbody_gadget'
+mxntpe              : 6
+nbody               : 20560
+nbody_tot           : 20560
+npart               : array([ 9160, 10280,     0,     0,  1120,     0])
+npart_tot           : array([ 9160, 10280,     0,     0,  1120,     0])
+mass_tot            : 79.7066955566
+byteorder           : 'little'
+pio                 : 'no'
+
+len pos             : 20560
+pos[0]              : array([-1294.48828125, -2217.09765625, -9655.49609375], dtype=float32)
+pos[-1]             : array([ -986.0625    , -2183.83203125,  4017.04296875], dtype=float32)
+len vel             : 20560
+vel[0]              : array([ -69.80491638,   60.56475067, -166.32981873], dtype=float32)
+vel[-1]             : array([-140.59715271,  -66.44669342,  -37.01613235], dtype=float32)
+len mass            : 20560
+mass[0]             : 0.00108565215487
+mass[-1]            : 0.00108565215487
+len num             : 20560
+num[0]              : 21488
+num[-1]             : 1005192
+len tpe             : 20560
+tpe[0]              : 0
+tpe[-1]             : 4
+
+atime               : 1.0
+redshift            : 2.22044604925e-16
+flag_sfr            : 1
+flag_feedback       : 1
+nall                : [ 9160 10280     0     0  1120     0]
+flag_cooling        : 1
+num_files           : 1
+boxsize             : 100000.0
+omega0              : 0.3
+omegalambda         : 0.7
+hubbleparam         : 0.7
+flag_age            : 0
+flag_metals         : 0
+nallhw              : [0 0 0 0 0 0]
+flag_entr_ic        : 0
+critical_energy_spec: 0.0
+
+len u               : 20560
+u[0]                : 6606.63037109
+u[-1]               : 0.0
+len rho             : 20560
+rho[0]              : 7.05811936674e-11
+rho[-1]             : 0.0
+len rsp             : 20560
+rsp[0]              : 909.027587891
+rsp[-1]             : 0.0
+len opt             : 20560
+opt[0]              : 446292.5625
+opt[-1]             : 0.0
+
+
+

You can obtain information on physical values, like the center of mass or the total angular momentum vector, by typing:

+
>>> nb.cm()
+array([-1649.92651346,   609.98256428, -1689.04011033])
+>>> nb.Ltot()
+array([-1112078.125 ,  -755964.1875, -1536667.125 ], dtype=float32)
+
+
+

In order to visualise the model in position space, it is possible to generate a surface density map of it using the display method:

+
>>> nb.display(size=(10000,10000),shape=(256,256),palette='light')
+
+
+

You can now perform some operations on the model in order to explore a specific region. First, translate the model in position space:

+
>>> nb.translate([3125,-4690,1720])
+>>> nb.display(size=(10000,10000),shape=(256,256),palette='light')
+>>> nb.display(size=(1000,1000),shape=(256,256),palette='light')
+
+
+

You can now rotate it around:

+
>>> nb.rotate(angle=pi)
+>>> nb.display(size=(1000,1000),shape=(256,256),palette='light')
+
+
+

You can now display a temperature map of the model. First, create a new object with only the gas particles:

+
>>> nb_gas = nb.select('gas')
+>>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='light')
+
+
+

Now, display the mass-weighted temperature map:

+
>>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='rainbow4',mode='T',filter_name='gaussian')
+
+
+
+
+

Selection of particles¶

+

You can select only particles within a radius smaller than 500 (in user units) with respect to the center:

+
>>> nb_sub = nb.selectc((nb.rxyz()<500))
+>>> nb_sub.display(size=(1000,1000),shape=(256,256),palette='light')
+
+
+

Now, rename the new model and save it:

+
>>> nb_sub.rename('gadget_z00_sub.dat')
+>>> nb_sub.write()
+
+
+

A new gadget file has been created and saved in the current directory. We can now select particles as a function of the temperature. First, display the maximum temperature among all gas particles, then select the particles with selectc and finally save in 'T11.num' the identifiers (variable num) of these particles:

+
>>> log10(max(nb_gas.T()))
+12.8707923889
+>>> nb_sub = nb_gas.selectc( (nb_gas.T()>1e11) )
+>>> nb_sub.write_num('T11.num')
+
+
+

Now open a new snapshot from the same simulation, but at a different redshift, and find the particles that had a temperature higher than 10^11 in the previous snapshot:

+
>>> nb = Nbody('gadget_z40.dat',ftype='gadget')
+>>> nb.display(size=(10000,10000),shape=(256,256),palette='light')
+>>> nb_sub = nb.selectp(file='T11.num')
+>>> nb_sub.display(size=(10000,10000),shape=(256,256),palette='light')
+
+
+

Now, instead of saving it in a gadget file, save it in a binary file type. You simply need to call the set_ftype method before saving it:

+
>>> nb = nb.set_ftype('binary')
+>>> nb.rename('binary.dat')
+>>> nb.write()
+
+
+
+
+

Merging two models¶

+

As a last example, we show how two pNbody models can be easily merged with only 11 lines:

+
>>> nb1 = Nbody('disk.dat',ftype='gadget')
+>>> nb2 = Nbody('disk.dat',ftype='gadget')
+>>> nb1.rotate(angle=pi/4,axis=[0,1,0])
+>>> nb1.translate([-150,0,0])
+>>> nb1.vel = nb1.vel + [50,0,0]
+>>> nb2.rotate(angle=pi/4,axis=[1,0,0])
+>>> nb2.translate([+150,0,50])
+>>> nb2.vel = nb2.vel - [50,0,0]
+>>> nb3 = nb1 + nb2
+>>> nb3.rename('merge.dat')
+>>> nb3.write()
+
+
+

Now display the result from different points of view:

+
>>> nb3.display(size=(300,300),shape=(256,256),palette='lut2')
+>>> nb3 = nb3.select('disk')
+>>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xz')
+>>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xy')
+>>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='yz')
+>>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0])
+
+
+

or save it into a gif file:

+
>>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0],save='image.gif')
+
+
+
+
+ + +
+
+
+
+
+ +

Table Of Contents

+ + +

Previous topic

+

Tutorial

+

Next topic

+

Using pNbody with scripts

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Tutorial_parallel.html b/Doc/newdoc/_build/html/rst/Tutorial_parallel.html new file mode 100644 index 0000000..7ccfa96 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/Tutorial_parallel.html @@ -0,0 +1,254 @@ + + + + + + + + + Using pNbody in parallel — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

Using pNbody in parallel¶

+

With pNbody, it is possible to run scripts in parallel, using the mpi library. You need, of course, to have mpi and mpi4py installed. To check your installation, try:

+
mpirun -np 2 pNbody_mpi
+
+

you should get:

+
This is task 0 over 2
+This is task 1 over 2
+
+

but if you get:

+
This is task 0 over 1
+This is task 0 over 1
+
+

this means that something is not working correctly, and you should check your path or your mpi and mpi4py installation before reading further.

+

The previous script scripts/slice.py can directly be run in parallel. This is simply obtained by calling the mpirun command:

+
mpirun -np 2 scripts/slice.py gadget_z*0.dat
+
+

In this simple script, only the process of rank 0 (the master) opens the file. The content of the file (particles) is then distributed among all the other processors. Each processor receives a fraction of the particles. Then, the selection of gas particles and the slice are performed by all processors on their local particles. Finally, the nb.write() command, run by the master, gathers all particles and writes the output file.

+
+

Parallel output¶

+

With pNbody, it is possible to write files in parallel, i.e., each task writes its own file. We can do this in the previous script simply by adding the line nb.set_pio('yes'). This tells pNbody to write files in parallel when nb.write() is called. The content of the new script scripts/slice-p1.py is:

+
#!/usr/bin/env python
+
+import sys
+from pNbody import *
+
+files = sys.argv[1:]
+
+for file in files:
+  print "slicing",file
+  nb = Nbody(file,ftype='gadget')
+  nb = nb.select('gas')
+  nb = nb.selectc((fabs(nb.pos[:,1])<1000))
+  nb.rename(file+'.slice')
+  nb.set_pio('yes')
+  nb.write()
+
+
+

We can now run it:

+
mpirun -np 2 scripts/slice-p1.py gadget_z00.dat
+
+

This creates two new files:

+
gadget_z00.dat.slice.1
+gadget_z00.dat.slice.0
+
+

The files have the same name as the initial name given in Nbody(), with an extension .i where i corresponds to the process rank. Each file contains the particles attributed to the corresponding task.

+
+
+

Parallel input¶

+

Now, it is possible to start by reading these two files in parallel instead of asking only the master to read one file. In our script, we add the optional argument pio='yes' when creating the object with Nbody():

+

Note also that we have used nb.set_pio('no'). This forces the file to be written at the end only by the master.

+
+

#!/usr/bin/env python
+
+import sys
+from pNbody import *
+
+files = sys.argv[1:]
+
+for file in files:
+  print "slicing",file
+  nb = Nbody(file,ftype='gadget',pio='yes')
+  nb = nb.select('gas')
+  nb = nb.selectc((fabs(nb.pos[:,1])<1000))
+  nb.rename(file+'.slice.new')
+  nb.set_pio('no')
+  nb.write()
+
+
+

When we launch it:

+
mpirun -np 2 scripts/slice-p2.py gadget_z00.dat.slice
+
+

the two files gadget_z00.dat.slice.0 and gadget_z00.dat.slice.1 are each read by one task and processed, but at the end only the master writes the final output: gadget_z00.dat.slice.slice.new.

+
+
+

More on parallelism¶

+

Let's try two other scripts. The first one (findmax.py) tries to find the maximum radial distance between all particles and the center. It illustrates the difference between using max(), which gives the local maximum (the maximum among the particles of the node), and mpi.mpi_max(), which gives the global maximum among all particles:

+
#!/usr/bin/env python
+
+import sys
+from pNbody import *
+
+file = sys.argv[1]
+
+nb = Nbody(file,ftype='gadget',pio='yes')
+local_max  = max(nb.rxyz())
+global_max = mpi.mpi_max(nb.rxyz())
+
+print "proc %d local_max = %f global_max = %f"%(mpi.ThisTask,local_max,global_max)
+
+
+

When running it, you should get:

+
mpirun -np 2 ./scripts/findmax.py gadget_z00.dat.slice
+proc 1 local_max = 8109.682129 global_max = 8109.682129
+proc 0 local_max = 7733.846680 global_max = 8109.682129
+
+

which clearly illustrates the point. Finally, the second script shows that even graphical functions support parallelism. The script showmap.py illustrates this point by computing a map of the model:

+
#!/usr/bin/env python
+
+import sys
+from pNbody import *
+
+file = sys.argv[1]
+
+nb = Nbody(file,ftype='gadget',pio='yes')
+nb.display(size=(10000,10000),shape=(256,256),palette='light')
+
+
+

When running

+
mpirun -np 2 ./scripts/showmap.py gadget_z00.dat.slice
+
+

you get an image of the model. The mapping has been performed independently by two processors.

+
+
+ + +
+
+
+
+
+ +

Table Of Contents

+ + +

Previous topic

+

Using pNbody with scripts

+

Next topic

+

the Io module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Tutorial_scripts.html b/Doc/newdoc/_build/html/rst/Tutorial_scripts.html new file mode 100644 index 0000000..e45a148 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/Tutorial_scripts.html @@ -0,0 +1,147 @@ + + + + + + + + + Using pNbody with scripts — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

Using pNbody with scripts¶

+

In addition to using pNbody in the python interpreter, it is very useful to use pNbody in python scripts. Usually a python script begins with the line #!/usr/bin/env python and must be executable:

+
chmod a+x file.py
+
+

In the following example (slice.py), we show how to write a script that opens a gadget file, selects gas particles and cuts a thin slice:

+
+

-1000<y<1000

+

The new files are saved using the extension .slice.

+
#!/usr/bin/env python
+
+import sys
+from pNbody import *
+
+files = sys.argv[1:]
+
+for file in files:
+  print "slicing",file
+  nb = Nbody(file,ftype='gadget',pio='yes')
+  nb = nb.select('gas')
+  nb = nb.selectc((fabs(nb.pos[:,1])<1000))
+  nb.rename(file+'.slice')
+  nb.write()
+
+
+

In your pnbody_examples directory, you can run this script with the command:

+
./scripts/slice.py gadget_z*0.dat
+
+

or:

+
python ./scripts/slice.py gadget_z*0.dat
+
+
+ + +
+
+
+
+
+ +

Previous topic

+

Using pNbody with the python interpreter

+

Next topic

+

Using pNbody interactively in parallel

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/Overview.html b/Doc/newdoc/_build/html/rst/Units.html similarity index 62% copy from Doc/newdoc/_build/html/rst/Overview.html copy to Doc/newdoc/_build/html/rst/Units.html index e0c0581..78417af 100644 --- a/Doc/newdoc/_build/html/rst/Overview.html +++ b/Doc/newdoc/_build/html/rst/Units.html @@ -1,126 +1,124 @@ - Overview — pNbody v4 documentation + How to deal with units ? — pNbody v4 documentation - - + +
-
-

+

How to deal with units ?¶

Previous topic

-

Welcome to pNbody’s documentation!

+

Generating initial conditions

Next topic

-

the Io module

+

Reference

This Page

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/UnitsModule.html b/Doc/newdoc/_build/html/rst/UnitsModule.html new file mode 100644 index 0000000..bdab06d --- /dev/null +++ b/Doc/newdoc/_build/html/rst/UnitsModule.html @@ -0,0 +1,200 @@ + + + + + + + + + the units module — pNbody v4 documentation + + + + + + + + + + + + + + +
+
+
+
+ +
+

the units module¶

+
+
+class pNbody.units.PhysCte(value, Unit)¶
+

Physical constant

+

Methods

+
+ +
+
+pNbody.units.Set_SystemUnits_From_Params(params)¶
+

return a system of units from given parameters

+

params is a dictionary that must contain at least

+

params['UnitVelocity_in_cm_per_s']
params['UnitMass_in_g']
params['UnitLength_in_cm']
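A minimal sketch of building such a dictionary and creating the unit system; the numerical values below are purely illustrative, not defaults taken from pNbody:

>>> from pNbody import units
>>> params = {}
>>> params['UnitLength_in_cm']         = 3.085e+21   # illustrative value
>>> params['UnitMass_in_g']            = 1.989e+43   # illustrative value
>>> params['UnitVelocity_in_cm_per_s'] = 1.0e+5      # illustrative value
>>> localsystem = units.Set_SystemUnits_From_Params(params)
>>> localsystem.info()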

+
+ +
+
+class pNbody.units.UnitSystem(UnitSysName, UnitLst)¶
+

Units system

+

Methods

+
+
+convertionFactorTo(newUnits)¶
+

return the conversion factor to obtain the new units

+
+ +
+
+info()¶
+

print some info

+
+ +
+
+into(newUnits)¶
+

return the value converted into the new units

+
+ +
+ +
+
+class pNbody.units.Units(symbol, factor=1.0, power=1, ulist=[])¶
+

Units

+

Methods

+
+
+GatherBaseUnits()¶
+

create the self.bases list (recursively)

+
+ +
+ +
+
+pNbody.units.gal¶
+

constants are now defined in ctes.py

+

slight = PhysCte(2.99792458e8,Unit_m/Unit_s)
G = PhysCte(6.6732e-11,Unit_G)
q_electron = PhysCte(1.6022e-19,Unit_C)
planck = PhysCte(6.6262e-34,Unit_J*Unit_s)
boltzmann = PhysCte(1.3807e-23,Unit_J/Unit_K)
m_electron = PhysCte(9.1095e-31,Unit_kg)
m_proton = PhysCte(1.6726e-27,Unit_kg)
m_neutron = PhysCte(1.6750e-27,Unit_kg)
Na = PhysCte(6.0220e+23,Unit_mol)
R = PhysCte(8.3144e+00,Unit_J/Unit_mol)
Av = PhysCte(6.828e-50,Unit_Pa*Unit_m**6)
Bv = PhysCte(4.419e-29,Unit_m**3)

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the io module

+

Next topic

+

the ctes module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/rst/nbodymodule.html b/Doc/newdoc/_build/html/rst/nbodymodule.html new file mode 100644 index 0000000..1e8ccc7 --- /dev/null +++ b/Doc/newdoc/_build/html/rst/nbodymodule.html @@ -0,0 +1,213 @@ + + + + + + + + + the C nbody module — pNbody v4 documentation + + + + + + + + + + + + + +
+
+
+
+ +
+

the C nbody module¶
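The argument lists of these low-level C routines are not reproduced here; in practice the tutorial reaches them through the methods of an Nbody object rather than by calling them directly. A minimal sketch under that assumption (gadget_z00.dat is the file used in the tutorial examples):

>>> from pNbody import *
>>> nb = Nbody('gadget_z00.dat', ftype='gadget')
>>> nb.rotate(angle=pi)   # presumably relies on the C rotation routines (rotx/roty/rotz) internally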

+
+
+pNbody.nbodymodule.acceleration()¶
+

Calculate the acceleration at a given position, with a given softening.

+
+ +
+
+pNbody.nbodymodule.am()¶
+

Calculate the angular momentum of the model.

+
+ +
+
+pNbody.nbodymodule.ampmap()¶
+

Return a map of amplitude of the given points.

+
+ +
+
+pNbody.nbodymodule.amxyz()¶
+

Calculate the angular momentum in x,y,z for all particles.

+
+ +
+
+pNbody.nbodymodule.convol()¶
+

Return a 2d convolution of a with kernel b.

+
+ +
+
+pNbody.nbodymodule.epot()¶
+

Calculate the total potential energy.

+
+ +
+
+pNbody.nbodymodule.pamap()¶
+

Return a map of the given points.

+
+ +
+
+pNbody.nbodymodule.pdmap()¶
+

Return a ponderated map of the given points.

+
+ +
+
+pNbody.nbodymodule.perspective()¶
+

Return a 3d projection of the given points.

+
+ +
+
+pNbody.nbodymodule.potential()¶
+

Calculate the potential at a given position, with a given softening.

+
+ +
+
+pNbody.nbodymodule.rotx()¶
+

Rotation around the x axis.

+
+ +
+
+pNbody.nbodymodule.roty()¶
+

Rotation around the y axis.

+
+ +
+
+pNbody.nbodymodule.rotz()¶
+

Rotation around the z axis.

+
+ +
+
+pNbody.nbodymodule.samxyz()¶
+

Calculate the specific angular momentum in x,y,z for all particles.

+
+ +
+
+pNbody.nbodymodule.sphmap()¶
+

Return a sphmap of the given points.

+
+ +
+
+pNbody.nbodymodule.spin()¶
+

Spin the model around an axis.

+
+ +
+ + +
+
+
+
+
+ +

Previous topic

+

the cooling module

+

This Page

+ + + +
+
+
+
+ + + + \ No newline at end of file diff --git a/Doc/newdoc/_build/html/search.html b/Doc/newdoc/_build/html/search.html index dea8efd..f14c32e 100644 --- a/Doc/newdoc/_build/html/search.html +++ b/Doc/newdoc/_build/html/search.html @@ -1,99 +1,111 @@ Search — pNbody v4 documentation

Search

Please activate JavaScript to enable the search functionality.

From here you can search these documents. Enter your search words into the box below and click "search". Note that the search function will automatically search for all of the words. Pages containing fewer words won't appear in the result list.

\ No newline at end of file diff --git a/Doc/newdoc/_build/html/searchindex.js b/Doc/newdoc/_build/html/searchindex.js index 7535917..e64f54d 100644 --- a/Doc/newdoc/_build/html/searchindex.js +++ b/Doc/newdoc/_build/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({objects:{"pNbody.io":{read_dump:[1,1,1],checkfile:[1,1,1],read_ascii:[1,1,1],write_dump:[1,1,1],write_array:[1,1,1],end_of_file:[1,1,1]}},terms:{load:7,all:1,design7:5,movi:5,ndtype:1,thi:[1,6],testfct:[],checkfil:1,squar:[],shape:[],pythonx:6,end_of_fil:1,follow:[4,5],afil:1,fine:5,languag:7,paramet:[1,7],write:1,onli:[2,7],depend:4,system:7,read_ascii:1,also:7,skipe:1,should:[],openmpi:5,dict:1,vec:1,densiti:7,mpich:[],input:[],expon:[],local:1,modul:[4,0,5,1,7],applic:5,scipi:5,"return":1,format:[1,5,7],read:[1,7],doe:1,pypi:5,mpi:[1,5,7],dat:1,an_existing_fil:1,graphic:7,testal:6,commun:1,"new":[5,7],veri:[1,7],toolbox:7,now:4,special:5,python2:1,like:[6,7],specif:[],header:1,quarternion:[],list:1,prefix:4,integ:[],"default":4,approch:7,each:7,found:1,path:1,revaz:1,where:6,page:0,compil:[4,5,2],www:5,set:1,dump:1,map:7,write_dump:1,begin:1,redefin:7,zero:[],possibl:4,design:7,tempertur:7,arg:[],home:1,orient:7,particl:7,mat:[],tmp:1,index:[0,5],statu:1,mpeg:5,matrix:1,neg:[],label:1,content:0,python_directori:6,prerequist:[0,5,2],matrix_pow:[],mencod:5,"import":1,lib64:1,equiv:[],complic:7,imaginari:[],php:5,nofil:1,arang:1,write_arrai:1,million:7,imagemagick:5,power:[],numer:[],gener:1,manipul:7,chang:6,sophist:[],len:1,standard:[4,5],bodi:7,linalgerror:[],explor:7,dictionari:1,epfl:1,mplayer:5,valu:7,addit:5,mplayerhq:5,convert:5,last:1,capabl:[5,7],column:1,traceback:1,equal:[],etc:7,rais:1,xrang:1,fals:1,implement:7,com:5,interact:7,first:1,oper:[],useful:5,point:[],dtype:1,arrai:1,"float":1,prefer:7,number:1,two:5,filenam:1,unlik:[],alreadi:7,been:7,linux:2,wrap:5,pythonwar:5,txt:1,open:1,your:[4,6],unit:[],three:[],from:[4,0,2,1],creat:7,simpli:6,script:[5,7],errno:1,invert:[],support:2,numpi:[1,5],invers:[],"long":[],name:1,gif:5,without:7,call:1,interpret:7,desing:7,basic:5,overview:[0,7],lage:7,ioerror:1,velocit:7,type:[1,6],usr:6,"function":[1,5],gnu:5,option:4,linalg:[],python:[4,1,5,6,7],imag:5,copi:[],search:0,site:[1,6],warn:6,somewhat:[],cheeseshop:5,line:1,"true":1,than:4,those:[],must:6,"case":1,read_dump:1,none:1,ident:[],word:1,other_directori:4,comput:7,provid:[],gcc:5,setup:4,work:7,displai:7,can:7,mpi4pi:5,dmp:1,directori:[4,6],recent:1,limit:7,pickabl:1,error:1,posit:[],consumpt:7,more:[],root:4,html:5,pil:5,file:[4,1,7],tar:4,pio:1,"int":[],few:7,indic:0,facil:7,"char":1,cluster:7,sourc:[4,0,2],exponenti:[],have:1,tabl:0,close:1,exist:1,asmatrix:[],ascii:1,check:[1,0,6,2],sever:7,packag:[1,5,6],tarbal:[4,2],end:1,welcom:0,lib:6,perform:[4,7],pnbodi:[0,1,2,4,5,6,7],make:7,skiphead:1,same:[],instal:[4,0,6,2],note:6,how:7,need:[4,5],curent:2,bool:1,build:4,test:7,instead:1,you:4,document:0,simpl:1,cchar:1,higher:5,lam:[],product:5,repeat:[],http:5,distribut:5,see:[],org:5,object:[1,7],reach:1,peas:1,ndarrai:1,decompress:[4,2],most:1,xzf:4,want:4,user:7,consid:1,aproch:[],mai:7,multipl:[],memeri:[],contain:[1,7],data:1,parallel:[1,5,7],physic:7,a_non_existing_fil:1,other:1,memori:7,stereo:7,equival:[],array_lik:[],ani:[],stdin:1,well:5,element:[],pickl:1,look:6,anoth:4,exampl:[1,0,3,6,2],command:[4,7],allow:[1,7],enter:4,order:5,usual:6},objtypes:{"0":"py:function","1":"np:function"},titles:["Welcome to pNbody’s documentation!","the Io module","Installation","Examples","Installing from 
source","Prerequiste","Check the installation","Overview"],objnames:{"0":"Python function","1":"Python function"},filenames:["index","rst/Io","rst/Installation","rst/Documentation_and_examples","rst/Installing_from_tarball","rst/Prerequiste","rst/Test_the_installation","rst/Overview"]}) \ No newline at end of file +Search.setIndex({objects:{"pNbody.pyfits.CardList":{insert:[5,9,1],keys:[5,9,1],index_of:[5,9,1],count_blanks:[5,9,1],copy:[5,9,1],append:[5,9,1]},"PyGadget.gadget":{Acceleration:[57,8,1],GetAllHsml:[57,8,1],Density:[57,8,1],LoadParticles2:[57,8,1],GetAllIDQ:[57,8,1],GetAllAcceleration:[57,8,1],GetAllPositions:[57,8,1],GetAllVelocities:[57,8,1],SetParameters:[57,8,1],GetPos:[57,8,1],GetAllPotential:[57,8,1],GetAllTypes:[57,8,1],GetAllMasses:[57,8,1],GetAllMassesQ:[57,8,1],Ngbs:[57,8,1],InitMPI:[57,8,1],Info:[57,8,1],GetAllPositionsQ:[57,8,1],GetAllDensities:[57,8,1],InitDefaultParameters:[57,8,1],GetAllTypesQ:[57,8,1],InitHsml:[57,8,1],SphEvaluate:[57,8,1],GetAllVelocitiesQ:[57,8,1],GetAllID:[57,8,1],LoadParticlesQ:[57,8,1],AllAcceleration:[57,8,1],LoadParticles:[57,8,1],Potential:[57,8,1],AllPotential:[57,8,1],GetParameters:[57,8,1]},"pNbody.cooling_with_metals":{get_lambda_normalized_from_Temperature_FeH:[30,8,1],integrate1:[30,8,1],PrintParameters:[30,8,1],integrate2:[30,8,1],get_cooling_time_from_Density_EnergyInt_FeH:[30,8,1],get_cooling_time_from_Density_Temperature_FeH:[30,8,1],get_lambda_from_Density_Temperature_FeH:[30,8,1],get_lambda_from_Density_EnergyInt_FeH:[30,8,1],get_lambda_from_Density_Entropy_FeH:[30,8,1],init_cooling:[30,8,1]},"pNbody.Movie":{Movie:[9,10,1]},"pNbody.units.UnitSystem":{info:[60,9,1],into:[60,9,1],convertionFactorTo:[60,9,1]},"pNbody.tessel":{info:[23,8,1],GetVoronoi:[23,8,1],ComputeIsoContours:[23,8,1],ConstructDelaunay:[23,8,1],TriangleMedians:[23,8,1],InCircumCircle:[23,8,1],InTriangleOrOutside:[23,8,1],GetTriangles:[23,8,1],test:[23,8,1],CircumCircleProperties:[23,8,1],InTriangle:[23,8,1]},"pNbody.main":{get_known_formats:[62,8,1],Nbody_default:[62,10,1],NbodyDefault:[62,10,1],Nbody:[62,8,1]},"pNbody.pyfits.StreamingHDU":{write:[5,9,1],close:[5,9,1],size:[5,9,1]},"pNbody.libqt":{qtplot:[20,8,1],QNumarrayImage:[20,10,1],display:[20,8,1]},"pNbody.coolinglib":{cooling:[10,8,1],cooling_from_nH_and_T:[10,8,1]},"pNbody.param":{read_ascii_value:[44,8,1],Params:[44,10,1],write_ascii_value:[44,8,1]},"pNbody.cosmo":{setdefault:[3,8,1],Rho_c:[3,8,1],Age_a:[3,8,1],A_z:[3,8,1],Hubble_a:[3,8,1],Z_a:[3,8,1],Adot_a:[3,8,1],CosmicTime_a:[3,8,1],a_CosmicTime:[3,8,1]},"pNbody.nbodymodule":{acceleration:[25,8,1],rotx:[25,8,1],pdmap:[25,8,1],pamap:[25,8,1],epot:[25,8,1],sphmap:[25,8,1],samxyz:[25,8,1],am:[25,8,1],ampmap:[25,8,1],convol:[25,8,1],amxyz:[25,8,1],rotz:[25,8,1],perspective:[25,8,1],roty:[25,8,1],spin:[25,8,1],potential:[25,8,1]},"pNbody.geometry":{rotate:[4,8,1],viewport:[4,8,1],frustum:[4,8,1],align:[4,8,1],boxcut_segments:[4,8,1],inv_viewport:[4,8,1],ortho:[4,8,1],get_obs:[4,8,1],perspective:[4,8,1],boxcut:[4,8,1],expose:[4,8,1],norm:[4,8,1]},"pNbody.liblog.Log":{write:[55,9,1],close:[55,9,1]},"pNbody.pygsl":{sobol_sequence:[54,8,1]},"pNbody.plummer":{Vcirc:[28,8,1],Density:[28,8,1],dPotential:[28,8,1],LDensity:[28,8,1],Potential:[28,8,1],Sigma:[28,8,1]},"pNbody.io":{read_dump:[26,8,1],checkfile:[26,8,1],read_ascii:[26,8,1],write_dump:[26,8,1],write_array:[26,8,1],end_of_file:[26,8,1]},"pNbody.fortranfile":{FortranFile:[52,10,1]},"pNbody.units.Units":{GatherBaseUnits:[60,9,1]},"pNbody.main.NbodyDefault":{CombiMap:[62,9,1],show:[62,9,1],getRadialVelo
cityDispersionInCylindricalGrid:[62,9,1],phi_xyz:[62,9,1],has_var:[62,9,1],sort_type:[62,9,1],Get_Velocities_From_Virial_Approximation:[62,9,1],inertial_tensor:[62,9,1],set_ftype:[62,9,1],selectc:[62,9,1],sigma_z:[62,9,1],rotate_old:[62,9,1],get_ntype:[62,9,1],epot:[62,9,1],histovel:[62,9,1],Accel:[62,9,1],tork:[62,9,1],ComputeSigmaMap:[62,9,1],init:[62,9,1],getRadiusInCylindricalGrid:[62,9,1],get_npart_and_npart_all:[62,9,1],get_list_of_method:[62,9,1],align2:[62,9,1],rotateR:[62,9,1],getNumberParticlesInCylindricalGrid:[62,9,1],sdens:[62,9,1],zmodes:[62,9,1],set_tpe:[62,9,1],get_rsp_approximation:[62,9,1],read:[62,9,1],minert:[62,9,1],get_nbody_tot:[62,9,1],L:[62,9,1],P:[62,9,1],memory_info:[62,9,1],T:[62,9,1],MeanWeight:[62,9,1],SphEvaluate:[62,9,1],redistribute:[62,9,1],ComputeMeanHisto:[62,9,1],get_histocenter2:[62,9,1],hdcenter:[62,9,1],l:[62,9,1],get_mxntpe:[62,9,1],gather_num:[62,9,1],get_num:[62,9,1],check_arrays:[62,9,1],get_default_spec_vars:[62,9,1],get_npart_tot:[62,9,1],dv_mean:[62,9,1],theta_xyz:[62,9,1],Epot:[62,9,1],sigma_vz:[62,9,1],get_default_spec_vect:[62,9,1],set_local_system_of_units:[62,9,1],Lum:[62,9,1],append:[62,9,1],cart2sph:[62,9,1],weighted_numngb:[62,9,1],sub:[62,9,1],mdens:[62,9,1],gather_pos:[62,9,1],set_filenames:[62,9,1],Tmu:[62,9,1],ComputeMap:[62,9,1],getindex:[62,9,1],vx:[62,9,1],get_rotation_matrix_to_align_with_main_axis:[62,9,1],object_info:[62,9,1],ltot:[62,9,1],S:[62,9,1],get_mass_tot:[62,9,1],get_npart_all:[62,9,1],x:[62,9,1],ComputeHisto:[62,9,1],cvcenter:[62,9,1],make_default_vars_global:[62,9,1],z:[62,9,1],Ekin:[62,9,1],rxyz:[62,9,1],msdens:[62,9,1],ComputeDensityAndHsml:[62,9,1],SendAllToAll:[62,9,1],get_list_of_vars:[62,9,1],spec_info:[62,9,1],vel_cyl2cart:[62,9,1],Mr_Spherical:[62,9,1],cm:[62,9,1],set_npart:[62,9,1],Ne:[62,9,1],Ltot:[62,9,1],set_parameters:[62,9,1],histocenter2:[62,9,1],has_array:[62,9,1],cv:[62,9,1],select:[62,9,1],size:[62,9,1],ComputeObjectMap:[62,9,1],selecti:[62,9,1],rebox:[62,9,1],open_and_read:[62,9,1],vrxyz:[62,9,1],init_units:[62,9,1],real_numngb:[62,9,1],read_num:[62,9,1],vel_cart2cyl:[62,9,1],getSurfaceDensityInCylindricalGrid:[62,9,1],translate:[62,9,1],selectp:[62,9,1],sort:[62,9,1],getTree:[62,9,1],getPotentialInCylindricalGrid:[62,9,1],expose:[62,9,1],ComputeMeanMap:[62,9,1],usual_numngb:[62,9,1],gather_mass:[62,9,1],R:[62,9,1],ExchangeParticles:[62,9,1],TreePot:[62,9,1],find_vars:[62,9,1],spin:[62,9,1],dmodes:[62,9,1],info:[62,9,1],get_npart:[62,9,1],zprof:[62,9,1],dx_mean:[62,9,1],sph2cart:[62,9,1],align:[62,9,1],getAccelerationInCylindricalGrid:[62,9,1],InitSpec:[62,9,1],get_ns:[62,9,1],r:[62,9,1],mr:[62,9,1],x_sigma:[62,9,1],display:[62,9,1],rename:[62,9,1],Tcool:[62,9,1],open_and_write:[62,9,1],ekin:[62,9,1],get_nbody:[62,9,1],print_filenames:[62,9,1],vn:[62,9,1],vy:[62,9,1],ComputeSph:[62,9,1],write_num:[62,9,1],vz:[62,9,1],Map:[62,9,1],get_histocenter:[62,9,1],cmcenter:[62,9,1],write:[62,9,1],ComputeSigmaHisto:[62,9,1],histocenter:[62,9,1],Rho:[62,9,1],Vt:[62,9,1],phi_xy:[62,9,1],A:[62,9,1],align_with_main_axis:[62,9,1],U:[62,9,1],TreeAccel:[62,9,1],IntegrateUsingRK:[62,9,1],reduc:[62,9,1],set_pio:[62,9,1],gather_vel:[62,9,1],dens:[62,9,1],Pot:[62,9,1],get_list_of_array:[62,9,1],rxy:[62,9,1],nodes_info:[62,9,1],Vr:[62,9,1],gather_vec:[62,9,1],y:[62,9,1],v_sigma:[62,9,1],Vz:[62,9,1],set_unitsparameters:[62,9,1],sigma:[62,9,1],rotate:[62,9,1]},"pNbody.mapping":{mkmap1d:[2,8,1],mkmap2d:[2,8,1],mkmap1dn:[2,8,1],create_line:[2,8,1],mkmap2dw:[2,8,1],mkmap3dw:[2,8,1],mkmap2dsph:[2,8,1],mkmap1dw:[2,8,1],mkmap2d
n:[2,8,1],mkmap3dsortedsph:[2,8,1],create_line3:[2,8,1],create_line2:[2,8,1],mkmap2dnsph:[2,8,1],mkmap3d:[2,8,1],mkmap3dn:[2,8,1],mkmap3dslicesph:[2,8,1]},"pNbody.pyfits.Header":{add_comment:[5,9,1],get_comment:[5,9,1],get:[5,9,1],items:[5,9,1],update:[5,9,1],rename_key:[5,9,1],get_history:[5,9,1],has_key:[5,9,1],ascardlist:[5,9,1],add_history:[5,9,1],copy:[5,9,1],add_blank:[5,9,1]},"pNbody.Movie.Movie":{read:[9,9,1],info:[9,9,1],open:[9,9,1],get_img:[9,9,1]},"pNbody.libdisk":{Diff:[61,8,1],get_Integral:[61,8,1]},"pNbody.peanolib":{peano2xyz:[42,8,1],xyz2peano:[42,8,1]},"pNbody.talkgdisp":{TalkServer:[0,10,1]},"pNbody.ctes":{convert_ctes:[63,8,1]},"pNbody.libgrid":{get_VolumeMap_On_Carthesian_3d_Grid:[43,8,1],get_MassMap_On_Cylindrical_2dv_Grid:[43,8,1],get_PotentialMap_On_Cylindrical_2dv_Grid:[43,8,1],get_AccumulatedMassMap_On_Spherical_1d_Grid:[43,8,1],get_NumberMap_On_Cylindrical_2dv_Grid:[43,8,1],get_MassMap_On_Cylindrical_3d_Grid:[43,8,1],get_LinearDensityMap_On_Spherical_1d_Grid:[43,8,1],get_PotentialMap_On_Carthesian_3d_Grid:[43,8,1],get_Points_On_Carthesian_3d_Grid:[43,8,1],get_PotentialMap_On_Spherical_1d_Grid:[43,8,1],get_MassMap_On_Cylindrical_2dh_Grid:[43,8,1],get_MassMap_On_Carthesian_3d_Grid:[43,8,1],get_SurfaceDensityMap_From_Cylindrical_2dv_Grid:[43,8,1],get_MassMap_On_Spherical_3d_Grid:[43,8,1],get_NumberMap_On_Spherical_3d_Grid:[43,8,1],get_SurfaceDensityMap_On_Carthesian_2d_Grid:[43,8,1],get_PotentialMap_On_Cylindrical_2dh_Grid:[43,8,1],get_DensityMap_On_Spherical_1d_Grid:[43,8,1],get_PotentialMap_On_Spherical_3d_Grid:[43,8,1],get_MassMap_On_Spherical_1d_Grid:[43,8,1],get_GenericMap_On_Spherical_1d_Grid:[43,8,1],get_DensityMap_On_Carthesian_3d_Grid:[43,8,1],get_VolumeMap_On_Spherical_3d_Grid:[43,8,1],get_NumberMap_On_Carthesian_2d_Grid:[43,8,1],get_DensityMap_On_Cylindrical_3d_Grid:[43,8,1],get_SurfaceMap_On_Carthesian_2d_Grid:[43,8,1],get_Accumulation_Along_Axis:[43,8,1],get_NumberMap_On_Carthesian_3d_Grid:[43,8,1],get_SurfaceDensityMap_On_Cylindrical_2dh_Grid:[43,8,1],get_Points_On_Cylindrical_2dh_Grid:[43,8,1],get_Points_On_Spherical_3d_Grid:[43,8,1],get_NumberMap_On_Spherical_1d_Grid:[43,8,1],get_Points_On_Spherical_1d_Grid:[43,8,1],get_Interpolation_On_Cylindrical_2dv_Grid:[43,8,1],get_PotentialMap_On_Cylindrical_3d_Grid:[43,8,1],get_r_Interpolation_On_Cylindrical_2dv_Grid:[43,8,1],get_MassMap_On_Carthesian_2d_Grid:[43,8,1],get_DensityMap_On_Spherical_3d_Grid:[43,8,1],get_PotentialMap_On_Carthesian_2d_Grid:[43,8,1],get_Integral:[43,8,1],get_First_Derivative:[43,8,1],get_VolumeMap_On_Cylindrical_2dv_Grid:[43,8,1],get_DensityMap_On_Cylindrical_2dv_Grid:[43,8,1],get_VolumeMap_On_Cylindrical_3d_Grid:[43,8,1],get_Symetrisation_Along_Axis_Old:[43,8,1],get_AccelerationMap_On_Cylindrical_2dv_Grid:[43,8,1],get_SurfaceMap_On_Spherical_1d_Grid:[43,8,1],get_Points_On_Carthesian_2d_Grid:[43,8,1],get_SurfaceMap_On_Cylindrical_2dh_Grid:[43,8,1],get_VolumeMap_On_Spherical_1d_Grid:[43,8,1],get_Points_On_Cylindrical_2dv_Grid:[43,8,1],get_Interpolation_On_Spherical_1d_Grid:[43,8,1],get_Points_On_Cylindrical_3d_Grid:[43,8,1],get_NumberMap_On_Cylindrical_3d_Grid:[43,8,1],get_Symetrisation_Along_Axis:[43,8,1],get_NumberMap_On_Cylindrical_2dh_Grid:[43,8,1]},"pNbody.units":{Units:[60,10,1],gal:[60,11,1],PhysCte:[60,10,1],Set_SystemUnits_From_Params:[60,8,1],UnitSystem:[60,10,1]},"pNbody.fortranfile.FortranFile":{writeString:[52,9,1],readReals:[52,9,1],readString:[52,9,1],readRecord:[52,9,1],writeReals:[52,9,1],readInts:[52,9,1],ENDIAN:[52,13,1],writeRecord:[52,9,1],writeInts:[52,9,1],HEADER
_PREC:[52,13,1]},"pNbody.ic":{plummer:[7,8,1],ComputeGridParameters:[7,8,1],nfw_mr:[7,8,1],pisothm_mr:[7,8,1],generic_Mx:[7,8,1],hernquist:[7,8,1],invert:[7,8,1],isothm:[7,8,1],kuzmin:[7,8,1],burkert:[7,8,1],isothm_mr:[7,8,1],expd_mr:[7,8,1],homosphere:[7,8,1],shell:[7,8,1],generic_Mr:[7,8,1],dl2_mr:[7,8,1],generic2c:[7,8,1],ComputeGridParameters2:[7,8,1],box:[7,8,1],homodisk:[7,8,1],pisothm:[7,8,1],nfw:[7,8,1],dl2:[7,8,1],generic_alpha:[7,8,1],nfwg:[7,8,1],miyamoto_nagai:[7,8,1],expd:[7,8,1]},"pNbody.libmiyamoto":{Sigma_z:[47,8,1],Vcirc:[47,8,1],Kappa:[47,8,1],Density:[47,8,1],SurfaceDensity:[47,8,1],Sigma_zbis:[47,8,1],Sigma_t:[47,8,1],d2z_Potential:[47,8,1],Potential:[47,8,1],dz_Potential:[47,8,1],d2R_Potential:[47,8,1],Omega:[47,8,1],dR_Potential:[47,8,1]},PyGadget:{gadget:[57,7,1]},"pNbody.param.Params":{set:[44,9,1],get:[44,9,1],lists:[44,9,1],get_type:[44,9,1],get_string:[44,9,1],save:[44,9,1],get_dic:[44,9,1]},"pNbody.myNumeric":{roty:[33,8,1],rotx:[33,8,1],Interpolate_From_2d_Array:[33,8,1],rotz:[33,8,1],hnd:[33,8,1],whistogram:[33,8,1],turnup:[33,8,1],quadinterp1d:[33,8,1],spline3d:[33,8,1],quaddinterp1d:[33,8,1],splint:[33,8,1],polint:[33,8,1],getmask:[33,8,1],histogram2d:[33,8,1],vprod:[33,8,1],ratint:[33,8,1],Interpolate_From_1d_Array:[33,8,1],lininterp1d:[33,8,1],test:[33,8,1],spline:[33,8,1],expand:[33,8,1]},"pNbody.thermodyn":{Arp:[11,8,1],ElectronDensity:[11,8,1],Art:[11,8,1],Ura:[11,8,1],Tra:[11,8,1],Pra:[11,8,1],Urt:[11,8,1],Prt:[11,8,1],Urp:[11,8,1],Trp:[11,8,1],Pru:[11,8,1],Lambda:[11,8,1],Tru:[11,8,1],Aru:[11,8,1]},"pNbody.pyfits.ColDefs":{info:[5,9,1],add_col:[5,9,1],change_unit:[5,9,1],change_name:[5,9,1],del_col:[5,9,1],change_attrib:[5,9,1]},"pNbody.iclib":{generic_Mx:[36,8,1],exponential_disk:[36,8,1],generic_Mr:[36,8,1],miyamoto_nagai_f:[36,8,1],generic_Mx1D:[36,8,1],generic_alpha:[36,8,1],nfwg:[36,8,1],miyamoto_nagai:[36,8,1],burkert:[36,8,1]},"pNbody.mpi":{mpi_recv:[41,8,1],mpi_GatherAndWriteArray:[41,8,1],mpi_argmin:[41,8,1],mpi_argmax:[41,8,1],mpi_find_a_toTask:[41,8,1],mpi_allreduce:[41,8,1],mpi_getval:[41,8,1],mpi_GetExchangeTable:[41,8,1],mpi_max:[41,8,1],mpi_rprint:[41,8,1],mpi_sendrecv:[41,8,1],mpi_ExchangeFromTable:[41,8,1],mpi_gather:[41,8,1],mpi_AllgatherAndConcatArray:[41,8,1],mpi_min:[41,8,1],mpi_allgather:[41,8,1],mpi_send:[41,8,1],mpi_histogram:[41,8,1],mpi_pprint:[41,8,1],mpi_sarange:[41,8,1],mpi_len:[41,8,1],mpi_bcast:[41,8,1],mpi_arange:[41,8,1],mpi_ReadAndSendBlock:[41,8,1],mpi_ReadAndSendArray:[41,8,1],mpi_iprint:[41,8,1],mpi_mean:[41,8,1],mpi_sum:[41,8,1],mpi_OldGatherAndWriteArray:[41,8,1],mpi_reduce:[41,8,1],mpi_OldReadAndSendArray:[41,8,1]},"pNbody.profiles":{burkert_mr:[64,8,1],nfws_profile:[64,8,1],nfw_mr:[64,8,1],jaffe_mr:[64,8,1],pisothm_mr:[64,8,1],generic2c_profile:[64,8,1],king_profile:[64,8,1],burkert_profile:[64,8,1],king_Rc:[64,8,1],hernquist_profile:[64,8,1],jaffe_profile:[64,8,1],nfwg_mr:[64,8,1],plummer_mr:[64,8,1],king_surface_density_old:[64,8,1],hernquist_mr:[64,8,1],plummer_profile:[64,8,1],generic2c_mr:[64,8,1],pisothm_profile:[64,8,1],nfws_mr:[64,8,1],king_profile_Rz:[64,8,1],nfwg_profile:[64,8,1],hernquist_mR:[64,8,1],nfw_profile:[64,8,1],king_surface_density:[64,8,1]},"pNbody.pyfits.FITS_rec":{field:[5,9,1]},"pNbody.cosmolib":{Age_a:[35,8,1]},"pNbody.pyfits.FITS_record":{field:[5,9,1],setfield:[5,9,1]},"pNbody.pyfits.GroupData":{par:[5,9,1],setpar:[5,9,1]},"pNbody.nbdrklib":{Compute:[38,8,1],IntegrateOverDt:[38,8,1]},"pNbody.palette":{readlut:[6,8,1]},"pNbody.pyfits":{Section:[5,10,1],TableHDU:[5,10,1],new_table:[5,8,
1],PrimaryHDU:[5,10,1],open:[5,8,1],append:[5,8,1],ColDefs:[5,10,1],Header:[5,10,1],writeto:[5,8,1],Delayed:[5,10,1],StreamingHDU:[5,10,1],GroupsHDU:[5,10,1],getheader:[5,8,1],Undefined:[5,10,1],ErrorURLopener:[5,10,1],getval:[5,8,1],Column:[5,10,1],update:[5,8,1],HDUList:[5,10,1],GroupData:[5,10,1],VerifyError:[5,12,1],fitsopen:[5,8,1],getdata:[5,8,1],info:[5,8,1],CardList:[5,10,1],FITS_record:[5,10,1],BinTableHDU:[5,10,1],ImageHDU:[5,10,1],FITS_rec:[5,10,1]},"pNbody.fourier":{fourier:[29,8,1]},"pNbody.libutil":{vel_cyl2cart:[14,8,1],add_box:[14,8,1],apply_filter:[14,8,1],tranfert_functions:[14,8,1],drawxticks:[14,8,1],GetMassMap:[14,8,1],GetMeanValMap:[14,8,1],get_eyes:[14,8,1],GetSigmaMap:[14,8,1],GetSigmaValMap:[14,8,1],log_filter:[14,8,1],Extract1dMeanFrom2dMap:[14,8,1],mplot:[14,8,1],geter2:[14,8,1],vel_cart2cyl:[14,8,1],contours:[14,8,1],compress_from_lst:[14,8,1],getr:[14,8,1],drawyticks:[14,8,1],getvaltype:[14,8,1],getval:[14,8,1],geter:[14,8,1],myhistogram:[14,8,1],extract_parameters:[14,8,1],phys2img:[14,8,1],invgetr:[14,8,1],RotateAround:[14,8,1],get_image:[14,8,1],sbox:[14,8,1],log_filter_inv:[14,8,1],GetMeanMap:[14,8,1],set_ranges:[14,8,1],GetNumberMap:[14,8,1]},"pNbody.liblog":{Log:[55,10,1]},"pNbody.montecarlolib":{mc2d:[19,8,1],mc3d:[19,8,1],mc1d:[19,8,1]},pNbody:{montecarlolib:[19,7,1],plummer:[28,7,1],Movie:[9,7,1],ic:[7,7,1],pygsl:[54,7,1],treelib:[46,7,1],myNumeric:[33,7,1],pyfits:[5,7,1],ctes:[63,7,1],palette:[6,7,1],parameters:[58,7,1],peanolib:[42,7,1],libdisk:[61,7,1],param:[44,7,1],units:[60,7,1],rec:[31,7,1],main:[62,7,1],cooling_with_metals:[30,7,1],nbdrklib:[38,7,1],cosmo:[3,7,1],thermodyn:[11,7,1],mapping:[2,7,1],mpi:[41,7,1],liblog:[55,7,1],talkgdisp:[0,7,1],nbodymodule:[25,7,1],tessel:[23,7,1],asciilib:[21,7,1],libmiyamoto:[47,7,1],phot:[59,7,1],libgrid:[43,7,1],fortranfile:[52,7,1],geometry:[4,7,1],cosmolib:[35,7,1],libqt:[20,7,1],profiles:[64,7,1],iclib:[36,7,1],libutil:[14,7,1],coolinglib:[10,7,1],fourier:[29,7,1]},"pNbody.pyfits.HDUList":{info:[5,9,1],readall:[5,9,1],update_tbhdu:[5,9,1],writeto:[5,9,1],index_of:[5,9,1],flush:[5,9,1],close:[5,9,1],append:[5,9,1],update_extend:[5,9,1]},"pNbody.asciilib":{read:[21,8,1]},"pNbody.pyfits.GroupsHDU":{size:[5,9,1]},"pNbody.phot":{LvtoMv:[59,8,1],MvtoLv:[59,8,1]}},terms:{entropi:[62,14],prefix:[39,8],sci:5,palettedir:8,whose:52,typeerror:5,"_validhdu":5,openmpi:22,compress_from_lst:14,under:[16,50,53,56,12,15],mr_spheric:62,keylist:5,spec:[10,5],everi:[],should:[46,62,8,14,49],wich:[62,49],getpo:57,get_surfacemap_on_spherical_1d_grid:43,libgrid:[12,16,43],gnu:22,correct:43,exponnenti:7,vector:[45,33,4,43,7,41,61,62],math:[],verif:5,get_npart_tot:62,nfw_profil:64,initialis:[],direct:62,second:[30,47,14,5],allgath:41,inertial_tensor:62,even:[49,5],mpeg:22,weighted_numngb:62,a_non_existing_fil:26,lunix:[],commonnam:5,neg:[],"new":[1,32,49,43,22,8,5,60,62,45,65],max_buffer_s:[],nep:7,intriangl:23,never:[12,16,53],get_mass_tot:62,set_unitsparamet:62,cart:[62,14],path:[32,13,8,26,49],interpret:[40,1,45,65,16],phys2img:14,precis:[62,52],credit:45,permit:[],spec_info:62,fourier:[12,16,29],offp:43,offr:43,offt:43,immin:[],offx:43,logfram:55,mkmap2dsph:2,linesep:[],unix:[],offi:43,total:[7,25,62,45,5],univ:35,unit:[16,32,48,3,30,60,59,5,12,62,63,45],plot:[20,14,62],describ:[],would:5,rebox:62,peanolib:[12,16,42],suit:[],call:[45,62,49,26,5],pisothm_profil:64,type:[44,32,33,41,24,57,26,5,62,45],tell:49,relat:5,inv_viewport:4,warn:[61,62,64],set_paramet:62,exce:[],isatti:[],recommand:32,must:[1,32,43,57,8,10,5,60,61,62],word
:26,err:5,setup:39,work:[62,8,65,49],strerror:[],root:[39,41],get_histocent:62,line_buff:[],overrid:[],defer:[],ionis:[24,62],kinet:[62,14],give:[57,52,9,45,49],begtask:41,indic:[16,61,43,5],want:39,end:[62,8,49,26,5],"22044604925e":45,how:[1,48,16,5,45,65],gather_mass:62,env:[1,49],numarrai:45,verifi:5,config:[24,32,8],asciilib:[12,16,21],updat:5,montecarlolib:[12,16,19],lam:[],recogn:[],initial_valu:[],after:5,index_of:5,paticl:62,befor:[52,49,9,45,5],wrong:62,box_mx:7,mvsun:59,parallel:[16,49,22,26,40,62,65],averag:62,attempt:5,minim:[62,5],get_first_deriv:43,curent:18,environ:8,enter:39,lambda:[30,62,14,10,11],order:[45,4,22,7,41,62,14],output_verifi:5,oper:45,over:[2,38,49,5,41],becaus:5,fab:[1,49],rgb_tabl:[32,8,6],afil:26,flexibl:5,vari:52,irand:7,zmode:62,getallhsml:57,usetre:62,fit:5,fix:[],"1xn":62,frsp:[24,62],unit_pa:60,offic:5,urlretriev:5,mpich:[],split:41,getvoronoi:23,them:62,thei:5,global_max:49,"_tablebasehdu":5,unitmass_in_g:[60,24,62],band:59,memory_info:62,ascardlist:5,software_hardwar:5,nfws_mr:64,each:[45,49,52,43,57,5,62,14,65],coolinglib:[12,16,10],dv_mean:62,mean:[49,24,57,41,62,14],set_pio:[62,49],extract:14,nbodymodul:[12,16,25],goe:8,nfwg_profil:64,get_potentialmap_on_spherical_3d_grid:43,content:[16,44,32,49,5,12],prerequist:[16,22,18],reader:[],lib64:26,linear:[28,33,43,14],object_info:62,get_points_on_spherical_3d_grid:43,mden:62,standard:[39,22,5],npart_per_proc:62,coolingfil:[24,62,11],angl:[24,62,4,14,45],traceback:26,cosmic:3,accumul:43,local:[49,24,8,26,62,14],filter:[24,14],iso:[23,14],isn:[],"0x3850690":41,rang:[62,33,14],render:24,independ:49,rank:49,nbodydefault:62,get_n:62,alreadi:[62,65,5],preform:49,thick:7,primari:5,seekabl:[],top:5,getaccelerationincylindricalgrid:62,master:[44,49],too:[],get_potentialmap_on_spherical_1d_grid:43,computesigmamap:62,npart_al:[62,41],whith:62,somewhat:[],psudo:5,read_dump:26,get_npart:62,keyword:5,convert_ct:63,provid:[32,13,46,45,5],tree:[45,62,46,43,41],computegridparameters2:7,project:[25,4,64,14,62],pressur:[62,14],aproch:[],king_surface_dens:64,expd:7,gif:[22,45],pnbody_checkal:8,raw:[],manner:5,seen:[62,4],seek:[],imagepil:20,pickl:26,maxx:62,latter:[32,49],mkmap3dslicesph:2,maxi:62,fname:52,initmpi:57,prevou:49,mpi_bcast:41,object:[16,44,45,33,49,5,4,7,26,41,40,62,14,65],tcool:[62,14],boltzmann:60,talkgdisp:[0,12,16],phase:62,jean:[28,47],bytesio:[],get_accumulation_along_axi:43,log_filt:14,metal:[24,62],log_filter_inv:14,doe:[62,5,26,41],dummi:[],declar:[],groupshdu:5,tform:5,rk78:62,sum:41,has_kei:5,trianl:23,random:5,radiu:[7,62,64,45],get_rsp_approxim:62,gettriangl:23,protocol:[],gadget_z00_sub:45,absolut:[],get_volumemap_on_carthesian_3d_grid:43,libari:49,configur:[16,32,18],geter2:14,burkert:[7,36,64],new_unit:5,displi:45,report:[],toolbox:65,loadparticl:57,byteord:[62,45,41],"public":5,twice:[],"_formatp":5,"_formatx":5,fieldnam:5,get_npart_and_npart_al:62,num:[45,62,14,41],result:45,memmori:5,fail:7,best:5,rename_kei:5,tensor:62,integrateusingrk:62,an_existing_fil:26,imagehdu:5,attribut:[49,5],nbody_default:[62,45],accord:[62,41],triplet:42,"085e":24,vprod:33,tmax:62,bufferedrandom:[],xrang:26,hernquist_mr:64,omp:[12,16,50],extent:49,exponential_disk:36,howev:[],dtout:62,com:22,"2nd":5,diff:61,assum:[36,5,41],summar:5,duplic:5,mpi_exchangefromt:41,integrateoverdt:38,union:5,numpi:[22,62,26,14],three:57,been:[49,65,45,5],age_a:[3,35],desing:65,basic:22,vcirc:[28,47],getval:[14,5],xxx:[],get_imag:14,argument:[7,62,49,5],extract_paramet:14,dl2_mr:7,conta:[],spin:[25,62],ident:[],formatsdir:8,properti:23
,acrod:41,aim:62,calcul:[25,33],mpi_sum:41,pickabl:26,extrahead:[],higer:14,computedensityandhsml:62,sphevalu:[62,57],kwarg:[52,5],n_i:41,sever:65,perform:[39,45,65,49],make:[62,65,5],format:[16,32,34,52,22,8,26,5,62,65],"435693e":24,unitsystem:[60,62],get_potentialmap_on_carthesian_3d_grid:43,getexchang:41,electro:14,complet:[57,5],dr_potenti:47,get_mxntp:62,rais:[26,5],readal:[9,5],redefin:65,kept:[],epx:7,inherit:[],client:[],centrer:[],everyth:8,tha:45,left:[62,4],intriangleoroutsid:23,just:52,pygadget:[12,16,57,46],newton:7,atim:45,getsurfacedensityincylindricalgrid:62,end_of_fil:26,getalltypesq:57,yet:[62,5],languag:65,mplayerhq:22,expos:[62,4],defaultparamet:[24,32,8],change_nam:5,fortran:52,spread:41,save:[1,44,45,14],opt:[32,8,14,45],applic:22,offz:43,which:[49,33,5],interpolate_from_2d_arrai:33,background:24,get_volumemap_on_spherical_3d_grid:43,rotate2:[],plummer_profil:64,specif:[32,45,4,25,5,62,14],manual:5,gather_num:62,localdir:8,underli:[],www:[22,5],right:[62,4],old:[62,43,5],deal:[16,52,48,6,5],generic_mx1d:36,interv:62,maxim:62,init_unit:62,intern:[3,5],get_list_of_var:62,interg:14,get_default_spec_var:62,cart2sph:62,txt:26,bottom:5,subclass:[],python_directori:[],suffici:5,t11:45,readblock:[],foc:[24,62,4],condit:[16,62,17],get_symetrisation_along_axi:43,peek:[],generic2c:[7,64],epfl:26,rotate_old:62,obj:4,simul:45,getradiusincylindricalgrid:62,streaminghdu:5,attrib:5,qnumarrayimag:20,memeri:[],vel_cyl2cart:[62,14],xyz:42,"float":[44,52,41,24,26,5,62,14],encod:[],get_surfacedensitymap_on_carthesian_2d_grid:43,mpi_readandsendblock:41,wrap:22,wai:62,support:[49,18,5],transform:[62,4,14],avail:[50,56,5,53,15],width:[44,62],kuzmin:7,fraction:[24,62,49],lage:65,head:[24,62,14],form:62,forc:[62,49,5],some:[60,33,57,4,45],heat:62,fitsopen:5,solar:59,sigma_z:[62,47],"true":[62,26,5],circumcircleproperti:23,get_surfacedensitymap_from_cylindrical_2dv_grid:43,new_nam:5,maximum:[45,62,49,14,41],until:[],get_potentialmap_on_cylindrical_2dv_grid:43,new_tabl:5,linux2:45,repartit:41,"abstract":[],hernquist_profil:64,computesigmahisto:62,computegridparamet:7,exist:[40,16,45,26,5],readreal:52,pnbodi:[0,1,2,3,21,5,6,7,8,9,10,11,13,14,16,18,19,20,4,23,24,25,26,28,29,30,32,33,35,36,38,39,40,41,42,43,44,45,47,49,52,54,55,22,57,59,60,61,62,63,64,65],check:[16,32,18,49,8,26,5,62],change_attrib:5,urt:11,readonli:5,nx1:62,generic2c_profil:64,get_numbermap_on_spherical_1d_grid:43,when:[5,8,49,41],test:[23,33,65],urp:11,node:[62,2,49,43,41],get_numbermap_on_carthesian_3d_grid:43,classextens:5,findmax:49,stringio:[],turnup:33,nb_sub:45,consid:26,omega:[62,47],writefct:62,m_electron:60,miyamoto_nagai:[7,36],writeblock:[],pseudo:[7,64],ignor:[],fact:62,time:[3,30,62,14],shdu:5,backward:5,update_extend:5,unitparameterfil:62,skip:[9,41],global:[62,49],testfct:[],checkfil:26,lvtomv:59,row:5,mpi_max:[49,41],get_points_on_cylindrical_2dh_grid:43,depend:[39,14,5],get_ey:14,readabl:[],inerti:62,yve:[],vec:[62,26,41],getparamet:57,vel:[7,62,14,45],mkmap2d:2,luminos:[59,14],king_rc:64,sourc:[39,16,62,18,41],string:[24,52,44,5],make_default_vars_glob:62,cool:[16,30,24,10,12,62,51,14],dim:[33,14,5],level:[24,14,5],did:8,reproduc:7,iter:[],item:5,r_ob:[24,4],"828e":60,plummer:[12,16,7,64,28],core:[7,64],"419e":60,ptree:62,current:[45,50,24,53,56,57,8,9,62,15],pnbody_copi:[32,45],deriv:[28,61,47,43,52],rho_c:3,boxsiz:[62,45],gener:[16,17,7,26,27,45,14],satisfi:5,linalgerror:[],modif:7,address:0,extver:5,along:[43,14],box:[7,62,4,24,14],ngb:57,shift:62,computemeanmap:62,primaryhdu:5,vectori:33,get_densitymap_on_sph
erical_3d_grid:43,apply_filt:14,getsigmamap:14,nmax:7,useful:[22,62,43,14],pluginsdir:8,bzero:5,modul:[0,2,3,21,5,6,7,9,10,11,12,14,15,16,19,20,4,22,23,25,26,28,29,30,33,31,35,36,38,39,41,42,43,44,45,46,47,52,50,51,53,54,55,56,57,58,59,60,61,62,63,64,65],loadparticles2:57,prefer:65,get_cooling_time_from_density_temperature_feh:30,marker:14,instal:[16,32,18,49,39,8],dispers:62,planck:60,add_blank:5,selecti:62,memori:[57,62,65,5],univers:[],selectc:[1,62,45,49],prec:52,vel_cart2cyl:[62,14],epsx:62,epsv:62,suppos:62,selectp:[62,45],set_local_system_of_unit:62,loadparticlesq:57,set_rang:14,rigid:62,vmax:62,tranfer:14,graphic:[32,65,49],peano2xyz:42,mkmap1dn:2,uniqu:[62,5,41],mkmap1dw:2,descriptor:[],can:[1,32,49,52,8,9,5,45,65],readrecord:52,gadget_z:[1,49],stream:5,sph:[57,24,2,62],critic:3,hydrogen:[24,62,10],mpi_gatherandwritearrai:41,alwai:5,multipl:62,ptreelib:[12,16,15],skiphead:26,write:[1,55,49,41,52,26,5,62,45],closefd:[],map:[16,2,49,50,24,25,5,12,62,45,14,65],product:[22,33],mat:[20,43,14],max:[62,49,24,7,45,14],rmax:[7,62,43,14],set_tp:62,usabl:[],"3rd":5,xtick:14,xzf:39,mpi_reduc:41,"_extensionhdu":5,mai:[45,65,5],eof:[],data:[52,20,41,9,26,5,14],newlin:[],array_lik:[],divid:36,explicit:62,predic:[],brodcast:41,inform:[45,5],"switch":[],combin:5,gamma:[7,62,24,64],talk:0,epydoc:5,interpolate_from_1d_arrai:33,zmax:[7,43],cutoff:14,trianglemedian:23,nfw_mr:[7,64],gdisp:[],computesph:62,lst:[62,14],still:[50,56,53,15],equiv:[],group:5,polici:[],check_arrai:62,tork:62,pyathonpath:[],platform:[],window:[44,8],vtr:[62,14],main:[12,16,62],surfacedens:47,boxcut_seg:4,non:[14,5],initi:[16,62,17,49],histogram2d:33,lunch:49,half:43,now:[32,49,39,60,61,45],name:[44,32,49,24,55,7,9,26,5,62,14],perspect:[24,25,4],revers:[62,41],baserawio:[],separ:[57,62,4,5],mkmap2dnsph:2,get_points_on_cylindrical_3d_grid:43,readfit:[],compil:[39,16,22,57,18],replac:[46,9],spline3d:33,gaussian:45,redistribut:62,tempertur:[45,65],exang:41,get_massmap_on_carthesian_3d_grid:43,vtr2:14,space:[24,62,45,5],profil:[12,16,7,62,64],"5mdk":45,factori:5,nofil:26,million:65,argv:[1,49],get_cooling_time_from_density_energyint_feh:30,maxnumngbdevi:62,force_comput:62,blksize:[],org:22,"byte":[52,5],card:5,header_prec:52,libmiyamoto:[12,16,47],setdefault:3,badli:64,open_and_writ:62,synchron:41,motion:62,turn:33,place:[62,5],mkmap3d:2,first:[45,47,49,30,43,7,61,26,5,28,62,14],origin:[7,62,4],directli:5,vr2:14,onc:[7,5],arrai:[45,33,52,23,5,4,43,64,9,26,41,62,14],has_arrai:62,q_electron:60,stsci:5,open:[1,45,16,49,9,26,5,40,62],angular:[25,45,62],size:[45,49,52,4,24,43,7,5,61,62,14],convens:45,given:[44,2,33,49,3,36,5,52,23,7,25,41,60,62,63,14,57],pamap:25,local_max:49,localsystem:11,circl:23,writecomplet:5,isothm:7,gatherbaseunit:60,copi:[45,32,43,5],specifi:[62,8,52,14,5],broadcast:41,liblog:[12,16,55],than:[39,45,47,43,49],"6732e":60,f_m:29,aproxim:62,posit:[45,36,4,24,25,9,5,62,14,57],l_n:24,cosmolog:3,seri:13,pra:11,flag_cool:45,ntask:[62,41],ani:[8,5],nbodi:[1,45,49,24,7,62],pru:11,prt:11,setparamet:57,getalltyp:57,rotatearound:14,squar:[],phi_xi:62,note:[62,49,5],take:[7,62],users_manual1:5,get_interpolation_on_spherical_1d_grid:43,noth:[62,41],libutil:[12,16,14],begin:[1,26],sure:5,fct:7,normal:[30,62,14,11],buffer:[],nallhw:45,compress:14,pair:5,renam:[1,62,49,45,5],getallid:57,samxyz:25,get_points_on_cylindrical_2dv_grid:43,runtim:52,generic_alpha:[7,36],defaultconfig:32,axi:[45,33,4,24,43,7,25,62,14],sigma:[28,7,62,64,14],slope:33,currentmodul:[],show:[45,1,62,55,49],ndtype:26,phi_xyz:62,convol:25,ratint:33,mvtolv:59,g
etmeanvalmap:14,rotat:[62,33,4,25,45,14],onli:[45,18,49,36,41,4,43,5,62,65],ratio:62,elemet:33,nall:45,get_massmap_on_carthesian_2d_grid:43,dict:26,unitmass:24,homospher:7,get:[44,45,49,23,30,4,43,57,5,62],fortranfil:[12,16,52],newkei:5,cannot:5,physct:60,requir:5,fileno:[],unitlength:24,get_histocenter2:62,sbox:14,median:23,yield:[],xmax:[7,43,14],bscale:5,where:[32,33,49,52,43,62,41,13,14],has_var:62,summari:5,kernel:25,polint:33,infinit:7,readint:52,frustrum:4,label:[26,14],between:[49,4,43,24,61,62,14],"import":[1,45,26,49],read_ascii_valu:44,getindex:62,assumpt:52,betwen:14,get_genericmap_on_spherical_1d_grid:43,screen:8,num_fil:45,mpi_rprint:41,region:[62,45],pisothm:7,voronoi:23,mont:19,tutori:[40,16,45],mani:5,l_min:[24,14],among:[45,49],acceler:[57,25,43,62],color:[24,32,14,6],overview:[16,65],pot:62,sigma_t:47,nlocal:41,invert:7,theap:5,invers:[7,62,14],valueerror:[],thistask:49,resolut:7,do_not_sort:62,usual_numngb:62,those:[52,5],"case":[45,62,8,26,5],hdu:5,kappa:47,hdr:5,plugin:[32,8],get_symetrisation_along_axis_old:43,nfws_profil:64,lut2:45,add_com:5,add_col:5,cluster:65,scipi:22,lininterp1d:33,writeint:52,luminosti:62,asmatrix:[],ascii:[44,62,21,26,5],par:[3,5,11],inquiri:[],characters_written:[],mode1:62,same:[45,33,47,52,49,5,62],binari:[7,5,62,45,41],html:22,pad:5,get_rotation_matrix_to_align_with_main_axi:62,getallmassesq:57,document:16,get_volumemap_on_cylindrical_2dv_grid:43,exchangeparticl:62,ness:52,decompress:[39,16,18],"3144e":60,capabl:[22,65,5],copyonwrit:5,writedatablock:[],burkert_profil:64,appropri:[7,5],get_densitymap_on_carthesian_3d_grid:43,markup:5,clobber:5,without:[62,65],gather_vec:62,model:[16,45,49,36,37,4,7,25,40,28,62],gather_vel:62,dimension:[61,43],get_list_of_arrai:62,get_massmap_on_cylindrical_2dh_grid:43,computemeanhisto:62,"0220e":60,execut:1,rest:5,processu:49,initspec:62,speed:62,mpi_allgatherandconcatarrai:41,struct:52,hint:[],except:5,param:[16,44,60,5,12,62],paral:49,rescal:14,read_cool:[],treeaccel:62,mpi_sarang:41,momentum:[45,25,14,62],mpi_arang:41,real:[61,43,14,52],around:[33,25,4,45,62],read:[32,49,5,21,6,52,9,26,41,62,65],get_str:44,temperatur:[62,14,10,45],grid:[27,16,7,62,43],indici:[61,62],convertionfactorto:60,sum_:29,nost:5,integ:[52,41,5,61,62,14],server:0,mpi_mean:41,readlut:6,either:62,unitvelocity_in_cm_per_:[60,24,62],output:[16,49,4,8,5,40,62,14],showmap:49,sphere_mr:7,respect:[62,4,14,45],intend:[],computehisto:62,slice:[1,2,49],definit:[32,5],achiev:45,legal:[],cylindr:[62,43,14],complic:65,refer:[12,16,62],power:60,a_cosmictim:3,ltot:[62,45],winheight:44,writerecord:52,d2z_potenti:47,"throw":[],acc:62,nsph:24,neighbor:[24,62,57],act:62,vz2:14,"2x2":14,processor:[49,41],routin:[7,62,46],redshift:45,get_numbermap_on_spherical_3d_grid:43,splint:33,coodin:[62,14],cyclindr:64,your:[39,1,45,8,49],jaffe_profil:64,buffer_s:[],change_unit:5,setpar:5,bissector:7,overwrit:5,get_points_on_carthesian_2d_grid:43,start:[49,45,5],interfac:[],lot:8,gravit:[62,57],get_lambda_from_density_entropy_feh:30,mkmap1d:2,linalg:[],tupl:[24,5,41],quadinterp1d:33,jul:45,getdata:5,viewport:4,possibl:[39,52,45,49],"default":[16,32,18,52,3,24,39,7,57,5,62,45,14],solf:62,creat:[16,45,49,41,40,5,60,62,14,65],deep:5,mpi_allgath:41,file:[1,32,16,45,49,21,6,52,39,40,24,9,26,5,34,62,55,65],get_points_on_carthesian_3d_grid:43,fill:5,qtplot:20,again:45,nbody_gadget:45,mpi_sendrecv:41,readinto:[],coldef:5,field:5,defaultparam:14,rawio:[],writabl:[],you:[1,32,49,39,8,5,45],get_surfacemap_on_carthesian_2d_grid:43,sequenc:[14,54,52],symbol:60,docstr:5,mass_tot:
[62,45],polynomi:33,frum_num:62,reduc:[62,41],homodisk:7,directori:[39,1,32,45,55],getallacceler:57,mask:33,rxy:62,mass:[45,24,43,7,57,64,62,14],parallelism:[40,16,49],potenti:[47,43,57,28,62,25],disp:5,represent:[44,4],all:[32,49,41,38,25,26,5,62,45],dist:[24,14],illustr:[49,5],allpotenti:57,unitsysnam:60,bytearrai:[],obsm:14,follow:[1,45,52,36,22,39,7,8,5,62],disk:[7,45,8,36],pygsl:[12,16,54],errtoltheta:62,read_ascii:26,init_cool:30,dl2:7,miyamoto_nagai_f:36,init:[30,62,57],ymin:43,norm:[62,4],mpi_recv:41,fals:[7,62,26,14,5],mpi:[16,49,22,57,26,41,12,62,65],ytick:14,hubbleparam:[3,24,45],fall:[],veri:[1,26,14,65],mpi_allreduc:41,flag_sfr:45,list:[44,23,5,24,7,26,41,60,62,14],dpotenti:28,small:14,revaz:[26,6],writearrai:[],dimens:[7,62,54],electrondens:11,diment:14,edu:5,zero:[62,33,14],mpi_argmax:41,design:65,pass:[],further:49,whenc:[],what:[24,5],hernquist:[7,64],sub:[62,14],unitsparam:62,sun:59,section:5,crush:[24,14],delet:5,memmap:5,method:[0,44,45,52,20,55,9,5,60,62],full:[],nb3:45,nb2:45,nb1:45,get_accelerationmap_on_cylindrical_2dv_grid:43,sophist:[],strong:45,modifi:[32,64,62],valu:[44,32,33,52,45,5,24,43,7,57,41,60,61,62,14,65],amoung:62,search:[16,5],configuratio:32,persp:[24,62],gettre:62,amount:5,mc1d:19,magnitud:59,point:[45,49,23,4,43,7,25,62,14,57],data_typ:41,timestep:30,tablehdu:5,filenam:[44,52,6,55,26,5,62],heurist:[],select:[1,45,49,16,40,62],vrxyz:62,regist:[],two:[16,33,49,43,22,5,40,45,14],"0000000000000001e":62,taken:[24,32],mpi_send:41,more:[16,49,43,5,40,45],allacceler:57,desir:[7,62],veloci:62,flag:[24,62,5],get_potentialmap_on_cylindrical_2dh_grid:43,rotx:[25,33],known:62,none:[0,44,5,4,24,43,55,7,9,26,41,61,62,14],histori:5,cooling_with_met:[12,16,30],den:62,mr_fct:7,accept:5,histocent:62,sphere:7,minimum:[62,14,41],explor:[45,65],cours:[8,49],axis1:[62,4],axis2:[62,4],mape:2,unit_kg:60,anoth:39,divis:7,getmask:33,reject:[62,14],carthesian:[62,43,14],simpl:[32,49,23,7,26,14],optdir:8,flag_ag:45,resourc:5,amximum:41,referenc:5,get_surfacemap_on_cylindrical_2dh_grid:43,plane:[24,62,4,14],mode2:62,associ:[45,5],lvsun:59,"short":[],ambigu:5,caus:5,ortho:4,spheric:[62,43],egg:[],help:45,through:[],hierarchi:[],lweight:14,paramet:[16,44,32,18,52,3,30,4,24,60,7,57,58,26,5,12,62,14,65],epot:[25,62],overhead:57,imheight:44,align2:62,nbdrklib:[12,16,38],pysic:24,good:[62,8],"return":[2,3,41,7,43,10,14,19,4,23,24,25,26,28,33,35,36,5,42,44,47,54,57,59,60,62],gadget:[1,45,46,49,57,8,62],untransl:[],dist_ey:[24,62,4],getr:14,spec_vect:62,instruct:45,ampmap:25,easili:45,iff:[],radian:[62,4],radial:[28,62,14,49],found:[62,8,26,5],nfwg:[7,36],computemap:62,truncat:[],getpotentialincylindricalgrid:62,weight:[24,62,33,45],getradialvelocitydispersionincylindricalgrid:62,phi:[29,47],realli:62,expect:5,http:[22,5],energi:[62,25,14,10,5],orient:65,mynumer:[12,16,33],sort_typ:62,print:[1,44,49,30,41,5,60,62],mc2d:19,advanc:[],myhistogram:14,somewer:[],base:[60,62,46,57],ask:49,"6022e":60,cvcenter:62,b_weight:24,symetr:43,rxyz:[62,45,49],veloc:[47,4,57,28,62,14],omit:[],phi_m:29,"5th":5,major:62,exchang:[62,41],number:[52,24,43,7,57,26,5,62,14],frmax:7,talkserv:0,vetor:14,done:45,construct:[16,23,53,50,56,5,12,15],blank:5,m_proton:60,readfct:62,trapez:61,betwe:62,differ:[32,49,45,62],php:22,exponenti:36,interact:65,least:60,vxyr2:14,vxy:62,statement:[],ttype:5,dt0:62,store:[32,5],interti:62,option:[32,49,52,39,24,9,5,62],get_ntyp:62,get_massmap_on_spherical_1d_grid:43,part:[57,5],dzphi:47,parnam:5,"_temphdu":5,king:64,scheme:30,contrari:[62,8],other_directori:39,get_lambda_from_dens
ity_temperature_feh:30,str:[],consumpt:65,mpi_getv:41,vx2:14,"6262e":60,comput:[49,23,38,43,7,57,64,62,14,65],packag:[32,22,24,57,8,26],hubbl:3,write_num:[62,45],"null":5,equival:[5,41],commentari:5,"6750e":60,npart:[62,45,41],initdefaultparamet:57,drawxtick:14,also:[49,65,5],get_lambda_from_density_energyint_feh:30,build:39,distribut:[2,49,36,22,7,62,19,64],previou:[45,49],reach:26,mixtur:11,most:26,automodul:[],rho:[45,47,7,64,11,28,62,14],alpha:[7,62,4,24,14],tpe:[44,62,45],ext:5,exp:14,azimuth:62,get_potentialmap_on_carthesian_2d_grid:43,extkei:5,"3x1":62,"3x3":23,astrophys:5,fine:22,find:[5,49,64,45,41],cell:[61,7,62,43],copyright:45,get_massmap_on_cylindrical_2dv_grid:43,writer:52,read_on:9,factor:[35,60,62],get_num:62,hit:[],palett:[16,49,20,6,12,45,14],get_nbody_tot:62,col_nam:5,"3xn":[62,33,14],get_numbermap_on_carthesian_2d_grid:43,quarternion:[],byter:62,coord:[62,64,14],common:[],arp:11,set:[16,44,32,34,3,52,7,57,8,26,5,62],art:11,aru:11,dump:26,pisothm_mr:[7,64],l_kx:24,l_ky:24,sec:14,arg:[52,4,7,5,62,14],close:[55,8,26,5],"_imagebasehdu":5,contour:[23,14],analog:5,someth:[8,49],wor:62,unit_mol:60,signatur:[],imagemagick:22,get_known_format:62,get_massmap_on_spherical_3d_grid:43,gethead:5,distinguish:[],filter_nam:[24,45],both:62,last:[45,62,26,5],winwidth:44,informatin:45,meanmap:14,d2r_potenti:47,pdf:5,mpi_getexchanget:41,load:[45,65],simpli:[32,49,5,8,45,62],l_crush:24,get_list_of_method:62,gather_po:62,header:[52,62,26,14,5],vxyr:[62,14],littl:[62,52,45,41],suppli:5,vertic:62,devic:[],due:62,empti:62,get_com:5,imag:[44,45,49,20,4,22,24,9,5,62,14],geter:14,append:[62,55,5],coordin:[62,14],gal:60,tangenti:[62,14],look:[62,4,14],hdcenter:62,dz_potenti:47,readarrai:[],mpi4pi:[22,49,41],"while":5,abov:[],error:26,x_sigma:62,mc3d:19,readi:[],centr:[7,62],nb_ga:45,itself:52,quadrat:33,vy2:14,obsolet:[12,16,64,15],get_typ:44,combimap:62,oldkei:5,decod:[],zprof:62,conflict:5,higher:[61,22,62,43,45],find_var:62,optim:47,mpirun:[57,49],ponder:25,read1:[],moment:[62,14],user:[30,32,65,45,5],typic:[],focal:[24,62,4],recent:26,lower:[61,43,14],task:49,particul:[7,62],lib:[24,8],add_histori:5,drphi:28,simplai:41,person:2,elev:62,mandriva:45,pfennig:14,lin:14,explan:5,gadget_z00:[45,49],"_file":5,groupdata:5,shape:[45,49,41,4,43,24,5,62,14],"6726e":60,regardless:[],get_numbermap_on_cylindrical_3d_grid:43,imwidth:44,mandat:5,cut:[1,24,62,14],extra:5,friedli:14,rgb:14,nfwg_mr:64,dtype:[45,26,5],printparamet:30,input:[40,16,62,49,5],unlik:[],pnbodypath:[13,8],euler:4,bin:[1,49,8,41,62,14],thermodyn:[12,16,11],big:[62,14],a_z:3,insert:5,bit:52,pmlib:[12,16,53],b_color:24,implemet:62,set_systemunits_from_param:60,delaunai:23,exttt:[],write_dump:26,sgn:62,textiowrapp:[],back:[9,5],unspecifi:[],sampl:[23,57],svxyr:62,roti:[25,33],scale:[7,24,14,5],though:[],per:[24,62,5],arrayob:24,mencod:22,flag_feedback:45,"_hierarch":5,proc:[62,49,41],isothm_mr:7,run:[1,8,49],pnbody_mpi:49,sigma_zbi:47,bintablehdu:5,delta_n:41,nbody_tot:[62,45],hilbert:62,idx:41,boxcut:4,paralel:62,block:[5,41],file2:62,file1:62,gadgetparameterfil:62,pythonpath:8,within:45,rawiobas:[],statment:[],get_densitymap_on_spherical_1d_grid:43,computeobjectmap:62,create_line3:2,create_line2:2,ensur:8,constructdelaunai:23,triangl:23,spam:[],errno:26,king_profil:64,"long":52,includ:[62,5,41],mpi_gath:41,getallposit:57,nbodypath:13,decomposit:62,msg:41,link:[62,14,45],translat:[62,4,45],ulist:60,line:[1,2,49,55,24,26,45],diret:49,info:[45,23,41,57,9,5,60,62],concaten:41,utf:[],consist:[62,52],align_with_main_axi:62,caller:[],omegalambda:[
3,45],readlin:[],similar:62,curv:62,sort:62,constant:[60,63],ex_tabl:41,jaffe_mr:64,doesn:[],repres:[],"char":26,unit_m:60,treepot:62,matint:14,b_xopt:24,tranfert_funct:14,titl:[],sequenti:[],nan:62,get_massmap_on_cylindrical_3d_grid:43,codec:[],unit_k:60,tbtype:5,draw:[62,14],b_yopt:24,thermopar:11,developp:62,amplitud:[25,62],amp_m:29,algorithm:[7,2],nasa:5,eeach:49,far:[62,4],v_sigma:62,svr:62,code:[52,5],edg:8,scratch:[40,16,45],zmin:43,soften:[25,64,62],getnumberparticlesincylindricalgrid:62,cheeseshop:22,sphmap:25,privat:5,sensit:5,libdisk:[12,16,61],send:[62,41],z_a:3,ftype:[1,7,62,45,49],sens:62,get_surfacedensitymap_on_cylindrical_2dh_grid:43,sent:41,objet:45,minert:62,corespond:33,electron:[62,11],volum:43,svtr:62,reciv:49,tra:11,"try":[62,45,49],amxyz:25,trp:11,tru:11,"_corruptedhdu":5,impli:5,smaller:45,visualis:45,getvaltyp:14,get_default_spec_vect:62,carlo:19,fonction:[62,14],compat:[],index:[16,41,22,24,5,62,14],pdmap:25,iclib:[12,16,36],access:[45,5],matrix_pow:[],expd_mr:7,"__file":5,pnbody_exampl:[1,45],len:[45,26,54],leo:[24,45,8],bodi:[62,4,65],adiabat:[24,62],l25:64,get_points_on_spherical_1d_grid:43,ioerror:[26,5],becom:[],getnumbermap:14,convert:[22,62,63,14],convers:60,spec_var:62,technolog:5,astrophi:64,hubble_a:3,cte:[12,16,60,63],del_col:5,chang:[62,52,45,5],appli:14,approxim:[7,62],jaff:64,immut:[],oval:7,from:[1,41,7,14,16,18,4,24,26,28,32,33,39,40,5,42,44,45,47,52,49,43,60,62],update_tbhdu:5,commun:26,intiti:45,read_num:62,get_npart_al:62,few:65,usr:[1,49],get_img:9,mpi_find_a_totask:41,cosmo:[3,12,16],givent:57,extens:[1,5],get_accumulatedmassmap_on_spherical_1d_grid:43,bufferedread:[],histovel:62,flag_met:45,sendalltoal:62,account:62,phot:[12,16,59],alia:62,arang:[62,26],thin:[1,7],endian:52,control:[],unitlength_in_cm:[60,24,62],tar:39,process:[62,49],high:5,xmin:[43,14],tarbal:[39,16,18],springel:62,get_densitymap_on_cylindrical_3d_grid:43,miyamoto:[7,47],serial:46,delai:5,gcc:[22,45],cmcenter:62,getmassmap:14,get_nbodi:62,palette_nam:[20,14],instead:[62,33,49,26,5,45],king_surface_density_old:64,mpi_len:41,from_num:62,mpi_readandsendarrai:41,frustum:4,textiobas:[],bresenham:2,pyarrai:33,surfac:[64,62,47,43,45],tier:5,neps_d:7,ldensiti:28,physic:[45,43,5,60,61,62,14,65],velocit:[62,65],"05811936674e":45,light:[45,20,14,6,49],correspond:[44,33,49,23,41,43,7,59,5,11,61,62,14],element:[62,45,41],issu:[],binx:41,allow:[62,65,26,5],fits_rec:5,king_profile_rz:64,l_max:[24,14],design7:22,movi:[12,16,22,9],move:[62,45],pyraf:5,bufferediobas:[],unitsparamet:[24,32,8,62],mpi_min:41,segement:23,chosen:[],tstart:62,mpi_pprint:41,pixel:14,handl:[],mpdule:57,dai:62,dat:[1,49,24,7,26,5,45],testal:[],front:5,desnumngb:62,sobol:54,recarrai:5,parameterfil:8,count_blank:5,mpi_iprint:41,nfw:[7,64],astronom:5,mode:[45,41,24,9,5,62,14],writefit:[],circular:[28,47],whistogram:33,chunk:[],rho0:64,our:49,meanweight:62,special:[22,62],out:5,variabl:[62,8,45,41],"_vlf":5,get_ob:4,nodes_info:62,develop:[12,16,53],approch:65,identifi:45,spline:33,rel:62,rec:[12,16,31],matric:[2,14],pyfit:[12,16,5],insid:[62,4],"3807e":60,manipul:[65,5],msden:62,get_interpolation_on_cylindrical_2dv_grid:43,get_numbermap_on_cylindrical_2dh_grid:43,dictionari:[44,26,5,60,62,14],releas:45,rhob:[7,64],ymax:43,integrate1:30,integrate2:30,put:[],get_velocities_from_virial_approxim:62,counterpart:[],length:[57,7,52,5],geometri:[12,16,4],dmode:62,endors:5,set_npart:62,"_allhdu":5,mpi_oldreadandsendarrai:41,arctan2:43,generic2c_mr:64,facil:65,l_color:24,strict:[],sobol_sequ:54,licens:[45,5],system:[38,5,60,62,63
,65],messag:5,getallidq:57,unitsparameterfil:8,termin:[],"final":[62,8,45,49],streelib:[12,16,56],shell:7,recongn:32,obsrevaz:45,exactli:[],rsp:[62,45],get_histori:5,rbox:62,see:[45,24,8,5,62,64],structur:5,charact:52,real_numngb:62,incircumcircl:23,pr_fct:7,get_densitymap_on_cylindrical_2dv_grid:43,stdin:26,clip:[24,62,4],nbodydefaultqq:[],respons:62,clearli:49,sigma_vz:62,have:[33,49,7,8,26,5,62],tabl:[16,32,5,41],need:[45,49,39,22,64,5,62,14],treelib:[12,16,46],errorurlopen:5,rmin:[7,62,14],min:[24,14],diverg:64,writestr:52,blockingioerror:[],builtin:[],fileio:[],configdir:8,box_opt:14,singl:52,arument:41,unless:[],get_volumemap_on_cylindrical_3d_grid:43,"1e11":45,getmeanmap:14,mplayer:22,write_ascii_valu:44,thi:[1,32,5,49,45,41,4,6,7,57,8,26,11,62,14],"class":[0,44,32,52,20,55,9,5,60,62,45],getsigmavalmap:14,homogen:[7,36],stereo:65,mkmap3dsortedsph:2,ura:11,url:5,gather:[62,49,41],pipe:[],snapshot:45,determin:[7,62],m_neutron:60,peano:[42,62],float32:[45,41],deafult_buffer_s:[],nmin:14,getallpositionsq:57,add_box:14,text:5,verbos:[7,5],computeisocontour:23,szr:62,inithsml:57,useblank:5,get_dic:44,mxntpe:45,dispert:62,isotherm:[7,64],exonenti:7,verifyerror:5,setfield:5,print_filenam:62,pypi:22,accel:62,rainbow4:45,beam:7,acces:45,filter_opt:24,increas:[62,5],drawytick:14,enabl:[],extnam:5,integr:[30,38,43,57,61,62],contain:[32,49,23,5,43,7,8,26,41,62,14,65],view:[24,62,4,45,5],easyli:45,npart_tot:[62,45],writeabl:[],frame:14,bufferedwrit:[],particl:[1,2,16,45,49,4,65,43,7,25,41,40,62,14,57],statu:[62,26],lut:6,extend:5,correctli:49,numer:[],written:[49,52,5],getallmass:57,theta:[43,14],lum:[62,14],pathonpath:[],kei:[42,5],cosmictime_a:3,localsystem_of_unit:[3,62],addit:[1,22],invgetr:14,rtype:5,equal:[7,62,14],etc:[62,65,5],instanc:[62,45],equat:[28,62,47],freeli:32,comment:5,chmod:1,fromtask:41,histocenter2:62,pythonwar:22,readdatablock:[],evalu:62,open_and_read:62,writeto:5,compos:62,read_param:[],montecarlo:[],immedi:[],create_lin:2,iobas:[],tranform:14,togeth:[],vmin:62,set_ftyp:[62,45],present:5,critical_energy_spec:45,readstr:52,dx_mean:62,sph2cart:62,align:[62,4],defin:[45,52,7,41,60,62,14],triaxial:7,observ:[24,62,4],layer:5,write_arrai:26,ekin:62,site:[24,32,8,26],default_buffer_s:[],unit_:60,unneed:[],scienc:5,generic_mx:[7,36],let:[45,49],welcom:16,generic_mr:[7,36],sqrt:43,member:[],python:[1,49,16,52,39,22,26,40,45,65],mpi_histogram:41,unitveloc:24,slave:[],expans:35,effect:[],hdulist:5,"_card_with_continu":5,newunit:60,set_filenam:62,expand:33,off:[24,62],center:[45,62,43,14,49],cosmolib:[12,16,35],well:[22,62,8],exampl:[1,45,18,16,52,8,26,5,12,13,14],command:[1,32,49,39,8,13,65],interpol:[33,43],undefin:5,usual:[1,62,52],distanc:[24,62,4,14,49],less:[],mpi_oldgatherandwritearrai:41,unitlst:60,obtain:[60,13,49,45,62],pythonx:[],sden:62,expon:[],skipe:26,exempl:45,script:[1,49,16,22,24,8,40,65],add:[62,2,49,14,5],densiti:[45,47,3,36,43,7,57,64,10,11,28,62,14,65],match:5,momemtum:14,dest:41,piec:5,height:[44,62,14],recurs:[60,41],python2:[24,8,26],loss:[],rotz:[25,33],like:[62,8,65,45],fits_record:5,"1095e":60,get_lambda_normalized_from_temperature_feh:30,page:16,theta_xyz:62,burkert_mr:64,plummer_mr:64,linux:[45,18],self:[60,62],get_r_interpolation_on_cylindrical_2dv_grid:43,unit_g:60,"export":57,nagai:[7,47],unit_c:60,flush:5,proper:5,home:[24,45,8,26,6],transport:5,unit_j:60,tmp:26,tmu:62,lead:[],p_name:62,unitsfil:62,mpi_argmin:41,estim:62,leav:[],mkmap3dw:2,tmin:62,slight:60,imaginari:[],usag:5,peas:26,get_volumemap_on_spherical_1d_grid:43,adot_a:3,offset:[],mkmap3dn:2,
continu:[33,43],about:[7,45],socket:[],cardlist:5,column:[26,5],libqt:[12,16,20],get_numbermap_on_cylindrical_2dv_grid:43,histrogram:[62,33],constructor:[62,5],own:[32,49,5],gadget_z40:45,convolut:[25,57],lenght:[62,41],hsml:[62,57],automat:[32,14,45],mplot:14,diagon:62,xyz2peano:42,merg:[40,16,45],val:[62,43,14],flag_entr_:45,transfer:41,mkmovi:[],"var":62,log10:45,getallpotenti:57,"function":[45,33,49,3,30,5,22,35,7,64,26,41,62,14],quaddinterp1d:33,interest:[],bufferedrwpair:[],extract1dmeanfrom2dmap:14,constain:60,histogram:[62,33,14,41],beetween:14,overflow:5,partilc:57,cooling_from_nh_and_t:10,count:62,getallvelocitiesq:57,getpreferredencod:[],whether:[],pnbody_show:[24,13,32],smooth:[2,57],displai:[16,32,49,20,37,24,8,45,62,65],get_potentialmap_on_cylindrical_3d_grid:43,dmp:26,record:[52,5],below:45,limit:65,otherwis:5,unformat:52,pil:[22,20,14],get_integr:[61,43],pio:[45,1,62,26,49],"int":[47,52,24,41,28,62,14],dure:30,get_lineardensitymap_on_spherical_1d_grid:43,implement:[64,65],circum:23,inf:62,tick:14,detail:5,new_valu:5,other:[45,62,49,26,5],bool:26,cchar:26,stat:[],repeat:[],mkmap2dw:2,ndarrai:[26,5],mkmap2dn:2,log:[7,62,24,14,55],tessel:[12,16,23],matrix:[33,20,4,43,26,62,14],"99792458e8":60,hnd:33,nrow:5,getalldens:57,getallveloc:57,portion:5,omega0:[3,45]},objtypes:{"0":"py:module","1":"py:function","2":"py:method","3":"py:class","4":"py:data","5":"py:exception","6":"py:attribute","7":"np:module","8":"np:function","9":"np:method","10":"np:class","11":"np:data","12":"np:exception","13":"np:attribute"},titles:["the talkgdisp module","Using pNbody with scripts","the C mapping module","the cosmo module","the geometry module","the pyfits module","the palette module","the ic module","Check the installation","the Movie module","the C coolinglib module","the thermodyn module","Reference","Examples","the libutil module","the C ptreelib module (obsolete)","Welcome to pNbody’s documentation!","Generating initial conditions","Installation","the C montecarlolib module","the libqt module","the C asciilib module","Prerequiste","the C tessel module","Default parameters","the C nbodymodule module","the io module","Generating grids","the plummer module","the fourier module","the C cooling_with_metals module","the rec module","Default configuration","the C myNumeric module","Setting a format file","the C cosmolib module","the C iclib module","Display Models","the C nbdrklib module","Installing from source","Tutorial","the mpi module","the C peanolib module","the libgrid module","the param module","Using pNbody with the python interpreter","the C treelib module","the libmiyamoto module","How to deal with units ?","Using pNbody in parallel","the C mapping-omp module (under construction)","the cooling module","the fortranfile module","the C pmlib module (never developped)","the C pygsl module","the liblog module","the C streelib module (under construction)","the C PyGadget module","the parameters module","the phot module","the units module","the libdisk module","the main module","the ctes module","the profiles module","Overview"],objnames:{"0":"Python module","1":"Python function","2":"Python method","3":"Python class","4":"Python data","5":"Python exception","6":"Python attribute","7":"Python module","8":"Python function","9":"Python method","10":"Python class","11":"Python data","12":"Python exception","13":"Python 
attribute"},filenames:["rst/TalkgdispModule","rst/Tutorial_scripts","rst/C_mapping","rst/CosmoModule","rst/GeometryModule","rst/PyfitsModule","rst/PaletteModule","rst/IcModule","rst/Test_the_installation","rst/MovieModule","rst/C_coolinglib","rst/ThermodynModule","rst/Reference","rst/Examples","rst/LibutilModule","rst/C_ptreelib","index","rst/InitialConditions","rst/Installation","rst/C_montecarlolib","rst/LibqtModule","rst/C_asciilib","rst/Prerequiste","rst/C_tessel","rst/Default_parameters","rst/C_nbodymodule","rst/IoModule","rst/Grids","rst/PlummerModule","rst/FourierModule","rst/C_cooling_with_metals","rst/RecModule","rst/Default_configurations","rst/C_myNumeric","rst/Formats","rst/C_cosmolib","rst/C_iclib","rst/Display","rst/C_nbdrklib","rst/Installing_from_tarball","rst/Tutorial","rst/MpiModule","rst/C_peanolib","rst/LibgridModule","rst/ParamModule","rst/Tutorial_interpreter","rst/C_treelib","rst/LibmiyamotoModule","rst/Units","rst/Tutorial_parallel","rst/C_mapping-omp","rst/CoolingModule","rst/FortranfileModule","rst/C_pmlib","rst/C_pygsl","rst/LiblogModule","rst/C_streelib","rst/C_PyGadget","rst/ParameterModule","rst/PhotModule","rst/UnitsModule","rst/LibdiskModule","rst/MainModule","rst/CtesModule","rst/ProfilesModule","rst/Overview"]}) \ No newline at end of file diff --git a/Doc/newdoc/_build/latex/Makefile b/Doc/newdoc/_build/latex/Makefile new file mode 100644 index 0000000..f219a2f --- /dev/null +++ b/Doc/newdoc/_build/latex/Makefile @@ -0,0 +1,64 @@ +# Makefile for Sphinx LaTeX output + +ALLDOCS = $(basename $(wildcard *.tex)) +ALLPDF = $(addsuffix .pdf,$(ALLDOCS)) +ALLDVI = $(addsuffix .dvi,$(ALLDOCS)) + +# Prefix for archive names +ARCHIVEPRREFIX = +# Additional LaTeX options +LATEXOPTS = + +all: $(ALLPDF) +all-pdf: $(ALLPDF) +all-dvi: $(ALLDVI) +all-ps: all-dvi + for f in *.dvi; do dvips $$f; done +all-pdf-ja: $(wildcard *.tex) + ebb $(wildcard *.pdf *.png *.gif *.jpeg) + platex -kanji=utf8 $(LATEXOPTS) '$<' + platex -kanji=utf8 $(LATEXOPTS) '$<' + platex -kanji=utf8 $(LATEXOPTS) '$<' + -mendex -U -f -d '$(basename $<).dic' -s python.ist '$(basename $<).idx' + platex -kanji=utf8 $(LATEXOPTS) '$<' + platex -kanji=utf8 $(LATEXOPTS) '$<' + dvipdfmx '$(basename $<).dvi' + +zip: all-$(FMT) + mkdir $(ARCHIVEPREFIX)docs-$(FMT) + cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) + zip -q -r -9 $(ARCHIVEPREFIX)docs-$(FMT).zip $(ARCHIVEPREFIX)docs-$(FMT) + rm -r $(ARCHIVEPREFIX)docs-$(FMT) + +tar: all-$(FMT) + mkdir $(ARCHIVEPREFIX)docs-$(FMT) + cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) + tar cf $(ARCHIVEPREFIX)docs-$(FMT).tar $(ARCHIVEPREFIX)docs-$(FMT) + rm -r $(ARCHIVEPREFIX)docs-$(FMT) + +bz2: tar + bzip2 -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar + +# The number of LaTeX runs is quite conservative, but I don't expect it +# to get run often, so the little extra time won't hurt. 
+%.dvi: %.tex + latex $(LATEXOPTS) '$<' + latex $(LATEXOPTS) '$<' + latex $(LATEXOPTS) '$<' + -makeindex -s python.ist '$(basename $<).idx' + latex $(LATEXOPTS) '$<' + latex $(LATEXOPTS) '$<' + +%.pdf: %.tex + pdflatex $(LATEXOPTS) '$<' + pdflatex $(LATEXOPTS) '$<' + pdflatex $(LATEXOPTS) '$<' + -makeindex -s python.ist '$(basename $<).idx' + pdflatex $(LATEXOPTS) '$<' + pdflatex $(LATEXOPTS) '$<' + +clean: + rm -f *.dvi *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla + +.PHONY: all all-pdf all-dvi all-ps clean + diff --git a/Doc/newdoc/_build/latex/cosmo1.png b/Doc/newdoc/_build/latex/cosmo1.png new file mode 100644 index 0000000..9717228 Binary files /dev/null and b/Doc/newdoc/_build/latex/cosmo1.png differ diff --git a/Doc/newdoc/_build/latex/edge-on-disk4.png b/Doc/newdoc/_build/latex/edge-on-disk4.png new file mode 100644 index 0000000..7fd5fe5 Binary files /dev/null and b/Doc/newdoc/_build/latex/edge-on-disk4.png differ diff --git a/Doc/newdoc/_build/latex/fncychap.sty b/Doc/newdoc/_build/latex/fncychap.sty new file mode 100644 index 0000000..9a56c04 --- /dev/null +++ b/Doc/newdoc/_build/latex/fncychap.sty @@ -0,0 +1,683 @@ +%%% Copyright Ulf A. Lindgren +%%% +%%% Note Premission is granted to modify this file under +%%% the condition that it is saved using another +%%% file and package name. +%%% +%%% Revision 1.1 (1997) +%%% +%%% Jan. 8th Modified package name base date option +%%% Jan. 22th Modified FmN and FmTi for error in book.cls +%%% \MakeUppercase{#}->{\MakeUppercase#} +%%% Apr. 6th Modified Lenny option to prevent undesired +%%% skip of line. +%%% Nov. 8th Fixed \@chapapp for AMS +%%% +%%% Revision 1.2 (1998) +%%% +%%% Feb. 11th Fixed appendix problem related to Bjarne +%%% Aug. 11th Fixed problem related to 11pt and 12pt +%%% suggested by Tomas Lundberg. THANKS! +%%% +%%% Revision 1.3 (2004) +%%% Sep. 20th problem with frontmatter, mainmatter and +%%% backmatter, pointed out by Lapo Mori +%%% +%%% Revision 1.31 (2004) +%%% Sep. 21th problem with the Rejne definition streched text +%%% caused ugly gaps in the vrule aligned with the title +%%% text. Kindly pointed out to me by Hendri Adriaens +%%% +%%% Revision 1.32 (2005) +%%% Jun. 23th compatibility problem with the KOMA class 'scrbook.cls' +%%% a remedy is a redefinition of '\@schapter' in +%%% line with that used in KOMA. The problem was pointed +%%% out to me by Mikkel Holm Olsen +%%% +%%% Revision 1.33 (2005) +%%% Aug. 9th misspelled ``TWELV'' corrected, the error was pointed +%%% out to me by George Pearson +%%% +%%% Revision 1.34 (2007) +%%% Added an alternative to Lenny provided by Peter +%%% Osborne (2005-11-28) +%%% Corrected front, main and back matter, based on input +%%% from Bas van Gils (2006-04-24) +%%% Jul. 30th Added Bjornstrup option provided by Jean-Marc +%%% Francois (2007-01-05). +%%% Reverted to \MakeUppercase{#} see rev 1.1, solved +%%% problem with MakeUppercase and MakeLowercase pointed +%%% out by Marco Feuerstein (2007-06-06) + + +%%% Last modified Jul. 
2007 + +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesPackage{fncychap} + [2007/07/30 v1.34 + LaTeX package (Revised chapters)] + +%%%% For conditional inclusion of color +\newif\ifusecolor +\usecolorfalse + + + +%%%% DEFINITION OF Chapapp variables +\newcommand{\CNV}{\huge\bfseries} +\newcommand{\ChNameVar}[1]{\renewcommand{\CNV}{#1}} + + +%%%% DEFINITION OF TheChapter variables +\newcommand{\CNoV}{\huge\bfseries} +\newcommand{\ChNumVar}[1]{\renewcommand{\CNoV}{#1}} + +\newif\ifUCN +\UCNfalse +\newif\ifLCN +\LCNfalse +\def\ChNameLowerCase{\LCNtrue\UCNfalse} +\def\ChNameUpperCase{\UCNtrue\LCNfalse} +\def\ChNameAsIs{\UCNfalse\LCNfalse} + +%%%%% Fix for AMSBook 971008 + +\@ifundefined{@chapapp}{\let\@chapapp\chaptername}{} + + +%%%%% Fix for Bjarne and appendix 980211 + +\newif\ifinapp +\inappfalse +\renewcommand\appendix{\par + \setcounter{chapter}{0}% + \setcounter{section}{0}% + \inapptrue% + \renewcommand\@chapapp{\appendixname}% + \renewcommand\thechapter{\@Alph\c@chapter}} + +%%%%% Fix for frontmatter, mainmatter, and backmatter 040920 + +\@ifundefined{@mainmatter}{\newif\if@mainmatter \@mainmattertrue}{} + +%%%%% + + + +\newcommand{\FmN}[1]{% +\ifUCN + {\MakeUppercase{#1}}\LCNfalse +\else + \ifLCN + {\MakeLowercase{#1}}\UCNfalse + \else #1 + \fi +\fi} + + +%%%% DEFINITION OF Title variables +\newcommand{\CTV}{\Huge\bfseries} +\newcommand{\ChTitleVar}[1]{\renewcommand{\CTV}{#1}} + +%%%% DEFINITION OF the basic rule width +\newlength{\RW} +\setlength{\RW}{1pt} +\newcommand{\ChRuleWidth}[1]{\setlength{\RW}{#1}} + +\newif\ifUCT +\UCTfalse +\newif\ifLCT +\LCTfalse +\def\ChTitleLowerCase{\LCTtrue\UCTfalse} +\def\ChTitleUpperCase{\UCTtrue\LCTfalse} +\def\ChTitleAsIs{\UCTfalse\LCTfalse} +\newcommand{\FmTi}[1]{% +\ifUCT + {\MakeUppercase{#1}}\LCTfalse +\else + \ifLCT + {\MakeLowercase{#1}}\UCTfalse + \else {#1} + \fi +\fi} + + + +\newlength{\mylen} +\newlength{\myhi} +\newlength{\px} +\newlength{\py} +\newlength{\pyy} +\newlength{\pxx} + + +\def\mghrulefill#1{\leavevmode\leaders\hrule\@height #1\hfill\kern\z@} + +\newcommand{\DOCH}{% + \CNV\FmN{\@chapapp}\space \CNoV\thechapter + \par\nobreak + \vskip 20\p@ + } +\newcommand{\DOTI}[1]{% + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@ + } +\newcommand{\DOTIS}[1]{% + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@ + } + +%%%%%% SONNY DEF + +\DeclareOption{Sonny}{% + \ChNameVar{\Large\sf} + \ChNumVar{\Huge} + \ChTitleVar{\Large\sf} + \ChRuleWidth{0.5pt} + \ChNameUpperCase + \renewcommand{\DOCH}{% + \raggedleft + \CNV\FmN{\@chapapp}\space \CNoV\thechapter + \par\nobreak + \vskip 40\p@} + \renewcommand{\DOTI}[1]{% + \CTV\raggedleft\mghrulefill{\RW}\par\nobreak + \vskip 5\p@ + \CTV\FmTi{#1}\par\nobreak + \mghrulefill{\RW}\par\nobreak + \vskip 40\p@} + \renewcommand{\DOTIS}[1]{% + \CTV\raggedleft\mghrulefill{\RW}\par\nobreak + \vskip 5\p@ + \CTV\FmTi{#1}\par\nobreak + \mghrulefill{\RW}\par\nobreak + \vskip 40\p@} +} + +%%%%%% LENNY DEF + +\DeclareOption{Lenny}{% + + \ChNameVar{\fontsize{14}{16}\usefont{OT1}{phv}{m}{n}\selectfont} + \ChNumVar{\fontsize{60}{62}\usefont{OT1}{ptm}{m}{n}\selectfont} + \ChTitleVar{\Huge\bfseries\rm} + \ChRuleWidth{1pt} + \renewcommand{\DOCH}{% + \settowidth{\px}{\CNV\FmN{\@chapapp}} + \addtolength{\px}{2pt} + \settoheight{\py}{\CNV\FmN{\@chapapp}} + \addtolength{\py}{1pt} + + \settowidth{\mylen}{\CNV\FmN{\@chapapp}\space\CNoV\thechapter} + \addtolength{\mylen}{1pt} + \settowidth{\pxx}{\CNoV\thechapter} + \addtolength{\pxx}{-1pt} + + \settoheight{\pyy}{\CNoV\thechapter} + \addtolength{\pyy}{-2pt} + \setlength{\myhi}{\pyy} + 
\addtolength{\myhi}{-1\py} + \par + \parbox[b]{\textwidth}{% + \rule[\py]{\RW}{\myhi}% + \hskip -\RW% + \rule[\pyy]{\px}{\RW}% + \hskip -\px% + \raggedright% + \CNV\FmN{\@chapapp}\space\CNoV\thechapter% + \hskip1pt% + \mghrulefill{\RW}% + \rule{\RW}{\pyy}\par\nobreak% + \vskip -\baselineskip% + \vskip -\pyy% + \hskip \mylen% + \mghrulefill{\RW}\par\nobreak% + \vskip \pyy}% + \vskip 20\p@} + + + \renewcommand{\DOTI}[1]{% + \raggedright + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@} + + \renewcommand{\DOTIS}[1]{% + \raggedright + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@} + } + +%%%%%% Peter Osbornes' version of LENNY DEF + +\DeclareOption{PetersLenny}{% + +% five new lengths +\newlength{\bl} % bottom left : orig \space +\setlength{\bl}{6pt} +\newcommand{\BL}[1]{\setlength{\bl}{#1}} +\newlength{\br} % bottom right : orig 1pt +\setlength{\br}{1pt} +\newcommand{\BR}[1]{\setlength{\br}{#1}} +\newlength{\tl} % top left : orig 2pt +\setlength{\tl}{2pt} +\newcommand{\TL}[1]{\setlength{\tl}{#1}} +\newlength{\trr} % top right :orig 1pt +\setlength{\trr}{1pt} +\newcommand{\TR}[1]{\setlength{\trr}{#1}} +\newlength{\blrule} % top right :orig 1pt +\setlength{\trr}{0pt} +\newcommand{\BLrule}[1]{\setlength{\blrule}{#1}} + + + \ChNameVar{\fontsize{14}{16}\usefont{OT1}{phv}{m}{n}\selectfont} + \ChNumVar{\fontsize{60}{62}\usefont{OT1}{ptm}{m}{n}\selectfont} + \ChTitleVar{\Huge\bfseries\rm} + \ChRuleWidth{1pt} +\renewcommand{\DOCH}{% + + +%%%%%%% tweaks for 1--9 and A--Z +\ifcase\c@chapter\relax% +\or\BL{-3pt}\TL{-4pt}\BR{0pt}\TR{-6pt}%1 +\or\BL{0pt}\TL{-4pt}\BR{2pt}\TR{-4pt}%2 +\or\BL{0pt}\TL{-4pt}\BR{2pt}\TR{-4pt}%3 +\or\BL{0pt}\TL{5pt}\BR{2pt}\TR{-4pt}%4 +\or\BL{0pt}\TL{3pt}\BR{2pt}\TR{-4pt}%5 +\or\BL{-1pt}\TL{0pt}\BR{2pt}\TR{-2pt}%6 +\or\BL{0pt}\TL{-3pt}\BR{2pt}\TR{-2pt}%7 +\or\BL{0pt}\TL{-3pt}\BR{2pt}\TR{-2pt}%8 +\or\BL{0pt}\TL{-3pt}\BR{-4pt}\TR{-2pt}%9 +\or\BL{-3pt}\TL{-3pt}\BR{2pt}\TR{-7pt}%10 +\or\BL{-6pt}\TL{-6pt}\BR{0pt}\TR{-9pt}%11 +\or\BL{-6pt}\TL{-6pt}\BR{2pt}\TR{-7pt}%12 +\or\BL{-5pt}\TL{-5pt}\BR{0pt}\TR{-9pt}%13 +\or\BL{-6pt}\TL{-6pt}\BR{0pt}\TR{-9pt}%14 +\or\BL{-3pt}\TL{-3pt}\BR{3pt}\TR{-6pt}%15 +\or\BL{-3pt}\TL{-3pt}\BR{3pt}\TR{-6pt}%16 +\or\BL{-5pt}\TL{-3pt}\BR{-8pt}\TR{-6pt}%17 +\or\BL{-5pt}\TL{-5pt}\BR{0pt}\TR{-9pt}%18 +\or\BL{-3pt}\TL{-3pt}\BR{-6pt}\TR{-9pt}%19 +\or\BL{0pt}\TL{0pt}\BR{0pt}\TR{-5pt}%20 +\fi + +\ifinapp\ifcase\c@chapter\relax% +\or\BL{0pt}\TL{14pt}\BR{5pt}\TR{-19pt}%A +\or\BL{0pt}\TL{-5pt}\BR{-3pt}\TR{-8pt}%B +\or\BL{-3pt}\TL{-2pt}\BR{1pt}\TR{-6pt}\BLrule{0pt}%C +\or\BL{0pt}\TL{-5pt}\BR{-3pt}\TR{-8pt}\BLrule{0pt}%D +\or\BL{0pt}\TL{-5pt}\BR{2pt}\TR{-3pt}%E +\or\BL{0pt}\TL{-5pt}\BR{-10pt}\TR{-1pt}%F +\or\BL{-3pt}\TL{0pt}\BR{0pt}\TR{-7pt}%G +\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%H +\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%I +\or\BL{2pt}\TL{0pt}\BR{-3pt}\TR{1pt}%J +\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%K +\or\BL{0pt}\TL{-5pt}\BR{2pt}\TR{-19pt}%L +\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%M +\or\BL{0pt}\TL{-5pt}\BR{-2pt}\TR{-1pt}%N +\or\BL{-3pt}\TL{-2pt}\BR{-3pt}\TR{-11pt}%O +\or\BL{0pt}\TL{-5pt}\BR{-9pt}\TR{-3pt}%P +\or\BL{-3pt}\TL{-2pt}\BR{-3pt}\TR{-11pt}%Q +\or\BL{0pt}\TL{-5pt}\BR{4pt}\TR{-8pt}%R +\or\BL{-2pt}\TL{-2pt}\BR{-2pt}\TR{-7pt}%S +\or\BL{-3pt}\TL{0pt}\BR{-5pt}\TR{4pt}\BLrule{8pt}%T +\or\BL{-7pt}\TL{-11pt}\BR{-5pt}\TR{-7pt}\BLrule{0pt}%U +\or\BL{-14pt}\TL{-5pt}\BR{-14pt}\TR{-1pt}\BLrule{14pt}%V +\or\BL{-10pt}\TL{-9pt}\BR{-13pt}\TR{-3pt}\BLrule{7pt}%W +\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}\BLrule{0pt}%X +\or\BL{-6pt}\TL{-4pt}\BR{-7pt}\TR{1pt}\BLrule{7pt}%Y 
+\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}\BLrule{0pt}%Z +\fi\fi +%%%%%%% + \settowidth{\px}{\CNV\FmN{\@chapapp}} + \addtolength{\px}{\tl} %MOD change 2pt to \tl + \settoheight{\py}{\CNV\FmN{\@chapapp}} + \addtolength{\py}{1pt} + + \settowidth{\mylen}{\CNV\FmN{\@chapapp}\space\CNoV\thechapter} + \addtolength{\mylen}{\trr}% MOD change 1pt to \tr + \settowidth{\pxx}{\CNoV\thechapter} + \addtolength{\pxx}{-1pt} + + \settoheight{\pyy}{\CNoV\thechapter} + \addtolength{\pyy}{-2pt} + \setlength{\myhi}{\pyy} + \addtolength{\myhi}{-1\py} + \par + \parbox[b]{\textwidth}{% + \rule[\py]{\RW}{\myhi}% + \hskip -\RW% + \rule[\pyy]{\px}{\RW}% + \hskip -\px% + \raggedright% + \CNV\FmN{\@chapapp}\rule{\blrule}{\RW}\hskip\bl\CNoV\thechapter%MOD +% \CNV\FmN{\@chapapp}\space\CNoV\thechapter %ORIGINAL + \hskip\br% %MOD 1pt to \br + \mghrulefill{\RW}% + \rule{\RW}{\pyy}\par\nobreak% + \vskip -\baselineskip% + \vskip -\pyy% + \hskip \mylen% + \mghrulefill{\RW}\par\nobreak% + \vskip \pyy}% + \vskip 20\p@} + + + \renewcommand{\DOTI}[1]{% + \raggedright + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@} + + \renewcommand{\DOTIS}[1]{% + \raggedright + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@} + } + + +% + + +%%%%%% BJORNSTRUP DEF + +\DeclareOption{Bjornstrup}{% + \usecolortrue + % pzc (Zapf Chancelery) is nice. ppl (Palatino) is cool too. + \ChNumVar{\fontsize{76}{80}\usefont{OT1}{pzc}{m}{n}\selectfont} + \ChTitleVar{\raggedleft\Large\sffamily\bfseries} + + \setlength{\myhi}{10pt} % Space between grey box border and text + \setlength{\mylen}{\textwidth} + \addtolength{\mylen}{-2\myhi} + \renewcommand{\DOCH}{% + \settowidth{\py}{\CNoV\thechapter} + \addtolength{\py}{-10pt} % Amount of space by which the +% % number is shifted right + \fboxsep=0pt% + \colorbox[gray]{.85}{\rule{0pt}{40pt}\parbox[b]{\textwidth}{\hfill}}% + \kern-\py\raise20pt% + \hbox{\color[gray]{.5}\CNoV\thechapter}\\% + } + + \renewcommand{\DOTI}[1]{% + \nointerlineskip\raggedright% + \fboxsep=\myhi% + \vskip-1ex% + \colorbox[gray]{.85}{\parbox[t]{\mylen}{\CTV\FmTi{#1}}}\par\nobreak% + \vskip 40\p@% + } + + \renewcommand{\DOTIS}[1]{% + \fboxsep=0pt + \colorbox[gray]{.85}{\rule{0pt}{40pt}\parbox[b]{\textwidth}{\hfill}}\\% + \nointerlineskip\raggedright% + \fboxsep=\myhi% + \colorbox[gray]{.85}{\parbox[t]{\mylen}{\CTV\FmTi{#1}}}\par\nobreak% + \vskip 40\p@% + } +} + + +%%%%%%% GLENN DEF + + +\DeclareOption{Glenn}{% + \ChNameVar{\bfseries\Large\sf} + \ChNumVar{\Huge} + \ChTitleVar{\bfseries\Large\rm} + \ChRuleWidth{1pt} + \ChNameUpperCase + \ChTitleUpperCase + \renewcommand{\DOCH}{% + \settoheight{\myhi}{\CTV\FmTi{Test}} + \setlength{\py}{\baselineskip} + \addtolength{\py}{\RW} + \addtolength{\py}{\myhi} + \setlength{\pyy}{\py} + \addtolength{\pyy}{-1\RW} + + \raggedright + \CNV\FmN{\@chapapp}\space\CNoV\thechapter + \hskip 3pt\mghrulefill{\RW}\rule[-1\pyy]{2\RW}{\py}\par\nobreak} + + \renewcommand{\DOTI}[1]{% + \addtolength{\pyy}{-4pt} + \settoheight{\myhi}{\CTV\FmTi{#1}} + \addtolength{\myhi}{\py} + \addtolength{\myhi}{-1\RW} + \vskip -1\pyy + \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 2pt + \raggedleft\CTV\FmTi{#1}\par\nobreak + \vskip 80\p@} + +\newlength{\backskip} + \renewcommand{\DOTIS}[1]{% +% \setlength{\py}{10pt} +% \setlength{\pyy}{\py} +% \addtolength{\pyy}{\RW} +% \setlength{\myhi}{\baselineskip} +% \addtolength{\myhi}{\pyy} +% \mghrulefill{\RW}\rule[-1\py]{2\RW}{\pyy}\par\nobreak +% \addtolength{}{} +%\vskip -1\baselineskip +% \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 2pt +% \raggedleft\CTV\FmTi{#1}\par\nobreak +% \vskip 60\p@} +%% Fix suggested by 
Tomas Lundberg + \setlength{\py}{25pt} % eller vad man vill + \setlength{\pyy}{\py} + \setlength{\backskip}{\py} + \addtolength{\backskip}{2pt} + \addtolength{\pyy}{\RW} + \setlength{\myhi}{\baselineskip} + \addtolength{\myhi}{\pyy} + \mghrulefill{\RW}\rule[-1\py]{2\RW}{\pyy}\par\nobreak + \vskip -1\backskip + \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 3pt % + \raggedleft\CTV\FmTi{#1}\par\nobreak + \vskip 40\p@} + } + +%%%%%%% CONNY DEF + +\DeclareOption{Conny}{% + \ChNameUpperCase + \ChTitleUpperCase + \ChNameVar{\centering\Huge\rm\bfseries} + \ChNumVar{\Huge} + \ChTitleVar{\centering\Huge\rm} + \ChRuleWidth{2pt} + + \renewcommand{\DOCH}{% + \mghrulefill{3\RW}\par\nobreak + \vskip -0.5\baselineskip + \mghrulefill{\RW}\par\nobreak + \CNV\FmN{\@chapapp}\space \CNoV\thechapter + \par\nobreak + \vskip -0.5\baselineskip + } + \renewcommand{\DOTI}[1]{% + \mghrulefill{\RW}\par\nobreak + \CTV\FmTi{#1}\par\nobreak + \vskip 60\p@ + } + \renewcommand{\DOTIS}[1]{% + \mghrulefill{\RW}\par\nobreak + \CTV\FmTi{#1}\par\nobreak + \vskip 60\p@ + } + } + +%%%%%%% REJNE DEF + +\DeclareOption{Rejne}{% + + \ChNameUpperCase + \ChTitleUpperCase + \ChNameVar{\centering\Large\rm} + \ChNumVar{\Huge} + \ChTitleVar{\centering\Huge\rm} + \ChRuleWidth{1pt} + \renewcommand{\DOCH}{% + \settoheight{\py}{\CNoV\thechapter} + \parskip=0pt plus 1pt % Set parskip to default, just in case v1.31 + \addtolength{\py}{-1pt} + \CNV\FmN{\@chapapp}\par\nobreak + \vskip 20\p@ + \setlength{\myhi}{2\baselineskip} + \setlength{\px}{\myhi} + \addtolength{\px}{-1\RW} + \rule[-1\px]{\RW}{\myhi}\mghrulefill{\RW}\hskip + 10pt\raisebox{-0.5\py}{\CNoV\thechapter}\hskip 10pt\mghrulefill{\RW}\rule[-1\px]{\RW}{\myhi}\par\nobreak + \vskip -3\p@% Added -2pt vskip to correct for streched text v1.31 + } + \renewcommand{\DOTI}[1]{% + \setlength{\mylen}{\textwidth} + \parskip=0pt plus 1pt % Set parskip to default, just in case v1.31 + \addtolength{\mylen}{-2\RW} + {\vrule width\RW}\parbox{\mylen}{\CTV\FmTi{#1}}{\vrule width\RW}\par\nobreak% + \vskip -3pt\rule{\RW}{2\baselineskip}\mghrulefill{\RW}\rule{\RW}{2\baselineskip}% + \vskip 60\p@% Added -2pt in vskip to correct for streched text v1.31 + } + \renewcommand{\DOTIS}[1]{% + \setlength{\py}{\fboxrule} + \setlength{\fboxrule}{\RW} + \setlength{\mylen}{\textwidth} + \addtolength{\mylen}{-2\RW} + \fbox{\parbox{\mylen}{\vskip 2\baselineskip\CTV\FmTi{#1}\par\nobreak\vskip \baselineskip}} + \setlength{\fboxrule}{\py} + \vskip 60\p@ + } + } + + +%%%%%%% BJARNE DEF + +\DeclareOption{Bjarne}{% + \ChNameUpperCase + \ChTitleUpperCase + \ChNameVar{\raggedleft\normalsize\rm} + \ChNumVar{\raggedleft \bfseries\Large} + \ChTitleVar{\raggedleft \Large\rm} + \ChRuleWidth{1pt} + + +%% Note thechapter -> c@chapter fix appendix bug +%% Fixed misspelled 12 + + \newcounter{AlphaCnt} + \newcounter{AlphaDecCnt} + \newcommand{\AlphaNo}{% + \ifcase\number\theAlphaCnt + \ifnum\c@chapter=0 + ZERO\else{}\fi + \or ONE\or TWO\or THREE\or FOUR\or FIVE + \or SIX\or SEVEN\or EIGHT\or NINE\or TEN + \or ELEVEN\or TWELVE\or THIRTEEN\or FOURTEEN\or FIFTEEN + \or SIXTEEN\or SEVENTEEN\or EIGHTEEN\or NINETEEN\fi +} + + \newcommand{\AlphaDecNo}{% + \setcounter{AlphaDecCnt}{0} + \@whilenum\number\theAlphaCnt>0\do + {\addtocounter{AlphaCnt}{-10} + \addtocounter{AlphaDecCnt}{1}} + \ifnum\number\theAlphaCnt=0 + \else + \addtocounter{AlphaDecCnt}{-1} + \addtocounter{AlphaCnt}{10} + \fi + + + \ifcase\number\theAlphaDecCnt\or TEN\or TWENTY\or THIRTY\or + FORTY\or FIFTY\or SIXTY\or SEVENTY\or EIGHTY\or NINETY\fi + } + \newcommand{\TheAlphaChapter}{% + + 
\ifinapp + \thechapter + \else + \setcounter{AlphaCnt}{\c@chapter} + \ifnum\c@chapter<20 + \AlphaNo + \else + \AlphaDecNo\AlphaNo + \fi + \fi + } + \renewcommand{\DOCH}{% + \mghrulefill{\RW}\par\nobreak + \CNV\FmN{\@chapapp}\par\nobreak + \CNoV\TheAlphaChapter\par\nobreak + \vskip -1\baselineskip\vskip 5pt\mghrulefill{\RW}\par\nobreak + \vskip 20\p@ + } + \renewcommand{\DOTI}[1]{% + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@ + } + \renewcommand{\DOTIS}[1]{% + \CTV\FmTi{#1}\par\nobreak + \vskip 40\p@ + } +} + +\DeclareOption*{% + \PackageWarning{fancychapter}{unknown style option} + } + +\ProcessOptions* \relax + +\ifusecolor + \RequirePackage{color} +\fi +\def\@makechapterhead#1{% + \vspace*{50\p@}% + {\parindent \z@ \raggedright \normalfont + \ifnum \c@secnumdepth >\m@ne + \if@mainmatter%%%%% Fix for frontmatter, mainmatter, and backmatter 040920 + \DOCH + \fi + \fi + \interlinepenalty\@M + \if@mainmatter%%%%% Fix for frontmatter, mainmatter, and backmatter 060424 + \DOTI{#1}% + \else% + \DOTIS{#1}% + \fi + }} + + +%%% Begin: To avoid problem with scrbook.cls (fncychap version 1.32) + +%%OUT: +%\def\@schapter#1{\if@twocolumn +% \@topnewpage[\@makeschapterhead{#1}]% +% \else +% \@makeschapterhead{#1}% +% \@afterheading +% \fi} + +%%IN: +\def\@schapter#1{% +\if@twocolumn% + \@makeschapterhead{#1}% +\else% + \@makeschapterhead{#1}% + \@afterheading% +\fi} + +%%% End: To avoid problem with scrbook.cls (fncychap version 1.32) + +\def\@makeschapterhead#1{% + \vspace*{50\p@}% + {\parindent \z@ \raggedright + \normalfont + \interlinepenalty\@M + \DOTIS{#1} + \vskip 40\p@ + }} + +\endinput + + diff --git a/Doc/newdoc/_build/latex/pNbody.aux b/Doc/newdoc/_build/latex/pNbody.aux new file mode 100644 index 0000000..13fe1b3 --- /dev/null +++ b/Doc/newdoc/_build/latex/pNbody.aux @@ -0,0 +1,105 @@ +\relax +\ifx\hyper@anchor\@undefined +\global \let \oldcontentsline\contentsline +\gdef \contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}} +\global \let \oldnewlabel\newlabel +\gdef \newlabel#1#2{\newlabelxx{#1}#2} +\gdef \newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} +\AtEndDocument{\let \contentsline\oldcontentsline +\let \newlabel\oldnewlabel} +\else +\global \let \hyper@last\relax +\fi + +\select@language{english} +\@writefile{toc}{\select@language{english}} +\@writefile{lof}{\select@language{english}} +\@writefile{lot}{\select@language{english}} +\newlabel{index::doc}{{}{1}{\relax }{section*.2}{}} +\@writefile{toc}{\contentsline {chapter}{\numberline {1}Overview}{3}{chapter.1}} +\@writefile{lof}{\addvspace {10\p@ }} +\@writefile{lot}{\addvspace {10\p@ }} +\newlabel{rst/Overview:welcome-to-pnbody-s-documentation}{{1}{3}{Overview\relax }{chapter.1}{}} +\newlabel{rst/Overview::doc}{{1}{3}{Overview\relax }{chapter.1}{}} +\newlabel{rst/Overview:overview}{{1}{3}{Overview\relax }{chapter.1}{}} +\@writefile{toc}{\contentsline {chapter}{\numberline {2}Installation}{5}{chapter.2}} +\@writefile{lof}{\addvspace {10\p@ }} +\@writefile{lot}{\addvspace {10\p@ }} +\newlabel{rst/Installation:installation}{{2}{5}{Installation\relax }{chapter.2}{}} +\newlabel{rst/Installation::doc}{{2}{5}{Installation\relax }{chapter.2}{}} +\@writefile{toc}{\contentsline {section}{\numberline {2.1}Prerequiste}{5}{section.2.1}} +\newlabel{rst/Prerequiste:prerequiste}{{2.1}{5}{Prerequiste\relax }{section.2.1}{}} +\newlabel{rst/Prerequiste::doc}{{2.1}{5}{Prerequiste\relax }{section.2.1}{}} +\@writefile{toc}{\contentsline {section}{\numberline {2.2}Installing from source}{6}{section.2.2}} 
+\newlabel{rst/Installing_from_tarball:installing-from-source}{{2.2}{6}{Installing from source\relax }{section.2.2}{}} +\newlabel{rst/Installing_from_tarball::doc}{{2.2}{6}{Installing from source\relax }{section.2.2}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.1}Decompress the tarball}{6}{subsection.2.2.1}} +\newlabel{rst/Installing_from_tarball:decompress-the-tarball}{{2.2.1}{6}{Decompress the tarball\relax }{subsection.2.2.1}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.2}Compile}{6}{subsection.2.2.2}} +\newlabel{rst/Installing_from_tarball:compile}{{2.2.2}{6}{Compile\relax }{subsection.2.2.2}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {2.2.3}Install}{6}{subsection.2.2.3}} +\newlabel{rst/Installing_from_tarball:install}{{2.2.3}{6}{Install\relax }{subsection.2.2.3}{}} +\@writefile{toc}{\contentsline {section}{\numberline {2.3}Check the installation}{6}{section.2.3}} +\newlabel{rst/Test_the_installation:check-the-installation}{{2.3}{6}{Check the installation\relax }{section.2.3}{}} +\newlabel{rst/Test_the_installation::doc}{{2.3}{6}{Check the installation\relax }{section.2.3}{}} +\@writefile{toc}{\contentsline {section}{\numberline {2.4}Default configuration}{8}{section.2.4}} +\newlabel{rst/Default_configurations:default-configuration}{{2.4}{8}{Default configuration\relax }{section.2.4}{}} +\newlabel{rst/Default_configurations::doc}{{2.4}{8}{Default configuration\relax }{section.2.4}{}} +\@writefile{toc}{\contentsline {section}{\numberline {2.5}Default parameters}{8}{section.2.5}} +\newlabel{rst/Default_parameters::doc}{{2.5}{8}{Default parameters\relax }{section.2.5}{}} +\newlabel{rst/Default_parameters:default-parameters}{{2.5}{8}{Default parameters\relax }{section.2.5}{}} +\@writefile{toc}{\contentsline {section}{\numberline {2.6}Examples}{9}{section.2.6}} +\newlabel{rst/Examples::doc}{{2.6}{9}{Examples\relax }{section.2.6}{}} +\newlabel{rst/Examples:examples}{{2.6}{9}{Examples\relax }{section.2.6}{}} +\@writefile{toc}{\contentsline {chapter}{\numberline {3}Tutorial}{11}{chapter.3}} +\@writefile{lof}{\addvspace {10\p@ }} +\@writefile{lot}{\addvspace {10\p@ }} +\newlabel{rst/Tutorial::doc}{{3}{11}{Tutorial\relax }{chapter.3}{}} +\newlabel{rst/Tutorial:tutorial}{{3}{11}{Tutorial\relax }{chapter.3}{}} +\@writefile{toc}{\contentsline {section}{\numberline {3.1}Using \textbf {pNbody} with the python interpreter}{11}{section.3.1}} +\newlabel{rst/Tutorial_interpreter:using-pnbody-with-the-python-interpreter}{{3.1}{11}{Using \textbf {pNbody} with the python interpreter\relax }{section.3.1}{}} +\newlabel{rst/Tutorial_interpreter::doc}{{3.1}{11}{Using \textbf {pNbody} with the python interpreter\relax }{section.3.1}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.1}Creating \textbf {pNbody} objects from scratch}{11}{subsection.3.1.1}} +\newlabel{rst/Tutorial_interpreter:creating-pnbody-objects-from-scratch}{{3.1.1}{11}{Creating \textbf {pNbody} objects from scratch\relax }{subsection.3.1.1}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.2}Open from existing file}{13}{subsection.3.1.2}} +\newlabel{rst/Tutorial_interpreter:open-from-existing-file}{{3.1.2}{13}{Open from existing file\relax }{subsection.3.1.2}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {3.1.3}Selection of particles}{14}{subsection.3.1.3}} +\newlabel{rst/Tutorial_interpreter:selection-of-particles}{{3.1.3}{14}{Selection of particles\relax }{subsection.3.1.3}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline 
{3.1.4}Merging two models}{15}{subsection.3.1.4}} +\newlabel{rst/Tutorial_interpreter:merging-two-models}{{3.1.4}{15}{Merging two models\relax }{subsection.3.1.4}{}} +\@writefile{toc}{\contentsline {section}{\numberline {3.2}Using pNbody with scripts}{15}{section.3.2}} +\newlabel{rst/Tutorial_scripts:using-pnbody-with-scripts}{{3.2}{15}{Using pNbody with scripts\relax }{section.3.2}{}} +\newlabel{rst/Tutorial_scripts::doc}{{3.2}{15}{Using pNbody with scripts\relax }{section.3.2}{}} +\@writefile{toc}{\contentsline {section}{\numberline {3.3}Using pNbody in parallel}{16}{section.3.3}} +\newlabel{rst/Tutorial_parallel:using-pnbody-in-parallel}{{3.3}{16}{Using pNbody in parallel\relax }{section.3.3}{}} +\newlabel{rst/Tutorial_parallel::doc}{{3.3}{16}{Using pNbody in parallel\relax }{section.3.3}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {3.3.1}Parallel output}{17}{subsection.3.3.1}} +\newlabel{rst/Tutorial_parallel:parallel-output}{{3.3.1}{17}{Parallel output\relax }{subsection.3.3.1}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {3.3.2}Parallel input}{17}{subsection.3.3.2}} +\newlabel{rst/Tutorial_parallel:parallel-input}{{3.3.2}{17}{Parallel input\relax }{subsection.3.3.2}{}} +\@writefile{toc}{\contentsline {subsection}{\numberline {3.3.3}More on parallelisme}{18}{subsection.3.3.3}} +\newlabel{rst/Tutorial_parallel:more-on-parallelisme}{{3.3.3}{18}{More on parallelisme\relax }{subsection.3.3.3}{}} +\@writefile{toc}{\contentsline {chapter}{\numberline {4}Reference}{19}{chapter.4}} +\@writefile{lof}{\addvspace {10\p@ }} +\@writefile{lot}{\addvspace {10\p@ }} +\newlabel{rst/Reference::doc}{{4}{19}{Reference\relax }{chapter.4}{}} +\newlabel{rst/Reference:reference}{{4}{19}{Reference\relax }{chapter.4}{}} +\@writefile{toc}{\contentsline {section}{\numberline {4.1}the Io module}{19}{section.4.1}} +\newlabel{rst/Io:the-io-module}{{4.1}{19}{the Io module\relax }{section.4.1}{}} +\newlabel{rst/Io::doc}{{4.1}{19}{the Io module\relax }{section.4.1}{}} +\newlabel{rst/Io:pNbody.io.checkfile}{{4.1}{19}{the Io module\relax }{section*.3}{}} +\@writefile{toc}{\contentsline {paragraph}{Examples}{19}{paragraph*.4}} +\newlabel{rst/Io:pNbody.io.end_of_file}{{4.1}{19}{the Io module\relax }{section*.5}{}} +\newlabel{rst/Io:pNbody.io.write_array}{{4.1}{19}{the Io module\relax }{section*.6}{}} +\@writefile{toc}{\contentsline {paragraph}{Examples}{20}{paragraph*.7}} +\newlabel{rst/Io:pNbody.io.read_ascii}{{4.1}{20}{the Io module\relax }{section*.8}{}} +\@writefile{toc}{\contentsline {paragraph}{Examples}{20}{paragraph*.9}} +\newlabel{rst/Io:pNbody.io.write_dump}{{4.1}{20}{the Io module\relax }{section*.10}{}} +\@writefile{toc}{\contentsline {paragraph}{Examples}{21}{paragraph*.11}} +\newlabel{rst/Io:pNbody.io.read_dump}{{4.1}{21}{the Io module\relax }{section*.12}{}} +\@writefile{toc}{\contentsline {paragraph}{Examples}{21}{paragraph*.13}} +\@writefile{toc}{\contentsline {chapter}{\numberline {5}Indices and tables}{23}{chapter.5}} +\@writefile{lof}{\addvspace {10\p@ }} +\@writefile{lot}{\addvspace {10\p@ }} +\newlabel{index:indices-and-tables}{{5}{23}{Indices and tables\relax }{chapter.5}{}} +\@writefile{toc}{\contentsline {chapter}{Index}{25}{section*.14}} diff --git a/Doc/newdoc/_build/latex/pNbody.idx b/Doc/newdoc/_build/latex/pNbody.idx new file mode 100644 index 0000000..99e0b2b --- /dev/null +++ b/Doc/newdoc/_build/latex/pNbody.idx @@ -0,0 +1,6 @@ +\indexentry{checkfile() (in module pNbody.io)|hyperpage}{19} +\indexentry{end\_of\_file() (in module pNbody.io)|hyperpage}{19} 
+\indexentry{write\_array() (in module pNbody.io)|hyperpage}{19} +\indexentry{read\_ascii() (in module pNbody.io)|hyperpage}{20} +\indexentry{write\_dump() (in module pNbody.io)|hyperpage}{20} +\indexentry{read\_dump() (in module pNbody.io)|hyperpage}{21} diff --git a/Doc/newdoc/_build/latex/pNbody.ilg b/Doc/newdoc/_build/latex/pNbody.ilg new file mode 100644 index 0000000..3bfc03f --- /dev/null +++ b/Doc/newdoc/_build/latex/pNbody.ilg @@ -0,0 +1,7 @@ +This is makeindex, version 2.15 [TeX Live 2009] (kpathsea + Thai support). +Scanning style file ./python.ist......done (6 attributes redefined, 0 ignored). +Scanning input file pNbody.idx....done (6 entries accepted, 0 rejected). +Sorting entries....done (17 comparisons). +Generating output file pNbody.ind....done (21 lines written, 0 warnings). +Output written in pNbody.ind. +Transcript written in pNbody.ilg. diff --git a/Doc/newdoc/_build/latex/pNbody.ind b/Doc/newdoc/_build/latex/pNbody.ind new file mode 100644 index 0000000..628460c --- /dev/null +++ b/Doc/newdoc/_build/latex/pNbody.ind @@ -0,0 +1,21 @@ +\begin{theindex} +\def\bigletter#1{{\Large\sffamily#1}\nopagebreak\vspace{1mm}} + + \bigletter C + \item checkfile() (in module pNbody.io), \hyperpage{19} + + \indexspace + \bigletter E + \item end\_of\_file() (in module pNbody.io), \hyperpage{19} + + \indexspace + \bigletter R + \item read\_ascii() (in module pNbody.io), \hyperpage{20} + \item read\_dump() (in module pNbody.io), \hyperpage{21} + + \indexspace + \bigletter W + \item write\_array() (in module pNbody.io), \hyperpage{19} + \item write\_dump() (in module pNbody.io), \hyperpage{20} + +\end{theindex} diff --git a/Doc/newdoc/_build/latex/pNbody.log b/Doc/newdoc/_build/latex/pNbody.log new file mode 100644 index 0000000..28cc482 --- /dev/null +++ b/Doc/newdoc/_build/latex/pNbody.log @@ -0,0 +1,966 @@ +This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=pdflatex 2011.5.30) 26 AUG 2011 16:56 +entering extended mode + %&-line parsing enabled. +**pNbody.tex +(./pNbody.tex +LaTeX2e <2009/09/24> +Babel and hyphenation patterns for english, usenglishmax, dumylang, noh +yphenation, farsi, arabic, croatian, bulgarian, ukrainian, russian, czech, slov +ak, danish, dutch, finnish, french, basque, ngerman, german, german-x-2009-06-1 +9, ngerman-x-2009-06-19, ibycus, monogreek, greek, ancientgreek, hungarian, san +skrit, italian, latin, latvian, lithuanian, mongolian2a, mongolian, bokmal, nyn +orsk, romanian, irish, coptic, serbian, turkish, welsh, esperanto, uppersorbian +, estonian, indonesian, interlingua, icelandic, kurmanji, slovenian, polish, po +rtuguese, spanish, galician, catalan, swedish, ukenglish, pinyin, loaded. 
+(./sphinxmanual.cls +Document Class: sphinxmanual 2009/06/02 Document class (Sphinx manual) +(/usr/share/texmf-texlive/tex/latex/base/report.cls +Document Class: report 2007/10/19 v1.4h Standard LaTeX document class +(/usr/share/texmf-texlive/tex/latex/base/size10.clo +File: size10.clo 2007/10/19 v1.4h Standard LaTeX file (size option) +) +\c@part=\count79 +\c@chapter=\count80 +\c@section=\count81 +\c@subsection=\count82 +\c@subsubsection=\count83 +\c@paragraph=\count84 +\c@subparagraph=\count85 +\c@figure=\count86 +\c@table=\count87 +\abovecaptionskip=\skip41 +\belowcaptionskip=\skip42 +\bibindent=\dimen102 +)) +(/usr/share/texmf-texlive/tex/latex/base/inputenc.sty +Package: inputenc 2008/03/30 v1.1d Input encoding file +\inpenc@prehook=\toks14 +\inpenc@posthook=\toks15 + +(/usr/share/texmf-texlive/tex/latex/base/utf8.def +File: utf8.def 2008/04/05 v1.1m UTF-8 support for inputenc +Now handling font encoding OML ... +... no UTF-8 mapping file for font encoding OML +Now handling font encoding T1 ... +... processing UTF-8 mapping file for font encoding T1 + +(/usr/share/texmf-texlive/tex/latex/base/t1enc.dfu +File: t1enc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc + defining Unicode char U+00A1 (decimal 161) + defining Unicode char U+00A3 (decimal 163) + defining Unicode char U+00AB (decimal 171) + defining Unicode char U+00BB (decimal 187) + defining Unicode char U+00BF (decimal 191) + defining Unicode char U+00C0 (decimal 192) + defining Unicode char U+00C1 (decimal 193) + defining Unicode char U+00C2 (decimal 194) + defining Unicode char U+00C3 (decimal 195) + defining Unicode char U+00C4 (decimal 196) + defining Unicode char U+00C5 (decimal 197) + defining Unicode char U+00C6 (decimal 198) + defining Unicode char U+00C7 (decimal 199) + defining Unicode char U+00C8 (decimal 200) + defining Unicode char U+00C9 (decimal 201) + defining Unicode char U+00CA (decimal 202) + defining Unicode char U+00CB (decimal 203) + defining Unicode char U+00CC (decimal 204) + defining Unicode char U+00CD (decimal 205) + defining Unicode char U+00CE (decimal 206) + defining Unicode char U+00CF (decimal 207) + defining Unicode char U+00D0 (decimal 208) + defining Unicode char U+00D1 (decimal 209) + defining Unicode char U+00D2 (decimal 210) + defining Unicode char U+00D3 (decimal 211) + defining Unicode char U+00D4 (decimal 212) + defining Unicode char U+00D5 (decimal 213) + defining Unicode char U+00D6 (decimal 214) + defining Unicode char U+00D8 (decimal 216) + defining Unicode char U+00D9 (decimal 217) + defining Unicode char U+00DA (decimal 218) + defining Unicode char U+00DB (decimal 219) + defining Unicode char U+00DC (decimal 220) + defining Unicode char U+00DD (decimal 221) + defining Unicode char U+00DE (decimal 222) + defining Unicode char U+00DF (decimal 223) + defining Unicode char U+00E0 (decimal 224) + defining Unicode char U+00E1 (decimal 225) + defining Unicode char U+00E2 (decimal 226) + defining Unicode char U+00E3 (decimal 227) + defining Unicode char U+00E4 (decimal 228) + defining Unicode char U+00E5 (decimal 229) + defining Unicode char U+00E6 (decimal 230) + defining Unicode char U+00E7 (decimal 231) + defining Unicode char U+00E8 (decimal 232) + defining Unicode char U+00E9 (decimal 233) + defining Unicode char U+00EA (decimal 234) + defining Unicode char U+00EB (decimal 235) + defining Unicode char U+00EC (decimal 236) + defining Unicode char U+00ED (decimal 237) + defining Unicode char U+00EE (decimal 238) + defining Unicode char U+00EF (decimal 239) + defining Unicode char 
U+00F0 (decimal 240) + defining Unicode char U+00F1 (decimal 241) + defining Unicode char U+00F2 (decimal 242) + defining Unicode char U+00F3 (decimal 243) + defining Unicode char U+00F4 (decimal 244) + defining Unicode char U+00F5 (decimal 245) + defining Unicode char U+00F6 (decimal 246) + defining Unicode char U+00F8 (decimal 248) + defining Unicode char U+00F9 (decimal 249) + defining Unicode char U+00FA (decimal 250) + defining Unicode char U+00FB (decimal 251) + defining Unicode char U+00FC (decimal 252) + defining Unicode char U+00FD (decimal 253) + defining Unicode char U+00FE (decimal 254) + defining Unicode char U+00FF (decimal 255) + defining Unicode char U+0102 (decimal 258) + defining Unicode char U+0103 (decimal 259) + defining Unicode char U+0104 (decimal 260) + defining Unicode char U+0105 (decimal 261) + defining Unicode char U+0106 (decimal 262) + defining Unicode char U+0107 (decimal 263) + defining Unicode char U+010C (decimal 268) + defining Unicode char U+010D (decimal 269) + defining Unicode char U+010E (decimal 270) + defining Unicode char U+010F (decimal 271) + defining Unicode char U+0110 (decimal 272) + defining Unicode char U+0111 (decimal 273) + defining Unicode char U+0118 (decimal 280) + defining Unicode char U+0119 (decimal 281) + defining Unicode char U+011A (decimal 282) + defining Unicode char U+011B (decimal 283) + defining Unicode char U+011E (decimal 286) + defining Unicode char U+011F (decimal 287) + defining Unicode char U+0130 (decimal 304) + defining Unicode char U+0131 (decimal 305) + defining Unicode char U+0132 (decimal 306) + defining Unicode char U+0133 (decimal 307) + defining Unicode char U+0139 (decimal 313) + defining Unicode char U+013A (decimal 314) + defining Unicode char U+013D (decimal 317) + defining Unicode char U+013E (decimal 318) + defining Unicode char U+0141 (decimal 321) + defining Unicode char U+0142 (decimal 322) + defining Unicode char U+0143 (decimal 323) + defining Unicode char U+0144 (decimal 324) + defining Unicode char U+0147 (decimal 327) + defining Unicode char U+0148 (decimal 328) + defining Unicode char U+014A (decimal 330) + defining Unicode char U+014B (decimal 331) + defining Unicode char U+0150 (decimal 336) + defining Unicode char U+0151 (decimal 337) + defining Unicode char U+0152 (decimal 338) + defining Unicode char U+0153 (decimal 339) + defining Unicode char U+0154 (decimal 340) + defining Unicode char U+0155 (decimal 341) + defining Unicode char U+0158 (decimal 344) + defining Unicode char U+0159 (decimal 345) + defining Unicode char U+015A (decimal 346) + defining Unicode char U+015B (decimal 347) + defining Unicode char U+015E (decimal 350) + defining Unicode char U+015F (decimal 351) + defining Unicode char U+0160 (decimal 352) + defining Unicode char U+0161 (decimal 353) + defining Unicode char U+0162 (decimal 354) + defining Unicode char U+0163 (decimal 355) + defining Unicode char U+0164 (decimal 356) + defining Unicode char U+0165 (decimal 357) + defining Unicode char U+016E (decimal 366) + defining Unicode char U+016F (decimal 367) + defining Unicode char U+0170 (decimal 368) + defining Unicode char U+0171 (decimal 369) + defining Unicode char U+0178 (decimal 376) + defining Unicode char U+0179 (decimal 377) + defining Unicode char U+017A (decimal 378) + defining Unicode char U+017B (decimal 379) + defining Unicode char U+017C (decimal 380) + defining Unicode char U+017D (decimal 381) + defining Unicode char U+017E (decimal 382) + defining Unicode char U+200C (decimal 8204) + defining Unicode 
char U+2013 (decimal 8211) + defining Unicode char U+2014 (decimal 8212) + defining Unicode char U+2018 (decimal 8216) + defining Unicode char U+2019 (decimal 8217) + defining Unicode char U+201A (decimal 8218) + defining Unicode char U+201C (decimal 8220) + defining Unicode char U+201D (decimal 8221) + defining Unicode char U+201E (decimal 8222) + defining Unicode char U+2030 (decimal 8240) + defining Unicode char U+2031 (decimal 8241) + defining Unicode char U+2039 (decimal 8249) + defining Unicode char U+203A (decimal 8250) + defining Unicode char U+2423 (decimal 9251) +) +Now handling font encoding OT1 ... +... processing UTF-8 mapping file for font encoding OT1 + +(/usr/share/texmf-texlive/tex/latex/base/ot1enc.dfu +File: ot1enc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc + defining Unicode char U+00A1 (decimal 161) + defining Unicode char U+00A3 (decimal 163) + defining Unicode char U+00B8 (decimal 184) + defining Unicode char U+00BF (decimal 191) + defining Unicode char U+00C5 (decimal 197) + defining Unicode char U+00C6 (decimal 198) + defining Unicode char U+00D8 (decimal 216) + defining Unicode char U+00DF (decimal 223) + defining Unicode char U+00E6 (decimal 230) + defining Unicode char U+00EC (decimal 236) + defining Unicode char U+00ED (decimal 237) + defining Unicode char U+00EE (decimal 238) + defining Unicode char U+00EF (decimal 239) + defining Unicode char U+00F8 (decimal 248) + defining Unicode char U+0131 (decimal 305) + defining Unicode char U+0141 (decimal 321) + defining Unicode char U+0142 (decimal 322) + defining Unicode char U+0152 (decimal 338) + defining Unicode char U+0153 (decimal 339) + defining Unicode char U+2013 (decimal 8211) + defining Unicode char U+2014 (decimal 8212) + defining Unicode char U+2018 (decimal 8216) + defining Unicode char U+2019 (decimal 8217) + defining Unicode char U+201C (decimal 8220) + defining Unicode char U+201D (decimal 8221) +) +Now handling font encoding OMS ... +... processing UTF-8 mapping file for font encoding OMS + +(/usr/share/texmf-texlive/tex/latex/base/omsenc.dfu +File: omsenc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc + defining Unicode char U+00A7 (decimal 167) + defining Unicode char U+00B6 (decimal 182) + defining Unicode char U+00B7 (decimal 183) + defining Unicode char U+2020 (decimal 8224) + defining Unicode char U+2021 (decimal 8225) + defining Unicode char U+2022 (decimal 8226) +) +Now handling font encoding OMX ... +... no UTF-8 mapping file for font encoding OMX +Now handling font encoding U ... +... no UTF-8 mapping file for font encoding U + defining Unicode char U+00A9 (decimal 169) + defining Unicode char U+00AA (decimal 170) + defining Unicode char U+00AE (decimal 174) + defining Unicode char U+00BA (decimal 186) + defining Unicode char U+02C6 (decimal 710) + defining Unicode char U+02DC (decimal 732) + defining Unicode char U+200C (decimal 8204) + defining Unicode char U+2026 (decimal 8230) + defining Unicode char U+2122 (decimal 8482) + defining Unicode char U+2423 (decimal 9251) +)) + defining Unicode char U+00A0 (decimal 160) + +(/usr/share/texmf-texlive/tex/latex/base/fontenc.sty +Package: fontenc 2005/09/27 v1.99g Standard LaTeX package + +(/usr/share/texmf-texlive/tex/latex/base/t1enc.def +File: t1enc.def 2005/09/27 v1.99g Standard LaTeX file +LaTeX Font Info: Redeclaring font encoding T1 on input line 43. 
+)) +(/var/lib/texmf/tex/generic/babel/babel.sty +Package: babel 2008/07/06 v3.8l The Babel package + +(/usr/share/texmf-texlive/tex/generic/babel/english.ldf +Language: english 2005/03/30 v3.3o English support from the babel system + +(/usr/share/texmf-texlive/tex/generic/babel/babel.def +File: babel.def 2008/07/06 v3.8l Babel common definitions +\babel@savecnt=\count88 +\U@D=\dimen103 +) +\l@canadian = a dialect from \language\l@american +\l@australian = a dialect from \language\l@british +\l@newzealand = a dialect from \language\l@british +)) +(/usr/share/texmf-texlive/tex/latex/psnfss/times.sty +Package: times 2005/04/12 PSNFSS-v9.2a (SPQR) +) (./fncychap.sty +Package: fncychap 2007/07/30 v1.34 LaTeX package (Revised chapters) +\RW=\skip43 +\mylen=\skip44 +\myhi=\skip45 +\px=\skip46 +\py=\skip47 +\pyy=\skip48 +\pxx=\skip49 +\c@AlphaCnt=\count89 +\c@AlphaDecCnt=\count90 +) +(/usr/share/texmf-texlive/tex/latex/tools/longtable.sty +Package: longtable 2004/02/01 v4.11 Multi-page Table package (DPC) +\LTleft=\skip50 +\LTright=\skip51 +\LTpre=\skip52 +\LTpost=\skip53 +\LTchunksize=\count91 +\LTcapwidth=\dimen104 +\LT@head=\box26 +\LT@firsthead=\box27 +\LT@foot=\box28 +\LT@lastfoot=\box29 +\LT@cols=\count92 +\LT@rows=\count93 +\c@LT@tables=\count94 +\c@LT@chunks=\count95 +\LT@p@ftn=\toks16 +) (./sphinx.sty +Package: sphinx 2010/01/15 LaTeX package (Sphinx markup) + +(/usr/share/texmf-texlive/tex/latex/base/textcomp.sty +Package: textcomp 2005/09/27 v1.99g Standard LaTeX package +Package textcomp Info: Sub-encoding information: +(textcomp) 5 = only ISO-Adobe without \textcurrency +(textcomp) 4 = 5 + \texteuro +(textcomp) 3 = 4 + \textohm +(textcomp) 2 = 3 + \textestimated + \textcurrency +(textcomp) 1 = TS1 - \textcircled - \t +(textcomp) 0 = TS1 (full) +(textcomp) Font families with sub-encoding setting implement +(textcomp) only a restricted character set as indicated. +(textcomp) Family '?' is the default used for unknown fonts. +(textcomp) See the documentation for details. +Package textcomp Info: Setting ? sub-encoding to TS1/1 on input line 71. + +(/usr/share/texmf-texlive/tex/latex/base/ts1enc.def +File: ts1enc.def 2001/06/05 v3.0e (jk/car/fm) Standard LaTeX file +Now handling font encoding TS1 ... +... 
processing UTF-8 mapping file for font encoding TS1 + +(/usr/share/texmf-texlive/tex/latex/base/ts1enc.dfu +File: ts1enc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc + defining Unicode char U+00A2 (decimal 162) + defining Unicode char U+00A3 (decimal 163) + defining Unicode char U+00A4 (decimal 164) + defining Unicode char U+00A5 (decimal 165) + defining Unicode char U+00A6 (decimal 166) + defining Unicode char U+00A7 (decimal 167) + defining Unicode char U+00A8 (decimal 168) + defining Unicode char U+00A9 (decimal 169) + defining Unicode char U+00AA (decimal 170) + defining Unicode char U+00AC (decimal 172) + defining Unicode char U+00AE (decimal 174) + defining Unicode char U+00AF (decimal 175) + defining Unicode char U+00B0 (decimal 176) + defining Unicode char U+00B1 (decimal 177) + defining Unicode char U+00B2 (decimal 178) + defining Unicode char U+00B3 (decimal 179) + defining Unicode char U+00B4 (decimal 180) + defining Unicode char U+00B5 (decimal 181) + defining Unicode char U+00B6 (decimal 182) + defining Unicode char U+00B7 (decimal 183) + defining Unicode char U+00B9 (decimal 185) + defining Unicode char U+00BA (decimal 186) + defining Unicode char U+00BC (decimal 188) + defining Unicode char U+00BD (decimal 189) + defining Unicode char U+00BE (decimal 190) + defining Unicode char U+00D7 (decimal 215) + defining Unicode char U+00F7 (decimal 247) + defining Unicode char U+0192 (decimal 402) + defining Unicode char U+02C7 (decimal 711) + defining Unicode char U+02D8 (decimal 728) + defining Unicode char U+02DD (decimal 733) + defining Unicode char U+0E3F (decimal 3647) + defining Unicode char U+2016 (decimal 8214) + defining Unicode char U+2020 (decimal 8224) + defining Unicode char U+2021 (decimal 8225) + defining Unicode char U+2022 (decimal 8226) + defining Unicode char U+2030 (decimal 8240) + defining Unicode char U+2031 (decimal 8241) + defining Unicode char U+203B (decimal 8251) + defining Unicode char U+203D (decimal 8253) + defining Unicode char U+2044 (decimal 8260) + defining Unicode char U+204E (decimal 8270) + defining Unicode char U+2052 (decimal 8274) + defining Unicode char U+20A1 (decimal 8353) + defining Unicode char U+20A4 (decimal 8356) + defining Unicode char U+20A6 (decimal 8358) + defining Unicode char U+20A9 (decimal 8361) + defining Unicode char U+20AB (decimal 8363) + defining Unicode char U+20AC (decimal 8364) + defining Unicode char U+20B1 (decimal 8369) + defining Unicode char U+2103 (decimal 8451) + defining Unicode char U+2116 (decimal 8470) + defining Unicode char U+2117 (decimal 8471) + defining Unicode char U+211E (decimal 8478) + defining Unicode char U+2120 (decimal 8480) + defining Unicode char U+2122 (decimal 8482) + defining Unicode char U+2126 (decimal 8486) + defining Unicode char U+2127 (decimal 8487) + defining Unicode char U+212E (decimal 8494) + defining Unicode char U+2190 (decimal 8592) + defining Unicode char U+2191 (decimal 8593) + defining Unicode char U+2192 (decimal 8594) + defining Unicode char U+2193 (decimal 8595) + defining Unicode char U+2329 (decimal 9001) + defining Unicode char U+232A (decimal 9002) + defining Unicode char U+2422 (decimal 9250) + defining Unicode char U+25E6 (decimal 9702) + defining Unicode char U+25EF (decimal 9711) + defining Unicode char U+266A (decimal 9834) +)) +LaTeX Info: Redefining \oldstylenums on input line 266. +Package textcomp Info: Setting cmr sub-encoding to TS1/0 on input line 281. +Package textcomp Info: Setting cmss sub-encoding to TS1/0 on input line 282. 
+Package textcomp Info: Setting cmtt sub-encoding to TS1/0 on input line 283. +Package textcomp Info: Setting cmvtt sub-encoding to TS1/0 on input line 284. +Package textcomp Info: Setting cmbr sub-encoding to TS1/0 on input line 285. +Package textcomp Info: Setting cmtl sub-encoding to TS1/0 on input line 286. +Package textcomp Info: Setting ccr sub-encoding to TS1/0 on input line 287. +Package textcomp Info: Setting ptm sub-encoding to TS1/4 on input line 288. +Package textcomp Info: Setting pcr sub-encoding to TS1/4 on input line 289. +Package textcomp Info: Setting phv sub-encoding to TS1/4 on input line 290. +Package textcomp Info: Setting ppl sub-encoding to TS1/3 on input line 291. +Package textcomp Info: Setting pag sub-encoding to TS1/4 on input line 292. +Package textcomp Info: Setting pbk sub-encoding to TS1/4 on input line 293. +Package textcomp Info: Setting pnc sub-encoding to TS1/4 on input line 294. +Package textcomp Info: Setting pzc sub-encoding to TS1/4 on input line 295. +Package textcomp Info: Setting bch sub-encoding to TS1/4 on input line 296. +Package textcomp Info: Setting put sub-encoding to TS1/5 on input line 297. +Package textcomp Info: Setting uag sub-encoding to TS1/5 on input line 298. +Package textcomp Info: Setting ugq sub-encoding to TS1/5 on input line 299. +Package textcomp Info: Setting ul8 sub-encoding to TS1/4 on input line 300. +Package textcomp Info: Setting ul9 sub-encoding to TS1/4 on input line 301. +Package textcomp Info: Setting augie sub-encoding to TS1/5 on input line 302. +Package textcomp Info: Setting dayrom sub-encoding to TS1/3 on input line 303. +Package textcomp Info: Setting dayroms sub-encoding to TS1/3 on input line 304. + +Package textcomp Info: Setting pxr sub-encoding to TS1/0 on input line 305. +Package textcomp Info: Setting pxss sub-encoding to TS1/0 on input line 306. +Package textcomp Info: Setting pxtt sub-encoding to TS1/0 on input line 307. +Package textcomp Info: Setting txr sub-encoding to TS1/0 on input line 308. +Package textcomp Info: Setting txss sub-encoding to TS1/0 on input line 309. +Package textcomp Info: Setting txtt sub-encoding to TS1/0 on input line 310. +Package textcomp Info: Setting futs sub-encoding to TS1/4 on input line 311. +Package textcomp Info: Setting futx sub-encoding to TS1/4 on input line 312. +Package textcomp Info: Setting futj sub-encoding to TS1/4 on input line 313. +Package textcomp Info: Setting hlh sub-encoding to TS1/3 on input line 314. +Package textcomp Info: Setting hls sub-encoding to TS1/3 on input line 315. +Package textcomp Info: Setting hlst sub-encoding to TS1/3 on input line 316. +Package textcomp Info: Setting hlct sub-encoding to TS1/5 on input line 317. +Package textcomp Info: Setting hlx sub-encoding to TS1/5 on input line 318. +Package textcomp Info: Setting hlce sub-encoding to TS1/5 on input line 319. +Package textcomp Info: Setting hlcn sub-encoding to TS1/5 on input line 320. +Package textcomp Info: Setting hlcw sub-encoding to TS1/5 on input line 321. +Package textcomp Info: Setting hlcf sub-encoding to TS1/5 on input line 322. +Package textcomp Info: Setting pplx sub-encoding to TS1/3 on input line 323. +Package textcomp Info: Setting pplj sub-encoding to TS1/3 on input line 324. +Package textcomp Info: Setting ptmx sub-encoding to TS1/4 on input line 325. +Package textcomp Info: Setting ptmj sub-encoding to TS1/4 on input line 326. 
+) +(/usr/share/texmf-texlive/tex/latex/fancyhdr/fancyhdr.sty +\fancy@headwidth=\skip54 +\f@ncyO@elh=\skip55 +\f@ncyO@erh=\skip56 +\f@ncyO@olh=\skip57 +\f@ncyO@orh=\skip58 +\f@ncyO@elf=\skip59 +\f@ncyO@erf=\skip60 +\f@ncyO@olf=\skip61 +\f@ncyO@orf=\skip62 +) +(/usr/share/texmf-texlive/tex/latex/fancybox/fancybox.sty +Package: fancybox 2000/09/19 1.3 + +Style option: `fancybox' v1.3 <2000/09/19> (tvz) +\@fancybox=\box30 +\shadowsize=\dimen105 +\@Sbox=\box31 +\do@VerbBox=\toks17 +\the@fancyput=\toks18 +\this@fancyput=\toks19 +\EndVerbatimTokens=\toks20 +\Verbatim@Outfile=\write3 +\Verbatim@Infile=\read1 +) (/usr/share/texmf-texlive/tex/latex/titlesec/titlesec.sty +Package: titlesec 2007/08/12 v2.8 Sectioning titles +\ttl@box=\box32 +\beforetitleunit=\skip63 +\aftertitleunit=\skip64 +\ttl@plus=\dimen106 +\ttl@minus=\dimen107 +\ttl@toksa=\toks21 +\titlewidth=\dimen108 +\titlewidthlast=\dimen109 +\titlewidthfirst=\dimen110 +) (./tabulary.sty +Package: tabulary 2007/10/02 v0.9 tabulary package (DPC) + +(/usr/share/texmf-texlive/tex/latex/tools/array.sty +Package: array 2008/09/09 v2.4c Tabular extension package (FMi) +\col@sep=\dimen111 +\extrarowheight=\dimen112 +\NC@list=\toks22 +\extratabsurround=\skip65 +\backup@length=\skip66 +) +\TY@count=\count96 +\TY@linewidth=\dimen113 +\tymin=\dimen114 +\tymax=\dimen115 +\TY@tablewidth=\dimen116 +) +(/usr/share/texmf-texlive/tex/latex/amsmath/amsmath.sty +Package: amsmath 2000/07/18 v2.13 AMS math features +\@mathmargin=\skip67 + +For additional information on amsmath, use the `?' option. +(/usr/share/texmf-texlive/tex/latex/amsmath/amstext.sty +Package: amstext 2000/06/29 v2.01 + +(/usr/share/texmf-texlive/tex/latex/amsmath/amsgen.sty +File: amsgen.sty 1999/11/30 v2.0 +\@emptytoks=\toks23 +\ex@=\dimen117 +)) +(/usr/share/texmf-texlive/tex/latex/amsmath/amsbsy.sty +Package: amsbsy 1999/11/29 v1.2d +\pmbraise@=\dimen118 +) +(/usr/share/texmf-texlive/tex/latex/amsmath/amsopn.sty +Package: amsopn 1999/12/14 v2.01 operator names +) +\inf@bad=\count97 +LaTeX Info: Redefining \frac on input line 211. +\uproot@=\count98 +\leftroot@=\count99 +LaTeX Info: Redefining \overline on input line 307. +\classnum@=\count100 +\DOTSCASE@=\count101 +LaTeX Info: Redefining \ldots on input line 379. +LaTeX Info: Redefining \dots on input line 382. +LaTeX Info: Redefining \cdots on input line 467. +\Mathstrutbox@=\box33 +\strutbox@=\box34 +\big@size=\dimen119 +LaTeX Font Info: Redeclaring font encoding OML on input line 567. +LaTeX Font Info: Redeclaring font encoding OMS on input line 568. +\macc@depth=\count102 +\c@MaxMatrixCols=\count103 +\dotsspace@=\muskip10 +\c@parentequation=\count104 +\dspbrk@lvl=\count105 +\tag@help=\toks24 +\row@=\count106 +\column@=\count107 +\maxfields@=\count108 +\andhelp@=\toks25 +\eqnshift@=\dimen120 +\alignsep@=\dimen121 +\tagshift@=\dimen122 +\tagwidth@=\dimen123 +\totwidth@=\dimen124 +\lineht@=\dimen125 +\@envbody=\toks26 +\multlinegap=\skip68 +\multlinetaggap=\skip69 +\mathdisplay@stack=\toks27 +LaTeX Info: Redefining \[ on input line 2666. +LaTeX Info: Redefining \] on input line 2667. 
+) +(/usr/share/texmf-texlive/tex/latex/base/makeidx.sty +Package: makeidx 2000/03/29 v1.0m Standard LaTeX package +) +(/usr/share/texmf-texlive/tex/latex/ltxmisc/framed.sty +Package: framed 2007/10/04 v 0.95: framed or shaded text with page breaks +\fb@frw=\dimen126 +\fb@frh=\dimen127 +\FrameRule=\dimen128 +\FrameSep=\dimen129 +) +(/usr/share/texmf-texlive/tex/latex/base/ifthen.sty +Package: ifthen 2001/05/26 v1.1c Standard LaTeX ifthen package (DPC) +) +(/usr/share/texmf-texlive/tex/latex/graphics/color.sty +Package: color 2005/11/14 v1.0j Standard LaTeX Color (DPC) + +(/etc/texmf/tex/latex/config/color.cfg +File: color.cfg 2007/01/18 v1.5 color configuration of teTeX/TeXLive +) +Package color Info: Driver file: pdftex.def on input line 130. + +(/usr/share/texmf-texlive/tex/latex/pdftex-def/pdftex.def +File: pdftex.def 2010/03/12 v0.04p Graphics/color for pdfTeX +\Gread@gobject=\count109 +)) +(/usr/share/texmf-texlive/tex/latex/fancyvrb/fancyvrb.sty +Package: fancyvrb 2008/02/07 + +Style option: `fancyvrb' v2.7a, with DG/SPQR fixes, and firstline=lastline fix +<2008/02/07> (tvz) (/usr/share/texmf-texlive/tex/latex/graphics/keyval.sty +Package: keyval 1999/03/16 v1.13 key=value parser (DPC) +\KV@toks@=\toks28 +) +\FV@CodeLineNo=\count110 +\FV@InFile=\read2 +\FV@TabBox=\box35 +\c@FancyVerbLine=\count111 +\FV@StepNumber=\count112 +\FV@OutFile=\write4 +) +(/usr/share/texmf-texlive/tex/latex/ltxmisc/threeparttable.sty +Package: threeparttable 2003/06/13 v 3.0 +\@tempboxb=\box36 +) +(/usr/share/texmf-texlive/tex/latex/mdwtools/footnote.sty +Package: footnote 1997/01/28 1.13 Save footnotes around boxes +\fn@notes=\box37 +\fn@width=\dimen130 +) +(/usr/share/texmf-texlive/tex/latex/wrapfig/wrapfig.sty +\wrapoverhang=\dimen131 +\WF@size=\dimen132 +\c@WF@wrappedlines=\count113 +\WF@box=\box38 +\WF@everypar=\toks29 +Package: wrapfig 2003/01/31 v 3.6 +) +(/usr/share/texmf-texlive/tex/latex/ltxmisc/parskip.sty +Package: parskip 2001/04/09 non-zero parskip adjustments +) +(/usr/share/texmf-texlive/tex/latex/graphics/graphicx.sty +Package: graphicx 1999/02/16 v1.0f Enhanced LaTeX Graphics (DPC,SPQR) + +(/usr/share/texmf-texlive/tex/latex/graphics/graphics.sty +Package: graphics 2009/02/05 v1.0o Standard LaTeX Graphics (DPC,SPQR) + +(/usr/share/texmf-texlive/tex/latex/graphics/trig.sty +Package: trig 1999/03/16 v1.09 sin cos tan (DPC) +) +(/etc/texmf/tex/latex/config/graphics.cfg +File: graphics.cfg 2009/08/28 v1.8 graphics configuration of TeX Live +) +Package graphics Info: Driver file: pdftex.def on input line 91. +) +\Gin@req@height=\dimen133 +\Gin@req@width=\dimen134 +) +(/usr/share/texmf-texlive/tex/plain/misc/pdfcolor.tex) +\distancetoright=\skip70 +\py@argswidth=\skip71 +\py@noticelength=\skip72 +\lineblockindentation=\skip73 +\image@box=\box39 +\image@width=\dimen135 + +(/usr/share/texmf-texlive/tex/latex/hyperref/hyperref.sty +Package: hyperref 2009/10/09 v6.79a Hypertext links for LaTeX + +(/usr/share/texmf-texlive/tex/generic/oberdiek/ifpdf.sty +Package: ifpdf 2009/04/10 v2.0 Provides the ifpdf switch (HO) +Package ifpdf Info: pdfTeX in pdf mode detected. +) +(/usr/share/texmf-texlive/tex/generic/oberdiek/ifvtex.sty +Package: ifvtex 2008/11/04 v1.4 Switches for detecting VTeX and its modes (HO) +Package ifvtex Info: VTeX not detected. 
+) +(/usr/share/texmf-texlive/tex/generic/ifxetex/ifxetex.sty +Package: ifxetex 2009/01/23 v0.5 Provides ifxetex conditional +) +(/usr/share/texmf-texlive/tex/latex/oberdiek/hycolor.sty +Package: hycolor 2009/10/02 v1.5 Code for color options of hyperref/bookmark (H +O) + +(/usr/share/texmf-texlive/tex/latex/oberdiek/xcolor-patch.sty +Package: xcolor-patch 2009/10/02 xcolor patch +)) +\@linkdim=\dimen136 +\Hy@linkcounter=\count114 +\Hy@pagecounter=\count115 + +(/usr/share/texmf-texlive/tex/latex/hyperref/pd1enc.def +File: pd1enc.def 2009/10/09 v6.79a Hyperref: PDFDocEncoding definition (HO) +Now handling font encoding PD1 ... +... no UTF-8 mapping file for font encoding PD1 +) +(/usr/share/texmf-texlive/tex/generic/oberdiek/etexcmds.sty +Package: etexcmds 2007/12/12 v1.2 Prefix for e-TeX command names (HO) + +(/usr/share/texmf-texlive/tex/generic/oberdiek/infwarerr.sty +Package: infwarerr 2007/09/09 v1.2 Providing info/warning/message (HO) +) +Package etexcmds Info: Could not find \expanded. +(etexcmds) That can mean that you are not using pdfTeX 1.50 or +(etexcmds) that some package has redefined \expanded. +(etexcmds) In the latter case, load this package earlier. +) +(/usr/share/texmf-texlive/tex/latex/latexconfig/hyperref.cfg +File: hyperref.cfg 2002/06/06 v1.2 hyperref configuration of TeXLive +) +(/usr/share/texmf-texlive/tex/latex/oberdiek/kvoptions.sty +Package: kvoptions 2009/08/13 v3.4 Keyval support for LaTeX options (HO) + +(/usr/share/texmf-texlive/tex/generic/oberdiek/kvsetkeys.sty +Package: kvsetkeys 2009/07/30 v1.5 Key value parser with default handler suppor +t (HO) +)) +Package hyperref Info: Option `colorlinks' set `true' on input line 2864. +Package hyperref Info: Option `breaklinks' set `true' on input line 2864. +Package hyperref Info: Hyper figures OFF on input line 2975. +Package hyperref Info: Link nesting OFF on input line 2980. +Package hyperref Info: Hyper index ON on input line 2983. +Package hyperref Info: Plain pages OFF on input line 2990. +Package hyperref Info: Backreferencing OFF on input line 2995. + +Implicit mode ON; LaTeX internals redefined +Package hyperref Info: Bookmarks ON on input line 3191. +(/usr/share/texmf-texlive/tex/latex/ltxmisc/url.sty +\Urlmuskip=\muskip11 +Package: url 2006/04/12 ver 3.3 Verb mode for urls, etc. +) +LaTeX Info: Redefining \url on input line 3428. + +(/usr/share/texmf-texlive/tex/generic/oberdiek/bitset.sty +Package: bitset 2007/09/28 v1.0 Data type bit set (HO) + +(/usr/share/texmf-texlive/tex/generic/oberdiek/intcalc.sty +Package: intcalc 2007/09/27 v1.1 Expandable integer calculations (HO) +) +(/usr/share/texmf-texlive/tex/generic/oberdiek/bigintcalc.sty +Package: bigintcalc 2007/11/11 v1.1 Expandable big integer calculations (HO) + +(/usr/share/texmf-texlive/tex/generic/oberdiek/pdftexcmds.sty +Package: pdftexcmds 2009/09/23 v0.6 LuaTeX support for pdfTeX utility functions + (HO) + +(/usr/share/texmf-texlive/tex/generic/oberdiek/ifluatex.sty +Package: ifluatex 2009/04/17 v1.2 Provides the ifluatex switch (HO) +Package ifluatex Info: LuaTeX not detected. +) +(/usr/share/texmf-texlive/tex/generic/oberdiek/ltxcmds.sty +Package: ltxcmds 2009/08/05 v1.0 Some LaTeX kernel commands for general use (HO +) +) +Package pdftexcmds Info: LuaTeX not detected. +Package pdftexcmds Info: \pdf@primitive is available. +Package pdftexcmds Info: \pdf@ifprimitive is available. 
+))) +\Fld@menulength=\count116 +\Field@Width=\dimen137 +\Fld@charsize=\dimen138 +\Field@toks=\toks30 +Package hyperref Info: Hyper figures OFF on input line 4377. +Package hyperref Info: Link nesting OFF on input line 4382. +Package hyperref Info: Hyper index ON on input line 4385. +Package hyperref Info: backreferencing OFF on input line 4392. +Package hyperref Info: Link coloring ON on input line 4395. +Package hyperref Info: Link coloring with OCG OFF on input line 4402. +Package hyperref Info: PDF/A mode OFF on input line 4407. + +(/usr/share/texmf-texlive/tex/generic/oberdiek/atbegshi.sty +Package: atbegshi 2008/07/31 v1.9 At begin shipout hook (HO) +) +\Hy@abspage=\count117 +\c@Item=\count118 +\c@Hfootnote=\count119 +) +*hyperref using default driver hpdftex* +(/usr/share/texmf-texlive/tex/latex/hyperref/hpdftex.def +File: hpdftex.def 2009/10/09 v6.79a Hyperref driver for pdfTeX +\Fld@listcount=\count120 +) +(/usr/share/texmf-texlive/tex/latex/oberdiek/hypcap.sty +Package: hypcap 2008/09/08 v1.10 Adjusting anchors of captions (HO) +) +\DUlineblockindent=\skip74 +) +\@indexfile=\write5 +\openout5 = `pNbody.idx'. + + +Writing index file pNbody.idx +(./pNbody.aux) +\openout1 = `pNbody.aux'. + +LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 105. +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 105. +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 105. +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 105. +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 105. +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 105. +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 105. +LaTeX Font Info: Try loading font information for TS1+cmr on input line 105. + + (/usr/share/texmf-texlive/tex/latex/base/ts1cmr.fd +File: ts1cmr.fd 1999/05/25 v2.5h Standard LaTeX font definitions +) +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 105. +LaTeX Font Info: ... okay on input line 105. +LaTeX Font Info: Try loading font information for T1+ptm on input line 105. + +(/usr/share/texmf-texlive/tex/latex/psnfss/t1ptm.fd +File: t1ptm.fd 2001/06/04 font definitions for T1/ptm. +) +(/usr/share/texmf/tex/context/base/supp-pdf.mkii +[Loading MPS to PDF converter (version 2006.09.02).] +\scratchcounter=\count121 +\scratchdimen=\dimen139 +\scratchbox=\box40 +\nofMPsegments=\count122 +\nofMParguments=\count123 +\everyMPshowfont=\toks31 +\MPscratchCnt=\count124 +\MPscratchDim=\dimen140 +\MPnumerator=\count125 +\everyMPtoPDFconversion=\toks32 +) +Package hyperref Info: Link coloring ON on input line 105. + (/usr/share/texmf-texlive/tex/latex/hyperref/nameref.sty +Package: nameref 2007/05/29 v2.31 Cross-referencing by name of section + +(/usr/share/texmf-texlive/tex/latex/oberdiek/refcount.sty +Package: refcount 2008/08/11 v3.1 Data extraction from references (HO) +) +\c@section@level=\count126 +) +LaTeX Info: Redefining \ref on input line 105. +LaTeX Info: Redefining \pageref on input line 105. + (./pNbody.out) +(./pNbody.out) +\@outlinefile=\write6 +\openout6 = `pNbody.out'. 
+ +\AtBeginShipoutBox=\box41 + +Underfull \hbox (badness 10000) in paragraph at lines 108--108 + + [] + +LaTeX Font Info: Try loading font information for T1+phv on input line 108. +(/usr/share/texmf-texlive/tex/latex/psnfss/t1phv.fd +File: t1phv.fd 2001/06/04 scalable font definitions for T1/phv. +) +LaTeX Font Info: Font shape `T1/phv/bx/n' in size <24.88> not available +(Font) Font shape `T1/phv/b/n' tried instead on input line 108. +LaTeX Font Info: Font shape `T1/phv/m/it' in size <17.28> not available +(Font) Font shape `T1/phv/m/sl' tried instead on input line 108. +LaTeX Font Info: Font shape `T1/phv/bx/it' in size <17.28> not available +(Font) Font shape `T1/phv/b/it' tried instead on input line 108. +LaTeX Font Info: Font shape `T1/phv/b/it' in size <17.28> not available +(Font) Font shape `T1/phv/b/sl' tried instead on input line 108. +LaTeX Font Info: Font shape `T1/phv/bx/n' in size <17.28> not available +(Font) Font shape `T1/phv/b/n' tried instead on input line 108. + [1 + +{/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map}] [2 + +] (./pNbody.toc +LaTeX Font Info: Font shape `T1/ptm/bx/n' in size <10> not available +(Font) Font shape `T1/ptm/b/n' tried instead on input line 2. +) +\tf@toc=\write7 +\openout7 = `pNbody.toc'. + + +Adding blank page after the table of contents. +LaTeX Font Info: Font shape `T1/phv/bx/n' in size <10> not available +(Font) Font shape `T1/phv/b/n' tried instead on input line 108. +pdfTeX warning (ext4): destination with the same identifier (name{page.i}) has +been already used, duplicate ignored + + \relax +l.108 \tableofcontents + [1 + +]pdfTeX warning (ext4): destination with the same identifier (name{page.ii}) ha +s been already used, duplicate ignored + + \relax +l.108 \tableofcontents + [2] [1 + +] [2 + +] +Chapter 1. +LaTeX Font Info: Font shape `T1/phv/bx/n' in size <14.4> not available +(Font) Font shape `T1/phv/b/n' tried instead on input line 115. + +File: cosmo1.png Graphic file (type png) + +File: cosmo1.png Graphic file (type png) + +[3 <./cosmo1.png>] [4 + +] +Chapter 2. +LaTeX Font Info: Font shape `T1/phv/bx/n' in size <12> not available +(Font) Font shape `T1/phv/b/n' tried instead on input line 208. +[5] +LaTeX Font Info: Try loading font information for T1+pcr on input line 212. + (/usr/share/texmf-texlive/tex/latex/psnfss/t1pcr.fd +File: t1pcr.fd 2001/06/04 font definitions for T1/pcr. +) + +File: edge-on-disk4.png Graphic file (type png) + +File: edge-on-disk4.png Graphic file (type png) + + [6] +Underfull \vbox (badness 2799) detected at line 299 + [] + +[7 <./edge-on-disk4.png>] [8] [9] [10 + +] +Chapter 3. +LaTeX Font Info: Font shape `T1/pcr/bx/n' in size <9> not available +(Font) Font shape `T1/pcr/b/n' tried instead on input line 505. +[11] [12] [13] [14] [15] +LaTeX Font Info: Font shape `T1/pcr/m/it' in size <9> not available +(Font) Font shape `T1/pcr/m/sl' tried instead on input line 817. + [16] [17] [18] +Chapter 4. +LaTeX Font Info: Font shape `T1/pcr/bx/n' in size <10> not available +(Font) Font shape `T1/pcr/b/n' tried instead on input line 1032. +LaTeX Font Info: Font shape `T1/phv/bx/n' in size <9> not available +(Font) Font shape `T1/phv/b/n' tried instead on input line 1040. +[19 + +] [20] [21] [22 + +] +Chapter 5. +LaTeX Font Info: Try loading font information for TS1+ptm on input line 1245 +. +(/usr/share/texmf-texlive/tex/latex/psnfss/ts1ptm.fd +File: ts1ptm.fd 2001/06/04 font definitions for TS1/ptm. 
+) (./pNbody.ind [23] +[24 + +] [25 + + +]) (./pNbody.aux) ) +Here is how much of TeX's memory you used: + 8049 strings out of 493848 + 109008 string characters out of 1152823 + 214613 words of memory out of 3000000 + 11051 multiletter control sequences out of 15000+50000 + 62655 words of font info for 71 fonts, out of 3000000 for 9000 + 715 hyphenation exceptions out of 8191 + 45i,13n,45p,608b,481s stack positions out of 5000i,500n,10000p,200000b,50000s +{/usr/share/texmf-texlive/fonts/enc/dvips/base/8r.enc} +Output written on pNbody.pdf (29 pages, 349084 bytes). +PDF statistics: + 319 PDF objects out of 1000 (max. 8388607) + 75 named destinations out of 1000 (max. 500000) + 139 words of extra memory for PDF output out of 10000 (max. 10000000) + diff --git a/Doc/newdoc/_build/latex/pNbody.out b/Doc/newdoc/_build/latex/pNbody.out new file mode 100644 index 0000000..8d792f2 --- /dev/null +++ b/Doc/newdoc/_build/latex/pNbody.out @@ -0,0 +1,16 @@ +\BOOKMARK [0][-]{chapter.1}{Overview}{} +\BOOKMARK [0][-]{chapter.2}{Installation}{} +\BOOKMARK [1][-]{section.2.1}{Prerequiste}{chapter.2} +\BOOKMARK [1][-]{section.2.2}{Installing from source}{chapter.2} +\BOOKMARK [1][-]{section.2.3}{Check the installation}{chapter.2} +\BOOKMARK [1][-]{section.2.4}{Default configuration}{chapter.2} +\BOOKMARK [1][-]{section.2.5}{Default parameters}{chapter.2} +\BOOKMARK [1][-]{section.2.6}{Examples}{chapter.2} +\BOOKMARK [0][-]{chapter.3}{Tutorial}{} +\BOOKMARK [1][-]{section.3.1}{Using pNbody with the python interpreter}{chapter.3} +\BOOKMARK [1][-]{section.3.2}{Using pNbody with scripts}{chapter.3} +\BOOKMARK [1][-]{section.3.3}{Using pNbody in parallel}{chapter.3} +\BOOKMARK [0][-]{chapter.4}{Reference}{} +\BOOKMARK [1][-]{section.4.1}{the Io module}{chapter.4} +\BOOKMARK [0][-]{chapter.5}{Indices and tables}{} +\BOOKMARK [0][-]{section*.14}{Index}{} diff --git a/Doc/newdoc/_build/latex/pNbody.pdf b/Doc/newdoc/_build/latex/pNbody.pdf new file mode 100644 index 0000000..4fd2fb2 Binary files /dev/null and b/Doc/newdoc/_build/latex/pNbody.pdf differ diff --git a/Doc/newdoc/_build/latex/pNbody.tex b/Doc/newdoc/_build/latex/pNbody.tex new file mode 100644 index 0000000..ec0eb19 --- /dev/null +++ b/Doc/newdoc/_build/latex/pNbody.tex @@ -0,0 +1,1260 @@ +% Generated by Sphinx. 
+\def\sphinxdocclass{report} +\documentclass[letterpaper,10pt,english]{sphinxmanual} +\usepackage[utf8]{inputenc} +\DeclareUnicodeCharacter{00A0}{\nobreakspace} +\usepackage[T1]{fontenc} +\usepackage{babel} +\usepackage{times} +\usepackage[Bjarne]{fncychap} +\usepackage{longtable} +\usepackage{sphinx} + + +\title{pNbody Documentation} +\date{August 26, 2011} +\release{4} +\author{Yves Revaz} +\newcommand{\sphinxlogo}{} +\renewcommand{\releasename}{Release} +\makeindex + +\makeatletter +\def\PYG@reset{\let\PYG@it=\relax \let\PYG@bf=\relax% + \let\PYG@ul=\relax \let\PYG@tc=\relax% + \let\PYG@bc=\relax \let\PYG@ff=\relax} +\def\PYG@tok#1{\csname PYG@tok@#1\endcsname} +\def\PYG@toks#1+{\ifx\relax#1\empty\else% + \PYG@tok{#1}\expandafter\PYG@toks\fi} +\def\PYG@do#1{\PYG@bc{\PYG@tc{\PYG@ul{% + \PYG@it{\PYG@bf{\PYG@ff{#1}}}}}}} +\def\PYG#1#2{\PYG@reset\PYG@toks#1+\relax+\PYG@do{#2}} + +\def\PYG@tok@gd{\def\PYG@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}} +\def\PYG@tok@gu{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}} +\def\PYG@tok@gt{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.25,0.82}{##1}}} +\def\PYG@tok@gs{\let\PYG@bf=\textbf} +\def\PYG@tok@gr{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}} +\def\PYG@tok@cm{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} +\def\PYG@tok@vg{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} +\def\PYG@tok@m{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} +\def\PYG@tok@mh{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} +\def\PYG@tok@cs{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}} +\def\PYG@tok@ge{\let\PYG@it=\textit} +\def\PYG@tok@vc{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} +\def\PYG@tok@il{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} +\def\PYG@tok@go{\def\PYG@tc##1{\textcolor[rgb]{0.19,0.19,0.19}{##1}}} +\def\PYG@tok@cp{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@gi{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}} +\def\PYG@tok@gh{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} +\def\PYG@tok@ni{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.84,0.33,0.22}{##1}}} +\def\PYG@tok@nl{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.13,0.44}{##1}}} +\def\PYG@tok@nn{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}} +\def\PYG@tok@no{\def\PYG@tc##1{\textcolor[rgb]{0.38,0.68,0.84}{##1}}} +\def\PYG@tok@na{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@nb{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@nc{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}} +\def\PYG@tok@nd{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.33,0.33,0.33}{##1}}} +\def\PYG@tok@ne{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@nf{\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.49}{##1}}} +\def\PYG@tok@si{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.44,0.63,0.82}{##1}}} +\def\PYG@tok@s2{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@vi{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} +\def\PYG@tok@nt{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.45}{##1}}} +\def\PYG@tok@nv{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} +\def\PYG@tok@s1{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@gp{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}} 
+\def\PYG@tok@sh{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@ow{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@sx{\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}} +\def\PYG@tok@bp{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@c1{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} +\def\PYG@tok@kc{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@c{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} +\def\PYG@tok@mf{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} +\def\PYG@tok@err{\def\PYG@bc##1{\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{##1}}} +\def\PYG@tok@kd{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@ss{\def\PYG@tc##1{\textcolor[rgb]{0.32,0.47,0.09}{##1}}} +\def\PYG@tok@sr{\def\PYG@tc##1{\textcolor[rgb]{0.14,0.33,0.53}{##1}}} +\def\PYG@tok@mo{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} +\def\PYG@tok@mi{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} +\def\PYG@tok@kn{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@o{\def\PYG@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} +\def\PYG@tok@kr{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@s{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@kp{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@w{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}} +\def\PYG@tok@kt{\def\PYG@tc##1{\textcolor[rgb]{0.56,0.13,0.00}{##1}}} +\def\PYG@tok@sc{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@sb{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@k{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} +\def\PYG@tok@se{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} +\def\PYG@tok@sd{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} + +\def\PYGZbs{\char`\\} +\def\PYGZus{\char`\_} +\def\PYGZob{\char`\{} +\def\PYGZcb{\char`\}} +\def\PYGZca{\char`\^} +% for compatibility with earlier versions +\def\PYGZat{@} +\def\PYGZlb{[} +\def\PYGZrb{]} +\makeatother + +\begin{document} + +\maketitle +\tableofcontents +\phantomsection\label{index::doc} + + +Contents: + + +\chapter{Overview} +\label{rst/Overview:welcome-to-pnbody-s-documentation}\label{rst/Overview::doc}\label{rst/Overview:overview} +\textbf{pNbody} is a parallelized python module toolbox designed to manipulate and display +interactively very large N-body systems. + +Its object-oriented approach allows the user to perform complicated manipulations +with only a few commands. + +As python is an interpreted language, the user can load an N-body system and explore it +interactively using the python interpreter. pNbody may also be used in python scripts. + +The module also contains graphical facilities designed to create maps of physical values of +the system, such as density maps, temperature maps and velocity maps. Stereo capabilities are +also implemented. + +pNbody is not limited to a single file format. Each user may define in a parameter file how to read +his or her preferred format. + +Its new parallel (mpi) facilities make it work on computer clusters without being limited by +memory consumption. It has already been tested with several million particles.
+\includegraphics{cosmo1.png} + + +\chapter{Installation} +\label{rst/Installation:installation}\label{rst/Installation::doc} +pNbody is currently only supported on Linux. + + +\section{Prerequisite} +\label{rst/Prerequiste:prerequiste}\label{rst/Prerequiste::doc} +The basic module of pNbody needs python and the following additional packages: +\begin{enumerate} +\item {} +Python 2.5.x, 2.6.x, 2.7.x + +\href{http://www.python.org}{http://www.python.org} + +\item {} +a C compiler + +gcc is fine \href{http://gcc.gnu.org/}{http://gcc.gnu.org/} + +\item {} +numpy-1.0.4 or higher + +\href{http://numpy.scipy.org/}{http://numpy.scipy.org/} + +\item {} +Imaging 1.1.5 or higher + +\href{http://www.pythonware.com/products/pil/}{http://www.pythonware.com/products/pil/} + +\end{enumerate} + +For additional but useful special functions: +\begin{enumerate} +\setcounter{enumi}{4} +\item {} +scipy 0.7 or higher + +\href{http://www.scipy.org/}{http://www.scipy.org/} + +\end{enumerate} + +For the parallel capabilities, an mpi distribution is needed (e.g. openmpi) +as well as the additional python mpi wrapper: +\begin{enumerate} +\setcounter{enumi}{5} +\item {} +mpi4py +\href{http://cheeseshop.python.org/pypi/mpi4py}{http://cheeseshop.python.org/pypi/mpi4py} + +\end{enumerate} + +In order to convert movies into standard formats (gif or mpeg), the following two applications are needed: +\begin{enumerate} +\item {} +convert (imagemagick) + +\href{http://www.imagemagick.org/script/index.php}{http://www.imagemagick.org/script/index.php} + +\item {} +mencoder (mplayer) + +\href{http://www.mplayerhq.hu/design7/news.html}{http://www.mplayerhq.hu/design7/news.html} + +\end{enumerate} + + +\section{Installing from source} +\label{rst/Installing_from_tarball:installing-from-source}\label{rst/Installing_from_tarball::doc} + +\subsection{Decompress the tarball} +\label{rst/Installing_from_tarball:decompress-the-tarball} +Decompress the tarball file: + +\begin{Verbatim}[commandchars=@\[\]] +tar -xzf pNbody-4.x.tar.gz +\end{Verbatim} + +then enter the directory: + +\begin{Verbatim}[commandchars=@\[\]] +cd pNbody-4.x +\end{Verbatim} + + +\subsection{Compile} +\label{rst/Installing_from_tarball:compile} +The compilation is performed using the standard command: + +\begin{Verbatim}[commandchars=@\[\]] +python setup.py build +\end{Verbatim} + +If one wants to install in a directory other than the default +python one, it is possible to use the standard \code{-{-}prefix} option at the install step: + +\begin{Verbatim}[commandchars=@\[\]] +python setup.py install --prefix other@_directory +\end{Verbatim} + + +\subsection{Install} +\label{rst/Installing_from_tarball:install} +Now, depending on your python installation, you may need to be root. +The module is installed with the following command: + +\begin{Verbatim}[commandchars=@\[\]] +python setup.py install +\end{Verbatim} + + +\section{Check the installation} +\label{rst/Test_the_installation:check-the-installation}\label{rst/Test_the_installation::doc} +You can check the installation by simply running the following +command: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{n}{pNbody\PYGZus{}checkall} +\end{Verbatim} + +This command must of course be in your path. This will be the case +if you did not specify any \code{-{-}prefix}.
On the contrary, if \code{-{-}prefix} +is set to, for example, \emph{localdir}, your \emph{PATH} environment +variable should contain: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{n}{localdir}\PYG{o}{/}\PYG{n+nb}{bin} +\end{Verbatim} + +and your \emph{PYTHONPATH} environment variable should contain: + +\begin{Verbatim}[commandchars=@\[\]] +localdir/lib/python2.x/site-packages/ +\end{Verbatim} + +to ensure that the \textbf{pNbody} package will be found. + +If everything goes well, you should see a lot of output on your screen, +as well as a window displaying an edge-on disk. + +\includegraphics{edge-on-disk4.png} + +Close the window once it appears. +The script should finally end with something like: + +\begin{Verbatim}[commandchars=@\[\]] +@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@# +Good News ! pNbody with format gadget is working ! +@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@#@# + +You are currently using the following paths + +HOME : /home/leo +PNBODYPATH : /home/leo/local/lib/python2.6/site-packages/pNbody +CONFIGDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config +PARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters +UNITSPARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters +PALETTEDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/rgb@_tables +PLUGINSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/plugins +OPTDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/opt +FORMATSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/formats +\end{Verbatim} + + +\section{Default configuration} +\label{rst/Default_configurations:default-configuration}\label{rst/Default_configurations::doc} +\textbf{pNbody} uses a set of parameter files, color tables and format files. +These files are provided by the installation and are by default stored in +the directory \code{site-packages/pNbody/config}. +To display where these files are taken from, you can use the command: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{n}{pNbody\PYGZus{}show}\PYG{o}{-}\PYG{n}{path} +\end{Verbatim} + +It is recommended that users use their own configuration files. To be automatically +recognized by \textbf{pNbody}, these files must be placed in the user's \code{\textasciitilde{}/.pNbody} directory. +\textbf{pNbody} provides a simple command to copy all parameter files into this directory. Simply +type: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{n}{pNbody\PYGZus{}copy}\PYG{o}{-}\PYG{n}{defaultconfig} +\end{Verbatim} + +and check the values of the new paths: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{n}{pNbody\PYGZus{}show}\PYG{o}{-}\PYG{n}{path} +\end{Verbatim} + +You can now freely modify the files contained in the configuration directory.
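+ +As a minimal check of which configuration directory is actually used, the short python snippet below may help. It is only a sketch relying on the standard library (it does not use any \textbf{pNbody} function) and it simply looks for the user directory \code{\textasciitilde{}/.pNbody} described above: + +\begin{Verbatim}[commandchars=@\[\]] +import os + +@# minimal sketch: look for a user configuration directory. +@# pNbody reads its configuration from ~/.pNbody when present, +@# otherwise the installed defaults are used. +userdir = os.path.expanduser("~/.pNbody") +if os.path.isdir(userdir): +    print("user configuration directory : " + userdir) +    for name in sorted(os.listdir(userdir)): +        print("  " + name) +else: +    print("no ~/.pNbody directory found, using the installed defaults") +\end{Verbatim} + +If nothing is listed, run \code{pNbody\_copy-defaultconfig} as shown above and check again with \code{pNbody\_show-path}.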
+ +By default, the content of the configuration directory is: + +\begin{tabulary}{\linewidth}{|L|L|L|} +\hline +\textbf{ +name +} & \textbf{ +type +} & \textbf{ +Content +}\\ +\hline + +defaultparameters + & +file + & +the default graphical parameters used by \textbf{pNbody} +\\ + +unitsparameters + & +file + & +the default units parameters used by \textbf{pNbody} +\\ + +formats + & +directory + & +specific class definition files used to read different file formats +\\ + +rgb\_tables + & +directory + & +color tables +\\ + +plugins + & +directory + & +optional plugins +\\ + +opt + & +directory + & +optional files +\\ +\hline +\end{tabulary} + + + +\section{Default parameters} +\label{rst/Default_parameters::doc}\label{rst/Default_parameters:default-parameters} +To see what default parameters \textbf{pNbody} uses, type: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{n}{pNbody\PYGZus{}show}\PYG{o}{-}\PYG{n}{parameters} +\end{Verbatim} + +The script returns the parameters taken from the files +\emph{defaultparameters} and \emph{unitsparameters}. +Their current values are displayed: + +\begin{Verbatim}[commandchars=@\[\]] +parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters + +---------------------------------------------------------------------------------------------------- + name meaning value (type) +---------------------------------------------------------------------------------------------------- + obs : observer = None (ArrayObs) + xp : observing position = None (List) + x0 : position of observer = None (List) + alpha : angle of the head = None (Float) + view : view = xz (String) + r@_obs : dist. to the observer = 201732.223771 (Float) + clip : clip planes = (100866.11188556443, 403464.44754225772) (Tuple) + cut : cut clip planes = no (String) + eye : name of the eye = None (String) + dist@_eye : distance between eyes = -0.0005 (Float) + foc : focal = 300.0 (Float) + persp : perspective = off (String) + shape : shape of the image = (512, 512) (Tuple) + size : pysical size = (6000, 6000) (Tuple) + frsp : frsp = 0.0 (Float) + space : space = pos (String) + mode : mode = m (String) + rendering : rendering mode = map (String) + filter@_name : name of the filter = None (String) + filter@_opts : filter options = @PYGZlb[]10, 10, 2, 2@PYGZrb[] (List) + scale : scale = log (String) + cd : cd = 0.0 (Float) + mn : mn = 0.0 (Float) + mx : mx = 0.0 (Float) + l@_n : number of levels = 15 (Int) + l@_min : min level = 0.0 (Float) + l@_max : max level = 0.0 (Float) + l@_kx : l@_kx = 10 (Int) + l@_ky : l@_ky = 10 (Int) + l@_color : level color = 0 (Int) + l@_crush : crush background = no (String) + b@_weight : box line weight = 0 (Int) + b@_xopts : x axis options = None (Tuple) + b@_yopts : y axis options = None (Tuple) + b@_color : line color = 255 (Int) + +parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters + +---------------------------------------------------------------------------------------------------- + name meaning value (type) +---------------------------------------------------------------------------------------------------- + xi : hydrogen mass fraction = 0.76 (Float) + ionisation : ionisation flag = 1 (Int) + metalicity : metalicity index = 4 (Int) + Nsph : number of sph neighbors = 50 (Int) + gamma : adiabatic index = 1.66666666667 (Float) + coolingfile : Cooling file = @textasciitilde[]/.Nbody/cooling.dat (String) + HubbleParam : HubbleParam = 1.0 (Float) + UnitLength@_in@_cm : UnitLength in cm = 3.085e+21 
(Float)
+ UnitMass@_in@_g : UnitMass in g = 4.435693e+44 (Float)
+ UnitVelocity@_in@_cm@_per@_s : UnitVelocity in cm per s = 97824708.2699 (Float)
+\end{Verbatim}
+
+
+\section{Examples}
+\label{rst/Examples::doc}\label{rst/Examples:examples}
+A series of examples is provided by \textbf{pNbody} in the directory
+\code{PNBODYPATH/examples}, where PNBODYPATH is obtained
+with the command:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{n}{pNbody\PYGZus{}show}\PYG{o}{-}\PYG{n}{path}
+\end{Verbatim}
+
+
+\chapter{Tutorial}
+\label{rst/Tutorial::doc}\label{rst/Tutorial:tutorial}
+
+\section{Using \textbf{pNbody} with the python interpreter}
+\label{rst/Tutorial_interpreter:using-pnbody-with-the-python-interpreter}\label{rst/Tutorial_interpreter::doc}
+In order to use this tutorial, you first need to copy some examples provided
+with \textbf{pNbody}. This can be done by typing:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{n}{pNbody\PYGZus{}copy}\PYG{o}{-}\PYG{n}{examples}
+\end{Verbatim}
+
+By default, this creates the directory \code{\textasciitilde{}/pnbody\_examples} in your home.
+Move to this directory:
+
+\begin{Verbatim}[commandchars=@\[\]]
+cd @textasciitilde[]/pnbody@_examples
+\end{Verbatim}
+
+Then you can simply follow the instructions below.
+First, start the python interpreter:
+
+\begin{Verbatim}[commandchars=@\[\]]
+leo@PYGZat[]obsrevaz:@textasciitilde[]/pnbody@_examples python
+Python 2.4.2 (@#2, Jul 13 2006, 15:26:48)
+@PYGZlb[]GCC 4.0.1 (4.0.1-5mdk for Mandriva Linux release 2006.0)@PYGZrb[] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+@textgreater[]@textgreater[]@textgreater[]
+\end{Verbatim}
+
+Now, you can load the \textbf{pNbody} module:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{k+kn}{from} \PYG{n+nn}{pNbody} \PYG{k+kn}{import} \PYG{o}{*}
+\end{Verbatim}
+
+
+\subsection{Creating \textbf{pNbody} objects from scratch}
+\label{rst/Tutorial_interpreter:creating-pnbody-objects-from-scratch}
+We can start by creating a default \textbf{pNbody} object and getting information about it:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb} \PYG{o}{=} \PYG{n}{Nbody}\PYG{p}{(}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{info}\PYG{p}{(}\PYG{p}{)}
+\PYG{g+go}{-----------------------------------}
+\PYG{g+go}{particle file : ['file.dat']}
+\PYG{g+go}{ftype : 'Nbody\PYGZus{}default'}
+\PYG{g+go}{mxntpe : 6}
+\PYG{g+go}{nbody : 0}
+\PYG{g+go}{nbody\PYGZus{}tot : 0}
+\PYG{g+go}{npart : [0, 0, 0, 0, 0, 0]}
+\PYG{g+go}{npart\PYGZus{}tot : [0, 0, 0, 0, 0, 0]}
+\PYG{g+go}{mass\PYGZus{}tot : 0.0}
+\PYG{g+go}{byteorder : 'little'}
+\PYG{g+go}{pio : 'no'}
+\PYG{g+go}{\textgreater{}\textgreater{}\textgreater{}}
+\end{Verbatim}
+
+All variables linked to the object nb are accessible by typing nb.
followed by the variable name:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{nbody}
+\PYG{g+go}{0}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{mass\PYGZus{}tot}
+\PYG{g+go}{0.0}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{pio}
+\PYG{g+go}{'no'}
+\end{Verbatim}
+
+Now, you can create an object by giving the positions of particles:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{pos} \PYG{o}{=} \PYG{n}{ones}\PYG{p}{(}\PYG{p}{(}\PYG{l+m+mi}{10}\PYG{p}{,}\PYG{l+m+mi}{3}\PYG{p}{)}\PYG{p}{,}\PYG{n}{float32}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb} \PYG{o}{=} \PYG{n}{Nbody}\PYG{p}{(}\PYG{n}{pos}\PYG{o}{=}\PYG{n}{pos}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{info}\PYG{p}{(}\PYG{p}{)}
+\PYG{g+go}{-----------------------------------}
+\PYG{g+go}{particle file : ['file.dat']}
+\PYG{g+go}{ftype : 'Nbody\PYGZus{}default'}
+\PYG{g+go}{mxntpe : 6}
+\PYG{g+go}{nbody : 10}
+\PYG{g+go}{nbody\PYGZus{}tot : 10}
+\PYG{g+go}{npart : array([10, 0, 0, 0, 0, 0])}
+\PYG{g+go}{npart\PYGZus{}tot : array([10, 0, 0, 0, 0, 0])}
+\PYG{g+go}{mass\PYGZus{}tot : 1.00000011921}
+\PYG{g+go}{byteorder : 'little'}
+\PYG{g+go}{pio : 'no'}
+
+\PYG{g+go}{len pos : 10}
+\PYG{g+go}{pos[0] : array([ 1., 1., 1.], dtype=float32)}
+\PYG{g+go}{pos[-1] : array([ 1., 1., 1.], dtype=float32)}
+\PYG{g+go}{len vel : 10}
+\PYG{g+go}{vel[0] : array([ 0., 0., 0.], dtype=float32)}
+\PYG{g+go}{vel[-1] : array([ 0., 0., 0.], dtype=float32)}
+\PYG{g+go}{len mass : 10}
+\PYG{g+go}{mass[0] : 0.10000000149}
+\PYG{g+go}{mass[-1] : 0.10000000149}
+\PYG{g+go}{len num : 10}
+\PYG{g+go}{num[0] : 0}
+\PYG{g+go}{num[-1] : 9}
+\PYG{g+go}{len tpe : 10}
+\PYG{g+go}{tpe[0] : 0}
+\PYG{g+go}{tpe[-1] : 0}
+\end{Verbatim}
+
+In this case, you can see that the class automatically initializes other array variables
+(vel, mass, num and rsp) with default values. Only the first and the last element of
+each defined vector are displayed by the info() method. All defined arrays and array elements
+are easily accessible using the numarray conventions. For example, to display and
+change the positions of the first three particles, type:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{pos}\PYG{p}{[}\PYG{p}{:}\PYG{l+m+mi}{3}\PYG{p}{]}
+\PYG{g+go}{array([[ 1., 1., 1.],}
+\PYG{g+go}{ [ 1., 1., 1.],}
+\PYG{g+go}{ [ 1., 1., 1.]], type=float32)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{pos}\PYG{p}{[}\PYG{p}{:}\PYG{l+m+mi}{3}\PYG{p}{]}\PYG{o}{=}\PYG{l+m+mi}{2}\PYG{o}{*}\PYG{n}{ones}\PYG{p}{(}\PYG{p}{(}\PYG{l+m+mi}{3}\PYG{p}{,}\PYG{l+m+mi}{3}\PYG{p}{)}\PYG{p}{,}\PYG{n}{float32}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{pos}\PYG{p}{[}\PYG{p}{:}\PYG{l+m+mi}{3}\PYG{p}{]}
+\PYG{g+go}{array([[ 2., 2., 2.],}
+\PYG{g+go}{ [ 2., 2., 2.],}
+\PYG{g+go}{ [ 2., 2., 2.]], type=float32)}
+\end{Verbatim}
+
+
+\subsection{Open from existing file}
+\label{rst/Tutorial_interpreter:open-from-existing-file}
+Now, let's try to open the gadget snapshot gadget\_z00.dat.
This is achieved by typing: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb} \PYG{o}{=} \PYG{n}{Nbody}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{gadget\PYGZus{}z00.dat}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{ftype}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{gadget}\PYG{l+s}{'}\PYG{p}{)} +\end{Verbatim} + +Again, informatins on this snapshot may be obtained using the instance info(): + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{info}\PYG{p}{(}\PYG{p}{)} +\PYG{g+go}{-----------------------------------} +\PYG{g+go}{particle file : ['gadget\PYGZus{}z00.dat']} +\PYG{g+go}{ftype : 'Nbody\PYGZus{}gadget'} +\PYG{g+go}{mxntpe : 6} +\PYG{g+go}{nbody : 20560} +\PYG{g+go}{nbody\PYGZus{}tot : 20560} +\PYG{g+go}{npart : array([ 9160, 10280, 0, 0, 1120, 0])} +\PYG{g+go}{npart\PYGZus{}tot : array([ 9160, 10280, 0, 0, 1120, 0])} +\PYG{g+go}{mass\PYGZus{}tot : 79.7066955566} +\PYG{g+go}{byteorder : 'little'} +\PYG{g+go}{pio : 'no'} + +\PYG{g+go}{len pos : 20560} +\PYG{g+go}{pos[0] : array([-1294.48828125, -2217.09765625, -9655.49609375], dtype=float32)} +\PYG{g+go}{pos[-1] : array([ -986.0625 , -2183.83203125, 4017.04296875], dtype=float32)} +\PYG{g+go}{len vel : 20560} +\PYG{g+go}{vel[0] : array([ -69.80491638, 60.56475067, -166.32981873], dtype=float32)} +\PYG{g+go}{vel[-1] : array([-140.59715271, -66.44669342, -37.01613235], dtype=float32)} +\PYG{g+go}{len mass : 20560} +\PYG{g+go}{mass[0] : 0.00108565215487} +\PYG{g+go}{mass[-1] : 0.00108565215487} +\PYG{g+go}{len num : 20560} +\PYG{g+go}{num[0] : 21488} +\PYG{g+go}{num[-1] : 1005192} +\PYG{g+go}{len tpe : 20560} +\PYG{g+go}{tpe[0] : 0} +\PYG{g+go}{tpe[-1] : 4} + +\PYG{g+go}{atime : 1.0} +\PYG{g+go}{redshift : 2.22044604925e-16} +\PYG{g+go}{flag\PYGZus{}sfr : 1} +\PYG{g+go}{flag\PYGZus{}feedback : 1} +\PYG{g+go}{nall : [ 9160 10280 0 0 1120 0]} +\PYG{g+go}{flag\PYGZus{}cooling : 1} +\PYG{g+go}{num\PYGZus{}files : 1} +\PYG{g+go}{boxsize : 100000.0} +\PYG{g+go}{omega0 : 0.3} +\PYG{g+go}{omegalambda : 0.7} +\PYG{g+go}{hubbleparam : 0.7} +\PYG{g+go}{flag\PYGZus{}age : 0} +\PYG{g+go}{flag\PYGZus{}metals : 0} +\PYG{g+go}{nallhw : [0 0 0 0 0 0]} +\PYG{g+go}{flag\PYGZus{}entr\PYGZus{}ic : 0} +\PYG{g+go}{critical\PYGZus{}energy\PYGZus{}spec: 0.0} + +\PYG{g+go}{len u : 20560} +\PYG{g+go}{u[0] : 6606.63037109} +\PYG{g+go}{u[-1] : 0.0} +\PYG{g+go}{len rho : 20560} +\PYG{g+go}{rho[0] : 7.05811936674e-11} +\PYG{g+go}{rho[-1] : 0.0} +\PYG{g+go}{len rsp : 20560} +\PYG{g+go}{rsp[0] : 909.027587891} +\PYG{g+go}{rsp[-1] : 0.0} +\PYG{g+go}{len opt : 20560} +\PYG{g+go}{opt[0] : 446292.5625} +\PYG{g+go}{opt[-1] : 0.0} +\end{Verbatim} + +You can obtain informations on physical values, like the center of mass +or the total angular momentum vector by typing: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{cm}\PYG{p}{(}\PYG{p}{)} +\PYG{g+go}{array([-1649.92651346, 609.98256428, -1689.04011033])} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{Ltot}\PYG{p}{(}\PYG{p}{)} +\PYG{g+go}{array([-1112078.125 , -755964.1875, -1536667.125 ], dtype=float32)} +\end{Verbatim} + +In order to visualise the model in position space, it is possible to +generate a surface density map of it using the display instance: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} 
}\PYG{n}{nb}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{10000}\PYG{p}{,}\PYG{l+m+mi}{10000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+You can now perform some operations on the model in order to explore a specific
+region. First, translate the model in position space:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{translate}\PYG{p}{(}\PYG{p}{[}\PYG{l+m+mi}{3125}\PYG{p}{,}\PYG{o}{-}\PYG{l+m+mi}{4690}\PYG{p}{,}\PYG{l+m+mi}{1720}\PYG{p}{]}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{10000}\PYG{p}{,}\PYG{l+m+mi}{10000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{1000}\PYG{p}{,}\PYG{l+m+mi}{1000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+You can now rotate the model around:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{rotate}\PYG{p}{(}\PYG{n}{angle}\PYG{o}{=}\PYG{n}{pi}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{1000}\PYG{p}{,}\PYG{l+m+mi}{1000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+You can now display a temperature map of the model.
First,
+create a new object with only the gas particles:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}gas} \PYG{o}{=} \PYG{n}{nb}\PYG{o}{.}\PYG{n}{select}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{gas}\PYG{l+s}{'}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}gas}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{1000}\PYG{p}{,}\PYG{l+m+mi}{1000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+Now, display the mass-weighted temperature map:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}gas}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{1000}\PYG{p}{,}\PYG{l+m+mi}{1000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{rainbow4}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{mode}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{T}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{filter\PYGZus{}name}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{gaussian}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+
+\subsection{Selection of particles}
+\label{rst/Tutorial_interpreter:selection-of-particles}
+You can select only particles within a radius smaller than 500 (in user units)
+with respect to the center:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub} \PYG{o}{=} \PYG{n}{nb}\PYG{o}{.}\PYG{n}{selectc}\PYG{p}{(}\PYG{p}{(}\PYG{n}{nb}\PYG{o}{.}\PYG{n}{rxyz}\PYG{p}{(}\PYG{p}{)}\PYG{o}{\textless{}}\PYG{l+m+mi}{500}\PYG{p}{)}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{1000}\PYG{p}{,}\PYG{l+m+mi}{1000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+Now, rename the new model and save it:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub}\PYG{o}{.}\PYG{n}{rename}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{gadget\PYGZus{}z00\PYGZus{}sub.dat}\PYG{l+s}{'}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub}\PYG{o}{.}\PYG{n}{write}\PYG{p}{(}\PYG{p}{)}
+\end{Verbatim}
+
+A new gadget file has been created and saved in the current directory.
+We can now select particles as a function of the temperature.
+First, display the maximum temperature among all gas particles,
+then select particles above a given temperature and finally save their identifiers (the variable num) in the file `T11.num':
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{log10}\PYG{p}{(}\PYG{n+nb}{max}\PYG{p}{(}\PYG{n}{nb\PYGZus{}gas}\PYG{o}{.}\PYG{n}{T}\PYG{p}{(}\PYG{p}{)}\PYG{p}{)}\PYG{p}{)}
+\PYG{g+go}{12.8707923889}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub} \PYG{o}{=} \PYG{n}{nb\PYGZus{}gas}\PYG{o}{.}\PYG{n}{selectc}\PYG{p}{(} \PYG{p}{(}\PYG{n}{nb\PYGZus{}gas}\PYG{o}{.}\PYG{n}{T}\PYG{p}{(}\PYG{p}{)}\PYG{o}{\textgreater{}}\PYG{l+m+mf}{1e11}\PYG{p}{)} \PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub}\PYG{o}{.}\PYG{n}{write\PYGZus{}num}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{T11.num}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+Now open a new snapshot from the same simulation, but at a different redshift, and find the
+particles that had a temperature higher than $10^{11}$ in the previous snapshot:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb} \PYG{o}{=} \PYG{n}{Nbody}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{gadget\PYGZus{}z40.dat}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{ftype}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{gadget}\PYG{l+s}{'}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{10000}\PYG{p}{,}\PYG{l+m+mi}{10000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub} \PYG{o}{=} \PYG{n}{nb}\PYG{o}{.}\PYG{n}{selectp}\PYG{p}{(}\PYG{n+nb}{file}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{T11.num}\PYG{l+s}{'}\PYG{p}{)}
+\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb\PYGZus{}sub}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{10000}\PYG{p}{,}\PYG{l+m+mi}{10000}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{light}\PYG{l+s}{'}\PYG{p}{)}
+\end{Verbatim}
+
+Now, instead of saving it in a gadget file, save it in a binary file format.
+You simply need to call the set\_ftype instance before saving it: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb} \PYG{o}{=} \PYG{n}{nb}\PYG{o}{.}\PYG{n}{set\PYGZus{}ftype}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{binary}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{rename}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{binary.dat}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb}\PYG{o}{.}\PYG{n}{write}\PYG{p}{(}\PYG{p}{)} +\end{Verbatim} + + +\subsection{Merging two models} +\label{rst/Tutorial_interpreter:merging-two-models} +As a last example, we show how two \textbf{pNbody} models can be easyly merged with only 11 lines: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb1} \PYG{o}{=} \PYG{n}{Nbody}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{disk.dat}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{ftype}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{gadget}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb2} \PYG{o}{=} \PYG{n}{Nbody}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{disk.dat}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{ftype}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{gadget}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb1}\PYG{o}{.}\PYG{n}{rotate}\PYG{p}{(}\PYG{n}{angle}\PYG{o}{=}\PYG{n}{pi}\PYG{o}{/}\PYG{l+m+mi}{4}\PYG{p}{,}\PYG{n}{axis}\PYG{o}{=}\PYG{p}{[}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{1}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{]}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb1}\PYG{o}{.}\PYG{n}{translate}\PYG{p}{(}\PYG{p}{[}\PYG{o}{-}\PYG{l+m+mi}{150}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{]}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb1}\PYG{o}{.}\PYG{n}{vel} \PYG{o}{=} \PYG{n}{nb1}\PYG{o}{.}\PYG{n}{vel} \PYG{o}{+} \PYG{p}{[}\PYG{l+m+mi}{50}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{]} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb2}\PYG{o}{.}\PYG{n}{rotate}\PYG{p}{(}\PYG{n}{angle}\PYG{o}{=}\PYG{n}{pi}\PYG{o}{/}\PYG{l+m+mi}{4}\PYG{p}{,}\PYG{n}{axis}\PYG{o}{=}\PYG{p}{[}\PYG{l+m+mi}{1}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{]}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb2}\PYG{o}{.}\PYG{n}{translate}\PYG{p}{(}\PYG{p}{[}\PYG{o}{+}\PYG{l+m+mi}{150}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{50}\PYG{p}{]}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb2}\PYG{o}{.}\PYG{n}{vel} \PYG{o}{=} \PYG{n}{nb2}\PYG{o}{.}\PYG{n}{vel} \PYG{o}{-} \PYG{p}{[}\PYG{l+m+mi}{50}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{]} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3} \PYG{o}{=} \PYG{n}{nb1} \PYG{o}{+} \PYG{n}{nb2} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{rename}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{merge.dat}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{write}\PYG{p}{(}\PYG{p}{)} +\end{Verbatim} + +Now display the result from different point of view: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} 
}\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{300}\PYG{p}{,}\PYG{l+m+mi}{300}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{lut2}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3} \PYG{o}{=} \PYG{n}{nb3}\PYG{o}{.}\PYG{n}{select}\PYG{p}{(}\PYG{l+s}{'}\PYG{l+s}{disk}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{300}\PYG{p}{,}\PYG{l+m+mi}{300}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{lut2}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{view}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{xz}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{300}\PYG{p}{,}\PYG{l+m+mi}{300}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{lut2}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{view}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{xy}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{300}\PYG{p}{,}\PYG{l+m+mi}{300}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{lut2}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{view}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{yz}\PYG{l+s}{'}\PYG{p}{)} +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{300}\PYG{p}{,}\PYG{l+m+mi}{300}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{lut2}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{xp}\PYG{o}{=}\PYG{p}{[}\PYG{o}{-}\PYG{l+m+mi}{100}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{]}\PYG{p}{)} +\end{Verbatim} + +or save it into a gif file: + +\begin{Verbatim}[commandchars=\\\{\}] +\PYG{g+gp}{\textgreater{}\textgreater{}\textgreater{} }\PYG{n}{nb3}\PYG{o}{.}\PYG{n}{display}\PYG{p}{(}\PYG{n}{size}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{300}\PYG{p}{,}\PYG{l+m+mi}{300}\PYG{p}{)}\PYG{p}{,}\PYG{n}{shape}\PYG{o}{=}\PYG{p}{(}\PYG{l+m+mi}{256}\PYG{p}{,}\PYG{l+m+mi}{256}\PYG{p}{)}\PYG{p}{,}\PYG{n}{palette}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{lut2}\PYG{l+s}{'}\PYG{p}{,}\PYG{n}{xp}\PYG{o}{=}\PYG{p}{[}\PYG{o}{-}\PYG{l+m+mi}{100}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{,}\PYG{l+m+mi}{0}\PYG{p}{]}\PYG{p}{,}\PYG{n}{save}\PYG{o}{=}\PYG{l+s}{'}\PYG{l+s}{image.gif}\PYG{l+s}{'}\PYG{p}{)} +\end{Verbatim} + + +\section{Using pNbody with scripts} +\label{rst/Tutorial_scripts:using-pnbody-with-scripts}\label{rst/Tutorial_scripts::doc} +In addition to using \textbf{pNbody} in the python interpreter, +it is very useful to use \textbf{pNbody} in python scripts. 
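+
+As a starting point, here is a minimal, self-contained script assembled only from
+the interpreter commands used above (the file names are just examples): it opens a
+gadget snapshot, keeps the gas particles and writes them to a new gadget file.
+The slice.py example discussed below goes one step further.
+
+\begin{Verbatim}[commandchars=\\\{\}]
+#!/usr/bin/env python
+# a sketch based on the tutorial commands above; adapt the file names
+from pNbody import *
+
+nb = Nbody('gadget_z00.dat', ftype='gadget')   # open the snapshot
+nb_gas = nb.select('gas')                      # keep only the gas particles
+nb_gas.rename('gadget_z00_gas.dat')            # choose the output file name
+nb_gas.write()                                 # write a new gadget file
+\end{Verbatim}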
Usually a python script +begin by the line \#!/usr/bin/env python and must be executable: + +\begin{Verbatim}[commandchars=@\[\]] +chmod a+x file.py +\end{Verbatim} + +The following example (slice.py), we show how to write a script that opens a gadget file, +select gas particles and cut a thin slice +\begin{gather} +\begin{split}-1000\m@ne + \DOCH + \fi + \interlinepenalty\@M + \DOTI{#1} + } + } +} + +% Redefine description environment so that it is usable inside fulllineitems. +% +\renewcommand{\description}{% + \list{}{\labelwidth\z@% + \itemindent-\leftmargin% + \labelsep5pt% + \let\makelabel=\descriptionlabel}} + +% Definition lists; requested by AMK for HOWTO documents. Probably useful +% elsewhere as well, so keep in in the general style support. +% +\newenvironment{definitions}{% + \begin{description}% + \def\term##1{\item[##1]\mbox{}\\*[0mm]} +}{% + \end{description}% +} + +% Tell TeX about pathological hyphenation cases: +\hyphenation{Base-HTTP-Re-quest-Hand-ler} + + +% The following is stuff copied from docutils' latex writer. +% +\newcommand{\optionlistlabel}[1]{\bf #1 \hfill} +\newenvironment{optionlist}[1] +{\begin{list}{} + {\setlength{\labelwidth}{#1} + \setlength{\rightmargin}{1cm} + \setlength{\leftmargin}{\rightmargin} + \addtolength{\leftmargin}{\labelwidth} + \addtolength{\leftmargin}{\labelsep} + \renewcommand{\makelabel}{\optionlistlabel}} +}{\end{list}} + +\newlength{\lineblockindentation} +\setlength{\lineblockindentation}{2.5em} +\newenvironment{lineblock}[1] +{\begin{list}{} + {\setlength{\partopsep}{\parskip} + \addtolength{\partopsep}{\baselineskip} + \topsep0pt\itemsep0.15\baselineskip\parsep0pt + \leftmargin#1} + \raggedright} +{\end{list}} + +% Redefine includgraphics for avoiding images larger than the screen size +% If the size is not specified. +\let\py@Oldincludegraphics\includegraphics + +\newbox\image@box% +\newdimen\image@width% +\renewcommand\includegraphics[2][\@empty]{% + \ifx#1\@empty% + \setbox\image@box=\hbox{\py@Oldincludegraphics{#2}}% + \image@width\wd\image@box% + \ifdim \image@width>\linewidth% + \setbox\image@box=\hbox{\py@Oldincludegraphics[width=\linewidth]{#2}}% + \box\image@box% + \else% + \py@Oldincludegraphics{#2}% + \fi% + \else% + \py@Oldincludegraphics[#1]{#2}% + \fi% +} + + +% Fix the index and bibliography environments to add an entry to the Table of +% Contents; this is much nicer than just having to jump to the end of the book +% and flip around, especially with multiple indexes. +% +\let\py@OldTheindex=\theindex +\renewcommand{\theindex}{ + \cleardoublepage + \phantomsection + \py@OldTheindex + \addcontentsline{toc}{chapter}{\indexname} +} + +\let\py@OldThebibliography=\thebibliography +\renewcommand{\thebibliography}[1]{ + \cleardoublepage + \phantomsection + \py@OldThebibliography{1} + \addcontentsline{toc}{chapter}{\bibname} +} + +% Include hyperref last. +\RequirePackage[colorlinks,breaklinks, + linkcolor=InnerLinkColor,filecolor=OuterLinkColor, + menucolor=OuterLinkColor,urlcolor=OuterLinkColor, + citecolor=InnerLinkColor]{hyperref} +% Fix anchor placement for figures with captions. +% (Note: we don't use a package option here; instead, we give an explicit +% \capstart for figures that actually have a caption.) 
+\RequirePackage{hypcap} + +% From docutils.writers.latex2e +\providecommand{\DUspan}[2]{% + {% group ("span") to limit the scope of styling commands + \@for\node@class@name:=#1\do{% + \ifcsname docutilsrole\node@class@name\endcsname% + \csname docutilsrole\node@class@name\endcsname% + \fi% + }% + {#2}% node content + }% close "span" +} + +\providecommand*{\DUprovidelength}[2]{ + \ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{} +} + +\DUprovidelength{\DUlineblockindent}{2.5em} +\ifthenelse{\isundefined{\DUlineblock}}{ + \newenvironment{DUlineblock}[1]{% + \list{}{\setlength{\partopsep}{\parskip} + \addtolength{\partopsep}{\baselineskip} + \setlength{\topsep}{0pt} + \setlength{\itemsep}{0.15\baselineskip} + \setlength{\parsep}{0pt} + \setlength{\leftmargin}{#1}} + \raggedright + } + {\endlist} +}{} diff --git a/Doc/newdoc/_build/latex/sphinxhowto.cls b/Doc/newdoc/_build/latex/sphinxhowto.cls new file mode 100644 index 0000000..1ebdd43 --- /dev/null +++ b/Doc/newdoc/_build/latex/sphinxhowto.cls @@ -0,0 +1,81 @@ +% +% sphinxhowto.cls for Sphinx (http://sphinx.pocoo.org/) +% + +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{sphinxhowto}[2009/06/02 Document class (Sphinx HOWTO)] + +% 'oneside' option overriding the 'twoside' default +\newif\if@oneside +\DeclareOption{oneside}{\@onesidetrue} +% Pass remaining document options to the parent class. +\DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} +\ProcessOptions\relax + +% Default to two-side document +\if@oneside +% nothing to do (oneside is the default) +\else +\PassOptionsToClass{twoside}{\sphinxdocclass} +\fi + +\LoadClass{\sphinxdocclass} + +% Set some sane defaults for section numbering depth and TOC depth. You can +% reset these counters in your preamble. +% +\setcounter{secnumdepth}{2} + +% Change the title page to look a bit better, and fit in with the fncychap +% ``Bjarne'' style a bit better. +% +\renewcommand{\maketitle}{ + \rule{\textwidth}{1pt} + \ifsphinxpdfoutput + \begingroup + % These \defs are required to deal with multi-line authors; it + % changes \\ to ', ' (comma-space), making it pass muster for + % generating document info in the PDF file. 
+ \def\\{, } + \def\and{and } + \pdfinfo{ + /Author (\@author) + /Title (\@title) + } + \endgroup + \fi + \begin{flushright} + \sphinxlogo% + {\rm\Huge\py@HeaderFamily \@title} \par + {\em\large\py@HeaderFamily \py@release\releaseinfo} \par + \vspace{25pt} + {\Large\py@HeaderFamily + \begin{tabular}[t]{c} + \@author + \end{tabular}} \par + \vspace{25pt} + \@date \par + \py@authoraddress \par + \end{flushright} + \@thanks + \setcounter{footnote}{0} + \let\thanks\relax\let\maketitle\relax + %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} +} + +\let\py@OldTableofcontents=\tableofcontents +\renewcommand{\tableofcontents}{ + \begingroup + \parskip = 0mm + \py@OldTableofcontents + \endgroup + \rule{\textwidth}{1pt} + \vspace{12pt} +} + +\@ifundefined{fancyhf}{ + \pagestyle{plain}}{ + \pagestyle{normal}} % start this way; change for +\pagenumbering{arabic} % ToC & chapters + +\thispagestyle{empty} diff --git a/Doc/newdoc/_build/latex/sphinxmanual.cls b/Doc/newdoc/_build/latex/sphinxmanual.cls new file mode 100644 index 0000000..5751779 --- /dev/null +++ b/Doc/newdoc/_build/latex/sphinxmanual.cls @@ -0,0 +1,122 @@ +% +% sphinxmanual.cls for Sphinx (http://sphinx.pocoo.org/) +% + +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{sphinxmanual}[2009/06/02 Document class (Sphinx manual)] + +% chapters starting at odd pages (overridden by 'openany' document option) +\PassOptionsToClass{openright}{\sphinxdocclass} + +% 'oneside' option overriding the 'twoside' default +\newif\if@oneside +\DeclareOption{oneside}{\@onesidetrue} +% Pass remaining document options to the parent class. +\DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} +\ProcessOptions\relax + +% Defaults two-side document +\if@oneside +% nothing to do (oneside is the default) +\else +\PassOptionsToClass{twoside}{\sphinxdocclass} +\fi + +\LoadClass{\sphinxdocclass} + +% Set some sane defaults for section numbering depth and TOC depth. You can +% reset these counters in your preamble. +% +\setcounter{secnumdepth}{2} +\setcounter{tocdepth}{1} + +% Change the title page to look a bit better, and fit in with the fncychap +% ``Bjarne'' style a bit better. +% +\renewcommand{\maketitle}{% + \begin{titlepage}% + \let\footnotesize\small + \let\footnoterule\relax + \rule{\textwidth}{1pt}% + \ifsphinxpdfoutput + \begingroup + % These \defs are required to deal with multi-line authors; it + % changes \\ to ', ' (comma-space), making it pass muster for + % generating document info in the PDF file. + \def\\{, } + \def\and{and } + \pdfinfo{ + /Author (\@author) + /Title (\@title) + } + \endgroup + \fi + \begin{flushright}% + \sphinxlogo% + {\rm\Huge\py@HeaderFamily \@title \par}% + {\em\LARGE\py@HeaderFamily \py@release\releaseinfo \par} + \vfill + {\LARGE\py@HeaderFamily + \begin{tabular}[t]{c} + \@author + \end{tabular} + \par} + \vfill\vfill + {\large + \@date \par + \vfill + \py@authoraddress \par + }% + \end{flushright}%\par + \@thanks + \end{titlepage}% + \cleardoublepage% + \setcounter{footnote}{0}% + \let\thanks\relax\let\maketitle\relax + %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} +} + + +% Catch the end of the {abstract} environment, but here make sure the abstract +% is followed by a blank page if the 'openright' option is used. 
+% +\let\py@OldEndAbstract=\endabstract +\renewcommand{\endabstract}{ + \if@openright + \ifodd\value{page} + \typeout{Adding blank page after the abstract.} + \vfil\pagebreak + \fi + \fi + \py@OldEndAbstract +} + +% This wraps the \tableofcontents macro with all the magic to get the spacing +% right and have the right number of pages if the 'openright' option has been +% used. This eliminates a fair amount of crud in the individual document files. +% +\let\py@OldTableofcontents=\tableofcontents +\renewcommand{\tableofcontents}{% + \setcounter{page}{1}% + \pagebreak% + \pagestyle{plain}% + {% + \parskip = 0mm% + \py@OldTableofcontents% + \if@openright% + \ifodd\value{page}% + \typeout{Adding blank page after the table of contents.}% + \pagebreak\hspace{0pt}% + \fi% + \fi% + \cleardoublepage% + }% + \pagenumbering{arabic}% + \@ifundefined{fancyhf}{}{\pagestyle{normal}}% +} + +% This is needed to get the width of the section # area wide enough in the +% library reference. Doing it here keeps it the same for all the manuals. +% +\renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}} +\renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}} diff --git a/Doc/newdoc/_build/latex/tabulary.sty b/Doc/newdoc/_build/latex/tabulary.sty new file mode 100644 index 0000000..ba83c0a --- /dev/null +++ b/Doc/newdoc/_build/latex/tabulary.sty @@ -0,0 +1,452 @@ +%% +%% This is file `tabulary.sty', +%% generated with the docstrip utility. +%% +%% The original source files were: +%% +%% tabulary.dtx (with options: `package') +%% DRAFT VERSION +%% +%% File `tabulary.dtx'. +%% Copyright (C) 1995 1996 2003 David Carlisle +%% This file may be distributed under the terms of the LPPL. +%% See 00readme.txt for details. +%% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{tabulary} + [2007/10/02 v0.9 tabulary package (DPC)] +\RequirePackage{array} +\catcode`\Z=14 +\DeclareOption{debugshow}{\catcode`\Z=9\relax} +\ProcessOptions +\def\arraybackslash{\let\\=\@arraycr} +\def\@finalstrut#1{% + \unskip\ifhmode\nobreak\fi\vrule\@width\z@\@height\z@\@depth\dp#1} +\newcount\TY@count +\def\tabulary{% + \let\TY@final\tabular + \let\endTY@final\endtabular + \TY@tabular} +\def\TY@tabular#1{% + \edef\TY@{\@currenvir}% + {\ifnum0=`}\fi + \@ovxx\TY@linewidth + \@ovyy\TY@tablewidth + \count@\z@ + \@tempswatrue + \@whilesw\if@tempswa\fi{% + \advance\count@\@ne + \expandafter\ifx\csname TY@F\the\count@\endcsname\relax + \@tempswafalse + \else + \expandafter\let\csname TY@SF\the\count@\expandafter\endcsname + \csname TY@F\the\count@\endcsname + \global\expandafter\let\csname TY@F\the\count@\endcsname\relax + \expandafter\let\csname TY@S\the\count@\expandafter\endcsname + \csname TY@\the\count@\endcsname + \fi}% + \global\TY@count\@ne + \TY@width\xdef{0pt}% + \global\TY@tablewidth\z@ + \global\TY@linewidth#1\relax +Z\message{^^J^^JTable^^J% +Z Target Width: \the\TY@linewidth^^J% +Z \string\tabcolsep: \the\tabcolsep\space +Z \string\arrayrulewidth: \the\arrayrulewidth\space +Z \string\doublerulesep: \the\doublerulesep^^J% +Z \string\tymin: \the\tymin\space +Z \string\tymax: \the\tymax^^J}% + \let\@classz\TY@classz + \let\verb\TX@verb + \toks@{}\TY@get@body} +\let\TY@@mkpream\@mkpream +\def\TY@mkpream{% + \def\@addamp{% + \if@firstamp \@firstampfalse \else + \global\advance\TY@count\@ne + \edef\@preamble{\@preamble &}\fi + \TY@width\xdef{0pt}}% + \def\@acol{% + \TY@subwidth\col@sep + \@addtopreamble{\hskip\col@sep}}% + \let\@arrayrule\TY@arrayrule + \let\@classvi\TY@classvi + \def\@classv{\save@decl + 
\expandafter\NC@ecs\@nextchar\extracolsep{}\extracolsep\@@@ + \sbox\z@{\d@llarbegin\@nextchar\d@llarend}% + \TY@subwidth{\wd\z@}% + \@addtopreamble{\d@llarbegin\the@toks\the\count@\relax\d@llarend}% + \prepnext@tok}% + \global\let\@mkpream\TY@@mkpream + \TY@@mkpream} +\def\TY@arrayrule{% + \TY@subwidth\arrayrulewidth + \@addtopreamble \vline} +\def\TY@classvi{\ifcase \@lastchclass + \@acol \or + \TY@subwidth\doublerulesep + \@addtopreamble{\hskip \doublerulesep}\or + \@acol \or + \@classvii + \fi} +\def\TY@tab{% + \setbox\z@\hbox\bgroup + \let\[$\let\]$% + \let\equation$\let\endequation$% + \col@sep\tabcolsep + \let\d@llarbegin\begingroup\let\d@llarend\endgroup + \let\@mkpream\TY@mkpream + \def\multicolumn##1##2##3{\multispan##1\relax}% + \CT@start\TY@tabarray} +\def\TY@tabarray{\@ifnextchar[{\TY@array}{\@array[t]}} +\def\TY@array[#1]{\@array[t]} +\def\TY@width#1{% + \expandafter#1\csname TY@\the\TY@count\endcsname} +\def\TY@subwidth#1{% + \TY@width\dimen@ + \advance\dimen@-#1\relax + \TY@width\xdef{\the\dimen@}% + \global\advance\TY@linewidth-#1\relax} +\def\endtabulary{% + \gdef\@halignto{}% + \let\TY@footnote\footnote% + \def\footnote{}% prevent footnotes from doing anything + \expandafter\TY@tab\the\toks@ + \crcr\omit + {\xdef\TY@save@row{}% + \loop + \advance\TY@count\m@ne + \ifnum\TY@count>\z@ + \xdef\TY@save@row{\TY@save@row&\omit}% + \repeat}\TY@save@row + \endarray\global\setbox1=\lastbox\setbox0=\vbox{\unvbox1 + \unskip\global\setbox1=\lastbox}\egroup + \dimen@\TY@linewidth + \divide\dimen@\TY@count + \ifdim\dimen@<\tymin + \TY@warn{tymin too large (\the\tymin), resetting to \the\dimen@}% + \tymin\dimen@ + \fi + \setbox\tw@=\hbox{\unhbox\@ne + \loop +\@tempdima=\lastskip +\ifdim\@tempdima>\z@ +Z \message{ecs=\the\@tempdima^^J}% + \global\advance\TY@linewidth-\@tempdima +\fi + \unskip + \setbox\tw@=\lastbox + \ifhbox\tw@ +Z \message{Col \the\TY@count: Initial=\the\wd\tw@\space}% + \ifdim\wd\tw@>\tymax + \wd\tw@\tymax +Z \message{> max\space}% +Z \else +Z \message{ \@spaces\space}% + \fi + \TY@width\dimen@ +Z \message{\the\dimen@\space}% + \advance\dimen@\wd\tw@ +Z \message{Final=\the\dimen@\space}% + \TY@width\xdef{\the\dimen@}% + \ifdim\dimen@<\tymin +Z \message{< tymin}% + \global\advance\TY@linewidth-\dimen@ + \expandafter\xdef\csname TY@F\the\TY@count\endcsname + {\the\dimen@}% + \else + \expandafter\ifx\csname TY@F\the\TY@count\endcsname\z@ +Z \message{***}% + \global\advance\TY@linewidth-\dimen@ + \expandafter\xdef\csname TY@F\the\TY@count\endcsname + {\the\dimen@}% + \else +Z \message{> tymin}% + \global\advance\TY@tablewidth\dimen@ + \global\expandafter\let\csname TY@F\the\TY@count\endcsname + \maxdimen + \fi\fi + \advance\TY@count\m@ne + \repeat}% + \TY@checkmin + \TY@checkmin + \TY@checkmin + \TY@checkmin + \TY@count\z@ + \let\TY@box\TY@box@v + \let\footnote\TY@footnote % restore footnotes + {\expandafter\TY@final\the\toks@\endTY@final}% + \count@\z@ + \@tempswatrue + \@whilesw\if@tempswa\fi{% + \advance\count@\@ne + \expandafter\ifx\csname TY@SF\the\count@\endcsname\relax + \@tempswafalse + \else + \global\expandafter\let\csname TY@F\the\count@\expandafter\endcsname + \csname TY@SF\the\count@\endcsname + \global\expandafter\let\csname TY@\the\count@\expandafter\endcsname + \csname TY@S\the\count@\endcsname + \fi}% + \TY@linewidth\@ovxx + \TY@tablewidth\@ovyy + \ifnum0=`{\fi}} +\def\TY@checkmin{% + \let\TY@checkmin\relax +\ifdim\TY@tablewidth>\z@ + \Gscale@div\TY@ratio\TY@linewidth\TY@tablewidth + \ifdim\TY@tablewidth <\linewidth + \def\TY@ratio{1}% + \fi +\else + 
\TY@warn{No suitable columns!}% + \def\TY@ratio{1}% +\fi +\count@\z@ +Z \message{^^JLine Width: \the\TY@linewidth, +Z Natural Width: \the\TY@tablewidth, +Z Ratio: \TY@ratio^^J}% +\@tempdima\z@ +\loop +\ifnum\count@<\TY@count +\advance\count@\@ne + \ifdim\csname TY@F\the\count@\endcsname>\tymin + \dimen@\csname TY@\the\count@\endcsname + \dimen@\TY@ratio\dimen@ + \ifdim\dimen@<\tymin +Z \message{Column \the\count@\space ->}% + \global\expandafter\let\csname TY@F\the\count@\endcsname\tymin + \global\advance\TY@linewidth-\tymin + \global\advance\TY@tablewidth-\csname TY@\the\count@\endcsname + \let\TY@checkmin\TY@@checkmin + \else + \expandafter\xdef\csname TY@F\the\count@\endcsname{\the\dimen@}% + \advance\@tempdima\csname TY@F\the\count@\endcsname + \fi + \fi +Z \dimen@\csname TY@F\the\count@\endcsname\message{\the\dimen@, }% +\repeat +Z \message{^^JTotal:\the\@tempdima^^J}% +} +\let\TY@@checkmin\TY@checkmin +\newdimen\TY@linewidth +\def\tyformat{\everypar{{\nobreak\hskip\z@skip}}} +\newdimen\tymin +\tymin=10pt +\newdimen\tymax +\tymax=2\textwidth +\def\@testpach{\@chclass + \ifnum \@lastchclass=6 \@ne \@chnum \@ne \else + \ifnum \@lastchclass=7 5 \else + \ifnum \@lastchclass=8 \tw@ \else + \ifnum \@lastchclass=9 \thr@@ + \else \z@ + \ifnum \@lastchclass = 10 \else + \edef\@nextchar{\expandafter\string\@nextchar}% + \@chnum + \if \@nextchar c\z@ \else + \if \@nextchar l\@ne \else + \if \@nextchar r\tw@ \else + \if \@nextchar C7 \else + \if \@nextchar L8 \else + \if \@nextchar R9 \else + \if \@nextchar J10 \else + \z@ \@chclass + \if\@nextchar |\@ne \else + \if \@nextchar !6 \else + \if \@nextchar @7 \else + \if \@nextchar <8 \else + \if \@nextchar >9 \else + 10 + \@chnum + \if \@nextchar m\thr@@\else + \if \@nextchar p4 \else + \if \@nextchar b5 \else + \z@ \@chclass \z@ \@preamerr \z@ \fi \fi \fi \fi\fi \fi \fi\fi \fi + \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi} +\def\TY@classz{% + \@classx + \@tempcnta\count@ + \ifx\TY@box\TY@box@v + \global\advance\TY@count\@ne + \fi + \let\centering c% + \let\raggedright\noindent + \let\raggedleft\indent + \let\arraybackslash\relax + \prepnext@tok + \ifnum\@chnum<4 + \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ + \fi + \ifnum\@chnum=6 + \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ + \fi + \@addtopreamble{% + \ifcase\@chnum + \hfil \d@llarbegin\insert@column\d@llarend \hfil \or + \kern\z@ + \d@llarbegin \insert@column \d@llarend \hfil \or + \hfil\kern\z@ \d@llarbegin \insert@column \d@llarend \or + $\vcenter\@startpbox{\@nextchar}\insert@column \@endpbox $\or + \vtop \@startpbox{\@nextchar}\insert@column \@endpbox \or + \vbox \@startpbox{\@nextchar}\insert@column \@endpbox \or + \d@llarbegin \insert@column \d@llarend \or% dubious "s" case + \TY@box\centering\or + \TY@box\raggedright\or + \TY@box\raggedleft\or + \TY@box\relax + \fi}\prepnext@tok} +\def\TY@box#1{% + \ifx\centering#1% + \hfil \d@llarbegin\insert@column\d@llarend \hfil \else + \ifx\raggedright#1% + \kern\z@%<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + \d@llarbegin \insert@column \d@llarend \hfil \else + \ifx\raggedleft#1% + \hfil\kern\z@ \d@llarbegin \insert@column \d@llarend \else + \ifx\relax#1% + \d@llarbegin \insert@column \d@llarend + \fi \fi \fi \fi} +\def\TY@box@v#1{% + \vtop \@startpbox{\csname TY@F\the\TY@count\endcsname}% + #1\arraybackslash\tyformat + \insert@column\@endpbox} +\newdimen\TY@tablewidth +\def\Gscale@div#1#2#3{% + \setlength\dimen@{#3}% + \ifdim\dimen@=\z@ + \PackageError{graphics}{Division by 0}\@eha + \dimen@#2% + \fi + 
\edef\@tempd{\the\dimen@}% + \setlength\dimen@{#2}% + \count@65536\relax + \ifdim\dimen@<\z@ + \dimen@-\dimen@ + \count@-\count@ + \fi + \loop + \ifdim\dimen@<8192\p@ + \dimen@\tw@\dimen@ + \divide\count@\tw@ + \repeat + \dimen@ii=\@tempd\relax + \divide\dimen@ii\count@ + \divide\dimen@\dimen@ii + \edef#1{\strip@pt\dimen@}} +\long\def\TY@get@body#1\end + {\toks@\expandafter{\the\toks@#1}\TY@find@end} +\def\TY@find@end#1{% + \def\@tempa{#1}% + \ifx\@tempa\TY@\def\@tempa{\end{#1}}\expandafter\@tempa + \else\toks@\expandafter + {\the\toks@\end{#1}}\expandafter\TY@get@body\fi} +\def\TY@warn{% + \PackageWarning{tabulary}} +\catcode`\Z=11 +\AtBeginDocument{ +\@ifpackageloaded{colortbl}{% +\expandafter\def\expandafter\@mkpream\expandafter#\expandafter1% + \expandafter{% + \expandafter\let\expandafter\CT@setup\expandafter\relax + \expandafter\let\expandafter\CT@color\expandafter\relax + \expandafter\let\expandafter\CT@do@color\expandafter\relax + \expandafter\let\expandafter\color\expandafter\relax + \expandafter\let\expandafter\CT@column@color\expandafter\relax + \expandafter\let\expandafter\CT@row@color\expandafter\relax + \@mkpream{#1}} +\let\TY@@mkpream\@mkpream +\def\TY@classz{% + \@classx + \@tempcnta\count@ + \ifx\TY@box\TY@box@v + \global\advance\TY@count\@ne + \fi + \let\centering c% + \let\raggedright\noindent + \let\raggedleft\indent + \let\arraybackslash\relax + \prepnext@tok +\expandafter\CT@extract\the\toks\@tempcnta\columncolor!\@nil + \ifnum\@chnum<4 + \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ + \fi + \ifnum\@chnum=6 + \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ + \fi + \@addtopreamble{% + \setbox\z@\hbox\bgroup\bgroup + \ifcase\@chnum + \hskip\stretch{.5}\kern\z@ + \d@llarbegin\insert@column\d@llarend\hskip\stretch{.5}\or + \kern\z@%<<<<<<<<<<<<<<<<<<<<<<<<<<< + \d@llarbegin \insert@column \d@llarend \hfill \or + \hfill\kern\z@ \d@llarbegin \insert@column \d@llarend \or + $\vcenter\@startpbox{\@nextchar}\insert@column \@endpbox $\or + \vtop \@startpbox{\@nextchar}\insert@column \@endpbox \or + \vbox \@startpbox{\@nextchar}\insert@column \@endpbox \or + \d@llarbegin \insert@column \d@llarend \or% dubious s case + \TY@box\centering\or + \TY@box\raggedright\or + \TY@box\raggedleft\or + \TY@box\relax + \fi + \egroup\egroup +\begingroup + \CT@setup + \CT@column@color + \CT@row@color + \CT@do@color +\endgroup + \@tempdima\ht\z@ + \advance\@tempdima\minrowclearance + \vrule\@height\@tempdima\@width\z@ +\unhbox\z@ +}\prepnext@tok}% + \def\TY@arrayrule{% + \TY@subwidth\arrayrulewidth + \@addtopreamble{{\CT@arc@\vline}}}% + \def\TY@classvi{\ifcase \@lastchclass + \@acol \or + \TY@subwidth\doublerulesep + \ifx\CT@drsc@\relax + \@addtopreamble{\hskip\doublerulesep}% + \else + \@addtopreamble{{\CT@drsc@\vrule\@width\doublerulesep}}% + \fi\or + \@acol \or + \@classvii + \fi}% +}{% +\let\CT@start\relax +} +} +{\uccode`\*=`\ % +\uppercase{\gdef\TX@verb{% + \leavevmode\null\TX@vwarn + {\ifnum0=`}\fi\ttfamily\let\\\ignorespaces + \@ifstar{\let~*\TX@vb}{\TX@vb}}}} +\def\TX@vb#1{\def\@tempa##1#1{\toks@{##1}\edef\@tempa{\the\toks@}% + \expandafter\TX@v\meaning\@tempa\\ \\\ifnum0=`{\fi}}\@tempa!} +\def\TX@v#1!{\afterassignment\TX@vfirst\let\@tempa= } +\begingroup +\catcode`\*=\catcode`\# +\catcode`\#=12 +\gdef\TX@vfirst{% + \if\@tempa#% + \def\@tempb{\TX@v@#}% + \else + \let\@tempb\TX@v@ + \if\@tempa\space~\else\@tempa\fi + \fi + \@tempb} +\gdef\TX@v@*1 *2{% + \TX@v@hash*1##\relax\if*2\\\else~\expandafter\TX@v@\fi*2} 
+\gdef\TX@v@hash*1##*2{*1\ifx*2\relax\else#\expandafter\TX@v@hash\fi*2} +\endgroup +\def\TX@vwarn{% + \@warning{\noexpand\verb may be unreliable inside tabularx/y}% + \global\let\TX@vwarn\@empty} +\endinput +%% +%% End of file `tabulary.sty'. diff --git a/Doc/newdoc/icon-small.jpg b/Doc/newdoc/icon-small.jpg new file mode 100644 index 0000000..5f90807 Binary files /dev/null and b/Doc/newdoc/icon-small.jpg differ diff --git a/Doc/newdoc/icon.jpg b/Doc/newdoc/icon.jpg new file mode 100644 index 0000000..eec1a92 Binary files /dev/null and b/Doc/newdoc/icon.jpg differ diff --git a/Doc/newdoc/images/cosmo.png b/Doc/newdoc/images/cosmo.png new file mode 100644 index 0000000..9717228 Binary files /dev/null and b/Doc/newdoc/images/cosmo.png differ diff --git a/Doc/newdoc/images/edge-on-disk.png b/Doc/newdoc/images/edge-on-disk.png new file mode 100644 index 0000000..7fd5fe5 Binary files /dev/null and b/Doc/newdoc/images/edge-on-disk.png differ diff --git a/Doc/newdoc/index.rst b/Doc/newdoc/index.rst index b8c7773..6e9ef7b 100644 --- a/Doc/newdoc/index.rst +++ b/Doc/newdoc/index.rst @@ -1,24 +1,30 @@ .. pNbody documentation master file, created by sphinx-quickstart on Wed Aug 24 16:29:02 2011. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to pNbody's documentation! ================================== Contents: .. toctree:: - :maxdepth: 2 + :maxdepth: 5 rst/Overview rst/Installation - rst/Io + rst/Tutorial + rst/Formats + rst/Display + rst/InitialConditions + rst/Units + rst/Grids + rst/Reference Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` diff --git a/Doc/newdoc/index.rst b/Doc/newdoc/index.rst~ similarity index 79% copy from Doc/newdoc/index.rst copy to Doc/newdoc/index.rst~ index b8c7773..bfc29aa 100644 --- a/Doc/newdoc/index.rst +++ b/Doc/newdoc/index.rst~ @@ -1,24 +1,29 @@ .. pNbody documentation master file, created by sphinx-quickstart on Wed Aug 24 16:29:02 2011. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to pNbody's documentation! ================================== Contents: .. toctree:: - :maxdepth: 2 + :maxdepth: 5 rst/Overview rst/Installation - rst/Io + rst/Tutorial + rst/Formats + rst/Display + rst/InitialConditions + rst/Units + rst/Reference Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` diff --git a/Doc/newdoc/rst/C_PyGadget.rst b/Doc/newdoc/rst/C_PyGadget.rst new file mode 100644 index 0000000..4298d45 --- /dev/null +++ b/Doc/newdoc/rst/C_PyGadget.rst @@ -0,0 +1,16 @@ +the C PyGadget module +********************** + +This mpdule is currently not completely integrated to **pNbody**. +It is part of the **pNbody** package but must be compiled +separately. +For mpi, use:: + + export CC=mpirun + + + +.. currentmodule:: PyGadget.gadget + +.. automodule:: PyGadget.gadget + :members: diff --git a/Doc/newdoc/rst/C_asciilib.rst b/Doc/newdoc/rst/C_asciilib.rst new file mode 100644 index 0000000..b6c257b --- /dev/null +++ b/Doc/newdoc/rst/C_asciilib.rst @@ -0,0 +1,7 @@ +the C asciilib module +********************** + +.. currentmodule:: pNbody.asciilib + +.. 
automodule:: pNbody.asciilib + :members: diff --git a/Doc/newdoc/rst/C_cooling_with_metals.rst b/Doc/newdoc/rst/C_cooling_with_metals.rst new file mode 100644 index 0000000..155e8dc --- /dev/null +++ b/Doc/newdoc/rst/C_cooling_with_metals.rst @@ -0,0 +1,7 @@ +the C cooling_with_metals module +********************** + +.. currentmodule:: pNbody.cooling_with_metals + +.. automodule:: pNbody.cooling_with_metals + :members: diff --git a/Doc/newdoc/rst/C_coolinglib.rst b/Doc/newdoc/rst/C_coolinglib.rst new file mode 100644 index 0000000..7c4f60e --- /dev/null +++ b/Doc/newdoc/rst/C_coolinglib.rst @@ -0,0 +1,7 @@ +the C coolinglib module +********************** + +.. currentmodule:: pNbody.coolinglib + +.. automodule:: pNbody.coolinglib + :members: diff --git a/Doc/newdoc/rst/C_cosmolib.rst b/Doc/newdoc/rst/C_cosmolib.rst new file mode 100644 index 0000000..e5608b2 --- /dev/null +++ b/Doc/newdoc/rst/C_cosmolib.rst @@ -0,0 +1,7 @@ +the C cosmolib module +********************** + +.. currentmodule:: pNbody.cosmolib + +.. automodule:: pNbody.cosmolib + :members: diff --git a/Doc/newdoc/rst/C_iclib.rst b/Doc/newdoc/rst/C_iclib.rst new file mode 100644 index 0000000..4555a26 --- /dev/null +++ b/Doc/newdoc/rst/C_iclib.rst @@ -0,0 +1,7 @@ +the C iclib module +********************** + +.. currentmodule:: pNbody.iclib + +.. automodule:: pNbody.iclib + :members: diff --git a/Doc/newdoc/rst/C_mapping-omp.rst b/Doc/newdoc/rst/C_mapping-omp.rst new file mode 100644 index 0000000..4a6b814 --- /dev/null +++ b/Doc/newdoc/rst/C_mapping-omp.rst @@ -0,0 +1,7 @@ +the C mapping-omp module (under construction) +********************** + + +currently not available, +still under construction. + diff --git a/Doc/newdoc/rst/C_mapping.rst b/Doc/newdoc/rst/C_mapping.rst new file mode 100644 index 0000000..5092972 --- /dev/null +++ b/Doc/newdoc/rst/C_mapping.rst @@ -0,0 +1,7 @@ +the C mapping module +********************** + +.. currentmodule:: pNbody.mapping + +.. automodule:: pNbody.mapping + :members: diff --git a/Doc/newdoc/rst/C_montecarlolib.rst b/Doc/newdoc/rst/C_montecarlolib.rst new file mode 100644 index 0000000..4e9ebe2 --- /dev/null +++ b/Doc/newdoc/rst/C_montecarlolib.rst @@ -0,0 +1,7 @@ +the C montecarlolib module +********************** + +.. currentmodule:: pNbody.montecarlolib + +.. automodule:: pNbody.montecarlolib + :members: diff --git a/Doc/newdoc/rst/C_myNumeric.rst b/Doc/newdoc/rst/C_myNumeric.rst new file mode 100644 index 0000000..bf722bb --- /dev/null +++ b/Doc/newdoc/rst/C_myNumeric.rst @@ -0,0 +1,7 @@ +the C myNumeric module +********************** + +.. currentmodule:: pNbody.myNumeric + +.. automodule:: pNbody.myNumeric + :members: diff --git a/Doc/newdoc/rst/C_nbdrklib.rst b/Doc/newdoc/rst/C_nbdrklib.rst new file mode 100644 index 0000000..8630470 --- /dev/null +++ b/Doc/newdoc/rst/C_nbdrklib.rst @@ -0,0 +1,7 @@ +the C nbdrklib module +********************** + +.. currentmodule:: pNbody.nbdrklib + +.. automodule:: pNbody.nbdrklib + :members: diff --git a/Doc/newdoc/rst/C_nbodymodule.rst b/Doc/newdoc/rst/C_nbodymodule.rst new file mode 100644 index 0000000..23a835e --- /dev/null +++ b/Doc/newdoc/rst/C_nbodymodule.rst @@ -0,0 +1,7 @@ +the C nbodymodule module +********************** + +.. currentmodule:: pNbody.nbodymodule + +.. 
automodule:: pNbody.nbodymodule + :members: diff --git a/Doc/newdoc/rst/C_peanolib.rst b/Doc/newdoc/rst/C_peanolib.rst new file mode 100644 index 0000000..3c97f65 --- /dev/null +++ b/Doc/newdoc/rst/C_peanolib.rst @@ -0,0 +1,7 @@ +the C peanolib module +********************** + +.. currentmodule:: pNbody.peanolib + +.. automodule:: pNbody.peanolib + :members: diff --git a/Doc/newdoc/rst/C_pmlib.rst b/Doc/newdoc/rst/C_pmlib.rst new file mode 100644 index 0000000..60f3d6b --- /dev/null +++ b/Doc/newdoc/rst/C_pmlib.rst @@ -0,0 +1,6 @@ +the C pmlib module (never developped) +********************** + +currently not available, +still under construction. + diff --git a/Doc/newdoc/rst/C_ptreelib.rst b/Doc/newdoc/rst/C_ptreelib.rst new file mode 100644 index 0000000..c58edd8 --- /dev/null +++ b/Doc/newdoc/rst/C_ptreelib.rst @@ -0,0 +1,6 @@ +the C ptreelib module (obsolete) +********************** + +currently not available, +still under construction. + diff --git a/Doc/newdoc/rst/C_pygsl.rst b/Doc/newdoc/rst/C_pygsl.rst new file mode 100644 index 0000000..c7df240 --- /dev/null +++ b/Doc/newdoc/rst/C_pygsl.rst @@ -0,0 +1,7 @@ +the C pygsl module +********************** + +.. currentmodule:: pNbody.pygsl + +.. automodule:: pNbody.pygsl + :members: diff --git a/Doc/newdoc/rst/C_streelib.rst b/Doc/newdoc/rst/C_streelib.rst new file mode 100644 index 0000000..532cf11 --- /dev/null +++ b/Doc/newdoc/rst/C_streelib.rst @@ -0,0 +1,5 @@ +the C streelib module (under construction) +********************** + +currently not available, +still under construction. diff --git a/Doc/newdoc/rst/C_tessel.rst b/Doc/newdoc/rst/C_tessel.rst new file mode 100644 index 0000000..594d86e --- /dev/null +++ b/Doc/newdoc/rst/C_tessel.rst @@ -0,0 +1,7 @@ +the C tessel module +********************** + +.. currentmodule:: pNbody.tessel + +.. automodule:: pNbody.tessel + :members: diff --git a/Doc/newdoc/rst/C_treelib.rst b/Doc/newdoc/rst/C_treelib.rst new file mode 100644 index 0000000..7dc7909 --- /dev/null +++ b/Doc/newdoc/rst/C_treelib.rst @@ -0,0 +1,7 @@ +the C treelib module +********************** + +.. currentmodule:: pNbody.treelib + +.. automodule:: pNbody.treelib + :members: diff --git a/Doc/newdoc/rst/CoolingModule.rst b/Doc/newdoc/rst/CoolingModule.rst new file mode 100644 index 0000000..683567f --- /dev/null +++ b/Doc/newdoc/rst/CoolingModule.rst @@ -0,0 +1,2 @@ +the cooling module +********************** diff --git a/Doc/newdoc/rst/CosmoModule.rst b/Doc/newdoc/rst/CosmoModule.rst new file mode 100644 index 0000000..76d7303 --- /dev/null +++ b/Doc/newdoc/rst/CosmoModule.rst @@ -0,0 +1,7 @@ +the cosmo module +********************** + +.. currentmodule:: pNbody.cosmo + +.. automodule:: pNbody.cosmo + :members: diff --git a/Doc/newdoc/rst/CtesModule.rst b/Doc/newdoc/rst/CtesModule.rst new file mode 100644 index 0000000..3b591ef --- /dev/null +++ b/Doc/newdoc/rst/CtesModule.rst @@ -0,0 +1,7 @@ +the ctes module +********************** + +.. currentmodule:: pNbody.ctes + +.. 
automodule:: pNbody.ctes + :members: diff --git a/Doc/newdoc/rst/Default.rst~ b/Doc/newdoc/rst/Default.rst~ new file mode 100644 index 0000000..6ede93a --- /dev/null +++ b/Doc/newdoc/rst/Default.rst~ @@ -0,0 +1,2 @@ +Default configuration +********************** \ No newline at end of file diff --git a/Doc/newdoc/rst/Default_configurations.rst b/Doc/newdoc/rst/Default_configurations.rst new file mode 100644 index 0000000..37000bd --- /dev/null +++ b/Doc/newdoc/rst/Default_configurations.rst @@ -0,0 +1,41 @@ +Default configuration +********************** + +**pNbody** uses a set of parameters files, color tables and formats files. +These files are provided by the installation and are by default stored in +the directory ``site-packages/pNbody/config``. +To display where these files are taken from, you can use the command:: + + pNbody_show-path + +It is recommanded that the user uses its own configuration files. To be automatically +recongnized by **pNbody**, the latter must be in the user ``~/.pNbody`` directory. +**pNbody** provides a simple command to copy all parameters in this directory. Simply +type:: + + pNbody_copy-defaultconfig + +and check the values of the new paths:: + + pNbody_show-path + +You can now freely modify the files contains in the configuratio directory. + +By default, the content of the configuration directory is: + ++------------------------+------------+--------------------------------------------------------------------------+ +| name | type | Content | ++========================+============+==========================================================================+ +| defaultparameters | file | the default graphical parameters used by **pNbody** | ++------------------------+------------+--------------------------------------------------------------------------+ +| unitsparameters | file | the default units parameters used by **pNbody** | ++------------------------+------------+--------------------------------------------------------------------------+ +| formats | directory | specific class definition files used to read different file formats | ++------------------------+------------+--------------------------------------------------------------------------+ +| rgb_tables | directory | color tables | ++------------------------+------------+--------------------------------------------------------------------------+ +| plugins | directory | optional plugins | ++------------------------+------------+--------------------------------------------------------------------------+ +| opt | directory | optional files | ++------------------------+------------+--------------------------------------------------------------------------+ + diff --git a/Doc/newdoc/rst/Default_configurations.rst~ b/Doc/newdoc/rst/Default_configurations.rst~ new file mode 100644 index 0000000..9c1e0e4 --- /dev/null +++ b/Doc/newdoc/rst/Default_configurations.rst~ @@ -0,0 +1,41 @@ +Default configuration +********************** + +**pNbody** uses a set of parameters files, color tables and formats files. +These files are provided by the installation and are by default stored in +the directory ``site-packages/pNbody/config``. +To display where these files are taken from, you can use the command:: + + pNbody_show-path + +It is recommanded that the user uses its own configuration files. To be automatically +recongnized by **pNbody**, the latter must be in the user ``~/.pNbody`` directory. +**pNbody** provides a simple command to copy all parameters in this directory. 
Simply +type:: + + pNbody_copy-defaultconfig + +and check the values of the new paths:: + + pNbody_show-path + +You can now freely modify the files contains in the configuratio directory. + +By default, the content of the configuration directory is: + ++------------------------+------------+--------------------------------------------------------------------------+ +| name | type | Content | ++========================+============+==========================================================================+ +| defaultparameters | file | the default graphical parameters used by pNbody | ++------------------------+------------+--------------------------------------------------------------------------+ +| unitsparameters | file | the default units parameters used by pNbody | ++------------------------+------------+--------------------------------------------------------------------------+ +| formats | directory | specific class definition files used to read different file formats | ++------------------------+------------+--------------------------------------------------------------------------+ +| rgb_tables | directory | color tables | ++------------------------+------------+--------------------------------------------------------------------------+ +| plugins | directory | optional plugins | ++------------------------+------------+--------------------------------------------------------------------------+ +| opt | directory | optional files | ++------------------------+------------+--------------------------------------------------------------------------+ + diff --git a/Doc/newdoc/rst/Default_parameters.rst b/Doc/newdoc/rst/Default_parameters.rst new file mode 100644 index 0000000..16687c0 --- /dev/null +++ b/Doc/newdoc/rst/Default_parameters.rst @@ -0,0 +1,67 @@ +Default parameters +********************** + +To see what default parameters **pNbody** uses, type:: + + pNbody_show-parameters + +The script returns the parameters taken from the files +*defaultparameters* and *unitsparameters*. +Their current values are displayed:: + + parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters + + ---------------------------------------------------------------------------------------------------- + name meaning value (type) + ---------------------------------------------------------------------------------------------------- + obs : observer = None (ArrayObs) + xp : observing position = None (List) + x0 : position of observer = None (List) + alpha : angle of the head = None (Float) + view : view = xz (String) + r_obs : dist. 
to the observer = 201732.223771 (Float) + clip : clip planes = (100866.11188556443, 403464.44754225772) (Tuple) + cut : cut clip planes = no (String) + eye : name of the eye = None (String) + dist_eye : distance between eyes = -0.0005 (Float) + foc : focal = 300.0 (Float) + persp : perspective = off (String) + shape : shape of the image = (512, 512) (Tuple) + size : pysical size = (6000, 6000) (Tuple) + frsp : frsp = 0.0 (Float) + space : space = pos (String) + mode : mode = m (String) + rendering : rendering mode = map (String) + filter_name : name of the filter = None (String) + filter_opts : filter options = [10, 10, 2, 2] (List) + scale : scale = log (String) + cd : cd = 0.0 (Float) + mn : mn = 0.0 (Float) + mx : mx = 0.0 (Float) + l_n : number of levels = 15 (Int) + l_min : min level = 0.0 (Float) + l_max : max level = 0.0 (Float) + l_kx : l_kx = 10 (Int) + l_ky : l_ky = 10 (Int) + l_color : level color = 0 (Int) + l_crush : crush background = no (String) + b_weight : box line weight = 0 (Int) + b_xopts : x axis options = None (Tuple) + b_yopts : y axis options = None (Tuple) + b_color : line color = 255 (Int) + + parameters in /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters + + ---------------------------------------------------------------------------------------------------- + name meaning value (type) + ---------------------------------------------------------------------------------------------------- + xi : hydrogen mass fraction = 0.76 (Float) + ionisation : ionisation flag = 1 (Int) + metalicity : metalicity index = 4 (Int) + Nsph : number of sph neighbors = 50 (Int) + gamma : adiabatic index = 1.66666666667 (Float) + coolingfile : Cooling file = ~/.Nbody/cooling.dat (String) + HubbleParam : HubbleParam = 1.0 (Float) + UnitLength_in_cm : UnitLength in cm = 3.085e+21 (Float) + UnitMass_in_g : UnitMass in g = 4.435693e+44 (Float) + UnitVelocity_in_cm_per_s : UnitVelocity in cm per s = 97824708.2699 (Float) diff --git a/Doc/newdoc/rst/Default_parameters.rst~ b/Doc/newdoc/rst/Default_parameters.rst~ new file mode 100644 index 0000000..198c9b3 --- /dev/null +++ b/Doc/newdoc/rst/Default_parameters.rst~ @@ -0,0 +1,11 @@ +Default parameters +********************** + +To see what default parameters **pNbody** uses, type:: + + pNbody_show-parameters + +The script returns the parameters taken from the files +*defaultparameters* and *unitsparameters*. 
+Their current values are displayed:: + diff --git a/Doc/newdoc/rst/Display.rst b/Doc/newdoc/rst/Display.rst new file mode 100644 index 0000000..955ced3 --- /dev/null +++ b/Doc/newdoc/rst/Display.rst @@ -0,0 +1,3 @@ +Display Models +********************** + diff --git a/Doc/newdoc/rst/Examples.rst b/Doc/newdoc/rst/Examples.rst new file mode 100644 index 0000000..1e91547 --- /dev/null +++ b/Doc/newdoc/rst/Examples.rst @@ -0,0 +1,9 @@ +Examples +********************** + +A series of examples is provided by **pNbody** in the +``PNBODYPATH/examples``, where NBODYPATH is obtained +with the command:: + + pNbody_show-path + \ No newline at end of file diff --git a/Doc/newdoc/rst/Examples.rst~ b/Doc/newdoc/rst/Examples.rst~ new file mode 100644 index 0000000..1e91547 --- /dev/null +++ b/Doc/newdoc/rst/Examples.rst~ @@ -0,0 +1,9 @@ +Examples +********************** + +A series of examples is provided by **pNbody** in the +``PNBODYPATH/examples``, where NBODYPATH is obtained +with the command:: + + pNbody_show-path + \ No newline at end of file diff --git a/Doc/newdoc/rst/Formats.rst b/Doc/newdoc/rst/Formats.rst new file mode 100644 index 0000000..bcfb7f9 --- /dev/null +++ b/Doc/newdoc/rst/Formats.rst @@ -0,0 +1,3 @@ +Setting a format file +********************** + diff --git a/Doc/newdoc/rst/FortranfileModule.rst b/Doc/newdoc/rst/FortranfileModule.rst new file mode 100644 index 0000000..b1c6756 --- /dev/null +++ b/Doc/newdoc/rst/FortranfileModule.rst @@ -0,0 +1,7 @@ +the fortranfile module +********************** + +.. currentmodule:: pNbody.fortranfile + +.. automodule:: pNbody.fortranfile + :members: diff --git a/Doc/newdoc/rst/FourierModule.rst b/Doc/newdoc/rst/FourierModule.rst new file mode 100644 index 0000000..ce448bd --- /dev/null +++ b/Doc/newdoc/rst/FourierModule.rst @@ -0,0 +1,7 @@ +the fourier module +********************** + +.. currentmodule:: pNbody.fourier + +.. automodule:: pNbody.fourier + :members: diff --git a/Doc/newdoc/rst/GeometryModule.rst b/Doc/newdoc/rst/GeometryModule.rst new file mode 100644 index 0000000..a53df9f --- /dev/null +++ b/Doc/newdoc/rst/GeometryModule.rst @@ -0,0 +1,7 @@ +the geometry module +********************** + +.. currentmodule:: pNbody.geometry + +.. automodule:: pNbody.geometry + :members: diff --git a/Doc/newdoc/rst/Grids.rst b/Doc/newdoc/rst/Grids.rst new file mode 100644 index 0000000..bcb2ce8 --- /dev/null +++ b/Doc/newdoc/rst/Grids.rst @@ -0,0 +1,3 @@ +Generating grids +********************** + diff --git a/Doc/newdoc/rst/IcModule.rst b/Doc/newdoc/rst/IcModule.rst new file mode 100644 index 0000000..a442db0 --- /dev/null +++ b/Doc/newdoc/rst/IcModule.rst @@ -0,0 +1,7 @@ +the ic module +********************** + +.. currentmodule:: pNbody.ic + +.. automodule:: pNbody.ic + :members: diff --git a/Doc/newdoc/rst/InitialConditions.rst b/Doc/newdoc/rst/InitialConditions.rst new file mode 100644 index 0000000..002549a --- /dev/null +++ b/Doc/newdoc/rst/InitialConditions.rst @@ -0,0 +1,3 @@ +Generating initial conditions +********************** + diff --git a/Doc/newdoc/rst/Installation.rst b/Doc/newdoc/rst/Installation.rst index 7ece16d..b251858 100644 --- a/Doc/newdoc/rst/Installation.rst +++ b/Doc/newdoc/rst/Installation.rst @@ -1,15 +1,17 @@ Installation ********************** pNbody is curently only supported by linux. .. 
toctree:: :maxdepth: 2 Prerequiste Installing_from_tarball Test_the_installation - Documentation_and_examples + Default_configurations + Default_parameters + Examples diff --git a/Doc/newdoc/rst/Installation.rst b/Doc/newdoc/rst/Installation.rst~ similarity index 75% copy from Doc/newdoc/rst/Installation.rst copy to Doc/newdoc/rst/Installation.rst~ index 7ece16d..b251858 100644 --- a/Doc/newdoc/rst/Installation.rst +++ b/Doc/newdoc/rst/Installation.rst~ @@ -1,15 +1,17 @@ Installation ********************** pNbody is curently only supported by linux. .. toctree:: :maxdepth: 2 Prerequiste Installing_from_tarball Test_the_installation - Documentation_and_examples + Default_configurations + Default_parameters + Examples diff --git a/Doc/newdoc/rst/Installing_from_tarball.rst b/Doc/newdoc/rst/Installing_from_tarball.rst index 978d8d1..b6a1f2c 100644 --- a/Doc/newdoc/rst/Installing_from_tarball.rst +++ b/Doc/newdoc/rst/Installing_from_tarball.rst @@ -1,39 +1,39 @@ Installing from source ********************** Decompress the tarball ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Decompress the tarball file:: tar -xzf pNbody-4.x.tar.gz enter the directory:: cd pNbody-4.x Compile ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The compilation is performed using the standard command:: python setup.py build If one wants to install in another directory than the default -python one, it is possible to use the standard *--prefix* option:: +python one, it is possible to use the standard ``--prefix`` option:: python setup.py build --prefix other_directory Install ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now, depending on your python installation you need to be root. The module is installed with the following command:: python setup.py install diff --git a/Doc/newdoc/rst/Installing_from_tarball.rst b/Doc/newdoc/rst/Installing_from_tarball.rst~ similarity index 100% copy from Doc/newdoc/rst/Installing_from_tarball.rst copy to Doc/newdoc/rst/Installing_from_tarball.rst~ diff --git a/Doc/newdoc/rst/IoModule.rst b/Doc/newdoc/rst/IoModule.rst new file mode 100644 index 0000000..d571bba --- /dev/null +++ b/Doc/newdoc/rst/IoModule.rst @@ -0,0 +1,14 @@ +the io module +********************** + +.. currentmodule:: pNbody.io + +.. autofunction:: checkfile +.. autofunction:: end_of_file +.. autofunction:: write_array +.. autofunction:: read_ascii +.. autofunction:: write_dump +.. autofunction:: read_dump + + + diff --git a/Doc/newdoc/rst/LibdiskModule.rst b/Doc/newdoc/rst/LibdiskModule.rst new file mode 100644 index 0000000..c8f18bb --- /dev/null +++ b/Doc/newdoc/rst/LibdiskModule.rst @@ -0,0 +1,7 @@ +the libdisk module +********************** + +.. currentmodule:: pNbody.libdisk + +.. automodule:: pNbody.libdisk + :members: diff --git a/Doc/newdoc/rst/LibgridModule.rst b/Doc/newdoc/rst/LibgridModule.rst new file mode 100644 index 0000000..1703590 --- /dev/null +++ b/Doc/newdoc/rst/LibgridModule.rst @@ -0,0 +1,7 @@ +the libgrid module +********************** + +.. currentmodule:: pNbody.libgrid + +.. automodule:: pNbody.libgrid + :members: diff --git a/Doc/newdoc/rst/LiblogModule.rst b/Doc/newdoc/rst/LiblogModule.rst new file mode 100644 index 0000000..5584e90 --- /dev/null +++ b/Doc/newdoc/rst/LiblogModule.rst @@ -0,0 +1,7 @@ +the liblog module +********************** + +.. currentmodule:: pNbody.liblog + +.. 
automodule:: pNbody.liblog + :members: diff --git a/Doc/newdoc/rst/LibmiyamotoModule.rst b/Doc/newdoc/rst/LibmiyamotoModule.rst new file mode 100644 index 0000000..8fd942d --- /dev/null +++ b/Doc/newdoc/rst/LibmiyamotoModule.rst @@ -0,0 +1,7 @@ +the libmiyamoto module +********************** + +.. currentmodule:: pNbody.libmiyamoto + +.. automodule:: pNbody.libmiyamoto + :members: diff --git a/Doc/newdoc/rst/LibqtModule.rst b/Doc/newdoc/rst/LibqtModule.rst new file mode 100644 index 0000000..ae36fbc --- /dev/null +++ b/Doc/newdoc/rst/LibqtModule.rst @@ -0,0 +1,7 @@ +the libqt module +********************** + +.. currentmodule:: pNbody.libqt + +.. automodule:: pNbody.libqt + :members: diff --git a/Doc/newdoc/rst/LibutilModule.rst b/Doc/newdoc/rst/LibutilModule.rst new file mode 100644 index 0000000..a330841 --- /dev/null +++ b/Doc/newdoc/rst/LibutilModule.rst @@ -0,0 +1,7 @@ +the libutil module +********************** + +.. currentmodule:: pNbody.libutil + +.. automodule:: pNbody.libutil + :members: diff --git a/Doc/newdoc/rst/MainModule.rst b/Doc/newdoc/rst/MainModule.rst new file mode 100644 index 0000000..6f61a81 --- /dev/null +++ b/Doc/newdoc/rst/MainModule.rst @@ -0,0 +1,8 @@ +the main module +********************** + +.. currentmodule:: pNbody.main + +.. automodule:: pNbody.main + :members: + diff --git a/Doc/newdoc/rst/MainModule.rst~ b/Doc/newdoc/rst/MainModule.rst~ new file mode 100644 index 0000000..038d621 --- /dev/null +++ b/Doc/newdoc/rst/MainModule.rst~ @@ -0,0 +1,7 @@ +the main module +********************** + + +.. autoclass:: pNbody.main.NbodyDefault + :members: + .. method::init() \ No newline at end of file diff --git a/Doc/newdoc/rst/MovieModule.rst b/Doc/newdoc/rst/MovieModule.rst new file mode 100644 index 0000000..bb127d9 --- /dev/null +++ b/Doc/newdoc/rst/MovieModule.rst @@ -0,0 +1,7 @@ +the Movie module +********************** + +.. currentmodule:: pNbody.Movie + +.. automodule:: pNbody.Movie + :members: diff --git a/Doc/newdoc/rst/MpiModule.rst b/Doc/newdoc/rst/MpiModule.rst new file mode 100644 index 0000000..2049388 --- /dev/null +++ b/Doc/newdoc/rst/MpiModule.rst @@ -0,0 +1,7 @@ +the mpi module +********************** + +.. currentmodule:: pNbody.mpi + +.. automodule:: pNbody.mpi + :members: diff --git a/Doc/newdoc/rst/Overview.rst b/Doc/newdoc/rst/Overview.rst index 04c6c96..978b948 100644 --- a/Doc/newdoc/rst/Overview.rst +++ b/Doc/newdoc/rst/Overview.rst @@ -1,26 +1,26 @@ Overview ********************** -pNbody is a parallelized python module toolbox designed to manipulate and display +**pNbody** is a parallelized python module toolbox designed to manipulate and display interactively very large N-body systems. Its object-oriented approach allows the user to perform complicated manipulations with only very few commands. As python is an interpreted language, the user can load an N-body system and explore it interactively using the python interpreter. pNbody may also be used in python scripts. The module also contains graphical facilities designed to create maps of physical values of the system, like density maps, temperature maps, velocity maps, etc. Stereo capabilities are also implemented. pNbody is not limited by file format. Each user may redefine in a parameter file how to read his or her preferred format. Its new parallel (mpi) facilities make it work on computer clusters without being limited by memory consumption. It has already been tested with several million particles. ..
image:: ../images/cosmo.png diff --git a/Doc/newdoc/_build/html/_sources/rst/Overview.txt b/Doc/newdoc/rst/Overview.rst~ similarity index 100% copy from Doc/newdoc/_build/html/_sources/rst/Overview.txt copy to Doc/newdoc/rst/Overview.rst~ diff --git a/Doc/newdoc/rst/PaletteModule.rst b/Doc/newdoc/rst/PaletteModule.rst new file mode 100644 index 0000000..51d8a5c --- /dev/null +++ b/Doc/newdoc/rst/PaletteModule.rst @@ -0,0 +1,7 @@ +the palette module +********************** + +.. currentmodule:: pNbody.palette + +.. automodule:: pNbody.palette + :members: diff --git a/Doc/newdoc/rst/ParamModule.rst b/Doc/newdoc/rst/ParamModule.rst new file mode 100644 index 0000000..b618d2b --- /dev/null +++ b/Doc/newdoc/rst/ParamModule.rst @@ -0,0 +1,7 @@ +the param module +********************** + +.. currentmodule:: pNbody.param + +.. automodule:: pNbody.param + :members: diff --git a/Doc/newdoc/rst/ParameterModule.rst b/Doc/newdoc/rst/ParameterModule.rst new file mode 100644 index 0000000..60c0366 --- /dev/null +++ b/Doc/newdoc/rst/ParameterModule.rst @@ -0,0 +1,7 @@ +the parameters module +********************** + +.. currentmodule:: pNbody.parameters + +.. automodule:: pNbody.parameters + :members: diff --git a/Doc/newdoc/rst/PhotModule.rst b/Doc/newdoc/rst/PhotModule.rst new file mode 100644 index 0000000..e85fd04 --- /dev/null +++ b/Doc/newdoc/rst/PhotModule.rst @@ -0,0 +1,7 @@ +the phot module +********************** + +.. currentmodule:: pNbody.phot + +.. automodule:: pNbody.phot + :members: diff --git a/Doc/newdoc/rst/PlummerModule.rst b/Doc/newdoc/rst/PlummerModule.rst new file mode 100644 index 0000000..1cb52c3 --- /dev/null +++ b/Doc/newdoc/rst/PlummerModule.rst @@ -0,0 +1,7 @@ +the plummer module +********************** + +.. currentmodule:: pNbody.plummer + +.. automodule:: pNbody.plummer + :members: diff --git a/Doc/newdoc/rst/ProfilesModule.rst b/Doc/newdoc/rst/ProfilesModule.rst new file mode 100644 index 0000000..f228716 --- /dev/null +++ b/Doc/newdoc/rst/ProfilesModule.rst @@ -0,0 +1,7 @@ +the profiles module +********************** + +.. currentmodule:: pNbody.profiles + +.. automodule:: pNbody.profiles + :members: diff --git a/Doc/newdoc/rst/PyfitsModule.rst b/Doc/newdoc/rst/PyfitsModule.rst new file mode 100644 index 0000000..f9dcabb --- /dev/null +++ b/Doc/newdoc/rst/PyfitsModule.rst @@ -0,0 +1,7 @@ +the pyfits module +********************** + +.. currentmodule:: pNbody.pyfits + +.. automodule:: pNbody.pyfits + :members: diff --git a/Doc/newdoc/rst/RecModule.rst b/Doc/newdoc/rst/RecModule.rst new file mode 100644 index 0000000..f73f6ba --- /dev/null +++ b/Doc/newdoc/rst/RecModule.rst @@ -0,0 +1,7 @@ +the rec module +********************** + +.. currentmodule:: pNbody.rec + +.. automodule:: pNbody.rec + :members: diff --git a/Doc/newdoc/rst/Reference.rst b/Doc/newdoc/rst/Reference.rst new file mode 100644 index 0000000..27e1f71 --- /dev/null +++ b/Doc/newdoc/rst/Reference.rst @@ -0,0 +1,69 @@ +Reference +********************** + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + MainModule + + IcModule + IoModule + UnitsModule + CtesModule + + MpiModule + LibutilModule + ParamModule + ParameterModule + LiblogModule + TalkgdispModule + PyfitsModule + RecModule + LibqtModule + FortranfileModule + PaletteModule + MovieModule + + ProfilesModule + GeometryModule + LibmiyamotoModule + PlummerModule + + + LibgridModule + LibdiskModule + + CosmoModule + ThermodynModule + FourierModule + PhotModule + CoolingModule + + C_asciilib + C_coolinglib + C_cooling_with_metals + C_cosmolib + C_iclib + C_mapping + C_mapping-omp + C_montecarlolib + C_myNumeric + C_nbdrklib + C_nbodymodule + C_peanolib + C_pmlib + C_ptreelib + C_PyGadget + C_pygsl + C_streelib + C_tessel + C_treelib + + + + + + + diff --git a/Doc/newdoc/rst/Reference.rst~ b/Doc/newdoc/rst/Reference.rst~ new file mode 100644 index 0000000..9015100 --- /dev/null +++ b/Doc/newdoc/rst/Reference.rst~ @@ -0,0 +1,69 @@ +Reference +********************** + +Contents: + +.. toctree:: + :maxdepth: 2 + + MainModule + + IcModule + IoModule + UnitsModule + CtesModule + + MpiModule + LibutilModule + ParamModule + ParameterModule + LiblogModule + TalkgdispModule + PyfitsModule + RecModule + LibqtModule + FortranfileModule + PaletteModule + MovieModule + + ProfilesModule + GeometryModule + LibmiyamotoModule + PlummerModule + + + LibgridModule + LibdiskModule + + CosmoModule + ThermodynModule + FourierModule + PhotModule + CoolingModule + + asciilib + coolinglib + cooling_with_metals + cosmolib + iclib + mapping + mapping-omp + montecarlolib + myNumeric + nbdrklib + nbodymodule + peanolib + pmlib + ptreelib + PyGadget + pygsl + streelib + tessel + treelib + + + + + + + diff --git a/Doc/newdoc/rst/TalkgdispModule.rst b/Doc/newdoc/rst/TalkgdispModule.rst new file mode 100644 index 0000000..e35093b --- /dev/null +++ b/Doc/newdoc/rst/TalkgdispModule.rst @@ -0,0 +1,7 @@ +the talkgdisp module +********************** + +.. currentmodule:: pNbody.talkgdisp + +.. automodule:: pNbody.talkgdisp + :members: diff --git a/Doc/newdoc/rst/Test_the_installation.rst b/Doc/newdoc/rst/Test_the_installation.rst index 3680835..c0280d1 100644 --- a/Doc/newdoc/rst/Test_the_installation.rst +++ b/Doc/newdoc/rst/Test_the_installation.rst @@ -1,14 +1,48 @@ Check the installation ********************** -.. note:: - WARNING WARNING WARNING : THIS MUST BE CHANGED !!! - - create a script to test everything - - create a script to copy the parameters in the home -To check the installation, go to the example directory -PYTHON_DIRECTORY/site-packages/pNbody/examples, where PYTHON_DIRECTORY is -the directory of your python installation and usually looks like /usr/lib/pythonx.y/. -Then ,simply type:: +You can check the installation by simply running the following +command:: - python testall.py + pNbody_checkall
+
+This command must of course be in your path. This will be the case
+if you did not specify any ``--prefix``. If, on the contrary, ``--prefix``
+was set to, for example, *localdir*, your *PATH* environment
+variable should contain::
+
+ localdir/bin
+
+and your *PYTHONPATH* environment variable should contain::
+
+ localdir/lib/python2.x/site-packages/
+
+to ensure that the **pNbody** package will be found.
+
+If everything goes well, you should see a lot of output on your screen,
+as well as a window displaying an edge-on disk.
+
+.. image:: ../images/edge-on-disk.png
+ :width: 200 px
+
+
+Close it when you see it.
+The script should finally ends up with something like :: + + + ######################################################################## + Good News ! pNbody with format gadget is working ! + ######################################################################## + + You are currently using the following paths + + HOME : /home/leo + PNBODYPATH : /home/leo/local/lib/python2.6/site-packages/pNbody + CONFIGDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config + PARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters + UNITSPARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters + PALETTEDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/rgb_tables + PLUGINSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/plugins + OPTDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/opt + FORMATSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/formats diff --git a/Doc/newdoc/rst/Test_the_installation.rst~ b/Doc/newdoc/rst/Test_the_installation.rst~ new file mode 100644 index 0000000..c0280d1 --- /dev/null +++ b/Doc/newdoc/rst/Test_the_installation.rst~ @@ -0,0 +1,48 @@ +Check the installation +********************** + + +You can check the installation by simply running the following +command:: + + pNbody_checkall + +This command must of course be in your path. This will be the case +if you did not specified any ``--prefix``. On the contrary if ``--prefix`` +is set to for example, *localdir* you should have your *PATH* environment +variable should contains:: + + localdir/bin + +and you *PYTHONPATH* environment should contains:: + + localdir/lib/python2.x/site-packages/ + +to ensure that the **pNbody** package will be found. + +If everything goes well, you should see a lots of outputs on your screen, +as well as a window displaying an edge-on disk. + +.. image:: ../images/edge-on-disk.png + :width: 200 px + + +Close it when you see it. +The script should finally ends up with something like :: + + + ######################################################################## + Good News ! pNbody with format gadget is working ! + ######################################################################## + + You are currently using the following paths + + HOME : /home/leo + PNBODYPATH : /home/leo/local/lib/python2.6/site-packages/pNbody + CONFIGDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config + PARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/defaultparameters + UNITSPARAMETERFILE : /home/leo/local/lib/python2.6/site-packages/pNbody/config/unitsparameters + PALETTEDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/rgb_tables + PLUGINSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/plugins + OPTDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/opt + FORMATSDIR : /home/leo/local/lib/python2.6/site-packages/pNbody/config/formats diff --git a/Doc/newdoc/rst/ThermodynModule.rst b/Doc/newdoc/rst/ThermodynModule.rst new file mode 100644 index 0000000..fc2d1b7 --- /dev/null +++ b/Doc/newdoc/rst/ThermodynModule.rst @@ -0,0 +1,7 @@ +the thermodyn module +********************** + +.. currentmodule:: pNbody.thermodyn + +.. automodule:: pNbody.thermodyn + :members: diff --git a/Doc/newdoc/rst/Tutorial.rst b/Doc/newdoc/rst/Tutorial.rst new file mode 100644 index 0000000..2aad6bb --- /dev/null +++ b/Doc/newdoc/rst/Tutorial.rst @@ -0,0 +1,9 @@ +Tutorial +********************** + +.. 
toctree:: + :maxdepth: 2 + + Tutorial_interpreter + Tutorial_scripts + Tutorial_parallel \ No newline at end of file diff --git a/Doc/newdoc/rst/Tutorial.rst~ b/Doc/newdoc/rst/Tutorial.rst~ new file mode 100644 index 0000000..9fc89db --- /dev/null +++ b/Doc/newdoc/rst/Tutorial.rst~ @@ -0,0 +1,9 @@ +Tutorial +********************** + +.. toctree:: + :maxdepth: 2 + + Using pNbody with the python interpreter + Using pNbody with scripts + Using pNbody interactively in parallel \ No newline at end of file diff --git a/Doc/newdoc/rst/Tutorial_interpreter.rst b/Doc/newdoc/rst/Tutorial_interpreter.rst new file mode 100644 index 0000000..702ca13 --- /dev/null +++ b/Doc/newdoc/rst/Tutorial_interpreter.rst @@ -0,0 +1,280 @@ +Using **pNbody** with the python interpreter +********************** + +In order to use this tutorial, you first need to copy some examples provided +with **pNbody**. This can be done by typing:: + + pNbody_copy-examples + +by default, this create a directory in your home ``~/pnbody_examples``. +Move to this directory:: + + cd ~/pnbody_examples + +Then you can simply follow the instructions below. +First, start the python interpreter:: + + leo@obsrevaz:~/pnbody_examples python + Python 2.4.2 (#2, Jul 13 2006, 15:26:48) + [GCC 4.0.1 (4.0.1-5mdk for Mandriva Linux release 2006.0)] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + >>> + + +Now, you can load the **pNbody** module:: + + >>> from pNbody import * + + +Creating **pNbody** objects from scratch +======================================== + + +We can first start by creating a default **pNbody** objet and get info about it :: + + >>> nb = Nbody() + >>> nb.info() + ----------------------------------- + particle file : ['file.dat'] + ftype : 'Nbody_default' + mxntpe : 6 + nbody : 0 + nbody_tot : 0 + npart : [0, 0, 0, 0, 0, 0] + npart_tot : [0, 0, 0, 0, 0, 0] + mass_tot : 0.0 + byteorder : 'little' + pio : 'no' + >>> + + +All variables linked to the object nb are accesible by typing nb. followed by the associated variables : + + >>> nb.nbody + 0 + >>> nb.mass_tot + 0.0 + >>> nb.pio + 'no' + +Now, you can create an object by giving the positions of particles:: + + >>> pos = ones((10,3),float32) + >>> nb = Nbody(pos=pos) + >>> nb.info() + ----------------------------------- + particle file : ['file.dat'] + ftype : 'Nbody_default' + mxntpe : 6 + nbody : 10 + nbody_tot : 10 + npart : array([10, 0, 0, 0, 0, 0]) + npart_tot : array([10, 0, 0, 0, 0, 0]) + mass_tot : 1.00000011921 + byteorder : 'little' + pio : 'no' + + len pos : 10 + pos[0] : array([ 1., 1., 1.], dtype=float32) + pos[-1] : array([ 1., 1., 1.], dtype=float32) + len vel : 10 + vel[0] : array([ 0., 0., 0.], dtype=float32) + vel[-1] : array([ 0., 0., 0.], dtype=float32) + len mass : 10 + mass[0] : 0.10000000149 + mass[-1] : 0.10000000149 + len num : 10 + num[0] : 0 + num[-1] : 9 + len tpe : 10 + tpe[0] : 0 + tpe[-1] : 0 + +In this case, you can see that the class automatically intitialize other arrays variables +(vel, mass, num and rsp) with default values. Only the first and the last element of +each defined vector are displyed by the methode info. All defined arrays and array elements +may be easily accessible using the numarray convensions. 
For exemple, to display and +change the positions of the tree first particles, type:: + + >>> nb.pos[:3] + array([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]], type=float32) + >>> nb.pos[:3]=2*ones((3,3),float32) + >>> nb.pos[:3] + array([[ 2., 2., 2.], + [ 2., 2., 2.], + [ 2., 2., 2.]], type=float32) + +Open from existing file +======================================== + +Now, lets try to open the gadget snapshot gadget_z00.dat. This is achieved by typing:: + + >>> nb = Nbody('gadget_z00.dat',ftype='gadget') + +Again, informatins on this snapshot may be obtained using the instance info():: + + >>> nb.info() + ----------------------------------- + particle file : ['gadget_z00.dat'] + ftype : 'Nbody_gadget' + mxntpe : 6 + nbody : 20560 + nbody_tot : 20560 + npart : array([ 9160, 10280, 0, 0, 1120, 0]) + npart_tot : array([ 9160, 10280, 0, 0, 1120, 0]) + mass_tot : 79.7066955566 + byteorder : 'little' + pio : 'no' + + len pos : 20560 + pos[0] : array([-1294.48828125, -2217.09765625, -9655.49609375], dtype=float32) + pos[-1] : array([ -986.0625 , -2183.83203125, 4017.04296875], dtype=float32) + len vel : 20560 + vel[0] : array([ -69.80491638, 60.56475067, -166.32981873], dtype=float32) + vel[-1] : array([-140.59715271, -66.44669342, -37.01613235], dtype=float32) + len mass : 20560 + mass[0] : 0.00108565215487 + mass[-1] : 0.00108565215487 + len num : 20560 + num[0] : 21488 + num[-1] : 1005192 + len tpe : 20560 + tpe[0] : 0 + tpe[-1] : 4 + + atime : 1.0 + redshift : 2.22044604925e-16 + flag_sfr : 1 + flag_feedback : 1 + nall : [ 9160 10280 0 0 1120 0] + flag_cooling : 1 + num_files : 1 + boxsize : 100000.0 + omega0 : 0.3 + omegalambda : 0.7 + hubbleparam : 0.7 + flag_age : 0 + flag_metals : 0 + nallhw : [0 0 0 0 0 0] + flag_entr_ic : 0 + critical_energy_spec: 0.0 + + len u : 20560 + u[0] : 6606.63037109 + u[-1] : 0.0 + len rho : 20560 + rho[0] : 7.05811936674e-11 + rho[-1] : 0.0 + len rsp : 20560 + rsp[0] : 909.027587891 + rsp[-1] : 0.0 + len opt : 20560 + opt[0] : 446292.5625 + opt[-1] : 0.0 + +You can obtain informations on physical values, like the center of mass +or the total angular momentum vector by typing:: + + >>> nb.cm() + array([-1649.92651346, 609.98256428, -1689.04011033]) + >>> nb.Ltot() + array([-1112078.125 , -755964.1875, -1536667.125 ], dtype=float32) + +In order to visualise the model in position space, it is possible to +generate a surface density map of it using the display instance:: + + >>> nb.display(size=(10000,10000),shape=(256,256),palette='light') + +You can now performe some operations on the model in order to explore a specific +region. First, translate the model in position space:: + + >>> nb.translate([3125,-4690,1720]) + >>> nb.display(size=(10000,10000),shape=(256,256),palette='light') + >>> nb.display(size=(1000,1000),shape=(256,256),palette='light') + +Ou can now rotate around:: + + >>> nb.rotate(angle=pi) + >>> nb.display(size=(1000,1000),shape=(256,256),palette='light') + +You can now display a temperature map of the model. 
First, +create a new object with only the gas particles:: + + >>> nb_gas = nb.select('gas') + >>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='light') + +now, display the temperture mass-weighted map:: + + >>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='rainbow4',mode='T',filter_name='gaussian') + + +Selection of particles +======================================== + +You can select only particles within a radius smaller tha 500 (in user units) +with respect to the center:: + + >>> nb_sub = nb.selectc((nb.rxyz()<500)) + >>> nb_sub.display(size=(1000,1000),shape=(256,256),palette='light') + +Now, rename the new model and save it:: + + >>> nb_sub.rename('gadget_z00_sub.dat') + >>> nb_sub.write() + +A new gadget file has been created and saved in the current directory. +We can now select particles as a function of the temperature. +First, display the maximum temperature among all gas particles, +then selectc particles and finally save in 'T11.num' the identifier (variable num) of these particles:: + + >>> log10(max(nb_gas.T())) + 12.8707923889 + >>> nb_sub = nb_gas.selectc( (nb_gas.T()>1e11) ) + >>> nb_sub.write_num('T11.num') + +Now open a new snapshot, from the same simulation, but at different redshift and find the +particles in previous snapshot with temperature higher than $10^{11}$:: + + >>> nb = Nbody('gadget_z40.dat',ftype='gadget') + >>> nb.display(size=(10000,10000),shape=(256,256),palette='light') + >>> nb_sub = nb.selectp(file='T11.num') + >>> nb_sub.display(size=(10000,10000),shape=(256,256),palette='light') + +Now, instead of saving it in a gadget file, save it in a binary file type. +You simply need to call the set_ftype instance before saving it:: + + >>> nb = nb.set_ftype('binary') + >>> nb.rename('binary.dat') + >>> nb.write() + +Merging two models +=================== + +As a last example, we show how two **pNbody** models can be easyly merged with only 11 lines:: + + >>> nb1 = Nbody('disk.dat',ftype='gadget') + >>> nb2 = Nbody('disk.dat',ftype='gadget') + >>> nb1.rotate(angle=pi/4,axis=[0,1,0]) + >>> nb1.translate([-150,0,0]) + >>> nb1.vel = nb1.vel + [50,0,0] + >>> nb2.rotate(angle=pi/4,axis=[1,0,0]) + >>> nb2.translate([+150,0,50]) + >>> nb2.vel = nb2.vel - [50,0,0] + >>> nb3 = nb1 + nb2 + >>> nb3.rename('merge.dat') + >>> nb3.write() + +Now display the result from different point of view:: + + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2') + >>> nb3 = nb3.select('disk') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xz') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xy') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='yz') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0]) + +or save it into a gif file:: + + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0],save='image.gif') diff --git a/Doc/newdoc/rst/Tutorial_interpreter.rst~ b/Doc/newdoc/rst/Tutorial_interpreter.rst~ new file mode 100644 index 0000000..d4742b0 --- /dev/null +++ b/Doc/newdoc/rst/Tutorial_interpreter.rst~ @@ -0,0 +1,280 @@ +Using **pNbody** with the python interpreter +********************** + +In order to use this tutorial, you first need to copy some examples provided +with **pNbody**. This can be done by typing:: + + pNbody_copy-examples + +by default, this create a directory in your home ``~/pnbody_examples``. +Move to this directory:: + + cd ~/pnbody_examples + +Then you can simply follow the instructions below. 
+First, start the python interpreter:: + + leo@obsrevaz:~/pnbody_examples python + Python 2.4.2 (#2, Jul 13 2006, 15:26:48) + [GCC 4.0.1 (4.0.1-5mdk for Mandriva Linux release 2006.0)] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + >>> + + +Now, you can load the **pNbody** module:: + + >>> from pNbody import * + + +Creating **pNbody** objects from scratch +======================================== + + +We can first start by creating a default **pNbody** objet and get info about it :: + + >>> nb = Nbody() + >>> nb.info() + ----------------------------------- + particle file : ['file.dat'] + ftype : 'Nbody_default' + mxntpe : 6 + nbody : 0 + nbody_tot : 0 + npart : [0, 0, 0, 0, 0, 0] + npart_tot : [0, 0, 0, 0, 0, 0] + mass_tot : 0.0 + byteorder : 'little' + pio : 'no' + >>> + + +All variables linked to the object nb are accesible by typing nb. followed by the associated variables : + + >>> nb.nbody + 0 + >>> nb.mass_tot + 0.0 + >>> nb.pio + 'no' + +Now, you can create an object by giving the positions of particles:: + + >>> pos = ones((10,3),float32) + >>> nb = Nbody(pos=pos) + >>> nb.info() + ----------------------------------- + particle file : ['file.dat'] + ftype : 'Nbody_default' + mxntpe : 6 + nbody : 10 + nbody_tot : 10 + npart : array([10, 0, 0, 0, 0, 0]) + npart_tot : array([10, 0, 0, 0, 0, 0]) + mass_tot : 1.00000011921 + byteorder : 'little' + pio : 'no' + + len pos : 10 + pos[0] : array([ 1., 1., 1.], dtype=float32) + pos[-1] : array([ 1., 1., 1.], dtype=float32) + len vel : 10 + vel[0] : array([ 0., 0., 0.], dtype=float32) + vel[-1] : array([ 0., 0., 0.], dtype=float32) + len mass : 10 + mass[0] : 0.10000000149 + mass[-1] : 0.10000000149 + len num : 10 + num[0] : 0 + num[-1] : 9 + len tpe : 10 + tpe[0] : 0 + tpe[-1] : 0 + +In this case, you can see that the class automatically intitialize other arrays variables +(vel, mass, num and rsp) with default values. Only the first and the last element of +each defined vector are displyed by the methode info. All defined arrays and array elements +may be easily accessible using the numarray convensions. For exemple, to display and +change the positions of the tree first particles, type:: + + >>> nb.pos[:3] + array([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]], type=float32) + >>> nb.pos[:3]=2*ones((3,3),float32) + >>> nb.pos[:3] + array([[ 2., 2., 2.], + [ 2., 2., 2.], + [ 2., 2., 2.]], type=float32) + +Open from existing file +======================================== + +Now, lets try to open the gadget snapshot gadget_z00.dat. 
This is achieved by typing:: + + >>> nb = Nbody('gadget_z00.dat',ftype='gadget') + +Again, informatins on this snapshot may be obtained using the instance info():: + + >>> nb.info() + ----------------------------------- + particle file : ['gadget_z00.dat'] + ftype : 'Nbody_gadget' + mxntpe : 6 + nbody : 20560 + nbody_tot : 20560 + npart : array([ 9160, 10280, 0, 0, 1120, 0]) + npart_tot : array([ 9160, 10280, 0, 0, 1120, 0]) + mass_tot : 79.7066955566 + byteorder : 'little' + pio : 'no' + + len pos : 20560 + pos[0] : array([-1294.48828125, -2217.09765625, -9655.49609375], dtype=float32) + pos[-1] : array([ -986.0625 , -2183.83203125, 4017.04296875], dtype=float32) + len vel : 20560 + vel[0] : array([ -69.80491638, 60.56475067, -166.32981873], dtype=float32) + vel[-1] : array([-140.59715271, -66.44669342, -37.01613235], dtype=float32) + len mass : 20560 + mass[0] : 0.00108565215487 + mass[-1] : 0.00108565215487 + len num : 20560 + num[0] : 21488 + num[-1] : 1005192 + len tpe : 20560 + tpe[0] : 0 + tpe[-1] : 4 + + atime : 1.0 + redshift : 2.22044604925e-16 + flag_sfr : 1 + flag_feedback : 1 + nall : [ 9160 10280 0 0 1120 0] + flag_cooling : 1 + num_files : 1 + boxsize : 100000.0 + omega0 : 0.3 + omegalambda : 0.7 + hubbleparam : 0.7 + flag_age : 0 + flag_metals : 0 + nallhw : [0 0 0 0 0 0] + flag_entr_ic : 0 + critical_energy_spec: 0.0 + + len u : 20560 + u[0] : 6606.63037109 + u[-1] : 0.0 + len rho : 20560 + rho[0] : 7.05811936674e-11 + rho[-1] : 0.0 + len rsp : 20560 + rsp[0] : 909.027587891 + rsp[-1] : 0.0 + len opt : 20560 + opt[0] : 446292.5625 + opt[-1] : 0.0 + +You can obtain informations on physical values, like the center of mass +or the total angular momentum vector by typing:: + + >>> nb.cm() + array([-1649.92651346, 609.98256428, -1689.04011033]) + >>> nb.Ltot() + array([-1112078.125 , -755964.1875, -1536667.125 ], dtype=float32) + +In order to visualise the model in position space, it is possible to +generate a surface density map of it using the display instance:: + + >>> nb.display(size=(10000,10000),shape=(256,256),palette='light') + +You can now performe some operations on the model in order to explore a specific +region. First, translate the model in position space:: + + >>> nb.translate([3125,-4690,1720]) + >>> nb.display(size=(10000,10000),shape=(256,256),palette='light') + >>> nb.display(size=(1000,1000),shape=(256,256),palette='light') + +Ou can now rotate around:: + + >>> nb.rotate(angle=pi) + >>> nb.display(size=(1000,1000),shape=(256,256),palette='light') + +You can now display a temperature map of the model. First, +create a new object with only the gas particles:: + + >>> nb_gas = nb.select('gas') + >>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='light') + +now, display the temperture mass-weighted map:: + + >>> nb_gas.display(size=(1000,1000),shape=(256,256),palette='rainbow4',mode='T',filter_name='gaussian') + + +Selection of particles +======================================== + +You can select only particles within a radius smaller tha 500 (in user units) +with respect to the center:: + + >>> nb_sub = nb.selectc((nb.rxyz()<500)) + >>> nb_sub.display(size=(1000,1000),shape=(256,256),palette='light') + +Now, rename the new model and save it:: + + >>> nb_sub.rename('gadget_z00_sub.dat') + >>> nb_sub.write() + +A new gadget file has been created and saved in the current directory. +We can now select particles as a function of the temperature. 
+First, display the maximum temperature among all gas particles, +then selectc particles and finally save in 'T11.num' the identifier (variable num) of these particles:: + + >>> log10(max(nb_gas.T())) + 12.8707923889 + >>> nb_sub = nb_gas.selectc( (nb_gas.T()>1e11) ) + >>> nb_sub.write_num('T11.num') + +Now open a new snapshot, from the same simulation, but at different redshift and find the +particles in previous snapshot with temperature higher than $10^{11}$:: + + >>> nb = Nbody('gadget_z40.dat',ftype='gadget') + >>> nb.display(size=(10000,10000),shape=(256,256),palette='light') + >>> nb_sub = nb.selectp(file='T11.num') + >>> nb_sub.display(size=(10000,10000),shape=(256,256),palette='light') + +Now, instead of saving it in a gadget file, save it in a binary file type. +You simply need to call the set_ftype instance before saving it:: + + >>> nb = nb.set_ftype('binary') + >>> nb.rename('binary.dat') + >>> nb.write() + +Merging two models +=================== + +As a last example, we show how two **pNbody** models can be easyly merged with only 11 lines:: + + >>> nb1 = Nbody('disk.dat',ftype='gadget') + >>> nb2 = Nbody('disk.dat',ftype='gadget') + >>> nb1.rotate2(angle=pi/4,axis=[0,1,0]) + >>> nb1.translate([-150,0,0]) + >>> nb1.vel = nb1.vel + [50,0,0] + >>> nb2.rotate(angle=pi/4,axis=[1,0,0]) + >>> nb2.translate([+150,0,50]) + >>> nb2.vel = nb2.vel - [50,0,0] + >>> nb3 = nb1 + nb2 + >>> nb3.rename('merge.dat') + >>> nb3.write() + +Now display the result from different point of view:: + + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2') + >>> nb3 = nb3.select('disk') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xz') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='xy') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',view='yz') + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0]) + +or save it into a gif file:: + + >>> nb3.display(size=(300,300),shape=(256,256),palette='lut2',xp=[-100,0,0],save='image.gif') diff --git a/Doc/newdoc/rst/Tutorial_parallel.rst b/Doc/newdoc/rst/Tutorial_parallel.rst new file mode 100644 index 0000000..6837763 --- /dev/null +++ b/Doc/newdoc/rst/Tutorial_parallel.rst @@ -0,0 +1,162 @@ +Using pNbody in parallel +********************** + +With **pNbody**, it is possible to run scripts in parallel, using the ``mpi`` libary. +You need to have of course ``mpi`` and ``mpi4py`` installed. +To check your installation, try:: + + mpirun -np 2 pNbody_mpi + +you should get:: + + This is task 0 over 2 + This is task 1 over 2 + +but if you get:: + + This is task 0 over 1 + This is task 0 over 1 + +this means that something is not working correctly, and you should check your path or ``mpi`` and ``mpi4py`` installation +before reading further. + +The prevous scripts ``scripts/slice.py`` can diretely be run in paralle. +This is simply obtained by calling the ``mpirun`` command:: + + mpirun -np 2 scripts/slice.py gadget_z*0.dat + +In this simple script, only the processus of rank 0 (the master) open the file. +The content of the file (particles) is then distributed among all the other processors. +Eeach processor recives a fraction of the particles. +Then, the selection of gas gas particles and the slice are preformed by all processors on +their local particles. +Finally, the ``nb.write()`` command, run by the master, gather all particles and write the output file. 
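For reference, here is a minimal sketch of what the ``scripts/slice.py`` script described above presumably contains; it is inferred from the parallel variant ``slice-p1.py`` shown in the next section (without the parallel-output call), so the file actually shipped with **pNbody** may differ slightly::

    #!/usr/bin/env python

    import sys
    from pNbody import *      # same imports as the other example scripts in this tutorial

    files = sys.argv[1:]

    for file in files:
        print "slicing", file
        nb = Nbody(file, ftype='gadget')               # only the master (rank 0) reads the file
        nb = nb.select('gas')                          # keep gas particles only
        nb = nb.selectc((fabs(nb.pos[:, 1]) < 1000))   # keep a thin slice in y
        nb.rename(file + '.slice')
        nb.write()                                     # the master gathers all particles and writes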
+ +Parallel output +========================================
+
+With **pNbody**, it is possible to write files in parallel, i.e., each task writes its own file.
+We can do this in the previous script simply by adding the line ``nb.set_pio('yes')``. This
+tells **pNbody** to write files in parallel when ``nb.write()`` is called.
+The content of the new script ``scripts/slice-p1.py`` is::
+
+ #!/usr/bin/env python
+
+ import sys
+ from pNbody import *
+
+ files = sys.argv[1:]
+
+ for file in files:
+ print "slicing",file
+ nb = Nbody(file,ftype='gadget')
+ nb = nb.select('gas')
+ nb = nb.selectc((fabs(nb.pos[:,1])<1000))
+ nb.rename(file+'.slice')
+ nb.set_pio('yes')
+ nb.write()
+
+We can now run it::
+
+ mpirun -np 2 scripts/slice-p1.py gadget_z00.dat
+
+This creates two new files::
+
+ gadget_z00.dat.slice.1
+ gadget_z00.dat.slice.0
+
+The files have the same name as the initial name given to ``Nbody()``, with an extension ``.i`` where ``i``
+corresponds to the process rank. Each file contains the particles attributed to the corresponding task.
+
+
+Parallel input +========================================
+
+Now, it is possible to start by reading these two files in parallel instead of asking only the master to read one file.
+In our script, we add the optional argument ``pio='yes'`` when creating the object with ``Nbody()``.
+Note also that we use ``nb.set_pio('no')``, which forces the final file to be written only by the master::
+
+ #!/usr/bin/env python
+
+ import sys
+ from pNbody import *
+
+ files = sys.argv[1:]
+
+ for file in files:
+ print "slicing",file
+ nb = Nbody(file,ftype='gadget',pio='yes')
+ nb = nb.select('gas')
+ nb = nb.selectc((fabs(nb.pos[:,1])<1000))
+ nb.rename(file+'.slice.new')
+ nb.set_pio('no')
+ nb.write()
+
+
+When we launch it::
+
+ mpirun -np 2 scripts/slice-p2.py gadget_z00.dat.slice
+
+the two files ``gadget_z00.dat.slice.0`` and ``gadget_z00.dat.slice.1`` are each read
+by one task and processed, but at the end only the master writes the final output: ``gadget_z00.dat.slice.slice.new``.
+
+
+More on parallelism +========================================
+
+
+Let's try two other scripts. The first one (``findmax.py``) tries to find the maximum radial distance between
+the particles and the center. It illustrates the difference between using ``max()``,
+which gives the local maximum (the maximum among the particles of the node), and ``mpi.mpi_max()``,
+which gives the global maximum among all particles::
+
+ #!/usr/bin/env python
+
+ import sys
+ from pNbody import *
+
+ file = sys.argv[1]
+
+ nb = Nbody(file,ftype='gadget',pio='yes')
+ local_max = max(nb.rxyz())
+ global_max = mpi.mpi_max(nb.rxyz())
+
+ print "proc %d local_max = %f global_max = %f"%(mpi.ThisTask,local_max,global_max)
+
+
+When running it, you should get::
+
+ mpirun -np 2 ./scripts/findmax.py gadget_z00.dat.slice
+ proc 1 local_max = 8109.682129 global_max = 8109.682129
+ proc 0 local_max = 7733.846680 global_max = 8109.682129
+
+
+which clearly illustrates the point. Finally, the last script shows that even graphical
+functions support parallelism. The script ``showmap.py`` illustrates this point by computing
+a map of the model::
+
+ #!/usr/bin/env python
+
+ import sys
+ from pNbody import *
+
+ file = sys.argv[1]
+
+ nb = Nbody(file,ftype='gadget',pio='yes')
+ nb.display(size=(10000,10000),shape=(256,256),palette='light')
+
+When running::
+
+ mpirun -np 2 ./scripts/showmap.py gadget_z00.dat.slice
+
+you get an image of the model.
The mapping has been performed independently by two processors. + + + + + + + diff --git a/Doc/newdoc/rst/Tutorial_parallel.rst~ b/Doc/newdoc/rst/Tutorial_parallel.rst~ new file mode 100644 index 0000000..6837763 --- /dev/null +++ b/Doc/newdoc/rst/Tutorial_parallel.rst~ @@ -0,0 +1,162 @@ +Using pNbody in parallel +********************** + +With **pNbody**, it is possible to run scripts in parallel, using the ``mpi`` libary. +You need to have of course ``mpi`` and ``mpi4py`` installed. +To check your installation, try:: + + mpirun -np 2 pNbody_mpi + +you should get:: + + This is task 0 over 2 + This is task 1 over 2 + +but if you get:: + + This is task 0 over 1 + This is task 0 over 1 + +this means that something is not working correctly, and you should check your path or ``mpi`` and ``mpi4py`` installation +before reading further. + +The prevous scripts ``scripts/slice.py`` can diretely be run in paralle. +This is simply obtained by calling the ``mpirun`` command:: + + mpirun -np 2 scripts/slice.py gadget_z*0.dat + +In this simple script, only the processus of rank 0 (the master) open the file. +The content of the file (particles) is then distributed among all the other processors. +Eeach processor recives a fraction of the particles. +Then, the selection of gas gas particles and the slice are preformed by all processors on +their local particles. +Finally, the ``nb.write()`` command, run by the master, gather all particles and write the output file. + +Parallel output +======================================== + +With **pNbody**, its possible to write files in parallel, i.e., each task write its own file. +We can do this in the previous script simply by adding the line ``nb.set_pio('yes')``. This +tells **pNbody** to write files in parallel when ``nb.write()`` is called. +The content of the new scripts ``scripts/slice-p1.py`` is:: + + #!/usr/bin/env python + + import sys + from pNbody import * + + files = sys.argv[1:] + + for file in files: + print "slicing",file + nb = Nbody(file,ftype='gadget') + nb = nb.select('gas') + nb = nb.selectc((fabs(nb.pos[:,1])<1000)) + nb.rename(file+'.slice') + nb.set_pio='yes' + nb.write() + +We can now run it:: + + mpirun -np 2 scripts/slice-p1.py gadget_z00.dat + +This creates two new files:: + + gadget_z00.dat.slice.1 + gadget_z00.dat.slice.0 + +The files have the same name than the initial name given in ``Nbody()`` with an extention ``.i`` where ``i`` +corresponds to the processus rank. Each file contains the particles attributed to the corresponding task. + + +Parallel input +======================================== + +Now, it possible to start by reading these two files in parallel instead of asking only the master to read one file:: +In our script, we add the optional argument ``pio='yes'`` when creating the object with ``Nbody()``:: + + + +Note also that we have used ``nb.set_pio('no')``. This force at the end the file te be written only by the master. + + #!/usr/bin/env python + + import sys + from pNbody import * + + files = sys.argv[1:] + + for file in files: + print "slicing",file + nb = Nbody(file,ftype='gadget',pio='yes') + nb = nb.select('gas') + nb = nb.selectc((fabs(nb.pos[:,1])<1000)) + nb.rename(file+'.slice.new') + nb.set_pio('no') + nb.write() + + +When we lunch it:: + + mpirun -np 2 scripts/slice-p2.py gadget_z00.dat.slice + +the two files ``gadget_z00.dat.slice.0`` and ``gadget_z00.dat.slice.1`` are read +each by one task, processed but at the end only the master write the final output : `gadget_z00.dat.slice.slice.new``. 
+ + +More on parallelisme +======================================== + + +Lets try two other scripts. The first one (``findmax.py``) try to find the radial maximum distance among +all particles and the center. It illustrate the difference between using ``max()`` +wich gives the local maximum (maximum among particles of the node) and ``mpi.mpi_max()`` +which gives the global maximum among all particles:: + + #!/usr/bin/env python + + import sys + from pNbody import * + + file = sys.argv[1] + + nb = Nbody(file,ftype='gadget',pio='yes') + local_max = max(nb.rxyz()) + global_max = mpi.mpi_max(nb.rxyz()) + + print "proc %d local_max = %f global_max = %f"%(mpi.ThisTask,local_max,global_max) + + +When running it, you should get:: + + mpirun -np 2 ./scripts/findmax.py gadget_z00.dat.slice + proc 1 local_max = 8109.682129 global_max = 8109.682129 + proc 0 local_max = 7733.846680 global_max = 8109.682129 + + +which illustrate clearly the point. Finally, the latter script shows that even graphical +functions support parallelisme. The script ``showmap.py`` illustrate this point by computing +a map of the model:: + + #!/usr/bin/env python + + import sys + from pNbody import * + + file = sys.argv[1] + + nb = Nbody(file,ftype='gadget',pio='yes') + nb.display(size=(10000,10000),shape=(256,256),palette='light') + +When running :: + + mpirun -np 2 ./scripts/showmap.py gadget_z00.dat.slice + +you get an image of the model. The mapping has been performed independently by two processors. + + + + + + + diff --git a/Doc/newdoc/rst/Tutorial_scripts.rst b/Doc/newdoc/rst/Tutorial_scripts.rst new file mode 100644 index 0000000..594d232 --- /dev/null +++ b/Doc/newdoc/rst/Tutorial_scripts.rst @@ -0,0 +1,45 @@ +Using pNbody with scripts +********************** + + +In addition to using **pNbody** in the python interpreter, +it is very useful to use **pNbody** in python scripts. Usually a python script +begin by the line #!/usr/bin/env python and must be executable:: + + chmod a+x file.py + +The following example (slice.py), we show how to write a script that opens a gadget file, +select gas particles and cut a thin slice + +.. 
math:: -1000 24) : fsize = 24 if (fsize < 8) : fsize = 8 if (fsize < 10): size = '0'+`fsize` else: size = `fsize` fontbasename = os.path.basename(FFONT) fontdirname = os.path.dirname(FFONT)+'/' fontname = fontdirname+fontbasename[:6]+size+ fontbasename[8:] font = ImageFont.load(fontname) # determination de l'offset en y xy = font.getsize('0000.00') xref = width - (width- xy[0])/2 if label: dylab = int(2*xy[1]) else : dylab = 0 # hauteur de l'image height = int((film.numLine)*zoom)+dylab shapezoom = (width,height) ########################### # lecture de tout le film ########################### listim = [] leap = 0 i = -1 while 1: data = film.read_one() if data==None: break i = i + 1 if i >= tinit and i <= tfinal: if leap==0: # creation de l'image de sortie image = film.get_img(data) # zoom image = image.transform(shapezoom,Image.AFFINE,(1./zoom,0.,0.,0.,1./zoom,0),Image.BICUBIC) image = image.transform(shapezoom,Image.AFFINE,(1,0.,0.,0.,1,-dylab),Image.BICUBIC) # put the label if label: time = "%8.3f"%(film.current_time) xy = font.getsize(time) x = xref - xy[0] y = int(dylab/4) poslab = (x,y) draw = ImageDraw.Draw(image) draw.rectangle((0,0,width,dylab),fill=1) draw.text(poslab,time,fill=256,font=font) if text != None: xy = font.getsize(text) x = xy[1] y = height - 2*xy[1] postext = (x,y) draw = ImageDraw.Draw(image) draw.text(postext,text,fill=256,font=font) # include the palette image.putpalette(palette.palette) #################################### # en dessous, partie propre a gif #################################### # save it bname = "tmp/%00004d_%00008.3f"%(i,film.current_time) print bname image.save(bname+'.gif') listim.append(bname+'.gif') leap = fmod(leap+1,step+1) else: break #################################### # creation du film gif #################################### if not keep: cmd = 'convert -delay ' + `delay` + ' tmp/*.gif '+fout print cmd os.system(cmd) # nettoyage for file in listim: os.remove(file) os.rmdir('tmp') diff --git a/scripts/gmov2mov b/scripts/gmov2mov index 9376c63..5cc12af 100644 --- a/scripts/gmov2mov +++ b/scripts/gmov2mov @@ -1,684 +1,684 @@ #!/usr/bin/python import pNbody from pNbody import Movie from pNbody import * from pNbody.palette import * from pNbody import cosmo import string from numpy import * import Image import ImageDraw import ImageFont import ImagePalette import ImageTk import ImageFont import sys import os import string import getopt import math from optparse import OptionParser #################################################################################### def parse_options(): #################################################################################### usage = "usage: %prog [options] file" parser = OptionParser(usage=usage) parser.add_option("-q","--quality", action="store", dest="quality", type="int", default=5, help="quality 1,2,3,4,5") parser.add_option("-m","--mode", action="store", dest="mode", type="string", default=None, help="image mode : L, P or RGB") parser.add_option("--rgb", action="store_true", dest="rgb", default=False, help="rgb mode") parser.add_option("-k","--keep", action="store_true", dest="keep", default=False, help="keep png images") parser.add_option("--only_film", action="store_true", dest="only_film", default=False, help="make movie from dir dir, without reading a gmov film") parser.add_option("--info", action="store_true", dest="info", default=False, help="give info on film") parser.add_option("--nofilm", action="store_true", dest="nofilm", default=False, help="do not create the film (extract image 
only)") parser.add_option("--with_time", action="store_true", dest="with_time", default=False, help="add time label") parser.add_option("--time_format", action="store", dest="time_format", default=None, help="time format") parser.add_option("--time_x", action="store", type='float', dest="time_x", default=None, help="time relative position in x") parser.add_option("--time_y", action="store", type='float', dest="time_y", default=None, help="time relative position in y") parser.add_option("--text", action="store", dest="text", type="string", default=None, help="text") parser.add_option("--text_x", action="store", type='float', dest="text_x", default=None, help="text relative position in x") parser.add_option("--text_y", action="store", type='float', dest="text_y", default=None, help="text relative position in y") parser.add_option("--text_size", action="store", dest="text_size", type="int", default=8, help="text size") parser.add_option("--text_font", action="store", dest="text_font", type="string", default="kidprbol.ttf", help="text font") parser.add_option("-o", action="store", dest="outputfile", type="string", default=None, help="output file name") parser.add_option("--fps", action="store", dest="fps", type="int", default=24, help="frame per second") parser.add_option("--istart", action="store", dest="istart", type="int", default=0, help="first number of frame") parser.add_option("-s","--step", action="store", dest="step", type="int", default=0, help="number of step to leep") parser.add_option("-i","--tinit", action="store", dest="tinit", type="float", default=0, help="initial time") parser.add_option("-f","--tfinal", action="store", dest="tfinal", type="float", default=1e100, help="final time") parser.add_option("-z","--zoom", action="store", dest="zoom", type="float", default=1, help="zoom factor") parser.add_option("-p","--palette", action="store", dest="palette", type="string", default="ramp", help="color palette") parser.add_option("-d","--dir", action="store", dest="dir", type="string", default="tmp", help="directory for png files") parser.add_option("--text_color", action="store", dest="text_color", type="string", default=None, help="text color") parser.add_option("--codec", action="store", dest="codec", type="string", default=None, help="msmpeg4v2, mpeg4, x264") parser.add_option("--format", action="store", dest="format", type="string", default=None, help="film format") parser.add_option("--ratio", action="store", dest="ratio", type="float", default=None, help="film ratio") parser.add_option("--bitrate", action="store", dest="bitrate", type="int", default=None, help="bitrate in k") parser.add_option("--pass", action="store", dest="passes", type="int", default=1, help="number of pass") (options, args) = parser.parse_args() if len(args) == 0: if not options.only_film: print "you must specify at least a filename" sys.exit(0) return args,options #################################################################################### # # MAIN # #################################################################################### files, options = parse_options() -FONT_PATH = os.path.join(NBODYPATH,'fonts') +FONT_PATH = os.path.join(PNBODYPATH,'fonts') FFONT = os.path.join(FONT_PATH,options.text_font) if options.only_film: options.keep = True if not options.only_film: fname = files[0] exec("options.text_color = %s"%(options.text_color)) if options.rgb: mode="image_rgb" if options.text_color == None: options.text_color = (255,255,255) else: mode="image" if options.text_color == None: 
options.text_color = 255 # initial and final time if (options.tfinal != 0): if (options.tfinal < options.tinit): print "number of final image must be larger than initial image !" sys.exit(0) #fname = xarguments[0] if (fname==" "): help_message() sys.exit(0) # verifie que le fichier existe if (os.path.exists(fname)==0): print "Error : the file ",fname," no not exist." sys.exit(0) # output file if options.outputfile == None: options.outputfile = "%s.avi"%os.path.splitext(os.path.basename(fname))[0] else: # find output format ext = os.path.splitext(options.outputfile)[1] options.format = ext[1:] # ouvre le film film = Movie.Movie(fname) film.open() if options.info: film.info() sys.exit(0) # create a palette object palette = Palette() pth = os.path.join(PALETTEDIR,options.palette) palette.read(pth) if not os.path.exists(options.dir): os.mkdir(options.dir) ################################################### # taille de l'image de sortie # largeur de l'image width = int(film.numByte*options.zoom) #---------------- # choix des font #---------------- # determination de la taille fsize = int(12*options.zoom - math.fmod(12*options.zoom,2)) if (fsize > 24) : fsize = 24 if (fsize < 8) : fsize = 8 if (fsize < 10): size = '0'+`fsize` else: size = `fsize` #fontbasename = os.path.basename(FFONT) #fontdirname = os.path.dirname(FFONT)+'/' #fontname = fontdirname+fontbasename[:6]+size+ fontbasename[8:] #font = ImageFont.load(fontname) font = ImageFont.truetype(FFONT,options.text_size) xy = font.getsize('-') if options.with_time and (options.time_x==None) and (options.time_y==None): dylab = int(1.5*xy[1]) else : dylab = 0 # hauteur de l'image height = int((film.numLine)*options.zoom)+dylab shapezoom = (width,height) ########################### # lecture de tout le film ########################### leap = 0 i = -1 + options.istart while 1: #data = film.read_one() #time,image = read_one_with_time() i = i + 1 time,image = film.read_one_with_time(mode) if time==None: break atime = film.current_time if atime>=options.tinit and atime<=options.tfinal: if leap==0: # zoom if options.zoom != 1.: image = image.transform(shapezoom,Image.AFFINE,(1./options.zoom,0.,0.,0.,1./options.zoom,0),Image.BICUBIC) if dylab!=0: image = image.transform(shapezoom,Image.AFFINE,(1,0.,0.,0.,1,-dylab),Image.BICUBIC) # add time if options.with_time: if options.time_format==None: time = "%8.3f"%(atime) elif options.time_format=='cGyr': a = atime t = cosmo.Age_a(a) time = "t = %4.1f Gyr"%(-t) elif options.time_format=='caGyr': a = atime t = cosmo.Age_a(a) time = "a = %4.3f t = %4.1f Gyr"%(a,-t) elif options.time_format=='cazGyr': a = atime z = 1./a - 1 t = cosmo.Age_a(a) time = "a = %4.3f z=%4.1f t=%4.1f Gyr"%(a,z,-t) elif options.time_format=='czGyr': a = atime z = 1./a - 1 t = cosmo.Age_a(a) time = "z=%4.1f t=%4.1f Gyr"%(z,-t) elif options.time_format=='ca': a = atime time = "a = %4.3f"%(a) elif options.time_format=='cz': a = atime z = 1./a - 1 time = "z=%4.1f"%(z) elif options.time_format=='Myr': a = atime time = "%04.0f Myr"%(a) else: exec(options.time_format) xy = font.getsize(time) # specific position if (options.time_x!=None) and (options.time_y!=None): xref = width - (width- xy[0])/2 x = width*options.time_x - xy[0]/2 y = height - height*options.time_y - xy[1]/2 poslab = (x,y) draw = ImageDraw.Draw(image) draw.text(poslab,time,fill=options.text_color,font=font) # default position outisde movie else: xref = width - (width- xy[0])/2 x = xref - xy[0] y = int(dylab/4) poslab = (x,y) draw = ImageDraw.Draw(image) 
draw.rectangle((0,0,width,dylab),fill=1) draw.text(poslab,time,fill=options.text_color,font=font) # add a text if options.text != None: xy = font.getsize(options.text) if (options.text_x!=None) and (options.text_y!=None): x = width*options.text_x - xy[0]/2 y = height - height*options.text_y - xy[1]/2 else: x = 1*xy[1] y = height - xy[1] - 1*xy[1] postext = (x,y) draw = ImageDraw.Draw(image) draw.text(postext,options.text,fill=options.text_color,font=font) # add borders if options.ratio != None: # read the geometry w = image.size[0] h = image.size[1] f = options.ratio * float(h)/float(w) dw = int(0.5*w* (f - 1)) image = image.transform((dw++dw+w,h),Image.AFFINE,(1,0.,-dw,0.,1,0),Image.BICUBIC) # include the palette if not options.rgb: image.putpalette(palette.palette) if options.mode != None: image = image.convert(options.mode) #################################### # en dessous, partie propre a mpeg #################################### # save it bname = "%s/%08d"%(options.dir,i) print bname, atime image.save(bname+'.png') if options.format == 'mp4': os.system("convert -quality 100 %s %s "%(bname+'.png',bname+'.gif')) leap = fmod(leap+1,options.step) else: continue #################################### # conversion into gif if needed #################################### ''' if options.format == 'mp4' and options.only_film: files = glob.glob(os.path.join(options.dir,'*.png')) for file in files: bname = os.path.splitext(file)[0] cmd = "convert -quality 100 %s %s "%(bname+'.png',bname+'.gif') print cmd os.system(cmd) ''' #################################### # creation du film mpeg #################################### ''' #mencoder mf://tmp/*.png -mf type=png:fps=25 -ovc lavc -lavcopts vcodec=mpeg4 -oac copy -o film.mpg #tres basse qualite vbitrate=800 mencoder mf://tmp/*.png -mf type=png:fps=25 -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=256 -oac copy -o film.mpg #basse qualite vbitrate=800 mencoder mf://tmp/*.png -mf type=png:fps=25 -ovc lavc -lavcopts vcodec=mpeg4 -oac copy -o film.mpg #basse qualite mencoder mf://tmp/*.png -mf type=png:fps=25 -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=1600 -oac copy -o film.mpg tres bonne qualite mencoder mf://tmp/*.png -mf type=png:fps=25 -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=2400:mbd=2:mv0:trell:v4mv:cbp:last_pred=3:predia=2:dia=2:vmax_b_frames=2:vb_strategy=1:precmp=2:cmp=2:subcmp=2:preme=2:qns=2 -oac copy -o film.mpg super qualite mencoder mf://tmp/*.png -mf type=png:fps=25 -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=8000:vrc_buf_size=1835:vrc_maxrate=9800:keyint=15:trell:mbd=2:precmp=2:subcmp=2:cmp=2:dia=-10:predia=-10:cbp:mv0:vqmin=1:lmin=1:dc=10:vstrict=0 -oac copy -o film.mpg sans compresseion mencoder mf://tmp/*.png -mf type=png:fps=25 -ovc copy -oac copy -o film.avi ''' #if options.format == 'mp4': # # #ffmpeg -r 24 -b 16000k -i %08d.gif qq.mp4 # # #cmd = """ffmpeg -i %s/%%08d.gif -r %d -b 16000k %s"""%(options.dir,options.fps,options.outputfile) # cmd = """ffmpeg -i %s/%%08d.png -r %d -b 16000k %s"""%(options.dir,options.fps,options.outputfile) # read one file and find width height files = glob.glob(os.path.join(options.dir,'*.png')) img = Image.open(files[0]) width = img.size[0] height= img.size[1] print "image size = %d x %d"%(width,height) oac = "-oac copy" ovco = "" if options.codec=='msmpeg4v2': ovco = "-ovc lavc -lavcopts" if options.bitrate!=None: vbitrate = options.bitrate elif options.quality == 1: vbitrate = 256 elif options.quality == 2: vbitrate = 1600 elif options.quality == 3: vbitrate = 2400 elif options.quality == 4: vbitrate = 8000 
ovc = "vcodec=%s:vbitrate=%d"%(options.codec,vbitrate) elif options.codec=='x264': # http://www.mplayerhq.hu/DOCS/HTML/en/menc-feat-x264.html ovco = "-ovc x264 -x264encopts" if options.bitrate!=None: vbitrate = options.bitrate else: vbitrate = int(50 * options.fps * width * height / 256. / 1024.) print "vbitrate=%d"%(vbitrate) if options.quality == 1: ovc = "subq=4:bframes=2:b_pyramid:weight_b:bitrate=%d"%vbitrate if options.quality == 2: ovc = "subq=5:8x8dct:frameref=2:bframes=3:b_pyramid:weight_b:bitrate=%d"%vbitrate else: ovc = "subq=6:partitions=all:8x8dct:me=umh:frameref=5:bframes=3:b_pyramid:weight_b:bitrate=%d"%vbitrate else: ovc = "-ovc copy" cmds = [] if options.codec=='x264': for p in xrange(options.passes): tmp = "%s pass=%s:%s"%(ovco,p+1,ovc) cmd = """mencoder mf://%s/*.png -mf type=png:fps=%d %s %s -o %s"""%(options.dir,options.fps,tmp,oac,options.outputfile) cmds.append(cmd) else: ovc = "%s %s"%(ovco,ovc) cmd = """mencoder mf://%s/*.png -mf type=png:fps=%d %s %s -o %s"""%(options.dir,options.fps,ovc,oac,options.outputfile) cmds.append(cmd) if not options.nofilm: for cmd in cmds: print "############################" print cmd print "############################" os.system(cmd) else: options.keep = True if not options.keep: os.system('rm %s/*'%(options.dir)) os.removedirs('%s'%(options.dir)) diff --git a/scripts/gmov2mpeg b/scripts/gmov2mpeg index 3b424ac..a9415c8 100755 --- a/scripts/gmov2mpeg +++ b/scripts/gmov2mpeg @@ -1,431 +1,431 @@ #!/usr/bin/env python import pNbody from pNbody import Movie from pNbody import * from pNbody.palette import * import string from numpy import * import Image import ImageDraw import ImageFont import ImagePalette import ImageTk import ImageFont import sys import os import string import getopt import math -FONT_PATH = os.path.join(NBODYPATH,'fonts') +FONT_PATH = os.path.join(PNBODYPATH,'fonts') FFONT = os.path.join(FONT_PATH,'helvBO10.pil') #################################################################################### def help_message(): #################################################################################### print '''Usage : gmov2mpeg -f file [options ...] Options: -h -- this help message -f -- input file -o -- output file -p -- name of a palette -z -- zoom factor -d -- pause between images (ms) -b -- number of initial image -e -- number of final image -s -- number of images to leap -l -- add the label (time) -i -- gives information on the film -k -- keep temporary images -t -- add a text --cosmo -- cosmoligical option --version -- displays version''' sys.exit(0) #################################################################################### # info film #################################################################################### def infofilm(filename) : f = Movie.Movie(filename) f.open() f.info() #################################################################################### # # MAIN # #################################################################################### try: options, xarguments = getopt.getopt(sys.argv[1:],'f:o:p:z:d:b:e:s:t:ihlk', ['version','cosmo']) except getopt.error: print '''Error: You tried to use an unknown option or the argument for an option that requires it was missing. Try `mov2gif -h\' for more information.''' sys.exit(0) # examination des options # h : help message for a in options[:]: if a[0] == '-h': help_message() zoom = 1. 
fname = " " fout = " " delay = 10 color = "ramp" tinit = 0 tfinal = 0 step = 0 info = 0 label = 0 keep = 0 text = None cosmo = 0 # f : input file for a in options[:]: if a[0] == '-f' and a[1] != '': fname = a[1] options.remove(a) break elif a[0] == '-f' and a[1] == '': print '-f expects an argument' sys.exit(0) # o : output file for a in options[:]: if a[0] == '-o' and a[1] != '': fout = a[1] options.remove(a) break elif a[0] == '-o' and a[1] == '': print '-o expects an argument' sys.exit(0) # p : palette for a in options[:]: if a[0] == '-p' and a[1] != '': color = pth = os.path.join(PALETTEDIR,a[1]) options.remove(a) break elif a[0] == '-p' and a[1] == '': print '-p expects an argument' sys.exit(0) # z : zoom for a in options[:]: if a[0] == '-z' and a[1] != '': zoom = string.atof(a[1]) options.remove(a) break elif a[0] == '-z' and a[1] == '': print '-z expects an argument' sys.exit(0) # d : delay for a in options[:]: if a[0] == '-d' and a[1] != '': delay = string.atoi(a[1]) options.remove(a) break elif a[0] == '-d' and a[1] == '': print '-d expects an argument' sys.exit(0) # i : im init for a in options[:]: if a[0] == '-b' and a[1] != '': tinit = string.atof(a[1]) options.remove(a) break elif a[0] == '-b' and a[1] == '': print '-b expects an argument' sys.exit(0) # f : im init for a in options[:]: if a[0] == '-e' and a[1] != '': tfinal = string.atof(a[1]) options.remove(a) break elif a[0] == '-e' and a[1] == '': print '-e expects an argument' sys.exit(0) # s : im init for a in options[:]: if a[0] == '-s' and a[1] != '': step = string.atoi(a[1]) options.remove(a) break elif a[0] == '-s' and a[1] == '': print '-s expects an argument' sys.exit(0) # l : add label for a in options[:]: if a[0] == '-l': label = 1 options.remove(a) break # t : text for a in options[:]: if a[0] == '-t' and a[1] != '': text = a[1] options.remove(a) break elif a[0] == '-t' and a[1] == '': print '-t expects an argument' sys.exit(0) # k : add label for a in options[:]: if a[0] == '-k': keep = 1 options.remove(a) break # i : info for a in options[:]: if a[0] == '-i': info = 1 options.remove(a) break for a in options[:]: if a[0] == '--version': print 'version 1.1.' print "Copyright (C) 2001, Revaz corporation." sys.exit(0) for a in options[:]: if a[0] == '--cosmo': cosmo = 1 if (tfinal != 0): if (tfinal-tinit < 1): print "number of final image must be larger than initial image !" sys.exit(0) #fname = xarguments[0] if (fname==" "): help_message() sys.exit(0) if (fout==" "): fout = fname+".gif" # verifie que le fichier existe if (os.path.exists(fname)==0): print "Error : the file ",fname," no not exist." 
sys.exit(0) # ouvre le film film = Movie.Movie(fname) film.open() if info: film.info() sys.exit(0) # create a palette object palette = Palette() pth = os.path.join(PALETTEDIR,color) palette.read(pth) if tfinal == 0 : tfinal = film.npic if not os.path.exists('tmp'): os.mkdir('tmp') ################################################### # taille de l'image de sortie # largeur de l'image width = int(film.numByte*zoom) #---------------- # choix des font #---------------- # determination de la taille fsize = int(12*zoom - math.fmod(12*zoom,2)) if (fsize > 24) : fsize = 24 if (fsize < 8) : fsize = 8 if (fsize < 10): size = '0'+`fsize` else: size = `fsize` fontbasename = os.path.basename(FFONT) fontdirname = os.path.dirname(FFONT)+'/' fontname = fontdirname+fontbasename[:6]+size+ fontbasename[8:] font = ImageFont.load(fontname) # determination de l'offset en y xy = font.getsize('0000.00') xref = width - (width- xy[0])/2 if label: dylab = int(2*xy[1]) else : dylab = 0 # hauteur de l'image height = int((film.numLine)*zoom)+dylab shapezoom = (width,height) ########################### # lecture de tout le film ########################### listim = [] leap = 0 i = -1 while 1: data = film.read_one() if data==None: break i = i + 1 if i >= tinit and i <= tfinal: if leap==0: # creation de l'image de sortie image = film.get_img(data) # zoom image = image.transform(shapezoom,Image.AFFINE,(1./zoom,0.,0.,0.,1./zoom,0),Image.BICUBIC) image = image.transform(shapezoom,Image.AFFINE,(1,0.,0.,0.,1,-dylab),Image.BICUBIC) # put the label if label: if cosmo: film.current_time = 1./film.current_time - 1 time = "%8.3f"%(film.current_time) xy = font.getsize(time) x = xref - xy[0] y = int(dylab/4) poslab = (x,y) draw = ImageDraw.Draw(image) draw.rectangle((0,0,width,dylab),fill=1) draw.text(poslab,time,fill=256,font=font) if text != None: xy = font.getsize(text) x = xy[1] y = height - 2*xy[1] postext = (x,y) draw = ImageDraw.Draw(image) draw.text(postext,text,fill=256,font=font) # include the palette image.putpalette(palette.palette) #################################### # en dessous, partie propre a mpeg #################################### # save it bname = "tmp/%000005d"%(i) print bname image.save(bname+'.gif') os.system('convert %s.gif %s.ppm'%(bname,bname)) os.system('rm %s.gif'%(bname)) listim.append(bname+'.ppm') leap = fmod(leap+1,step+1) else: break #################################### # creation du film mpeg #################################### if not keep: line = """ PATTERN IBBPBBPBBPBBPBB OUTPUT %s INPUT_DIR ./tmp INPUT *.ppm [%000005d-%000005d] END_INPUT BASE_FILE_FORMAT PPM INPUT_CONVERT * GOP_SIZE 8 SLICES_PER_FRAME 1 PIXEL HALF RANGE 12 PSEARCH_ALG TWOLEVEL BSEARCH_ALG CROSS2 PQSCALE 1 IQSCALE 1 BQSCALE 1 REFERENCE_FRAME ORIGINAL ASPECT_RATIO 1 FRAME_RATE 23.976 """%(fout,1,i) f = open('param.mpg','w') f.write(line) f.close() os.system('ppmtompeg param.mpg') os.remove('param.mpg') for file in listim: os.remove(file) os.rmdir('tmp') diff --git a/scripts/gmov2mpg b/scripts/gmov2mpg index 8c6acbc..a8ce980 100755 --- a/scripts/gmov2mpg +++ b/scripts/gmov2mpg @@ -1,397 +1,397 @@ #!/usr/bin/env python import pNbody from pNbody import Movie from pNbody import * from pNbody.palette import * import string from numpy import * import Image import ImageDraw import ImageFont import ImagePalette import ImageTk import ImageFont import sys import os import string import getopt import math -FONT_PATH = os.path.join(NBODYPATH,'fonts') +FONT_PATH = os.path.join(PNBODYPATH,'fonts') FFONT = os.path.join(FONT_PATH,'helvBO10.pil') 
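+# FFONT: default bitmap font used for the time label, taken from the fonts directory shipped with pNbody (PNBODYPATH)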
#################################################################################### def help_message(): #################################################################################### print '''Usage : gmov2mpeg -f file [options ...] Options: -h -- this help message -f -- input file -o -- output file -p -- name of a palette -z -- zoom factor -d -- pause between images (ms) -b -- number of initial image -e -- number of final image -s -- number of images to leap -l -- add the label (time) -i -- gives information on the film -k -- keep temporary images -t -- add a text --cosmo -- cosmoligical option --version -- displays version''' sys.exit(0) #################################################################################### # info film #################################################################################### def infofilm(filename) : f = Movie.Movie(filename) f.open() f.info() #################################################################################### # # MAIN # #################################################################################### try: options, xarguments = getopt.getopt(sys.argv[1:],'f:o:p:z:d:b:e:s:t:ihlk', ['version','cosmo']) except getopt.error: print '''Error: You tried to use an unknown option or the argument for an option that requires it was missing. Try `mov2gif -h\' for more information.''' sys.exit(0) # examination des options # h : help message for a in options[:]: if a[0] == '-h': help_message() zoom = 1. fname = " " fout = " " delay = 10 color = "ramp" tinit = 0 tfinal = 0 step = 0 info = 0 label = 0 keep = 0 text = None cosmo = 0 # f : input file for a in options[:]: if a[0] == '-f' and a[1] != '': fname = a[1] options.remove(a) break elif a[0] == '-f' and a[1] == '': print '-f expects an argument' sys.exit(0) # o : output file for a in options[:]: if a[0] == '-o' and a[1] != '': fout = a[1] options.remove(a) break elif a[0] == '-o' and a[1] == '': print '-o expects an argument' sys.exit(0) # p : palette for a in options[:]: if a[0] == '-p' and a[1] != '': color = pth = os.path.join(PALETTEDIR,a[1]) options.remove(a) break elif a[0] == '-p' and a[1] == '': print '-p expects an argument' sys.exit(0) # z : zoom for a in options[:]: if a[0] == '-z' and a[1] != '': zoom = string.atof(a[1]) options.remove(a) break elif a[0] == '-z' and a[1] == '': print '-z expects an argument' sys.exit(0) # d : delay for a in options[:]: if a[0] == '-d' and a[1] != '': delay = string.atoi(a[1]) options.remove(a) break elif a[0] == '-d' and a[1] == '': print '-d expects an argument' sys.exit(0) # i : im init for a in options[:]: if a[0] == '-b' and a[1] != '': tinit = string.atof(a[1]) options.remove(a) break elif a[0] == '-b' and a[1] == '': print '-b expects an argument' sys.exit(0) # f : im init for a in options[:]: if a[0] == '-e' and a[1] != '': tfinal = string.atof(a[1]) options.remove(a) break elif a[0] == '-e' and a[1] == '': print '-e expects an argument' sys.exit(0) # s : im init for a in options[:]: if a[0] == '-s' and a[1] != '': step = string.atoi(a[1]) options.remove(a) break elif a[0] == '-s' and a[1] == '': print '-s expects an argument' sys.exit(0) # l : add label for a in options[:]: if a[0] == '-l': label = 1 options.remove(a) break # t : text for a in options[:]: if a[0] == '-t' and a[1] != '': text = a[1] options.remove(a) break elif a[0] == '-t' and a[1] == '': print '-t expects an argument' sys.exit(0) # k : add label for a in options[:]: if a[0] == '-k': keep = 1 options.remove(a) break # i : info for a in options[:]: if a[0] == 
'-i': info = 1 options.remove(a) break for a in options[:]: if a[0] == '--version': print 'version 1.1.' print "Copyright (C) 2001, Revaz corporation." sys.exit(0) for a in options[:]: if a[0] == '--cosmo': cosmo = 1 if (tfinal != 0): if (tfinal-tinit < 1): print "number of final image must be larger than initial image !" sys.exit(0) #fname = xarguments[0] if (fname==" "): help_message() sys.exit(0) if (fout==" "): fout = fname+".gif" # verifie que le fichier existe if (os.path.exists(fname)==0): print "Error : the file ",fname," no not exist." sys.exit(0) # ouvre le film film = Movie.Movie(fname) film.open() if info: film.info() sys.exit(0) # create a palette object palette = Palette() pth = os.path.join(PALETTEDIR,color) palette.read(pth) if tfinal == 0 : tfinal = film.npic if not os.path.exists('tmp'): os.mkdir('tmp') ################################################### # taille de l'image de sortie # largeur de l'image width = int(film.numByte*zoom) #---------------- # choix des font #---------------- # determination de la taille fsize = int(12*zoom - math.fmod(12*zoom,2)) if (fsize > 24) : fsize = 24 if (fsize < 8) : fsize = 8 if (fsize < 10): size = '0'+`fsize` else: size = `fsize` fontbasename = os.path.basename(FFONT) fontdirname = os.path.dirname(FFONT)+'/' fontname = fontdirname+fontbasename[:6]+size+ fontbasename[8:] font = ImageFont.load(fontname) # determination de l'offset en y xy = font.getsize('0000.00') xref = width - (width- xy[0])/2 if label: dylab = int(2*xy[1]) else : dylab = 0 # hauteur de l'image height = int((film.numLine)*zoom)+dylab shapezoom = (width,height) ########################### # lecture de tout le film ########################### leap = 0 i = -1 while 1: data = film.read_one() if data==None: break i = i + 1 if i >= tinit and i <= tfinal: if leap==0: # creation de l'image de sortie image = film.get_img(data) # zoom image = image.transform(shapezoom,Image.AFFINE,(1./zoom,0.,0.,0.,1./zoom,0),Image.BICUBIC) image = image.transform(shapezoom,Image.AFFINE,(1,0.,0.,0.,1,-dylab),Image.BICUBIC) # put the label if label: if cosmo: film.current_time = 1./film.current_time - 1 time = "%8.3f"%(film.current_time) xy = font.getsize(time) x = xref - xy[0] y = int(dylab/4) poslab = (x,y) draw = ImageDraw.Draw(image) draw.rectangle((0,0,width,dylab),fill=1) draw.text(poslab,time,fill=256,font=font) if text != None: xy = font.getsize(text) x = xy[1] y = height - 2*xy[1] postext = (x,y) draw = ImageDraw.Draw(image) draw.text(postext,text,fill=256,font=font) # include the palette image.putpalette(palette.palette) #################################### # en dessous, partie propre a mpeg #################################### # save it bname = "tmp/%000005d"%(i) print bname image.save(bname+'.gif') os.system('convert -quality 100 %s.gif %s.jpg'%(bname,bname)) os.system('rm %s.gif'%(bname)) leap = fmod(leap+1,step+1) else: break #################################### # creation du film mpeg #################################### if not keep: pass #os.system() #os.rmdir('tmp') mencoder 'mf:///tmp*.jpg' -mf type=jpg:fps=2 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o film.mpg diff --git a/scripts/gmov2ppm b/scripts/gmov2ppm index 18d9eb0..4c76b1a 100755 --- a/scripts/gmov2ppm +++ b/scripts/gmov2ppm @@ -1,384 +1,384 @@ #!/usr/bin/env python import pNbody from pNbody import Movie from pNbody import * from pNbody.palette import * import string from numpy import * import Image import ImageDraw import ImageFont import ImagePalette import ImageTk import ImageFont import sys import os import 
string import getopt import math import shutil -FONT_PATH = os.path.join(NBODYPATH,'fonts') +FONT_PATH = os.path.join(PNBODYPATH,'fonts') FFONT = os.path.join(FONT_PATH,'helvBO10.pil') #################################################################################### def help_message(): #################################################################################### print '''Usage : gmov2mpeg -f file [options ...] Options: -h -- this help message -f -- input file -o -- output directory -p -- name of a palette -z -- zoom factor -b -- number of initial image -e -- number of final image -s -- number of images to leap -l -- add the label (time) -i -- gives information on the film -t -- add a text --cosmo -- cosmoligical option --version -- displays version''' sys.exit(0) #################################################################################### # info film #################################################################################### def infofilm(filename) : f = Movie.Movie(filename) f.open() f.info() #################################################################################### # # MAIN # #################################################################################### try: options, xarguments = getopt.getopt(sys.argv[1:],'f:o:p:z:d:b:e:s:t:ihlk', ['version','cosmo']) except getopt.error: print '''Error: You tried to use an unknown option or the argument for an option that requires it was missing. Try `mov2gif -h\' for more information.''' sys.exit(0) # examination des options # h : help message for a in options[:]: if a[0] == '-h': help_message() zoom = 1. fname = " " outdir = "tmp" color = "ramp" tinit = 0 tfinal = 0 step = 0 info = 0 label = 0 text = None cosmo = 0 # f : input file for a in options[:]: if a[0] == '-f' and a[1] != '': fname = a[1] options.remove(a) break elif a[0] == '-f' and a[1] == '': print '-f expects an argument' sys.exit(0) # o : output file for a in options[:]: if a[0] == '-o' and a[1] != '': outdir = a[1] options.remove(a) break elif a[0] == '-o' and a[1] == '': print '-o expects an argument' sys.exit(0) # p : palette for a in options[:]: if a[0] == '-p' and a[1] != '': color = pth = os.path.join(PALETTEDIR,a[1]) options.remove(a) break elif a[0] == '-p' and a[1] == '': print '-p expects an argument' sys.exit(0) # z : zoom for a in options[:]: if a[0] == '-z' and a[1] != '': zoom = string.atof(a[1]) options.remove(a) break elif a[0] == '-z' and a[1] == '': print '-z expects an argument' sys.exit(0) # i : im init for a in options[:]: if a[0] == '-b' and a[1] != '': tinit = string.atof(a[1]) options.remove(a) break elif a[0] == '-b' and a[1] == '': print '-b expects an argument' sys.exit(0) # f : im init for a in options[:]: if a[0] == '-e' and a[1] != '': tfinal = string.atof(a[1]) options.remove(a) break elif a[0] == '-e' and a[1] == '': print '-e expects an argument' sys.exit(0) # s : im init for a in options[:]: if a[0] == '-s' and a[1] != '': step = string.atoi(a[1]) options.remove(a) break elif a[0] == '-s' and a[1] == '': print '-s expects an argument' sys.exit(0) # l : add label for a in options[:]: if a[0] == '-l': label = 1 options.remove(a) break # t : text for a in options[:]: if a[0] == '-t' and a[1] != '': text = a[1] options.remove(a) break elif a[0] == '-t' and a[1] == '': print '-t expects an argument' sys.exit(0) # i : info for a in options[:]: if a[0] == '-i': info = 1 options.remove(a) break for a in options[:]: if a[0] == '--version': print 'version 1.1.' print "Copyright (C) 2001, Revaz corporation." 
sys.exit(0) for a in options[:]: if a[0] == '--cosmo': cosmo = 1 if (tfinal != 0): if (tfinal-tinit < 1): print "number of final image must be larger than initial image !" sys.exit(0) #fname = xarguments[0] if (fname==" "): help_message() sys.exit(0) # create output directory if os.path.exists(outdir): print "%s already exists !"%(outdir) txt = "Continue anyway and removes the content of %s ? [Y/n] "%(outdir) bool = raw_input(txt) if len(bool) == 0 or bool[0] == 'y' or bool[0] == 'Y' : shutil.rmtree(outdir) print "Removing %s"%(outdir) else: print "Nothing has been done. " sys.exit(1) os.mkdir(outdir) # verifie que le fichier existe if (os.path.exists(fname)==0): print "Error : the file ",fname," no not exist." sys.exit(0) # ouvre le film film = Movie.Movie(fname) film.open() if info: film.info() sys.exit(0) # create a palette object palette = Palette() pth = os.path.join(PALETTEDIR,color) palette.read(pth) if tfinal == 0 : tfinal = film.npic ################################################### # taille de l'image de sortie # largeur de l'image width = int(film.numByte*zoom) #---------------- # choix des font #---------------- # determination de la taille fsize = int(12*zoom - math.fmod(12*zoom,2)) if (fsize > 24) : fsize = 24 if (fsize < 8) : fsize = 8 if (fsize < 10): size = '0'+`fsize` else: size = `fsize` fontbasename = os.path.basename(FFONT) fontdirname = os.path.dirname(FFONT)+'/' fontname = fontdirname+fontbasename[:6]+size+ fontbasename[8:] font = ImageFont.load(fontname) # determination de l'offset en y xy = font.getsize('0000.00') xref = width - (width- xy[0])/2 if label: dylab = int(2*xy[1]) else : dylab = 0 # hauteur de l'image height = int((film.numLine)*zoom)+dylab shapezoom = (width,height) ########################### # lecture de tout le film ########################### listim = [] leap = 0 i = -1 while 1: data = film.read_one() if data==None: break i = i + 1 if i >= tinit and i <= tfinal: if leap==0: # creation de l'image de sortie image = film.get_img(data) # zoom image = image.transform(shapezoom,Image.AFFINE,(1./zoom,0.,0.,0.,1./zoom,0),Image.BICUBIC) image = image.transform(shapezoom,Image.AFFINE,(1,0.,0.,0.,1,-dylab),Image.BICUBIC) # put the label if label: if cosmo: film.current_time = 1./film.current_time - 1 time = "%8.3f"%(film.current_time) xy = font.getsize(time) x = xref - xy[0] y = int(dylab/4) poslab = (x,y) draw = ImageDraw.Draw(image) draw.rectangle((0,0,width,dylab),fill=1) draw.text(poslab,time,fill=256,font=font) if text != None: xy = font.getsize(text) x = xy[1] y = height - 2*xy[1] postext = (x,y) draw = ImageDraw.Draw(image) draw.text(postext,text,fill=256,font=font) # include the palette image.putpalette(palette.palette) #################################### # en dessous, partie propre a mpeg #################################### # save it bname = os.path.join(outdir,"%000005d"%(i)) print bname image.save(bname+'.gif') os.system('convert %s.gif %s.ppm'%(bname,bname)) os.system('rm %s.gif'%(bname)) listim.append(bname+'.ppm') leap = fmod(leap+1,step+1) else: break diff --git a/scripts/pNbody_checkall b/scripts/pNbody_checkall new file mode 100755 index 0000000..89ee1f7 --- /dev/null +++ b/scripts/pNbody_checkall @@ -0,0 +1,488 @@ +#!/usr/bin/env python + +from pNbody import * +from pNbody import ic + +from optparse import OptionParser +import os + +HOME = os.environ['HOME'] + +######################################## +# +# parser +# +######################################## + +def parse_options(): + + usage = "usage: %prog [options] file" + parser = 
OptionParser(usage=usage) + + + parser.add_option("-t", + action="store", + dest="ftype", + type="string", + default = 'gadget', + help="type of the file", + metavar=" TYPE") + + parser.add_option("-f", + action="store", + dest="file", + type="string", + default = os.path.join(HOME,"snap.dat"), + help="output file name", + metavar=" FILE") + + parser.add_option("-n", + action="store", + dest="n", + type="int", + default = 2**14, + help="number of particles", + metavar=" INT") + + + (options, args) = parser.parse_args() + + files = args + + return files,options + + +################################# +# +# main +# +################################# + + +files,opt = parse_options() + + +print 72*"#" +print "Testing %s format"%opt.ftype +print 72*"#" + +ftype = opt.ftype +file = opt.file + + +# create file and save it +print "create an exponential disk" +nb = ic.expd(n=opt.n,Hr=3.,Hz=0.3,Rmax=20,Zmax=2,irand=1,name=file,ftype=ftype) +nb.write() + +# read it +nb = Nbody(file,ftype=ftype) + +# save it with another name +file2 = file+'.2' +nb.rename(file2) +nb.write() +nb.rename(file) + +# compare the two files +print 72*"#" +cmd = "diff %s %s.2"%(file,file) +print cmd +f=os.popen(cmd) +txt=f.readline() +f.close() +if len(txt)!=0: + print txt + print + print "Bad news : %s and %s.2 differs"%(file,file) + sys.exit() +else: + print "diff ok" + +print 72*"#" + + + + +params = param.Params(PARAMETERFILE,None) +uparams = param.Params(UNITSPARAMETERFILE,None) + + +################################# +# +# init functions +# +################################# + +print "testing init functions..." +nb.init() +nb.set_ftype(ftype='binary') +nb.get_num() +nb.get_default_spec_vars() +nb.get_default_spec_vect() +nb.set_pio('no') +nb.rename('test.dat') +nb.set_filenames('test.dat') +nb.get_ntype() +nb.get_nbody() +nb.get_nbody_tot() +nb.get_npart() +nb.get_npart_tot() +nb.get_npart_all(nb.get_npart(),mpi.NTask) +nb.get_npart_and_npart_all(nb.get_npart()) +nb.get_mxntpe() +nb.make_default_vars_global() +nb.set_npart(nb.npart) +nb.set_tpe(0) + +################################# +# +# parameters functions +# +################################# + +print "testing parameters functions..." +nb.set_parameters(params) +nb.set_unitsparameters(uparams) +nb.set_local_system_of_units() + +################################# +# +# info functions +# +################################# + +print "testing info functions..." +nb.info() +nb.spec_info() +nb.object_info() +nb.nodes_info() +nb.memory_info() +nb.print_filenames() + +################################# +# +# list of variables functions +# +################################# + +print "testing list of variables functions..." +nb.get_list_of_array() +nb.get_list_of_method() +nb.get_list_of_vars() +nb.has_var('pos') +nb.has_array('pos') +nb.find_vars() + + +################################# +# +# check special values +# +################################# +nb.check_arrays() + +################################# +# +# read/write functions +# +################################# + +print "testing read/write functions..." +#nb.read() +#nb.open_and_read(nb.p_name[0],nb.get_read_fcts()[0]) +#nb.rename('treo0020.000b') +#nb.write() +#nb.open_and_write(nb.p_name[0],nb.get_write_fcts()[0]) +#nb.write_num('num.dat') +#nb.read_num('num.dat') + + +################################# +# +# coordinate transformation +# +################################# + +print "testing coordinate transformation functions..." 
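+# the accessors below return per-particle values: cartesian (x, y, z, vx, vy, vz), spherical (rxyz, phi_xyz, theta_xyz) and cylindrical (rxy, phi_xy, R, Vr, Vt, Vz) coordinates, plus the cart<->sph and cyl<->cart velocity conversions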
+nb.x() +nb.y() +nb.z() +nb.rxyz() +nb.phi_xyz() +nb.theta_xyz() +nb.rxy() +nb.phi_xy() +nb.r() +nb.R() +nb.cart2sph() +nb.sph2cart() +nb.vx() +nb.vy() +nb.vz() +nb.vn() +nb.vrxyz() +nb.Vr() +nb.Vt() +nb.Vz() +nb.vel_cyl2cart() +nb.vel_cart2cyl() + +################################# +# +# physical values +# +################################# + +print "testing physical values functions..." +nb.get_ns() +nb.get_mass_tot() +nb.size() +nb.cm() +nb.get_histocenter() +nb.get_histocenter2() +nb.cv() +nb.minert() +nb.x_sigma() +nb.v_sigma() +nb.dx_mean() +nb.dv_mean() +nb.Ekin() +nb.ekin() +nb.Epot(0.1) +nb.epot(0.1) +nb.L() +nb.l() +nb.Ltot() +nb.ltot() +nb.Pot([0,0,0],0.1) +nb.TreePot(array([[0,0,0]],float32),eps=0.1) +nb.Accel([0,0,0],0.1) +nb.TreeAccel(array([[0,0,0]],float32),eps=0.1) +nb.tork(nb.vel) +nb.dens() # bof +nb.mdens() # bof +nb.mr() # bof +nb.Mr_Spherical() # bof +nb.sdens() # bof +nb.msdens() # bof +nb.sigma_z() # bof +nb.sigma_vz() # bof +nb.zprof() +nb.sigma() +nb.histovel() +#nb.zmodes() +#nb.dmodes() + +nb.getRadiusInCylindricalGrid(0,10) +nb.getAccelerationInCylindricalGrid(0.1,0,10) +nb.getPotentialInCylindricalGrid(0.1,0,10) +nb.getSurfaceDensityInCylindricalGrid(10) +nb.getNumberParticlesInCylindricalGrid(10) +nb.getRadialVelocityDispersionInCylindricalGrid(10) + + + +################################# +# +# geometrical operations +# +################################# + +print "testing geometrical operations functions..." +nb.cmcenter() +nb.cvcenter() +nb.histocenter() +nb.histocenter2() +nb.hdcenter() +nb.translate([10,0,0]) +nb.rebox() +nb.rotate(axis='y',angle=pi/2) +nb.rotate(axis=[1,1,1],angle=pi/2) +nb.align(axis=[1,1,1]) +nb.align2() +nb.spin() + + +################################# +# +# selection of particles +# +################################# + +print "testing selection of particles functions..." +nb.selectc(nb.num<100) +nb.tpe = where(nb.num>10,1,0) +nb.tpe = where(nb.num>nb.nbody-10,2,nb.tpe) +nb = nb.select(1) +nb = nb.sub(2,12) +nb = nb.reduc(2) +nb = nb.selectp([17,19]) +nb.getindex(19) + +################################# +# +# add particles +# +################################# + +print "testing add particles functions..." +nb1 = Nbody(file,ftype=ftype) +nb2 = Nbody(file,ftype=ftype) +nb1.append(nb2) +nb = nb1 + nb2 + +################################# +# +# sort particles +# +################################# + +print "testing sort particles functions..." +nb = nb.sort() +nb = nb.sort_type() + + +################################# +# +# Tree and SPH functions +# +################################# + +print "testing Tree and SPH functions..." +nb = Nbody(file,ftype=ftype) +nb.InitSphParameters() +nb.setTreeParameters(nb.Tree,33,3) +nb.getTree() +nb.get_rsp_approximation() +nb.ComputeSph() +nb.ComputeDensityAndHsml() +nb.SphEvaluate(nb.vel[:,0]) + +################################# +# +# sph functions +# +################################# + +''' +print "testing sph functions..." +nb.weighted_numngb(1) +nb.real_numngb(1) +nb.usual_numngb(1) +''' + +################################# +# +# redistribution of particles +# +################################# + +print "testing redistribution of particles functions..." +nb.redistribute() +#nb.ExchangeParticles() # not tested, need ptree + + +################################# +# +# specific parallel functions +# +################################# + +print "testing specific parallel functions..." 
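+# the gather_* helpers collect the pos/vel/mass/num arrays (or an arbitrary vector) from all MPI tasks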
+nb.gather_pos() +nb.gather_vel() +nb.gather_mass() +nb.gather_num() +nb.gather_vec(nb.pos) + +################################# +# +# graphical operations +# +################################# + +print "testing graphical operations functions..." +nb.display(size=[30,30]) +#nb.show(size=[30,30]) # it craches if the previous image has been closed... +nb.Map() +nb.CombiMap() +nb.ComputeMeanMap(mode1='0') +nb.ComputeSigmaMap(mode1='0',mode2='0') +nb.ComputeMap(mode='0') +#nb.expose() # tested by map + + +################################# +# +# 1d histograms +# +################################# + +print "testing 1d histograms functions..." +nb.Histo(bins=array([0,1,2,3,4,5])) +nb.CombiHisto(bins=array([0,1,2,3,4,5])) +nb.ComputeMeanHisto(array([0,1,2,3,4,5]),mode1='m',space='R') +nb.ComputeSigmaHisto(array([0,1,2,3,4,5]),mode1='m',mode2='m',space='R') +nb.ComputeHisto(array([0,1,2,3,4,5]),mode='m',space='R') + + +############################################ +# +# Routines to get velocities from positions +# +############################################ + +print "testing routines to get velocities from positions..." +nb.Get_Velocities_From_Spherical_Grid() +nb.Get_Velocities_From_Cylindrical_Grid() + + +############################################ +# +# evolution routines +# +############################################ + +#nb.IntegrateUsingRK() + + +################################# +# +# Thermodynamic functions +# +################################# +''' +nb.U() # not tested +nb.Rho() # not tested +nb.T() # not tested +nb.MeanWeight() # not tested +nb.Tmu() # not tested +nb.A() # not tested +nb.P() # not tested +nb.Tcool() # not tested +nb.Ne() # not tested +nb.S() # not tested +nb.Lum() # not tested +''' + + +################################# +# +# the end +# +################################# + +print 72*"#" +print "Good News ! pNbody with format %s is working !"%ftype +print 72*"#" + + +################################# +# +# some info +# +################################# +print +print "You are using the following paths" +parameters.print_path() diff --git a/scripts/pNbody_copy-defaultconfig b/scripts/pNbody_copy-defaultconfig new file mode 100755 index 0000000..add1430 --- /dev/null +++ b/scripts/pNbody_copy-defaultconfig @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +from pNbody import * +import shutil + + +# check if .pNbody exists +dest = os.path.join(HOME,'.pNbody') +if os.path.isdir(dest): + answer = raw_input('WARNING : The directory %s alredy exists. Remove it [y/N] ? ' %(dest)) + if answer == "y" or answer == "Y": + print "Removing %s"%dest + shutil.rmtree(dest) + else: + print "Nothing done." + sys.exit() + +# recursive copy +print "copying files from %s"%CONFIGDIR +print "to %s"%dest + + +shutil.copytree(CONFIGDIR,dest) + +print "done." +print +print "you can now type pNbody_show-path to check the new configuration." +print diff --git a/scripts/pNbody_copy-examples b/scripts/pNbody_copy-examples new file mode 100755 index 0000000..e801353 --- /dev/null +++ b/scripts/pNbody_copy-examples @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +from pNbody import * +import shutil + + +EXAMPLES=os.path.join(PNBODYPATH,'examples') + + +dest = os.path.join(HOME,"pnbody_examples") + + +# check if .pNbody exists +if os.path.isdir(dest): + answer = raw_input('WARNING : The directory %s alredy exists. Remove it [y/N] ? ' %(dest)) + if answer == "y" or answer == "Y": + print "Removing %s"%dest + shutil.rmtree(dest) + else: + print "Nothing done." 
+ sys.exit() + +# recursive copy +print "copying files from %s"%EXAMPLES +print "to %s"%dest + + +shutil.copytree(EXAMPLES,dest) + +print "done." +print +print "you can now move to %s and test the examples."%dest +print diff --git a/scripts/pNbody_mpi b/scripts/pNbody_mpi new file mode 100755 index 0000000..faa9817 --- /dev/null +++ b/scripts/pNbody_mpi @@ -0,0 +1,5 @@ +#!/usr/bin/env python + +from pNbody import * + +print "This is task %d over %d"%(mpi.ThisTask,mpi.NTask) diff --git a/scripts/pNbody_show-parameters b/scripts/pNbody_show-parameters new file mode 100755 index 0000000..ce8a601 --- /dev/null +++ b/scripts/pNbody_show-parameters @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +from pNbody import * + + +################################# +# +# some info +# +################################# + + +parameters = param.Params(PARAMETERFILE,None) +unitsparameters = param.Params(UNITSPARAMETERFILE,None) + + +print +print "parameters in %s"%PARAMETERFILE +print +parameters.lists() + +print +print "parameters in %s"%UNITSPARAMETERFILE +print +unitsparameters.lists() diff --git a/scripts/pNbody_show-path b/scripts/pNbody_show-path new file mode 100755 index 0000000..2380951 --- /dev/null +++ b/scripts/pNbody_show-path @@ -0,0 +1,13 @@ +#!/usr/bin/env python + +from pNbody import * + + +################################# +# +# some info +# +################################# + +print "You are using the following paths" +parameters.print_path() diff --git a/src/PyGadget/PyGadget/__init__.py b/src/PyGadget/PyGadget/__init__.py new file mode 100644 index 0000000..102346d --- /dev/null +++ b/src/PyGadget/PyGadget/__init__.py @@ -0,0 +1 @@ +from pNbody import * diff --git a/src/PyGadget/build/lib.linux-x86_64-2.6/PyGadget/__init__.py b/src/PyGadget/build/lib.linux-x86_64-2.6/PyGadget/__init__.py new file mode 100644 index 0000000..102346d --- /dev/null +++ b/src/PyGadget/build/lib.linux-x86_64-2.6/PyGadget/__init__.py @@ -0,0 +1 @@ +from pNbody import * diff --git a/src/PyGadget/build/lib.linux-x86_64-2.6/PyGadget/gadget.so b/src/PyGadget/build/lib.linux-x86_64-2.6/PyGadget/gadget.so new file mode 100755 index 0000000..75894c7 Binary files /dev/null and b/src/PyGadget/build/lib.linux-x86_64-2.6/PyGadget/gadget.so differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/accel.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/accel.o new file mode 100644 index 0000000..ae43fa5 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/accel.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/allocate.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/allocate.o new file mode 100644 index 0000000..36be195 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/allocate.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/allocateQ.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/allocateQ.o new file mode 100644 index 0000000..751b6fb Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/allocateQ.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/allvars.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/allvars.o new file mode 100644 index 0000000..3e4087b Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/allvars.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/begrun.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/begrun.o new file mode 100644 index 0000000..754bb7b Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/begrun.o differ diff 
--git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/density.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/density.o new file mode 100644 index 0000000..89464b5 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/density.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/domain.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/domain.o new file mode 100644 index 0000000..fdb8f85 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/domain.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/domainQ.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/domainQ.o new file mode 100644 index 0000000..16440ed Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/domainQ.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/driftfac.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/driftfac.o new file mode 100644 index 0000000..a7772c3 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/driftfac.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/endrun.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/endrun.o new file mode 100644 index 0000000..461a760 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/endrun.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/forcetree.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/forcetree.o new file mode 100644 index 0000000..e12ffb9 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/forcetree.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/global.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/global.o new file mode 100644 index 0000000..9bd2416 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/global.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/gravtree.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/gravtree.o new file mode 100644 index 0000000..1639521 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/gravtree.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/gravtree_forcetest.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/gravtree_forcetest.o new file mode 100644 index 0000000..b1be23a Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/gravtree_forcetest.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/hydra.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/hydra.o new file mode 100644 index 0000000..2e06067 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/hydra.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/init.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/init.o new file mode 100644 index 0000000..9be0199 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/init.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/io.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/io.o new file mode 100644 index 0000000..4d207f8 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/io.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/longrange.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/longrange.o new file mode 100644 index 0000000..e1b9adc Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/longrange.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/main.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/main.o new file mode 100644 index 0000000..f8a2c26 
Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/main.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/ngb.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/ngb.o new file mode 100644 index 0000000..7bb6820 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/ngb.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/peano.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/peano.o new file mode 100644 index 0000000..5e724aa Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/peano.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/pm_nonperiodic.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/pm_nonperiodic.o new file mode 100644 index 0000000..aec930b Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/pm_nonperiodic.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/pm_periodic.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/pm_periodic.o new file mode 100644 index 0000000..e411e66 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/pm_periodic.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/potential.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/potential.o new file mode 100644 index 0000000..b782ca6 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/potential.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/predict.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/predict.o new file mode 100644 index 0000000..b8d346a Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/predict.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/python_interface.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/python_interface.o new file mode 100644 index 0000000..7af0fb0 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/python_interface.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/read_ic.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/read_ic.o new file mode 100644 index 0000000..70bf98d Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/read_ic.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/restart.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/restart.o new file mode 100644 index 0000000..c9ce011 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/restart.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/run.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/run.o new file mode 100644 index 0000000..b26b9a3 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/run.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/sph.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/sph.o new file mode 100644 index 0000000..97f7b78 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/sph.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/system.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/system.o new file mode 100644 index 0000000..b100123 Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/system.o differ diff --git a/src/PyGadget/build/temp.linux-x86_64-2.6/src/timestep.o b/src/PyGadget/build/temp.linux-x86_64-2.6/src/timestep.o new file mode 100644 index 0000000..c778f5b Binary files /dev/null and b/src/PyGadget/build/temp.linux-x86_64-2.6/src/timestep.o differ diff --git a/src/PyGadget/examples/TODO 
b/src/PyGadget/examples/TODO new file mode 100644 index 0000000..04c4e9d --- /dev/null +++ b/src/PyGadget/examples/TODO @@ -0,0 +1,119 @@ +liste des parametres + +- initialisation des parametres + + par defaut : ok + + +- begrun() + + read_parameter_file() + + allocate_commbuffers(); + + init(); + -> read file... + + - lit les parametrs + + + - init() + + + - LoadParticles ok tested + + + - AllPotential ok tested + - AllAcceleration ok tested + + + - GetAllPotential ok tested + - GetAllDensities ok tested + - GetAllAcceleration ok tested + - GetAllPositions ok tested + - GetAllVelocities ok not tested + - GetAllHsml ok tested + - GetAllMasses ok not tested + - GetAllID ok not tested + + + - GetPos ok tested + + - Potential() ok tested ok // + - Acceleration() ok tested ok // + + ------------------- + + - InitHsml() find hsml from neighbors -> faire mieux... + - Density() ok tested ok // + + ------------------- + + + - SphEvaluate() ok tested + + + + + - Hydra() + + + - SphEvaluateDiv() a faire, utile ? + - SphEvaluateRot() a faire, utile ? + - SphEvaluateNgb() a faire, utile ? + - SphGetNgb() a faire, utile ? + + + - FindNgb ??? utilité ??? --> demande + + + + - peano hilbert + --> domain decompose pour + les particules Q + + + + - LoadParticles2 -> copie de pointeurs... + + + + + + + - LoadParticlesQ + + - GetAllPositionsQ + - GetAllVelocitiesQ + - GetAllHsmlQ + - GetAllMassesQ + - GetAllIDQ + + + test_LoadParticlesQ.py + + --> vérifier que la somme des particules est correcte... + + + + + + - combiner : grille Q -> particules P + + --> calculer le pot sur la grille Q + + !!!! domaine !!! -> mettre en commun + + + + + + + + + + + + + + diff --git a/src/PyGadget/examples/plummer.dat b/src/PyGadget/examples/plummer.dat new file mode 100644 index 0000000..1e99e7f Binary files /dev/null and b/src/PyGadget/examples/plummer.dat differ diff --git a/src/PyGadget/examples/test.py b/src/PyGadget/examples/test.py new file mode 100755 index 0000000..7d7ed1f --- /dev/null +++ b/src/PyGadget/examples/test.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(0) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. 
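+# hand the parameter dictionary over to the embedded Gadget code (all softenings above are set to rc/100)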
+ +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles2(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +gadget.AllPotential() +pot = gadget.GetAllPotential() + + + +gadget.GetPos(nb.pos) # ok + + + +############################### +# plot +############################### + +r = nb.rxyz() +val = pot + + +pt.scatter(r,val,s=1) +pt.show() + diff --git a/src/PyGadget/examples/test_Acceleration.py b/src/PyGadget/examples/test_Acceleration.py new file mode 100755 index 0000000..bba8244 --- /dev/null +++ b/src/PyGadget/examples/test_Acceleration.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +r = arange(0,100,0.1) +y = zeros(len(r)) +z = zeros(len(r)) +pos = transpose(array([r,y,z])) +#pos = array([[0,0,0]]) +eps = rc/100. + +acc = gadget.Acceleration(pos,eps) + +############################### +# plot +############################### + +val = sqrt(acc[:,0]**2 + acc[:,1]**2 + acc[:,2]**2) + + +pt.scatter(r,val,s=1) +pt.show() + diff --git a/src/PyGadget/examples/test_AllAcceleration.py b/src/PyGadget/examples/test_AllAcceleration.py new file mode 100755 index 0000000..e5e9720 --- /dev/null +++ b/src/PyGadget/examples/test_AllAcceleration.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. 
+params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +#gadget.AllPotential() +print "start accel" +gadget.AllAcceleration() +print "done accel" + +#pot = gadget.GetAllPotential() +acc = gadget.GetAllAcceleration() + +nb.pos = gadget.GetAllPositions() + + +############################### +# plot +############################### + +r = nb.rxyz() +val = sqrt(acc[:,0]**2 + acc[:,1]**2 + acc[:,2]**2) + + +pt.scatter(r,val,s=1) +pt.show() + diff --git a/src/PyGadget/examples/test_AllDensities.py b/src/PyGadget/examples/test_AllDensities.py new file mode 100755 index 0000000..922062d --- /dev/null +++ b/src/PyGadget/examples/test_AllDensities.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. 
+ +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +#gadget.AllPotential() +nb.rho = gadget.GetAllDensities() +nb.pos = gadget.GetAllPositions() + +print min(nb.rho),max(nb.rho) +print min(nb.rxyz()),max(nb.rxyz()) + +############################### +# plot +############################### + +r = nb.rxyz() +val = nb.rho + + +pt.scatter(r,val,s=1) +pt.semilogx() +pt.semilogy() +pt.axis([1e-2,1e3,1e-10,1e0]) +pt.show() + diff --git a/src/PyGadget/examples/test_AllDensity-Hsml.py b/src/PyGadget/examples/test_AllDensity-Hsml.py new file mode 100755 index 0000000..da5e11a --- /dev/null +++ b/src/PyGadget/examples/test_AllDensity-Hsml.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +nb.rho = gadget.GetAllDensities() +nb.hsml = gadget.GetAllHsml() +nb.pos = gadget.GetAllPositions() + + +############################### +# plot +############################### + +r = nb.rxyz() +r = nb.rho +val = 1/nb.hsml**3 + + +pt.scatter(r,val,s=1) +pt.show() + diff --git a/src/PyGadget/examples/test_AllHsml.py b/src/PyGadget/examples/test_AllHsml.py new file mode 100755 index 0000000..42febba --- /dev/null +++ b/src/PyGadget/examples/test_AllHsml.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. 
+params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +nb.rho = gadget.GetAllDensities() +nb.hsml = gadget.GetAllHsml() +nb.pos = gadget.GetAllPositions() + + +############################### +# plot +############################### + +r = nb.rxyz() +val = nb.hsml + + +pt.scatter(r,val,s=1) +pt.show() + diff --git a/src/PyGadget/examples/test_AllPotential.py b/src/PyGadget/examples/test_AllPotential.py new file mode 100755 index 0000000..87650cf --- /dev/null +++ b/src/PyGadget/examples/test_AllPotential.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +rc = 1000. + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +gadget.AllPotential() +nb.pot = gadget.GetAllPotential() +nb.pos = gadget.GetAllPositions() + + +############################### +# plot +############################### + +r = nb.rxyz() +val = nb.pot + + +pt.scatter(r,val,s=1) +pt.show() + diff --git a/src/PyGadget/examples/test_Density.py b/src/PyGadget/examples/test_Density.py new file mode 100755 index 0000000..03ab064 --- /dev/null +++ b/src/PyGadget/examples/test_Density.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. 
+params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +nb.rho = gadget.GetAllDensities() +nb.hsml = gadget.GetAllHsml() +nb.pos = gadget.GetAllPositions() + + + +x = arange(-20,20,1) +y = zeros(len(x),float32) +z = zeros(len(x),float32) +pos = transpose(array([x,y,z])).astype(float32) +r = sqrt(x**2 + y**2 + z**2) + +hsml = ones(len(pos)).astype(float32) * nb.hsml.mean() +rho,hsml=gadget.Density(pos,hsml) + + + + + + +############################### +# plot +############################### + + + +pt.scatter(nb.rxyz(),nb.hsml,s=1) +pt.scatter(r,hsml,s=10,color='r') + +pt.figure() +pt.scatter(nb.rxyz(),nb.rho,s=1) +pt.scatter(r,rho,s=10,color='r') +#pt.semilogx() +#pt.semilogy() + + +pt.show() + + diff --git a/src/PyGadget/examples/test_InitHsml.py b/src/PyGadget/examples/test_InitHsml.py new file mode 100755 index 0000000..d5b2c4b --- /dev/null +++ b/src/PyGadget/examples/test_InitHsml.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. 
+ +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +nb.rho = gadget.GetAllDensities() +nb.hsml = gadget.GetAllHsml() +nb.pos = gadget.GetAllPositions() + + + +x = arange(-20,20,1) +y = zeros(len(x),float32) +z = zeros(len(x),float32) +pos = transpose(array([x,y,z])).astype(float32) +r = sqrt(x**2 + y**2 + z**2) + +hsml = ones(len(pos)).astype(float32) * nb.hsml.mean() +rho,hsml=gadget.InitHsml(pos,hsml) + + + + + + +############################### +# plot +############################### + + + +pt.scatter(nb.rxyz(),nb.hsml,s=1) +pt.scatter(r,hsml,s=10,color='r') + +pt.figure() +pt.scatter(nb.rxyz(),nb.rho,s=1) +pt.scatter(r,rho,s=10,color='r') +#pt.semilogx() +#pt.semilogy() + + +pt.show() + + diff --git a/src/PyGadget/examples/test_LoadParticlesQ.py b/src/PyGadget/examples/test_LoadParticlesQ.py new file mode 100755 index 0000000..b4704a2 --- /dev/null +++ b/src/PyGadget/examples/test_LoadParticlesQ.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +ThisTask = MPI.COMM_WORLD.Get_rank() + +random.seed(ThisTask) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. 
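+
+# the TODO asks to check that the particle totals survive the LoadParticlesQ /
+# GetAll...Q round trip performed below; a minimal sketch of such a check
+# (not part of the original script; mpi4py's object allreduce is assumed):
+#   ntot = MPI.COMM_WORLD.allreduce(len(nb.mass), op=MPI.SUM)
+#   mtot = MPI.COMM_WORLD.allreduce(float(sum(nb.mass)), op=MPI.SUM)
+#   if ThisTask == 0:
+#       print ntot, mtot    # repeat after the round trip and compare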
+ +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + +gadget.LoadParticlesQ(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + + +nb.pos = gadget.GetAllPositionsQ() +nb.vel = gadget.GetAllVelocitiesQ() +nb.mass = gadget.GetAllMassesQ() +nb.num = gadget.GetAllIDQ() +nb.tpe = gadget.GetAllTypesQ() + +nb.init() +nb.set_pio('yes') +nb.rename("snap.dat") + +nb.write() +print nb.p_name + diff --git a/src/PyGadget/examples/test_Ngbs.py b/src/PyGadget/examples/test_Ngbs.py new file mode 100755 index 0000000..578ae75 --- /dev/null +++ b/src/PyGadget/examples/test_Ngbs.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +#import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + + +eps = 0.1 + +params['SofteningGas'] = eps +params['SofteningHalo'] = eps +params['SofteningDisk'] = eps +params['SofteningBulge'] = eps +params['SofteningStars'] = eps +params['SofteningBndry'] = eps +params['SofteningGasMaxPhys'] = eps +params['SofteningHaloMaxPhys'] = eps +params['SofteningDiskMaxPhys'] = eps +params['SofteningBulgeMaxPhys'] = eps +params['SofteningStarsMaxPhys'] = eps +params['SofteningBndryMaxPhys'] = eps + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + + +pos = array([0,0,0],float) +eps = 0.5 + +pos = gadget.Ngbs(pos,eps) + +print pos + + + diff --git a/src/PyGadget/examples/test_Parameters.py b/src/PyGadget/examples/test_Parameters.py new file mode 100755 index 0000000..6b2e1f4 --- /dev/null +++ b/src/PyGadget/examples/test_Parameters.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 100 + +nb = ic.plummer(n,1,1,1,eps=0.1,rmax=1.) 
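+
+# what follows exercises the parameter API: GetParameters() returns the full
+# parameter table and SetParameters() pushes it back; a stricter variant of
+# the print-based round-trip check done below would be (sketch only, not part
+# of the original script):
+#   gadget.SetParameters(params)
+#   readback = gadget.GetParameters()
+#   for key in keys:
+#       if readback[key] != params[key]:
+#           print 'mismatch for', key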
+ + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + +params = gadget.GetParameters() + + +keys = ['TimeLimitCPU', +'ResubmitOn', +'ICFormat', +'SnapFormat', +'ComovingIntegrationOn', +'TypeOfTimestepCriterion', +'OutputListOn', +'PeriodicBoundariesOn', +'TimeBegin', +'TimeMax', +'Omega0', +'OmegaLambda', +'OmegaBaryon', +'HubbleParam', +'BoxSize', +'TimeBetSnapshot', +'TimeOfFirstSnapshot', +'CpuTimeBetRestartFile', +'TimeBetStatistics', +'NumFilesPerSnapshot', +'NumFilesWrittenInParallel', +'ErrTolIntAccuracy', +'MaxRMSDisplacementFac', +'CourantFac', +'MaxSizeTimestep', +'MinSizeTimestep', +'ErrTolTheta', +'TypeOfOpeningCriterion', +'ErrTolForceAcc', +'TreeDomainUpdateFrequency', +'DesNumNgb', +'MaxNumNgbDeviation', +'ArtBulkViscConst', +'InitGasTemp', +'MinGasTemp', +'PartAllocFactor', +'TreeAllocFactor', +'BufferSize', +'UnitLength_in_cm', +'UnitMass_in_g', +'UnitVelocity_in_cm_per_s', +'GravityConstantInternal', +'MinGasHsmlFractional', +'SofteningGas', +'SofteningHalo', +'SofteningDisk', +'SofteningBulge', +'SofteningStars', +'SofteningBndry', +'SofteningGasMaxPhys', +'SofteningHaloMaxPhys', +'SofteningDiskMaxPhys', +'SofteningBulgeMaxPhys', +'SofteningStarsMaxPhys', +'SofteningBndryMaxPhys'] + +for key in keys: + print key,params[key] + +i=0 +for key in keys: + params[key] = i + i = i + 1 + +print + +for key in keys: + print key,params[key] + + +gadget.SetParameters(params) +params = gadget.GetParameters() + + + +#gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) diff --git a/src/PyGadget/examples/test_Potential.py b/src/PyGadget/examples/test_Potential.py new file mode 100755 index 0000000..0ec2049 --- /dev/null +++ b/src/PyGadget/examples/test_Potential.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +r = arange(0,100,0.1) +y = zeros(len(r)) +z = zeros(len(r)) +pos = transpose(array([r,y,z])) +#pos = array([[0,0,0]]) +eps = rc/100. 
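+
+# for reference: a Plummer sphere of total mass M and scale length a has the
+# analytic potential phi(r) = -G*M/sqrt(r**2 + a**2); assuming M = 1, a = rc
+# and G = 1 in the internal units used here (assumptions, not checked by this
+# script), the Potential() values computed below could be compared against:
+#   phi_ref = -1.0/sqrt(r**2 + rc**2)
+#   pt.scatter(r,phi_ref,s=1)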
+ +pot = gadget.Potential(pos,eps) + + + +############################### +# plot +############################### + +val = pot + + +pt.scatter(r,val,s=1) +pt.show() + diff --git a/src/PyGadget/examples/test_SphEvaluate.py b/src/PyGadget/examples/test_SphEvaluate.py new file mode 100755 index 0000000..78e0bd4 --- /dev/null +++ b/src/PyGadget/examples/test_SphEvaluate.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +from mpi4py import MPI + +from pNbody import ic + +from numpy import * +from PyGadget import gadget + +import Ptools as pt + +import sys +import time + +random.seed(MPI.COMM_WORLD.Get_rank()) # 2 for two points + +n = 10000 +rc = 2 +rmax = 100 + +nb = ic.plummer(n,1,1,1,eps=rc,rmax=rmax,ftype='gadget') +nb.rename('plummer.dat') +nb.write() + +gadget.InitMPI() # init MPI +gadget.InitDefaultParameters() # init default parameters +gadget.Info() + + +params = {} + +params['ErrTolTheta'] = 0.7 + +params['DesNumNgb'] = 50 +params['MaxNumNgbDeviation'] = 1 + +params['UnitLength_in_cm'] = 3.085e+21 +params['UnitMass_in_g'] = 4.435693e+44 +params['UnitVelocity_in_cm_per_s'] = 97824708.2699 + +params['SofteningGas'] = rc/100. +params['SofteningHalo'] = rc/100. +params['SofteningDisk'] = rc/100. +params['SofteningBulge'] = rc/100. +params['SofteningStars'] = rc/100. +params['SofteningBndry'] = rc/100. +params['SofteningGasMaxPhys'] = rc/100. +params['SofteningHaloMaxPhys'] = rc/100. +params['SofteningDiskMaxPhys'] = rc/100. +params['SofteningBulgeMaxPhys'] = rc/100. +params['SofteningStarsMaxPhys'] = rc/100. +params['SofteningBndryMaxPhys'] = rc/100. + +gadget.SetParameters(params) + + + +params = gadget.GetParameters() + + + +gadget.LoadParticles(array(nb.npart),nb.pos,nb.vel,nb.mass,nb.num,nb.tpe) + +nb.rho = gadget.GetAllDensities() +nb.hsml = gadget.GetAllHsml() +nb.pos = gadget.GetAllPositions() + +nb.obs = nb.x()**2 # define an observable + + + +x = arange(-50,50,5) +y = zeros(len(x),float32) +z = zeros(len(x),float32) +pos = transpose(array([x,y,z])).astype(float32) +r = sqrt(x**2 + y**2 + z**2) + +hsml = ones(len(pos)).astype(float32) * nb.hsml.mean() + +print "start SphEvaluate" + +rho,hsml=gadget.InitHsml(pos,hsml) +obs=gadget.SphEvaluate(pos,hsml,nb.obs) + + + +############################### +# plot +############################### + +if MPI.COMM_WORLD.Get_rank()==0: + color = 'r' +if MPI.COMM_WORLD.Get_rank()==1: + color = 'b' + + + +pt.scatter(nb.x(),nb.obs,s=1,color='k') +pt.scatter(x,obs,s=10,color=color) + +#pt.figure() +#pt.scatter(nb.rxyz(),nb.rho,s=1) +#pt.scatter(r,rho,s=10,color='r') +##pt.semilogx() +##pt.semilogy() + + +pt.show() + + diff --git a/src/PyGadget/setup.py b/src/PyGadget/setup.py new file mode 100644 index 0000000..a321a23 --- /dev/null +++ b/src/PyGadget/setup.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python + +import os,sys + +from distutils.core import setup, Extension +from distutils.sysconfig import get_python_inc +from distutils.sysconfig import get_python_lib + + + +''' +In order to compile with mpi, + +export CC=mpicc +python setup.py build + + +''' + + + + + +incdir = os.path.join(get_python_inc(plat_specific=1), 'numpy') +libdir = os.path.join(get_python_lib(plat_specific=1), 'numpy') + +gadget_files = ["src/main.c", +"src/run.c", +"src/predict.c", +"src/begrun.c", +"src/endrun.c", +"src/global.c", +"src/timestep.c", +"src/init.c", +"src/restart.c", +"src/io.c", +"src/accel.c", +"src/read_ic.c", +"src/ngb.c", +"src/system.c", +"src/allocate.c", +"src/density.c", +"src/gravtree.c", +"src/hydra.c", +"src/driftfac.c", +"src/domain.c", +"src/allvars.c", 
+"src/potential.c", +"src/forcetree.c", +"src/peano.c", +"src/gravtree_forcetest.c", +"src/pm_periodic.c", +"src/pm_nonperiodic.c", +"src/longrange.c", +"src/sph.c", +"src/python_interface.c", +"src/domainQ.c", +"src/allocateQ.c"] + + + + + +setup(name='PyGadget', + version='0.0', + description='Python Gadget Wrapping', + author='Greg Ward', + author_email='yves.revaz@epfl.ch', + url='http://obswww.unige.ch/~revaz/pNbody', + packages=['PyGadget'], + ext_modules=[ + Extension('PyGadget.gadget', gadget_files,include_dirs=["/usr/lib/mpich/include/","src/","/usr/include/python2.6/"],define_macros=[('PY_INTERFACE', '1'),('UNEQUALSOFTENINGS', '1')] , libraries=['gsl','gslcblas','m','mpi'] ) + ] + ) diff --git a/src/PyGadget/src/Makefile b/src/PyGadget/src/Makefile new file mode 100644 index 0000000..db0d172 --- /dev/null +++ b/src/PyGadget/src/Makefile @@ -0,0 +1,190 @@ + +#---------------------------------------------------------------------- +# From the list below, please activate/deactivate the options that +# apply to your run. If you modify any of these options, make sure +# that you recompile the whole code by typing "make clean; make". +# +# Look at end of file for a brief guide to the compile-time options. +#---------------------------------------------------------------------- + +OPT += -DPY_INTERFACE + + +#--------------------------------------- Basic operation mode of code +#OPT += -DPERIODIC +OPT += -DUNEQUALSOFTENINGS + + +#--------------------------------------- Things that are always recommended +OPT += -DPEANOHILBERT +OPT += -DWALLCLOCK + + +#--------------------------------------- TreePM Options +#OPT += -DPMGRID=128 +#OPT += -DPLACEHIGHRESREGION=3 +#OPT += -DENLARGEREGION=1.2 +#OPT += -DASMTH=1.25 +#OPT += -DRCUT=4.5 + + +#--------------------------------------- Single/Double Precision +#OPT += -DDOUBLEPRECISION +#OPT += -DDOUBLEPRECISION_FFTW + + +#--------------------------------------- Time integration options +OPT += -DSYNCHRONIZATION +#OPT += -DFLEXSTEPS +#OPT += -DPSEUDOSYMMETRIC +#OPT += -DNOSTOP_WHEN_BELOW_MINTIMESTEP +#OPT += -DNOPMSTEPADJUSTMENT + + +#--------------------------------------- Output +#OPT += -DHAVE_HDF5 +#OPT += -DOUTPUTPOTENTIAL +#OPT += -DOUTPUTACCELERATION +#OPT += -DOUTPUTCHANGEOFENTROPY +#OPT += -DOUTPUTTIMESTEP + + +#--------------------------------------- Things for special behaviour +#OPT += -DNOGRAVITY +#OPT += -DNOTREERND +#OPT += -DNOTYPEPREFIX_FFTW +#OPT += -DLONG_X=60 +#OPT += -DLONG_Y=5 +#OPT += -DLONG_Z=0.2 +#OPT += -DTWODIMS +#OPT += -DSPH_BND_PARTICLES +#OPT += -DNOVISCOSITYLIMITER +#OPT += -DCOMPUTE_POTENTIAL_ENERGY +#OPT += -DLONGIDS +#OPT += -DISOTHERM_EQS +#OPT += -DADAPTIVE_GRAVSOFT_FORGAS +#OPT += -DSELECTIVE_NO_GRAVITY=2+4+8+16 + +#--------------------------------------- Testing and Debugging options +#OPT += -DFORCETEST=0.1 + + +#--------------------------------------- Glass making +#OPT += -DMAKEGLASS=262144 + + +#---------------------------------------------------------------------- +# Here, select compile environment for the target machine. This may need +# adjustment, depending on your local system. Follow the examples to add +# additional target platforms, and to get things properly compiled. 
+#----------------------------------------------------------------------
+
+#--------------------------------------- Select some defaults
+
+CC       = mpicc           # sets the C-compiler
+OPTIMIZE = -O2 -Wall -g    # sets optimization and warning flags
+MPICHLIB = -lmpich
+
+
+#--------------------------------------- Select target computer
+
+#SYSTYPE="obsrevaz"
+SYSTYPE="obscalc"
+
+
+#--------------------------------------- Adjust settings for target computer
+
+ifeq ($(SYSTYPE),"obsrevaz")
+CC       = mpicc
+OPTIMIZE = -fPIC
+GSL_INCL = -I/home/revaz/local/include
+GSL_LIBS = -L/home/revaz/local/lib/
+FFTW_INCL= -I/home/revaz/local/include/
+FFTW_LIBS= -L/home/revaz/local/lib/
+MPICHLIB = -lmpi
+HDF5INCL =
+HDF5LIB  =
+
+INCLUDEPY    = -I/usr/include/python2.6/
+INCLUDENUMPY = -I/usr/include/python2.6
+
+endif
+
+
+ifeq ($(SYSTYPE),"obscalc")
+CC       = mpicc
+OPTIMIZE = -fPIC
+GSL_INCL =
+GSL_LIBS =
+FFTW_INCL=
+FFTW_LIBS=
+MPICHLIB = -lmpi
+HDF5INCL =
+HDF5LIB  =
+
+INCLUDEPY       = -I/usr/include/python2.6/
+INCLUDENUMPY    = -I/usr/include/python2.6
+INCLUDENUMARRAY = -I/usr/lib64/python2.6/site-packages/numpy/numarray/
+
+NO_FFTW_LIB = "yes"
+endif
+
+
+
+
+ifneq (HAVE_HDF5,$(findstring HAVE_HDF5,$(OPT)))
+HDF5INCL =
+HDF5LIB  =
+endif
+
+
+OPTIONS = $(OPTIMIZE) $(OPT)
+
+EXEC = Gadget2
+
+OUTPUTLIB = gadget
+
+OBJS = main.o run.o predict.o begrun.o endrun.o global.o \
+	timestep.o init.o restart.o io.o \
+	accel.o read_ic.o ngb.o \
+	system.o allocate.o density.o \
+	gravtree.o hydra.o driftfac.o \
+	domain.o allvars.o potential.o \
+	forcetree.o peano.o gravtree_forcetest.o \
+	pm_periodic.o pm_nonperiodic.o longrange.o \
+	sph.o python_interface.o domainQ.o allocateQ.o
+
+INCL = allvars.h proto.h tags.h Makefile
+
+
+CFLAGS = $(OPTIONS) $(GSL_INCL) $(FFTW_INCL) $(HDF5INCL) $(INCLUDEPY) $(INCLUDENUMPY) $(INCLUDENUMARRAY)
+
+
+ifeq (NOTYPEPREFIX_FFTW,$(findstring NOTYPEPREFIX_FFTW,$(OPT)))    # fftw installed with type prefix?
+  FFTW_LIB = $(FFTW_LIBS) -lrfftw_mpi -lfftw_mpi -lrfftw -lfftw
+else
+ifeq (DOUBLEPRECISION_FFTW,$(findstring DOUBLEPRECISION_FFTW,$(OPT)))
+  FFTW_LIB = $(FFTW_LIBS) -ldrfftw_mpi -ldfftw_mpi -ldrfftw -ldfftw
+else
+  FFTW_LIB = $(FFTW_LIBS) -lsrfftw_mpi -lsfftw_mpi -lsrfftw -lsfftw
+endif
+endif
+
+ifeq ($(NO_FFTW_LIB),"yes")
+  FFTW_LIB =
+endif
+
+LIBS = $(HDF5LIB) -g $(MPICHLIB) $(GSL_LIBS) -lgsl -lgslcblas -lm $(FFTW_LIB)
+
+$(EXEC): $(OBJS)
+	$(CC) -shared $(OBJS) $(LIBS) -o $(OUTPUTLIB).so
+
+$(OBJS): $(INCL)
+
+gadget:
+	$(CC) $(OBJS) $(LIBS) $(MPI_LIB) -o $(EXEC)
+
+clean:
+	rm -f $(OBJS) $(EXEC) *.so
+
+
diff --git a/src/PyGadget/src/accel.c b/src/PyGadget/src/accel.c
new file mode 100644
index 0000000..3c4934b
--- /dev/null
+++ b/src/PyGadget/src/accel.c
@@ -0,0 +1,96 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <mpi.h>
+
+#include "allvars.h"
+#include "proto.h"
+
+/*! \file accel.c
+ *  \brief driver routine to carry out force computation
+ */
+
+
+/*! This routine computes the accelerations for all active particles.
+ *  First, the long-range PM force is computed if the TreePM algorithm is
+ *  used and a "big" PM step is done. Next, the gravitational tree forces
+ *  are computed. This also constructs the tree, if needed.
+ *
+ *  If gas particles are present, the density-loop for active SPH particles
+ *  is carried out. This includes an iteration on the correct number of
+ *  neighbours. Finally, the hydrodynamical forces are added.
+ */ +void compute_accelerations(int mode) +{ + double tstart, tend; + + if(ThisTask == 0) + { + printf("Start force computation...\n"); + fflush(stdout); + } + +#ifdef PMGRID + if(All.PM_Ti_endstep == All.Ti_Current) + { + tstart = second(); + long_range_force(); + tend = second(); + All.CPU_PM += timediff(tstart, tend); + } +#endif + + tstart = second(); /* measure the time for the full force computation */ + + gravity_tree(); /* computes gravity accel. */ + + if(All.TypeOfOpeningCriterion == 1 && All.Ti_Current == 0) + gravity_tree(); /* For the first timestep, we redo it + * to allow usage of relative opening + * criterion for consistent accuracy. + */ + tend = second(); + All.CPU_Gravity += timediff(tstart, tend); + +#ifdef FORCETEST + gravity_forcetest(); +#endif + + if(All.TotN_gas > 0) + { + if(ThisTask == 0) + { + printf("Start density computation...\n"); + fflush(stdout); + } + + tstart = second(); + density(); /* computes density, and pressure */ + tend = second(); + All.CPU_Hydro += timediff(tstart, tend); + + tstart = second(); + force_update_hmax(); /* tell the tree nodes the new SPH smoothing length such that they are guaranteed to hold the correct max(Hsml) */ + tend = second(); + All.CPU_Predict += timediff(tstart, tend); + + + if(ThisTask == 0) + { + printf("Start hydro-force computation...\n"); + fflush(stdout); + } + + tstart = second(); + hydro_force(); /* adds hydrodynamical accelerations and computes viscous entropy injection */ + tend = second(); + All.CPU_Hydro += timediff(tstart, tend); + } + + if(ThisTask == 0) + { + printf("force computation done.\n"); + fflush(stdout); + } +} diff --git a/src/PyGadget/src/accel.o b/src/PyGadget/src/accel.o new file mode 100644 index 0000000..8faab53 Binary files /dev/null and b/src/PyGadget/src/accel.o differ diff --git a/src/PyGadget/src/allocate.c b/src/PyGadget/src/allocate.c new file mode 100644 index 0000000..7e2abdf --- /dev/null +++ b/src/PyGadget/src/allocate.c @@ -0,0 +1,165 @@ +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + +/*! \file allocate.c + * \brief routines for allocating particle and tree storage + */ + +/*! Allocates a number of small buffers and arrays, the largest one being + * the communication buffer. The communication buffer itself is mapped + * onto various tables used in the different parts of the force + * algorithms. We further allocate space for the top-level tree nodes, and + * auxiliary arrays for the domain decomposition algorithm. 
+ */ +void allocate_commbuffers(void) +{ + size_t bytes; + + Exportflag = malloc(NTask * sizeof(char)); + DomainStartList = malloc(NTask * sizeof(int)); + DomainEndList = malloc(NTask * sizeof(int)); + + TopNodes = malloc(MAXTOPNODES * sizeof(struct topnode_data)); + + DomainWork = malloc(MAXTOPNODES * sizeof(double)); + DomainCount = malloc(MAXTOPNODES * sizeof(int)); + DomainCountSph = malloc(MAXTOPNODES * sizeof(int)); + DomainTask = malloc(MAXTOPNODES * sizeof(int)); + DomainNodeIndex = malloc(MAXTOPNODES * sizeof(int)); + DomainTreeNodeLen = malloc(MAXTOPNODES * sizeof(FLOAT)); + DomainHmax = malloc(MAXTOPNODES * sizeof(FLOAT)); + DomainMoment = malloc(MAXTOPNODES * sizeof(struct DomainNODE)); + + if(!(CommBuffer = malloc(bytes = All.BufferSize * 1024 * 1024))) + { + printf("failed to allocate memory for `CommBuffer' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(2); + } + + All.BunchSizeForce = + (All.BufferSize * 1024 * 1024) / (sizeof(struct gravdata_index) + 2 * sizeof(struct gravdata_in)); + + if(All.BunchSizeForce & 1) + All.BunchSizeForce -= 1; /* make sure that All.BunchSizeForce is an even number + --> 8-byte alignment for 64bit processors */ + + GravDataIndexTable = (struct gravdata_index *) CommBuffer; + GravDataIn = (struct gravdata_in *) (GravDataIndexTable + All.BunchSizeForce); + GravDataGet = GravDataIn + All.BunchSizeForce; + GravDataOut = GravDataIn; /* this will overwrite the GravDataIn-Table */ + GravDataResult = GravDataGet; /* this will overwrite the GravDataGet-Table */ + + + All.BunchSizeDensity = + (All.BufferSize * 1024 * 1024) / (2 * sizeof(struct densdata_in) + 2 * sizeof(struct densdata_out)); + + DensDataIn = (struct densdata_in *) CommBuffer; + DensDataGet = DensDataIn + All.BunchSizeDensity; + DensDataResult = (struct densdata_out *) (DensDataGet + All.BunchSizeDensity); + DensDataPartialResult = DensDataResult + All.BunchSizeDensity; + + All.BunchSizeHydro = + (All.BufferSize * 1024 * 1024) / (2 * sizeof(struct hydrodata_in) + 2 * sizeof(struct hydrodata_out)); + + HydroDataIn = (struct hydrodata_in *) CommBuffer; + HydroDataGet = HydroDataIn + All.BunchSizeHydro; + HydroDataResult = (struct hydrodata_out *) (HydroDataGet + All.BunchSizeHydro); + HydroDataPartialResult = HydroDataResult + All.BunchSizeHydro; + +#ifdef PY_INTERFACE + All.BunchSizeSph = + (All.BufferSize * 1024 * 1024) / (2 * sizeof(struct sphdata_in) + 2 * sizeof(struct sphdata_out)); + + SphDataIn = (struct sphdata_in *) CommBuffer; + SphDataGet = SphDataIn + All.BunchSizeSph; + SphDataResult = (struct sphdata_out *) (SphDataGet + All.BunchSizeSph); + SphDataPartialResult = SphDataResult + All.BunchSizeSph; +#endif + + + + + All.BunchSizeDomain = + (All.BufferSize * 1024 * 1024) / (sizeof(struct particle_data) + sizeof(struct sph_particle_data) + + sizeof(peanokey)); + + if(All.BunchSizeDomain & 1) + All.BunchSizeDomain -= 1; /* make sure that All.BunchSizeDomain is even + --> 8-byte alignment of DomainKeyBuf for 64bit processors */ + + DomainPartBuf = (struct particle_data *) CommBuffer; + DomainSphBuf = (struct sph_particle_data *) (DomainPartBuf + All.BunchSizeDomain); + DomainKeyBuf = (peanokey *) (DomainSphBuf + All.BunchSizeDomain); + + + if(ThisTask == 0) + { + printf("\nAllocated %d MByte communication buffer per processor.\n\n", All.BufferSize); + printf("Communication buffer has room for %d particles in gravity computation\n", All.BunchSizeForce); + printf("Communication buffer has room for %d particles in density computation\n", All.BunchSizeDensity); + 
printf("Communication buffer has room for %d particles in hydro computation\n", All.BunchSizeHydro); + printf("Communication buffer has room for %d particles in domain decomposition\n", All.BunchSizeDomain); + printf("\n"); + } +} + + + +/*! This routine allocates memory for particle storage, both the + * collisionless and the SPH particles. + */ +void allocate_memory(void) +{ + size_t bytes; + double bytes_tot = 0; + + if(All.MaxPart > 0) + { + if(!(P = malloc(bytes = All.MaxPart * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `P' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + + if(ThisTask == 0) + printf("\nAllocated %g MByte for particle storage. %d\n\n", bytes_tot / (1024.0 * 1024.0), sizeof(struct particle_data)); + } + + if(All.MaxPartSph > 0) + { + bytes_tot = 0; + + if(!(SphP = malloc(bytes = All.MaxPartSph * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphP' (%g MB) %d.\n", bytes / (1024.0 * 1024.0), sizeof(struct sph_particle_data)); + endrun(1); + } + bytes_tot += bytes; + + if(ThisTask == 0) + printf("Allocated %g MByte for storage of SPH data. %d\n\n", bytes_tot / (1024.0 * 1024.0), sizeof(struct sph_particle_data)); + } +} + + + + +/*! This routine frees the memory for the particle storage. Note: We don't + * actually bother to call it in the code... When the program terminats, + * the memory will be automatically freed by the operating system. + */ +void free_memory(void) +{ + if(All.MaxPartSph > 0) + free(SphP); + + if(All.MaxPart > 0) + free(P); +} + diff --git a/src/PyGadget/src/allocate.o b/src/PyGadget/src/allocate.o new file mode 100644 index 0000000..04acb56 Binary files /dev/null and b/src/PyGadget/src/allocate.o differ diff --git a/src/PyGadget/src/allocateQ.c b/src/PyGadget/src/allocateQ.c new file mode 100644 index 0000000..2c18585 --- /dev/null +++ b/src/PyGadget/src/allocateQ.c @@ -0,0 +1,61 @@ +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +#ifdef PY_INTERFACE + +/*! \file allocate.c + * \brief routines for allocating particle and tree storage + */ + +/*! Allocates a number of small buffers and arrays, the largest one being + * the communication buffer. The communication buffer itself is mapped + * onto various tables used in the different parts of the force + * algorithms. We further allocate space for the top-level tree nodes, and + * auxiliary arrays for the domain decomposition algorithm. 
+ */ +void allocate_commbuffersQ(void) +{ + size_t bytes; + + DomainStartListQ = malloc(NTask * sizeof(int)); + DomainEndListQ = malloc(NTask * sizeof(int)); + + TopNodesQ = malloc(MAXTOPNODES * sizeof(struct topnode_data)); + + DomainWorkQ = malloc(MAXTOPNODES * sizeof(double)); + DomainCountQ = malloc(MAXTOPNODES * sizeof(int)); + DomainCountSphQ = malloc(MAXTOPNODES * sizeof(int)); + DomainTaskQ = malloc(MAXTOPNODES * sizeof(int)); + + + All.BunchSizeDomain = + (All.BufferSize * 1024 * 1024) / (sizeof(struct particle_data) + sizeof(struct sph_particle_data) + + sizeof(peanokey)); + + if(All.BunchSizeDomain & 1) + All.BunchSizeDomain -= 1; /* make sure that All.BunchSizeDomain is even */ + + + + if(!(CommBufferQ = malloc(bytes = All.BufferSize * 1024 * 1024))) + { + printf("failed to allocate memory for `CommBufferQ' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(2); + } + + DomainPartBufQ = (struct particle_data *) CommBufferQ; + DomainSphBufQ = (struct sph_particle_data *) (DomainPartBufQ + All.BunchSizeDomain); + DomainKeyBufQ = (peanokey *) (DomainSphBufQ + All.BunchSizeDomain); + + + +} + + +#endif diff --git a/src/PyGadget/src/allocateQ.o b/src/PyGadget/src/allocateQ.o new file mode 100644 index 0000000..9220b94 Binary files /dev/null and b/src/PyGadget/src/allocateQ.o differ diff --git a/src/PyGadget/src/allvars.c b/src/PyGadget/src/allvars.c new file mode 100644 index 0000000..14fffe2 --- /dev/null +++ b/src/PyGadget/src/allvars.c @@ -0,0 +1,268 @@ +/*! \file allvars.c + * \brief provides instances of all global variables. + */ + +#include +#include +#include "tags.h" +#include "allvars.h" + + +int ThisTask; /*!< the rank of the local processor */ +int NTask; /*!< number of processors */ +int PTask; /*!< smallest integer such that NTask <= 2^PTask */ + +int NumPart; /*!< number of particles on the LOCAL processor */ +#ifdef PY_INTERFACE +int NumPartQ; +#endif +int N_gas; /*!< number of gas particles on the LOCAL processor */ +#ifdef PY_INTERFACE +int N_gasQ; +#endif +long long Ntype[6]; /*!< total number of particles of each type */ +int NtypeLocal[6]; /*!< local number of particles of each type */ +#ifdef PY_INTERFACE +long long NtypeQ[6]; /*!< total number of particles of each type */ +int NtypeLocalQ[6]; /*!< local number of particles of each type */ +#endif +int NumForceUpdate; /*!< number of active particles on local processor in current timestep */ +int NumSphUpdate; /*!< number of active SPH particles on local processor in current timestep */ + +double CPUThisRun; /*!< Sums the CPU time for the process (current submission only) */ + + +int RestartFlag; /*!< taken from command line used to start code. 0 is normal start-up from + initial conditions, 1 is resuming a run from a set of restart files, while 2 + marks a restart from a snapshot file. 
*/ + +char *Exportflag; /*!< Buffer used for flagging whether a particle needs to be exported to another process */ + +int *Ngblist; /*!< Buffer to hold indices of neighbours retrieved by the neighbour search routines */ + +int TreeReconstructFlag; /*!< Signals that a new tree needs to be constructed */ + +int Flag_FullStep; /*!< This flag signals that the current step involves all particles */ + + +gsl_rng *random_generator; /*!< the employed random number generator of the GSL library */ + +double RndTable[RNDTABLE]; /*!< Hold a table with random numbers, refreshed every timestep */ + + +double DomainCorner[3]; /*!< gives the lower left corner of simulation volume */ +double DomainCenter[3]; /*!< gives the center of simulation volume */ +double DomainLen; /*!< gives the (maximum) side-length of simulation volume */ +double DomainFac; /*!< factor used for converting particle coordinates to a Peano-Hilbert mesh covering the simulation volume */ +int DomainMyStart; /*!< first domain mesh cell that resides on the local processor */ +int DomainMyLast; /*!< last domain mesh cell that resides on the local processor */ +int *DomainStartList; /*!< a table that lists the first domain mesh cell for all processors */ +int *DomainEndList; /*!< a table that lists the last domain mesh cell for all processors */ +double *DomainWork; /*!< a table that gives the total "work" due to the particles stored by each processor */ +int *DomainCount; /*!< a table that gives the total number of particles held by each processor */ +int *DomainCountSph; /*!< a table that gives the total number of SPH particles held by each processor */ + +int *DomainTask; /*!< this table gives for each leaf of the top-level tree the processor it was assigned to */ +int *DomainNodeIndex; /*!< this table gives for each leaf of the top-level tree the corresponding node of the gravitational tree */ +FLOAT *DomainTreeNodeLen; /*!< this table gives for each leaf of the top-level tree the side-length of the corresponding node of the gravitational tree */ +FLOAT *DomainHmax; /*!< this table gives for each leaf of the top-level tree the maximum SPH smoothing length among the particles of the corresponding node of the gravitational tree */ + +#ifdef PY_INTERFACE +double DomainCornerQ[3]; /*!< gives the lower left corner of simulation volume */ +double DomainCenterQ[3]; /*!< gives the center of simulation volume */ +double DomainLenQ; /*!< gives the (maximum) side-length of simulation volume */ +double DomainFacQ; /*!< factor used for converting particle coordinates to a Peano-Hilbert mesh covering the simulation volume */ +int DomainMyStartQ; /*!< first domain mesh cell that resides on the local processor */ +int DomainMyLastQ; /*!< last domain mesh cell that resides on the local processor */ +int *DomainStartListQ; /*!< a table that lists the first domain mesh cell for all processors */ +int *DomainEndListQ; /*!< a table that lists the last domain mesh cell for all processors */ +double *DomainWorkQ; /*!< a table that gives the total "work" due to the particles stored by each processor */ +int *DomainCountQ; /*!< a table that gives the total number of particles held by each processor */ +int *DomainCountSphQ; /*!< a table that gives the total number of SPH particles held by each processor */ + +int *DomainTaskQ; /*!< this table gives for each leaf of the top-level tree the processor it was assigned to */ +#endif + + +struct DomainNODE + *DomainMoment; /*!< this table stores for each node of the top-level tree corresponding node data from the 
gravitational tree */ + +peanokey *DomainKeyBuf; /*!< this points to a buffer used during the exchange of particle data */ +#ifdef PY_INTERFACE +peanokey *DomainKeyBufQ; /*!< this points to a buffer used during the exchange of particle data */ +#endif +peanokey *Key; /*!< a table used for storing Peano-Hilbert keys for particles */ +peanokey *KeySorted; /*!< holds a sorted table of Peano-Hilbert keys for all particles, used to construct top-level tree */ + + +int NTopnodes; /*!< total number of nodes in top-level tree */ +int NTopleaves; /*!< number of leaves in top-level tree. Each leaf can be assigned to a different processor */ + +#ifdef PY_INTERFACE +int NTopnodesQ; /*!< total number of nodes in top-level tree */ +int NTopleavesQ; /*!< number of leaves in top-level tree. Each leaf can be assigned to a different processor */ +#endif + +#ifdef PY_INTERFACE +int NTopnodes; /*!< total number of nodes in top-level tree */ +int NTopleaves; /*!< number of leaves in top-level tree. Each leaf can be assigned to a different processor */ +#endif + +struct topnode_data +#ifdef PY_INTERFACE + *TopNodesQ, +#endif + *TopNodes; /*!< points to the root node of the top-level tree */ + + +double TimeOfLastTreeConstruction; /*!< holds what it says, only used in connection with FORCETEST */ + + + +/* variables for input/output, usually only used on process 0 */ + +char ParameterFile[MAXLEN_FILENAME]; /*!< file name of parameterfile used for starting the simulation */ + +FILE *FdInfo; /*!< file handle for info.txt log-file. */ +FILE *FdEnergy; /*!< file handle for energy.txt log-file. */ +FILE *FdTimings; /*!< file handle for timings.txt log-file. */ +FILE *FdCPU; /*!< file handle for cpu.txt log-file. */ + +#ifdef FORCETEST +FILE *FdForceTest; /*!< file handle for forcetest.txt log-file. */ +#endif + + +double DriftTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological drift factors */ +double GravKickTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological kick factor for gravitational forces */ +double HydroKickTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological kick factor for hydrodynmical forces */ + +void *CommBuffer; /*!< points to communication buffer, which is used in the domain decomposition, the + parallel tree-force computation, the SPH routines, etc. */ + +#ifdef PY_INTERFACE +void *CommBufferQ; /*!< points to communication buffer, which is used in the domain decomposition, the + parallel tree-force computation, the SPH routines, etc. */ +#endif + +/*! This structure contains data which is the SAME for all tasks (mostly code parameters read from the + * parameter file). Holding this data in a structure is convenient for writing/reading the restart file, and + * it allows the introduction of new global variables in a simple way. The only thing to do is to introduce + * them into this structure. + */ +struct global_data_all_processes + All; + + + +/*! This structure holds all the information that is + * stored for each particle of the simulation. + */ +struct particle_data + *P, /*!< holds particle data on local processor */ +#ifdef PY_INTERFACE + *Q, + *DomainPartBufQ, /*!< buffer for particle data used in domain decomposition */ +#endif + *DomainPartBuf; /*!< buffer for particle data used in domain decomposition */ + + +/* the following struture holds data that is stored for each SPH particle in addition to the collisionless + * variables. 
+ */ +struct sph_particle_data + *SphP, /*!< holds SPH particle data on local processor */ +#ifdef PY_INTERFACE + *SphQ, +*DomainSphBufQ, /*!< buffer for SPH particle data in domain decomposition */ +#endif + *DomainSphBuf; /*!< buffer for SPH particle data in domain decomposition */ + + + + + +/* Variables for Tree + */ + +int MaxNodes; /*!< maximum allowed number of internal nodes */ +int Numnodestree; /*!< number of (internal) nodes in each tree */ + +struct NODE + *Nodes_base, /*!< points to the actual memory allocted for the nodes */ + *Nodes; /*!< this is a pointer used to access the nodes which is shifted such that Nodes[All.MaxPart] + gives the first allocated node */ + + +int *Nextnode; /*!< gives next node in tree walk */ +int *Father; /*!< gives parent node in tree */ + + +struct extNODE /*!< this structure holds additional tree-node information which is not needed in the actual gravity computation */ + *Extnodes_base, /*!< points to the actual memory allocted for the extended node information */ + *Extnodes; /*!< provides shifted access to extended node information, parallel to Nodes/Nodes_base */ + + + + + +/*! Header for the standard file format. + */ +struct io_header + header; /*!< holds header for snapshot files */ + + + +char Tab_IO_Labels[IO_NBLOCKS][4]; /* +#include +#include "tags.h" + +#define GADGETVERSION "2.0" /*!< code version string */ + +#define TIMEBASE (1<<28) /*!< The simulated timespan is mapped onto the integer interval [0,TIMESPAN], + * where TIMESPAN needs to be a power of 2. Note that (1<<28) corresponds to 2^29 + */ + +#define MAXTOPNODES 200000 /*!< Maximum number of nodes in the top-level tree used for domain decomposition */ + + +typedef long long peanokey; /*!< defines the variable type used for Peano-Hilbert keys */ + +#define BITS_PER_DIMENSION 18 /*!< Bits per dimension available for Peano-Hilbert order. + Note: If peanokey is defined as type int, the allowed maximum is 10. + If 64-bit integers are used, the maximum is 21 */ + +#define PEANOCELLS (((peanokey)1)<<(3*BITS_PER_DIMENSION)) /*!< The number of different Peano-Hilbert cells */ + + +#define RNDTABLE 3000 /*!< gives the length of a table with random numbers, refreshed at every timestep. + This is used to allow application of random numbers to a specific particle + in a way that is independent of the number of processors used. 
*/ +#define MAX_REAL_NUMBER 1e37 +#define MIN_REAL_NUMBER 1e-37 + +#define MAXLEN_FILENAME 100 /*!< Maximum number of characters for filenames (including the full path) */ + +#ifdef ISOTHERM_EQS +#define GAMMA (1.0) /*!< index for isothermal gas */ +#else +#define GAMMA (5.0/3) /*!< adiabatic index of simulated gas */ +#endif + +#define GAMMA_MINUS1 (GAMMA-1) + +#define HYDROGEN_MASSFRAC 0.76 /*!< mass fraction of hydrogen, relevant only for radiative cooling */ + +/* Some physical constants in cgs units */ + +#define GRAVITY 6.672e-8 /*!< Gravitational constant (in cgs units) */ +#define SOLAR_MASS 1.989e33 +#define SOLAR_LUM 3.826e33 +#define RAD_CONST 7.565e-15 +#define AVOGADRO 6.0222e23 +#define BOLTZMANN 1.3806e-16 +#define GAS_CONST 8.31425e7 +#define C 2.9979e10 +#define PLANCK 6.6262e-27 +#define CM_PER_MPC 3.085678e24 +#define PROTONMASS 1.6726e-24 +#define ELECTRONMASS 9.10953e-28 +#define THOMPSON 6.65245e-25 +#define ELECTRONCHARGE 4.8032e-10 +#define HUBBLE 3.2407789e-18 /* in h/sec */ + +/* Some conversion factors */ + +#define SEC_PER_MEGAYEAR 3.155e13 +#define SEC_PER_YEAR 3.155e7 + +#ifndef ASMTH +#define ASMTH 1.25 /*!< ASMTH gives the scale of the short-range/long-range force split in units of FFT-mesh cells */ +#endif + +#ifndef RCUT +#define RCUT 4.5 /*!< RCUT gives the maximum distance (in units of the scale used for the force split) out to + which short-range forces are evaluated in the short-range tree walk. */ +#endif + +#define MAX_NGB 20000 /*!< defines maximum length of neighbour list */ + +#define MAXLEN_OUTPUTLIST 500 /*!< maxmimum number of entries in list of snapshot output times */ + +#define DRIFT_TABLE_LENGTH 1000 /*!< length of the lookup table used to hold the drift and kick factors */ + +#define MAXITER 150 /*!< maxmimum number of steps for SPH neighbour iteration */ + + +#ifdef DOUBLEPRECISION /*!< If defined, the variable type FLOAT is set to "double", otherwise to FLOAT */ +#define FLOAT double +#else +#define FLOAT float +#endif + + +#ifndef TWODIMS +#define NUMDIMS 3 /*!< For 3D-normalized kernel */ +#define KERNEL_COEFF_1 2.546479089470 /*!< Coefficients for SPH spline kernel and its derivative */ +#define KERNEL_COEFF_2 15.278874536822 +#define KERNEL_COEFF_3 45.836623610466 +#define KERNEL_COEFF_4 30.557749073644 +#define KERNEL_COEFF_5 5.092958178941 +#define KERNEL_COEFF_6 (-15.278874536822) +#define NORM_COEFF 4.188790204786 /*!< Coefficient for kernel normalization. Note: 4.0/3 * PI = 4.188790204786 */ +#else +#define NUMDIMS 2 /*!< For 2D-normalized kernel */ +#define KERNEL_COEFF_1 (5.0/7*2.546479089470) /*!< Coefficients for SPH spline kernel and its derivative */ +#define KERNEL_COEFF_2 (5.0/7*15.278874536822) +#define KERNEL_COEFF_3 (5.0/7*45.836623610466) +#define KERNEL_COEFF_4 (5.0/7*30.557749073644) +#define KERNEL_COEFF_5 (5.0/7*5.092958178941) +#define KERNEL_COEFF_6 (5.0/7*(-15.278874536822)) +#define NORM_COEFF M_PI /*!< Coefficient for kernel normalization. 
*/ +#endif + + + +extern int ThisTask; /*!< the rank of the local processor */ +extern int NTask; /*!< number of processors */ +extern int PTask; /*!< smallest integer such that NTask <= 2^PTask */ + +extern int NumPart; /*!< number of particles on the LOCAL processor */ +#ifdef PY_INTERFACE +extern int NumPartQ; +#endif +extern int N_gas; /*!< number of gas particles on the LOCAL processor */ +#ifdef PY_INTERFACE +extern int N_gasQ; +#endif +extern long long Ntype[6]; /*!< total number of particles of each type */ +extern int NtypeLocal[6]; /*!< local number of particles of each type */ +#ifdef PY_INTERFACE +extern long long NtypeQ[6]; /*!< total number of particles of each type */ +extern int NtypeLocalQ[6]; /*!< local number of particles of each type */ +#endif +extern int NumForceUpdate; /*!< number of active particles on local processor in current timestep */ +extern int NumSphUpdate; /*!< number of active SPH particles on local processor in current timestep */ + +extern double CPUThisRun; /*!< Sums the CPU time for the process (current submission only) */ + + +extern int RestartFlag; /*!< taken from command line used to start code. 0 is normal start-up from + initial conditions, 1 is resuming a run from a set of restart files, while 2 + marks a restart from a snapshot file. */ + +extern char *Exportflag; /*!< Buffer used for flagging whether a particle needs to be exported to another process */ + +extern int *Ngblist; /*!< Buffer to hold indices of neighbours retrieved by the neighbour search routines */ + +extern int TreeReconstructFlag; /*!< Signals that a new tree needs to be constructed */ + +extern int Flag_FullStep; /*!< This flag signals that the current step involves all particles */ + + +extern gsl_rng *random_generator; /*!< the employed random number generator of the GSL library */ + +extern double RndTable[RNDTABLE]; /*!< Hold a table with random numbers, refreshed every timestep */ + + +extern double DomainCorner[3]; /*!< gives the lower left corner of simulation volume */ +extern double DomainCenter[3]; /*!< gives the center of simulation volume */ +extern double DomainLen; /*!< gives the (maximum) side-length of simulation volume */ +extern double DomainFac; /*!< factor used for converting particle coordinates to a Peano-Hilbert mesh covering the simulation volume */ +extern int DomainMyStart; /*!< first domain mesh cell that resides on the local processor */ +extern int DomainMyLast; /*!< last domain mesh cell that resides on the local processor */ +extern int *DomainStartList; /*!< a table that lists the first domain mesh cell for all processors */ +extern int *DomainEndList; /*!< a table that lists the last domain mesh cell for all processors */ +extern double *DomainWork; /*!< a table that gives the total "work" due to the particles stored by each processor */ +extern int *DomainCount; /*!< a table that gives the total number of particles held by each processor */ +extern int *DomainCountSph; /*!< a table that gives the total number of SPH particles held by each processor */ + +extern int *DomainTask; /*!< this table gives for each leaf of the top-level tree the processor it was assigned to */ +extern int *DomainNodeIndex; /*!< this table gives for each leaf of the top-level tree the corresponding node of the gravitational tree */ +extern FLOAT *DomainTreeNodeLen; /*!< this table gives for each leaf of the top-level tree the side-length of the corresponding node of the gravitational tree */ +extern FLOAT *DomainHmax; /*!< this table gives for each leaf of the 
top-level tree the maximum SPH smoothing length among the particles of the corresponding node of the gravitational tree */ + +#ifdef PY_INTERFACE +extern double DomainCornerQ[3]; /*!< gives the lower left corner of simulation volume */ +extern double DomainCenterQ[3]; /*!< gives the center of simulation volume */ +extern double DomainLenQ; /*!< gives the (maximum) side-length of simulation volume */ +extern double DomainFacQ; /*!< factor used for converting particle coordinates to a Peano-Hilbert mesh covering the simulation volume */ +extern int DomainMyStartQ; /*!< first domain mesh cell that resides on the local processor */ +extern int DomainMyLastQ; /*!< last domain mesh cell that resides on the local processor */ +extern int *DomainStartListQ; /*!< a table that lists the first domain mesh cell for all processors */ +extern int *DomainEndListQ; /*!< a table that lists the last domain mesh cell for all processors */ +extern double *DomainWorkQ; /*!< a table that gives the total "work" due to the particles stored by each processor */ +extern int *DomainCountQ; /*!< a table that gives the total number of particles held by each processor */ +extern int *DomainCountSphQ; /*!< a table that gives the total number of SPH particles held by each processor */ + +extern int *DomainTaskQ; /*!< this table gives for each leaf of the top-level tree the processor it was assigned to */ +#endif + + +extern struct DomainNODE +{ + FLOAT s[3]; /*!< center-of-mass coordinates */ + FLOAT vs[3]; /*!< center-of-mass velocities */ + FLOAT mass; /*!< mass of node */ +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + int bitflags; /*!< this bit-field encodes the particle type with the largest softening among the particles of the nodes, and whether there are particles with different softening in the node */ +#else + FLOAT maxsoft; /*!< hold the maximum gravitational softening of particles in the + node if the ADAPTIVE_GRAVSOFT_FORGAS option is selected */ +#endif +#endif +} + *DomainMoment; /*!< this table stores for each node of the top-level tree corresponding node data from the gravitational tree */ + +extern peanokey *DomainKeyBuf; /*!< this points to a buffer used during the exchange of particle data */ +#ifdef PY_INTERFACE +extern peanokey *DomainKeyBufQ; /*!< this points to a buffer used during the exchange of particle data */ +#endif +extern peanokey *Key; /*!< a table used for storing Peano-Hilbert keys for particles */ +extern peanokey *KeySorted; /*!< holds a sorted table of Peano-Hilbert keys for all particles, used to construct top-level tree */ + + +extern int NTopnodes; /*!< total number of nodes in top-level tree */ +extern int NTopleaves; /*!< number of leaves in top-level tree. Each leaf can be assigned to a different processor */ + +#ifdef PY_INTERFACE +extern int NTopnodesQ; /*!< total number of nodes in top-level tree */ +extern int NTopleavesQ; /*!< number of leaves in top-level tree. 
Each leaf can be assigned to a different processor */ +#endif + +extern struct topnode_data +{ + int Daughter; /*!< index of first daughter cell (out of 8) of top-level node */ + int Pstart; /*!< for the present top-level node, this gives the index of the first node in the concatenated list of topnodes collected from all processors */ + int Blocks; /*!< for the present top-level node, this gives the number of corresponding nodes in the concatenated list of topnodes collected from all processors */ + int Leaf; /*!< if the node is a leaf, this gives its number when all leaves are traversed in Peano-Hilbert order */ + peanokey Size; /*!< number of Peano-Hilbert mesh-cells represented by top-level node */ + peanokey StartKey; /*!< first Peano-Hilbert key in top-level node */ + long long Count; /*!< counts the number of particles in this top-level node */ +} +#ifdef PY_INTERFACE + *TopNodesQ, +#endif + *TopNodes; /*!< points to the root node of the top-level tree */ + + +extern double TimeOfLastTreeConstruction; /*!< holds what it says, only used in connection with FORCETEST */ + + + +/* variables for input/output, usually only used on process 0 */ + +extern char ParameterFile[MAXLEN_FILENAME]; /*!< file name of parameterfile used for starting the simulation */ + +extern FILE *FdInfo; /*!< file handle for info.txt log-file. */ +extern FILE *FdEnergy; /*!< file handle for energy.txt log-file. */ +extern FILE *FdTimings; /*!< file handle for timings.txt log-file. */ +extern FILE *FdCPU; /*!< file handle for cpu.txt log-file. */ + +#ifdef FORCETEST +extern FILE *FdForceTest; /*!< file handle for forcetest.txt log-file. */ +#endif + + +extern double DriftTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological drift factors */ +extern double GravKickTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological kick factor for gravitational forces */ +extern double HydroKickTable[DRIFT_TABLE_LENGTH]; /*!< table for the cosmological kick factor for hydrodynmical forces */ + +extern void *CommBuffer; /*!< points to communication buffer, which is used in the domain decomposition, the + parallel tree-force computation, the SPH routines, etc. */ +#ifdef PY_INTERFACE +extern void *CommBufferQ; /*!< points to communication buffer, which is used in the domain decomposition, the + parallel tree-force computation, the SPH routines, etc. */ +#endif + + +/*! This structure contains data which is the SAME for all tasks (mostly code parameters read from the + * parameter file). Holding this data in a structure is convenient for writing/reading the restart file, and + * it allows the introduction of new global variables in a simple way. The only thing to do is to introduce + * them into this structure. + */ +extern struct global_data_all_processes +{ + long long TotNumPart; /*!< total particle numbers (global value) */ + long long TotN_gas; /*!< total gas particle number (global value) */ + +#ifdef PY_INTERFACE + long long TotNumPartQ; /*!< total particle numbers (global value) */ + long long TotN_gasQ; /*!< total gas particle number (global value) */ +#endif + + int MaxPart; /*!< This gives the maxmimum number of particles that can be stored on one processor. */ + int MaxPartSph; /*!< This gives the maxmimum number of SPH particles that can be stored on one processor. */ + +#ifdef PY_INTERFACE + int MaxPartQ; /*!< This gives the maxmimum number of particles that can be stored on one processor. */ + int MaxPartSphQ; /*!< This gives the maxmimum number of SPH particles that can be stored on one processor. 
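These limits are usually derived from PartAllocFactor (described below); a hedged sketch of the relation, since the set-up code is not shown in this file:

    All.MaxPart    = All.PartAllocFactor * (All.TotNumPart / NTask);
    All.MaxPartSph = All.PartAllocFactor * (All.TotN_gas / NTask);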
*/ +#endif + + double BoxSize; /*!< Boxsize in case periodic boundary conditions are used */ + + int ICFormat; /*!< selects different versions of IC file-format */ + + int SnapFormat; /*!< selects different versions of snapshot file-formats */ + + int NumFilesPerSnapshot; /*!< number of files in multi-file snapshot dumps */ + int NumFilesWrittenInParallel;/*!< maximum number of files that may be written simultaneously when + writing/reading restart-files, or when writing snapshot files */ + + int BufferSize; /*!< size of communication buffer in MB */ + int BunchSizeForce; /*!< number of particles fitting into the buffer in the parallel tree-force algorithm */ + int BunchSizeDensity; /*!< number of particles fitting into the communication buffer in the density computation */ + int BunchSizeHydro; /*!< number of particles fitting into the communication buffer in the SPH hydrodynamical force computation */ + int BunchSizeDomain; /*!< number of particles fitting into the communication buffer in the domain decomposition */ + +#ifdef PY_INTERFACE + int BunchSizeSph; +#endif + + double PartAllocFactor; /*!< in order to maintain work-load balance, the particle load will usually + NOT be balanced. Each processor allocates memory for PartAllocFactor times + the average number of particles to allow for that */ + + double TreeAllocFactor; /*!< Each processor allocates a number of nodes which is TreeAllocFactor times + the maximum(!) number of particles. Note: A typical local tree for N + particles needs usually about ~0.65*N nodes. */ + + /* some SPH parameters */ + + double DesNumNgb; /*!< Desired number of SPH neighbours */ + double MaxNumNgbDeviation; /*!< Maximum allowed deviation neighbour number */ + + double ArtBulkViscConst; /*!< Sets the parameter \f$\alpha\f$ of the artificial viscosity */ + double InitGasTemp; /*!< may be used to set the temperature in the IC's */ + double MinGasTemp; /*!< may be used to set a floor for the gas temperature */ + double MinEgySpec; /*!< the minimum allowed temperature expressed as energy per unit mass */ + + + /* some force counters */ + + long long TotNumOfForces; /*!< counts total number of force computations */ + long long NumForcesSinceLastDomainDecomp; /*!< count particle updates since last domain decomposition */ + + + /* system of units */ + + double G; /*!< Gravity-constant in internal units */ + double UnitTime_in_s; /*!< factor to convert internal time unit to seconds/h */ + double UnitMass_in_g; /*!< factor to convert internal mass unit to grams/h */ + double UnitVelocity_in_cm_per_s; /*!< factor to convert intqernal velocity unit to cm/sec */ + double UnitLength_in_cm; /*!< factor to convert internal length unit to cm/h */ + double UnitPressure_in_cgs; /*!< factor to convert internal pressure unit to cgs units (little 'h' still around!) */ + double UnitDensity_in_cgs; /*!< factor to convert internal length unit to g/cm^3*h^2 */ + double UnitCoolingRate_in_cgs; /*!< factor to convert internal cooling rate to cgs units */ + double UnitEnergy_in_cgs; /*!< factor to convert internal energy to cgs units */ + double UnitTime_in_Megayears; /*!< factor to convert internal time to megayears/h */ + double GravityConstantInternal; /*!< If set to zero in the parameterfile, the internal value of the + gravitational constant is set to the Newtonian value based on the system of + units specified. Otherwise the value provided is taken as internal gravity constant G. 
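For reference, the corresponding conversion carried out in set_units() further below is

    if(All.GravityConstantInternal == 0)
      All.G = GRAVITY / pow(All.UnitLength_in_cm, 3) * All.UnitMass_in_g * pow(All.UnitTime_in_s, 2);
    else
      All.G = All.GravityConstantInternal;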
*/ + + + /* Cosmological parameters */ + + double Hubble; /*!< Hubble-constant in internal units */ + double Omega0; /*!< matter density in units of the critical density (at z=0)*/ + double OmegaLambda; /*!< vaccum energy density relative to crictical density (at z=0) */ + double OmegaBaryon; /*!< baryon density in units of the critical density (at z=0)*/ + double HubbleParam; /*!< little `h', i.e. Hubble constant in units of 100 km/s/Mpc. Only needed to get absolute physical values for cooling physics */ + + + /* Code options */ + + int ComovingIntegrationOn; /*!< flags that comoving integration is enabled */ + int PeriodicBoundariesOn; /*!< flags that periodic boundaries are enabled */ + int ResubmitOn; /*!< flags that automatic resubmission of job to queue system is enabled */ + int TypeOfOpeningCriterion; /*!< determines tree cell-opening criterion: 0 for Barnes-Hut, 1 for relative criterion */ + int TypeOfTimestepCriterion; /*!< gives type of timestep criterion (only 0 supported right now - unlike gadget-1.1) */ + int OutputListOn; /*!< flags that output times are listed in a specified file */ + + + /* Parameters determining output frequency */ + + int SnapshotFileCount; /*!< number of snapshot that is written next */ + double TimeBetSnapshot; /*!< simulation time interval between snapshot files */ + double TimeOfFirstSnapshot; /*!< simulation time of first snapshot files */ + double CpuTimeBetRestartFile; /*!< cpu-time between regularly generated restart files */ + double TimeLastRestartFile; /*!< cpu-time when last restart-file was written */ + double TimeBetStatistics; /*!< simulation time interval between computations of energy statistics */ + double TimeLastStatistics; /*!< simulation time when the energy statistics was computed the last time */ + int NumCurrentTiStep; /*!< counts the number of system steps taken up to this point */ + + + /* Current time of the simulation, global step, and end of simulation */ + + double Time; /*!< current time of the simulation */ + double TimeBegin; /*!< time of initial conditions of the simulation */ + double TimeStep; /*!< difference between current times of previous and current timestep */ + double TimeMax; /*!< marks the point of time until the simulation is to be evolved */ + + + /* variables for organizing discrete timeline */ + + double Timebase_interval; /*!< factor to convert from floating point time interval to integer timeline */ + int Ti_Current; /*!< current time on integer timeline */ + int Ti_nextoutput; /*!< next output time on integer timeline */ +#ifdef FLEXSTEPS + int PresentMinStep; /*!< If FLEXSTEPS is used, particle timesteps are chosen as multiples of the present minimum timestep. 
*/ + int PresentMaxStep; /*!< If FLEXSTEPS is used, this is the maximum timestep in timeline units, rounded down to the next power 2 division */ +#endif +#ifdef PMGRID + int PM_Ti_endstep; /*!< begin of present long-range timestep */ + int PM_Ti_begstep; /*!< end of present long-range timestep */ +#endif + + + /* Placement of PM grids */ + +#ifdef PMGRID + double Asmth[2]; /*!< Gives the scale of the long-range/short-range split (in mesh-cells), both for the coarse and the high-res mesh */ + double Rcut[2]; /*!< Gives the maximum radius for which the short-range force is evaluated with the tree (in mesh-cells), both for the coarse and the high-res mesh */ + double Corner[2][3]; /*!< lower left corner of coarse and high-res PM-mesh */ + double UpperCorner[2][3]; /*!< upper right corner of coarse and high-res PM-mesh */ + double Xmintot[2][3]; /*!< minimum particle coordinates both for coarse and high-res PM-mesh */ + double Xmaxtot[2][3]; /*!< maximum particle coordinates both for coarse and high-res PM-mesh */ + double TotalMeshSize[2]; /*!< total extension of coarse and high-res PM-mesh */ +#endif + + + /* Variables that keep track of cumulative CPU consumption */ + + double TimeLimitCPU; /*!< CPU time limit as defined in parameterfile */ + double CPU_TreeConstruction; /*!< time spent for constructing the gravitational tree */ + double CPU_TreeWalk; /*!< actual time spent for pure tree-walks */ + double CPU_Gravity; /*!< cumulative time used for gravity computation (tree-algorithm only) */ + double CPU_Potential; /*!< time used for computing gravitational potentials */ + double CPU_Domain; /*!< cumulative time spent for domain decomposition */ + double CPU_Snapshot; /*!< time used for writing snapshot files */ + double CPU_Total; /*!< cumulative time spent for domain decomposition */ + double CPU_CommSum; /*!< accumulated time used for communication, and for collecting partial results, in tree-gravity */ + double CPU_Imbalance; /*!< cumulative time lost accross all processors as work-load imbalance in gravitational tree */ + double CPU_HydCompWalk; /*!< time used for actual SPH computations, including neighbour search */ + double CPU_HydCommSumm; /*!< cumulative time used for communication in SPH, and for collecting partial results */ + double CPU_HydImbalance; /*!< cumulative time lost due to work-load imbalance in SPH */ + double CPU_Hydro; /*!< cumulative time spent for SPH related computations */ + double CPU_EnsureNgb; /*!< time needed to iterate on correct neighbour numbers */ + double CPU_Predict; /*!< cumulative time to drift the system forward in time, including dynamic tree updates */ + double CPU_TimeLine; /*!< time used for determining new timesteps, and for organizing the timestepping, including kicks of active particles */ + double CPU_PM; /*!< time used for long-range gravitational force */ + double CPU_Peano; /*!< time required to establish Peano-Hilbert order */ + + /* tree code opening criterion */ + + double ErrTolTheta; /*!< BH tree opening angle */ + double ErrTolForceAcc; /*!< parameter for relative opening criterion in tree walk */ + + + /* adjusts accuracy of time-integration */ + + double ErrTolIntAccuracy; /*!< accuracy tolerance parameter \f$ \eta \f$ for timestep criterion. The + timestep is \f$ \Delta t = \sqrt{\frac{2 \eta eps}{a}} \f$ */ + + double MinSizeTimestep; /*!< minimum allowed timestep. Normally, the simulation terminates if the + timestep determined by the timestep criteria falls below this limit. 
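A sketch of the acceleration-based criterion described for ErrTolIntAccuracy above, with eps the gravitational softening and ac the magnitude of the acceleration (both names are placeholders, not variables of this file):

    dt = sqrt(2 * All.ErrTolIntAccuracy * eps / ac);
    if(dt > All.MaxSizeTimestep)
      dt = All.MaxSizeTimestep;
    /* if dt now falls below All.MinSizeTimestep, the run is normally terminated */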
*/ + double MaxSizeTimestep; /*!< maximum allowed timestep */ + + double MaxRMSDisplacementFac; /*!< this determines a global timestep criterion for cosmological simulations + in comoving coordinates. To this end, the code computes the rms velocity + of all particles, and limits the timestep such that the rms displacement + is a fraction of the mean particle separation (determined from the + particle mass and the cosmological parameters). This parameter specifies + this fraction. */ + + double CourantFac; /*!< SPH-Courant factor */ + + + /* frequency of tree reconstruction/domain decomposition */ + + double TreeDomainUpdateFrequency; /*!< controls frequency of domain decompositions */ + + + /* Gravitational and hydrodynamical softening lengths (given in terms of an `equivalent' Plummer softening length). + * Five groups of particles are supported 0="gas", 1="halo", 2="disk", 3="bulge", 4="stars", 5="bndry" + */ + + double MinGasHsmlFractional; /*!< minimum allowed SPH smoothing length in units of SPH gravitational softening length */ + double MinGasHsml; /*!< minimum allowed SPH smoothing length */ + + + double SofteningGas; /*!< comoving gravitational softening lengths for type 0 */ + double SofteningHalo; /*!< comoving gravitational softening lengths for type 1 */ + double SofteningDisk; /*!< comoving gravitational softening lengths for type 2 */ + double SofteningBulge; /*!< comoving gravitational softening lengths for type 3 */ + double SofteningStars; /*!< comoving gravitational softening lengths for type 4 */ + double SofteningBndry; /*!< comoving gravitational softening lengths for type 5 */ + + double SofteningGasMaxPhys; /*!< maximum physical softening length for type 0 */ + double SofteningHaloMaxPhys; /*!< maximum physical softening length for type 1 */ + double SofteningDiskMaxPhys; /*!< maximum physical softening length for type 2 */ + double SofteningBulgeMaxPhys; /*!< maximum physical softening length for type 3 */ + double SofteningStarsMaxPhys; /*!< maximum physical softening length for type 4 */ + double SofteningBndryMaxPhys; /*!< maximum physical softening length for type 5 */ + + double SofteningTable[6]; /*!< current (comoving) gravitational softening lengths for each particle type */ + double ForceSoftening[6]; /*!< the same, but multiplied by a factor 2.8 - at that scale the force is Newtonian */ +#ifdef PY_INTERFACE + double ForceSofteningQ; +#endif + double MassTable[6]; /*!< Table with particle masses for particle types with equal mass. + If particle masses are all equal for one type, the corresponding entry in MassTable + is set to this value, allowing the size of the snapshot files to be reduced. 
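A sketch of the read-side convention (individual_mass is a hypothetical array standing in for the snapshot's mass block; see the io_header description below):

    if(header.mass[type] > 0)
      P[i].Mass = header.mass[type];    /* one common mass for the whole type, no mass block needed */
    else
      P[i].Mass = individual_mass[i];   /* masses stored particle by particle in the snapshot */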
*/ + + + + /* some filenames */ + + char InitCondFile[MAXLEN_FILENAME]; /*!< filename of initial conditions */ + char OutputDir[MAXLEN_FILENAME]; /*!< output directory of the code */ + char SnapshotFileBase[MAXLEN_FILENAME]; /*!< basename to construct the names of snapshotf files */ + char EnergyFile[MAXLEN_FILENAME]; /*!< name of file with energy statistics */ + char CpuFile[MAXLEN_FILENAME]; /*!< name of file with cpu-time statistics */ + char InfoFile[MAXLEN_FILENAME]; /*!< name of log-file with a list of the timesteps taken */ + char TimingsFile[MAXLEN_FILENAME]; /*!< name of file with performance metrics of gravitational tree algorithm */ + char RestartFile[MAXLEN_FILENAME]; /*!< basename of restart-files */ + char ResubmitCommand[MAXLEN_FILENAME]; /*!< name of script-file that will be executed for automatic restart */ + char OutputListFilename[MAXLEN_FILENAME]; /*!< name of file with list of desired output times */ + + double OutputListTimes[MAXLEN_OUTPUTLIST]; /*!< table with desired output times */ + int OutputListLength; /*!< number of output times stored in the table of desired output times */ + +} + All; /*!< a container variable for global variables that are equal on all processors */ + + + +/*! This structure holds all the information that is + * stored for each particle of the simulation. + */ +extern struct particle_data +{ + FLOAT Pos[3]; /*!< particle position at its current time */ + FLOAT Mass; /*!< particle mass */ + FLOAT Vel[3]; /*!< particle velocity at its current time */ + FLOAT GravAccel[3]; /*!< particle acceleration due to gravity */ +#ifdef PMGRID + FLOAT GravPM[3]; /*!< particle acceleration due to long-range PM gravity force*/ +#endif +#ifdef FORCETEST + FLOAT GravAccelDirect[3]; /*!< particle acceleration when computed with direct summation */ +#endif + FLOAT Potential; /*!< gravitational potential */ + FLOAT OldAcc; /*!< magnitude of old gravitational force. Used in relative opening criterion */ +#ifndef LONGIDS + unsigned int ID; /*!< particle identifier */ +#else + unsigned long long ID; /*!< particle identifier */ +#endif + + int Type; /*!< flags particle type. 0=gas, 1=halo, 2=disk, 3=bulge, 4=stars, 5=bndry */ + int Ti_endstep; /*!< marks start of current timestep of particle on integer timeline */ + int Ti_begstep; /*!< marks end of current timestep of particle on integer timeline */ +#ifdef FLEXSTEPS + int FlexStepGrp; /*!< a random 'offset' on the timeline to create a smooth groouping of particles */ +#endif + float GravCost; /*!< weight factor used for balancing the work-load */ +#ifdef PSEUDOSYMMETRIC + float AphysOld; /*!< magnitude of acceleration in last timestep. Used to make a first order + prediction of the change of acceleration expected in the future, thereby + allowing to guess whether a decrease/increase of the timestep should occur + in the timestep that is started. */ +#endif +} + *P, /*!< holds particle data on local processor */ +#ifdef PY_INTERFACE + *Q, + *DomainPartBufQ, /*!< buffer for particle data used in domain decomposition */ +#endif + *DomainPartBuf; /*!< buffer for particle data used in domain decomposition */ + + +/* the following struture holds data that is stored for each SPH particle in addition to the collisionless + * variables. 
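In the entropy formulation used here, the pressure follows from the entropic function and the density; the update applied in density() further below reads

    SphP[i].Pressure = (SphP[i].Entropy + SphP[i].DtEntropy * dt_entr) * pow(SphP[i].Density, GAMMA);

with dt_entr the time offset to the middle of the particle's timestep.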
+ */ +extern struct sph_particle_data +{ + FLOAT Entropy; /*!< current value of entropy (actually entropic function) of particle */ + FLOAT Density; /*!< current baryonic mass density of particle */ + FLOAT Hsml; /*!< current smoothing length */ + FLOAT Left; /*!< lower bound in iterative smoothing length search */ + FLOAT Right; /*!< upper bound in iterative smoothing length search */ + FLOAT NumNgb; /*!< weighted number of neighbours found */ + FLOAT Pressure; /*!< current pressure */ + FLOAT DtEntropy; /*!< rate of change of entropy */ + FLOAT HydroAccel[3]; /*!< acceleration due to hydrodynamical force */ + FLOAT VelPred[3]; /*!< predicted SPH particle velocity at the current time */ + FLOAT DivVel; /*!< local velocity divergence */ + FLOAT CurlVel; /*!< local velocity curl */ + FLOAT Rot[3]; /*!< local velocity curl */ + FLOAT DhsmlDensityFactor; /*!< correction factor needed in the equation of motion of the conservative entropy formulation of SPH */ + FLOAT MaxSignalVel; /*!< maximum "signal velocity" occuring for this particle */ +#if PY_INTERFACE + FLOAT Observable; + FLOAT ObsMoment0; + FLOAT ObsMoment1; +#endif +} + *SphP, /*!< holds SPH particle data on local processor */ +#ifdef PY_INTERFACE + *SphQ, + *DomainSphBufQ, /*!< buffer for SPH particle data in domain decomposition */ +#endif + *DomainSphBuf; /*!< buffer for SPH particle data in domain decomposition */ + + + + + +/* Variables for Tree + */ + +extern int MaxNodes; /*!< maximum allowed number of internal nodes */ +extern int Numnodestree; /*!< number of (internal) nodes in each tree */ + +extern struct NODE +{ + FLOAT len; /*!< sidelength of treenode */ + FLOAT center[3]; /*!< geometrical center of node */ +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + FLOAT maxsoft; /*!< hold the maximum gravitational softening of particles in the + node if the ADAPTIVE_GRAVSOFT_FORGAS option is selected */ +#endif + union + { + int suns[8]; /*!< temporary pointers to daughter nodes */ + struct + { + FLOAT s[3]; /*!< center of mass of node */ + FLOAT mass; /*!< mass of node */ + int bitflags; /*!< a bit-field with various information on the node */ + int sibling; /*!< this gives the next node in the walk in case the current node can be used */ + int nextnode; /*!< this gives the next node in case the current node needs to be opened */ + int father; /*!< this gives the parent node of each node (or -1 if we have the root node) */ + } + d; + } + u; +} + *Nodes_base, /*!< points to the actual memory allocted for the nodes */ + *Nodes; /*!< this is a pointer used to access the nodes which is shifted such that Nodes[All.MaxPart] + gives the first allocated node */ + + +extern int *Nextnode; /*!< gives next node in tree walk */ +extern int *Father; /*!< gives parent node in tree */ + + +extern struct extNODE /*!< this structure holds additional tree-node information which is not needed in the actual gravity computation */ +{ + FLOAT hmax; /*!< maximum SPH smoothing length in node. Only used for gas particles */ + FLOAT vs[3]; /*!< center-of-mass velocity */ +} + *Extnodes_base, /*!< points to the actual memory allocted for the extended node information */ + *Extnodes; /*!< provides shifted access to extended node information, parallel to Nodes/Nodes_base */ + + + + + +/*! Header for the standard file format. + */ +extern struct io_header +{ + int npart[6]; /*!< number of particles of each type in this file */ + double mass[6]; /*!< mass of particles of each type. 
If 0, then the masses are explicitly + stored in the mass-block of the snapshot file, otherwise they are omitted */ + double time; /*!< time of snapshot file */ + double redshift; /*!< redshift of snapshot file */ + int flag_sfr; /*!< flags whether the simulation was including star formation */ + int flag_feedback; /*!< flags whether feedback was included (obsolete) */ + unsigned int npartTotal[6]; /*!< total number of particles of each type in this snapshot. This can be + different from npart if one is dealing with a multi-file snapshot. */ + int flag_cooling; /*!< flags whether cooling was included */ + int num_files; /*!< number of files in multi-file snapshot */ + double BoxSize; /*!< box-size of simulation in case periodic boundaries were used */ + double Omega0; /*!< matter density in units of critical density */ + double OmegaLambda; /*!< cosmological constant parameter */ + double HubbleParam; /*!< Hubble parameter in units of 100 km/sec/Mpc */ + int flag_stellarage; /*!< flags whether the file contains formation times of star particles */ + int flag_metals; /*!< flags whether the file contains metallicity values for gas and star particles */ + unsigned int npartTotalHighWord[6]; /*!< High word of the total number of particles of each type */ + int flag_entropy_instead_u; /*!< flags that IC-file contains entropy instead of u */ + char fill[60]; /*!< fills to 256 Bytes */ +} + header; /*!< holds header for snapshot files */ + + +#define IO_NBLOCKS 11 /*!< total number of defined information blocks for snapshot files. + Must be equal to the number of entries in "enum iofields" */ + +enum iofields /*!< this enumeration lists the defined output blocks in snapshot files. Not all of them need to be present. */ +{ + IO_POS, + IO_VEL, + IO_ID, + IO_MASS, + IO_U, + IO_RHO, + IO_HSML, + IO_POT, + IO_ACCEL, + IO_DTENTR, + IO_TSTP, +}; + + +extern char Tab_IO_Labels[IO_NBLOCKS][4]; /* +#include +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file begrun.c + * \brief initial set-up of a simulation run + * + * This file contains various functions to initialize a simulation run. In + * particular, the parameterfile is read in and parsed, the initial + * conditions or restart files are read, and global variables are + * initialized to their proper values. + */ + + +/*! This function performs the initial set-up of the simulation. First, the + * parameterfile is set, then routines for setting units, reading + * ICs/restart-files are called, auxialiary memory is allocated, etc. + */ +void begrun(void) +{ + struct global_data_all_processes all; + + if(ThisTask == 0) + { + printf("\nThis is Gadget, version `%s'.\n", GADGETVERSION); + printf("\nRunning on %d processors.\n", NTask); + } + + read_parameter_file(ParameterFile); /* ... read in parameters for this run */ + + allocate_commbuffers(); /* ... allocate buffer-memory for particle + exchange during force computation */ + set_units(); + +#if defined(PERIODIC) && (!defined(PMGRID) || defined(FORCETEST)) + ewald_init(); +#endif + + open_outputfiles(); + + random_generator = gsl_rng_alloc(gsl_rng_ranlxd1); + gsl_rng_set(random_generator, 42); /* start-up seed */ + +#ifdef PMGRID + long_range_init(); +#endif + + All.TimeLastRestartFile = CPUThisRun; + + if(RestartFlag == 0 || RestartFlag == 2) + { + set_random_numbers(); + + init(); /* ... read in initial model */ + } + else + { + all = All; /* save global variables. (will be read from restart file) */ + + restart(RestartFlag); /* ... 
read restart file. Note: This also resets + all variables in the struct `All'. + However, during the run, some variables in the parameter + file are allowed to be changed, if desired. These need to + copied in the way below. + Note: All.PartAllocFactor is treated in restart() separately. + */ + + All.MinSizeTimestep = all.MinSizeTimestep; + All.MaxSizeTimestep = all.MaxSizeTimestep; + All.BufferSize = all.BufferSize; + All.BunchSizeForce = all.BunchSizeForce; + All.BunchSizeDensity = all.BunchSizeDensity; + All.BunchSizeHydro = all.BunchSizeHydro; + All.BunchSizeDomain = all.BunchSizeDomain; + + All.TimeLimitCPU = all.TimeLimitCPU; + All.ResubmitOn = all.ResubmitOn; + All.TimeBetSnapshot = all.TimeBetSnapshot; + All.TimeBetStatistics = all.TimeBetStatistics; + All.CpuTimeBetRestartFile = all.CpuTimeBetRestartFile; + All.ErrTolIntAccuracy = all.ErrTolIntAccuracy; + All.MaxRMSDisplacementFac = all.MaxRMSDisplacementFac; + + All.ErrTolForceAcc = all.ErrTolForceAcc; + + All.TypeOfTimestepCriterion = all.TypeOfTimestepCriterion; + All.TypeOfOpeningCriterion = all.TypeOfOpeningCriterion; + All.NumFilesWrittenInParallel = all.NumFilesWrittenInParallel; + All.TreeDomainUpdateFrequency = all.TreeDomainUpdateFrequency; + + All.SnapFormat = all.SnapFormat; + All.NumFilesPerSnapshot = all.NumFilesPerSnapshot; + All.MaxNumNgbDeviation = all.MaxNumNgbDeviation; + All.ArtBulkViscConst = all.ArtBulkViscConst; + + + All.OutputListOn = all.OutputListOn; + All.CourantFac = all.CourantFac; + + All.OutputListLength = all.OutputListLength; + memcpy(All.OutputListTimes, all.OutputListTimes, sizeof(double) * All.OutputListLength); + + + strcpy(All.ResubmitCommand, all.ResubmitCommand); + strcpy(All.OutputListFilename, all.OutputListFilename); + strcpy(All.OutputDir, all.OutputDir); + strcpy(All.RestartFile, all.RestartFile); + strcpy(All.EnergyFile, all.EnergyFile); + strcpy(All.InfoFile, all.InfoFile); + strcpy(All.CpuFile, all.CpuFile); + strcpy(All.TimingsFile, all.TimingsFile); + strcpy(All.SnapshotFileBase, all.SnapshotFileBase); + + if(All.TimeMax != all.TimeMax) + readjust_timebase(All.TimeMax, all.TimeMax); + } + +#ifdef PMGRID + long_range_init_regionsize(); +#endif + + if(All.ComovingIntegrationOn) + init_drift_table(); + + if(RestartFlag == 2) + All.Ti_nextoutput = find_next_outputtime(All.Ti_Current + 1); + else + All.Ti_nextoutput = find_next_outputtime(All.Ti_Current); + + + All.TimeLastRestartFile = CPUThisRun; +} + + + + +/*! Computes conversion factors between internal code units and the + * cgs-system. 
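For example, with the frequently used choices (an illustration, not values prescribed by the code)

    UnitLength_in_cm          3.085678e21    % 1 kpc
    UnitMass_in_g             1.989e43       % 1e10 solar masses
    UnitVelocity_in_cm_per_s  1e5            % 1 km/sec

one obtains UnitTime_in_s = UnitLength_in_cm / UnitVelocity_in_cm_per_s = 3.086e16 s, i.e. a UnitTime_in_Megayears of roughly 978.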
+ */ +void set_units(void) +{ + double meanweight; + + All.UnitTime_in_s = All.UnitLength_in_cm / All.UnitVelocity_in_cm_per_s; + All.UnitTime_in_Megayears = All.UnitTime_in_s / SEC_PER_MEGAYEAR; + + if(All.GravityConstantInternal == 0) + All.G = GRAVITY / pow(All.UnitLength_in_cm, 3) * All.UnitMass_in_g * pow(All.UnitTime_in_s, 2); + else + All.G = All.GravityConstantInternal; + + All.UnitDensity_in_cgs = All.UnitMass_in_g / pow(All.UnitLength_in_cm, 3); + All.UnitPressure_in_cgs = All.UnitMass_in_g / All.UnitLength_in_cm / pow(All.UnitTime_in_s, 2); + All.UnitCoolingRate_in_cgs = All.UnitPressure_in_cgs / All.UnitTime_in_s; + All.UnitEnergy_in_cgs = All.UnitMass_in_g * pow(All.UnitLength_in_cm, 2) / pow(All.UnitTime_in_s, 2); + + /* convert some physical input parameters to internal units */ + + All.Hubble = HUBBLE * All.UnitTime_in_s; + + if(ThisTask == 0) + { + printf("\nHubble (internal units) = %g\n", All.Hubble); + printf("G (internal units) = %g\n", All.G); + printf("UnitMass_in_g = %g \n", All.UnitMass_in_g); + printf("UnitTime_in_s = %g \n", All.UnitTime_in_s); + printf("UnitVelocity_in_cm_per_s = %g \n", All.UnitVelocity_in_cm_per_s); + printf("UnitDensity_in_cgs = %g \n", All.UnitDensity_in_cgs); + printf("UnitEnergy_in_cgs = %g \n", All.UnitEnergy_in_cgs); + printf("\n"); + } + + meanweight = 4.0 / (1 + 3 * HYDROGEN_MASSFRAC); /* note: we assume neutral gas here */ + +#ifdef ISOTHERM_EQS + All.MinEgySpec = 0; +#else + All.MinEgySpec = 1 / meanweight * (1.0 / GAMMA_MINUS1) * (BOLTZMANN / PROTONMASS) * All.MinGasTemp; + All.MinEgySpec *= All.UnitMass_in_g / All.UnitEnergy_in_cgs; +#endif + +} + + + +/*! This function opens various log-files that report on the status and + * performance of the simulstion. On restart from restart-files + * (start-option 1), the code will append to these files. + */ +void open_outputfiles(void) +{ + char mode[2], buf[200]; + + if(ThisTask != 0) /* only the root processor writes to the log files */ + return; + + if(RestartFlag == 0) + strcpy(mode, "w"); + else + strcpy(mode, "a"); + + + sprintf(buf, "%s%s", All.OutputDir, All.CpuFile); + if(!(FdCPU = fopen(buf, mode))) + { + printf("error in opening file '%s'\n", buf); + endrun(1); + } + + sprintf(buf, "%s%s", All.OutputDir, All.InfoFile); + if(!(FdInfo = fopen(buf, mode))) + { + printf("error in opening file '%s'\n", buf); + endrun(1); + } + + sprintf(buf, "%s%s", All.OutputDir, All.EnergyFile); + if(!(FdEnergy = fopen(buf, mode))) + { + printf("error in opening file '%s'\n", buf); + endrun(1); + } + + sprintf(buf, "%s%s", All.OutputDir, All.TimingsFile); + if(!(FdTimings = fopen(buf, mode))) + { + printf("error in opening file '%s'\n", buf); + endrun(1); + } + +#ifdef FORCETEST + if(RestartFlag == 0) + { + sprintf(buf, "%s%s", All.OutputDir, "forcetest.txt"); + if(!(FdForceTest = fopen(buf, "w"))) + { + printf("error in opening file '%s'\n", buf); + endrun(1); + } + fclose(FdForceTest); + } +#endif +} + + +/*! This function closes the global log-files. + */ +void close_outputfiles(void) +{ + if(ThisTask != 0) /* only the root processor writes to the log files */ + return; + + fclose(FdCPU); + fclose(FdInfo); + fclose(FdEnergy); + fclose(FdTimings); +#ifdef FORCETEST + fclose(FdForceTest); +#endif +} + + + + +/*! This function parses the parameterfile in a simple way. Each paramater + * is defined by a keyword (`tag'), and can be either of type double, int, + * or character string. 
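Each non-comment line simply pairs a tag with its value, for instance (an illustrative excerpt, not a complete parameterfile):

    %  lines starting with '%' are ignored
    InitCondFile           ./ICs/galaxy.dat
    OutputDir              ./output/
    TimeMax                1.0
    ComovingIntegrationOn  0
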
The routine makes sure that each parameter + * appears exactly once in the parameterfile, otherwise error messages are + * produced that complain about the missing parameters. + */ +void read_parameter_file(char *fname) +{ +#define DOUBLE 1 +#define STRING 2 +#define INT 3 +#define MAXTAGS 300 + + FILE *fd, *fdout; + char buf[200], buf1[200], buf2[200], buf3[400]; + int i, j, nt; + int id[MAXTAGS]; + void *addr[MAXTAGS]; + char tag[MAXTAGS][50]; + int errorFlag = 0; + + + if(sizeof(long long) != 8) + { + if(ThisTask == 0) + printf("\nType `long long' is not 64 bit on this platform. Stopping.\n\n"); + endrun(0); + } + + if(sizeof(int) != 4) + { + if(ThisTask == 0) + printf("\nType `int' is not 32 bit on this platform. Stopping.\n\n"); + endrun(0); + } + + if(sizeof(float) != 4) + { + if(ThisTask == 0) + printf("\nType `float' is not 32 bit on this platform. Stopping.\n\n"); + endrun(0); + } + + if(sizeof(double) != 8) + { + if(ThisTask == 0) + printf("\nType `double' is not 64 bit on this platform. Stopping.\n\n"); + endrun(0); + } + + + if(ThisTask == 0) /* read parameter file on process 0 */ + { + nt = 0; + + strcpy(tag[nt], "InitCondFile"); + addr[nt] = All.InitCondFile; + id[nt++] = STRING; + + strcpy(tag[nt], "OutputDir"); + addr[nt] = All.OutputDir; + id[nt++] = STRING; + + strcpy(tag[nt], "SnapshotFileBase"); + addr[nt] = All.SnapshotFileBase; + id[nt++] = STRING; + + strcpy(tag[nt], "EnergyFile"); + addr[nt] = All.EnergyFile; + id[nt++] = STRING; + + strcpy(tag[nt], "CpuFile"); + addr[nt] = All.CpuFile; + id[nt++] = STRING; + + strcpy(tag[nt], "InfoFile"); + addr[nt] = All.InfoFile; + id[nt++] = STRING; + + strcpy(tag[nt], "TimingsFile"); + addr[nt] = All.TimingsFile; + id[nt++] = STRING; + + strcpy(tag[nt], "RestartFile"); + addr[nt] = All.RestartFile; + id[nt++] = STRING; + + strcpy(tag[nt], "ResubmitCommand"); + addr[nt] = All.ResubmitCommand; + id[nt++] = STRING; + + strcpy(tag[nt], "OutputListFilename"); + addr[nt] = All.OutputListFilename; + id[nt++] = STRING; + + strcpy(tag[nt], "OutputListOn"); + addr[nt] = &All.OutputListOn; + id[nt++] = INT; + + strcpy(tag[nt], "Omega0"); + addr[nt] = &All.Omega0; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "OmegaBaryon"); + addr[nt] = &All.OmegaBaryon; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "OmegaLambda"); + addr[nt] = &All.OmegaLambda; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "HubbleParam"); + addr[nt] = &All.HubbleParam; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "BoxSize"); + addr[nt] = &All.BoxSize; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "PeriodicBoundariesOn"); + addr[nt] = &All.PeriodicBoundariesOn; + id[nt++] = INT; + + strcpy(tag[nt], "TimeOfFirstSnapshot"); + addr[nt] = &All.TimeOfFirstSnapshot; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "CpuTimeBetRestartFile"); + addr[nt] = &All.CpuTimeBetRestartFile; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "TimeBetStatistics"); + addr[nt] = &All.TimeBetStatistics; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "TimeBegin"); + addr[nt] = &All.TimeBegin; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "TimeMax"); + addr[nt] = &All.TimeMax; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "TimeBetSnapshot"); + addr[nt] = &All.TimeBetSnapshot; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "UnitVelocity_in_cm_per_s"); + addr[nt] = &All.UnitVelocity_in_cm_per_s; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "UnitLength_in_cm"); + addr[nt] = &All.UnitLength_in_cm; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "UnitMass_in_g"); + addr[nt] = &All.UnitMass_in_g; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "TreeDomainUpdateFrequency"); + 
addr[nt] = &All.TreeDomainUpdateFrequency; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "ErrTolIntAccuracy"); + addr[nt] = &All.ErrTolIntAccuracy; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "ErrTolTheta"); + addr[nt] = &All.ErrTolTheta; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "ErrTolForceAcc"); + addr[nt] = &All.ErrTolForceAcc; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "MinGasHsmlFractional"); + addr[nt] = &All.MinGasHsmlFractional; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "MaxSizeTimestep"); + addr[nt] = &All.MaxSizeTimestep; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "MinSizeTimestep"); + addr[nt] = &All.MinSizeTimestep; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "MaxRMSDisplacementFac"); + addr[nt] = &All.MaxRMSDisplacementFac; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "ArtBulkViscConst"); + addr[nt] = &All.ArtBulkViscConst; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "CourantFac"); + addr[nt] = &All.CourantFac; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "DesNumNgb"); + addr[nt] = &All.DesNumNgb; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "MaxNumNgbDeviation"); + addr[nt] = &All.MaxNumNgbDeviation; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "ComovingIntegrationOn"); + addr[nt] = &All.ComovingIntegrationOn; + id[nt++] = INT; + + strcpy(tag[nt], "ICFormat"); + addr[nt] = &All.ICFormat; + id[nt++] = INT; + + strcpy(tag[nt], "SnapFormat"); + addr[nt] = &All.SnapFormat; + id[nt++] = INT; + + strcpy(tag[nt], "NumFilesPerSnapshot"); + addr[nt] = &All.NumFilesPerSnapshot; + id[nt++] = INT; + + strcpy(tag[nt], "NumFilesWrittenInParallel"); + addr[nt] = &All.NumFilesWrittenInParallel; + id[nt++] = INT; + + strcpy(tag[nt], "ResubmitOn"); + addr[nt] = &All.ResubmitOn; + id[nt++] = INT; + + strcpy(tag[nt], "TypeOfTimestepCriterion"); + addr[nt] = &All.TypeOfTimestepCriterion; + id[nt++] = INT; + + strcpy(tag[nt], "TypeOfOpeningCriterion"); + addr[nt] = &All.TypeOfOpeningCriterion; + id[nt++] = INT; + + strcpy(tag[nt], "TimeLimitCPU"); + addr[nt] = &All.TimeLimitCPU; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningHalo"); + addr[nt] = &All.SofteningHalo; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningDisk"); + addr[nt] = &All.SofteningDisk; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningBulge"); + addr[nt] = &All.SofteningBulge; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningGas"); + addr[nt] = &All.SofteningGas; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningStars"); + addr[nt] = &All.SofteningStars; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningBndry"); + addr[nt] = &All.SofteningBndry; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningHaloMaxPhys"); + addr[nt] = &All.SofteningHaloMaxPhys; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningDiskMaxPhys"); + addr[nt] = &All.SofteningDiskMaxPhys; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningBulgeMaxPhys"); + addr[nt] = &All.SofteningBulgeMaxPhys; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningGasMaxPhys"); + addr[nt] = &All.SofteningGasMaxPhys; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningStarsMaxPhys"); + addr[nt] = &All.SofteningStarsMaxPhys; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "SofteningBndryMaxPhys"); + addr[nt] = &All.SofteningBndryMaxPhys; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "BufferSize"); + addr[nt] = &All.BufferSize; + id[nt++] = INT; + + strcpy(tag[nt], "PartAllocFactor"); + addr[nt] = &All.PartAllocFactor; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "TreeAllocFactor"); + addr[nt] = &All.TreeAllocFactor; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "GravityConstantInternal"); + addr[nt] = &All.GravityConstantInternal; 
+ id[nt++] = DOUBLE; + + strcpy(tag[nt], "InitGasTemp"); + addr[nt] = &All.InitGasTemp; + id[nt++] = DOUBLE; + + strcpy(tag[nt], "MinGasTemp"); + addr[nt] = &All.MinGasTemp; + id[nt++] = DOUBLE; + + if((fd = fopen(fname, "r"))) + { + sprintf(buf, "%s%s", fname, "-usedvalues"); + if(!(fdout = fopen(buf, "w"))) + { + printf("error opening file '%s' \n", buf); + errorFlag = 1; + } + else + { + while(!feof(fd)) + { + *buf = 0; + fgets(buf, 200, fd); + if(sscanf(buf, "%s%s%s", buf1, buf2, buf3) < 2) + continue; + + if(buf1[0] == '%') + continue; + + for(i = 0, j = -1; i < nt; i++) + if(strcmp(buf1, tag[i]) == 0) + { + j = i; + tag[i][0] = 0; + break; + } + + if(j >= 0) + { + switch (id[j]) + { + case DOUBLE: + *((double *) addr[j]) = atof(buf2); + fprintf(fdout, "%-35s%g\n", buf1, *((double *) addr[j])); + break; + case STRING: + strcpy(addr[j], buf2); + fprintf(fdout, "%-35s%s\n", buf1, buf2); + break; + case INT: + *((int *) addr[j]) = atoi(buf2); + fprintf(fdout, "%-35s%d\n", buf1, *((int *) addr[j])); + break; + } + } + else + { + fprintf(stdout, "Error in file %s: Tag '%s' not allowed or multiple defined.\n", + fname, buf1); + errorFlag = 1; + } + } + fclose(fd); + fclose(fdout); + + i = strlen(All.OutputDir); + if(i > 0) + if(All.OutputDir[i - 1] != '/') + strcat(All.OutputDir, "/"); + + sprintf(buf1, "%s%s", fname, "-usedvalues"); + sprintf(buf2, "%s%s", All.OutputDir, "parameters-usedvalues"); + sprintf(buf3, "cp %s %s", buf1, buf2); + system(buf3); + } + } + else + { + printf("\nParameter file %s not found.\n\n", fname); + errorFlag = 2; + } + + if(errorFlag != 2) + for(i = 0; i < nt; i++) + { + if(*tag[i]) + { + printf("Error. I miss a value for tag '%s' in parameter file '%s'.\n", tag[i], fname); + errorFlag = 1; + } + } + + if(All.OutputListOn && errorFlag == 0) + errorFlag += read_outputlist(All.OutputListFilename); + else + All.OutputListLength = 0; + } + + MPI_Bcast(&errorFlag, 1, MPI_INT, 0, MPI_COMM_WORLD); + + if(errorFlag) + { + MPI_Finalize(); + exit(0); + } + + /* now communicate the relevant parameters to the other processes */ + MPI_Bcast(&All, sizeof(struct global_data_all_processes), MPI_BYTE, 0, MPI_COMM_WORLD); + + + if(All.NumFilesWrittenInParallel < 1) + { + if(ThisTask == 0) + printf("NumFilesWrittenInParallel MUST be at least 1\n"); + endrun(0); + } + + if(All.NumFilesWrittenInParallel > NTask) + { + if(ThisTask == 0) + printf("NumFilesWrittenInParallel MUST be smaller than number of processors\n"); + endrun(0); + } + +#ifdef PERIODIC + if(All.PeriodicBoundariesOn == 0) + { + if(ThisTask == 0) + { + printf("Code was compiled with periodic boundary conditions switched on.\n"); + printf("You must set `PeriodicBoundariesOn=1', or recompile the code.\n"); + } + endrun(0); + } +#else + if(All.PeriodicBoundariesOn == 1) + { + if(ThisTask == 0) + { + printf("Code was compiled with periodic boundary conditions switched off.\n"); + printf("You must set `PeriodicBoundariesOn=0', or recompile the code.\n"); + } + endrun(0); + } +#endif + + + if(All.TypeOfTimestepCriterion >= 1) + { + if(ThisTask == 0) + { + printf("The specified timestep criterion\n"); + printf("is not valid\n"); + } + endrun(0); + } + +#if defined(LONG_X) || defined(LONG_Y) || defined(LONG_Z) +#ifndef NOGRAVITY + if(ThisTask == 0) + { + printf("Code was compiled with LONG_X/Y/Z, but not with NOGRAVITY.\n"); + printf("Stretched periodic boxes are not implemented for gravity yet.\n"); + } + endrun(0); +#endif +#endif + +#undef DOUBLE +#undef STRING +#undef INT +#undef MAXTAGS +} + + +/*! 
this function reads a table with a list of desired output times. The + * table does not have to be ordered in any way, but may not contain more + * than MAXLEN_OUTPUTLIST entries. + */ +int read_outputlist(char *fname) +{ + FILE *fd; + + if(!(fd = fopen(fname, "r"))) + { + printf("can't read output list in file '%s'\n", fname); + return 1; + } + + All.OutputListLength = 0; + do + { + if(fscanf(fd, " %lg ", &All.OutputListTimes[All.OutputListLength]) == 1) + All.OutputListLength++; + else + break; + } + while(All.OutputListLength < MAXLEN_OUTPUTLIST); + + fclose(fd); + + printf("\nfound %d times in output-list.\n", All.OutputListLength); + + return 0; +} + + +/*! If a restart from restart-files is carried out where the TimeMax + * variable is increased, then the integer timeline needs to be + * adjusted. The approach taken here is to reduce the resolution of the + * integer timeline by factors of 2 until the new final time can be + * reached within TIMEBASE. + */ +void readjust_timebase(double TimeMax_old, double TimeMax_new) +{ + int i; + long long ti_end; + + if(ThisTask == 0) + { + printf("\nAll.TimeMax has been changed in the parameterfile\n"); + printf("Need to adjust integer timeline\n\n\n"); + } + + if(TimeMax_new < TimeMax_old) + { + if(ThisTask == 0) + printf("\nIt is not allowed to reduce All.TimeMax\n\n"); + endrun(556); + } + + if(All.ComovingIntegrationOn) + ti_end = log(TimeMax_new / All.TimeBegin) / All.Timebase_interval; + else + ti_end = (TimeMax_new - All.TimeBegin) / All.Timebase_interval; + + while(ti_end > TIMEBASE) + { + All.Timebase_interval *= 2.0; + + ti_end /= 2; + All.Ti_Current /= 2; + +#ifdef PMGRID + All.PM_Ti_begstep /= 2; + All.PM_Ti_endstep /= 2; +#endif + + for(i = 0; i < NumPart; i++) + { + P[i].Ti_begstep /= 2; + P[i].Ti_endstep /= 2; + } + } + + All.TimeMax = TimeMax_new; +} + diff --git a/src/PyGadget/src/begrun.o b/src/PyGadget/src/begrun.o new file mode 100644 index 0000000..346d0f6 Binary files /dev/null and b/src/PyGadget/src/begrun.o differ diff --git a/src/PyGadget/src/density.c b/src/PyGadget/src/density.c new file mode 100644 index 0000000..9e4cc7a --- /dev/null +++ b/src/PyGadget/src/density.c @@ -0,0 +1,1188 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file density.c + * \brief SPH density computation and smoothing length determination + * + * This file contains the "first SPH loop", where the SPH densities and + * some auxiliary quantities are computed. If the number of neighbours + * obtained falls outside the target range, the correct smoothing + * length is determined iteratively, if needed. + */ + + +#ifdef PERIODIC +static double boxSize, boxHalf; + +#ifdef LONG_X +static double boxSize_X, boxHalf_X; +#else +#define boxSize_X boxSize +#define boxHalf_X boxHalf +#endif +#ifdef LONG_Y +static double boxSize_Y, boxHalf_Y; +#else +#define boxSize_Y boxSize +#define boxHalf_Y boxHalf +#endif +#ifdef LONG_Z +static double boxSize_Z, boxHalf_Z; +#else +#define boxSize_Z boxSize +#define boxHalf_Z boxHalf +#endif +#endif + + +/*! This function computes the local density for each active SPH particle, + * the number of neighbours in the current smoothing radius, and the + * divergence and curl of the velocity field. The pressure is updated as + * well. If a particle with its smoothing region is fully inside the + * local domain, it is not exported to the other processors. The function + * also detects particles that have a number of neighbours outside the + * allowed tolerance range. 
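The acceptable range is [DesNumNgb - MaxNumNgbDeviation, DesNumNgb + MaxNumNgbDeviation]. Once lower and upper brackets Left and Right on the smoothing length have been found, the new guess is obtained from (the update used further down in this file)

    SphP[i].Hsml = pow(0.5 * (pow(SphP[i].Left, 3) + pow(SphP[i].Right, 3)), 1.0 / 3);
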
For these particles, the smoothing length is + * adjusted accordingly, and the density computation is executed again. + * Note that the smoothing length is not allowed to fall below the lower + * bound set by MinGasHsml. + */ +void density(void) +{ + long long ntot, ntotleft; + int *noffset, *nbuffer, *nsend, *nsend_local, *numlist, *ndonelist; + int i, j, n, ndone, npleft, maxfill, source, iter = 0; + int level, ngrp, sendTask, recvTask, place, nexport; + double dt_entr, tstart, tend, tstart_ngb = 0, tend_ngb = 0; + double sumt, sumcomm, timengb, sumtimengb; + double timecomp = 0, timeimbalance = 0, timecommsumm = 0, sumimbalance; + MPI_Status status; + +#ifdef PERIODIC + boxSize = All.BoxSize; + boxHalf = 0.5 * All.BoxSize; +#ifdef LONG_X + boxHalf_X = boxHalf * LONG_X; + boxSize_X = boxSize * LONG_X; +#endif +#ifdef LONG_Y + boxHalf_Y = boxHalf * LONG_Y; + boxSize_Y = boxSize * LONG_Y; +#endif +#ifdef LONG_Z + boxHalf_Z = boxHalf * LONG_Z; + boxSize_Z = boxSize * LONG_Z; +#endif +#endif + + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + for(n = 0, NumSphUpdate = 0; n < N_gas; n++) + { + SphP[n].Left = SphP[n].Right = 0; + + if(P[n].Ti_endstep == All.Ti_Current) + NumSphUpdate++; + } + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumSphUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + + + /* we will repeat the whole thing for those particles where we didn't + * find enough neighbours + */ + do + { + i = 0; /* beginn with this index */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + tstart = second(); + for(nexport = 0, ndone = 0; i < N_gas && nexport < All.BunchSizeDensity - NTask; i++) + if(P[i].Ti_endstep == All.Ti_Current) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; + + density_evaluate(i, 0); + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + DensDataIn[nexport].Pos[0] = P[i].Pos[0]; + DensDataIn[nexport].Pos[1] = P[i].Pos[1]; + DensDataIn[nexport].Pos[2] = P[i].Pos[2]; + DensDataIn[nexport].Vel[0] = SphP[i].VelPred[0]; + DensDataIn[nexport].Vel[1] = SphP[i].VelPred[1]; + DensDataIn[nexport].Vel[2] = SphP[i].VelPred[2]; + DensDataIn[nexport].Hsml = SphP[i].Hsml; + DensDataIn[nexport].Index = i; + DensDataIn[nexport].Task = j; + nexport++; + nsend_local[j]++; + } + } + } + tend = second(); + timecomp += timediff(tstart, tend); + + qsort(DensDataIn, nexport, sizeof(struct densdata_in), dens_compare_key); + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + tstart = second(); + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + tend = second(); + timeimbalance += timediff(tstart, tend); + + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } 
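 /* tasks are paired via the XOR pattern recvTask = ThisTask ^ ngrp; the scan above
    stops filling this communication round as soon as the next pairwise exchange
    would no longer fit into the density communication buffer */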
+ if(maxfill >= All.BunchSizeDensity) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&DensDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct densdata_in), MPI_BYTE, + recvTask, TAG_DENS_A, + &DensDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct densdata_in), + MPI_BYTE, recvTask, TAG_DENS_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + density_evaluate(j, 1); + tend = second(); + timecomp += timediff(tstart, tend); + + /* do a block to explicitly measure imbalance */ + tstart = second(); + MPI_Barrier(MPI_COMM_WORLD); + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* get the result */ + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeDensity) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&DensDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct densdata_out), + MPI_BYTE, recvTask, TAG_DENS_B, + &DensDataPartialResult[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct densdata_out), + MPI_BYTE, recvTask, TAG_DENS_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + source = j + noffset[recvTask]; + place = DensDataIn[source].Index; + + SphP[place].NumNgb += DensDataPartialResult[source].Ngb; + SphP[place].Density += DensDataPartialResult[source].Rho; + SphP[place].DivVel += DensDataPartialResult[source].Div; + + SphP[place].DhsmlDensityFactor += DensDataPartialResult[source].DhsmlDensity; + + SphP[place].Rot[0] += DensDataPartialResult[source].Rot[0]; + SphP[place].Rot[1] += DensDataPartialResult[source].Rot[1]; + SphP[place].Rot[2] += DensDataPartialResult[source].Rot[2]; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + + + /* do final operations on results */ + tstart = second(); + for(i = 0, npleft = 0; i < N_gas; i++) + { + if(P[i].Ti_endstep == All.Ti_Current) + { + { + SphP[i].DhsmlDensityFactor = + 1 / (1 + SphP[i].Hsml * SphP[i].DhsmlDensityFactor / (NUMDIMS * SphP[i].Density)); + + SphP[i].CurlVel = sqrt(SphP[i].Rot[0] * SphP[i].Rot[0] + + SphP[i].Rot[1] * SphP[i].Rot[1] + + SphP[i].Rot[2] * SphP[i].Rot[2]) / SphP[i].Density; + + SphP[i].DivVel /= SphP[i].Density; + + dt_entr = (All.Ti_Current - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval; + + SphP[i].Pressure = + (SphP[i].Entropy + SphP[i].DtEntropy * dt_entr) * pow(SphP[i].Density, 
GAMMA); + } + + + /* now check whether we had enough neighbours */ + + if(SphP[i].NumNgb < (All.DesNumNgb - All.MaxNumNgbDeviation) || + (SphP[i].NumNgb > (All.DesNumNgb + All.MaxNumNgbDeviation) + && SphP[i].Hsml > (1.01 * All.MinGasHsml))) + { + /* need to redo this particle */ + npleft++; + + if(SphP[i].Left > 0 && SphP[i].Right > 0) + if((SphP[i].Right - SphP[i].Left) < 1.0e-3 * SphP[i].Left) + { + /* this one should be ok */ + npleft--; + P[i].Ti_endstep = -P[i].Ti_endstep - 1; /* Mark as inactive */ + continue; + } + + if(SphP[i].NumNgb < (All.DesNumNgb - All.MaxNumNgbDeviation)) + SphP[i].Left = dmax(SphP[i].Hsml, SphP[i].Left); + else + { + if(SphP[i].Right != 0) + { + if(SphP[i].Hsml < SphP[i].Right) + SphP[i].Right = SphP[i].Hsml; + } + else + SphP[i].Right = SphP[i].Hsml; + } + + if(iter >= MAXITER - 10) + { + printf + ("i=%d task=%d ID=%d Hsml=%g Left=%g Right=%g Ngbs=%g Right-Left=%g\n pos=(%g|%g|%g)\n", + i, ThisTask, (int) P[i].ID, SphP[i].Hsml, SphP[i].Left, SphP[i].Right, + (float) SphP[i].NumNgb, SphP[i].Right - SphP[i].Left, P[i].Pos[0], P[i].Pos[1], + P[i].Pos[2]); + fflush(stdout); + } + + if(SphP[i].Right > 0 && SphP[i].Left > 0) + SphP[i].Hsml = pow(0.5 * (pow(SphP[i].Left, 3) + pow(SphP[i].Right, 3)), 1.0 / 3); + else + { + if(SphP[i].Right == 0 && SphP[i].Left == 0) + endrun(8188); /* can't occur */ + + if(SphP[i].Right == 0 && SphP[i].Left > 0) + { + if(P[i].Type == 0 && fabs(SphP[i].NumNgb - All.DesNumNgb) < 0.5 * All.DesNumNgb) + { + SphP[i].Hsml *= + 1 - (SphP[i].NumNgb - + All.DesNumNgb) / (NUMDIMS * SphP[i].NumNgb) * SphP[i].DhsmlDensityFactor; + } + else + SphP[i].Hsml *= 1.26; + } + + if(SphP[i].Right > 0 && SphP[i].Left == 0) + { + if(P[i].Type == 0 && fabs(SphP[i].NumNgb - All.DesNumNgb) < 0.5 * All.DesNumNgb) + { + SphP[i].Hsml *= + 1 - (SphP[i].NumNgb - + All.DesNumNgb) / (NUMDIMS * SphP[i].NumNgb) * SphP[i].DhsmlDensityFactor; + } + else + SphP[i].Hsml /= 1.26; + } + } + + if(SphP[i].Hsml < All.MinGasHsml) + SphP[i].Hsml = All.MinGasHsml; + } + else + P[i].Ti_endstep = -P[i].Ti_endstep - 1; /* Mark as inactive */ + } + } + tend = second(); + timecomp += timediff(tstart, tend); + + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&npleft, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + if(ntot > 0) + { + if(iter == 0) + tstart_ngb = second(); + + iter++; + + if(iter > 0 && ThisTask == 0) + { + printf("ngb iteration %d: need to repeat for %d%09d particles.\n", iter, + (int) (ntot / 1000000000), (int) (ntot % 1000000000)); + fflush(stdout); + } + + if(iter > MAXITER) + { + printf("failed to converge in neighbour iteration in density()\n"); + fflush(stdout); + endrun(1155); + } + } + else + tend_ngb = second(); + } + while(ntot > 0); + + + /* mark as active again */ + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep < 0) + P[i].Ti_endstep = -P[i].Ti_endstep - 1; + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + + /* collect some timing information */ + if(iter > 0) + timengb = timediff(tstart_ngb, tend_ngb); + else + timengb = 0; + + MPI_Reduce(&timengb, &sumtimengb, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timecomp, &sumt, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timecommsumm, &sumcomm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + All.CPU_HydCompWalk += sumt / 
NTask; + All.CPU_HydCommSumm += sumcomm / NTask; + All.CPU_HydImbalance += sumimbalance / NTask; + All.CPU_EnsureNgb += sumtimengb / NTask; + } +} + + + +/*! This function represents the core of the SPH density computation. The + * target particle may either be local, or reside in the communication + * buffer. + */ +void density_evaluate(int target, int mode) +{ + int j, n, startnode, numngb, numngb_inbox; + double h, h2, fac, hinv, hinv3, hinv4; + double rho, divv, wk, dwk; + double dx, dy, dz, r, r2, u, mass_j; + double dvx, dvy, dvz, rotv[3]; + double weighted_numngb, dhsmlrho; + FLOAT *pos, *vel; + + if(mode == 0) + { + pos = P[target].Pos; + vel = SphP[target].VelPred; + h = SphP[target].Hsml; + } + else + { + pos = DensDataGet[target].Pos; + vel = DensDataGet[target].Vel; + h = DensDataGet[target].Hsml; + } + + h2 = h * h; + hinv = 1.0 / h; +#ifndef TWODIMS + hinv3 = hinv * hinv * hinv; +#else + hinv3 = hinv * hinv / boxSize_Z; +#endif + hinv4 = hinv3 * hinv; + + rho = divv = rotv[0] = rotv[1] = rotv[2] = 0; + weighted_numngb = 0; + dhsmlrho = 0; + + startnode = All.MaxPart; + numngb = 0; + do + { + numngb_inbox = ngb_treefind_variable(&pos[0], h, &startnode); + + for(n = 0; n < numngb_inbox; n++) + { + j = Ngblist[n]; + + dx = pos[0] - P[j].Pos[0]; + dy = pos[1] - P[j].Pos[1]; + dz = pos[2] - P[j].Pos[2]; + +#ifdef PERIODIC /* now find the closest image in the given box size */ + if(dx > boxHalf_X) + dx -= boxSize_X; + if(dx < -boxHalf_X) + dx += boxSize_X; + if(dy > boxHalf_Y) + dy -= boxSize_Y; + if(dy < -boxHalf_Y) + dy += boxSize_Y; + if(dz > boxHalf_Z) + dz -= boxSize_Z; + if(dz < -boxHalf_Z) + dz += boxSize_Z; +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(r2 < h2) + { + numngb++; + + r = sqrt(r2); + + u = r * hinv; + + if(u < 0.5) + { + wk = hinv3 * (KERNEL_COEFF_1 + KERNEL_COEFF_2 * (u - 1) * u * u); + dwk = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4); + } + else + { + wk = hinv3 * KERNEL_COEFF_5 * (1.0 - u) * (1.0 - u) * (1.0 - u); + dwk = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u); + } + + mass_j = P[j].Mass; + + rho += mass_j * wk; + + weighted_numngb += NORM_COEFF * wk / hinv3; + + dhsmlrho += -mass_j * (NUMDIMS * hinv * wk + u * dwk); + + if(r > 0) + { + fac = mass_j * dwk / r; + + dvx = vel[0] - SphP[j].VelPred[0]; + dvy = vel[1] - SphP[j].VelPred[1]; + dvz = vel[2] - SphP[j].VelPred[2]; + + divv -= fac * (dx * dvx + dy * dvy + dz * dvz); + + rotv[0] += fac * (dz * dvy - dy * dvz); + rotv[1] += fac * (dx * dvz - dz * dvx); + rotv[2] += fac * (dy * dvx - dx * dvy); + } + } + } + } + while(startnode >= 0); + + if(mode == 0) + { + SphP[target].NumNgb = weighted_numngb; + SphP[target].Density = rho; + SphP[target].DivVel = divv; + SphP[target].DhsmlDensityFactor = dhsmlrho; + SphP[target].Rot[0] = rotv[0]; + SphP[target].Rot[1] = rotv[1]; + SphP[target].Rot[2] = rotv[2]; + } + else + { + DensDataResult[target].Rho = rho; + DensDataResult[target].Div = divv; + DensDataResult[target].Ngb = weighted_numngb; + DensDataResult[target].DhsmlDensity = dhsmlrho; + DensDataResult[target].Rot[0] = rotv[0]; + DensDataResult[target].Rot[1] = rotv[1]; + DensDataResult[target].Rot[2] = rotv[2]; + } +} + + + + + + + + + + +#ifdef PY_INTERFACE +/*! This function computes the local density for each active SPH particle, + * the number of neighbours in the current smoothing radius, and the + * divergence and curl of the velocity field. The pressure is updated as + * well. 
If a particle with its smoothing region is fully inside the + * local domain, it is not exported to the other processors. The function + * also detects particles that have a number of neighbours outside the + * allowed tolerance range. For these particles, the smoothing length is + * adjusted accordingly, and the density computation is executed again. + * Note that the smoothing length is not allowed to fall below the lower + * bound set by MinGasHsml. + */ +void density_sub(void) +{ + long long ntot, ntotleft; + int *noffset, *nbuffer, *nsend, *nsend_local, *numlist, *ndonelist; + int i, j, n, ndone, npleft, maxfill, source, iter = 0; + int level, ngrp, sendTask, recvTask, place, nexport; + double dt_entr, tstart, tend, tstart_ngb = 0, tend_ngb = 0; + double sumt, sumcomm, timengb, sumtimengb; + double timecomp = 0, timeimbalance = 0, timecommsumm = 0, sumimbalance; + MPI_Status status; + +#ifdef PERIODIC + boxSize = All.BoxSize; + boxHalf = 0.5 * All.BoxSize; +#ifdef LONG_X + boxHalf_X = boxHalf * LONG_X; + boxSize_X = boxSize * LONG_X; +#endif +#ifdef LONG_Y + boxHalf_Y = boxHalf * LONG_Y; + boxSize_Y = boxSize * LONG_Y; +#endif +#ifdef LONG_Z + boxHalf_Z = boxHalf * LONG_Z; + boxSize_Z = boxSize * LONG_Z; +#endif +#endif + + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + for(n = 0, NumSphUpdate = 0; n < N_gasQ; n++) + { + SphQ[n].Left = SphQ[n].Right = 0; + + + //if(Q[n].Ti_endstep == All.Ti_Current) + Q[n].Ti_endstep = All.Ti_Current; + NumSphUpdate++; + } + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumSphUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + + + /* we will repeat the whole thing for those particles where we didn't + * find enough neighbours + */ + do + { + i = 0; /* beginn with this index */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + tstart = second(); + for(nexport = 0, ndone = 0; i < N_gasQ && nexport < All.BunchSizeDensity - NTask; i++) + if(Q[i].Ti_endstep == All.Ti_Current) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; + + density_evaluate_sub(i, 0); + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + DensDataIn[nexport].Pos[0] = Q[i].Pos[0]; + DensDataIn[nexport].Pos[1] = Q[i].Pos[1]; + DensDataIn[nexport].Pos[2] = Q[i].Pos[2]; + DensDataIn[nexport].Vel[0] = SphQ[i].VelPred[0]; + DensDataIn[nexport].Vel[1] = SphQ[i].VelPred[1]; + DensDataIn[nexport].Vel[2] = SphQ[i].VelPred[2]; + DensDataIn[nexport].Hsml = SphQ[i].Hsml; + DensDataIn[nexport].Index = i; + DensDataIn[nexport].Task = j; + nexport++; + nsend_local[j]++; + } + } + } + tend = second(); + timecomp += timediff(tstart, tend); + + qsort(DensDataIn, nexport, sizeof(struct densdata_in), dens_compare_key); + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + tstart = second(); + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + tend = second(); + timeimbalance += timediff(tstart, tend); + + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + tstart = second(); + 
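+	      /* Same pairwise export/import scheme as in density() above, applied
+	       * here to the Q/SphQ query particles of the Python interface;
+	       * imported entries are processed by density_evaluate_sub(). */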
for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeDensity) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&DensDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct densdata_in), MPI_BYTE, + recvTask, TAG_DENS_A, + &DensDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct densdata_in), + MPI_BYTE, recvTask, TAG_DENS_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + density_evaluate_sub(j, 1); + tend = second(); + timecomp += timediff(tstart, tend); + + /* do a block to explicitly measure imbalance */ + tstart = second(); + MPI_Barrier(MPI_COMM_WORLD); + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* get the result */ + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeDensity) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&DensDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct densdata_out), + MPI_BYTE, recvTask, TAG_DENS_B, + &DensDataPartialResult[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct densdata_out), + MPI_BYTE, recvTask, TAG_DENS_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + source = j + noffset[recvTask]; + place = DensDataIn[source].Index; + + SphQ[place].NumNgb += DensDataPartialResult[source].Ngb; + SphQ[place].Density += DensDataPartialResult[source].Rho; + SphQ[place].DivVel += DensDataPartialResult[source].Div; + + SphQ[place].DhsmlDensityFactor += DensDataPartialResult[source].DhsmlDensity; + + SphQ[place].Rot[0] += DensDataPartialResult[source].Rot[0]; + SphQ[place].Rot[1] += DensDataPartialResult[source].Rot[1]; + SphQ[place].Rot[2] += DensDataPartialResult[source].Rot[2]; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + + + /* do final operations on results */ + tstart = second(); + for(i = 0, npleft = 0; i < N_gasQ; i++) + { + if(Q[i].Ti_endstep == All.Ti_Current) + { + { + SphQ[i].DhsmlDensityFactor = + 1 / (1 + SphQ[i].Hsml * SphQ[i].DhsmlDensityFactor / (NUMDIMS * SphQ[i].Density)); + + SphQ[i].CurlVel = sqrt(SphQ[i].Rot[0] * SphQ[i].Rot[0] + + SphQ[i].Rot[1] * 
SphQ[i].Rot[1] + + SphQ[i].Rot[2] * SphQ[i].Rot[2]) / SphQ[i].Density; + + SphQ[i].DivVel /= SphQ[i].Density; + + dt_entr = (All.Ti_Current - (Q[i].Ti_begstep + Q[i].Ti_endstep) / 2) * All.Timebase_interval; + + SphQ[i].Pressure = + (SphQ[i].Entropy + SphQ[i].DtEntropy * dt_entr) * pow(SphQ[i].Density, GAMMA); + } + + + /* now check whether we had enough neighbours */ + + if(SphQ[i].NumNgb < (All.DesNumNgb - All.MaxNumNgbDeviation) || + (SphQ[i].NumNgb > (All.DesNumNgb + All.MaxNumNgbDeviation) + && SphQ[i].Hsml > (1.01 * All.MinGasHsml))) + { + /* need to redo this particle */ + npleft++; + + if(SphQ[i].Left > 0 && SphQ[i].Right > 0) + if((SphQ[i].Right - SphQ[i].Left) < 1.0e-3 * SphQ[i].Left) + { + /* this one should be ok */ + npleft--; + Q[i].Ti_endstep = -Q[i].Ti_endstep - 1; /* Mark as inactive */ + continue; + } + + if(SphQ[i].NumNgb < (All.DesNumNgb - All.MaxNumNgbDeviation)) + SphQ[i].Left = dmax(SphQ[i].Hsml, SphQ[i].Left); + else + { + if(SphQ[i].Right != 0) + { + if(SphQ[i].Hsml < SphQ[i].Right) + SphQ[i].Right = SphQ[i].Hsml; + } + else + SphQ[i].Right = SphQ[i].Hsml; + } + + if(iter >= MAXITER - 10) + { + printf + ("i=%d task=%d ID=%d Hsml=%g Left=%g Right=%g Ngbs=%g Right-Left=%g\n pos=(%g|%g|%g)\n", + i, ThisTask, (int) Q[i].ID, SphQ[i].Hsml, SphQ[i].Left, SphQ[i].Right, + (float) SphQ[i].NumNgb, SphQ[i].Right - SphQ[i].Left, Q[i].Pos[0], Q[i].Pos[1], + Q[i].Pos[2]); + fflush(stdout); + } + + if(SphQ[i].Right > 0 && SphQ[i].Left > 0) + SphQ[i].Hsml = pow(0.5 * (pow(SphQ[i].Left, 3) + pow(SphQ[i].Right, 3)), 1.0 / 3); + else + { + if(SphQ[i].Right == 0 && SphQ[i].Left == 0) + endrun(8188); /* can't occur */ + + if(SphQ[i].Right == 0 && SphQ[i].Left > 0) + { + if(Q[i].Type == 0 && fabs(SphQ[i].NumNgb - All.DesNumNgb) < 0.5 * All.DesNumNgb) + { + SphQ[i].Hsml *= + 1 - (SphQ[i].NumNgb - + All.DesNumNgb) / (NUMDIMS * SphQ[i].NumNgb) * SphQ[i].DhsmlDensityFactor; + } + else + SphQ[i].Hsml *= 1.26; + } + + if(SphQ[i].Right > 0 && SphQ[i].Left == 0) + { + if(Q[i].Type == 0 && fabs(SphQ[i].NumNgb - All.DesNumNgb) < 0.5 * All.DesNumNgb) + { + SphQ[i].Hsml *= + 1 - (SphQ[i].NumNgb - + All.DesNumNgb) / (NUMDIMS * SphQ[i].NumNgb) * SphQ[i].DhsmlDensityFactor; + } + else + SphQ[i].Hsml /= 1.26; + } + } + + if(SphQ[i].Hsml < All.MinGasHsml) + SphQ[i].Hsml = All.MinGasHsml; + } + else + Q[i].Ti_endstep = -Q[i].Ti_endstep - 1; /* Mark as inactive */ + } + } + tend = second(); + timecomp += timediff(tstart, tend); + + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&npleft, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + if(ntot > 0) + { + if(iter == 0) + tstart_ngb = second(); + + iter++; + + if(iter > 0 && ThisTask == 0) + { + printf("ngb iteration %d: need to repeat for %d%09d particles.\n", iter, + (int) (ntot / 1000000000), (int) (ntot % 1000000000)); + fflush(stdout); + } + + if(iter > MAXITER) + { + printf("failed to converge in neighbour iteration in density()\n"); + fflush(stdout); + endrun(1155); + } + } + else + tend_ngb = second(); + } + while(ntot > 0); + + + /* mark as active again */ + for(i = 0; i < NumPartQ; i++) + if(Q[i].Ti_endstep < 0) + Q[i].Ti_endstep = -Q[i].Ti_endstep - 1; + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + + /* collect some timing information */ + if(iter > 0) + timengb = timediff(tstart_ngb, tend_ngb); + else + timengb = 0; + + MPI_Reduce(&timengb, &sumtimengb, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + 
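+  /* The timers below are summed onto the root task and divided by NTask
+   * further down, so the All.CPU_* counters accumulate the mean per-task
+   * time spent in each phase. */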
MPI_Reduce(&timecomp, &sumt, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timecommsumm, &sumcomm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + All.CPU_HydCompWalk += sumt / NTask; + All.CPU_HydCommSumm += sumcomm / NTask; + All.CPU_HydImbalance += sumimbalance / NTask; + All.CPU_EnsureNgb += sumtimengb / NTask; + } +} +#endif + +#ifdef PY_INTERFACE +/*! This function represents the core of the SPH density computation. The + * target particle may either be local, or reside in the communication + * buffer. + */ +void density_evaluate_sub(int target, int mode) +{ + int j, n, startnode, numngb, numngb_inbox; + double h, h2, fac, hinv, hinv3, hinv4; + double rho, divv, wk, dwk; + double dx, dy, dz, r, r2, u, mass_j; + double dvx, dvy, dvz, rotv[3]; + double weighted_numngb, dhsmlrho; + FLOAT *pos, *vel; + + if(mode == 0) + { + pos = Q[target].Pos; + vel = SphQ[target].VelPred; + h = SphQ[target].Hsml; + } + else + { + pos = DensDataGet[target].Pos; + vel = DensDataGet[target].Vel; + h = DensDataGet[target].Hsml; + } + + h2 = h * h; + hinv = 1.0 / h; +#ifndef TWODIMS + hinv3 = hinv * hinv * hinv; +#else + hinv3 = hinv * hinv / boxSize_Z; +#endif + hinv4 = hinv3 * hinv; + + rho = divv = rotv[0] = rotv[1] = rotv[2] = 0; + weighted_numngb = 0; + dhsmlrho = 0; + + startnode = All.MaxPart; + numngb = 0; + do + { + numngb_inbox = ngb_treefind_variable(&pos[0], h, &startnode); + + for(n = 0; n < numngb_inbox; n++) + { + j = Ngblist[n]; + + dx = pos[0] - P[j].Pos[0]; + dy = pos[1] - P[j].Pos[1]; + dz = pos[2] - P[j].Pos[2]; + +#ifdef PERIODIC /* now find the closest image in the given box size */ + if(dx > boxHalf_X) + dx -= boxSize_X; + if(dx < -boxHalf_X) + dx += boxSize_X; + if(dy > boxHalf_Y) + dy -= boxSize_Y; + if(dy < -boxHalf_Y) + dy += boxSize_Y; + if(dz > boxHalf_Z) + dz -= boxSize_Z; + if(dz < -boxHalf_Z) + dz += boxSize_Z; +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(r2 < h2) + { + numngb++; + + r = sqrt(r2); + + u = r * hinv; + + if(u < 0.5) + { + wk = hinv3 * (KERNEL_COEFF_1 + KERNEL_COEFF_2 * (u - 1) * u * u); + dwk = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4); + } + else + { + wk = hinv3 * KERNEL_COEFF_5 * (1.0 - u) * (1.0 - u) * (1.0 - u); + dwk = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u); + } + + mass_j = P[j].Mass; + + rho += mass_j * wk; + + weighted_numngb += NORM_COEFF * wk / hinv3; + + dhsmlrho += -mass_j * (NUMDIMS * hinv * wk + u * dwk); + + if(r > 0) + { + fac = mass_j * dwk / r; + + dvx = vel[0] - SphP[j].VelPred[0]; + dvy = vel[1] - SphP[j].VelPred[1]; + dvz = vel[2] - SphP[j].VelPred[2]; + + divv -= fac * (dx * dvx + dy * dvy + dz * dvz); + + rotv[0] += fac * (dz * dvy - dy * dvz); + rotv[1] += fac * (dx * dvz - dz * dvx); + rotv[2] += fac * (dy * dvx - dx * dvy); + } + } + } + } + while(startnode >= 0); + + if(mode == 0) + { + SphQ[target].NumNgb = weighted_numngb; + SphQ[target].Density = rho; + SphQ[target].DivVel = divv; + SphQ[target].DhsmlDensityFactor = dhsmlrho; + SphQ[target].Rot[0] = rotv[0]; + SphQ[target].Rot[1] = rotv[1]; + SphQ[target].Rot[2] = rotv[2]; + } + else + { + DensDataResult[target].Rho = rho; + DensDataResult[target].Div = divv; + DensDataResult[target].Ngb = weighted_numngb; + DensDataResult[target].DhsmlDensity = dhsmlrho; + DensDataResult[target].Rot[0] = rotv[0]; + DensDataResult[target].Rot[1] = rotv[1]; + DensDataResult[target].Rot[2] = rotv[2]; + } +} +#endif + + + + + + + +/*! 
This routine is a comparison kernel used in a sort routine to group + * particles that are exported to the same processor. + */ +int dens_compare_key(const void *a, const void *b) +{ + if(((struct densdata_in *) a)->Task < (((struct densdata_in *) b)->Task)) + return -1; + + if(((struct densdata_in *) a)->Task > (((struct densdata_in *) b)->Task)) + return +1; + + return 0; +} diff --git a/src/PyGadget/src/density.o b/src/PyGadget/src/density.o new file mode 100644 index 0000000..dd98baf Binary files /dev/null and b/src/PyGadget/src/density.o differ diff --git a/src/PyGadget/src/domain.c b/src/PyGadget/src/domain.c new file mode 100644 index 0000000..d880f0f --- /dev/null +++ b/src/PyGadget/src/domain.c @@ -0,0 +1,1128 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + + +/*! \file domain.c + * \brief code for domain decomposition + * + * This file contains the code for the domain decomposition of the + * simulation volume. The domains are constructed from disjoint subsets + * of the leaves of a fiducial top-level tree that covers the full + * simulation volume. Domain boundaries hence run along tree-node + * divisions of a fiducial global BH tree. As a result of this method, the + * tree force are in principle strictly independent of the way the domains + * are cut. The domain decomposition can be carried out for an arbitrary + * number of CPUs. Individual domains are not cubical, but spatially + * coherent since the leaves are traversed in a Peano-Hilbert order and + * individual domains form segments along this order. This also ensures + * that each domain has a small surface to volume ratio, which minimizes + * communication. + */ + +#define TOPNODEFACTOR 20.0 + +#define REDUC_FAC 0.98 + + +/*! toGo[task*NTask + partner] gives the number of particles in task 'task' + * that have to go to task 'partner' + */ +static int *toGo, *toGoSph; +static int *local_toGo, *local_toGoSph; +static int *list_NumPart; +static int *list_N_gas; +static int *list_load; +static int *list_loadsph; +static double *list_work; + +static long long maxload, maxloadsph; + +static struct topnode_exchange +{ + peanokey Startkey; + int Count; +} + *toplist, *toplist_local; + + + +/*! This is the main routine for the domain decomposition. It acts as a + * driver routine that allocates various temporary buffers, maps the + * particles back onto the periodic box if needed, and then does the + * domain decomposition, and a final Peano-Hilbert order of all particles + * as a tuning measure. + */ +void domain_Decomposition(void) +{ + double t0, t1; + +#ifdef PMGRID + if(All.PM_Ti_endstep == All.Ti_Current) + { + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + /* to make sure that we do a domain decomposition before the PM-force is evaluated. + this is needed to make sure that the particles are wrapped into the box */ + } +#endif + + /* Check whether it is really time for a new domain decomposition */ + if(All.NumForcesSinceLastDomainDecomp > All.TotNumPart * All.TreeDomainUpdateFrequency) + { + t0 = second(); + +#ifdef PERIODIC + do_box_wrapping(); /* map the particles back onto the box */ +#endif + All.NumForcesSinceLastDomainDecomp = 0; + TreeReconstructFlag = 1; /* ensures that new tree will be constructed */ + + if(ThisTask == 0) + { + printf("domain decomposition... 
\n"); + fflush(stdout); + } + + Key = malloc(sizeof(peanokey) * All.MaxPart); + KeySorted = malloc(sizeof(peanokey) * All.MaxPart); + + toGo = malloc(sizeof(int) * NTask * NTask); + toGoSph = malloc(sizeof(int) * NTask * NTask); + local_toGo = malloc(sizeof(int) * NTask); + local_toGoSph = malloc(sizeof(int) * NTask); + list_NumPart = malloc(sizeof(int) * NTask); + list_N_gas = malloc(sizeof(int) * NTask); + list_load = malloc(sizeof(int) * NTask); + list_loadsph = malloc(sizeof(int) * NTask); + list_work = malloc(sizeof(double) * NTask); + + MPI_Allgather(&NumPart, 1, MPI_INT, list_NumPart, 1, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(&N_gas, 1, MPI_INT, list_N_gas, 1, MPI_INT, MPI_COMM_WORLD); + + maxload = All.MaxPart * REDUC_FAC; + maxloadsph = All.MaxPartSph * REDUC_FAC; + + domain_decompose(); + + free(list_work); + free(list_loadsph); + free(list_load); + free(list_N_gas); + free(list_NumPart); + free(local_toGoSph); + free(local_toGo); + free(toGoSph); + free(toGo); + + + if(ThisTask == 0) + { + printf("domain decomposition done. \n"); + fflush(stdout); + } + + t1 = second(); + All.CPU_Domain += timediff(t0, t1); + +#ifdef PEANOHILBERT + t0 = second(); + peano_hilbert_order(); + t1 = second(); + All.CPU_Peano += timediff(t0, t1); +#endif + + free(KeySorted); + free(Key); + } + +} + + + +/*! This function carries out the actual domain decomposition for all + * particle types. It will try to balance the work-load for each domain, + * as estimated based on the P[i]-GravCost values. The decomposition will + * respect the maximum allowed memory-imbalance given by the value of + * PartAllocFactor. + */ +void domain_decompose(void) +{ + int i, j, status; + int ngrp, task, partner, sendcount, recvcount; + long long sumtogo, sumload; + int maxload, *temp; + double sumwork, maxwork; + + for(i = 0; i < 6; i++) + NtypeLocal[i] = 0; + + for(i = 0; i < NumPart; i++) + NtypeLocal[P[i].Type]++; + + /* because Ntype[] is of type `long long', we cannot do a simple + * MPI_Allreduce() to sum the total particle numbers + */ + temp = malloc(NTask * 6 * sizeof(int)); + MPI_Allgather(NtypeLocal, 6, MPI_INT, temp, 6, MPI_INT, MPI_COMM_WORLD); + for(i = 0; i < 6; i++) + { + Ntype[i] = 0; + for(j = 0; j < NTask; j++) + Ntype[i] += temp[j * 6 + i]; + } + free(temp); + +#ifndef UNEQUALSOFTENINGS + for(i = 0; i < 6; i++) + if(Ntype[i] > 0) + break; + + for(ngrp = i + 1; ngrp < 6; ngrp++) + { + if(Ntype[ngrp] > 0) + if(All.SofteningTable[ngrp] != All.SofteningTable[i]) + { + if(ThisTask == 0) + { + fprintf(stdout, "Code was not compiled with UNEQUALSOFTENINGS, but some of the\n"); + fprintf(stdout, "softening lengths are unequal nevertheless.\n"); + fprintf(stdout, "This is not allowed.\n"); + } + endrun(0); + } + } +#endif + + + /* determine global dimensions of domain grid */ + domain_findExtent(); + + domain_determineTopTree(); + + /* determine cost distribution in domain grid */ + domain_sumCost(); + + /* find the split of the domain grid recursively */ + status = domain_findSplit(0, NTask, 0, NTopleaves - 1); + if(status != 0) + { + if(ThisTask == 0) + printf("\nNo domain decomposition that stays within memory bounds is possible.\n"); + endrun(0); + } + + /* now try to improve the work-load balance of the split */ + domain_shiftSplit(); + + DomainMyStart = DomainStartList[ThisTask]; + DomainMyLast = DomainEndList[ThisTask]; + + if(ThisTask == 0) + { + sumload = maxload = 0; + sumwork = maxwork = 0; + for(i = 0; i < NTask; i++) + { + sumload += list_load[i]; + sumwork += list_work[i]; + + if(list_load[i] > 
maxload) + maxload = list_load[i]; + + if(list_work[i] > maxwork) + maxwork = list_work[i]; + } + + printf("work-load balance=%g memory-balance=%g\n", + maxwork / (sumwork / NTask), maxload / (((double) sumload) / NTask)); + } + + + /* determine for each cpu how many particles have to be shifted to other cpus */ + domain_countToGo(); + + for(i = 0, sumtogo = 0; i < NTask * NTask; i++) + sumtogo += toGo[i]; + + while(sumtogo > 0) + { + if(ThisTask == 0) + { + printf("exchange of %d%09d particles\n", (int) (sumtogo / 1000000000), + (int) (sumtogo % 1000000000)); + fflush(stdout); + } + + for(ngrp = 1; ngrp < (1 << PTask); ngrp++) + { + for(task = 0; task < NTask; task++) + { + partner = task ^ ngrp; + + if(partner < NTask && task < partner) + { + /* treat SPH separately */ + if(All.TotN_gas > 0) + { + domain_findExchangeNumbers(task, partner, 1, &sendcount, &recvcount); + + list_NumPart[task] += recvcount - sendcount; + list_NumPart[partner] -= recvcount - sendcount; + list_N_gas[task] += recvcount - sendcount; + list_N_gas[partner] -= recvcount - sendcount; + + toGo[task * NTask + partner] -= sendcount; + toGo[partner * NTask + task] -= recvcount; + toGoSph[task * NTask + partner] -= sendcount; + toGoSph[partner * NTask + task] -= recvcount; + + if(task == ThisTask) /* actually carry out the exchange */ + domain_exchangeParticles(partner, 1, sendcount, recvcount); + if(partner == ThisTask) + domain_exchangeParticles(task, 1, recvcount, sendcount); + } + + domain_findExchangeNumbers(task, partner, 0, &sendcount, &recvcount); + + list_NumPart[task] += recvcount - sendcount; + list_NumPart[partner] -= recvcount - sendcount; + + toGo[task * NTask + partner] -= sendcount; + toGo[partner * NTask + task] -= recvcount; + + if(task == ThisTask) /* actually carry out the exchange */ + domain_exchangeParticles(partner, 0, sendcount, recvcount); + if(partner == ThisTask) + domain_exchangeParticles(task, 0, recvcount, sendcount); + } + } + } + + for(i = 0, sumtogo = 0; i < NTask * NTask; i++) + sumtogo += toGo[i]; + } +} + +/*! This function tries to find a split point in a range of cells in the + * domain-grid. The range of cells starts at 'first', and ends at 'last' + * (inclusively). The number of cpus that holds the range is 'ncpu', with + * the first cpu given by 'cpustart'. If more than 2 cpus are to be split, + * the function calls itself recursively. The division tries to achieve a + * best particle-load balance under the constraint that 'maxload' and + * 'maxloadsph' may not be exceeded, and that each cpu holds at least one + * cell from the domaingrid. If such a decomposition cannot be achieved, a + * non-zero error code is returned. + * + * After successful completion, DomainMyStart[] and DomainMyLast[] contain + * the first and last cell of the domaingrid assigned to the local task + * for the given type. Also, DomainTask[] contains for each cell the task + * it was assigned to. 
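+ *
+ * As an illustrative example: for ncpu = 4 the trial split starts at
+ * first + 2 (so that each half initially holds at least one cell per cpu)
+ * and is then advanced one cell at a time as long as this does not increase
+ * the larger of the two per-cpu average loads; the two halves are afterwards
+ * split recursively in the same way.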
+ */ +int domain_findSplit(int cpustart, int ncpu, int first, int last) +{ + int i, split, ok_left, ok_right; + long long load, sphload, load_leftOfSplit, sphload_leftOfSplit; + int ncpu_leftOfSplit; + double maxAvgLoad_CurrentSplit, maxAvgLoad_NewSplit; + + + ncpu_leftOfSplit = ncpu / 2; + + for(i = first, load = 0, sphload = 0; i <= last; i++) + { + load += DomainCount[i]; + sphload += DomainCountSph[i]; + } + + split = first + ncpu_leftOfSplit; + + for(i = first, load_leftOfSplit = sphload_leftOfSplit = 0; i < split; i++) + { + load_leftOfSplit += DomainCount[i]; + sphload_leftOfSplit += DomainCountSph[i]; + } + + /* find the best split point in terms of work-load balance */ + + while(split < last - (ncpu - ncpu_leftOfSplit - 1) && split > 0) + { + maxAvgLoad_CurrentSplit = + dmax(load_leftOfSplit / ncpu_leftOfSplit, (load - load_leftOfSplit) / (ncpu - ncpu_leftOfSplit)); + + maxAvgLoad_NewSplit = + dmax((load_leftOfSplit + DomainCount[split]) / ncpu_leftOfSplit, + (load - load_leftOfSplit - DomainCount[split]) / (ncpu - ncpu_leftOfSplit)); + + if(maxAvgLoad_NewSplit <= maxAvgLoad_CurrentSplit) + { + load_leftOfSplit += DomainCount[split]; + sphload_leftOfSplit += DomainCountSph[split]; + split++; + } + else + break; + } + + + /* we will now have to check whether this solution is possible given the restrictions on the maximum load */ + + for(i = first, load_leftOfSplit = 0, sphload_leftOfSplit = 0; i < split; i++) + { + load_leftOfSplit += DomainCount[i]; + sphload_leftOfSplit += DomainCountSph[i]; + } + + if(load_leftOfSplit > maxload * ncpu_leftOfSplit || + (load - load_leftOfSplit) > maxload * (ncpu - ncpu_leftOfSplit)) + { + /* we did not find a viable split */ + return -1; + } + + if(sphload_leftOfSplit > maxloadsph * ncpu_leftOfSplit || + (sphload - sphload_leftOfSplit) > maxloadsph * (ncpu - ncpu_leftOfSplit)) + { + /* we did not find a viable split */ + return -1; + } + + if(ncpu_leftOfSplit >= 2) + ok_left = domain_findSplit(cpustart, ncpu_leftOfSplit, first, split - 1); + else + ok_left = 0; + + if((ncpu - ncpu_leftOfSplit) >= 2) + ok_right = domain_findSplit(cpustart + ncpu_leftOfSplit, ncpu - ncpu_leftOfSplit, split, last); + else + ok_right = 0; + + if(ok_left == 0 && ok_right == 0) + { + /* found a viable split */ + + if(ncpu_leftOfSplit == 1) + { + for(i = first; i < split; i++) + DomainTask[i] = cpustart; + + list_load[cpustart] = load_leftOfSplit; + list_loadsph[cpustart] = sphload_leftOfSplit; + DomainStartList[cpustart] = first; + DomainEndList[cpustart] = split - 1; + } + + if((ncpu - ncpu_leftOfSplit) == 1) + { + for(i = split; i <= last; i++) + DomainTask[i] = cpustart + ncpu_leftOfSplit; + + list_load[cpustart + ncpu_leftOfSplit] = load - load_leftOfSplit; + list_loadsph[cpustart + ncpu_leftOfSplit] = sphload - sphload_leftOfSplit; + DomainStartList[cpustart + ncpu_leftOfSplit] = split; + DomainEndList[cpustart + ncpu_leftOfSplit] = last; + } + + return 0; + } + + /* we did not find a viable split */ + return -1; +} + + + +/*! This function tries to improve the domain decomposition found by + * domain_findSplit() with respect to work-load balance. To this end, the + * boundaries in the existing domain-split solution (which was found by + * trying to balance the particle load) are shifted as long as this leads + * to better work-load while still remaining within the allowed + * memory-imbalance constraints. 
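+ *
+ * Concretely, each pass walks over adjacent task pairs (task, task+1) and
+ * moves a single boundary cell between them whenever this does not increase
+ * the larger of the two work-loads and the receiving task stays below
+ * maxload and maxloadsph; passes are repeated until no cell moves, or at
+ * most 10 * NTopleaves times.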
+ */ +void domain_shiftSplit(void) +{ + int i, task, iter = 0, moved; + double maxw, newmaxw; + + for(task = 0; task < NTask; task++) + list_work[task] = 0; + + for(i = 0; i < NTopleaves; i++) + list_work[DomainTask[i]] += DomainWork[i]; + + do + { + for(task = 0, moved = 0; task < NTask - 1; task++) + { + maxw = dmax(list_work[task], list_work[task + 1]); + + if(list_work[task] < list_work[task + 1]) + { + newmaxw = dmax(list_work[task] + DomainWork[DomainStartList[task + 1]], + list_work[task + 1] - DomainWork[DomainStartList[task + 1]]); + if(newmaxw <= maxw) + { + if(list_load[task] + DomainCount[DomainStartList[task + 1]] <= maxload) + { + if(list_loadsph[task] + DomainCountSph[DomainStartList[task + 1]] > maxloadsph) + continue; + + /* ok, we can move one domain cell from right to left */ + list_work[task] += DomainWork[DomainStartList[task + 1]]; + list_load[task] += DomainCount[DomainStartList[task + 1]]; + list_loadsph[task] += DomainCountSph[DomainStartList[task + 1]]; + list_work[task + 1] -= DomainWork[DomainStartList[task + 1]]; + list_load[task + 1] -= DomainCount[DomainStartList[task + 1]]; + list_loadsph[task + 1] -= DomainCountSph[DomainStartList[task + 1]]; + + DomainTask[DomainStartList[task + 1]] = task; + DomainStartList[task + 1] += 1; + DomainEndList[task] += 1; + + moved++; + } + } + } + else + { + newmaxw = dmax(list_work[task] - DomainWork[DomainEndList[task]], + list_work[task + 1] + DomainWork[DomainEndList[task]]); + if(newmaxw <= maxw) + { + if(list_load[task + 1] + DomainCount[DomainEndList[task]] <= maxload) + { + if(list_loadsph[task + 1] + DomainCountSph[DomainEndList[task]] > maxloadsph) + continue; + + /* ok, we can move one domain cell from left to right */ + list_work[task] -= DomainWork[DomainEndList[task]]; + list_load[task] -= DomainCount[DomainEndList[task]]; + list_loadsph[task] -= DomainCountSph[DomainEndList[task]]; + list_work[task + 1] += DomainWork[DomainEndList[task]]; + list_load[task + 1] += DomainCount[DomainEndList[task]]; + list_loadsph[task + 1] += DomainCountSph[DomainEndList[task]]; + + DomainTask[DomainEndList[task]] = task + 1; + DomainEndList[task] -= 1; + DomainStartList[task + 1] -= 1; + + moved++; + } + } + + } + } + + iter++; + } + while(moved > 0 && iter < 10 * NTopleaves); +} + + +/*! This function counts how many particles have to be exchanged between + * two CPUs according to the domain split. If the CPUs are already quite + * full and hold data from other CPUs as well, not all the particles may + * be exchanged at once. In this case the communication phase has to be + * repeated, until enough of the third-party particles have been moved + * away such that the decomposition can be completed. 
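+ *
+ * The send counts are first capped at All.BunchSizeDomain and then reduced
+ * by a small fixed-point iteration of the form
+ *   maxsendA = imin(All.MaxPart - numpartB + maxsendB, maxsendA)
+ * (and symmetrically for maxsendB), so that neither partner can overflow its
+ * particle arrays even when both sides send at the same time; for SPH
+ * particles the same iteration is repeated with All.MaxPartSph.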
+ */ +void domain_findExchangeNumbers(int task, int partner, int sphflag, int *send, int *recv) +{ + int numpartA, numpartsphA, ntobesentA, maxsendA, maxsendA_old; + int numpartB, numpartsphB, ntobesentB, maxsendB, maxsendB_old; + + numpartA = list_NumPart[task]; + numpartsphA = list_N_gas[task]; + + numpartB = list_NumPart[partner]; + numpartsphB = list_N_gas[partner]; + + if(sphflag == 1) + { + ntobesentA = toGoSph[task * NTask + partner]; + ntobesentB = toGoSph[partner * NTask + task]; + } + else + { + ntobesentA = toGo[task * NTask + partner] - toGoSph[task * NTask + partner]; + ntobesentB = toGo[partner * NTask + task] - toGoSph[partner * NTask + task]; + } + + maxsendA = imin(ntobesentA, All.BunchSizeDomain); + maxsendB = imin(ntobesentB, All.BunchSizeDomain); + + do + { + maxsendA_old = maxsendA; + maxsendB_old = maxsendB; + + maxsendA = imin(All.MaxPart - numpartB + maxsendB, maxsendA); + maxsendB = imin(All.MaxPart - numpartA + maxsendA, maxsendB); + } + while((maxsendA != maxsendA_old) || (maxsendB != maxsendB_old)); + + + /* now make also sure that there is enough space for SPH particeles */ + if(sphflag == 1) + { + do + { + maxsendA_old = maxsendA; + maxsendB_old = maxsendB; + + maxsendA = imin(All.MaxPartSph - numpartsphB + maxsendB, maxsendA); + maxsendB = imin(All.MaxPartSph - numpartsphA + maxsendA, maxsendB); + } + while((maxsendA != maxsendA_old) || (maxsendB != maxsendB_old)); + } + + *send = maxsendA; + *recv = maxsendB; +} + + + + +/*! This function exchanges particles between two CPUs according to the + * domain split. In doing this, the memory boundaries which may restrict + * the exhange process are observed. + */ +void domain_exchangeParticles(int partner, int sphflag, int send_count, int recv_count) +{ + int i, no, n, count, rep; + MPI_Status status; + + for(n = 0, count = 0; count < send_count && n < NumPart; n++) + { + if(sphflag) + { + if(P[n].Type != 0) + continue; + } + else + { + if(P[n].Type == 0) + continue; + } + + no = 0; + + while(TopNodes[no].Daughter >= 0) + no = TopNodes[no].Daughter + (Key[n] - TopNodes[no].StartKey) / (TopNodes[no].Size / 8); + + no = TopNodes[no].Leaf; + + if(DomainTask[no] == partner) + { + if(sphflag) /* special reorder routine for SPH particles (need to stay at beginning) */ + { + DomainPartBuf[count] = P[n]; /* copy particle and collect in contiguous memory */ + DomainKeyBuf[count] = Key[n]; + DomainSphBuf[count] = SphP[n]; + + P[n] = P[N_gas - 1]; + P[N_gas - 1] = P[NumPart - 1]; + + Key[n] = Key[N_gas - 1]; + Key[N_gas - 1] = Key[NumPart - 1]; + + SphP[n] = SphP[N_gas - 1]; + + N_gas--; + } + else + { + DomainPartBuf[count] = P[n]; /* copy particle and collect in contiguous memory */ + DomainKeyBuf[count] = Key[n]; + P[n] = P[NumPart - 1]; + Key[n] = Key[NumPart - 1]; + } + + count++; + NumPart--; + n--; + } + } + + if(count != send_count) + { + printf("Houston, we got a problem...\n"); + printf("ThisTask=%d count=%d send_count=%d\n", ThisTask, count, send_count); + endrun(88); + } + + /* transmit */ + + for(rep = 0; rep < 2; rep++) + { + if((rep == 0 && ThisTask < partner) || (rep == 1 && ThisTask > partner)) + { + if(send_count > 0) + { + MPI_Ssend(&DomainPartBuf[0], send_count * sizeof(struct particle_data), MPI_BYTE, partner, + TAG_PDATA, MPI_COMM_WORLD); + + MPI_Ssend(&DomainKeyBuf[0], send_count * sizeof(peanokey), MPI_BYTE, partner, TAG_KEY, + MPI_COMM_WORLD); + + if(sphflag) + MPI_Ssend(&DomainSphBuf[0], send_count * sizeof(struct sph_particle_data), MPI_BYTE, partner, + TAG_SPHDATA, MPI_COMM_WORLD); + } + } + + 
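+      /* Deadlock-free hand-shake: in rep = 0 the lower-ranked task issues
+       * the (synchronous) MPI_Ssend calls above while its partner executes
+       * the matching MPI_Recv calls below; in rep = 1 the roles are
+       * reversed, so each direction of the exchange is completed in turn. */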
if((rep == 1 && ThisTask < partner) || (rep == 0 && ThisTask > partner)) + { + if(recv_count > 0) + { + if(sphflag) + { + if((NumPart - N_gas) > recv_count) + { + for(i = 0; i < recv_count; i++) + { + P[NumPart + i] = P[N_gas + i]; + Key[NumPart + i] = Key[N_gas + i]; + } + } + else + { + for(i = NumPart - 1; i >= N_gas; i--) + { + P[i + recv_count] = P[i]; + Key[i + recv_count] = Key[i]; + } + } + + MPI_Recv(&P[N_gas], recv_count * sizeof(struct particle_data), MPI_BYTE, partner, TAG_PDATA, + MPI_COMM_WORLD, &status); + MPI_Recv(&Key[N_gas], recv_count * sizeof(peanokey), MPI_BYTE, partner, TAG_KEY, + MPI_COMM_WORLD, &status); + MPI_Recv(&SphP[N_gas], recv_count * sizeof(struct sph_particle_data), MPI_BYTE, partner, + TAG_SPHDATA, MPI_COMM_WORLD, &status); + + N_gas += recv_count; + } + else + { + MPI_Recv(&P[NumPart], recv_count * sizeof(struct particle_data), MPI_BYTE, partner, + TAG_PDATA, MPI_COMM_WORLD, &status); + MPI_Recv(&Key[NumPart], recv_count * sizeof(peanokey), MPI_BYTE, partner, + TAG_KEY, MPI_COMM_WORLD, &status); + } + + NumPart += recv_count; + } + } + } +} + +/*! This function determines how many particles that are currently stored + * on the local CPU have to be moved off according to the domain + * decomposition. + */ +void domain_countToGo(void) +{ + int n, no; + + for(n = 0; n < NTask; n++) + { + local_toGo[n] = 0; + local_toGoSph[n] = 0; + } + + for(n = 0; n < NumPart; n++) + { + no = 0; + + while(TopNodes[no].Daughter >= 0) + no = TopNodes[no].Daughter + (Key[n] - TopNodes[no].StartKey) / (TopNodes[no].Size / 8); + + no = TopNodes[no].Leaf; + + if(DomainTask[no] != ThisTask) + { + local_toGo[DomainTask[no]] += 1; + if(P[n].Type == 0) + local_toGoSph[DomainTask[no]] += 1; + } + } + + MPI_Allgather(local_toGo, NTask, MPI_INT, toGo, NTask, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(local_toGoSph, NTask, MPI_INT, toGoSph, NTask, MPI_INT, MPI_COMM_WORLD); +} + + +/*! This function walks the global top tree in order to establish the + * number of leaves it has. These leaves are distributed to different + * processors. + */ +void domain_walktoptree(int no) +{ + int i; + + if(TopNodes[no].Daughter == -1) + { + TopNodes[no].Leaf = NTopleaves; + NTopleaves++; + } + else + { + for(i = 0; i < 8; i++) + domain_walktoptree(TopNodes[no].Daughter + i); + } +} + +/*! This routine bins the particles onto the domain-grid, i.e. it sums up the + * total number of particles and the total amount of work in each of the + * domain-cells. This information forms the basis for the actual decision on + * the adopted domain decomposition. 
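+ *
+ * The per-particle work estimate is (1 + P[n].GravCost) divided by the
+ * particle's current timestep length (Ti_endstep - Ti_begstep) where that
+ * is positive, so particles on short timesteps count as more expensive; the
+ * counts and the work are then summed over all tasks with MPI_Allreduce.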
+ */ +void domain_sumCost(void) +{ + int i, n, no; + double *local_DomainWork; + int *local_DomainCount; + int *local_DomainCountSph; + + local_DomainWork = malloc(NTopnodes * sizeof(double)); + local_DomainCount = malloc(NTopnodes * sizeof(int)); + local_DomainCountSph = malloc(NTopnodes * sizeof(int)); + + + + NTopleaves = 0; + + domain_walktoptree(0); + + for(i = 0; i < NTopleaves; i++) + { + local_DomainWork[i] = 0; + local_DomainCount[i] = 0; + local_DomainCountSph[i] = 0; + } + + if(ThisTask == 0) + printf("NTopleaves= %d\n", NTopleaves); + + for(n = 0; n < NumPart; n++) + { + no = 0; + + while(TopNodes[no].Daughter >= 0) + no = TopNodes[no].Daughter + (Key[n] - TopNodes[no].StartKey) / (TopNodes[no].Size / 8); + + no = TopNodes[no].Leaf; + + if(P[n].Ti_endstep > P[n].Ti_begstep) + local_DomainWork[no] += (1.0 + P[n].GravCost) / (P[n].Ti_endstep - P[n].Ti_begstep); + else + local_DomainWork[no] += (1.0 + P[n].GravCost); + + local_DomainCount[no] += 1; + if(P[n].Type == 0) + local_DomainCountSph[no] += 1; + } + + MPI_Allreduce(local_DomainWork, DomainWork, NTopleaves, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(local_DomainCount, DomainCount, NTopleaves, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(local_DomainCountSph, DomainCountSph, NTopleaves, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + + free(local_DomainCountSph); + free(local_DomainCount); + free(local_DomainWork); +} + + +/*! This routine finds the extent of the global domain grid. + */ +void domain_findExtent(void) +{ + int i, j; + double len, xmin[3], xmax[3], xmin_glob[3], xmax_glob[3]; + + /* determine local extension */ + for(j = 0; j < 3; j++) + { + xmin[j] = MAX_REAL_NUMBER; + xmax[j] = -MAX_REAL_NUMBER; + } + + for(i = 0; i < NumPart; i++) + { + for(j = 0; j < 3; j++) + { + if(xmin[j] > P[i].Pos[j]) + xmin[j] = P[i].Pos[j]; + + if(xmax[j] < P[i].Pos[j]) + xmax[j] = P[i].Pos[j]; + } + } + + MPI_Allreduce(xmin, xmin_glob, 3, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + MPI_Allreduce(xmax, xmax_glob, 3, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + + len = 0; + for(j = 0; j < 3; j++) + if(xmax_glob[j] - xmin_glob[j] > len) + len = xmax_glob[j] - xmin_glob[j]; + + len *= 1.001; + + for(j = 0; j < 3; j++) + { + DomainCenter[j] = 0.5 * (xmin_glob[j] + xmax_glob[j]); + DomainCorner[j] = 0.5 * (xmin_glob[j] + xmax_glob[j]) - 0.5 * len; + } + + DomainLen = len; + DomainFac = 1.0 / len * (((peanokey) 1) << (BITS_PER_DIMENSION)); +} + + +/*! This function constructs the global top-level tree node that is used + * for the domain decomposition. This is done by considering the string of + * Peano-Hilbert keys for all particles, which is recursively chopped off + * in pieces of eight segments until each segment holds at most a certain + * number of particles. 
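+ *
+ * In outline: each local particle is mapped to a Peano-Hilbert key with
+ * peano_hilbert_key((Pos - DomainCorner) * DomainFac, ...), the keys are
+ * sorted, a local top-level tree is refined with domain_topsplit_local(),
+ * and the leaf segments of all tasks are then gathered, sorted by key and
+ * merged into the global top-level tree by domain_topsplit().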
+ */ +void domain_determineTopTree(void) +{ + int i, ntop_local, ntop; + int *ntopnodelist, *ntopoffset; + + for(i = 0; i < NumPart; i++) + { + KeySorted[i] = Key[i] = peano_hilbert_key((P[i].Pos[0] - DomainCorner[0]) * DomainFac, + (P[i].Pos[1] - DomainCorner[1]) * DomainFac, + (P[i].Pos[2] - DomainCorner[2]) * DomainFac, + BITS_PER_DIMENSION); + } + + qsort(KeySorted, NumPart, sizeof(peanokey), domain_compare_key); + + NTopnodes = 1; + TopNodes[0].Daughter = -1; + TopNodes[0].Size = PEANOCELLS; + TopNodes[0].StartKey = 0; + TopNodes[0].Count = NumPart; + TopNodes[0].Pstart = 0; + + domain_topsplit_local(0, 0); + + toplist_local = malloc(NTopnodes * sizeof(struct topnode_exchange)); + + for(i = 0, ntop_local = 0; i < NTopnodes; i++) + { + if(TopNodes[i].Daughter == -1) /* only use leaves */ + { + toplist_local[ntop_local].Startkey = TopNodes[i].StartKey; + toplist_local[ntop_local].Count = TopNodes[i].Count; + ntop_local++; + } + } + + ntopnodelist = malloc(sizeof(int) * NTask); + ntopoffset = malloc(sizeof(int) * NTask); + + MPI_Allgather(&ntop_local, 1, MPI_INT, ntopnodelist, 1, MPI_INT, MPI_COMM_WORLD); + + for(i = 0, ntop = 0, ntopoffset[0] = 0; i < NTask; i++) + { + ntop += ntopnodelist[i]; + if(i > 0) + ntopoffset[i] = ntopoffset[i - 1] + ntopnodelist[i - 1]; + } + + + toplist = malloc(ntop * sizeof(struct topnode_exchange)); + + for(i = 0; i < NTask; i++) + { + ntopnodelist[i] *= sizeof(struct topnode_exchange); + ntopoffset[i] *= sizeof(struct topnode_exchange); + } + + MPI_Allgatherv(toplist_local, ntop_local * sizeof(struct topnode_exchange), MPI_BYTE, + toplist, ntopnodelist, ntopoffset, MPI_BYTE, MPI_COMM_WORLD); + + qsort(toplist, ntop, sizeof(struct topnode_exchange), domain_compare_toplist); + + NTopnodes = 1; + TopNodes[0].Daughter = -1; + TopNodes[0].Size = PEANOCELLS; + TopNodes[0].StartKey = 0; + TopNodes[0].Count = All.TotNumPart; + TopNodes[0].Pstart = 0; + TopNodes[0].Blocks = ntop; + + domain_topsplit(0, 0); + + free(toplist); + free(ntopoffset); + free(ntopnodelist); + free(toplist_local); + +} + + + +/*! This function is responsible for constructing the local top-level + * Peano-Hilbert segments. A segment is cut into 8 pieces recursively + * until the number of particles in the segment has fallen below + * All.TotNumPart / (TOPNODEFACTOR * NTask * NTask). + */ +void domain_topsplit_local(int node, peanokey startkey) +{ + int i, p, sub, bin; + + if(TopNodes[node].Size >= 8) + { + TopNodes[node].Daughter = NTopnodes; + + for(i = 0; i < 8; i++) + { + if(NTopnodes < MAXTOPNODES) + { + sub = TopNodes[node].Daughter + i; + TopNodes[sub].Size = TopNodes[node].Size / 8; + TopNodes[sub].Count = 0; + TopNodes[sub].Daughter = -1; + TopNodes[sub].StartKey = startkey + i * TopNodes[sub].Size; + TopNodes[sub].Pstart = TopNodes[node].Pstart; + + NTopnodes++; + } + else + { + printf("task=%d: We are out of Topnodes. Increasing the constant MAXTOPNODES might help.\n", + ThisTask); + fflush(stdout); + endrun(13213); + } + } + + for(p = TopNodes[node].Pstart; p < TopNodes[node].Pstart + TopNodes[node].Count; p++) + { + bin = (KeySorted[p] - startkey) / (TopNodes[node].Size / 8); + + if(bin < 0 || bin > 7) + { + printf("task=%d: something odd has happened here. 
bin=%d\n", ThisTask, bin); + fflush(stdout); + endrun(13123123); + } + + sub = TopNodes[node].Daughter + bin; + + if(TopNodes[sub].Count == 0) + TopNodes[sub].Pstart = p; + + TopNodes[sub].Count++; + } + + for(i = 0; i < 8; i++) + { + sub = TopNodes[node].Daughter + i; + if(TopNodes[sub].Count > All.TotNumPart / (TOPNODEFACTOR * NTask * NTask)) + domain_topsplit_local(sub, TopNodes[sub].StartKey); + } + } +} + + + +/*! This function is responsible for constructing the global top-level tree + * segments. Starting from a joint list of all local top-level segments, + * in which mulitple occurences of the same spatial segment have been + * combined, a segment is subdivided into 8 pieces recursively until the + * number of particles in each segment has fallen below All.TotNumPart / + * (TOPNODEFACTOR * NTask). + */ +void domain_topsplit(int node, peanokey startkey) +{ + int i, p, sub, bin; + + if(TopNodes[node].Size >= 8) + { + TopNodes[node].Daughter = NTopnodes; + + for(i = 0; i < 8; i++) + { + if(NTopnodes < MAXTOPNODES) + { + sub = TopNodes[node].Daughter + i; + TopNodes[sub].Size = TopNodes[node].Size / 8; + TopNodes[sub].Count = 0; + TopNodes[sub].Blocks = 0; + TopNodes[sub].Daughter = -1; + TopNodes[sub].StartKey = startkey + i * TopNodes[sub].Size; + TopNodes[sub].Pstart = TopNodes[node].Pstart; + NTopnodes++; + } + else + { + printf("Task=%d: We are out of Topnodes. Increasing the constant MAXTOPNODES might help.\n", + ThisTask); + fflush(stdout); + endrun(137213); + } + } + + for(p = TopNodes[node].Pstart; p < TopNodes[node].Pstart + TopNodes[node].Blocks; p++) + { + bin = (toplist[p].Startkey - startkey) / (TopNodes[node].Size / 8); + sub = TopNodes[node].Daughter + bin; + + if(bin < 0 || bin > 7) + endrun(77); + + if(TopNodes[sub].Blocks == 0) + TopNodes[sub].Pstart = p; + + TopNodes[sub].Count += toplist[p].Count; + TopNodes[sub].Blocks++; + } + + for(i = 0; i < 8; i++) + { + sub = TopNodes[node].Daughter + i; + if(TopNodes[sub].Count > All.TotNumPart / (TOPNODEFACTOR * NTask)) + domain_topsplit(sub, TopNodes[sub].StartKey); + } + } +} + + +/*! This is a comparison kernel used in a sort routine. + */ +int domain_compare_toplist(const void *a, const void *b) +{ + if(((struct topnode_exchange *) a)->Startkey < (((struct topnode_exchange *) b)->Startkey)) + return -1; + + if(((struct topnode_exchange *) a)->Startkey > (((struct topnode_exchange *) b)->Startkey)) + return +1; + + return 0; +} + +/*! This is a comparison kernel used in a sort routine. + */ +int domain_compare_key(const void *a, const void *b) +{ + if(*(peanokey *) a < *(peanokey *) b) + return -1; + + if(*(peanokey *) a > *(peanokey *) b) + return +1; + + return 0; +} diff --git a/src/PyGadget/src/domain.o b/src/PyGadget/src/domain.o new file mode 100644 index 0000000..282cd9f Binary files /dev/null and b/src/PyGadget/src/domain.o differ diff --git a/src/PyGadget/src/domainQ.c b/src/PyGadget/src/domainQ.c new file mode 100644 index 0000000..8a0d624 --- /dev/null +++ b/src/PyGadget/src/domainQ.c @@ -0,0 +1,1141 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +#ifdef PY_INTERFACE + +/*! \file domain.c + * \brief code for domain decomposition + * + * This file contains the code for the domain decomposition of the + * simulation volume. The domains are constructed from disjoint subsets + * of the leaves of a fiducial top-level tree that covers the full + * simulation volume. Domain boundaries hence run along tree-node + * divisions of a fiducial global BH tree. 
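+ * (In this PY_INTERFACE build the file is a Q-suffixed copy of domain.c:
+ * the Q/SphQ arrays and the Q-suffixed globals take the roles of P/SphP and
+ * of their counterparts in the main decomposition.)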
As a result of this method, the + * tree force are in principle strictly independent of the way the domains + * are cut. The domain decomposition can be carried out for an arbitrary + * number of CPUs. Individual domains are not cubical, but spatially + * coherent since the leaves are traversed in a Peano-Hilbert order and + * individual domains form segments along this order. This also ensures + * that each domain has a small surface to volume ratio, which minimizes + * communication. + */ + +#define TOPNODEFACTOR 20.0 + +#define REDUC_FAC 0.98 + + +/*! toGo[task*NTask + partner] gives the number of particles in task 'task' + * that have to go to task 'partner' + */ +static int *toGo, *toGoSph; +static int *local_toGo, *local_toGoSph; +static int *list_NumPart; +static int *list_N_gas; +static int *list_load; +static int *list_loadsph; +static double *list_work; + +static long long maxload, maxloadsph; + +static struct topnode_exchange +{ + peanokey Startkey; + int Count; +} + *toplist, *toplist_local; + + + +/*! This is the main routine for the domain decomposition. It acts as a + * driver routine that allocates various temporary buffers, maps the + * particles back onto the periodic box if needed, and then does the + * domain decomposition, and a final Peano-Hilbert order of all particles + * as a tuning measure. + */ +void domain_DecompositionQ(void) +{ + double t0, t1; + +#ifdef PMGRID + //if(All.PM_Ti_endstep == All.Ti_Current) + { + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPartQ * All.TreeDomainUpdateFrequency; + /* to make sure that we do a domain decomposition before the PM-force is evaluated. + this is needed to make sure that the particles are wrapped into the box */ + } +#endif + + /* Check whether it is really time for a new domain decomposition */ + //if(All.NumForcesSinceLastDomainDecomp > All.TotNumPartQ * All.TreeDomainUpdateFrequency) + { + t0 = second(); + +#ifdef PERIODIC + do_box_wrappingQ(); /* map the particles back onto the box */ +#endif + All.NumForcesSinceLastDomainDecomp = 0; + TreeReconstructFlag = 1; /* ensures that new tree will be constructed */ + + if(ThisTask == 0) + { + printf("domain decompositionQ... \n"); + fflush(stdout); + } + + Key = malloc(sizeof(peanokey) * All.MaxPartQ); + KeySorted = malloc(sizeof(peanokey) * All.MaxPartQ); + + toGo = malloc(sizeof(int) * NTask * NTask); + toGoSph = malloc(sizeof(int) * NTask * NTask); + local_toGo = malloc(sizeof(int) * NTask); + local_toGoSph = malloc(sizeof(int) * NTask); + list_NumPart = malloc(sizeof(int) * NTask); + list_N_gas = malloc(sizeof(int) * NTask); + list_load = malloc(sizeof(int) * NTask); + list_loadsph = malloc(sizeof(int) * NTask); + list_work = malloc(sizeof(double) * NTask); + + MPI_Allgather(&NumPartQ, 1, MPI_INT, list_NumPart, 1, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(&N_gasQ, 1, MPI_INT, list_N_gas, 1, MPI_INT, MPI_COMM_WORLD); + + maxload = All.MaxPartQ * REDUC_FAC; + maxloadsph = All.MaxPartSphQ * REDUC_FAC; + + domain_decomposeQ(); + + free(list_work); + free(list_loadsph); + free(list_load); + free(list_N_gas); + free(list_NumPart); + free(local_toGoSph); + free(local_toGo); + free(toGoSph); + free(toGo); + + + if(ThisTask == 0) + { + printf("domain decomposition done. \n"); + fflush(stdout); + } + + t1 = second(); + All.CPU_Domain += timediff(t0, t1); + +#ifdef PEANOHILBERT + t0 = second(); + peano_hilbert_orderQ(); + t1 = second(); + All.CPU_Peano += timediff(t0, t1); +#endif + + free(KeySorted); + free(Key); + } + +} + + + +/*! 
This function carries out the actual domain decomposition for all + * particle types. It will try to balance the work-load for each domain, + * as estimated based on the Q[i]-GravCost values. The decomposition will + * respect the maximum allowed memory-imbalance given by the value of + * PartAllocFactor. + */ +void domain_decomposeQ(void) +{ + int i, j, status; + int ngrp, task, partner, sendcount, recvcount; + long long sumtogo, sumload; + int maxload, *temp; + double sumwork, maxwork; + + for(i = 0; i < 6; i++) + NtypeLocalQ[i] = 0; + + for(i = 0; i < NumPartQ; i++) + NtypeLocalQ[Q[i].Type]++; + + /* because Ntype[] is of type `long long', we cannot do a simple + * MPI_Allreduce() to sum the total particle numbers + */ + temp = malloc(NTask * 6 * sizeof(int)); + MPI_Allgather(NtypeLocalQ, 6, MPI_INT, temp, 6, MPI_INT, MPI_COMM_WORLD); + for(i = 0; i < 6; i++) + { + NtypeQ[i] = 0; + for(j = 0; j < NTask; j++) + NtypeQ[i] += temp[j * 6 + i]; + } + free(temp); + +#ifndef UNEQUALSOFTENINGS + for(i = 0; i < 6; i++) + if(NtypeQ[i] > 0) + break; + + for(ngrp = i + 1; ngrp < 6; ngrp++) + { + if(NtypeQ[ngrp] > 0) + if(All.SofteningTableQ[ngrp] != All.SofteningTableQ[i]) + { + if(ThisTask == 0) + { + fprintf(stdout, "Code was not compiled with UNEQUALSOFTENINGS, but some of the\n"); + fprintf(stdout, "softening lengths are unequal nevertheless.\n"); + fprintf(stdout, "This is not allowed.\n"); + } + endrun(0); + } + } +#endif + + + /* determine global dimensions of domain grid */ + domain_findExtentQ(); + + domain_determineTopTreeQ(); + + /* determine cost distribution in domain grid */ + domain_sumCostQ(); + + /* find the split of the domain grid recursively */ + status = domain_findSplitQ(0, NTask, 0, NTopleavesQ - 1); + if(status != 0) + { + if(ThisTask == 0) + printf("\nNo domain decomposition that stays within memory bounds is possible.\n"); + endrun(0); + } + + /* now try to improve the work-load balance of the split */ + domain_shiftSplitQ(); + + DomainMyStartQ = DomainStartListQ[ThisTask]; + DomainMyLastQ = DomainEndListQ[ThisTask]; + + if(ThisTask == 0) + { + sumload = maxload = 0; + sumwork = maxwork = 0; + for(i = 0; i < NTask; i++) + { + sumload += list_load[i]; + sumwork += list_work[i]; + + if(list_load[i] > maxload) + maxload = list_load[i]; + + if(list_work[i] > maxwork) + maxwork = list_work[i]; + } + + printf("work-load balance=%g memory-balance=%g\n", + maxwork / (sumwork / NTask), maxload / (((double) sumload) / NTask)); + } + + + /* determine for each cpu how many particles have to be shifted to other cpus */ + domain_countToGoQ(); + + for(i = 0, sumtogo = 0; i < NTask * NTask; i++) + sumtogo += toGo[i]; + + while(sumtogo > 0) + { + if(ThisTask == 0) + { + printf("exchange of %d%09d particles\n", (int) (sumtogo / 1000000000), + (int) (sumtogo % 1000000000)); + fflush(stdout); + } + + for(ngrp = 1; ngrp < (1 << PTask); ngrp++) + { + for(task = 0; task < NTask; task++) + { + partner = task ^ ngrp; + + if(partner < NTask && task < partner) + { + /* treat SPH separately */ + if(All.TotN_gasQ > 0) + { + + domain_findExchangeNumbersQ(task, partner, 1, &sendcount, &recvcount); + + list_NumPart[task] += recvcount - sendcount; + list_NumPart[partner] -= recvcount - sendcount; + list_N_gas[task] += recvcount - sendcount; + list_N_gas[partner] -= recvcount - sendcount; + + toGo[task * NTask + partner] -= sendcount; + toGo[partner * NTask + task] -= recvcount; + toGoSph[task * NTask + partner] -= sendcount; + toGoSph[partner * NTask + task] -= recvcount; + + if(task == ThisTask) /* 
actually carry out the exchange */ + domain_exchangeParticlesQ(partner, 1, sendcount, recvcount); + if(partner == ThisTask) + domain_exchangeParticlesQ(task, 1, recvcount, sendcount); + + } + + domain_findExchangeNumbersQ(task, partner, 0, &sendcount, &recvcount); + + list_NumPart[task] += recvcount - sendcount; + list_NumPart[partner] -= recvcount - sendcount; + + toGo[task * NTask + partner] -= sendcount; + toGo[partner * NTask + task] -= recvcount; + + if(task == ThisTask) /* actually carry out the exchange */ + domain_exchangeParticlesQ(partner, 0, sendcount, recvcount); + if(partner == ThisTask) + domain_exchangeParticlesQ(task, 0, recvcount, sendcount); + } + } + } + + for(i = 0, sumtogo = 0; i < NTask * NTask; i++) + sumtogo += toGo[i]; + } +} + +/*! This function tries to find a split point in a range of cells in the + * domain-grid. The range of cells starts at 'first', and ends at 'last' + * (inclusively). The number of cpus that holds the range is 'ncpu', with + * the first cpu given by 'cpustart'. If more than 2 cpus are to be split, + * the function calls itself recursively. The division tries to achieve a + * best particle-load balance under the constraint that 'maxload' and + * 'maxloadsph' may not be exceeded, and that each cpu holds at least one + * cell from the domaingrid. If such a decomposition cannot be achieved, a + * non-zero error code is returned. + * + * After successful completion, DomainMyStartQ[] and DomainMyLastQ[] contain + * the first and last cell of the domaingrid assigned to the local task + * for the given type. Also, DomainTaskQ[] contains for each cell the task + * it was assigned to. + */ +int domain_findSplitQ(int cpustart, int ncpu, int first, int last) +{ + int i, split, ok_left, ok_right; + long long load, sphload, load_leftOfSplit, sphload_leftOfSplit; + int ncpu_leftOfSplit; + double maxAvgLoad_CurrentSplit, maxAvgLoad_NewSplit; + + + ncpu_leftOfSplit = ncpu / 2; + + for(i = first, load = 0, sphload = 0; i <= last; i++) + { + load += DomainCountQ[i]; + sphload += DomainCountSphQ[i]; + } + + split = first + ncpu_leftOfSplit; + + for(i = first, load_leftOfSplit = sphload_leftOfSplit = 0; i < split; i++) + { + load_leftOfSplit += DomainCountQ[i]; + sphload_leftOfSplit += DomainCountSphQ[i]; + } + + /* find the best split point in terms of work-load balance */ + + while(split < last - (ncpu - ncpu_leftOfSplit - 1) && split > 0) + { + maxAvgLoad_CurrentSplit = + dmax(load_leftOfSplit / ncpu_leftOfSplit, (load - load_leftOfSplit) / (ncpu - ncpu_leftOfSplit)); + + maxAvgLoad_NewSplit = + dmax((load_leftOfSplit + DomainCountQ[split]) / ncpu_leftOfSplit, + (load - load_leftOfSplit - DomainCountQ[split]) / (ncpu - ncpu_leftOfSplit)); + + if(maxAvgLoad_NewSplit <= maxAvgLoad_CurrentSplit) + { + load_leftOfSplit += DomainCountQ[split]; + sphload_leftOfSplit += DomainCountSphQ[split]; + split++; + } + else + break; + } + + + /* we will now have to check whether this solution is possible given the restrictions on the maximum load */ + + for(i = first, load_leftOfSplit = 0, sphload_leftOfSplit = 0; i < split; i++) + { + load_leftOfSplit += DomainCountQ[i]; + sphload_leftOfSplit += DomainCountSphQ[i]; + } + + if(load_leftOfSplit > maxload * ncpu_leftOfSplit || + (load - load_leftOfSplit) > maxload * (ncpu - ncpu_leftOfSplit)) + { + /* we did not find a viable split */ + return -1; + } + + if(sphload_leftOfSplit > maxloadsph * ncpu_leftOfSplit || + (sphload - sphload_leftOfSplit) > maxloadsph * (ncpu - ncpu_leftOfSplit)) + { + /* we did not find a viable split 
*/ + return -1; + } + + if(ncpu_leftOfSplit >= 2) + ok_left = domain_findSplitQ(cpustart, ncpu_leftOfSplit, first, split - 1); + else + ok_left = 0; + + if((ncpu - ncpu_leftOfSplit) >= 2) + ok_right = domain_findSplitQ(cpustart + ncpu_leftOfSplit, ncpu - ncpu_leftOfSplit, split, last); + else + ok_right = 0; + + if(ok_left == 0 && ok_right == 0) + { + /* found a viable split */ + + if(ncpu_leftOfSplit == 1) + { + for(i = first; i < split; i++) + DomainTaskQ[i] = cpustart; + + list_load[cpustart] = load_leftOfSplit; + list_loadsph[cpustart] = sphload_leftOfSplit; + DomainStartListQ[cpustart] = first; + DomainEndListQ[cpustart] = split - 1; + } + + if((ncpu - ncpu_leftOfSplit) == 1) + { + for(i = split; i <= last; i++) + DomainTaskQ[i] = cpustart + ncpu_leftOfSplit; + + list_load[cpustart + ncpu_leftOfSplit] = load - load_leftOfSplit; + list_loadsph[cpustart + ncpu_leftOfSplit] = sphload - sphload_leftOfSplit; + DomainStartListQ[cpustart + ncpu_leftOfSplit] = split; + DomainEndListQ[cpustart + ncpu_leftOfSplit] = last; + } + + return 0; + } + + /* we did not find a viable split */ + return -1; +} + + + +/*! This function tries to improve the domain decomposition found by + * domain_findSplitQ() with respect to work-load balance. To this end, the + * boundaries in the existing domain-split solution (which was found by + * trying to balance the particle load) are shifted as long as this leads + * to better work-load while still remaining within the allowed + * memory-imbalance constraints. + */ +void domain_shiftSplitQ(void) +{ + int i, task, iter = 0, moved; + double maxw, newmaxw; + + for(task = 0; task < NTask; task++) + list_work[task] = 0; + + for(i = 0; i < NTopleavesQ; i++) + list_work[DomainTaskQ[i]] += DomainWorkQ[i]; + + do + { + for(task = 0, moved = 0; task < NTask - 1; task++) + { + maxw = dmax(list_work[task], list_work[task + 1]); + + if(list_work[task] < list_work[task + 1]) + { + newmaxw = dmax(list_work[task] + DomainWorkQ[DomainStartListQ[task + 1]], + list_work[task + 1] - DomainWorkQ[DomainStartListQ[task + 1]]); + if(newmaxw <= maxw) + { + if(list_load[task] + DomainCountQ[DomainStartListQ[task + 1]] <= maxload) + { + if(list_loadsph[task] + DomainCountSphQ[DomainStartListQ[task + 1]] > maxloadsph) + continue; + + /* ok, we can move one domain cell from right to left */ + list_work[task] += DomainWorkQ[DomainStartListQ[task + 1]]; + list_load[task] += DomainCountQ[DomainStartListQ[task + 1]]; + list_loadsph[task] += DomainCountSphQ[DomainStartListQ[task + 1]]; + list_work[task + 1] -= DomainWorkQ[DomainStartListQ[task + 1]]; + list_load[task + 1] -= DomainCountQ[DomainStartListQ[task + 1]]; + list_loadsph[task + 1] -= DomainCountSphQ[DomainStartListQ[task + 1]]; + + DomainTaskQ[DomainStartListQ[task + 1]] = task; + DomainStartListQ[task + 1] += 1; + DomainEndListQ[task] += 1; + + moved++; + } + } + } + else + { + newmaxw = dmax(list_work[task] - DomainWorkQ[DomainEndListQ[task]], + list_work[task + 1] + DomainWorkQ[DomainEndListQ[task]]); + if(newmaxw <= maxw) + { + if(list_load[task + 1] + DomainCountQ[DomainEndListQ[task]] <= maxload) + { + if(list_loadsph[task + 1] + DomainCountSphQ[DomainEndListQ[task]] > maxloadsph) + continue; + + /* ok, we can move one domain cell from left to right */ + list_work[task] -= DomainWorkQ[DomainEndListQ[task]]; + list_load[task] -= DomainCountQ[DomainEndListQ[task]]; + list_loadsph[task] -= DomainCountSphQ[DomainEndListQ[task]]; + list_work[task + 1] += DomainWorkQ[DomainEndListQ[task]]; + list_load[task + 1] += 
DomainCountQ[DomainEndListQ[task]]; + list_loadsph[task + 1] += DomainCountSphQ[DomainEndListQ[task]]; + + DomainTaskQ[DomainEndListQ[task]] = task + 1; + DomainEndListQ[task] -= 1; + DomainStartListQ[task + 1] -= 1; + + moved++; + } + } + + } + } + + iter++; + } + while(moved > 0 && iter < 10 * NTopleavesQ); +} + + +/*! This function counts how many particles have to be exchanged between + * two CPUs according to the domain split. If the CPUs are already quite + * full and hold data from other CPUs as well, not all the particles may + * be exchanged at once. In this case the communication phase has to be + * repeated, until enough of the third-party particles have been moved + * away such that the decomposition can be completed. + */ +void domain_findExchangeNumbersQ(int task, int partner, int sphflag, int *send, int *recv) +{ + int numpartA, numpartsphA, ntobesentA, maxsendA, maxsendA_old; + int numpartB, numpartsphB, ntobesentB, maxsendB, maxsendB_old; + + numpartA = list_NumPart[task]; + numpartsphA = list_N_gas[task]; + + numpartB = list_NumPart[partner]; + numpartsphB = list_N_gas[partner]; + + if(sphflag == 1) + { + ntobesentA = toGoSph[task * NTask + partner]; + ntobesentB = toGoSph[partner * NTask + task]; + } + else + { + ntobesentA = toGo[task * NTask + partner] - toGoSph[task * NTask + partner]; + ntobesentB = toGo[partner * NTask + task] - toGoSph[partner * NTask + task]; + } + + maxsendA = imin(ntobesentA, All.BunchSizeDomain); + maxsendB = imin(ntobesentB, All.BunchSizeDomain); + + do + { + maxsendA_old = maxsendA; + maxsendB_old = maxsendB; + + maxsendA = imin(All.MaxPartQ - numpartB + maxsendB, maxsendA); + maxsendB = imin(All.MaxPartQ - numpartA + maxsendA, maxsendB); + } + while((maxsendA != maxsendA_old) || (maxsendB != maxsendB_old)); + + + /* now make also sure that there is enough space for SPH particeles */ + if(sphflag == 1) + { + do + { + maxsendA_old = maxsendA; + maxsendB_old = maxsendB; + + maxsendA = imin(All.MaxPartSphQ - numpartsphB + maxsendB, maxsendA); + maxsendB = imin(All.MaxPartSphQ - numpartsphA + maxsendA, maxsendB); + } + while((maxsendA != maxsendA_old) || (maxsendB != maxsendB_old)); + } + + *send = maxsendA; + *recv = maxsendB; +} + + + + +/*! This function exchanges particles between two CPUs according to the + * domain split. In doing this, the memory boundaries which may restrict + * the exhange process are observed. 
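+ *
+ * As an illustration only (not part of the original source), the deadlock-free
+ * hand-shake used in the function body can be condensed to the following
+ * sketch, with the buffers and sizes abbreviated:
+ *
+ *   for(rep = 0; rep < 2; rep++)
+ *     if((rep == 0) == (ThisTask < partner))
+ *       MPI_Ssend(sendbuf, send_count * size, MPI_BYTE, partner, TAG_PDATA, MPI_COMM_WORLD);
+ *     else
+ *       MPI_Recv(recvbuf, recv_count * size, MPI_BYTE, partner, TAG_PDATA, MPI_COMM_WORLD, &status);
+ *
+ * i.e. the lower-ranked task sends first and receives second, while the
+ * higher-ranked partner does the opposite, so the two blocking calls never
+ * wait on each other.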
+ */ +void domain_exchangeParticlesQ(int partner, int sphflag, int send_count, int recv_count) +{ + int i, no, n, count, rep; + MPI_Status status; + + for(n = 0, count = 0; count < send_count && n < NumPartQ; n++) + { + if(sphflag) + { + if(Q[n].Type != 0) + continue; + } + else + { + if(Q[n].Type == 0) + continue; + } + + no = 0; + + while(TopNodesQ[no].Daughter >= 0) + no = TopNodesQ[no].Daughter + (Key[n] - TopNodesQ[no].StartKey) / (TopNodesQ[no].Size / 8); + + no = TopNodesQ[no].Leaf; + + if(DomainTaskQ[no] == partner) + { + if(sphflag) /* special reorder routine for SPH particles (need to stay at beginning) */ + { + DomainPartBufQ[count] = Q[n]; /* copy particle and collect in contiguous memory */ + DomainKeyBufQ[count] = Key[n]; + DomainSphBufQ[count] = SphQ[n]; + + Q[n] = Q[N_gasQ - 1]; + Q[N_gasQ - 1] = Q[NumPartQ - 1]; + + Key[n] = Key[N_gasQ - 1]; + Key[N_gasQ - 1] = Key[NumPartQ - 1]; + + SphQ[n] = SphQ[N_gasQ - 1]; + + N_gasQ--; + } + else + { + DomainPartBufQ[count] = Q[n]; /* copy particle and collect in contiguous memory */ + DomainKeyBufQ[count] = Key[n]; + Q[n] = Q[NumPartQ - 1]; + Key[n] = Key[NumPartQ - 1]; + } + + count++; + NumPartQ--; + n--; + } + } + + if(count != send_count) + { + printf("Houston, we got a problem...\n"); + printf("ThisTask=%d count=%d send_count=%d\n", ThisTask, count, send_count); + endrun(88); + } + + /* transmit */ + + for(rep = 0; rep < 2; rep++) + { + if((rep == 0 && ThisTask < partner) || (rep == 1 && ThisTask > partner)) + { + if(send_count > 0) + { + MPI_Ssend(&DomainPartBufQ[0], send_count * sizeof(struct particle_data), MPI_BYTE, partner, + TAG_PDATA, MPI_COMM_WORLD); + + MPI_Ssend(&DomainKeyBufQ[0], send_count * sizeof(peanokey), MPI_BYTE, partner, TAG_KEY, + MPI_COMM_WORLD); + + if(sphflag) + MPI_Ssend(&DomainSphBufQ[0], send_count * sizeof(struct sph_particle_data), MPI_BYTE, partner, + TAG_SPHDATA, MPI_COMM_WORLD); + } + } + + if((rep == 1 && ThisTask < partner) || (rep == 0 && ThisTask > partner)) + { + if(recv_count > 0) + { + if(sphflag) + { + if((NumPartQ - N_gasQ) > recv_count) + { + for(i = 0; i < recv_count; i++) + { + Q[NumPartQ + i] = Q[N_gasQ + i]; + Key[NumPartQ + i] = Key[N_gasQ + i]; + } + } + else + { + for(i = NumPartQ - 1; i >= N_gasQ; i--) + { + Q[i + recv_count] = Q[i]; + Key[i + recv_count] = Key[i]; + } + } + + MPI_Recv(&Q[N_gasQ], recv_count * sizeof(struct particle_data), MPI_BYTE, partner, TAG_PDATA, + MPI_COMM_WORLD, &status); + MPI_Recv(&Key[N_gasQ], recv_count * sizeof(peanokey), MPI_BYTE, partner, TAG_KEY, + MPI_COMM_WORLD, &status); + MPI_Recv(&SphQ[N_gasQ], recv_count * sizeof(struct sph_particle_data), MPI_BYTE, partner, + TAG_SPHDATA, MPI_COMM_WORLD, &status); + + N_gasQ += recv_count; + } + else + { + MPI_Recv(&Q[NumPartQ], recv_count * sizeof(struct particle_data), MPI_BYTE, partner, + TAG_PDATA, MPI_COMM_WORLD, &status); + MPI_Recv(&Key[NumPartQ], recv_count * sizeof(peanokey), MPI_BYTE, partner, + TAG_KEY, MPI_COMM_WORLD, &status); + } + + NumPartQ += recv_count; + } + } + } +} + +/*! This function determines how many particles that are currently stored + * on the local CPU have to be moved off according to the domain + * decomposition. 
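+ *
+ * The target task of a particle follows from walking the global top-level
+ * tree with its Peano-Hilbert key, exactly as done in the loop below:
+ *
+ *   no = 0;
+ *   while(TopNodesQ[no].Daughter >= 0)
+ *     no = TopNodesQ[no].Daughter
+ *        + (Key[n] - TopNodesQ[no].StartKey) / (TopNodesQ[no].Size / 8);
+ *   target = DomainTaskQ[TopNodesQ[no].Leaf];   // 'target' is a name used only in this sketch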
+ */ +void domain_countToGoQ(void) +{ + int n, no; + + for(n = 0; n < NTask; n++) + { + local_toGo[n] = 0; + local_toGoSph[n] = 0; + } + + for(n = 0; n < NumPartQ; n++) + { + no = 0; + + while(TopNodesQ[no].Daughter >= 0) + no = TopNodesQ[no].Daughter + (Key[n] - TopNodesQ[no].StartKey) / (TopNodesQ[no].Size / 8); + + no = TopNodesQ[no].Leaf; + + if(DomainTaskQ[no] != ThisTask) + { + local_toGo[DomainTaskQ[no]] += 1; + if(Q[n].Type == 0) + local_toGoSph[DomainTaskQ[no]] += 1; + } + } + + MPI_Allgather(local_toGo, NTask, MPI_INT, toGo, NTask, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(local_toGoSph, NTask, MPI_INT, toGoSph, NTask, MPI_INT, MPI_COMM_WORLD); +} + + +/*! This function walks the global top tree in order to establish the + * number of leaves it has. These leaves are distributed to different + * processors. + */ +void domain_walktoptreeQ(int no) +{ + int i; + + if(TopNodesQ[no].Daughter == -1) + { + TopNodesQ[no].Leaf = NTopleavesQ; + NTopleavesQ++; + } + else + { + for(i = 0; i < 8; i++) + domain_walktoptreeQ(TopNodesQ[no].Daughter + i); + } +} + +/*! This routine bins the particles onto the domain-grid, i.e. it sums up the + * total number of particles and the total amount of work in each of the + * domain-cells. This information forms the basis for the actual decision on + * the adopted domain decomposition. + */ +void domain_sumCostQ(void) +{ + int i, n, no; + double *local_DomainWorkQ; + int *local_DomainCountQ; + int *local_DomainCountSphQ; + + local_DomainWorkQ = malloc(NTopnodesQ * sizeof(double)); + local_DomainCountQ = malloc(NTopnodesQ * sizeof(int)); + local_DomainCountSphQ = malloc(NTopnodesQ * sizeof(int)); + + + + NTopleavesQ = 0; + + domain_walktoptreeQ(0); + + for(i = 0; i < NTopleavesQ; i++) + { + local_DomainWorkQ[i] = 0; + local_DomainCountQ[i] = 0; + local_DomainCountSphQ[i] = 0; + } + + if(ThisTask == 0) + printf("NTopleavesQ= %d\n", NTopleavesQ); + + for(n = 0; n < NumPartQ; n++) + { + no = 0; + + while(TopNodesQ[no].Daughter >= 0) + no = TopNodesQ[no].Daughter + (Key[n] - TopNodesQ[no].StartKey) / (TopNodesQ[no].Size / 8); + + no = TopNodesQ[no].Leaf; + + if(Q[n].Ti_endstep > Q[n].Ti_begstep) + local_DomainWorkQ[no] += (1.0 + Q[n].GravCost) / (Q[n].Ti_endstep - Q[n].Ti_begstep); + else + local_DomainWorkQ[no] += (1.0 + Q[n].GravCost); + + local_DomainCountQ[no] += 1; + if(Q[n].Type == 0) + local_DomainCountSphQ[no] += 1; + } + + MPI_Allreduce(local_DomainWorkQ, DomainWorkQ, NTopleavesQ, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(local_DomainCountQ, DomainCountQ, NTopleavesQ, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(local_DomainCountSphQ, DomainCountSphQ, NTopleavesQ, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + + free(local_DomainCountSphQ); + free(local_DomainCountQ); + free(local_DomainWorkQ); +} + + +/*! This routine finds the extent of the global domain grid. 
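+ *
+ * The resulting DomainCornerQ[], DomainLenQ and DomainFacQ fix how positions
+ * are mapped to integer coordinates in [0, 2^BITS_PER_DIMENSION) for the
+ * Peano-Hilbert keys, as used later in domain_determineTopTreeQ():
+ *
+ *   Key[i] = peano_hilbert_key((Q[i].Pos[0] - DomainCornerQ[0]) * DomainFacQ,
+ *                              (Q[i].Pos[1] - DomainCornerQ[1]) * DomainFacQ,
+ *                              (Q[i].Pos[2] - DomainCornerQ[2]) * DomainFacQ,
+ *                              BITS_PER_DIMENSION);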
+ */ +void domain_findExtentQ(void) +{ + int i, j; + double len, xmin[3], xmax[3], xmin_glob[3], xmax_glob[3]; + + /* determine local extension */ + for(j = 0; j < 3; j++) + { + xmin[j] = MAX_REAL_NUMBER; + xmax[j] = -MAX_REAL_NUMBER; + } + + for(i = 0; i < NumPartQ; i++) + { + for(j = 0; j < 3; j++) + { + if(xmin[j] > Q[i].Pos[j]) + xmin[j] = Q[i].Pos[j]; + + if(xmax[j] < Q[i].Pos[j]) + xmax[j] = Q[i].Pos[j]; + } + } + + MPI_Allreduce(xmin, xmin_glob, 3, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + MPI_Allreduce(xmax, xmax_glob, 3, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + + len = 0; + for(j = 0; j < 3; j++) + if(xmax_glob[j] - xmin_glob[j] > len) + len = xmax_glob[j] - xmin_glob[j]; + + len *= 1.001; + + for(j = 0; j < 3; j++) + { + DomainCenterQ[j] = 0.5 * (xmin_glob[j] + xmax_glob[j]); + DomainCornerQ[j] = 0.5 * (xmin_glob[j] + xmax_glob[j]) - 0.5 * len; + } + + DomainLenQ = len; + DomainFacQ = 1.0 / len * (((peanokey) 1) << (BITS_PER_DIMENSION)); + + + /* + ici il faudrait plutot tester que tous les Q set P sont dans le meme domaine ... + + il faut faire ça ailleurs.... + + */ + + + + + + if (ThisTask==0) + if (DomainLenQ!=DomainLen) + printf("!!!!!!!!!!!!!!!! warning domains are differents !!!!!\n"); + + + /* + for(j = 0; j < 3; j++) + { + DomainCenterQ[j] = DomainCenter[j]; + DomainCornerQ[j] = DomainCorner[j]; + } + + DomainLenQ = DomainLen; + DomainFacQ = DomainFac; + + */ + + +} + + +/*! This function constructs the global top-level tree node that is used + * for the domain decomposition. This is done by considering the string of + * Peano-Hilbert keys for all particles, which is recursively chopped off + * in pieces of eight segments until each segment holds at most a certain + * number of particles. + */ +void domain_determineTopTreeQ(void) +{ + int i, ntop_local, ntop; + int *ntopnodelist, *ntopoffset; + + for(i = 0; i < NumPartQ; i++) + { + KeySorted[i] = Key[i] = peano_hilbert_key((Q[i].Pos[0] - DomainCornerQ[0]) * DomainFacQ, + (Q[i].Pos[1] - DomainCornerQ[1]) * DomainFacQ, + (Q[i].Pos[2] - DomainCornerQ[2]) * DomainFacQ, + BITS_PER_DIMENSION); + } + + qsort(KeySorted, NumPartQ, sizeof(peanokey), domain_compare_key); + + NTopnodesQ = 1; + TopNodesQ[0].Daughter = -1; + TopNodesQ[0].Size = PEANOCELLS; + TopNodesQ[0].StartKey = 0; + TopNodesQ[0].Count = NumPartQ; + TopNodesQ[0].Pstart = 0; + + domain_topsplit_localQ(0, 0); + + toplist_local = malloc(NTopnodesQ * sizeof(struct topnode_exchange)); + + for(i = 0, ntop_local = 0; i < NTopnodesQ; i++) + { + if(TopNodesQ[i].Daughter == -1) /* only use leaves */ + { + toplist_local[ntop_local].Startkey = TopNodesQ[i].StartKey; + toplist_local[ntop_local].Count = TopNodesQ[i].Count; + ntop_local++; + } + } + + ntopnodelist = malloc(sizeof(int) * NTask); + ntopoffset = malloc(sizeof(int) * NTask); + + MPI_Allgather(&ntop_local, 1, MPI_INT, ntopnodelist, 1, MPI_INT, MPI_COMM_WORLD); + + for(i = 0, ntop = 0, ntopoffset[0] = 0; i < NTask; i++) + { + ntop += ntopnodelist[i]; + if(i > 0) + ntopoffset[i] = ntopoffset[i - 1] + ntopnodelist[i - 1]; + } + + + toplist = malloc(ntop * sizeof(struct topnode_exchange)); + + for(i = 0; i < NTask; i++) + { + ntopnodelist[i] *= sizeof(struct topnode_exchange); + ntopoffset[i] *= sizeof(struct topnode_exchange); + } + + MPI_Allgatherv(toplist_local, ntop_local * sizeof(struct topnode_exchange), MPI_BYTE, + toplist, ntopnodelist, ntopoffset, MPI_BYTE, MPI_COMM_WORLD); + + qsort(toplist, ntop, sizeof(struct topnode_exchange), domain_compare_toplist); + + NTopnodesQ = 1; + TopNodesQ[0].Daughter = -1; + 
TopNodesQ[0].Size = PEANOCELLS; + TopNodesQ[0].StartKey = 0; + TopNodesQ[0].Count = All.TotNumPartQ; + TopNodesQ[0].Pstart = 0; + TopNodesQ[0].Blocks = ntop; + + domain_topsplitQ(0, 0); + + free(toplist); + free(ntopoffset); + free(ntopnodelist); + free(toplist_local); + +} + + + +/*! This function is responsible for constructing the local top-level + * Peano-Hilbert segments. A segment is cut into 8 pieces recursively + * until the number of particles in the segment has fallen below + * All.TotNumPartQ / (TOPNODEFACTOR * NTask * NTask). + */ +void domain_topsplit_localQ(int node, peanokey startkey) +{ + int i, p, sub, bin; + + if(TopNodesQ[node].Size >= 8) + { + TopNodesQ[node].Daughter = NTopnodesQ; + + for(i = 0; i < 8; i++) + { + if(NTopnodesQ < MAXTOPNODES) + { + sub = TopNodesQ[node].Daughter + i; + TopNodesQ[sub].Size = TopNodesQ[node].Size / 8; + TopNodesQ[sub].Count = 0; + TopNodesQ[sub].Daughter = -1; + TopNodesQ[sub].StartKey = startkey + i * TopNodesQ[sub].Size; + TopNodesQ[sub].Pstart = TopNodesQ[node].Pstart; + + NTopnodesQ++; + } + else + { + printf("task=%d: We are out of Topnodes. Increasing the constant MAXTOPNODES might help.\n", + ThisTask); + fflush(stdout); + endrun(13213); + } + } + + for(p = TopNodesQ[node].Pstart; p < TopNodesQ[node].Pstart + TopNodesQ[node].Count; p++) + { + bin = (KeySorted[p] - startkey) / (TopNodesQ[node].Size / 8); + + if(bin < 0 || bin > 7) + { + printf("task=%d: something odd has happened here. bin=%d\n", ThisTask, bin); + fflush(stdout); + endrun(13123123); + } + + sub = TopNodesQ[node].Daughter + bin; + + if(TopNodesQ[sub].Count == 0) + TopNodesQ[sub].Pstart = p; + + TopNodesQ[sub].Count++; + } + + for(i = 0; i < 8; i++) + { + sub = TopNodesQ[node].Daughter + i; + if(TopNodesQ[sub].Count > All.TotNumPartQ / (TOPNODEFACTOR * NTask * NTask)) + domain_topsplit_localQ(sub, TopNodesQ[sub].StartKey); + } + } +} + + + +/*! This function is responsible for constructing the global top-level tree + * segments. Starting from a joint list of all local top-level segments, + * in which mulitple occurences of the same spatial segment have been + * combined, a segment is subdivided into 8 pieces recursively until the + * number of particles in each segment has fallen below All.TotNumPartQ / + * (TOPNODEFACTOR * NTask). + */ +void domain_topsplitQ(int node, peanokey startkey) +{ + int i, p, sub, bin; + + if(TopNodesQ[node].Size >= 8) + { + TopNodesQ[node].Daughter = NTopnodesQ; + + for(i = 0; i < 8; i++) + { + if(NTopnodesQ < MAXTOPNODES) + { + sub = TopNodesQ[node].Daughter + i; + TopNodesQ[sub].Size = TopNodesQ[node].Size / 8; + TopNodesQ[sub].Count = 0; + TopNodesQ[sub].Blocks = 0; + TopNodesQ[sub].Daughter = -1; + TopNodesQ[sub].StartKey = startkey + i * TopNodesQ[sub].Size; + TopNodesQ[sub].Pstart = TopNodesQ[node].Pstart; + NTopnodesQ++; + } + else + { + printf("Task=%d: We are out of Topnodes. 
Increasing the constant MAXTOPNODES might help.\n", + ThisTask); + fflush(stdout); + endrun(137213); + } + } + + for(p = TopNodesQ[node].Pstart; p < TopNodesQ[node].Pstart + TopNodesQ[node].Blocks; p++) + { + bin = (toplist[p].Startkey - startkey) / (TopNodesQ[node].Size / 8); + sub = TopNodesQ[node].Daughter + bin; + + if(bin < 0 || bin > 7) + endrun(77); + + if(TopNodesQ[sub].Blocks == 0) + TopNodesQ[sub].Pstart = p; + + TopNodesQ[sub].Count += toplist[p].Count; + TopNodesQ[sub].Blocks++; + } + + for(i = 0; i < 8; i++) + { + sub = TopNodesQ[node].Daughter + i; + if(TopNodesQ[sub].Count > All.TotNumPartQ / (TOPNODEFACTOR * NTask)) + domain_topsplitQ(sub, TopNodesQ[sub].StartKey); + } + } +} + + + + +#endif + diff --git a/src/PyGadget/src/domainQ.o b/src/PyGadget/src/domainQ.o new file mode 100644 index 0000000..d4d78dc Binary files /dev/null and b/src/PyGadget/src/domainQ.o differ diff --git a/src/PyGadget/src/driftfac.c b/src/PyGadget/src/driftfac.c new file mode 100644 index 0000000..0b38342 --- /dev/null +++ b/src/PyGadget/src/driftfac.c @@ -0,0 +1,224 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + +/*! \file driftfac.c + * \brief compute loop-up tables for prefactors in cosmological integration + */ + +static double logTimeBegin; +static double logTimeMax; + + +/*! This function computes look-up tables for factors needed in + * cosmological integrations. The (simple) integrations are carried out + * with the GSL library. Separate factors are computed for the "drift", + * and the gravitational and hydrodynamical "kicks". The lookup-table is + * used for reasons of speed. + */ +void init_drift_table(void) +{ +#define WORKSIZE 100000 + int i; + double result, abserr; + gsl_function F; + gsl_integration_workspace *workspace; + + logTimeBegin = log(All.TimeBegin); + logTimeMax = log(All.TimeMax); + + workspace = gsl_integration_workspace_alloc(WORKSIZE); + + for(i = 0; i < DRIFT_TABLE_LENGTH; i++) + { + F.function = &drift_integ; + gsl_integration_qag(&F, exp(logTimeBegin), exp(logTimeBegin + ((logTimeMax - logTimeBegin) / DRIFT_TABLE_LENGTH) * (i + 1)), All.Hubble, /* note: absolute error just a dummy */ + 1.0e-8, WORKSIZE, GSL_INTEG_GAUSS41, workspace, &result, &abserr); + DriftTable[i] = result; + + + F.function = &gravkick_integ; + gsl_integration_qag(&F, exp(logTimeBegin), exp(logTimeBegin + ((logTimeMax - logTimeBegin) / DRIFT_TABLE_LENGTH) * (i + 1)), All.Hubble, /* note: absolute error just a dummy */ + 1.0e-8, WORKSIZE, GSL_INTEG_GAUSS41, workspace, &result, &abserr); + GravKickTable[i] = result; + + + F.function = &hydrokick_integ; + gsl_integration_qag(&F, exp(logTimeBegin), exp(logTimeBegin + ((logTimeMax - logTimeBegin) / DRIFT_TABLE_LENGTH) * (i + 1)), All.Hubble, /* note: absolute error just a dummy */ + 1.0e-8, WORKSIZE, GSL_INTEG_GAUSS41, workspace, &result, &abserr); + HydroKickTable[i] = result; + } + + gsl_integration_workspace_free(workspace); +} + + +/*! This function integrates the cosmological prefactor for a drift step + * between time0 and time1. 
The value returned is * \f[ \int_{a_0}^{a_1} + * \frac{{\rm d}a}{H(a)} * \f] + */ +double get_drift_factor(int time0, int time1) +{ + double a1, a2, df1, df2, u1, u2; + int i1, i2; + + /* note: will only be called for cosmological integration */ + + a1 = logTimeBegin + time0 * All.Timebase_interval; + a2 = logTimeBegin + time1 * All.Timebase_interval; + + u1 = (a1 - logTimeBegin) / (logTimeMax - logTimeBegin) * DRIFT_TABLE_LENGTH; + i1 = (int) u1; + if(i1 >= DRIFT_TABLE_LENGTH) + i1 = DRIFT_TABLE_LENGTH - 1; + + if(i1 <= 1) + df1 = u1 * DriftTable[0]; + else + df1 = DriftTable[i1 - 1] + (DriftTable[i1] - DriftTable[i1 - 1]) * (u1 - i1); + + + u2 = (a2 - logTimeBegin) / (logTimeMax - logTimeBegin) * DRIFT_TABLE_LENGTH; + i2 = (int) u2; + if(i2 >= DRIFT_TABLE_LENGTH) + i2 = DRIFT_TABLE_LENGTH - 1; + + if(i2 <= 1) + df2 = u2 * DriftTable[0]; + else + df2 = DriftTable[i2 - 1] + (DriftTable[i2] - DriftTable[i2 - 1]) * (u2 - i2); + + return df2 - df1; +} + + +/*! This function integrates the cosmological prefactor for a kick step of + * the gravitational force. + */ +double get_gravkick_factor(int time0, int time1) +{ + double a1, a2, df1, df2, u1, u2; + int i1, i2; + + /* note: will only be called for cosmological integration */ + + a1 = logTimeBegin + time0 * All.Timebase_interval; + a2 = logTimeBegin + time1 * All.Timebase_interval; + + u1 = (a1 - logTimeBegin) / (logTimeMax - logTimeBegin) * DRIFT_TABLE_LENGTH; + i1 = (int) u1; + if(i1 >= DRIFT_TABLE_LENGTH) + i1 = DRIFT_TABLE_LENGTH - 1; + + if(i1 <= 1) + df1 = u1 * GravKickTable[0]; + else + df1 = GravKickTable[i1 - 1] + (GravKickTable[i1] - GravKickTable[i1 - 1]) * (u1 - i1); + + + u2 = (a2 - logTimeBegin) / (logTimeMax - logTimeBegin) * DRIFT_TABLE_LENGTH; + i2 = (int) u2; + if(i2 >= DRIFT_TABLE_LENGTH) + i2 = DRIFT_TABLE_LENGTH - 1; + + if(i2 <= 1) + df2 = u2 * GravKickTable[0]; + else + df2 = GravKickTable[i2 - 1] + (GravKickTable[i2] - GravKickTable[i2 - 1]) * (u2 - i2); + + return df2 - df1; +} + +/*! This function integrates the cosmological prefactor for a kick step of + * the hydrodynamical force. + */ +double get_hydrokick_factor(int time0, int time1) +{ + double a1, a2, df1, df2, u1, u2; + int i1, i2; + + /* note: will only be called for cosmological integration */ + + a1 = logTimeBegin + time0 * All.Timebase_interval; + a2 = logTimeBegin + time1 * All.Timebase_interval; + + u1 = (a1 - logTimeBegin) / (logTimeMax - logTimeBegin) * DRIFT_TABLE_LENGTH; + i1 = (int) u1; + if(i1 >= DRIFT_TABLE_LENGTH) + i1 = DRIFT_TABLE_LENGTH - 1; + + if(i1 <= 1) + df1 = u1 * HydroKickTable[0]; + else + df1 = HydroKickTable[i1 - 1] + (HydroKickTable[i1] - HydroKickTable[i1 - 1]) * (u1 - i1); + + + u2 = (a2 - logTimeBegin) / (logTimeMax - logTimeBegin) * DRIFT_TABLE_LENGTH; + i2 = (int) u2; + if(i2 >= DRIFT_TABLE_LENGTH) + i2 = DRIFT_TABLE_LENGTH - 1; + + if(i2 <= 1) + df2 = u2 * HydroKickTable[0]; + else + df2 = HydroKickTable[i2 - 1] + (HydroKickTable[i2] - HydroKickTable[i2 - 1]) * (u2 - i2); + + return df2 - df1; +} + + +/*! Integration kernel for drift factor computation. + */ +double drift_integ(double a, void *param) +{ + double h; + + h = All.Omega0 / (a * a * a) + (1 - All.Omega0 - All.OmegaLambda) / (a * a) + All.OmegaLambda; + h = All.Hubble * sqrt(h); + + return 1 / (h * a * a * a); +} + +/*! Integration kernel for gravitational kick factor computation. 
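+ * The corresponding kick factor tabulated in GravKickTable[] is
+ * \f[ \int \frac{{\rm d}a}{H(a)\, a^2} \f]
+ * which is what the integrand returned below evaluates to.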
+ */ +double gravkick_integ(double a, void *param) +{ + double h; + + h = All.Omega0 / (a * a * a) + (1 - All.Omega0 - All.OmegaLambda) / (a * a) + All.OmegaLambda; + h = All.Hubble * sqrt(h); + + return 1 / (h * a * a); +} + + +/*! Integration kernel for hydrodynamical kick factor computation. + */ +double hydrokick_integ(double a, void *param) +{ + double h; + + h = All.Omega0 / (a * a * a) + (1 - All.Omega0 - All.OmegaLambda) / (a * a) + All.OmegaLambda; + h = All.Hubble * sqrt(h); + + return 1 / (h * pow(a, 3 * GAMMA_MINUS1) * a); +} + +double growthfactor_integ(double a, void *param) +{ + double s; + + s = All.Omega0 + (1 - All.Omega0 - All.OmegaLambda) * a + All.OmegaLambda * a * a * a; + s = sqrt(s); + + return pow(sqrt(a) / s, 3); +} + + diff --git a/src/PyGadget/src/driftfac.o b/src/PyGadget/src/driftfac.o new file mode 100644 index 0000000..c84947b Binary files /dev/null and b/src/PyGadget/src/driftfac.o differ diff --git a/src/PyGadget/src/endrun.c b/src/PyGadget/src/endrun.c new file mode 100644 index 0000000..3070a7e --- /dev/null +++ b/src/PyGadget/src/endrun.c @@ -0,0 +1,43 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file endrun.c + * \brief Termination of simulation + * + * This file contains routines for termination of the simulation. + */ + +/*! This function aborts the simulations. If a single processors wants an + * immediate termination, the function needs to be called with ierr>0. A + * bunch of MPI-error messages may also appear in this case. For ierr=0, + * MPI is gracefully cleaned up, but this requires that all processors + * call endrun(). + */ +void endrun(int ierr) +{ + if(ierr) + { + printf("task %d: endrun called with an error level of %d\n\n\n", ThisTask, ierr); + fflush(stdout); +#ifdef DEBUG + terminate_processes(); + raise(SIGABRT); + sleep(60); +#else + MPI_Abort(MPI_COMM_WORLD, ierr); +#endif + exit(0); + } + + MPI_Finalize(); + exit(0); +} diff --git a/src/PyGadget/src/endrun.o b/src/PyGadget/src/endrun.o new file mode 100644 index 0000000..74f1341 Binary files /dev/null and b/src/PyGadget/src/endrun.o differ diff --git a/src/PyGadget/src/forcetree.c b/src/PyGadget/src/forcetree.c new file mode 100644 index 0000000..88bf8d8 --- /dev/null +++ b/src/PyGadget/src/forcetree.c @@ -0,0 +1,4486 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file forcetree.c + * \brief gravitational tree and code for Ewald correction + * + * This file contains the computation of the gravitational force by means + * of a tree. The type of tree implemented is a geometrical oct-tree, + * starting from a cube encompassing all particles. This cube is + * automatically found in the domain decomposition, which also splits up + * the global "top-level" tree along node boundaries, moving the particles + * of different parts of the tree to separate processors. Tree nodes can + * be dynamically updated in drift/kick operations to avoid having to + * reconstruct the tree every timestep. + */ + +/*! auxialiary variable used to set-up non-recursive walk */ +static int last; + + + +/*! length of lock-up table for short-range force kernel in TreePM algorithm */ +#define NTAB 1000 +/*! variables for short-range lookup table */ +static float tabfac, shortrange_table[NTAB], shortrange_table_potential[NTAB]; + +/*! 
toggles after first tree-memory allocation, has only influence on log-files */ +static int first_flag = 0; + + + + +#ifdef PERIODIC +/*! Macro that maps a distance to the nearest periodic neighbour */ +#define NEAREST(x) (((x)>boxhalf)?((x)-boxsize):(((x)<-boxhalf)?((x)+boxsize):(x))) +/*! Size of 3D lock-up table for Ewald correction force */ +#define EN 64 +/*! 3D lock-up table for Ewald correction to force and potential. Only one + * octant is stored, the rest constructed by using the symmetry + */ +static FLOAT fcorrx[EN + 1][EN + 1][EN + 1]; +static FLOAT fcorry[EN + 1][EN + 1][EN + 1]; +static FLOAT fcorrz[EN + 1][EN + 1][EN + 1]; +static FLOAT potcorr[EN + 1][EN + 1][EN + 1]; +static double fac_intp; +#endif + + + +/*! This function is a driver routine for constructing the gravitational + * oct-tree, which is done by calling a small number of other functions. + */ +int force_treebuild(int npart) +{ + Numnodestree = force_treebuild_single(npart); + + force_update_pseudoparticles(); + + force_flag_localnodes(); + + TimeOfLastTreeConstruction = All.Time; + + return Numnodestree; +} + + + +/*! Constructs the gravitational oct-tree. + * + * The index convention for accessing tree nodes is the following: the + * indices 0...NumPart-1 reference single particles, the indices + * All.MaxPart.... All.MaxPart+nodes-1 reference tree nodes. `Nodes_base' + * points to the first tree node, while `nodes' is shifted such that + * nodes[All.MaxPart] gives the first tree node. Finally, node indices + * with values 'All.MaxPart + MaxNodes' and larger indicate "pseudo + * particles", i.e. multipole moments of top-level nodes that lie on + * different CPUs. If such a node needs to be opened, the corresponding + * particle must be exported to that CPU. The 'Extnodes' structure + * parallels that of 'Nodes'. Its information is only needed for the SPH + * part of the computation. (The data is split onto these two structures + * as a tuning measure. If it is merged into 'Nodes' a somewhat bigger + * size of the nodes also for gravity would result, which would reduce + * cache utilization slightly. + */ +int force_treebuild_single(int npart) +{ + int i, j, subnode = 0, parent, numnodes; + int nfree, th, nn, no; + struct NODE *nfreep; + double lenhalf, epsilon; + peanokey key; + + + /* create an empty root node */ + nfree = All.MaxPart; /* index of first free node */ + nfreep = &Nodes[nfree]; /* select first node */ + + nfreep->len = DomainLen; + for(j = 0; j < 3; j++) + nfreep->center[j] = DomainCenter[j]; + for(j = 0; j < 8; j++) + nfreep->u.suns[j] = -1; + + + numnodes = 1; + nfreep++; + nfree++; + + /* create a set of empty nodes corresponding to the top-level domain + * grid. 
We need to generate these nodes first to make sure that we have a + * complete top-level tree which allows the easy insertion of the + * pseudo-particles at the right place + */ + + force_create_empty_nodes(All.MaxPart, 0, 1, 0, 0, 0, &numnodes, &nfree); + + + /* if a high-resolution region in a global tree is used, we need to generate + * an additional set empty nodes to make sure that we have a complete + * top-level tree for the high-resolution inset + */ + + nfreep = &Nodes[nfree]; + parent = -1; /* note: will not be used below before it is changed */ + + + /* now we insert all particles */ + for(i = 0; i < npart; i++) + { + + /* the softening is only used to check whether particles are so close + * that the tree needs not to be refined further + */ + epsilon = All.ForceSoftening[P[i].Type]; + + key = peano_hilbert_key((P[i].Pos[0] - DomainCorner[0]) * DomainFac, + (P[i].Pos[1] - DomainCorner[1]) * DomainFac, + (P[i].Pos[2] - DomainCorner[2]) * DomainFac, BITS_PER_DIMENSION); + + no = 0; + while(TopNodes[no].Daughter >= 0) + no = TopNodes[no].Daughter + (key - TopNodes[no].StartKey) / (TopNodes[no].Size / 8); + + no = TopNodes[no].Leaf; + th = DomainNodeIndex[no]; + + while(1) + { + if(th >= All.MaxPart) /* we are dealing with an internal node */ + { + subnode = 0; + if(P[i].Pos[0] > Nodes[th].center[0]) + subnode += 1; + if(P[i].Pos[1] > Nodes[th].center[1]) + subnode += 2; + if(P[i].Pos[2] > Nodes[th].center[2]) + subnode += 4; + + nn = Nodes[th].u.suns[subnode]; + + if(nn >= 0) /* ok, something is in the daughter slot already, need to continue */ + { + parent = th; + th = nn; + } + else + { + /* here we have found an empty slot where we can attach + * the new particle as a leaf. + */ + Nodes[th].u.suns[subnode] = i; + break; /* done for this particle */ + } + } + else + { + /* We try to insert into a leaf with a single particle. Need + * to generate a new internal node at this point. + */ + Nodes[parent].u.suns[subnode] = nfree; + + nfreep->len = 0.5 * Nodes[parent].len; + lenhalf = 0.25 * Nodes[parent].len; + + if(subnode & 1) + nfreep->center[0] = Nodes[parent].center[0] + lenhalf; + else + nfreep->center[0] = Nodes[parent].center[0] - lenhalf; + + if(subnode & 2) + nfreep->center[1] = Nodes[parent].center[1] + lenhalf; + else + nfreep->center[1] = Nodes[parent].center[1] - lenhalf; + + if(subnode & 4) + nfreep->center[2] = Nodes[parent].center[2] + lenhalf; + else + nfreep->center[2] = Nodes[parent].center[2] - lenhalf; + + nfreep->u.suns[0] = -1; + nfreep->u.suns[1] = -1; + nfreep->u.suns[2] = -1; + nfreep->u.suns[3] = -1; + nfreep->u.suns[4] = -1; + nfreep->u.suns[5] = -1; + nfreep->u.suns[6] = -1; + nfreep->u.suns[7] = -1; + + + subnode = 0; + if(P[th].Pos[0] > nfreep->center[0]) + subnode += 1; + if(P[th].Pos[1] > nfreep->center[1]) + subnode += 2; + if(P[th].Pos[2] > nfreep->center[2]) + subnode += 4; +#ifndef NOTREERND + if(nfreep->len < 1.0e-3 * epsilon) + { + /* seems like we're dealing with particles at identical (or extremely close) + * locations. Randomize subnode index to allow tree construction. Note: Multipole moments + * of tree are still correct, but this will only happen well below gravitational softening + * length-scale anyway. 
+ */ + subnode = (int) (8.0 * get_random_number((0xffff & P[i].ID) + P[i].GravCost)); + P[i].GravCost += 1; + if(subnode >= 8) + subnode = 7; + } +#endif + nfreep->u.suns[subnode] = th; + + th = nfree; /* resume trying to insert the new particle at + * the newly created internal node + */ + + numnodes++; + nfree++; + nfreep++; + + if((numnodes) >= MaxNodes) + { + printf("task %d: maximum number %d of tree-nodes reached.\n", ThisTask, MaxNodes); + printf("for particle %d\n", i); + dump_particles(); + endrun(1); + } + } + } + } + + + /* insert the pseudo particles that represent the mass distribution of other domains */ + force_insert_pseudo_particles(); + + + /* now compute the multipole moments recursively */ + last = -1; + + force_update_node_recursive(All.MaxPart, -1, -1); + + if(last >= All.MaxPart) + { + if(last >= All.MaxPart + MaxNodes) /* a pseudo-particle */ + Nextnode[last - MaxNodes] = -1; + else + Nodes[last].u.d.nextnode = -1; + } + else + Nextnode[last] = -1; + + return numnodes; +} + + + +/*! This function recursively creates a set of empty tree nodes which + * corresponds to the top-level tree for the domain grid. This is done to + * ensure that this top-level tree is always "complete" so that we can + * easily associate the pseudo-particles of other CPUs with tree-nodes at + * a given level in the tree, even when the particle population is so + * sparse that some of these nodes are actually empty. +*/ +void force_create_empty_nodes(int no, int topnode, int bits, int x, int y, int z, int *nodecount, + int *nextfree) +{ + int i, j, k, n, sub, count; + + if(TopNodes[topnode].Daughter >= 0) + { + for(i = 0; i < 2; i++) + for(j = 0; j < 2; j++) + for(k = 0; k < 2; k++) + { + sub = 7 & peano_hilbert_key((x << 1) + i, (y << 1) + j, (z << 1) + k, bits); + + count = i + 2 * j + 4 * k; + + Nodes[no].u.suns[count] = *nextfree; + + + Nodes[*nextfree].len = 0.5 * Nodes[no].len; + Nodes[*nextfree].center[0] = Nodes[no].center[0] + (2 * i - 1) * 0.25 * Nodes[no].len; + Nodes[*nextfree].center[1] = Nodes[no].center[1] + (2 * j - 1) * 0.25 * Nodes[no].len; + Nodes[*nextfree].center[2] = Nodes[no].center[2] + (2 * k - 1) * 0.25 * Nodes[no].len; + + for(n = 0; n < 8; n++) + Nodes[*nextfree].u.suns[n] = -1; + + if(TopNodes[TopNodes[topnode].Daughter + sub].Daughter == -1) + DomainNodeIndex[TopNodes[TopNodes[topnode].Daughter + sub].Leaf] = *nextfree; + + *nextfree = *nextfree + 1; + *nodecount = *nodecount + 1; + + if((*nodecount) >= MaxNodes) + { + printf("task %d: maximum number %d of tree-nodes reached.\n", ThisTask, MaxNodes); + printf("in create empty nodes\n"); + dump_particles(); + endrun(11); + } + + force_create_empty_nodes(*nextfree - 1, TopNodes[topnode].Daughter + sub, + bits + 1, 2 * x + i, 2 * y + j, 2 * z + k, nodecount, nextfree); + } + } +} + + + +/*! this function inserts pseudo-particles which will represent the mass + * distribution of the other CPUs. Initially, the mass of the + * pseudo-particles is set to zero, and their coordinate is set to the + * center of the domain-cell they correspond to. These quantities will be + * updated later on. 
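+ *
+ * For reference, the node-index convention used when attaching these
+ * pseudo-particles is (see the description of force_treebuild_single above):
+ *
+ *   index < All.MaxPart                          -> a real particle
+ *   All.MaxPart <= index < All.MaxPart+MaxNodes  -> an internal tree node
+ *   index = All.MaxPart + MaxNodes + i           -> pseudo-particle of top-level leaf i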
+ */ +void force_insert_pseudo_particles(void) +{ + int i, index, subnode, nn, th; + + for(i = 0; i < NTopleaves; i++) + { + index = DomainNodeIndex[i]; + + DomainMoment[i].mass = 0; + DomainMoment[i].s[0] = Nodes[index].center[0]; + DomainMoment[i].s[1] = Nodes[index].center[1]; + DomainMoment[i].s[2] = Nodes[index].center[2]; + } + + for(i = 0; i < NTopleaves; i++) + { + if(i < DomainMyStart || i > DomainMyLast) + { + th = All.MaxPart; /* select index of first node in tree */ + + while(1) + { + if(th >= All.MaxPart) /* we are dealing with an internal node */ + { + if(th >= All.MaxPart + MaxNodes) + endrun(888); /* this can't be */ + + subnode = 0; + if(DomainMoment[i].s[0] > Nodes[th].center[0]) + subnode += 1; + if(DomainMoment[i].s[1] > Nodes[th].center[1]) + subnode += 2; + if(DomainMoment[i].s[2] > Nodes[th].center[2]) + subnode += 4; + + nn = Nodes[th].u.suns[subnode]; + + if(nn >= 0) /* ok, something is in the daughter slot already, need to continue */ + { + th = nn; + } + else + { + /* here we have found an empty slot where we can + * attach the pseudo particle as a leaf + */ + Nodes[th].u.suns[subnode] = All.MaxPart + MaxNodes + i; + + break; /* done for this pseudo particle */ + } + } + else + { + endrun(889); /* this can't be */ + } + } + } + } +} + + +/*! this routine determines the multipole moments for a given internal node + * and all its subnodes using a recursive computation. The result is + * stored in the Nodes[] structure in the sequence of this tree-walk. + * + * Note that the bitflags-variable for each node is used to store in the + * lowest bits some special information: Bit 0 flags whether the node + * belongs to the top-level tree corresponding to the domain + * decomposition, while Bit 1 signals whether the top-level node is + * dependent on local mass. + * + * If UNEQUALSOFTENINGS is set, bits 2-4 give the particle type with + * the maximum softening among the particles in the node, and bit 5 + * flags whether the node contains any particles with lower softening + * than that. + */ +void force_update_node_recursive(int no, int sib, int father) +{ + int j, jj, p, pp, nextsib, suns[8]; + FLOAT hmax; + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + int maxsofttype, diffsoftflag; +#else + FLOAT maxsoft; +#endif +#endif + struct particle_data *pa; + double s[3], vs[3], mass; + + if(no >= All.MaxPart && no < All.MaxPart + MaxNodes) /* internal node */ + { + for(j = 0; j < 8; j++) + suns[j] = Nodes[no].u.suns[j]; /* this "backup" is necessary because the nextnode entry will + overwrite one element (union!) 
*/ + if(last >= 0) + { + if(last >= All.MaxPart) + { + if(last >= All.MaxPart + MaxNodes) /* a pseudo-particle */ + Nextnode[last - MaxNodes] = no; + else + Nodes[last].u.d.nextnode = no; + } + else + Nextnode[last] = no; + } + + last = no; + + mass = 0; + s[0] = 0; + s[1] = 0; + s[2] = 0; + vs[0] = 0; + vs[1] = 0; + vs[2] = 0; + hmax = 0; +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + maxsofttype = 7; + diffsoftflag = 0; +#else + maxsoft = 0; +#endif +#endif + + for(j = 0; j < 8; j++) + { + if((p = suns[j]) >= 0) + { + /* check if we have a sibling on the same level */ + for(jj = j + 1; jj < 8; jj++) + if((pp = suns[jj]) >= 0) + break; + + if(jj < 8) /* yes, we do */ + nextsib = pp; + else + nextsib = sib; + + force_update_node_recursive(p, nextsib, no); + + + if(p >= All.MaxPart) /* an internal node or pseudo particle */ + { + if(p >= All.MaxPart + MaxNodes) /* a pseudo particle */ + { + /* nothing to be done here because the mass of the + * pseudo-particle is still zero. This will be changed + * later. + */ + } + else + { + mass += Nodes[p].u.d.mass; + s[0] += Nodes[p].u.d.mass * Nodes[p].u.d.s[0]; + s[1] += Nodes[p].u.d.mass * Nodes[p].u.d.s[1]; + s[2] += Nodes[p].u.d.mass * Nodes[p].u.d.s[2]; + vs[0] += Nodes[p].u.d.mass * Extnodes[p].vs[0]; + vs[1] += Nodes[p].u.d.mass * Extnodes[p].vs[1]; + vs[2] += Nodes[p].u.d.mass * Extnodes[p].vs[2]; + + if(Extnodes[p].hmax > hmax) + hmax = Extnodes[p].hmax; + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + diffsoftflag |= (Nodes[p].u.d.bitflags >> 5) & 1; + + if(maxsofttype == 7) + { + maxsofttype = (Nodes[p].u.d.bitflags >> 2) & 7; + } + else + { + if(((Nodes[p].u.d.bitflags >> 2) & 7) != 7) + { + if(All.ForceSoftening[((Nodes[p].u.d.bitflags >> 2) & 7)] > + All.ForceSoftening[maxsofttype]) + { + maxsofttype = ((Nodes[p].u.d.bitflags >> 2) & 7); + diffsoftflag = 1; + } + else + { + if(All.ForceSoftening[((Nodes[p].u.d.bitflags >> 2) & 7)] < + All.ForceSoftening[maxsofttype]) + diffsoftflag = 1; + } + } + } +#else + if(Nodes[p].maxsoft > maxsoft) + maxsoft = Nodes[p].maxsoft; +#endif +#endif + } + } + else /* a particle */ + { + pa = &P[p]; + + mass += pa->Mass; + s[0] += pa->Mass * pa->Pos[0]; + s[1] += pa->Mass * pa->Pos[1]; + s[2] += pa->Mass * pa->Pos[2]; + vs[0] += pa->Mass * pa->Vel[0]; + vs[1] += pa->Mass * pa->Vel[1]; + vs[2] += pa->Mass * pa->Vel[2]; + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + if(maxsofttype == 7) + { + maxsofttype = pa->Type; + } + else + { + if(All.ForceSoftening[pa->Type] > All.ForceSoftening[maxsofttype]) + { + maxsofttype = pa->Type; + diffsoftflag = 1; + } + else + { + if(All.ForceSoftening[pa->Type] < All.ForceSoftening[maxsofttype]) + diffsoftflag = 1; + } + } +#else + if(pa->Type == 0) + { + if(SphP[p].Hsml > maxsoft) + maxsoft = SphP[p].Hsml; + } + else + { + if(All.ForceSoftening[pa->Type] > maxsoft) + maxsoft = All.ForceSoftening[pa->Type]; + } +#endif +#endif + if(pa->Type == 0) + if(SphP[p].Hsml > hmax) + hmax = SphP[p].Hsml; + } + } + } + + + if(mass) + { + s[0] /= mass; + s[1] /= mass; + s[2] /= mass; + vs[0] /= mass; + vs[1] /= mass; + vs[2] /= mass; + } + else + { + s[0] = Nodes[no].center[0]; + s[1] = Nodes[no].center[1]; + s[2] = Nodes[no].center[2]; + } + + Nodes[no].u.d.s[0] = s[0]; + Nodes[no].u.d.s[1] = s[1]; + Nodes[no].u.d.s[2] = s[2]; + Nodes[no].u.d.mass = mass; + + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + Nodes[no].u.d.bitflags = 4 * maxsofttype + 32 * diffsoftflag; +#else + Nodes[no].u.d.bitflags = 0; + Nodes[no].maxsoft 
= maxsoft; +#endif +#else + Nodes[no].u.d.bitflags = 0; +#endif + + + Extnodes[no].vs[0] = vs[0]; + Extnodes[no].vs[1] = vs[1]; + Extnodes[no].vs[2] = vs[2]; + Extnodes[no].hmax = hmax; + + Nodes[no].u.d.sibling = sib; + Nodes[no].u.d.father = father; + } + else /* single particle or pseudo particle */ + { + if(last >= 0) + { + if(last >= All.MaxPart) + { + if(last >= All.MaxPart + MaxNodes) /* a pseudo-particle */ + Nextnode[last - MaxNodes] = no; + else + Nodes[last].u.d.nextnode = no; + } + else + Nextnode[last] = no; + } + + last = no; + + if(no < All.MaxPart) /* only set it for single particles */ + Father[no] = father; + } + +} + + + +/*! This function updates the multipole moments of the pseudo-particles + * that represent the mass distribution on different CPUs. For that + * purpose, it first exchanges the necessary data, and then updates the + * top-level tree accordingly. The detailed implementation of these two + * tasks is done in separate functions. + */ +void force_update_pseudoparticles(void) +{ + force_exchange_pseudodata(); + + force_treeupdate_pseudos(); +} + + + +/*! This function communicates the values of the multipole moments of the + * top-level tree-nodes of the domain grid. This data can then be used to + * update the pseudo-particles on each CPU accordingly. + */ +void force_exchange_pseudodata(void) +{ + int i, no; + MPI_Status status; + int level, sendTask, recvTask; + + for(i = DomainMyStart; i <= DomainMyLast; i++) + { + no = DomainNodeIndex[i]; + + /* read out the multipole moments from the local base cells */ + DomainMoment[i].s[0] = Nodes[no].u.d.s[0]; + DomainMoment[i].s[1] = Nodes[no].u.d.s[1]; + DomainMoment[i].s[2] = Nodes[no].u.d.s[2]; + DomainMoment[i].vs[0] = Extnodes[no].vs[0]; + DomainMoment[i].vs[1] = Extnodes[no].vs[1]; + DomainMoment[i].vs[2] = Extnodes[no].vs[2]; + DomainMoment[i].mass = Nodes[no].u.d.mass; +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + DomainMoment[i].bitflags = Nodes[no].u.d.bitflags; +#else + DomainMoment[i].maxsoft = Nodes[no].maxsoft; +#endif +#endif + } + + /* share the pseudo-particle data accross CPUs */ + + for(level = 1; level < (1 << PTask); level++) + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + + if(recvTask < NTask) + MPI_Sendrecv(&DomainMoment[DomainStartList[sendTask]], + (DomainEndList[sendTask] - DomainStartList[sendTask] + 1) * sizeof(struct DomainNODE), + MPI_BYTE, recvTask, TAG_DMOM, + &DomainMoment[DomainStartList[recvTask]], + (DomainEndList[recvTask] - DomainStartList[recvTask] + 1) * sizeof(struct DomainNODE), + MPI_BYTE, recvTask, TAG_DMOM, MPI_COMM_WORLD, &status); + } + +} + +/*! This function updates the top-level tree after the multipole moments of + * the pseudo-particles have been updated. 
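+ *
+ * Each affected node is corrected incrementally rather than recomputed from
+ * scratch; in sketch form, the update applied while walking up to the root is
+ *
+ *   mm = Nodes[no].u.d.mass + massnew - massold;
+ *   Nodes[no].u.d.s[k] = (Nodes[no].u.d.mass * Nodes[no].u.d.s[k]
+ *                         + massnew * snew[k] - massold * sold[k]) / mm;
+ *   Nodes[no].u.d.mass = mm;
+ *   no = Nodes[no].u.d.father;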
+ */ +void force_treeupdate_pseudos(void) +{ + int i, k, no; + FLOAT sold[3], vsold[3], snew[3], vsnew[3], massold, massnew, mm; + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + int maxsofttype, diffsoftflag; +#else + FLOAT maxsoft; +#endif +#endif + + for(i = 0; i < NTopleaves; i++) + if(i < DomainMyStart || i > DomainMyLast) + { + no = DomainNodeIndex[i]; + + for(k = 0; k < 3; k++) + { + sold[k] = Nodes[no].u.d.s[k]; + vsold[k] = Extnodes[no].vs[k]; + } + massold = Nodes[no].u.d.mass; + + for(k = 0; k < 3; k++) + { + snew[k] = DomainMoment[i].s[k]; + vsnew[k] = DomainMoment[i].vs[k]; + } + massnew = DomainMoment[i].mass; + + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + maxsofttype = (DomainMoment[i].bitflags >> 2) & 7; + diffsoftflag = (DomainMoment[i].bitflags >> 5) & 1; +#else + maxsoft = DomainMoment[i].maxsoft; +#endif +#endif + do + { + mm = Nodes[no].u.d.mass + massnew - massold; + for(k = 0; k < 3; k++) + { + if(mm > 0) + { + Nodes[no].u.d.s[k] = + (Nodes[no].u.d.mass * Nodes[no].u.d.s[k] + massnew * snew[k] - massold * sold[k]) / mm; + Extnodes[no].vs[k] = + (Nodes[no].u.d.mass * Extnodes[no].vs[k] + massnew * vsnew[k] - + massold * vsold[k]) / mm; + } + } + Nodes[no].u.d.mass = mm; + + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + diffsoftflag |= (Nodes[no].u.d.bitflags >> 5) & 1; + + if(maxsofttype == 7) + maxsofttype = (Nodes[no].u.d.bitflags >> 2) & 7; + else + { + if(((Nodes[no].u.d.bitflags >> 2) & 7) != 7) + { + if(All.ForceSoftening[((Nodes[no].u.d.bitflags >> 2) & 7)] > + All.ForceSoftening[maxsofttype]) + { + maxsofttype = ((Nodes[no].u.d.bitflags >> 2) & 7); + diffsoftflag = 1; + } + else + { + if(All.ForceSoftening[((Nodes[no].u.d.bitflags >> 2) & 7)] < + All.ForceSoftening[maxsofttype]) + diffsoftflag = 1; + } + } + } + + Nodes[no].u.d.bitflags = (Nodes[no].u.d.bitflags & 3) + 4 * maxsofttype + 32 * diffsoftflag; +#else + if(Nodes[no].maxsoft < maxsoft) + Nodes[no].maxsoft = maxsoft; + maxsoft = Nodes[no].maxsoft; +#endif +#endif + no = Nodes[no].u.d.father; + + } + while(no >= 0); + } +} + + + +/*! This function flags nodes in the top-level tree that are dependent on + * local particle data. + */ +void force_flag_localnodes(void) +{ + int no, i; + + /* mark all top-level nodes */ + + for(i = 0; i < NTopleaves; i++) + { + no = DomainNodeIndex[i]; + + while(no >= 0) + { + if((Nodes[no].u.d.bitflags & 1)) + break; + + Nodes[no].u.d.bitflags |= 1; + + no = Nodes[no].u.d.father; + } + } + + /* mark top-level nodes that contain local particles */ + + for(i = DomainMyStart; i <= DomainMyLast; i++) + { + /* + if(DomainMoment[i].mass > 0) + */ + { + no = DomainNodeIndex[i]; + + while(no >= 0) + { + if((Nodes[no].u.d.bitflags & 2)) + break; + + Nodes[no].u.d.bitflags |= 2; + + no = Nodes[no].u.d.father; + } + } + } +} + + + +/*! This function updates the side-length of tree nodes in case the tree is + * not reconstructed, but only drifted. The grouping of particles to tree + * nodes is not changed in this case, but some tree nodes may need to be + * enlarged because particles moved out of their original bounds. 
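+ *
+ * The per-particle enlargement test applied in force_update_node_len_local()
+ * below is, in sketch form,
+ *
+ *   distmax = max_k |P[i].Pos[k] - Nodes[no].center[k]|;
+ *   if(2 * distmax > Nodes[no].len)
+ *       Nodes[no].len = 2 * distmax;   // then propagated to the father nodes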
+ */ +void force_update_len(void) +{ + int i, no; + MPI_Status status; + int level, sendTask, recvTask; + + force_update_node_len_local(); + + /* first update the side-lengths of all local nodes */ + for(i = DomainMyStart; i <= DomainMyLast; i++) + { + no = DomainNodeIndex[i]; + + DomainTreeNodeLen[i] = Nodes[no].len; + } + + for(level = 1; level < (1 << PTask); level++) + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + + if(recvTask < NTask) + MPI_Sendrecv(&DomainTreeNodeLen[DomainStartList[sendTask]], + (DomainEndList[sendTask] - DomainStartList[sendTask] + 1) * sizeof(FLOAT), + MPI_BYTE, recvTask, TAG_NODELEN, + &DomainTreeNodeLen[DomainStartList[recvTask]], + (DomainEndList[recvTask] - DomainStartList[recvTask] + 1) * sizeof(FLOAT), + MPI_BYTE, recvTask, TAG_NODELEN, MPI_COMM_WORLD, &status); + } + + /* Finally, we update the top-level tree. */ + force_update_node_len_toptree(); +} + + +/*! This function recursively enlarges nodes such that they always contain + * all their daughter nodes and daughter particles. + */ +void force_update_node_len_local(void) +{ + int i, p, k, no; + FLOAT dist, distmax; + + for(i = 0; i < NumPart; i++) + { + no = Father[i]; + + for(k = 0, distmax = 0; k < 3; k++) + { + dist = P[i].Pos[k] - Nodes[no].center[k]; + if(dist < 0) + dist = -dist; + if(dist > distmax) + distmax = dist; + } + + if(distmax + distmax > Nodes[no].len) + { + Nodes[no].len = distmax + distmax; + p = Nodes[no].u.d.father; + + while(p >= 0) + { + distmax = Nodes[p].center[0] - Nodes[no].center[0]; + if(distmax < 0) + distmax = -distmax; + distmax = distmax + distmax + Nodes[no].len; + + if(0.999999 * distmax > Nodes[p].len) + { + Nodes[p].len = distmax; + no = p; + p = Nodes[p].u.d.father; + } + else + break; + } + } + } +} + + +/*! This function recursively enlarges nodes of the top-level tree such + * that they always contain all their daughter nodes. + */ +void force_update_node_len_toptree(void) +{ + int i, no, p; + FLOAT distmax; + + for(i = 0; i < NTopleaves; i++) + if(i < DomainMyStart || i > DomainMyLast) + { + no = DomainNodeIndex[i]; + + if(Nodes[no].len < DomainTreeNodeLen[i]) + Nodes[no].len = DomainTreeNodeLen[i]; + + p = Nodes[no].u.d.father; + + while(p >= 0) + { + distmax = Nodes[p].center[0] - Nodes[no].center[0]; + if(distmax < 0) + distmax = -distmax; + distmax = distmax + distmax + Nodes[no].len; + + if(0.999999 * distmax > Nodes[p].len) + { + Nodes[p].len = distmax; + no = p; + p = Nodes[p].u.d.father; + } + else + break; + } + } +} + + + + +/*! This function updates the hmax-values in tree nodes that hold SPH + * particles. These values are needed to find all neighbors in the + * hydro-force computation. Since the Hsml-values are potentially changed + * in the SPH-denity computation, force_update_hmax() should be carried + * out just before the hydrodynamical SPH forces are computed, i.e. after + * density(). 
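+ *
+ * Locally this amounts to pushing each SPH smoothing length up the tree
+ * (see force_update_node_hmax_local() below), in sketch form:
+ *
+ *   no = Father[i];
+ *   if(SphP[i].Hsml > Extnodes[no].hmax)
+ *     {
+ *       Extnodes[no].hmax = SphP[i].Hsml;
+ *       // ... and repeat for the father nodes until no further increase occurs
+ *     }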
+ */ +void force_update_hmax(void) +{ + int i, no; + MPI_Status status; + int level, sendTask, recvTask; + + force_update_node_hmax_local(); + + for(i = DomainMyStart; i <= DomainMyLast; i++) + { + no = DomainNodeIndex[i]; + + DomainHmax[i] = Extnodes[no].hmax; + } + + /* share the hmax-data of the pseudo-particles accross CPUs */ + + for(level = 1; level < (1 << PTask); level++) + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + + if(recvTask < NTask) + MPI_Sendrecv(&DomainHmax[DomainStartList[sendTask]], + (DomainEndList[sendTask] - DomainStartList[sendTask] + 1) * sizeof(FLOAT), + MPI_BYTE, recvTask, TAG_HMAX, + &DomainHmax[DomainStartList[recvTask]], + (DomainEndList[recvTask] - DomainStartList[recvTask] + 1) * sizeof(FLOAT), + MPI_BYTE, recvTask, TAG_HMAX, MPI_COMM_WORLD, &status); + } + + + force_update_node_hmax_toptree(); +} + +/*! This routine updates the hmax-values of local tree nodes. + */ +void force_update_node_hmax_local(void) +{ + int i, p, no; + + for(i = 0; i < N_gas; i++) + { + + no = Father[i]; + + if(SphP[i].Hsml > Extnodes[no].hmax) + { + + Extnodes[no].hmax = SphP[i].Hsml; + p = Nodes[no].u.d.father; + + while(p >= 0) + { + if(Extnodes[no].hmax > Extnodes[p].hmax) + { + Extnodes[p].hmax = Extnodes[no].hmax; + no = p; + p = Nodes[p].u.d.father; + } + else + break; + } + } + + } +} + + + + +/*! This function recursively sets the hmax-values of the top-level tree. + */ +void force_update_node_hmax_toptree(void) +{ + + int i, no, p; + + + for(i = 0; i < NTopleaves; i++) + if(i < DomainMyStart || i > DomainMyLast) + { + no = DomainNodeIndex[i]; + + if(Extnodes[no].hmax < DomainHmax[i]) + Extnodes[no].hmax = DomainHmax[i]; + + p = Nodes[no].u.d.father; + + while(p >= 0) + { + if(Extnodes[no].hmax > Extnodes[p].hmax) + { + Extnodes[p].hmax = Extnodes[no].hmax; + no = p; + p = Nodes[p].u.d.father; + } + else + break; + } + } +} + + + + + +/*! This routine computes the gravitational force for a given local + * particle, or for a particle in the communication buffer. Depending on + * the value of TypeOfOpeningCriterion, either the geometrical BH + * cell-opening criterion, or the `relative' opening criterion is used. 
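+ *
+ * In sketch form, the two cell-opening tests used in the walk below are
+ *
+ *   geometrical (BH):  open the node if  len^2     > r^2 * ErrTolTheta^2
+ *   relative:          open the node if  M * len^2 > r^4 * ErrTolForceAcc * |a_old|
+ *
+ * where for the relative criterion the node is additionally opened whenever
+ * the target lies within 0.6*len of the node centre in every coordinate.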
+ */ +int force_treeevaluate(int target, int mode, double *ewaldcountsum) +{ + struct NODE *nop = 0; + int no, ninteractions, ptype; + double r2, dx, dy, dz, mass, r, fac, u, h, h_inv, h3_inv; + double acc_x, acc_y, acc_z, pos_x, pos_y, pos_z, aold; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + acc_x = 0; + acc_y = 0; + acc_z = 0; + ninteractions = 0; + + if(mode == 0) + { + pos_x = P[target].Pos[0]; + pos_y = P[target].Pos[1]; + pos_z = P[target].Pos[2]; + ptype = P[target].Type; + aold = All.ErrTolForceAcc * P[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphP[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSoftening[ptype]; + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + no = All.MaxPart; /* root node */ + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + /* the index of the node is the index of the particle */ + /* observe the sign */ + + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; + + mass = P[no].Mass; + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + nop = &Nodes[no]; + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; + + mass = nop->u.d.mass; + } +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(no < All.MaxPart) + { +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSoftening[ptype]; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node. 
Need to check opening criterion */ + { + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node + * which does not contain + * local particles we can + * continue to do a short-cut */ + { + no = nop->u.d.sibling; + continue; + } + } + + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + /* check in addition whether we lie inside the cell */ + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSoftening[ptype]; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(986); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + if(((nop->u.d.bitflags >> 5) & 1)) /* bit-5 signals that there are particles of different softening in the node */ + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + + no = nop->u.d.sibling; /* ok, node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + if(r >= h) + fac = mass / (r2 * r); + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + u = r * h_inv; + if(u < 0.5) + fac = mass * h3_inv * (10.666666666667 + u * u * (32.0 * u - 38.4)); + else + fac = + mass * h3_inv * (21.333333333333 - 48.0 * u + + 38.4 * u * u - 10.666666666667 * u * u * u - 0.066666666667 / (u * u * u)); + } + + acc_x += dx * fac; + acc_y += dy * fac; + acc_z += dz * fac; + + ninteractions++; + } + + + /* store result at the proper place */ + if(mode == 0) + { + P[target].GravAccel[0] = acc_x; + P[target].GravAccel[1] = acc_y; + P[target].GravAccel[2] = acc_z; + P[target].GravCost = ninteractions; + } + else + { + GravDataResult[target].u.Acc[0] = acc_x; + GravDataResult[target].u.Acc[1] = acc_y; + GravDataResult[target].u.Acc[2] = acc_z; + GravDataResult[target].w.Ninteractions = ninteractions; + } + +#ifdef PERIODIC + *ewaldcountsum += force_treeevaluate_ewald_correction(target, mode, pos_x, pos_y, pos_z, aold); +#endif + + return ninteractions; +} + + + +#ifdef PY_INTERFACE +/*! This routine computes the gravitational force for a given local + * particle, or for a particle in the communication buffer. Depending on + * the value of TypeOfOpeningCriterion, either the geometrical BH + * cell-opening criterion, or the `relative' opening criterion is used. 
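/*
 * Editor's sketch (not part of the patch): the r < h branch of
 * force_treeevaluate() above evaluates the force of a spline-softened point
 * mass. Written as a standalone function of u = r/h it reads as below; at
 * u = 1 both branches join the Newtonian value mass/r^3, and the acceleration
 * is obtained as (dx,dy,dz) times the returned factor. The function name is
 * illustrative.
 */
static double grav_force_factor(double mass, double r, double h)
{
  double u, h_inv, h3_inv;

  if(r >= h)
    return mass / (r * r * r);           /* unsoftened Newtonian factor */

  h_inv = 1.0 / h;
  h3_inv = h_inv * h_inv * h_inv;
  u = r * h_inv;

  if(u < 0.5)
    return mass * h3_inv * (10.666666666667 + u * u * (32.0 * u - 38.4));

  return mass * h3_inv * (21.333333333333 - 48.0 * u + 38.4 * u * u
                          - 10.666666666667 * u * u * u
                          - 0.066666666667 / (u * u * u));
}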
+ */ +int force_treeevaluate_sub(int target, int mode, double *ewaldcountsum) +{ + struct NODE *nop = 0; + int no, ninteractions, ptype; + double r2, dx, dy, dz, mass, r, fac, u, h, h_inv, h3_inv; + double acc_x, acc_y, acc_z, pos_x, pos_y, pos_z, aold; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + acc_x = 0; + acc_y = 0; + acc_z = 0; + ninteractions = 0; + + if(mode == 0) + { + pos_x = Q[target].Pos[0]; + pos_y = Q[target].Pos[1]; + pos_z = Q[target].Pos[2]; + ptype = Q[target].Type; + aold = All.ErrTolForceAcc * Q[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphQ[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSofteningQ; + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + no = All.MaxPart; /* root node */ + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + /* the index of the node is the index of the particle */ + /* observe the sign */ + + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; + + mass = P[no].Mass; + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + nop = &Nodes[no]; + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; + + mass = nop->u.d.mass; + } +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(no < All.MaxPart) + { +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSofteningQ; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node. 
Need to check opening criterion */ + { + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node + * which does not contain + * local particles we can + * continue to do a short-cut */ + { + no = nop->u.d.sibling; + continue; + } + } + + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + /* check in addition whether we lie inside the cell */ + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSofteningQ; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(986); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + if(((nop->u.d.bitflags >> 5) & 1)) /* bit-5 signals that there are particles of different softening in the node */ + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + + no = nop->u.d.sibling; /* ok, node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + if(r >= h) + fac = mass / (r2 * r); + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + u = r * h_inv; + if(u < 0.5) + fac = mass * h3_inv * (10.666666666667 + u * u * (32.0 * u - 38.4)); + else + fac = + mass * h3_inv * (21.333333333333 - 48.0 * u + + 38.4 * u * u - 10.666666666667 * u * u * u - 0.066666666667 / (u * u * u)); + } + + acc_x += dx * fac; + acc_y += dy * fac; + acc_z += dz * fac; + + ninteractions++; + } + + + /* store result at the proper place */ + if(mode == 0) + { + Q[target].GravAccel[0] = acc_x; + Q[target].GravAccel[1] = acc_y; + Q[target].GravAccel[2] = acc_z; + Q[target].GravCost = ninteractions; + } + else + { + GravDataResult[target].u.Acc[0] = acc_x; + GravDataResult[target].u.Acc[1] = acc_y; + GravDataResult[target].u.Acc[2] = acc_z; + GravDataResult[target].w.Ninteractions = ninteractions; + } + +#ifdef PERIODIC + *ewaldcountsum += force_treeevaluate_ewald_correction(target, mode, pos_x, pos_y, pos_z, aold); +#endif + + return ninteractions; +} +#endif + + + + +#ifdef PMGRID +/*! In the TreePM algorithm, the tree is walked only locally around the + * target coordinate. Tree nodes that fall outside a box of half + * side-length Rcut= RCUT*ASMTH*MeshSize can be discarded. The short-range + * potential is modified by a complementary error function, multiplied + * with the Newtonian form. 
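/*
 * Editor's sketch (not part of the patch): the complementary-error-function
 * suppression mentioned here is precomputed in force_treeallocate() further
 * down (shortrange_table[]) and applied via the tabindex lookup in
 * force_treeevaluate_shortrange(). A self-contained version of both steps,
 * with u = r/(2*Asmth) sampled over 0 <= u < 3 (NTAB_DEMO and the function
 * names are illustrative stand-ins for NTAB and the original globals):
 */
#include <math.h>

#define NTAB_DEMO 1000

static double shortrange_demo[NTAB_DEMO];

static void fill_shortrange_table(void)
{
  int i;
  double u;

  for(i = 0; i < NTAB_DEMO; i++)
    {
      u = 3.0 / NTAB_DEMO * (i + 0.5);
      /* suppression of the short-range force: erfc(u) + 2u/sqrt(pi) * exp(-u^2) */
      shortrange_demo[i] = erfc(u) + 2.0 * u / sqrt(M_PI) * exp(-u * u);
    }
}

/* multiply a Newtonian force factor by the tabulated suppression;
   contributions with u >= 3 (r > 6*Asmth) are dropped, as in the tree walk */
static double suppress(double fac_newton, double r, double asmth)
{
  int tabindex = (int) (0.5 / asmth * (NTAB_DEMO / 3.0) * r);

  return tabindex < NTAB_DEMO ? fac_newton * shortrange_demo[tabindex] : 0.0;
}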
The resulting short-range suppression compared + * to the Newtonian force is tabulated, because looking up from this table + * is faster than recomputing the corresponding factor, despite the + * memory-access panelty (which reduces cache performance) incurred by the + * table. + */ +int force_treeevaluate_shortrange(int target, int mode) +{ + struct NODE *nop = 0; + int no, ptype, ninteractions, tabindex; + double r2, dx, dy, dz, mass, r, fac, u, h, h_inv, h3_inv; + double acc_x, acc_y, acc_z, pos_x, pos_y, pos_z, aold; + double eff_dist; + double rcut, asmth, asmthfac, rcut2, dist; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + + acc_x = 0; + acc_y = 0; + acc_z = 0; + ninteractions = 0; + + if(mode == 0) + { + pos_x = P[target].Pos[0]; + pos_y = P[target].Pos[1]; + pos_z = P[target].Pos[2]; + ptype = P[target].Type; + aold = All.ErrTolForceAcc * P[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphP[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + rcut = All.Rcut[0]; + asmth = All.Asmth[0]; +#ifdef PLACEHIGHRESREGION + if(((1 << ptype) & (PLACEHIGHRESREGION))) + { + rcut = All.Rcut[1]; + asmth = All.Asmth[1]; + } +#endif + rcut2 = rcut * rcut; + + asmthfac = 0.5 / asmth * (NTAB / 3.0); + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSoftening[ptype]; + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + no = All.MaxPart; /* root node */ + + while(no >= 0) + { + if(no < All.MaxPart) + { + /* the index of the node is the index of the particle */ + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + mass = P[no].Mass; +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSoftening[ptype]; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node */ + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + nop = &Nodes[no]; + + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node + * which does not contain + * local particles we can + * continue at this point + */ + { + no = nop->u.d.sibling; + continue; + } + } + + mass = nop->u.d.mass; + + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(r2 > rcut2) + { + /* check whether we can stop 
walking along this branch */ + eff_dist = rcut + 0.5 * nop->len; +#ifdef PERIODIC + dist = NEAREST(nop->center[0] - pos_x); +#else + dist = nop->center[0] - pos_x; +#endif + if(dist < -eff_dist || dist > eff_dist) + { + no = nop->u.d.sibling; + continue; + } +#ifdef PERIODIC + dist = NEAREST(nop->center[1] - pos_y); +#else + dist = nop->center[1] - pos_y; +#endif + if(dist < -eff_dist || dist > eff_dist) + { + no = nop->u.d.sibling; + continue; + } +#ifdef PERIODIC + dist = NEAREST(nop->center[2] - pos_z); +#else + dist = nop->center[2] - pos_z; +#endif + if(dist < -eff_dist || dist > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + } + + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + /* check in addition whether we lie inside the cell */ + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSoftening[ptype]; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(987); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + if(((nop->u.d.bitflags >> 5) & 1)) /* bit-5 signals that there are particles of different softening in the node */ + { + no = nop->u.d.nextnode; + + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + no = nop->u.d.sibling; /* ok, node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + if(r >= h) + fac = mass / (r2 * r); + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + u = r * h_inv; + if(u < 0.5) + fac = mass * h3_inv * (10.666666666667 + u * u * (32.0 * u - 38.4)); + else + fac = + mass * h3_inv * (21.333333333333 - 48.0 * u + + 38.4 * u * u - 10.666666666667 * u * u * u - 0.066666666667 / (u * u * u)); + } + + tabindex = (int) (asmthfac * r); + + if(tabindex < NTAB) + { + fac *= shortrange_table[tabindex]; + + acc_x += dx * fac; + acc_y += dy * fac; + acc_z += dz * fac; + + ninteractions++; + } + } + + + /* store result at the proper place */ + if(mode == 0) + { + P[target].GravAccel[0] = acc_x; + P[target].GravAccel[1] = acc_y; + P[target].GravAccel[2] = acc_z; + P[target].GravCost = ninteractions; + } + else + { + GravDataResult[target].u.Acc[0] = acc_x; + GravDataResult[target].u.Acc[1] = acc_y; + GravDataResult[target].u.Acc[2] = acc_z; + GravDataResult[target].w.Ninteractions = ninteractions; + } + + return ninteractions; +} + +#endif + + +#ifdef PY_INTERFACE +#ifdef PMGRID +/*! In the TreePM algorithm, the tree is walked only locally around the + * target coordinate. 
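/*
 * Editor's sketch (not part of the patch): the per-axis rejection applied in
 * force_treeevaluate_shortrange() above once r^2 > rcut^2 is a simple box
 * test: a cubic node of side len centred at c can only contribute within the
 * cut-off radius if |c_k - pos_k| <= rcut + len/2 on every axis. The helper
 * name is illustrative; under PERIODIC the minimum-image wrapping (NEAREST)
 * is applied to each component first, exactly as in the original walk.
 */
#include <math.h>

static int node_outside_rcut(const double center[3], double len,
                             const double pos[3], double rcut)
{
  double eff_dist = rcut + 0.5 * len;
  int k;

  for(k = 0; k < 3; k++)
    if(fabs(center[k] - pos[k]) > eff_dist)
      return 1;                          /* whole branch can be skipped */

  return 0;
}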
Tree nodes that fall outside a box of half + * side-length Rcut= RCUT*ASMTH*MeshSize can be discarded. The short-range + * potential is modified by a complementary error function, multiplied + * with the Newtonian form. The resulting short-range suppression compared + * to the Newtonian force is tabulated, because looking up from this table + * is faster than recomputing the corresponding factor, despite the + * memory-access panelty (which reduces cache performance) incurred by the + * table. + */ +int force_treeevaluate_shortrange_sub(int target, int mode) +{ + struct NODE *nop = 0; + int no, ptype, ninteractions, tabindex; + double r2, dx, dy, dz, mass, r, fac, u, h, h_inv, h3_inv; + double acc_x, acc_y, acc_z, pos_x, pos_y, pos_z, aold; + double eff_dist; + double rcut, asmth, asmthfac, rcut2, dist; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + + acc_x = 0; + acc_y = 0; + acc_z = 0; + ninteractions = 0; + + if(mode == 0) + { + pos_x = Q[target].Pos[0]; + pos_y = Q[target].Pos[1]; + pos_z = Q[target].Pos[2]; + ptype = Q[target].Type; + aold = All.ErrTolForceAcc * Q[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphQ[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + rcut = All.Rcut[0]; + asmth = All.Asmth[0]; +#ifdef PLACEHIGHRESREGION + if(((1 << ptype) & (PLACEHIGHRESREGION))) + { + rcut = All.Rcut[1]; + asmth = All.Asmth[1]; + } +#endif + rcut2 = rcut * rcut; + + asmthfac = 0.5 / asmth * (NTAB / 3.0); + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSofteningQ; + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + no = All.MaxPart; /* root node */ + + while(no >= 0) + { + if(no < All.MaxPart) + { + /* the index of the node is the index of the particle */ + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + mass = P[no].Mass; +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSofteningQ; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node */ + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + nop = &Nodes[no]; + + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node + * which does not contain + * local particles we can + * continue at this point + */ + { + no = nop->u.d.sibling; + continue; + } + } + + mass = nop->u.d.mass; + + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; 
+ dz = nop->u.d.s[2] - pos_z; +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(r2 > rcut2) + { + /* check whether we can stop walking along this branch */ + eff_dist = rcut + 0.5 * nop->len; +#ifdef PERIODIC + dist = NEAREST(nop->center[0] - pos_x); +#else + dist = nop->center[0] - pos_x; +#endif + if(dist < -eff_dist || dist > eff_dist) + { + no = nop->u.d.sibling; + continue; + } +#ifdef PERIODIC + dist = NEAREST(nop->center[1] - pos_y); +#else + dist = nop->center[1] - pos_y; +#endif + if(dist < -eff_dist || dist > eff_dist) + { + no = nop->u.d.sibling; + continue; + } +#ifdef PERIODIC + dist = NEAREST(nop->center[2] - pos_z); +#else + dist = nop->center[2] - pos_z; +#endif + if(dist < -eff_dist || dist > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + } + + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + /* check in addition whether we lie inside the cell */ + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSofteningQ; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(987); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + if(((nop->u.d.bitflags >> 5) & 1)) /* bit-5 signals that there are particles of different softening in the node */ + { + no = nop->u.d.nextnode; + + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + no = nop->u.d.sibling; /* ok, node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + if(r >= h) + fac = mass / (r2 * r); + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; + h3_inv = h_inv * h_inv * h_inv; +#endif + u = r * h_inv; + if(u < 0.5) + fac = mass * h3_inv * (10.666666666667 + u * u * (32.0 * u - 38.4)); + else + fac = + mass * h3_inv * (21.333333333333 - 48.0 * u + + 38.4 * u * u - 10.666666666667 * u * u * u - 0.066666666667 / (u * u * u)); + } + + tabindex = (int) (asmthfac * r); + + if(tabindex < NTAB) + { + fac *= shortrange_table[tabindex]; + + acc_x += dx * fac; + acc_y += dy * fac; + acc_z += dz * fac; + + ninteractions++; + } + } + + + /* store result at the proper place */ + if(mode == 0) + { + Q[target].GravAccel[0] = acc_x; + Q[target].GravAccel[1] = acc_y; + Q[target].GravAccel[2] = acc_z; + Q[target].GravCost = ninteractions; + } + else + { + GravDataResult[target].u.Acc[0] = acc_x; + GravDataResult[target].u.Acc[1] = acc_y; + GravDataResult[target].u.Acc[2] = acc_z; + GravDataResult[target].w.Ninteractions = ninteractions; + } + + return ninteractions; +} + +#endif +#endif + +#ifdef 
PERIODIC +/*! This function computes the Ewald correction, and is needed if periodic + * boundary conditions together with a pure tree algorithm are used. Note + * that the ordinary tree walk does not carry out this correction directly + * as it was done in Gadget-1.1. Instead, the tree is walked a second + * time. This is actually faster because the "Ewald-Treewalk" can use a + * different opening criterion than the normal tree walk. In particular, + * the Ewald correction is negligible for particles that are very close, + * but it is large for particles that are far away (this is quite + * different for the normal direct force). So we can here use a different + * opening criterion. Sufficient accuracy is usually obtained if the node + * length has dropped to a certain fraction ~< 0.25 of the + * BoxLength. However, we may only short-cut the interaction list of the + * normal full Ewald tree walk if we are sure that the whole node and all + * daughter nodes "lie on the same side" of the periodic boundary, + * i.e. that the real tree walk would not find a daughter node or particle + * that was mapped to a different nearest neighbour position when the tree + * walk would be further refined. + */ +int force_treeevaluate_ewald_correction(int target, int mode, double pos_x, double pos_y, double pos_z, + double aold) +{ + struct NODE *nop = 0; + int no, cost; + double dx, dy, dz, mass, r2; + int signx, signy, signz; + int i, j, k, openflag; + double u, v, w; + double f1, f2, f3, f4, f5, f6, f7, f8; + double acc_x, acc_y, acc_z; + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; + + acc_x = 0; + acc_y = 0; + acc_z = 0; + cost = 0; + + no = All.MaxPart; + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + /* the index of the node is the index of the particle */ + /* observe the sign */ + + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; + mass = P[no].Mass; + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + + no = Nextnode[no - MaxNodes]; + continue; + } + + nop = &Nodes[no]; + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; + mass = nop->u.d.mass; + } + + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); + + if(no < All.MaxPart) + no = Nextnode[no]; + else /* we have an internal node. 
Need to check opening criterion */ + { + openflag = 0; + + r2 = dx * dx + dy * dy + dz * dz; + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + openflag = 1; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + openflag = 1; + } + else + { + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + openflag = 1; + } + } + } + } + } + + if(openflag) + { + /* now we check if we can avoid opening the cell */ + + u = nop->center[0] - pos_x; + if(u > boxhalf) + u -= boxsize; + if(u < -boxhalf) + u += boxsize; + + if(fabs(u) > 0.5 * (boxsize - nop->len)) + { + no = nop->u.d.nextnode; + continue; + } + + u = nop->center[1] - pos_y; + if(u > boxhalf) + u -= boxsize; + if(u < -boxhalf) + u += boxsize; + + if(fabs(u) > 0.5 * (boxsize - nop->len)) + { + no = nop->u.d.nextnode; + continue; + } + + u = nop->center[2] - pos_z; + if(u > boxhalf) + u -= boxsize; + if(u < -boxhalf) + u += boxsize; + + if(fabs(u) > 0.5 * (boxsize - nop->len)) + { + no = nop->u.d.nextnode; + continue; + } + + /* if the cell is too large, we need to refine + * it further + */ + if(nop->len > 0.20 * boxsize) + { + /* cell is too large */ + no = nop->u.d.nextnode; + continue; + } + } + + no = nop->u.d.sibling; /* ok, node can be used */ + + if(mode == 1) + { + if((nop->u.d.bitflags & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + /* compute the Ewald correction force */ + + if(dx < 0) + { + dx = -dx; + signx = +1; + } + else + signx = -1; + + if(dy < 0) + { + dy = -dy; + signy = +1; + } + else + signy = -1; + + if(dz < 0) + { + dz = -dz; + signz = +1; + } + else + signz = -1; + + u = dx * fac_intp; + i = (int) u; + if(i >= EN) + i = EN - 1; + u -= i; + v = dy * fac_intp; + j = (int) v; + if(j >= EN) + j = EN - 1; + v -= j; + w = dz * fac_intp; + k = (int) w; + if(k >= EN) + k = EN - 1; + w -= k; + + /* compute factors for trilinear interpolation */ + + f1 = (1 - u) * (1 - v) * (1 - w); + f2 = (1 - u) * (1 - v) * (w); + f3 = (1 - u) * (v) * (1 - w); + f4 = (1 - u) * (v) * (w); + f5 = (u) * (1 - v) * (1 - w); + f6 = (u) * (1 - v) * (w); + f7 = (u) * (v) * (1 - w); + f8 = (u) * (v) * (w); + + acc_x += mass * signx * (fcorrx[i][j][k] * f1 + + fcorrx[i][j][k + 1] * f2 + + fcorrx[i][j + 1][k] * f3 + + fcorrx[i][j + 1][k + 1] * f4 + + fcorrx[i + 1][j][k] * f5 + + fcorrx[i + 1][j][k + 1] * f6 + + fcorrx[i + 1][j + 1][k] * f7 + fcorrx[i + 1][j + 1][k + 1] * f8); + + acc_y += mass * signy * (fcorry[i][j][k] * f1 + + fcorry[i][j][k + 1] * f2 + + fcorry[i][j + 1][k] * f3 + + fcorry[i][j + 1][k + 1] * f4 + + fcorry[i + 1][j][k] * f5 + + fcorry[i + 1][j][k + 1] * f6 + + fcorry[i + 1][j + 1][k] * f7 + fcorry[i + 1][j + 1][k + 1] * f8); + + acc_z += mass * signz * (fcorrz[i][j][k] * f1 + + fcorrz[i][j][k + 1] * f2 + + fcorrz[i][j + 1][k] * f3 + + fcorrz[i][j + 1][k + 1] * f4 + + fcorrz[i + 1][j][k] * f5 + + fcorrz[i + 1][j][k + 1] * f6 + + fcorrz[i + 1][j + 1][k] * f7 + fcorrz[i + 1][j + 1][k + 1] * f8); + cost++; + } + + + /* add the result at the proper place */ + + if(mode == 0) + { + P[target].GravAccel[0] += acc_x; + P[target].GravAccel[1] += acc_y; + P[target].GravAccel[2] += acc_z; + P[target].GravCost += cost; + } + else + { + GravDataResult[target].u.Acc[0] += acc_x; + GravDataResult[target].u.Acc[1] += acc_y; + 
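/*
 * Editor's sketch (not part of the patch): the factors f1..f8 used above are
 * the weights of standard trilinear interpolation into the (EN+1)^3 Ewald
 * correction tables. The same operation for a generic flattened table, using
 * the (i*(n+1)+j)*(n+1)+k layout that ewald_init() uses for its linear index
 * (function name illustrative):
 */
static double trilinear(const double *tab, int n, double u, double v, double w)
{
  /* u, v, w in [0, n]; the caller obtains them as coordinate * fac_intp */
  int i, j, k;

  i = (int) u;  if(i >= n) i = n - 1;  u -= i;
  j = (int) v;  if(j >= n) j = n - 1;  v -= j;
  k = (int) w;  if(k >= n) k = n - 1;  w -= k;

#define T(a, b, c) tab[((a) * (n + 1) + (b)) * (n + 1) + (c)]
  return (1 - u) * (1 - v) * (1 - w) * T(i, j, k)
       + (1 - u) * (1 - v) * w       * T(i, j, k + 1)
       + (1 - u) * v       * (1 - w) * T(i, j + 1, k)
       + (1 - u) * v       * w       * T(i, j + 1, k + 1)
       + u       * (1 - v) * (1 - w) * T(i + 1, j, k)
       + u       * (1 - v) * w       * T(i + 1, j, k + 1)
       + u       * v       * (1 - w) * T(i + 1, j + 1, k)
       + u       * v       * w       * T(i + 1, j + 1, k + 1);
#undef T
}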
GravDataResult[target].u.Acc[2] += acc_z; + GravDataResult[target].w.Ninteractions += cost; + } + + return cost; +} + +#endif + + + + + + +/*! This routine computes the gravitational potential by walking the + * tree. The same opening criteria is used as for the gravitational force + * walk. + */ +void force_treeevaluate_potential(int target, int mode) +{ + struct NODE *nop = 0; + int no, ptype; + double r2, dx, dy, dz, mass, r, u, h, h_inv, wp; + double pot, pos_x, pos_y, pos_z, aold; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + pot = 0; + + if(mode == 0) + { + pos_x = P[target].Pos[0]; + pos_y = P[target].Pos[1]; + pos_z = P[target].Pos[2]; + ptype = P[target].Type; + aold = All.ErrTolForceAcc * P[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphP[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSoftening[ptype]; + h_inv = 1.0 / h; +#endif + no = All.MaxPart; + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + /* the index of the node is the index of the particle */ + /* observe the sign */ + + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; + mass = P[no].Mass; + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + nop = &Nodes[no]; + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; + mass = nop->u.d.mass; + } + +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(no < All.MaxPart) + { +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSoftening[ptype]; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node. 
Need to check opening criterion */ + { + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node + * which does not contain + * local particles we can make + * a short-cut + */ + { + no = nop->u.d.sibling; + continue; + } + } + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSoftening[ptype]; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(988); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + if(((nop->u.d.bitflags >> 5) & 1)) /* bit-5 signals that there are particles of different softening in the node */ + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + + no = nop->u.d.sibling; /* node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + if(r >= h) + pot -= mass / r; + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; +#endif + u = r * h_inv; + + if(u < 0.5) + wp = -2.8 + u * u * (5.333333333333 + u * u * (6.4 * u - 9.6)); + else + wp = + -3.2 + 0.066666666667 / u + u * u * (10.666666666667 + + u * (-16.0 + u * (9.6 - 2.133333333333 * u))); + + pot += mass * h_inv * wp; + } +#ifdef PERIODIC + pot += mass * ewald_pot_corr(dx, dy, dz); +#endif + } + + /* store result at the proper place */ + + if(mode == 0) + P[target].Potential = pot; + else + GravDataResult[target].u.Potential = pot; +} + + + + +#ifdef PMGRID +/*! This function computes the short-range potential when the TreePM + * algorithm is used. This potential is the Newtonian potential, modified + * by a complementary error function. 
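/*
 * Editor's sketch (not part of the patch): the wp(u) polynomial used in
 * force_treeevaluate_potential() above is the potential counterpart of the
 * spline-softened force kernel: for r >= h the contribution is the Newtonian
 * -mass/r, at r = 0 it tends to the finite value -2.8*mass/h, and at u = 1
 * the two branches join -mass/r continuously. Standalone form (function name
 * illustrative):
 */
static double grav_potential(double mass, double r, double h)
{
  double u, wp;

  if(r >= h)
    return -mass / r;

  u = r / h;

  if(u < 0.5)
    wp = -2.8 + u * u * (5.333333333333 + u * u * (6.4 * u - 9.6));
  else
    wp = -3.2 + 0.066666666667 / u
         + u * u * (10.666666666667 + u * (-16.0 + u * (9.6 - 2.133333333333 * u)));

  return mass * wp / h;
}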
+ */ +void force_treeevaluate_potential_shortrange(int target, int mode) +{ + struct NODE *nop = 0; + int no, ptype, tabindex; + double r2, dx, dy, dz, mass, r, u, h, h_inv, wp; + double pot, pos_x, pos_y, pos_z, aold; + double eff_dist, fac, rcut, asmth, asmthfac; + double dxx, dyy, dzz; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif + +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + pot = 0; + + if(mode == 0) + { + pos_x = P[target].Pos[0]; + pos_y = P[target].Pos[1]; + pos_z = P[target].Pos[2]; + ptype = P[target].Type; + aold = All.ErrTolForceAcc * P[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphP[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + + rcut = All.Rcut[0]; + asmth = All.Asmth[0]; +#ifdef PLACEHIGHRESREGION + if(((1 << ptype) & (PLACEHIGHRESREGION))) + { + rcut = All.Rcut[1]; + asmth = All.Asmth[1]; + } +#endif + asmthfac = 0.5 / asmth * (NTAB / 3.0); + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSoftening[ptype]; + h_inv = 1.0 / h; +#endif + + no = All.MaxPart; + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + /* the index of the node is the index of the particle */ + /* observe the sign */ + + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; + mass = P[no].Mass; + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + nop = &Nodes[no]; + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; + mass = nop->u.d.mass; + } + +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(no < All.MaxPart) + { +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSoftening[ptype]; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node. Need to check opening criterion */ + { + /* check whether we can stop walking along this branch */ + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node which does not contain local particles */ + { + no = nop->u.d.sibling; + continue; + } + } + + eff_dist = rcut + 0.5 * nop->len; + + dxx = nop->center[0] - pos_x; /* observe the sign ! 
*/ + dyy = nop->center[1] - pos_y; /* this vector is -y in my thesis notation */ + dzz = nop->center[2] - pos_z; +#ifdef PERIODIC + dxx = NEAREST(dxx); + dyy = NEAREST(dyy); + dzz = NEAREST(dzz); +#endif + if(dxx < -eff_dist || dxx > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + + if(dyy < -eff_dist || dyy > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + + if(dzz < -eff_dist || dzz > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSoftening[ptype]; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(989); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + /* bit-5 signals that there are particles of + * different softening in the node + */ + if(((nop->u.d.bitflags >> 5) & 1)) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSoftening[ptype]; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + no = nop->u.d.sibling; /* node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + tabindex = (int) (r * asmthfac); + + if(tabindex < NTAB) + { + fac = shortrange_table_potential[tabindex]; + + if(r >= h) + pot -= fac * mass / r; + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; +#endif + u = r * h_inv; + + if(u < 0.5) + wp = -2.8 + u * u * (5.333333333333 + u * u * (6.4 * u - 9.6)); + else + wp = + -3.2 + 0.066666666667 / u + u * u * (10.666666666667 + + u * (-16.0 + u * (9.6 - 2.133333333333 * u))); + pot += fac * mass * h_inv * wp; + } + } + } + + + /* store result at the proper place */ + if(mode == 0) + P[target].Potential = pot; + else + GravDataResult[target].u.Potential = pot; +} + +#endif + + + + + + + + + + + +#ifdef PY_INTERFACE + +/*! This routine computes the gravitational potential by walking the + * tree. The same opening criteria is used as for the gravitational force + * walk. 
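/*
 * Editor's sketch (not part of the patch): all the PERIODIC branches above
 * rely on the minimum-image convention. The NEAREST() macro itself is defined
 * in the tree header rather than in this file, but the operation it performs
 * amounts to mapping a coordinate difference back into (-BoxSize/2, BoxSize/2].
 * A single correction per side suffices for offsets within one box length, as
 * in the Ewald walk above; force_treeevaluate_direct() below uses while-loops
 * for the general case.
 */
static double nearest_image(double dx, double boxsize)
{
  double boxhalf = 0.5 * boxsize;

  if(dx > boxhalf)
    dx -= boxsize;
  if(dx < -boxhalf)
    dx += boxsize;

  return dx;
}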
+ */ +void force_treeevaluate_potential_sub(int target, int mode) +{ + struct NODE *nop = 0; + int no, ptype; + double r2, dx, dy, dz, mass, r, u, h, h_inv, wp; + double pot, pos_x, pos_y, pos_z, aold; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + pot = 0; + + if(mode == 0) + { + pos_x = Q[target].Pos[0]; + pos_y = Q[target].Pos[1]; + pos_z = Q[target].Pos[2]; + ptype = Q[target].Type; + aold = All.ErrTolForceAcc * Q[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphQ[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSofteningQ; + h_inv = 1.0 / h; +#endif + no = All.MaxPart; + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + /* the index of the node is the index of the particle */ + /* observe the sign */ + + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; + mass = P[no].Mass; + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + nop = &Nodes[no]; + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; + mass = nop->u.d.mass; + } + +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(no < All.MaxPart) + { +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSofteningQ; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node. 
Need to check opening criterion */ + { + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node + * which does not contain + * local particles we can make + * a short-cut + */ + { + no = nop->u.d.sibling; + continue; + } + } + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSofteningQ; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(988); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + if(((nop->u.d.bitflags >> 5) & 1)) /* bit-5 signals that there are particles of different softening in the node */ + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + + no = nop->u.d.sibling; /* node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + if(r >= h) + pot -= mass / r; + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; +#endif + u = r * h_inv; + + if(u < 0.5) + wp = -2.8 + u * u * (5.333333333333 + u * u * (6.4 * u - 9.6)); + else + wp = + -3.2 + 0.066666666667 / u + u * u * (10.666666666667 + + u * (-16.0 + u * (9.6 - 2.133333333333 * u))); + + pot += mass * h_inv * wp; + } +#ifdef PERIODIC + pot += mass * ewald_pot_corr(dx, dy, dz); +#endif + } + + /* store result at the proper place */ + + if(mode == 0) + Q[target].Potential = pot; + else + GravDataResult[target].u.Potential = pot; +} + + + + +#ifdef PMGRID +/*! This function computes the short-range potential when the TreePM + * algorithm is used. This potential is the Newtonian potential, modified + * by a complementary error function. 
+ */ +void force_treeevaluate_potential_shortrange_sub(int target, int mode) +{ + struct NODE *nop = 0; + int no, ptype, tabindex; + double r2, dx, dy, dz, mass, r, u, h, h_inv, wp; + double pot, pos_x, pos_y, pos_z, aold; + double eff_dist, fac, rcut, asmth, asmthfac; + double dxx, dyy, dzz; +#if defined(UNEQUALSOFTENINGS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS) + int maxsofttype; +#endif +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + double soft = 0; +#endif + +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + pot = 0; + + if(mode == 0) + { + pos_x = Q[target].Pos[0]; + pos_y = Q[target].Pos[1]; + pos_z = Q[target].Pos[2]; + ptype = Q[target].Type; + aold = All.ErrTolForceAcc * Q[target].OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = SphQ[target].Hsml; +#endif + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + aold = All.ErrTolForceAcc * GravDataGet[target].w.OldAcc; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + soft = GravDataGet[target].Soft; +#endif + } + + + rcut = All.Rcut[0]; + asmth = All.Asmth[0]; +#ifdef PLACEHIGHRESREGION + if(((1 << ptype) & (PLACEHIGHRESREGION))) + { + rcut = All.Rcut[1]; + asmth = All.Asmth[1]; + } +#endif + asmthfac = 0.5 / asmth * (NTAB / 3.0); + +#ifndef UNEQUALSOFTENINGS + h = All.ForceSofteningQ; + h_inv = 1.0 / h; +#endif + + no = All.MaxPart; + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + /* the index of the node is the index of the particle */ + /* observe the sign */ + + dx = P[no].Pos[0] - pos_x; + dy = P[no].Pos[1] - pos_y; + dz = P[no].Pos[2] - pos_z; + mass = P[no].Mass; + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + nop = &Nodes[no]; + dx = nop->u.d.s[0] - pos_x; + dy = nop->u.d.s[1] - pos_y; + dz = nop->u.d.s[2] - pos_z; + mass = nop->u.d.mass; + } + +#ifdef PERIODIC + dx = NEAREST(dx); + dy = NEAREST(dy); + dz = NEAREST(dz); +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(no < All.MaxPart) + { +#ifdef UNEQUALSOFTENINGS +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(P[no].Type == 0) + { + if(h < SphP[no].Hsml) + h = SphP[no].Hsml; + } + else + { + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; + } +#else + h = All.ForceSofteningQ; + if(h < All.ForceSoftening[P[no].Type]) + h = All.ForceSoftening[P[no].Type]; +#endif +#endif + no = Nextnode[no]; + } + else /* we have an internal node. Need to check opening criterion */ + { + /* check whether we can stop walking along this branch */ + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + if(mode == 0) + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + } + no = Nextnode[no - MaxNodes]; + continue; + } + + if(mode == 1) + { + if((nop->u.d.bitflags & 3) == 1) /* if it's a top-level node which does not contain local particles */ + { + no = nop->u.d.sibling; + continue; + } + } + + eff_dist = rcut + 0.5 * nop->len; + + dxx = nop->center[0] - pos_x; /* observe the sign ! 
*/ + dyy = nop->center[1] - pos_y; /* this vector is -y in my thesis notation */ + dzz = nop->center[2] - pos_z; +#ifdef PERIODIC + dxx = NEAREST(dxx); + dyy = NEAREST(dyy); + dzz = NEAREST(dzz); +#endif + if(dxx < -eff_dist || dxx > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + + if(dyy < -eff_dist || dyy > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + + if(dzz < -eff_dist || dzz > eff_dist) + { + no = nop->u.d.sibling; + continue; + } + + if(All.ErrTolTheta) /* check Barnes-Hut opening criterion */ + { + if(nop->len * nop->len > r2 * All.ErrTolTheta * All.ErrTolTheta) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + } + else /* check relative opening criterion */ + { + if(mass * nop->len * nop->len > r2 * r2 * aold) + { + /* open cell */ + no = nop->u.d.nextnode; + continue; + } + + if(fabs(nop->center[0] - pos_x) < 0.60 * nop->len) + { + if(fabs(nop->center[1] - pos_y) < 0.60 * nop->len) + { + if(fabs(nop->center[2] - pos_z) < 0.60 * nop->len) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } + +#ifdef UNEQUALSOFTENINGS +#ifndef ADAPTIVE_GRAVSOFT_FORGAS + h = All.ForceSofteningQ; + maxsofttype = (nop->u.d.bitflags >> 2) & 7; + if(maxsofttype == 7) /* may only occur for zero mass top-level nodes */ + { + if(mass > 0) + endrun(989); + no = nop->u.d.nextnode; + continue; + } + else + { + if(h < All.ForceSoftening[maxsofttype]) + { + h = All.ForceSoftening[maxsofttype]; + if(r2 < h * h) + { + /* bit-5 signals that there are particles of + * different softening in the node + */ + if(((nop->u.d.bitflags >> 5) & 1)) + { + no = nop->u.d.nextnode; + continue; + } + } + } + } +#else + if(ptype == 0) + h = soft; + else + h = All.ForceSofteningQ; + + if(h < nop->maxsoft) + { + h = nop->maxsoft; + if(r2 < h * h) + { + no = nop->u.d.nextnode; + continue; + } + } +#endif +#endif + no = nop->u.d.sibling; /* node can be used */ + + if(mode == 1) + { + if(((nop->u.d.bitflags) & 1)) /* Bit 0 signals that this node belongs to top-level tree */ + continue; + } + } + + r = sqrt(r2); + + tabindex = (int) (r * asmthfac); + + if(tabindex < NTAB) + { + fac = shortrange_table_potential[tabindex]; + + if(r >= h) + pot -= fac * mass / r; + else + { +#ifdef UNEQUALSOFTENINGS + h_inv = 1.0 / h; +#endif + u = r * h_inv; + + if(u < 0.5) + wp = -2.8 + u * u * (5.333333333333 + u * u * (6.4 * u - 9.6)); + else + wp = + -3.2 + 0.066666666667 / u + u * u * (10.666666666667 + + u * (-16.0 + u * (9.6 - 2.133333333333 * u))); + pot += fac * mass * h_inv * wp; + } + } + } + + + /* store result at the proper place */ + if(mode == 0) + Q[target].Potential = pot; + else + GravDataResult[target].u.Potential = pot; +} + +#endif +#endif /*PY_INTERFACE*/ + + + + + + + + + + + + + + +/*! This function allocates the memory used for storage of the tree and of + * auxiliary arrays needed for tree-walk and link-lists. Usually, + * maxnodes approximately equal to 0.7*maxpart is sufficient to store the + * tree for up to maxpart particles. 
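/*
 * Editor's sketch (not part of the patch): force_treeallocate() below sets
 * Nodes = Nodes_base - All.MaxPart, so that particle indices (< MaxPart) and
 * node indices (>= MaxPart) live in a single index space and the tree walk
 * never has to subtract MaxPart. A minimal illustration of that
 * offset-pointer idiom (sizes and names are made up; it relies on the same
 * out-of-range pointer arithmetic as the original):
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_node { double len; };

int main(void)
{
  int maxpart = 1000, maxnodes = 700;            /* ~0.7 * maxpart, as suggested above */
  struct demo_node *base, *nodes;

  if(!(base = malloc((maxnodes + 1) * sizeof(struct demo_node))))
    return 1;

  nodes = base - maxpart;                        /* nodes[maxpart] aliases base[0] */

  nodes[maxpart].len = 1.0;                      /* the root node gets index maxpart */
  printf("root len = %g\n", base[0].len);

  free(base);
  return 0;
}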
+ */ +void force_treeallocate(int maxnodes, int maxpart) +{ + int i; + size_t bytes; + double allbytes = 0; + double u; + + MaxNodes = maxnodes; + + if(!(Nodes_base = malloc(bytes = (MaxNodes + 1) * sizeof(struct NODE)))) + { + printf("failed to allocate memory for %d tree-nodes (%g MB).\n", MaxNodes, bytes / (1024.0 * 1024.0)); + endrun(3); + } + allbytes += bytes; + + if(!(Extnodes_base = malloc(bytes = (MaxNodes + 1) * sizeof(struct extNODE)))) + { + printf("failed to allocate memory for %d tree-extnodes (%g MB).\n", MaxNodes, + bytes / (1024.0 * 1024.0)); + endrun(3); + } + allbytes += bytes; + + Nodes = Nodes_base - All.MaxPart; + Extnodes = Extnodes_base - All.MaxPart; + + if(!(Nextnode = malloc(bytes = (maxpart + MAXTOPNODES) * sizeof(int)))) + { + printf("Failed to allocate %d spaces for 'Nextnode' array (%g MB)\n", maxpart + MAXTOPNODES, + bytes / (1024.0 * 1024.0)); + exit(0); + } + allbytes += bytes; + + if(!(Father = malloc(bytes = (maxpart) * sizeof(int)))) + { + printf("Failed to allocate %d spaces for 'Father' array (%g MB)\n", maxpart, bytes / (1024.0 * 1024.0)); + exit(0); + } + allbytes += bytes; + + if(first_flag == 0) + { + first_flag = 1; + + if(ThisTask == 0) + printf("\nAllocated %g MByte for BH-tree. %d\n\n", allbytes / (1024.0 * 1024.0), + sizeof(struct NODE) + sizeof(struct extNODE)); + + tabfac = NTAB / 3.0; + + for(i = 0; i < NTAB; i++) + { + u = 3.0 / NTAB * (i + 0.5); + shortrange_table[i] = erfc(u) + 2.0 * u / sqrt(M_PI) * exp(-u * u); + shortrange_table_potential[i] = erfc(u); + } + } +} + + +/*! This function frees the memory allocated for the tree, i.e. it frees + * the space allocated by the function force_treeallocate(). + */ +void force_treefree(void) +{ + free(Father); + free(Nextnode); + free(Extnodes_base); + free(Nodes_base); +} + + + + +/*! This function does the force computation with direct summation for the + * specified particle in the communication buffer. This can be useful for + * debugging purposes, in particular for explicit checks of the force + * accuracy. 
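/*
 * Editor's sketch (not part of the patch): when FORCETEST is enabled, the
 * direct-summation accelerations computed below can be compared against the
 * tree result. One plausible way to quantify the accuracy is the relative
 * error between the two acceleration vectors; this helper and how the error
 * statistics are collected are assumptions, not shown in the patch.
 */
#include <math.h>

static double relative_force_error(const double a_tree[3], const double a_direct[3])
{
  double d2 = 0, ref2 = 0;
  int k;

  for(k = 0; k < 3; k++)
    {
      double diff = a_tree[k] - a_direct[k];

      d2 += diff * diff;
      ref2 += a_direct[k] * a_direct[k];
    }

  return sqrt(d2 / (ref2 > 0 ? ref2 : 1));   /* guard against a zero reference force */
}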
+ */ +#ifdef FORCETEST +int force_treeevaluate_direct(int target, int mode) +{ + double epsilon; + double h, h_inv, dx, dy, dz, r, r2, u, r_inv, fac; + int i, ptype; + double pos_x, pos_y, pos_z; + double acc_x, acc_y, acc_z; + +#ifdef PERIODIC + double fcorr[3]; +#endif +#ifdef PERIODIC + double boxsize, boxhalf; + + boxsize = All.BoxSize; + boxhalf = 0.5 * All.BoxSize; +#endif + + acc_x = 0; + acc_y = 0; + acc_z = 0; + + if(mode == 0) + { + pos_x = P[target].Pos[0]; + pos_y = P[target].Pos[1]; + pos_z = P[target].Pos[2]; + ptype = P[target].Type; + } + else + { + pos_x = GravDataGet[target].u.Pos[0]; + pos_y = GravDataGet[target].u.Pos[1]; + pos_z = GravDataGet[target].u.Pos[2]; +#ifdef UNEQUALSOFTENINGS + ptype = GravDataGet[target].Type; +#else + ptype = P[0].Type; +#endif + } + + for(i = 0; i < NumPart; i++) + { + epsilon = dmax(All.ForceSoftening[P[i].Type], All.ForceSoftening[ptype]); + + h = epsilon; + h_inv = 1 / h; + + dx = P[i].Pos[0] - pos_x; + dy = P[i].Pos[1] - pos_y; + dz = P[i].Pos[2] - pos_z; + +#ifdef PERIODIC + while(dx > boxhalf) + dx -= boxsize; + while(dy > boxhalf) + dy -= boxsize; + while(dz > boxhalf) + dz -= boxsize; + while(dx < -boxhalf) + dx += boxsize; + while(dy < -boxhalf) + dy += boxsize; + while(dz < -boxhalf) + dz += boxsize; +#endif + r2 = dx * dx + dy * dy + dz * dz; + + r = sqrt(r2); + + u = r * h_inv; + + if(u >= 1) + { + r_inv = 1 / r; + + fac = P[i].Mass * r_inv * r_inv * r_inv; + } + else + { + if(u < 0.5) + fac = P[i].Mass * h_inv * h_inv * h_inv * (10.666666666667 + u * u * (32.0 * u - 38.4)); + else + fac = + P[i].Mass * h_inv * h_inv * h_inv * (21.333333333333 - + 48.0 * u + 38.4 * u * u - + 10.666666666667 * u * u * + u - 0.066666666667 / (u * u * u)); + } + + acc_x += dx * fac; + acc_y += dy * fac; + acc_z += dz * fac; + +#ifdef PERIODIC + if(u > 1.0e-5) + { + ewald_corr(dx, dy, dz, fcorr); + + acc_x += P[i].Mass * fcorr[0]; + acc_y += P[i].Mass * fcorr[1]; + acc_z += P[i].Mass * fcorr[2]; + } +#endif + } + + + if(mode == 0) + { + P[target].GravAccelDirect[0] = acc_x; + P[target].GravAccelDirect[1] = acc_y; + P[target].GravAccelDirect[2] = acc_z; + } + else + { + GravDataResult[target].u.Acc[0] = acc_x; + GravDataResult[target].u.Acc[1] = acc_y; + GravDataResult[target].u.Acc[2] = acc_z; + } + + + return NumPart; +} +#endif + + +/*! This function dumps some of the basic particle data to a file. In case + * the tree construction fails, it is called just before the run + * terminates with an error message. Examination of the generated file may + * then give clues to what caused the problem. + */ +void dump_particles(void) +{ + FILE *fd; + char buffer[200]; + int i; + + sprintf(buffer, "particles%d.dat", ThisTask); + fd = fopen(buffer, "w"); + my_fwrite(&NumPart, 1, sizeof(int), fd); + + for(i = 0; i < NumPart; i++) + my_fwrite(&P[i].Pos[0], 3, sizeof(FLOAT), fd); + + for(i = 0; i < NumPart; i++) + my_fwrite(&P[i].Vel[0], 3, sizeof(FLOAT), fd); + + for(i = 0; i < NumPart; i++) + my_fwrite(&P[i].ID, 1, sizeof(int), fd); + + fclose(fd); +} + + + +#ifdef PERIODIC + +/*! This function initializes tables with the correction force and the + * correction potential due to the periodic images of a point mass located + * at the origin. These corrections are obtained by Ewald summation. (See + * Hernquist, Bouchet, Suto, ApJS, 1991, 75, 231) The correction fields + * are used to obtain the full periodic force if periodic boundaries + * combined with the pure tree algorithm are used. For the TreePM + * algorithm, the Ewald correction is not used. 
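+ *
+ * Added note for clarity (not in the original patch): each of the four tables
+ * (fcorrx, fcorry, fcorrz, potcorr) holds (EN+1)^3 FLOAT entries, sampling
+ * separations from 0 to half the box length per dimension, i.e. a single
+ * octant; the other octants follow from the sign symmetries exploited in
+ * ewald_corr() and ewald_pot_corr().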
+ * + * The correction fields are stored on disk once they are computed. If a + * corresponding file is found, they are loaded from disk to speed up the + * initialization. The Ewald summation is done in parallel, i.e. the + * processors share the work to compute the tables if needed. + */ +void ewald_init(void) +{ + int i, j, k, beg, len, size, n, task, count; + double x[3], force[3]; + char buf[200]; + FILE *fd; + + if(ThisTask == 0) + { + printf("initialize Ewald correction...\n"); + fflush(stdout); + } + +#ifdef DOUBLEPRECISION + sprintf(buf, "ewald_spc_table_%d_dbl.dat", EN); +#else + sprintf(buf, "ewald_spc_table_%d.dat", EN); +#endif + + if((fd = fopen(buf, "r"))) + { + if(ThisTask == 0) + { + printf("\nreading Ewald tables from file `%s'\n", buf); + fflush(stdout); + } + + my_fread(&fcorrx[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + my_fread(&fcorry[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + my_fread(&fcorrz[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + my_fread(&potcorr[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + fclose(fd); + } + else + { + if(ThisTask == 0) + { + printf("\nNo Ewald tables in file `%s' found.\nRecomputing them...\n", buf); + fflush(stdout); + } + + /* ok, let's recompute things. Actually, we do that in parallel. */ + + size = (EN + 1) * (EN + 1) * (EN + 1) / NTask; + + + beg = ThisTask * size; + len = size; + if(ThisTask == (NTask - 1)) + len = (EN + 1) * (EN + 1) * (EN + 1) - beg; + + for(i = 0, count = 0; i <= EN; i++) + for(j = 0; j <= EN; j++) + for(k = 0; k <= EN; k++) + { + n = (i * (EN + 1) + j) * (EN + 1) + k; + if(n >= beg && n < (beg + len)) + { + if(ThisTask == 0) + { + if((count % (len / 20)) == 0) + { + printf("%4.1f percent done\n", count / (len / 100.0)); + fflush(stdout); + } + } + + x[0] = 0.5 * ((double) i) / EN; + x[1] = 0.5 * ((double) j) / EN; + x[2] = 0.5 * ((double) k) / EN; + + ewald_force(i, j, k, x, force); + + fcorrx[i][j][k] = force[0]; + fcorry[i][j][k] = force[1]; + fcorrz[i][j][k] = force[2]; + + if(i + j + k == 0) + potcorr[i][j][k] = 2.8372975; + else + potcorr[i][j][k] = ewald_psi(x); + + count++; + } + } + + for(task = 0; task < NTask; task++) + { + beg = task * size; + len = size; + if(task == (NTask - 1)) + len = (EN + 1) * (EN + 1) * (EN + 1) - beg; + +#ifdef DOUBLEPRECISION + MPI_Bcast(&fcorrx[0][0][beg], len, MPI_DOUBLE, task, MPI_COMM_WORLD); + MPI_Bcast(&fcorry[0][0][beg], len, MPI_DOUBLE, task, MPI_COMM_WORLD); + MPI_Bcast(&fcorrz[0][0][beg], len, MPI_DOUBLE, task, MPI_COMM_WORLD); + MPI_Bcast(&potcorr[0][0][beg], len, MPI_DOUBLE, task, MPI_COMM_WORLD); +#else + MPI_Bcast(&fcorrx[0][0][beg], len, MPI_FLOAT, task, MPI_COMM_WORLD); + MPI_Bcast(&fcorry[0][0][beg], len, MPI_FLOAT, task, MPI_COMM_WORLD); + MPI_Bcast(&fcorrz[0][0][beg], len, MPI_FLOAT, task, MPI_COMM_WORLD); + MPI_Bcast(&potcorr[0][0][beg], len, MPI_FLOAT, task, MPI_COMM_WORLD); +#endif + } + + if(ThisTask == 0) + { + printf("\nwriting Ewald tables to file `%s'\n", buf); + fflush(stdout); + + if((fd = fopen(buf, "w"))) + { + my_fwrite(&fcorrx[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + my_fwrite(&fcorry[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + my_fwrite(&fcorrz[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + my_fwrite(&potcorr[0][0][0], sizeof(FLOAT), (EN + 1) * (EN + 1) * (EN + 1), fd); + fclose(fd); + } + } + } + + fac_intp = 2 * EN / All.BoxSize; + + for(i = 0; i <= EN; i++) + for(j = 0; j <= EN; j++) + for(k = 0; k <= 
EN; k++) + { + potcorr[i][j][k] /= All.BoxSize; + fcorrx[i][j][k] /= All.BoxSize * All.BoxSize; + fcorry[i][j][k] /= All.BoxSize * All.BoxSize; + fcorrz[i][j][k] /= All.BoxSize * All.BoxSize; + } + + if(ThisTask == 0) + { + printf("initialization of periodic boundaries finished.\n"); + fflush(stdout); + } +} + + +/*! This function looks up the correction force due to the infinite number + * of periodic particle/node images. We here use trilinear interpolation + * to get it from the precomputed tables, which contain one octant + * around the target particle at the origin. The other octants are + * obtained from it by exploiting the symmetry properties. + */ +#ifdef FORCETEST +void ewald_corr(double dx, double dy, double dz, double *fper) +{ + int signx, signy, signz; + int i, j, k; + double u, v, w; + double f1, f2, f3, f4, f5, f6, f7, f8; + + if(dx < 0) + { + dx = -dx; + signx = +1; + } + else + signx = -1; + + if(dy < 0) + { + dy = -dy; + signy = +1; + } + else + signy = -1; + + if(dz < 0) + { + dz = -dz; + signz = +1; + } + else + signz = -1; + + u = dx * fac_intp; + i = (int) u; + if(i >= EN) + i = EN - 1; + u -= i; + v = dy * fac_intp; + j = (int) v; + if(j >= EN) + j = EN - 1; + v -= j; + w = dz * fac_intp; + k = (int) w; + if(k >= EN) + k = EN - 1; + w -= k; + + f1 = (1 - u) * (1 - v) * (1 - w); + f2 = (1 - u) * (1 - v) * (w); + f3 = (1 - u) * (v) * (1 - w); + f4 = (1 - u) * (v) * (w); + f5 = (u) * (1 - v) * (1 - w); + f6 = (u) * (1 - v) * (w); + f7 = (u) * (v) * (1 - w); + f8 = (u) * (v) * (w); + + fper[0] = signx * (fcorrx[i][j][k] * f1 + + fcorrx[i][j][k + 1] * f2 + + fcorrx[i][j + 1][k] * f3 + + fcorrx[i][j + 1][k + 1] * f4 + + fcorrx[i + 1][j][k] * f5 + + fcorrx[i + 1][j][k + 1] * f6 + + fcorrx[i + 1][j + 1][k] * f7 + fcorrx[i + 1][j + 1][k + 1] * f8); + + fper[1] = signy * (fcorry[i][j][k] * f1 + + fcorry[i][j][k + 1] * f2 + + fcorry[i][j + 1][k] * f3 + + fcorry[i][j + 1][k + 1] * f4 + + fcorry[i + 1][j][k] * f5 + + fcorry[i + 1][j][k + 1] * f6 + + fcorry[i + 1][j + 1][k] * f7 + fcorry[i + 1][j + 1][k + 1] * f8); + + fper[2] = signz * (fcorrz[i][j][k] * f1 + + fcorrz[i][j][k + 1] * f2 + + fcorrz[i][j + 1][k] * f3 + + fcorrz[i][j + 1][k + 1] * f4 + + fcorrz[i + 1][j][k] * f5 + + fcorrz[i + 1][j][k + 1] * f6 + + fcorrz[i + 1][j + 1][k] * f7 + fcorrz[i + 1][j + 1][k + 1] * f8); +} +#endif + + +/*! This function looks up the correction potential due to the infinite + * number of periodic particle/node images. We here use tri-linear + * interpolation to get it from the precomputed table, which contains + * one octant around the target particle at the origin. The other + * octants are obtained from it by exploiting symmetry properties. 
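+ *
+ * Added sketch of the interpolation (not part of the original patch): with
+ * cell indices (i,j,k) and fractional offsets (u,v,w), the returned value is
+ *   sum over a,b,c in {0,1} of  potcorr[i+a][j+b][k+c] * wa(u) * wb(v) * wc(w),
+ * where w0(u) = 1-u and w1(u) = u (and likewise for v and w); the weights
+ * f1..f8 computed below are exactly these eight products.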
+ */ +double ewald_pot_corr(double dx, double dy, double dz) +{ + int i, j, k; + double u, v, w; + double f1, f2, f3, f4, f5, f6, f7, f8; + + if(dx < 0) + dx = -dx; + + if(dy < 0) + dy = -dy; + + if(dz < 0) + dz = -dz; + + u = dx * fac_intp; + i = (int) u; + if(i >= EN) + i = EN - 1; + u -= i; + v = dy * fac_intp; + j = (int) v; + if(j >= EN) + j = EN - 1; + v -= j; + w = dz * fac_intp; + k = (int) w; + if(k >= EN) + k = EN - 1; + w -= k; + + f1 = (1 - u) * (1 - v) * (1 - w); + f2 = (1 - u) * (1 - v) * (w); + f3 = (1 - u) * (v) * (1 - w); + f4 = (1 - u) * (v) * (w); + f5 = (u) * (1 - v) * (1 - w); + f6 = (u) * (1 - v) * (w); + f7 = (u) * (v) * (1 - w); + f8 = (u) * (v) * (w); + + return potcorr[i][j][k] * f1 + + potcorr[i][j][k + 1] * f2 + + potcorr[i][j + 1][k] * f3 + + potcorr[i][j + 1][k + 1] * f4 + + potcorr[i + 1][j][k] * f5 + + potcorr[i + 1][j][k + 1] * f6 + potcorr[i + 1][j + 1][k] * f7 + potcorr[i + 1][j + 1][k + 1] * f8; +} + + + +/*! This function computes the potential correction term by means of Ewald + * summation. + */ +double ewald_psi(double x[3]) +{ + double alpha, psi; + double r, sum1, sum2, hdotx; + double dx[3]; + int i, n[3], h[3], h2; + + alpha = 2.0; + + for(n[0] = -4, sum1 = 0; n[0] <= 4; n[0]++) + for(n[1] = -4; n[1] <= 4; n[1]++) + for(n[2] = -4; n[2] <= 4; n[2]++) + { + for(i = 0; i < 3; i++) + dx[i] = x[i] - n[i]; + + r = sqrt(dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2]); + sum1 += erfc(alpha * r) / r; + } + + for(h[0] = -4, sum2 = 0; h[0] <= 4; h[0]++) + for(h[1] = -4; h[1] <= 4; h[1]++) + for(h[2] = -4; h[2] <= 4; h[2]++) + { + hdotx = x[0] * h[0] + x[1] * h[1] + x[2] * h[2]; + h2 = h[0] * h[0] + h[1] * h[1] + h[2] * h[2]; + if(h2 > 0) + sum2 += 1 / (M_PI * h2) * exp(-M_PI * M_PI * h2 / (alpha * alpha)) * cos(2 * M_PI * hdotx); + } + + r = sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2]); + + psi = M_PI / (alpha * alpha) - sum1 - sum2 + 1 / r; + + return psi; +} + + +/*! This function computes the force correction term (difference between full + * force of infinite lattice and nearest image) by Ewald summation. 
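+ *
+ * Added note (not part of the original patch): with splitting parameter
+ * alpha = 2, the routine below starts from the nearest-image term x/r^3,
+ * then subtracts a real-space sum over image vectors n (each component
+ * running from -4 to 4) weighted by
+ *   erfc(alpha*r) + 2*alpha*r/sqrt(pi) * exp(-alpha^2*r^2),
+ * and a reciprocal-space sum over wave vectors h with weight
+ *   (2/h^2) * exp(-pi^2*h^2/alpha^2) * sin(2*pi*h.x),
+ * so that only the periodic correction to the nearest-image force remains.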
+ */ +void ewald_force(int iii, int jjj, int kkk, double x[3], double force[3]) +{ + double alpha, r2; + double r, val, hdotx, dx[3]; + int i, h[3], n[3], h2; + + alpha = 2.0; + + for(i = 0; i < 3; i++) + force[i] = 0; + + if(iii == 0 && jjj == 0 && kkk == 0) + return; + + r2 = x[0] * x[0] + x[1] * x[1] + x[2] * x[2]; + + for(i = 0; i < 3; i++) + force[i] += x[i] / (r2 * sqrt(r2)); + + for(n[0] = -4; n[0] <= 4; n[0]++) + for(n[1] = -4; n[1] <= 4; n[1]++) + for(n[2] = -4; n[2] <= 4; n[2]++) + { + for(i = 0; i < 3; i++) + dx[i] = x[i] - n[i]; + + r = sqrt(dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2]); + + val = erfc(alpha * r) + 2 * alpha * r / sqrt(M_PI) * exp(-alpha * alpha * r * r); + + for(i = 0; i < 3; i++) + force[i] -= dx[i] / (r * r * r) * val; + } + + for(h[0] = -4; h[0] <= 4; h[0]++) + for(h[1] = -4; h[1] <= 4; h[1]++) + for(h[2] = -4; h[2] <= 4; h[2]++) + { + hdotx = x[0] * h[0] + x[1] * h[1] + x[2] * h[2]; + h2 = h[0] * h[0] + h[1] * h[1] + h[2] * h[2]; + + if(h2 > 0) + { + val = 2.0 / ((double) h2) * exp(-M_PI * M_PI * h2 / (alpha * alpha)) * sin(2 * M_PI * hdotx); + + for(i = 0; i < 3; i++) + force[i] -= h[i] * val; + } + } +} + +#endif diff --git a/src/PyGadget/src/forcetree.o b/src/PyGadget/src/forcetree.o new file mode 100644 index 0000000..f794369 Binary files /dev/null and b/src/PyGadget/src/forcetree.o differ diff --git a/src/PyGadget/src/gadget.so b/src/PyGadget/src/gadget.so new file mode 100755 index 0000000..86c8e33 Binary files /dev/null and b/src/PyGadget/src/gadget.so differ diff --git a/src/PyGadget/src/global.c b/src/PyGadget/src/global.c new file mode 100644 index 0000000..1450116 --- /dev/null +++ b/src/PyGadget/src/global.c @@ -0,0 +1,198 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file global.c + * \brief Computes global physical properties of the system + */ + + +/*! This routine computes various global properties of the particle + * distribution and stores the result in the struct `SysState'. + * Currently, not all the information that's computed here is actually + * used (e.g. momentum is not really used anywhere), just the energies are + * written to a log-file every once in a while. 
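+ *
+ * Added note (not part of the original patch): in the loop below, velocities
+ * are first predicted to the current time with the grav-/hydro-kick factors,
+ * potential energy terms are divided by the scale factor a and kinetic terms
+ * by a^2 for comoving integration, and the gas entropy is converted to a
+ * specific internal energy before the per-type sums are accumulated.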
+ */ +void compute_global_quantities_of_system(void) +{ + int i, j, n; + struct state_of_system sys; + double a1, a2, a3; + double entr = 0, egyspec, vel[3]; + double dt_entr, dt_gravkick, dt_hydrokick; + + + + if(All.ComovingIntegrationOn) + { + a1 = All.Time; + a2 = All.Time * All.Time; + a3 = All.Time * All.Time * All.Time; + } + else + { + a1 = a2 = a3 = 1; + } + + + for(n = 0; n < 6; n++) + { + sys.MassComp[n] = sys.EnergyKinComp[n] = sys.EnergyPotComp[n] = sys.EnergyIntComp[n] = 0; + + for(j = 0; j < 4; j++) + sys.CenterOfMassComp[n][j] = sys.MomentumComp[n][j] = sys.AngMomentumComp[n][j] = 0; + } + + for(i = 0; i < NumPart; i++) + { + sys.MassComp[P[i].Type] += P[i].Mass; + + sys.EnergyPotComp[P[i].Type] += 0.5 * P[i].Mass * P[i].Potential / a1; + + if(All.ComovingIntegrationOn) + { + dt_entr = (All.Ti_Current - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval; + dt_gravkick = get_gravkick_factor(P[i].Ti_begstep, All.Ti_Current) - + get_gravkick_factor(P[i].Ti_begstep, (P[i].Ti_begstep + P[i].Ti_endstep) / 2); + dt_hydrokick = get_hydrokick_factor(P[i].Ti_begstep, All.Ti_Current) - + get_hydrokick_factor(P[i].Ti_begstep, (P[i].Ti_begstep + P[i].Ti_endstep) / 2); + } + else + dt_entr = dt_gravkick = dt_hydrokick = + (All.Ti_Current - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval; + + for(j = 0; j < 3; j++) + { + vel[j] = P[i].Vel[j] + P[i].GravAccel[j] * dt_gravkick; + if(P[i].Type == 0) + vel[j] += SphP[i].HydroAccel[j] * dt_hydrokick; + } + if(P[i].Type == 0) + entr = SphP[i].Entropy + SphP[i].DtEntropy * dt_entr; + +#ifdef PMGRID + if(All.ComovingIntegrationOn) + dt_gravkick = get_gravkick_factor(All.PM_Ti_begstep, All.Ti_Current) - + get_gravkick_factor(All.PM_Ti_begstep, (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2); + else + dt_gravkick = (All.Ti_Current - (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2) * All.Timebase_interval; + + for(j = 0; j < 3; j++) + vel[j] += P[i].GravPM[j] * dt_gravkick; +#endif + + sys.EnergyKinComp[P[i].Type] += + 0.5 * P[i].Mass * (vel[0] * vel[0] + vel[1] * vel[1] + vel[2] * vel[2]) / a2; + + if(P[i].Type == 0) + { +#ifdef ISOTHERM_EQS + egyspec = entr; +#else + egyspec = entr / (GAMMA_MINUS1) * pow(SphP[i].Density / a3, GAMMA_MINUS1); +#endif + sys.EnergyIntComp[0] += P[i].Mass * egyspec; + } + + + + for(j = 0; j < 3; j++) + { + sys.MomentumComp[P[i].Type][j] += P[i].Mass * vel[j]; + sys.CenterOfMassComp[P[i].Type][j] += P[i].Mass * P[i].Pos[j]; + } + + sys.AngMomentumComp[P[i].Type][0] += P[i].Mass * (P[i].Pos[1] * vel[2] - P[i].Pos[2] * vel[1]); + sys.AngMomentumComp[P[i].Type][1] += P[i].Mass * (P[i].Pos[2] * vel[0] - P[i].Pos[0] * vel[2]); + sys.AngMomentumComp[P[i].Type][2] += P[i].Mass * (P[i].Pos[0] * vel[1] - P[i].Pos[1] * vel[0]); + } + + + /* some the stuff over all processors */ + MPI_Reduce(&sys.MassComp[0], &SysState.MassComp[0], 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&sys.EnergyPotComp[0], &SysState.EnergyPotComp[0], 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&sys.EnergyIntComp[0], &SysState.EnergyIntComp[0], 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&sys.EnergyKinComp[0], &SysState.EnergyKinComp[0], 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&sys.MomentumComp[0][0], &SysState.MomentumComp[0][0], 6 * 4, MPI_DOUBLE, MPI_SUM, 0, + MPI_COMM_WORLD); + MPI_Reduce(&sys.AngMomentumComp[0][0], &SysState.AngMomentumComp[0][0], 6 * 4, MPI_DOUBLE, MPI_SUM, 0, + MPI_COMM_WORLD); + MPI_Reduce(&sys.CenterOfMassComp[0][0], &SysState.CenterOfMassComp[0][0], 6 
* 4, MPI_DOUBLE, MPI_SUM, 0, + MPI_COMM_WORLD); + + + if(ThisTask == 0) + { + for(i = 0; i < 6; i++) + SysState.EnergyTotComp[i] = SysState.EnergyKinComp[i] + + SysState.EnergyPotComp[i] + SysState.EnergyIntComp[i]; + + SysState.Mass = SysState.EnergyKin = SysState.EnergyPot = SysState.EnergyInt = SysState.EnergyTot = 0; + + for(j = 0; j < 3; j++) + SysState.Momentum[j] = SysState.AngMomentum[j] = SysState.CenterOfMass[j] = 0; + + for(i = 0; i < 6; i++) + { + SysState.Mass += SysState.MassComp[i]; + SysState.EnergyKin += SysState.EnergyKinComp[i]; + SysState.EnergyPot += SysState.EnergyPotComp[i]; + SysState.EnergyInt += SysState.EnergyIntComp[i]; + SysState.EnergyTot += SysState.EnergyTotComp[i]; + + for(j = 0; j < 3; j++) + { + SysState.Momentum[j] += SysState.MomentumComp[i][j]; + SysState.AngMomentum[j] += SysState.AngMomentumComp[i][j]; + SysState.CenterOfMass[j] += SysState.CenterOfMassComp[i][j]; + } + } + + for(i = 0; i < 6; i++) + for(j = 0; j < 3; j++) + if(SysState.MassComp[i] > 0) + SysState.CenterOfMassComp[i][j] /= SysState.MassComp[i]; + + for(j = 0; j < 3; j++) + if(SysState.Mass > 0) + SysState.CenterOfMass[j] /= SysState.Mass; + + for(i = 0; i < 6; i++) + { + SysState.CenterOfMassComp[i][3] = SysState.MomentumComp[i][3] = SysState.AngMomentumComp[i][3] = 0; + for(j = 0; j < 3; j++) + { + SysState.CenterOfMassComp[i][3] += + SysState.CenterOfMassComp[i][j] * SysState.CenterOfMassComp[i][j]; + SysState.MomentumComp[i][3] += SysState.MomentumComp[i][j] * SysState.MomentumComp[i][j]; + SysState.AngMomentumComp[i][3] += + SysState.AngMomentumComp[i][j] * SysState.AngMomentumComp[i][j]; + } + SysState.CenterOfMassComp[i][3] = sqrt(SysState.CenterOfMassComp[i][3]); + SysState.MomentumComp[i][3] = sqrt(SysState.MomentumComp[i][3]); + SysState.AngMomentumComp[i][3] = sqrt(SysState.AngMomentumComp[i][3]); + } + + SysState.CenterOfMass[3] = SysState.Momentum[3] = SysState.AngMomentum[3] = 0; + + for(j = 0; j < 3; j++) + { + SysState.CenterOfMass[3] += SysState.CenterOfMass[j] * SysState.CenterOfMass[j]; + SysState.Momentum[3] += SysState.Momentum[j] * SysState.Momentum[j]; + SysState.AngMomentum[3] += SysState.AngMomentum[j] * SysState.AngMomentum[j]; + } + + SysState.CenterOfMass[3] = sqrt(SysState.CenterOfMass[3]); + SysState.Momentum[3] = sqrt(SysState.Momentum[3]); + SysState.AngMomentum[3] = sqrt(SysState.AngMomentum[3]); + } + + /* give everyone the result, maybe the want to do something with it */ + MPI_Bcast(&SysState, sizeof(struct state_of_system), MPI_BYTE, 0, MPI_COMM_WORLD); +} diff --git a/src/PyGadget/src/global.o b/src/PyGadget/src/global.o new file mode 100644 index 0000000..2ff53f0 Binary files /dev/null and b/src/PyGadget/src/global.o differ diff --git a/src/PyGadget/src/gravtree.c b/src/PyGadget/src/gravtree.c new file mode 100644 index 0000000..6a05fe1 --- /dev/null +++ b/src/PyGadget/src/gravtree.c @@ -0,0 +1,945 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + +/*! \file gravtree.c + * \brief main driver routines for gravitational (short-range) force computation + * + * This file contains the code for the gravitational force computation by + * means of the tree algorithm. To this end, a tree force is computed for + * all active local particles, and particles are exported to other + * processors if needed, where they can receive additional force + * contributions. If the TreePM algorithm is enabled, the force computed + * will only be the short-range part. + */ + +/*! 
This function computes the gravitational forces for all active + * particles. If needed, a new tree is constructed, otherwise the + * dynamically updated tree is used. Particles are only exported to other + * processors when really needed, thereby allowing a good use of the + * communication buffer. + */ +void gravity_tree(void) +{ + long long ntot; + int numnodes, nexportsum = 0; + int i, j, iter = 0; + int *numnodeslist, maxnumnodes, nexport, *numlist, *nrecv, *ndonelist; + double tstart, tend, timetree = 0, timecommsumm = 0, timeimbalance = 0, sumimbalance; + double ewaldcount; + double costtotal, ewaldtot, *costtreelist, *ewaldlist; + double maxt, sumt, *timetreelist, *timecommlist; + double fac, plb, plb_max, sumcomm; + +#ifndef NOGRAVITY + int *noffset, *nbuffer, *nsend, *nsend_local; + long long ntotleft; + int ndone, maxfill, ngrp; + int k, place; + int level, sendTask, recvTask; + double ax, ay, az; + MPI_Status status; +#endif + + /* set new softening lengths */ + if(All.ComovingIntegrationOn) + set_softenings(); + + + /* contruct tree if needed */ + tstart = second(); + if(TreeReconstructFlag) + { + if(ThisTask == 0) + printf("Tree construction.\n"); + + force_treebuild(NumPart); + + TreeReconstructFlag = 0; + + if(ThisTask == 0) + printf("Tree construction done.\n"); + } + tend = second(); + All.CPU_TreeConstruction += timediff(tstart, tend); + + costtotal = ewaldcount = 0; + + /* Note: 'NumForceUpdate' has already been determined in find_next_sync_point_and_drift() */ + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumForceUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + +#ifndef NOGRAVITY + if(ThisTask == 0) + printf("Begin tree force.\n"); + + +#ifdef SELECTIVE_NO_GRAVITY + for(i = 0; i < NumPart; i++) + if(((1 << P[i].Type) & (SELECTIVE_NO_GRAVITY))) + P[i].Ti_endstep = -P[i].Ti_endstep - 1; +#endif + + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + i = 0; /* beginn with this index */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + iter++; + + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + tstart = second(); + for(nexport = 0, ndone = 0; i < NumPart && nexport < All.BunchSizeForce - NTask; i++) + if(P[i].Ti_endstep == All.Ti_Current) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; +#ifndef PMGRID + costtotal += force_treeevaluate(i, 0, &ewaldcount); +#else + costtotal += force_treeevaluate_shortrange(i, 0); +#endif + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + GravDataGet[nexport].u.Pos[k] = P[i].Pos[k]; +#ifdef UNEQUALSOFTENINGS + GravDataGet[nexport].Type = P[i].Type; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(P[i].Type == 0) + GravDataGet[nexport].Soft = SphP[i].Hsml; +#endif +#endif + GravDataGet[nexport].w.OldAcc = P[i].OldAcc; + GravDataIndexTable[nexport].Task = j; + GravDataIndexTable[nexport].Index = i; + GravDataIndexTable[nexport].SortIndex = nexport; + nexport++; + nexportsum++; + nsend_local[j]++; + } + } + } + tend = second(); + timetree += timediff(tstart, tend); + + qsort(GravDataIndexTable, nexport, sizeof(struct gravdata_index), grav_tree_compare_key); + + for(j = 0; j < nexport; j++) + 
GravDataIn[j] = GravDataGet[GravDataIndexTable[j].SortIndex]; + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + tstart = second(); + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&GravDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_GRAV_A, + &GravDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_GRAV_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + { +#ifndef PMGRID + costtotal += force_treeevaluate(j, 1, &ewaldcount); +#else + costtotal += force_treeevaluate_shortrange(j, 1); +#endif + } + tend = second(); + timetree += timediff(tstart, tend); + + tstart = second(); + MPI_Barrier(MPI_COMM_WORLD); + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* get the result */ + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&GravDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_GRAV_B, + &GravDataOut[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_GRAV_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + place = GravDataIndexTable[noffset[recvTask] + j].Index; + + for(k = 0; k < 3; k++) + P[place].GravAccel[k] += GravDataOut[j + noffset[recvTask]].u.Acc[k]; + + P[place].GravCost += GravDataOut[j + noffset[recvTask]].w.Ninteractions; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + /* now add things for comoving integration */ 
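+ /* Added explanatory note (not part of the original patch): the term applied
+  * in the non-periodic, non-PMGRID comoving branch below adds fac * Pos to the
+  * acceleration; after the later multiplication by All.G this equals
+  * 0.5 * Hubble^2 * Omega0 * x = (4*pi*G/3) * Omega0 * rho_crit * x, i.e. the
+  * acceleration a homogeneous background at the mean matter density would
+  * produce at comoving position x. */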
+ +#ifndef PERIODIC +#ifndef PMGRID + if(All.ComovingIntegrationOn) + { + fac = 0.5 * All.Hubble * All.Hubble * All.Omega0 / All.G; + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + P[i].GravAccel[j] += fac * P[i].Pos[j]; + } +#endif +#endif + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep == All.Ti_Current) + { +#ifdef PMGRID + ax = P[i].GravAccel[0] + P[i].GravPM[0] / All.G; + ay = P[i].GravAccel[1] + P[i].GravPM[1] / All.G; + az = P[i].GravAccel[2] + P[i].GravPM[2] / All.G; +#else + ax = P[i].GravAccel[0]; + ay = P[i].GravAccel[1]; + az = P[i].GravAccel[2]; +#endif + P[i].OldAcc = sqrt(ax * ax + ay * ay + az * az); + } + + + if(All.TypeOfOpeningCriterion == 1) + All.ErrTolTheta = 0; /* This will switch to the relative opening criterion for the following force computations */ + + /* muliply by G */ + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + P[i].GravAccel[j] *= All.G; + + + /* Finally, the following factor allows a computation of a cosmological simulation + with vacuum energy in physical coordinates */ +#ifndef PERIODIC +#ifndef PMGRID + if(All.ComovingIntegrationOn == 0) + { + fac = All.OmegaLambda * All.Hubble * All.Hubble; + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + P[i].GravAccel[j] += fac * P[i].Pos[j]; + } +#endif +#endif + +#ifdef SELECTIVE_NO_GRAVITY + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep < 0) + P[i].Ti_endstep = -P[i].Ti_endstep - 1; +#endif + + if(ThisTask == 0) + printf("tree is done.\n"); + +#else /* gravity is switched off */ + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + P[i].GravAccel[j] = 0; + +#endif + + + /* Now the force computation is finished */ + + /* gather some diagnostic information */ + + timetreelist = malloc(sizeof(double) * NTask); + timecommlist = malloc(sizeof(double) * NTask); + costtreelist = malloc(sizeof(double) * NTask); + numnodeslist = malloc(sizeof(int) * NTask); + ewaldlist = malloc(sizeof(double) * NTask); + nrecv = malloc(sizeof(int) * NTask); + + numnodes = Numnodestree; + + MPI_Gather(&costtotal, 1, MPI_DOUBLE, costtreelist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Gather(&numnodes, 1, MPI_INT, numnodeslist, 1, MPI_INT, 0, MPI_COMM_WORLD); + MPI_Gather(&timetree, 1, MPI_DOUBLE, timetreelist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Gather(&timecommsumm, 1, MPI_DOUBLE, timecommlist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Gather(&NumPart, 1, MPI_INT, nrecv, 1, MPI_INT, 0, MPI_COMM_WORLD); + MPI_Gather(&ewaldcount, 1, MPI_DOUBLE, ewaldlist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Reduce(&nexportsum, &nexport, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + All.TotNumOfForces += ntot; + + //fprintf(FdTimings, "Step= %d t= %g dt= %g \n", All.NumCurrentTiStep, All.Time, All.TimeStep); + //fprintf(FdTimings, "Nf= %d%09d total-Nf= %d%09d ex-frac= %g iter= %d\n", + // (int) (ntot / 1000000000), (int) (ntot % 1000000000), + // (int) (All.TotNumOfForces / 1000000000), (int) (All.TotNumOfForces % 1000000000), + // nexport / ((double) ntot), iter); + /* note: on Linux, the 8-byte integer could be printed with the format identifier "%qd", but doesn't work on AIX */ + + fac = NTask / ((double) All.TotNumPart); + + for(i = 0, maxt = timetreelist[0], sumt = 0, plb_max = 0, + maxnumnodes = 0, costtotal = 0, sumcomm = 0, 
ewaldtot = 0; i < NTask; i++) + { + costtotal += costtreelist[i]; + + sumcomm += timecommlist[i]; + + if(maxt < timetreelist[i]) + maxt = timetreelist[i]; + sumt += timetreelist[i]; + + plb = nrecv[i] * fac; + + if(plb > plb_max) + plb_max = plb; + + if(numnodeslist[i] > maxnumnodes) + maxnumnodes = numnodeslist[i]; + + ewaldtot += ewaldlist[i]; + } + //fprintf(FdTimings, "work-load balance: %g max=%g avg=%g PE0=%g\n", + // maxt / (sumt / NTask), maxt, sumt / NTask, timetreelist[0]); + //fprintf(FdTimings, "particle-load balance: %g\n", plb_max); + //fprintf(FdTimings, "max. nodes: %d, filled: %g\n", maxnumnodes, + // maxnumnodes / (All.TreeAllocFactor * All.MaxPart)); + //fprintf(FdTimings, "part/sec=%g | %g ia/part=%g (%g)\n", ntot / (sumt + 1.0e-20), + // ntot / (maxt * NTask), ((double) (costtotal)) / ntot, ((double) ewaldtot) / ntot); + //fprintf(FdTimings, "\n"); + // + //fflush(FdTimings); + + All.CPU_TreeWalk += sumt / NTask; + All.CPU_Imbalance += sumimbalance / NTask; + All.CPU_CommSum += sumcomm / NTask; + } + + free(nrecv); + free(ewaldlist); + free(numnodeslist); + free(costtreelist); + free(timecommlist); + free(timetreelist); +} + + +#ifdef PY_INTERFACE +/*! This function computes the gravitational forces for all active + * particles. If needed, a new tree is constructed, otherwise the + * dynamically updated tree is used. Particles are only exported to other + * processors when really needed, thereby allowing a good use of the + * communication buffer. + */ +void gravity_tree_sub(void) +{ + long long ntot; + int numnodes, nexportsum = 0; + int i, j, iter = 0; + int *numnodeslist, maxnumnodes, nexport, *numlist, *nrecv, *ndonelist; + double tstart, tend, timetree = 0, timecommsumm = 0, timeimbalance = 0, sumimbalance; + double ewaldcount; + double costtotal, ewaldtot, *costtreelist, *ewaldlist; + double maxt, sumt, *timetreelist, *timecommlist; + double fac, plb, plb_max, sumcomm; + +#ifndef NOGRAVITY + int *noffset, *nbuffer, *nsend, *nsend_local; + long long ntotleft; + int ndone, maxfill, ngrp; + int k, place; + int level, sendTask, recvTask; + double ax, ay, az; + MPI_Status status; +#endif + + /* set new softening lengths */ + if(All.ComovingIntegrationOn) + set_softenings(); + + + /* contruct tree if needed */ + tstart = second(); + if(TreeReconstructFlag) + { + if(ThisTask == 0) + printf("Tree construction.\n"); + + force_treebuild(NumPart); + + TreeReconstructFlag = 0; + + if(ThisTask == 0) + printf("Tree construction done.\n"); + } + tend = second(); + All.CPU_TreeConstruction += timediff(tstart, tend); + + costtotal = ewaldcount = 0; + + /* Note: 'NumForceUpdate' has already been determined in find_next_sync_point_and_drift() */ + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumPartQ, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + +#ifndef NOGRAVITY + if(ThisTask == 0) + printf("Begin tree force.\n"); + + +#ifdef SELECTIVE_NO_GRAVITY + for(i = 0; i < NumPartQ; i++) + if(((1 << Q[i].Type) & (SELECTIVE_NO_GRAVITY))) + Q[i].Ti_endstep = -Q[i].Ti_endstep - 1; +#endif + + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + i = 0; /* beginn with this index */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + iter++; + + 
for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + tstart = second(); + for(nexport = 0, ndone = 0; i < NumPartQ && nexport < All.BunchSizeForce - NTask; i++) + //if(Q[i].Ti_endstep == All.Ti_Current) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; +#ifndef PMGRID + costtotal += force_treeevaluate_sub(i, 0, &ewaldcount); +#else + costtotal += force_treeevaluate_shortrange_sub(i, 0); +#endif + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + GravDataGet[nexport].u.Pos[k] = Q[i].Pos[k]; +#ifdef UNEQUALSOFTENINGS + GravDataGet[nexport].Type = Q[i].Type; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(Q[i].Type == 0) + GravDataGet[nexport].Soft = SphQ[i].Hsml; +#endif +#endif + GravDataGet[nexport].w.OldAcc = Q[i].OldAcc; + GravDataIndexTable[nexport].Task = j; + GravDataIndexTable[nexport].Index = i; + GravDataIndexTable[nexport].SortIndex = nexport; + nexport++; + nexportsum++; + nsend_local[j]++; + } + } + } + tend = second(); + timetree += timediff(tstart, tend); + + qsort(GravDataIndexTable, nexport, sizeof(struct gravdata_index), grav_tree_compare_key); + + for(j = 0; j < nexport; j++) + GravDataIn[j] = GravDataGet[GravDataIndexTable[j].SortIndex]; + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + tstart = second(); + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&GravDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_GRAV_A, + &GravDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_GRAV_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + { +#ifndef PMGRID + costtotal += force_treeevaluate_sub(j, 1, &ewaldcount); +#else + costtotal += force_treeevaluate_shortrange_sub(j, 1); +#endif + } + tend = second(); + timetree += timediff(tstart, tend); + + tstart = second(); + MPI_Barrier(MPI_COMM_WORLD); + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* get the result */ + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + if(recvTask < NTask) + { + 
if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&GravDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_GRAV_B, + &GravDataOut[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_GRAV_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + place = GravDataIndexTable[noffset[recvTask] + j].Index; + + for(k = 0; k < 3; k++) + Q[place].GravAccel[k] += GravDataOut[j + noffset[recvTask]].u.Acc[k]; + + Q[place].GravCost += GravDataOut[j + noffset[recvTask]].w.Ninteractions; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + /* now add things for comoving integration */ + +#ifndef PERIODIC +#ifndef PMGRID + if(All.ComovingIntegrationOn) + { + fac = 0.5 * All.Hubble * All.Hubble * All.Omega0 / All.G; + + for(i = 0; i < NumPartQ; i++) + //if(Q[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + Q[i].GravAccel[j] += fac * Q[i].Pos[j]; + } +#endif +#endif + + for(i = 0; i < NumPartQ; i++) + //if(Q[i].Ti_endstep == All.Ti_Current) + { +#ifdef PMGRID + ax = Q[i].GravAccel[0] + Q[i].GravPM[0] / All.G; + ay = Q[i].GravAccel[1] + Q[i].GravPM[1] / All.G; + az = Q[i].GravAccel[2] + Q[i].GravPM[2] / All.G; +#else + ax = Q[i].GravAccel[0]; + ay = Q[i].GravAccel[1]; + az = Q[i].GravAccel[2]; +#endif + Q[i].OldAcc = sqrt(ax * ax + ay * ay + az * az); + } + + + if(All.TypeOfOpeningCriterion == 1) + All.ErrTolTheta = 0; /* This will switch to the relative opening criterion for the following force computations */ + + /* muliply by G */ + for(i = 0; i < NumPartQ; i++) + //if(Q[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + Q[i].GravAccel[j] *= All.G; + + + /* Finally, the following factor allows a computation of a cosmological simulation + with vacuum energy in physical coordinates */ +#ifndef PERIODIC +#ifndef PMGRID + if(All.ComovingIntegrationOn == 0) + { + fac = All.OmegaLambda * All.Hubble * All.Hubble; + + for(i = 0; i < NumPartQ; i++) + //if(Q[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + Q[i].GravAccel[j] += fac * Q[i].Pos[j]; + } +#endif +#endif + +#ifdef SELECTIVE_NO_GRAVITY + for(i = 0; i < NumPartQ; i++) + //if(Q[i].Ti_endstep < 0) + Q[i].Ti_endstep = -Q[i].Ti_endstep - 1; +#endif + + if(ThisTask == 0) + printf("tree is done.\n"); + +#else /* gravity is switched off */ + + for(i = 0; i < NumPartQ; i++) + //if(Q[i].Ti_endstep == All.Ti_Current) + for(j = 0; j < 3; j++) + Q[i].GravAccel[j] = 0; + +#endif + + + /* Now the force computation is finished */ + + /* gather some diagnostic information */ + + timetreelist = malloc(sizeof(double) * NTask); + timecommlist = malloc(sizeof(double) * NTask); + costtreelist = malloc(sizeof(double) * NTask); + numnodeslist = malloc(sizeof(int) * NTask); + ewaldlist = malloc(sizeof(double) * NTask); + nrecv = malloc(sizeof(int) * NTask); + + numnodes = Numnodestree; + + MPI_Gather(&costtotal, 1, MPI_DOUBLE, costtreelist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Gather(&numnodes, 1, MPI_INT, 
numnodeslist, 1, MPI_INT, 0, MPI_COMM_WORLD); + MPI_Gather(&timetree, 1, MPI_DOUBLE, timetreelist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Gather(&timecommsumm, 1, MPI_DOUBLE, timecommlist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Gather(&NumPartQ, 1, MPI_INT, nrecv, 1, MPI_INT, 0, MPI_COMM_WORLD); + MPI_Gather(&ewaldcount, 1, MPI_DOUBLE, ewaldlist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Reduce(&nexportsum, &nexport, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + All.TotNumOfForces += ntot; + + //fprintf(FdTimings, "Step= %d t= %g dt= %g \n", All.NumCurrentTiStep, All.Time, All.TimeStep); + //fprintf(FdTimings, "Nf= %d%09d total-Nf= %d%09d ex-frac= %g iter= %d\n", + // (int) (ntot / 1000000000), (int) (ntot % 1000000000), + // (int) (All.TotNumOfForces / 1000000000), (int) (All.TotNumOfForces % 1000000000), + // nexport / ((double) ntot), iter); + /* note: on Linux, the 8-byte integer could be printed with the format identifier "%qd", but doesn't work on AIX */ + + fac = NTask / ((double) All.TotNumPart); + + for(i = 0, maxt = timetreelist[0], sumt = 0, plb_max = 0, + maxnumnodes = 0, costtotal = 0, sumcomm = 0, ewaldtot = 0; i < NTask; i++) + { + costtotal += costtreelist[i]; + + sumcomm += timecommlist[i]; + + if(maxt < timetreelist[i]) + maxt = timetreelist[i]; + sumt += timetreelist[i]; + + plb = nrecv[i] * fac; + + if(plb > plb_max) + plb_max = plb; + + if(numnodeslist[i] > maxnumnodes) + maxnumnodes = numnodeslist[i]; + + ewaldtot += ewaldlist[i]; + } + //fprintf(FdTimings, "work-load balance: %g max=%g avg=%g PE0=%g\n", + // maxt / (sumt / NTask), maxt, sumt / NTask, timetreelist[0]); + //fprintf(FdTimings, "particle-load balance: %g\n", plb_max); + //fprintf(FdTimings, "max. nodes: %d, filled: %g\n", maxnumnodes, + // maxnumnodes / (All.TreeAllocFactor * All.MaxPart)); + //fprintf(FdTimings, "part/sec=%g | %g ia/part=%g (%g)\n", ntot / (sumt + 1.0e-20), + // ntot / (maxt * NTask), ((double) (costtotal)) / ntot, ((double) ewaldtot) / ntot); + //fprintf(FdTimings, "\n"); + // + //fflush(FdTimings); + + All.CPU_TreeWalk += sumt / NTask; + All.CPU_Imbalance += sumimbalance / NTask; + All.CPU_CommSum += sumcomm / NTask; + } + + free(nrecv); + free(ewaldlist); + free(numnodeslist); + free(costtreelist); + free(timecommlist); + free(timetreelist); +} +#endif + + +/*! This function sets the (comoving) softening length of all particle + * types in the table All.SofteningTable[...]. We check that the physical + * softening length is bounded by the Softening-MaxPhys values. 
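+ *
+ * Worked example (illustrative values, not from the original patch): with a
+ * comoving SofteningHalo = 10 and SofteningHaloMaxPhys = 5, at a = 0.3 the
+ * physical softening is 10*0.3 = 3, so the table keeps the comoving value 10;
+ * at a = 0.8 it would be 8 > 5, so the table entry becomes 5/0.8 = 6.25 and
+ * the physical softening stays capped at 5. The ForceSoftening values are
+ * 2.8 times the table entries, relating the Plummer-equivalent softening to
+ * the extent of the spline kernel.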
+ */ +void set_softenings(void) +{ + int i; + + if(All.ComovingIntegrationOn) + { + if(All.SofteningGas * All.Time > All.SofteningGasMaxPhys) + All.SofteningTable[0] = All.SofteningGasMaxPhys / All.Time; + else + All.SofteningTable[0] = All.SofteningGas; + + if(All.SofteningHalo * All.Time > All.SofteningHaloMaxPhys) + All.SofteningTable[1] = All.SofteningHaloMaxPhys / All.Time; + else + All.SofteningTable[1] = All.SofteningHalo; + + if(All.SofteningDisk * All.Time > All.SofteningDiskMaxPhys) + All.SofteningTable[2] = All.SofteningDiskMaxPhys / All.Time; + else + All.SofteningTable[2] = All.SofteningDisk; + + if(All.SofteningBulge * All.Time > All.SofteningBulgeMaxPhys) + All.SofteningTable[3] = All.SofteningBulgeMaxPhys / All.Time; + else + All.SofteningTable[3] = All.SofteningBulge; + + if(All.SofteningStars * All.Time > All.SofteningStarsMaxPhys) + All.SofteningTable[4] = All.SofteningStarsMaxPhys / All.Time; + else + All.SofteningTable[4] = All.SofteningStars; + + if(All.SofteningBndry * All.Time > All.SofteningBndryMaxPhys) + All.SofteningTable[5] = All.SofteningBndryMaxPhys / All.Time; + else + All.SofteningTable[5] = All.SofteningBndry; + } + else + { + All.SofteningTable[0] = All.SofteningGas; + All.SofteningTable[1] = All.SofteningHalo; + All.SofteningTable[2] = All.SofteningDisk; + All.SofteningTable[3] = All.SofteningBulge; + All.SofteningTable[4] = All.SofteningStars; + All.SofteningTable[5] = All.SofteningBndry; + } + + for(i = 0; i < 6; i++) + All.ForceSoftening[i] = 2.8 * All.SofteningTable[i]; + + All.MinGasHsml = All.MinGasHsmlFractional * All.ForceSoftening[0]; +} + + +/*! This function is used as a comparison kernel in a sort routine. It is + * used to group particles in the communication buffer that are going to + * be sent to the same CPU. + */ +int grav_tree_compare_key(const void *a, const void *b) +{ + if(((struct gravdata_index *) a)->Task < (((struct gravdata_index *) b)->Task)) + return -1; + + if(((struct gravdata_index *) a)->Task > (((struct gravdata_index *) b)->Task)) + return +1; + + return 0; +} diff --git a/src/PyGadget/src/gravtree.o b/src/PyGadget/src/gravtree.o new file mode 100644 index 0000000..a47a997 Binary files /dev/null and b/src/PyGadget/src/gravtree.o differ diff --git a/src/PyGadget/src/gravtree_forcetest.c b/src/PyGadget/src/gravtree_forcetest.c new file mode 100644 index 0000000..177ac09 --- /dev/null +++ b/src/PyGadget/src/gravtree_forcetest.c @@ -0,0 +1,354 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file gravtree_forcetest.c + * \brief routines for direct summation forces + * + * The code in this file allows to compute checks of the force accuracy by + * an independent direct summation computation. To this end, one can + * instruct GADGET in the Makefile to coompute direct summation forces for + * a certain random subfraction of particles. + */ + + + +#ifdef FORCETEST + +/*! This routine does the test of the gravitational tree force by computing + * the force for a random subset of particles with direct summation. 
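+ *
+ * Added note (not part of the original patch): the subset is drawn per
+ * particle by comparing get_random_number(P[i].ID) against the compile-time
+ * FORCETEST value, which thus acts as the selection probability; selected
+ * particles are temporarily flagged by negating Ti_endstep and are restored
+ * once the direct-summation test has finished.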
+ */ +void gravity_forcetest(void) +{ + int ntot, iter = 0, ntotleft, nthis; + double tstart, tend, timetree = 0; + int i, j, ndone, ngrp, maxfill, place, ndonetot; + +#ifndef NOGRAVITY + int *noffset, *nbuffer, *nsend, *nsend_local; + int k, nexport; + int level, sendTask, recvTask; + double fac1; + MPI_Status status; +#endif + double costtotal, *costtreelist; + double maxt, sumt, *timetreelist; + double fac; + char buf[200]; + +#ifdef PMGRID + if(All.PM_Ti_endstep != All.Ti_Current) + return; +#endif + + if(All.ComovingIntegrationOn) + set_softenings(); /* set new softening lengths */ + + for(i = 0, NumForceUpdate = 0; i < NumPart; i++) + { + if(P[i].Ti_endstep == All.Ti_Current) + { + if(get_random_number(P[i].ID) < FORCETEST) + { + P[i].Ti_endstep = -P[i].Ti_endstep - 1; + NumForceUpdate++; + } + } + } + + /* NumForceUpdate is the number of particles on this processor that want a force update */ + + MPI_Allreduce(&NumForceUpdate, &ntot, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + costtotal = 0; + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + + i = 0; /* beginn with this index */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + iter++; + + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + tstart = second(); + for(nexport = 0, ndone = 0; i < NumPart && nexport < All.BunchSizeForce - NTask; i++) + if(P[i].Ti_endstep < 0) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 1; + Exportflag[ThisTask] = 0; + + costtotal += force_treeevaluate_direct(i, 0); + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + GravDataGet[nexport].u.Pos[k] = P[i].Pos[k]; + +#ifdef UNEQUALSOFTENINGS + GravDataGet[nexport].Type = P[i].Type; +#endif + GravDataGet[nexport].w.OldAcc = P[i].OldAcc; + + GravDataIndexTable[nexport].Task = j; + GravDataIndexTable[nexport].Index = i; + GravDataIndexTable[nexport].SortIndex = nexport; + + nexport++; + nsend_local[j]++; + } + } + } + tend = second(); + timetree += timediff(tstart, tend); + + qsort(GravDataIndexTable, nexport, sizeof(struct gravdata_index), grav_tree_compare_key); + + for(j = 0; j < nexport; j++) + GravDataIn[j] = GravDataGet[GravDataIndexTable[j].SortIndex]; + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&GravDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_DIRECT_A, + &GravDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_DIRECT_A, 
MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + { + costtotal += force_treeevaluate_direct(j, 1); + } + tend = second(); + timetree += timediff(tstart, tend); + + + /* get the result */ + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&GravDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_DIRECT_B, + &GravDataOut[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_DIRECT_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + place = GravDataIndexTable[noffset[recvTask] + j].Index; + + for(k = 0; k < 3; k++) + P[place].GravAccelDirect[k] += GravDataOut[j + noffset[recvTask]].u.Acc[k]; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + + level = ngrp - 1; + } + + MPI_Allreduce(&ndone, &ndonetot, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + ntotleft -= ndonetot; + } + + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + + /* now add things for comoving integration */ + + if(All.ComovingIntegrationOn) + { +#ifndef PERIODIC + fac1 = 0.5 * All.Hubble * All.Hubble * All.Omega0 / All.G; + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep < 0) + for(j = 0; j < 3; j++) + P[i].GravAccelDirect[j] += fac1 * P[i].Pos[j]; +#endif + } + + + + /* muliply by G */ + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep < 0) + for(j = 0; j < 3; j++) + P[i].GravAccelDirect[j] *= All.G; + + + + /* Finally, the following factor allows a computation of cosmological simulation + with vacuum energy in physical coordinates */ + + if(All.ComovingIntegrationOn == 0) + { + fac1 = All.OmegaLambda * All.Hubble * All.Hubble; + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep < 0) + for(j = 0; j < 3; j++) + P[i].GravAccelDirect[j] += fac1 * P[i].Pos[j]; + } + + /* now output the forces to a file */ + + for(nthis = 0; nthis < NTask; nthis++) + { + if(nthis == ThisTask) + { + sprintf(buf, "%s%s", All.OutputDir, "forcetest.txt"); + if(!(FdForceTest = fopen(buf, "a"))) + { + printf("error in opening file '%s'\n", buf); + endrun(17); + } + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep < 0) + { +#ifndef PMGRID + fprintf(FdForceTest, "%d %g %g %g %g %g %g %g %g %g %g %g\n", + P[i].Type, All.Time, All.Time - TimeOfLastTreeConstruction, + P[i].Pos[0], P[i].Pos[1], P[i].Pos[2], + P[i].GravAccelDirect[0], P[i].GravAccelDirect[1], P[i].GravAccelDirect[2], + P[i].GravAccel[0], P[i].GravAccel[1], P[i].GravAccel[2]); +#else + fprintf(FdForceTest, "%d %g %g %g %g %g %g %g %g %g %g %g %g %g %g\n", + P[i].Type, All.Time, All.Time - TimeOfLastTreeConstruction, + P[i].Pos[0], P[i].Pos[1], P[i].Pos[2], + P[i].GravAccelDirect[0], P[i].GravAccelDirect[1], P[i].GravAccelDirect[2], + P[i].GravAccel[0], P[i].GravAccel[1], 
P[i].GravAccel[2], + P[i].GravPM[0] + P[i].GravAccel[0], + P[i].GravPM[1] + P[i].GravAccel[1], P[i].GravPM[2] + P[i].GravAccel[2]); +#endif + } + fclose(FdForceTest); + } + MPI_Barrier(MPI_COMM_WORLD); + } + + for(i = 0; i < NumPart; i++) + if(P[i].Ti_endstep < 0) + P[i].Ti_endstep = -P[i].Ti_endstep - 1; + + /* Now the force computation is finished */ + + + + timetreelist = malloc(sizeof(double) * NTask); + costtreelist = malloc(sizeof(double) * NTask); + + MPI_Gather(&costtotal, 1, MPI_DOUBLE, costtreelist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + MPI_Gather(&timetree, 1, MPI_DOUBLE, timetreelist, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + fac = NTask / ((double) All.TotNumPart); + + for(i = 0, maxt = timetreelist[0], sumt = 0, costtotal = 0; i < NTask; i++) + { + costtotal += costtreelist[i]; + + if(maxt < timetreelist[i]) + maxt = timetreelist[i]; + sumt += timetreelist[i]; + } + + fprintf(FdTimings, "DIRECT Nf= %d part/sec=%g | %g ia/part=%g \n", ntot, ntot / (sumt + 1.0e-20), + ntot / (maxt * NTask), ((double) (costtotal)) / ntot); + fprintf(FdTimings, "\n"); + + fflush(FdTimings); + } + + free(costtreelist); + free(timetreelist); +} + +#endif diff --git a/src/PyGadget/src/gravtree_forcetest.o b/src/PyGadget/src/gravtree_forcetest.o new file mode 100644 index 0000000..6f01623 Binary files /dev/null and b/src/PyGadget/src/gravtree_forcetest.o differ diff --git a/src/PyGadget/src/hydra.c b/src/PyGadget/src/hydra.c new file mode 100644 index 0000000..167953a --- /dev/null +++ b/src/PyGadget/src/hydra.c @@ -0,0 +1,570 @@ +#include +#include +#include +#include +#include +#include +#include "allvars.h" +#include "proto.h" + +/*! \file hydra.c + * \brief Computation of SPH forces and rate of entropy generation + * + * This file contains the "second SPH loop", where the SPH forces are + * computed, and where the rate of change of entropy due to the shock heating + * (via artificial viscosity) is computed. + */ + + +static double hubble_a, atime, hubble_a2, fac_mu, fac_vsic_fix, a3inv, fac_egy; + +#ifdef PERIODIC +static double boxSize, boxHalf; + +#ifdef LONG_X +static double boxSize_X, boxHalf_X; +#else +#define boxSize_X boxSize +#define boxHalf_X boxHalf +#endif +#ifdef LONG_Y +static double boxSize_Y, boxHalf_Y; +#else +#define boxSize_Y boxSize +#define boxHalf_Y boxHalf +#endif +#ifdef LONG_Z +static double boxSize_Z, boxHalf_Z; +#else +#define boxSize_Z boxSize +#define boxHalf_Z boxHalf +#endif +#endif + + + +/*! This function is the driver routine for the calculation of hydrodynamical + * force and rate of change of entropy due to shock heating for all active + * particles . 
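+ *
+ * Added note (not part of the original patch): besides filling the export
+ * buffer, the loop below precomputes the shear limiter
+ *   F1 = |div v| / (|div v| + |curl v| + 0.0001 * c_s / h),
+ * with the small safety term made comoving-safe via fac_mu; F1 is passed to
+ * hydro_evaluate(), where it suppresses the artificial viscosity in flows
+ * dominated by shear rather than compression.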
+ */ +void hydro_force(void) +{ + long long ntot, ntotleft; + int i, j, k, n, ngrp, maxfill, source, ndone; + int *nbuffer, *noffset, *nsend_local, *nsend, *numlist, *ndonelist; + int level, sendTask, recvTask, nexport, place; + double soundspeed_i; + double tstart, tend, sumt, sumcomm; + double timecomp = 0, timecommsumm = 0, timeimbalance = 0, sumimbalance; + MPI_Status status; + +#ifdef PERIODIC + boxSize = All.BoxSize; + boxHalf = 0.5 * All.BoxSize; +#ifdef LONG_X + boxHalf_X = boxHalf * LONG_X; + boxSize_X = boxSize * LONG_X; +#endif +#ifdef LONG_Y + boxHalf_Y = boxHalf * LONG_Y; + boxSize_Y = boxSize * LONG_Y; +#endif +#ifdef LONG_Z + boxHalf_Z = boxHalf * LONG_Z; + boxSize_Z = boxSize * LONG_Z; +#endif +#endif + + if(All.ComovingIntegrationOn) + { + /* Factors for comoving integration of hydro */ + hubble_a = All.Omega0 / (All.Time * All.Time * All.Time) + + (1 - All.Omega0 - All.OmegaLambda) / (All.Time * All.Time) + All.OmegaLambda; + + hubble_a = All.Hubble * sqrt(hubble_a); + hubble_a2 = All.Time * All.Time * hubble_a; + + fac_mu = pow(All.Time, 3 * (GAMMA - 1) / 2) / All.Time; + + fac_egy = pow(All.Time, 3 * (GAMMA - 1)); + + fac_vsic_fix = hubble_a * pow(All.Time, 3 * GAMMA_MINUS1); + + a3inv = 1 / (All.Time * All.Time * All.Time); + atime = All.Time; + } + else + hubble_a = hubble_a2 = atime = fac_mu = fac_vsic_fix = a3inv = fac_egy = 1.0; + + + /* `NumSphUpdate' gives the number of particles on this processor that want a force update */ + for(n = 0, NumSphUpdate = 0; n < N_gas; n++) + { + if(P[n].Ti_endstep == All.Ti_Current) + NumSphUpdate++; + } + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumSphUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + + i = 0; /* first particle for this task */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + tstart = second(); + for(nexport = 0, ndone = 0; i < N_gas && nexport < All.BunchSizeHydro - NTask; i++) + if(P[i].Ti_endstep == All.Ti_Current) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; + + hydro_evaluate(i, 0); + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + { + HydroDataIn[nexport].Pos[k] = P[i].Pos[k]; + HydroDataIn[nexport].Vel[k] = SphP[i].VelPred[k]; + } + HydroDataIn[nexport].Hsml = SphP[i].Hsml; + HydroDataIn[nexport].Mass = P[i].Mass; + HydroDataIn[nexport].DhsmlDensityFactor = SphP[i].DhsmlDensityFactor; + HydroDataIn[nexport].Density = SphP[i].Density; + HydroDataIn[nexport].Pressure = SphP[i].Pressure; + HydroDataIn[nexport].Timestep = P[i].Ti_endstep - P[i].Ti_begstep; + + /* calculation of F1 */ + soundspeed_i = sqrt(GAMMA * SphP[i].Pressure / SphP[i].Density); + HydroDataIn[nexport].F1 = fabs(SphP[i].DivVel) / + (fabs(SphP[i].DivVel) + SphP[i].CurlVel + + 0.0001 * soundspeed_i / SphP[i].Hsml / fac_mu); + + HydroDataIn[nexport].Index = i; + HydroDataIn[nexport].Task = j; + nexport++; + nsend_local[j]++; + } + } + } + tend = second(); + timecomp += timediff(tstart, tend); + + qsort(HydroDataIn, nexport, sizeof(struct hydrodata_in), hydro_compare_key); + 
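+      /* HydroDataIn is now sorted by destination task (see hydro_compare_key),
+         so all entries bound for the same task form one contiguous block.
+         The noffset[] array computed next holds the starting offset of each
+         block and nsend_local[] its length; the MPI_Sendrecv calls further
+         down exchange these blocks directly out of the sorted buffer. */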
+ for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + tstart = second(); + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + tend = second(); + timeimbalance += timediff(tstart, tend); + + + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeHydro) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&HydroDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct hydrodata_in), MPI_BYTE, + recvTask, TAG_HYDRO_A, + &HydroDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct hydrodata_in), MPI_BYTE, + recvTask, TAG_HYDRO_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + /* now do the imported particles */ + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + hydro_evaluate(j, 1); + tend = second(); + timecomp += timediff(tstart, tend); + + /* do a block to measure imbalance */ + tstart = second(); + MPI_Barrier(MPI_COMM_WORLD); + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* get the result */ + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeHydro) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&HydroDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct hydrodata_out), + MPI_BYTE, recvTask, TAG_HYDRO_B, + &HydroDataPartialResult[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct hydrodata_out), + MPI_BYTE, recvTask, TAG_HYDRO_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + source = j + noffset[recvTask]; + place = HydroDataIn[source].Index; + + for(k = 0; k < 3; k++) + SphP[place].HydroAccel[k] += HydroDataPartialResult[source].Acc[k]; + + SphP[place].DtEntropy += HydroDataPartialResult[source].DtEntropy; + + if(SphP[place].MaxSignalVel < HydroDataPartialResult[source].MaxSignalVel) + SphP[place].MaxSignalVel = HydroDataPartialResult[source].MaxSignalVel; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + free(ndonelist); + free(nsend); + free(nsend_local); + 
free(nbuffer); + free(noffset); + + + + /* do final operations on results */ + tstart = second(); + + for(i = 0; i < N_gas; i++) + if(P[i].Ti_endstep == All.Ti_Current) + { + SphP[i].DtEntropy *= GAMMA_MINUS1 / (hubble_a2 * pow(SphP[i].Density, GAMMA_MINUS1)); +#ifdef SPH_BND_PARTICLES + if(P[i].ID == 0) + { + SphP[i].DtEntropy = 0; + for(k = 0; k < 3; k++) + SphP[i].HydroAccel[k] = 0; + } +#endif + } + + tend = second(); + timecomp += timediff(tstart, tend); + + /* collect some timing information */ + + MPI_Reduce(&timecomp, &sumt, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timecommsumm, &sumcomm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + All.CPU_HydCompWalk += sumt / NTask; + All.CPU_HydCommSumm += sumcomm / NTask; + All.CPU_HydImbalance += sumimbalance / NTask; + } +} + + +/*! This function is the 'core' of the SPH force computation. A target + * particle is specified which may either be local, or reside in the + * communication buffer. + */ +void hydro_evaluate(int target, int mode) +{ + int j, k, n, timestep, startnode, numngb; + FLOAT *pos, *vel; + FLOAT mass, h_i, dhsmlDensityFactor, rho, pressure, f1, f2; + double acc[3], dtEntropy, maxSignalVel; + double dx, dy, dz, dvx, dvy, dvz; + double h_i2, hinv, hinv4; + double p_over_rho2_i, p_over_rho2_j, soundspeed_i, soundspeed_j; + double hfc, dwk_i, vdotr, vdotr2, visc, mu_ij, rho_ij, vsig; + double h_j, dwk_j, r, r2, u, hfc_visc; + +#ifndef NOVISCOSITYLIMITER + double dt; +#endif + + if(mode == 0) + { + pos = P[target].Pos; + vel = SphP[target].VelPred; + h_i = SphP[target].Hsml; + mass = P[target].Mass; + dhsmlDensityFactor = SphP[target].DhsmlDensityFactor; + rho = SphP[target].Density; + pressure = SphP[target].Pressure; + timestep = P[target].Ti_endstep - P[target].Ti_begstep; + soundspeed_i = sqrt(GAMMA * pressure / rho); + f1 = fabs(SphP[target].DivVel) / + (fabs(SphP[target].DivVel) + SphP[target].CurlVel + + 0.0001 * soundspeed_i / SphP[target].Hsml / fac_mu); + } + else + { + pos = HydroDataGet[target].Pos; + vel = HydroDataGet[target].Vel; + h_i = HydroDataGet[target].Hsml; + mass = HydroDataGet[target].Mass; + dhsmlDensityFactor = HydroDataGet[target].DhsmlDensityFactor; + rho = HydroDataGet[target].Density; + pressure = HydroDataGet[target].Pressure; + timestep = HydroDataGet[target].Timestep; + soundspeed_i = sqrt(GAMMA * pressure / rho); + f1 = HydroDataGet[target].F1; + } + + + /* initialize variables before SPH loop is started */ + acc[0] = acc[1] = acc[2] = dtEntropy = 0; + maxSignalVel = 0; + + p_over_rho2_i = pressure / (rho * rho) * dhsmlDensityFactor; + h_i2 = h_i * h_i; + + /* Now start the actual SPH computation for this particle */ + startnode = All.MaxPart; + do + { + numngb = ngb_treefind_pairs(&pos[0], h_i, &startnode); + + for(n = 0; n < numngb; n++) + { + j = Ngblist[n]; + + dx = pos[0] - P[j].Pos[0]; + dy = pos[1] - P[j].Pos[1]; + dz = pos[2] - P[j].Pos[2]; + +#ifdef PERIODIC /* find the closest image in the given box size */ + if(dx > boxHalf_X) + dx -= boxSize_X; + if(dx < -boxHalf_X) + dx += boxSize_X; + if(dy > boxHalf_Y) + dy -= boxSize_Y; + if(dy < -boxHalf_Y) + dy += boxSize_Y; + if(dz > boxHalf_Z) + dz -= boxSize_Z; + if(dz < -boxHalf_Z) + dz += boxSize_Z; +#endif + r2 = dx * dx + dy * dy + dz * dz; + h_j = SphP[j].Hsml; + if(r2 < h_i2 || r2 < h_j * h_j) + { + r = sqrt(r2); + if(r > 0) + { + p_over_rho2_j = SphP[j].Pressure / (SphP[j].Density * SphP[j].Density); + 
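+		  /* p_over_rho2_j is the P/rho^2 factor of the neighbour that enters
+		     the symmetrised SPH momentum equation below; the sound speed
+		     follows from c^2 = GAMMA * P / rho.  Together with the kernel
+		     derivatives dwk_i and dwk_j and the artificial viscosity term,
+		     these quantities determine the pairwise hydro force and the
+		     rate of entropy generation accumulated in dtEntropy. */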
soundspeed_j = sqrt(GAMMA * p_over_rho2_j * SphP[j].Density); + dvx = vel[0] - SphP[j].VelPred[0]; + dvy = vel[1] - SphP[j].VelPred[1]; + dvz = vel[2] - SphP[j].VelPred[2]; + vdotr = dx * dvx + dy * dvy + dz * dvz; + + if(All.ComovingIntegrationOn) + vdotr2 = vdotr + hubble_a2 * r2; + else + vdotr2 = vdotr; + + if(r2 < h_i2) + { + hinv = 1.0 / h_i; +#ifndef TWODIMS + hinv4 = hinv * hinv * hinv * hinv; +#else + hinv4 = hinv * hinv * hinv / boxSize_Z; +#endif + u = r * hinv; + if(u < 0.5) + dwk_i = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4); + else + dwk_i = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u); + } + else + { + dwk_i = 0; + } + + if(r2 < h_j * h_j) + { + hinv = 1.0 / h_j; +#ifndef TWODIMS + hinv4 = hinv * hinv * hinv * hinv; +#else + hinv4 = hinv * hinv * hinv / boxSize_Z; +#endif + u = r * hinv; + if(u < 0.5) + dwk_j = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4); + else + dwk_j = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u); + } + else + { + dwk_j = 0; + } + + if(soundspeed_i + soundspeed_j > maxSignalVel) + maxSignalVel = soundspeed_i + soundspeed_j; + + if(vdotr2 < 0) /* ... artificial viscosity */ + { + mu_ij = fac_mu * vdotr2 / r; /* note: this is negative! */ + + vsig = soundspeed_i + soundspeed_j - 3 * mu_ij; + + if(vsig > maxSignalVel) + maxSignalVel = vsig; + + rho_ij = 0.5 * (rho + SphP[j].Density); + f2 = + fabs(SphP[j].DivVel) / (fabs(SphP[j].DivVel) + SphP[j].CurlVel + + 0.0001 * soundspeed_j / fac_mu / SphP[j].Hsml); + + visc = 0.25 * All.ArtBulkViscConst * vsig * (-mu_ij) / rho_ij * (f1 + f2); + + /* .... end artificial viscosity evaluation */ +#ifndef NOVISCOSITYLIMITER + /* make sure that viscous acceleration is not too large */ + dt = imax(timestep, (P[j].Ti_endstep - P[j].Ti_begstep)) * All.Timebase_interval; + if(dt > 0 && (dwk_i + dwk_j) < 0) + { + visc = dmin(visc, 0.5 * fac_vsic_fix * vdotr2 / + (0.5 * (mass + P[j].Mass) * (dwk_i + dwk_j) * r * dt)); + } +#endif + } + else + visc = 0; + + p_over_rho2_j *= SphP[j].DhsmlDensityFactor; + + hfc_visc = 0.5 * P[j].Mass * visc * (dwk_i + dwk_j) / r; + + hfc = hfc_visc + P[j].Mass * (p_over_rho2_i * dwk_i + p_over_rho2_j * dwk_j) / r; + + acc[0] -= hfc * dx; + acc[1] -= hfc * dy; + acc[2] -= hfc * dz; + dtEntropy += 0.5 * hfc_visc * vdotr2; + } + } + } + } + while(startnode >= 0); + + /* Now collect the result at the right place */ + if(mode == 0) + { + for(k = 0; k < 3; k++) + SphP[target].HydroAccel[k] = acc[k]; + SphP[target].DtEntropy = dtEntropy; + SphP[target].MaxSignalVel = maxSignalVel; + } + else + { + for(k = 0; k < 3; k++) + HydroDataResult[target].Acc[k] = acc[k]; + HydroDataResult[target].DtEntropy = dtEntropy; + HydroDataResult[target].MaxSignalVel = maxSignalVel; + } +} + + + + +/*! This is a comparison kernel for a sort routine, which is used to group + * particles that are going to be exported to the same CPU. 
+ */ +int hydro_compare_key(const void *a, const void *b) +{ + if(((struct hydrodata_in *) a)->Task < (((struct hydrodata_in *) b)->Task)) + return -1; + if(((struct hydrodata_in *) a)->Task > (((struct hydrodata_in *) b)->Task)) + return +1; + return 0; +} diff --git a/src/PyGadget/src/hydra.o b/src/PyGadget/src/hydra.o new file mode 100644 index 0000000..d36f218 Binary files /dev/null and b/src/PyGadget/src/hydra.o differ diff --git a/src/PyGadget/src/init.c b/src/PyGadget/src/init.c new file mode 100644 index 0000000..d43d175 --- /dev/null +++ b/src/PyGadget/src/init.c @@ -0,0 +1,315 @@ +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file init.c + * \brief Code for initialisation of a simulation from initial conditions + */ + + +/*! This function reads the initial conditions, and allocates storage for the + * tree. Various variables of the particle data are initialised and An intial + * domain decomposition is performed. If SPH particles are present, the inial + * SPH smoothing lengths are determined. + */ +void init(void) +{ + int i, j; + double a3; + + All.Time = All.TimeBegin; + + switch (All.ICFormat) + { + case 1: +#if (MAKEGLASS > 1) + seed_glass(); +#else + read_ic(All.InitCondFile); +#endif + break; + case 2: + case 3: + read_ic(All.InitCondFile); + break; + default: + if(ThisTask == 0) + printf("ICFormat=%d not supported.\n", All.ICFormat); + endrun(0); + } + + All.Time = All.TimeBegin; + All.Ti_Current = 0; + + if(All.ComovingIntegrationOn) + { + All.Timebase_interval = (log(All.TimeMax) - log(All.TimeBegin)) / TIMEBASE; + a3 = All.Time * All.Time * All.Time; + } + else + { + All.Timebase_interval = (All.TimeMax - All.TimeBegin) / TIMEBASE; + a3 = 1; + } + + set_softenings(); + + All.NumCurrentTiStep = 0; /* setup some counters */ + All.SnapshotFileCount = 0; + if(RestartFlag == 2) + All.SnapshotFileCount = atoi(All.InitCondFile + strlen(All.InitCondFile) - 3) + 1; + + All.TotNumOfForces = 0; + All.NumForcesSinceLastDomainDecomp = 0; + + if(All.ComovingIntegrationOn) + if(All.PeriodicBoundariesOn == 1) + check_omega(); + + All.TimeLastStatistics = All.TimeBegin - All.TimeBetStatistics; + + if(All.ComovingIntegrationOn) /* change to new velocity variable */ + { + for(i = 0; i < NumPart; i++) + for(j = 0; j < 3; j++) + P[i].Vel[j] *= sqrt(All.Time) * All.Time; + } + + for(i = 0; i < NumPart; i++) /* start-up initialization */ + { + for(j = 0; j < 3; j++) + P[i].GravAccel[j] = 0; +#ifdef PMGRID + for(j = 0; j < 3; j++) + P[i].GravPM[j] = 0; +#endif + P[i].Ti_endstep = 0; + P[i].Ti_begstep = 0; + + P[i].OldAcc = 0; + P[i].GravCost = 1; + P[i].Potential = 0; + } + +#ifdef PMGRID + All.PM_Ti_endstep = All.PM_Ti_begstep = 0; +#endif + +#ifdef FLEXSTEPS + All.PresentMinStep = TIMEBASE; + for(i = 0; i < NumPart; i++) /* start-up initialization */ + { + P[i].FlexStepGrp = (int) (TIMEBASE * get_random_number(P[i].ID)); + } +#endif + + + for(i = 0; i < N_gas; i++) /* initialize sph_properties */ + { + for(j = 0; j < 3; j++) + { + SphP[i].VelPred[j] = P[i].Vel[j]; + SphP[i].HydroAccel[j] = 0; + } + + SphP[i].DtEntropy = 0; + + if(RestartFlag == 0) + { + SphP[i].Hsml = 0; + SphP[i].Density = -1; + } + } + + ngb_treeallocate(MAX_NGB); + + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + + Flag_FullStep = 1; /* to ensure that Peano-Hilber order is done */ + + domain_Decomposition(); /* do initial domain decomposition (gives equal 
numbers of particles) */ + + ngb_treebuild(); /* will build tree */ + + setup_smoothinglengths(); + + TreeReconstructFlag = 1; + + /* at this point, the entropy variable normally contains the + * internal energy, read in from the initial conditions file, unless the file + * explicitly signals that the initial conditions contain the entropy directly. + * Once the density has been computed, we can convert thermal energy to entropy. + */ +#ifndef ISOTHERM_EQS + if(header.flag_entropy_instead_u == 0) + for(i = 0; i < N_gas; i++) + SphP[i].Entropy = GAMMA_MINUS1 * SphP[i].Entropy / pow(SphP[i].Density / a3, GAMMA_MINUS1); +#endif +} + + +/*! This routine computes the mass content of the box and compares it to the + * specified value of Omega-matter. If discrepant, the run is terminated. + */ +void check_omega(void) +{ + double mass = 0, masstot, omega; + int i; + + for(i = 0; i < NumPart; i++) + mass += P[i].Mass; + + MPI_Allreduce(&mass, &masstot, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + + omega = + masstot / (All.BoxSize * All.BoxSize * All.BoxSize) / (3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)); + + if(fabs(omega - All.Omega0) > 1.0e-3) + { + if(ThisTask == 0) + { + printf("\n\nI've found something odd!\n"); + printf + ("The mass content accounts only for Omega=%g,\nbut you specified Omega=%g in the parameterfile.\n", + omega, All.Omega0); + printf("\nI better stop.\n"); + + fflush(stdout); + } + endrun(1); + } +} + + + +/*! This function is used to find an initial smoothing length for each SPH + * particle. It guarantees that the number of neighbours will be between + * desired_ngb-MAXDEV and desired_ngb+MAXDEV. For simplicity, a first guess + * of the smoothing length is provided to the function density(), which will + * then iterate if needed to find the right smoothing length. + */ +void setup_smoothinglengths(void) +{ + int i, no, p; + + if(RestartFlag == 0) + { + + for(i = 0; i < N_gas; i++) + { + no = Father[i]; + + while(10 * All.DesNumNgb * P[i].Mass > Nodes[no].u.d.mass) + { + p = Nodes[no].u.d.father; + + if(p < 0) + break; + + no = p; + } +#ifndef TWODIMS + SphP[i].Hsml = + pow(3.0 / (4 * M_PI) * All.DesNumNgb * P[i].Mass / Nodes[no].u.d.mass, 1.0 / 3) * Nodes[no].len; +#else + SphP[i].Hsml = + pow(1.0 / (M_PI) * All.DesNumNgb * P[i].Mass / Nodes[no].u.d.mass, 1.0 / 2) * Nodes[no].len; +#endif + } + } + + density(); +} + +#ifdef PY_INTERFACE +/*! This function is used to find an initial smoothing length for each SPH + * particle. It guarantees that the number of neighbours will be between + * desired_ngb-MAXDEV and desired_ngb+MAXDEV. For simplicity, a first guess + * of the smoothing length is provided to the function density(), which will + * then iterate if needed to find the right smoothing length. + */ +void setup_smoothinglengths_sub(void) +{ + + + /* before, we can not use the tree, as Q particles do not belong to them... */ + + + density_sub(); +} +#endif + + + +/*! If the code is run in glass-making mode, this function populates the + * simulation box with a Poisson sample of particles. 
+ */ +#if (MAKEGLASS > 1) +void seed_glass(void) +{ + int i, k, n_for_this_task; + double Range[3], LowerBound[3]; + double drandom, partmass; + long long IDstart; + + All.TotNumPart = MAKEGLASS; + partmass = All.Omega0 * (3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)) + * (All.BoxSize * All.BoxSize * All.BoxSize) / All.TotNumPart; + + All.MaxPart = All.PartAllocFactor * (All.TotNumPart / NTask); /* sets the maximum number of particles that may */ + + allocate_memory(); + + header.npartTotal[1] = All.TotNumPart; + header.mass[1] = partmass; + + if(ThisTask == 0) + { + printf("\nGlass initialising\nPartMass= %g\n", partmass); + printf("TotNumPart= %d%09d\n\n", + (int) (All.TotNumPart / 1000000000), (int) (All.TotNumPart % 1000000000)); + } + + /* set the number of particles assigned locally to this task */ + n_for_this_task = All.TotNumPart / NTask; + + if(ThisTask == NTask - 1) + n_for_this_task = All.TotNumPart - (NTask - 1) * n_for_this_task; + + NumPart = 0; + IDstart = 1 + (All.TotNumPart / NTask) * ThisTask; + + /* split the temporal domain into Ntask slabs in z-direction */ + + Range[0] = Range[1] = All.BoxSize; + Range[2] = All.BoxSize / NTask; + LowerBound[0] = LowerBound[1] = 0; + LowerBound[2] = ThisTask * Range[2]; + + srand48(ThisTask); + + for(i = 0; i < n_for_this_task; i++) + { + for(k = 0; k < 3; k++) + { + drandom = drand48(); + + P[i].Pos[k] = LowerBound[k] + Range[k] * drandom; + P[i].Vel[k] = 0; + } + + P[i].Mass = partmass; + P[i].Type = 1; + P[i].ID = IDstart + i; + + NumPart++; + } +} +#endif diff --git a/src/PyGadget/src/init.o b/src/PyGadget/src/init.o new file mode 100644 index 0000000..c31f32e Binary files /dev/null and b/src/PyGadget/src/init.o differ diff --git a/src/PyGadget/src/io.c b/src/PyGadget/src/io.c new file mode 100644 index 0000000..e46443a --- /dev/null +++ b/src/PyGadget/src/io.c @@ -0,0 +1,1150 @@ +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_HDF5 +#include +#endif + +#include "allvars.h" +#include "proto.h" + + + +/*! \file io.c + * \brief Routines for producing a snapshot file on disk. + */ + +static int n_type[6]; +static long long ntot_type_all[6]; + + + + +/*! This function writes a snapshot of the particle distribution to one or + * several files using the selected file format. If NumFilesPerSnapshot>1, + * the snapshot is distributed onto several files, several of them can be + * written simultaneously (up to NumFilesWrittenInParallel). Each file + * contains data from a group of processors. + */ +void savepositions(int num) +{ + double t0, t1; + char buf[500]; + int i, j, *temp, n, filenr, gr, ngroups, masterTask, lastTask; + + t0 = second(); + + if(ThisTask == 0) + printf("\nwriting snapshot file... 
\n"); + +#if defined(SFR) || defined(BLACK_HOLES) + rearrange_particle_sequence(); + /* ensures that new tree will be constructed */ + All.NumForcesSinceLastDomainDecomp = 1 + All.TreeDomainUpdateFrequency * All.TotNumPart; +#endif + + if(NTask < All.NumFilesPerSnapshot) + { + if(ThisTask == 0) + printf("Fatal error.\nNumber of processors must be larger or equal than All.NumFilesPerSnapshot.\n"); + endrun(0); + } + if(All.SnapFormat < 1 || All.SnapFormat > 3) + { + if(ThisTask == 0) + printf("Unsupported File-Format\n"); + endrun(0); + } +#ifndef HAVE_HDF5 + if(All.SnapFormat == 3) + { + if(ThisTask == 0) + printf("Code wasn't compiled with HDF5 support enabled!\n"); + endrun(0); + } +#endif + + + /* determine global and local particle numbers */ + for(n = 0; n < 6; n++) + n_type[n] = 0; + + for(n = 0; n < NumPart; n++) + n_type[P[n].Type]++; + + /* because ntot_type_all[] is of type `long long', we cannot do a simple + * MPI_Allreduce() to sum the total particle numbers + */ + temp = malloc(NTask * 6 * sizeof(int)); + MPI_Allgather(n_type, 6, MPI_INT, temp, 6, MPI_INT, MPI_COMM_WORLD); + for(i = 0; i < 6; i++) + { + ntot_type_all[i] = 0; + for(j = 0; j < NTask; j++) + ntot_type_all[i] += temp[j * 6 + i]; + } + free(temp); + + + /* assign processors to output files */ + distribute_file(All.NumFilesPerSnapshot, 0, 0, NTask - 1, &filenr, &masterTask, &lastTask); + + fill_Tab_IO_Labels(); + + if(All.NumFilesPerSnapshot > 1) + sprintf(buf, "%s%s_%03d.%d", All.OutputDir, All.SnapshotFileBase, num, filenr); + else + sprintf(buf, "%s%s_%03d", All.OutputDir, All.SnapshotFileBase, num); + + ngroups = All.NumFilesPerSnapshot / All.NumFilesWrittenInParallel; + if((All.NumFilesPerSnapshot % All.NumFilesWrittenInParallel)) + ngroups++; + + for(gr = 0; gr < ngroups; gr++) + { + if((filenr / All.NumFilesWrittenInParallel) == gr) /* ok, it's this processor's turn */ + write_file(buf, masterTask, lastTask); + MPI_Barrier(MPI_COMM_WORLD); + } + + + if(ThisTask == 0) + printf("done with snapshot.\n"); + + t1 = second(); + + All.CPU_Snapshot += timediff(t0, t1); + +} + + + +/*! This function fills the write buffer with particle data. New output blocks + * can in principle be added here. 
+ */ +void fill_write_buffer(enum iofields blocknr, int *startindex, int pc, int type) +{ + int n, k, pindex; + float *fp; + +#ifdef LONGIDS + long long *ip; +#else + int *ip; +#endif + +#ifdef PERIODIC + FLOAT boxSize; +#endif +#ifdef PMGRID + double dt_gravkick_pm = 0; +#endif + double dt_gravkick, dt_hydrokick, a3inv = 1, fac1, fac2; + + + if(All.ComovingIntegrationOn) + { + a3inv = 1 / (All.Time * All.Time * All.Time); + fac1 = 1 / (All.Time * All.Time); + fac2 = 1 / pow(All.Time, 3 * GAMMA - 2); + } + else + a3inv = fac1 = fac2 = 1; + +#ifdef PMGRID + if(All.ComovingIntegrationOn) + dt_gravkick_pm = + get_gravkick_factor(All.PM_Ti_begstep, + All.Ti_Current) - + get_gravkick_factor(All.PM_Ti_begstep, (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2); + else + dt_gravkick_pm = (All.Ti_Current - (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2) * All.Timebase_interval; +#endif + + + + fp = CommBuffer; + ip = CommBuffer; + + pindex = *startindex; + + switch (blocknr) + { + case IO_POS: /* positions */ + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + for(k = 0; k < 3; k++) + { + fp[k] = P[pindex].Pos[k]; +#ifdef PERIODIC + boxSize = All.BoxSize; +#ifdef LONG_X + if(k == 0) + boxSize = All.BoxSize * LONG_X; +#endif +#ifdef LONG_Y + if(k == 1) + boxSize = All.BoxSize * LONG_Y; +#endif +#ifdef LONG_Z + if(k == 2) + boxSize = All.BoxSize * LONG_Z; +#endif + while(fp[k] < 0) + fp[k] += boxSize; + while(fp[k] >= boxSize) + fp[k] -= boxSize; +#endif + } + n++; + fp += 3; + } + break; + + case IO_VEL: /* velocities */ + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + if(All.ComovingIntegrationOn) + { + dt_gravkick = + get_gravkick_factor(P[pindex].Ti_begstep, + All.Ti_Current) - + get_gravkick_factor(P[pindex].Ti_begstep, + (P[pindex].Ti_begstep + P[pindex].Ti_endstep) / 2); + dt_hydrokick = + get_hydrokick_factor(P[pindex].Ti_begstep, + All.Ti_Current) - + get_hydrokick_factor(P[pindex].Ti_begstep, + (P[pindex].Ti_begstep + P[pindex].Ti_endstep) / 2); + } + else + dt_gravkick = dt_hydrokick = + (All.Ti_Current - (P[pindex].Ti_begstep + P[pindex].Ti_endstep) / 2) * All.Timebase_interval; + + for(k = 0; k < 3; k++) + { + fp[k] = P[pindex].Vel[k] + P[pindex].GravAccel[k] * dt_gravkick; + if(P[pindex].Type == 0) + fp[k] += SphP[pindex].HydroAccel[k] * dt_hydrokick; + } +#ifdef PMGRID + for(k = 0; k < 3; k++) + fp[k] += P[pindex].GravPM[k] * dt_gravkick_pm; +#endif + for(k = 0; k < 3; k++) + fp[k] *= sqrt(a3inv); + + n++; + fp += 3; + } + break; + + case IO_ID: /* particle ID */ + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + *ip++ = P[pindex].ID; + n++; + } + break; + + case IO_MASS: /* particle mass */ + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + *fp++ = P[pindex].Mass; + n++; + } + break; + + case IO_U: /* internal energy */ + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { +#ifdef ISOTHERM_EQS + *fp++ = SphP[pindex].Entropy; +#else + *fp++ = + dmax(All.MinEgySpec, + SphP[pindex].Entropy / GAMMA_MINUS1 * pow(SphP[pindex].Density * a3inv, GAMMA_MINUS1)); +#endif + n++; + } + break; + + case IO_RHO: /* density */ + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + *fp++ = SphP[pindex].Density; + n++; + } + break; + + case IO_HSML: /* SPH smoothing length */ + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + *fp++ = SphP[pindex].Hsml; + n++; + } + break; + + + case IO_POT: /* gravitational potential */ +#ifdef OUTPUTPOTENTIAL + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + *fp++ = 
P[pindex].Potential; + n++; + } +#endif + break; + + case IO_ACCEL: /* acceleration */ +#ifdef OUTPUTACCELERATION + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + for(k = 0; k < 3; k++) + fp[k] = fac1 * P[pindex].GravAccel[k]; +#ifdef PMGRID + for(k = 0; k < 3; k++) + fp[k] += fac1 * P[pindex].GravPM[k]; +#endif + if(P[pindex].Type == 0) + for(k = 0; k < 3; k++) + fp[k] += fac2 * SphP[pindex].HydroAccel[k]; + fp += 3; + n++; + } +#endif + break; + + case IO_DTENTR: /* rate of change of entropy */ +#ifdef OUTPUTCHANGEOFENTROPY + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + *fp++ = SphP[pindex].DtEntropy; + n++; + } +#endif + break; + + case IO_TSTP: /* timestep */ +#ifdef OUTPUTTIMESTEP + + for(n = 0; n < pc; pindex++) + if(P[pindex].Type == type) + { + *fp++ = (P[pindex].Ti_endstep - P[pindex].Ti_begstep) * All.Timebase_interval; + n++; + } +#endif + break; + + } + + *startindex = pindex; +} + + + + +/*! This function tells the size of one data entry in each of the blocks + * defined for the output file. If one wants to add a new output-block, this + * function should be augmented accordingly. + */ +int get_bytes_per_blockelement(enum iofields blocknr) +{ + int bytes_per_blockelement = 0; + + switch (blocknr) + { + case IO_POS: + case IO_VEL: + case IO_ACCEL: + bytes_per_blockelement = 3 * sizeof(float); + break; + + case IO_ID: +#ifdef LONGIDS + bytes_per_blockelement = sizeof(long long); +#else + bytes_per_blockelement = sizeof(int); +#endif + break; + + case IO_MASS: + case IO_U: + case IO_RHO: + case IO_HSML: + case IO_POT: + case IO_DTENTR: + case IO_TSTP: + bytes_per_blockelement = sizeof(float); + break; + } + + return bytes_per_blockelement; +} + + +/*! This function returns the type of the data contained in a given block of + * the output file. If one wants to add a new output-block, this function + * should be augmented accordingly. + */ +int get_datatype_in_block(enum iofields blocknr) +{ + int typekey; + + switch (blocknr) + { + case IO_ID: +#ifdef LONGIDS + typekey = 2; /* native long long */ +#else + typekey = 0; /* native int */ +#endif + break; + + default: + typekey = 1; /* native float */ + break; + } + + return typekey; +} + + +/*! This function informs about the number of elements stored per particle for + * the given block of the output file. If one wants to add a new + * output-block, this function should be augmented accordingly. + */ +int get_values_per_blockelement(enum iofields blocknr) +{ + int values = 0; + + switch (blocknr) + { + case IO_POS: + case IO_VEL: + case IO_ACCEL: + values = 3; + break; + + case IO_ID: + case IO_MASS: + case IO_U: + case IO_RHO: + case IO_HSML: + case IO_POT: + case IO_DTENTR: + case IO_TSTP: + values = 1; + break; + } + + return values; +} + + +/*! This function determines how many particles there are in a given block, + * based on the information in the header-structure. It also flags particle + * types that are present in the block in the typelist array. If one wants to + * add a new output-block, this function should be augmented accordingly. 
+ */ +int get_particles_in_block(enum iofields blocknr, int *typelist) +{ + int i, nall, ntot_withmasses, ngas, nstars; + + nall = 0; + ntot_withmasses = 0; + + for(i = 0; i < 6; i++) + { + typelist[i] = 0; + + if(header.npart[i] > 0) + { + nall += header.npart[i]; + typelist[i] = 1; + } + + if(All.MassTable[i] == 0) + ntot_withmasses += header.npart[i]; + } + + ngas = header.npart[0]; + nstars = header.npart[4]; + + + switch (blocknr) + { + case IO_POS: + case IO_VEL: + case IO_ACCEL: + case IO_TSTP: + case IO_ID: + case IO_POT: + return nall; + break; + + case IO_MASS: + for(i = 0; i < 6; i++) + { + typelist[i] = 0; + if(All.MassTable[i] == 0 && header.npart[i] > 0) + typelist[i] = 1; + } + return ntot_withmasses; + break; + + case IO_U: + case IO_RHO: + case IO_HSML: + case IO_DTENTR: + for(i = 1; i < 6; i++) + typelist[i] = 0; + return ngas; + break; + } + + endrun(212); + return 0; +} + + + +/*! This function tells whether or not a given block in the output file is + * present, depending on the type of simulation run and the compile-time + * options. If one wants to add a new output-block, this function should be + * augmented accordingly. + */ +int blockpresent(enum iofields blocknr) +{ + +#ifndef OUTPUTPOTENTIAL + if(blocknr == IO_POT) + return 0; +#endif + +#ifndef OUTPUTACCELERATION + if(blocknr == IO_ACCEL) + return 0; +#endif + +#ifndef OUTPUTCHANGEOFENTROPY + if(blocknr == IO_DTENTR) + return 0; +#endif + +#ifndef OUTPUTTIMESTEP + if(blocknr == IO_TSTP) + return 0; +#endif + + return 1; /* default: present */ +} + + + + +/*! This function associates a short 4-character block name with each block + * number. This is stored in front of each block for snapshot + * FileFormat=2. If one wants to add a new output-block, this function should + * be augmented accordingly. + */ +void fill_Tab_IO_Labels(void) +{ + enum iofields i; + + for(i = 0; i < IO_NBLOCKS; i++) + switch (i) + { + case IO_POS: + strncpy(Tab_IO_Labels[IO_POS], "POS ", 4); + break; + case IO_VEL: + strncpy(Tab_IO_Labels[IO_VEL], "VEL ", 4); + break; + case IO_ID: + strncpy(Tab_IO_Labels[IO_ID], "ID ", 4); + break; + case IO_MASS: + strncpy(Tab_IO_Labels[IO_MASS], "MASS", 4); + break; + case IO_U: + strncpy(Tab_IO_Labels[IO_U], "U ", 4); + break; + case IO_RHO: + strncpy(Tab_IO_Labels[IO_RHO], "RHO ", 4); + break; + case IO_HSML: + strncpy(Tab_IO_Labels[IO_HSML], "HSML", 4); + break; + case IO_POT: + strncpy(Tab_IO_Labels[IO_POT], "POT ", 4); + break; + case IO_ACCEL: + strncpy(Tab_IO_Labels[IO_ACCEL], "ACCE", 4); + break; + case IO_DTENTR: + strncpy(Tab_IO_Labels[IO_DTENTR], "ENDT", 4); + break; + case IO_TSTP: + strncpy(Tab_IO_Labels[IO_TSTP], "TSTP", 4); + break; + } +} + +/*! This function returns a descriptive character string that describes the + * name of the block when the HDF5 file format is used. If one wants to add + * a new output-block, this function should be augmented accordingly. 
+ */ +void get_dataset_name(enum iofields blocknr, char *buf) +{ + + strcpy(buf, "default"); + + switch (blocknr) + { + case IO_POS: + strcpy(buf, "Coordinates"); + break; + case IO_VEL: + strcpy(buf, "Velocities"); + break; + case IO_ID: + strcpy(buf, "ParticleIDs"); + break; + case IO_MASS: + strcpy(buf, "Masses"); + break; + case IO_U: + strcpy(buf, "InternalEnergy"); + break; + case IO_RHO: + strcpy(buf, "Density"); + break; + case IO_HSML: + strcpy(buf, "SmoothingLength"); + break; + case IO_POT: + strcpy(buf, "Potential"); + break; + case IO_ACCEL: + strcpy(buf, "Acceleration"); + break; + case IO_DTENTR: + strcpy(buf, "RateOfChangeOfEntropy"); + break; + case IO_TSTP: + strcpy(buf, "TimeStep"); + break; + } +} + + + +/*! This function writes an actual snapshot file containing the data from + * processors 'writeTask' to 'lastTask'. 'writeTask' is the one that actually + * writes. Each snapshot file contains a header first, then particle + * positions, velocities and ID's. Particle masses are written only for + * those particle types with zero entry in MassTable. After that, first the + * internal energies u, and then the density is written for the SPH + * particles. If cooling is enabled, mean molecular weight and neutral + * hydrogen abundance are written for the gas particles. This is followed by + * the SPH smoothing length and further blocks of information, depending on + * included physics and compile-time flags. If HDF5 is used, the header is + * stored in a group called "/Header", and the particle data is stored + * separately for each particle type in groups calles "/PartType0", + * "/PartType1", etc. The sequence of the blocks is unimportant in this case. + */ +void write_file(char *fname, int writeTask, int lastTask) +{ + int type, bytes_per_blockelement, npart, nextblock, typelist[6]; + int n_for_this_task, ntask, n, p, pc, offset = 0, task; + int blockmaxlen, ntot_type[6], nn[6]; + enum iofields blocknr; + int blksize; + MPI_Status status; + FILE *fd = 0; + +#ifdef HAVE_HDF5 + hid_t hdf5_file = 0, hdf5_grp[6], hdf5_headergrp = 0, hdf5_dataspace_memory; + hid_t hdf5_datatype = 0, hdf5_dataspace_in_file = 0, hdf5_dataset = 0; + herr_t hdf5_status; + hsize_t dims[2], count[2], start[2]; + int rank, pcsum = 0; + char buf[500]; +#endif + +#define SKIP {my_fwrite(&blksize,sizeof(int),1,fd);} + + /* determine particle numbers of each type in file */ + + if(ThisTask == writeTask) + { + for(n = 0; n < 6; n++) + ntot_type[n] = n_type[n]; + + for(task = writeTask + 1; task <= lastTask; task++) + { + MPI_Recv(&nn[0], 6, MPI_INT, task, TAG_LOCALN, MPI_COMM_WORLD, &status); + for(n = 0; n < 6; n++) + ntot_type[n] += nn[n]; + } + + for(task = writeTask + 1; task <= lastTask; task++) + MPI_Send(&ntot_type[0], 6, MPI_INT, task, TAG_N, MPI_COMM_WORLD); + } + else + { + MPI_Send(&n_type[0], 6, MPI_INT, writeTask, TAG_LOCALN, MPI_COMM_WORLD); + MPI_Recv(&ntot_type[0], 6, MPI_INT, writeTask, TAG_N, MPI_COMM_WORLD, &status); + } + + + + /* fill file header */ + + for(n = 0; n < 6; n++) + { + header.npart[n] = ntot_type[n]; + header.npartTotal[n] = (unsigned int) ntot_type_all[n]; + header.npartTotalHighWord[n] = (unsigned int) (ntot_type_all[n] >> 32); + } + + for(n = 0; n < 6; n++) + header.mass[n] = All.MassTable[n]; + + header.time = All.Time; + + if(All.ComovingIntegrationOn) + header.redshift = 1.0 / All.Time - 1; + else + header.redshift = 0; + + header.flag_sfr = 0; + header.flag_feedback = 0; + header.flag_cooling = 0; + header.flag_stellarage = 0; + header.flag_metals = 0; + +#ifdef 
COOLING + header.flag_cooling = 1; +#endif +#ifdef SFR + header.flag_sfr = 1; + header.flag_feedback = 1; +#ifdef STELLARAGE + header.flag_stellarage = 1; +#endif +#ifdef METALS + header.flag_metals = 1; +#endif +#endif + + header.num_files = All.NumFilesPerSnapshot; + header.BoxSize = All.BoxSize; + header.Omega0 = All.Omega0; + header.OmegaLambda = All.OmegaLambda; + header.HubbleParam = All.HubbleParam; + + + /* open file and write header */ + + if(ThisTask == writeTask) + { + if(All.SnapFormat == 3) + { +#ifdef HAVE_HDF5 + sprintf(buf, "%s.hdf5", fname); + hdf5_file = H5Fcreate(buf, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + + hdf5_headergrp = H5Gcreate(hdf5_file, "/Header", 0); + + for(type = 0; type < 6; type++) + { + if(header.npart[type] > 0) + { + sprintf(buf, "/PartType%d", type); + hdf5_grp[type] = H5Gcreate(hdf5_file, buf, 0); + } + } + + write_header_attributes_in_hdf5(hdf5_headergrp); +#endif + } + else + { + if(!(fd = fopen(fname, "w"))) + { + printf("can't open file `%s' for writing snapshot.\n", fname); + endrun(123); + } + + if(All.SnapFormat == 2) + { + blksize = sizeof(int) + 4 * sizeof(char); + SKIP; + my_fwrite("HEAD", sizeof(char), 4, fd); + nextblock = sizeof(header) + 2 * sizeof(int); + my_fwrite(&nextblock, sizeof(int), 1, fd); + SKIP; + } + + blksize = sizeof(header); + SKIP; + my_fwrite(&header, sizeof(header), 1, fd); + SKIP; + } + } + + ntask = lastTask - writeTask + 1; + + for(blocknr = 0; blocknr < IO_NBLOCKS; blocknr++) + { + if(blockpresent(blocknr)) + { + bytes_per_blockelement = get_bytes_per_blockelement(blocknr); + + blockmaxlen = ((int) (All.BufferSize * 1024 * 1024)) / bytes_per_blockelement; + + npart = get_particles_in_block(blocknr, &typelist[0]); + + if(npart > 0) + { + if(ThisTask == writeTask) + { + + if(All.SnapFormat == 1 || All.SnapFormat == 2) + { + if(All.SnapFormat == 2) + { + blksize = sizeof(int) + 4 * sizeof(char); + SKIP; + my_fwrite(Tab_IO_Labels[blocknr], sizeof(char), 4, fd); + nextblock = npart * bytes_per_blockelement + 2 * sizeof(int); + my_fwrite(&nextblock, sizeof(int), 1, fd); + SKIP; + } + + blksize = npart * bytes_per_blockelement; + SKIP; + + } + } + + for(type = 0; type < 6; type++) + { + if(typelist[type]) + { +#ifdef HAVE_HDF5 + if(ThisTask == writeTask && All.SnapFormat == 3 && header.npart[type] > 0) + { + switch (get_datatype_in_block(blocknr)) + { + case 0: + hdf5_datatype = H5Tcopy(H5T_NATIVE_UINT); + break; + case 1: + hdf5_datatype = H5Tcopy(H5T_NATIVE_FLOAT); + break; + case 2: + hdf5_datatype = H5Tcopy(H5T_NATIVE_UINT64); + break; + } + + dims[0] = header.npart[type]; + dims[1] = get_values_per_blockelement(blocknr); + if(dims[1] == 1) + rank = 1; + else + rank = 2; + + get_dataset_name(blocknr, buf); + + hdf5_dataspace_in_file = H5Screate_simple(rank, dims, NULL); + hdf5_dataset = + H5Dcreate(hdf5_grp[type], buf, hdf5_datatype, hdf5_dataspace_in_file, + H5P_DEFAULT); + pcsum = 0; + } +#endif + + for(task = writeTask, offset = 0; task <= lastTask; task++) + { + if(task == ThisTask) + { + n_for_this_task = n_type[type]; + + for(p = writeTask; p <= lastTask; p++) + if(p != ThisTask) + MPI_Send(&n_for_this_task, 1, MPI_INT, p, TAG_NFORTHISTASK, MPI_COMM_WORLD); + } + else + MPI_Recv(&n_for_this_task, 1, MPI_INT, task, TAG_NFORTHISTASK, MPI_COMM_WORLD, + &status); + + while(n_for_this_task > 0) + { + pc = n_for_this_task; + + if(pc > blockmaxlen) + pc = blockmaxlen; + + if(ThisTask == task) + fill_write_buffer(blocknr, &offset, pc, type); + + if(ThisTask == writeTask && task != writeTask) + MPI_Recv(CommBuffer, 
bytes_per_blockelement * pc, MPI_BYTE, task, + TAG_PDATA, MPI_COMM_WORLD, &status); + + if(ThisTask != writeTask && task == ThisTask) + MPI_Ssend(CommBuffer, bytes_per_blockelement * pc, MPI_BYTE, writeTask, + TAG_PDATA, MPI_COMM_WORLD); + + if(ThisTask == writeTask) + { + if(All.SnapFormat == 3) + { +#ifdef HAVE_HDF5 + start[0] = pcsum; + start[1] = 0; + + count[0] = pc; + count[1] = get_values_per_blockelement(blocknr); + pcsum += pc; + + H5Sselect_hyperslab(hdf5_dataspace_in_file, H5S_SELECT_SET, + start, NULL, count, NULL); + + dims[0] = pc; + dims[1] = get_values_per_blockelement(blocknr); + hdf5_dataspace_memory = H5Screate_simple(rank, dims, NULL); + + hdf5_status = + H5Dwrite(hdf5_dataset, hdf5_datatype, hdf5_dataspace_memory, + hdf5_dataspace_in_file, H5P_DEFAULT, CommBuffer); + + H5Sclose(hdf5_dataspace_memory); +#endif + } + else + my_fwrite(CommBuffer, bytes_per_blockelement, pc, fd); + } + + n_for_this_task -= pc; + } + } + +#ifdef HAVE_HDF5 + if(ThisTask == writeTask && All.SnapFormat == 3 && header.npart[type] > 0) + { + if(All.SnapFormat == 3) + { + H5Dclose(hdf5_dataset); + H5Sclose(hdf5_dataspace_in_file); + H5Tclose(hdf5_datatype); + } + } +#endif + } + } + + if(ThisTask == writeTask) + { + if(All.SnapFormat == 1 || All.SnapFormat == 2) + SKIP; + } + } + } + } + + if(ThisTask == writeTask) + { + if(All.SnapFormat == 3) + { +#ifdef HAVE_HDF5 + for(type = 5; type >= 0; type--) + if(header.npart[type] > 0) + H5Gclose(hdf5_grp[type]); + H5Gclose(hdf5_headergrp); + H5Fclose(hdf5_file); +#endif + } + else + fclose(fd); + } +} + + + + +/*! This function writes the header information in case HDF5 is selected as + * file format. + */ +#ifdef HAVE_HDF5 +void write_header_attributes_in_hdf5(hid_t handle) +{ + hsize_t adim[1] = { 6 }; + hid_t hdf5_dataspace, hdf5_attribute; + + hdf5_dataspace = H5Screate(H5S_SIMPLE); + H5Sset_extent_simple(hdf5_dataspace, 1, adim, NULL); + hdf5_attribute = H5Acreate(handle, "NumPart_ThisFile", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_UINT, header.npart); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SIMPLE); + H5Sset_extent_simple(hdf5_dataspace, 1, adim, NULL); + hdf5_attribute = H5Acreate(handle, "NumPart_Total", H5T_NATIVE_UINT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_UINT, header.npartTotal); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SIMPLE); + H5Sset_extent_simple(hdf5_dataspace, 1, adim, NULL); + hdf5_attribute = H5Acreate(handle, "NumPart_Total_HW", H5T_NATIVE_UINT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_UINT, header.npartTotalHighWord); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + + hdf5_dataspace = H5Screate(H5S_SIMPLE); + H5Sset_extent_simple(hdf5_dataspace, 1, adim, NULL); + hdf5_attribute = H5Acreate(handle, "MassTable", H5T_NATIVE_DOUBLE, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_DOUBLE, header.mass); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Time", H5T_NATIVE_DOUBLE, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_DOUBLE, &header.time); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Redshift", H5T_NATIVE_DOUBLE, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_DOUBLE, 
&header.redshift); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "BoxSize", H5T_NATIVE_DOUBLE, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_DOUBLE, &header.BoxSize); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "NumFilesPerSnapshot", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_INT, &header.num_files); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Omega0", H5T_NATIVE_DOUBLE, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_DOUBLE, &header.Omega0); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "OmegaLambda", H5T_NATIVE_DOUBLE, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_DOUBLE, &header.OmegaLambda); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "HubbleParam", H5T_NATIVE_DOUBLE, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_DOUBLE, &header.HubbleParam); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Flag_Sfr", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_INT, &header.flag_sfr); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Flag_Cooling", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_INT, &header.flag_cooling); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Flag_StellarAge", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_INT, &header.flag_stellarage); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Flag_Metals", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_INT, &header.flag_metals); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + hdf5_dataspace = H5Screate(H5S_SCALAR); + hdf5_attribute = H5Acreate(handle, "Flag_Feedback", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_INT, &header.flag_feedback); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); + + header.flag_entropy_instead_u = 0; + + hdf5_dataspace = H5Screate(H5S_SIMPLE); + H5Sset_extent_simple(hdf5_dataspace, 1, adim, NULL); + hdf5_attribute = H5Acreate(handle, "Flag_Entropy_ICs", H5T_NATIVE_UINT, hdf5_dataspace, H5P_DEFAULT); + H5Awrite(hdf5_attribute, H5T_NATIVE_UINT, &header.flag_entropy_instead_u); + H5Aclose(hdf5_attribute); + H5Sclose(hdf5_dataspace); +} +#endif + + + + + +/*! This catches I/O errors occuring for my_fwrite(). In this case we + * better stop. + */ +size_t my_fwrite(void *ptr, size_t size, size_t nmemb, FILE * stream) +{ + size_t nwritten; + + if((nwritten = fwrite(ptr, size, nmemb, stream)) != nmemb) + { + printf("I/O error (fwrite) on task=%d has occured: %s\n", ThisTask, strerror(errno)); + fflush(stdout); + endrun(777); + } + return nwritten; +} + + +/*! 
This catches I/O errors occuring for fread(). In this case we + * better stop. + */ +size_t my_fread(void *ptr, size_t size, size_t nmemb, FILE * stream) +{ + size_t nread; + + if((nread = fread(ptr, size, nmemb, stream)) != nmemb) + { + printf("I/O error (fread) on task=%d has occured: %s\n", ThisTask, strerror(errno)); + fflush(stdout); + endrun(778); + } + return nread; +} diff --git a/src/PyGadget/src/io.o b/src/PyGadget/src/io.o new file mode 100644 index 0000000..0e40254 Binary files /dev/null and b/src/PyGadget/src/io.o differ diff --git a/src/PyGadget/src/longrange.c b/src/PyGadget/src/longrange.c new file mode 100644 index 0000000..16322c2 --- /dev/null +++ b/src/PyGadget/src/longrange.c @@ -0,0 +1,144 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file longrange.c + * \brief driver routines for computation of long-range gravitational PM force + */ + +#ifdef PMGRID + +/*! Calls initializiation routines of periodic or/and non-periodic FFT + * routines. + */ +void long_range_init(void) +{ +#ifdef PERIODIC + pm_init_periodic(); +#ifdef PLACEHIGHRESREGION + pm_init_nonperiodic(); +#endif +#else + pm_init_nonperiodic(); +#endif +} + + +/*! This function calls subroutines that determine the spatial region covered + * by the PM mesh. + */ +void long_range_init_regionsize(void) +{ +#ifdef PERIODIC +#ifdef PLACEHIGHRESREGION + if(RestartFlag != 1) + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); +#endif +#else + if(RestartFlag != 1) + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); +#endif +} + + +/*! This function is a driver routine for the long-range PM force + * computation. It calls periodic and/or non-periodic FFT routines as needed + * for the present simulation set-up. 
+ */ +void long_range_force(void) +{ + int i; + +#ifndef PERIODIC + int j; + double fac; +#endif + + + for(i = 0; i < NumPart; i++) + P[i].GravPM[0] = P[i].GravPM[1] = P[i].GravPM[2] = 0; + +#ifdef NOGRAVITY + return; +#endif + + +#ifdef PERIODIC + pmforce_periodic(); +#ifdef PLACEHIGHRESREGION + i = pmforce_nonperiodic(1); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); + i = pmforce_nonperiodic(1); /* try again */ + } + if(i == 1) + endrun(68686); +#endif +#else + i = pmforce_nonperiodic(0); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); + i = pmforce_nonperiodic(0); /* try again */ + } + if(i == 1) + endrun(68687); +#ifdef PLACEHIGHRESREGION + i = pmforce_nonperiodic(1); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); + + /* try again */ + + for(i = 0; i < NumPart; i++) + P[i].GravPM[0] = P[i].GravPM[1] = P[i].GravPM[2] = 0; + + i = pmforce_nonperiodic(0) + pmforce_nonperiodic(1); + } + if(i != 0) + endrun(68688); +#endif +#endif + + +#ifndef PERIODIC + if(All.ComovingIntegrationOn) + { + fac = 0.5 * All.Hubble * All.Hubble * All.Omega0; + + for(i = 0; i < NumPart; i++) + for(j = 0; j < 3; j++) + P[i].GravPM[j] += fac * P[i].Pos[j]; + } + + + /* Finally, the following factor allows a computation of cosmological simulation + with vacuum energy in physical coordinates */ + + if(All.ComovingIntegrationOn == 0) + { + fac = All.OmegaLambda * All.Hubble * All.Hubble; + + for(i = 0; i < NumPart; i++) + for(j = 0; j < 3; j++) + P[i].GravPM[j] += fac * P[i].Pos[j]; + } +#endif + +} + + +#endif diff --git a/src/PyGadget/src/longrange.o b/src/PyGadget/src/longrange.o new file mode 100644 index 0000000..8872fe7 Binary files /dev/null and b/src/PyGadget/src/longrange.o differ diff --git a/src/PyGadget/src/main.c b/src/PyGadget/src/main.c new file mode 100644 index 0000000..b112416 --- /dev/null +++ b/src/PyGadget/src/main.c @@ -0,0 +1,834 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file main.c + * \brief start of the program + */ + +/*! + * This function initializes the MPI communication packages, and sets + * cpu-time counters to 0. Then begrun() is called, which sets up + * the simulation either from IC's or from restart files. Finally, + * run() is started, the main simulation loop, which iterates over + * the timesteps. 
+ */ +int main(int argc, char **argv) +{ + double t0, t1; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask); + MPI_Comm_size(MPI_COMM_WORLD, &NTask); + + if(NTask <= 1) + { + if(ThisTask == 0) + printf + ("Note: This is a massively parallel code, but you are running with 1 processor only.\nCompared to an equivalent serial code, there is some unnecessary overhead.\n"); + } + + for(PTask = 0; NTask > (1 << PTask); PTask++); + + if(argc < 2) + { + if(ThisTask == 0) + { + printf("Parameters are missing.\n"); + printf("Call with []\n"); + } + endrun(0); + } + + strcpy(ParameterFile, argv[1]); + + if(argc >= 3) + RestartFlag = atoi(argv[2]); + else + RestartFlag = 0; + + All.CPU_TreeConstruction = All.CPU_TreeWalk = All.CPU_Gravity = All.CPU_Potential = All.CPU_Domain = + All.CPU_Snapshot = All.CPU_Total = All.CPU_CommSum = All.CPU_Imbalance = All.CPU_Hydro = + All.CPU_HydCompWalk = All.CPU_HydCommSumm = All.CPU_HydImbalance = + All.CPU_EnsureNgb = All.CPU_Predict = All.CPU_TimeLine = All.CPU_PM = All.CPU_Peano = 0; + + CPUThisRun = 0; + + t0 = second(); + + begrun(); /* set-up run */ + + t1 = second(); + CPUThisRun += timediff(t0, t1); + All.CPU_Total += timediff(t0, t1); + + run(); /* main simulation loop */ + + MPI_Finalize(); /* clean up & finalize MPI */ + + return 0; +} + + + + +/* ---------------------------------------------------------------------- + The rest of this file contains documentation for compiling and + running the code, in a format appropriate for 'doxygen'. + ---------------------------------------------------------------------- + */ + +/*! \mainpage Reference documentation for GADGET-2 + +\author Volker Springel \n + Max-Planck-Institute for Astrophysics \n + Karl-Schwarzschild-Str. 1 \n + 85740 Garching \n + Germany \n + volker@mpa-garching.mpg.de \n + +\n + +\section prelim Getting started + +GADGET-2 is a massively parallel code for hydrodynamical cosmological +simulations. It is a flexible code that can be applied to a variety of +different types of simulations, offering a number of sophisticated +simulation algorithms. + +A full account of the numerical algorithms employed by the code is given +in the accompanying code paper, and detailed instructions for usage of +the code are given in the included code documentation. + +This html-document serves as a cross-referenced documentation of the +source code itself - in fact, using the doxygen tool, the html-pages +have been produced from comments inlined in the source code. Apart from +the source-code documentation, a brief guide to code compilation is +given below, and under Related Pages (see link on top) you can find an +explanation of GADGET's parameterfile and a short guide to compile-time +options of the code. + + +\section install Compilation + +GADGET-2 needs the following non-standard libraries for compilation: + +- \b MPI - the Message Passing Interface (version 1.0 or higher). Many + vendor supplied versions exist, in addition to excellent open source + implementations, e.g. MPICH + (http://www-unix.mcs.anl.gov/mpi/mpich/) or LAM + (http://www.lam-mpi.org/). + +- \b GSL - the GNU scientific library. This open-source package can be + obtained at http://www.gnu.org/software/gsl , for example. GADGET-2 + needs this library for a few simple cosmological + integrations at start-up, and for random number generation. + +- \b HDF5 - the Hierarchical Data Format. This library has been + developed by NCSA and can be obtained at http://hdf.ncsa.uiuc.edu/HDF5 . 
+ GADGET-2 can be compiled without this library, but then the HDF5 format + is not supported. + +- \b FFTW - the Fastest Fourier Transform in the West. This + open-source package can be obtained at http://www.fftw.org . It is only + needed for simulations that use the TreePM algorithm. Note that the + MPI-capable version 2.x of FFTW is required, and that FFTW needs to be + explicitly compiled with parallel support enabled. This can be achieved + by passing the option --enable-mpi to the configure script. When + at it, you might as well add --enable-type-prefix to obtain the + libraries in both a single and double precision version. If this has not + been done, you should set the option NOTYPEPREFIX_FFTW in GADGET's + \ref Gadget-Makefile "Makefile". + +Note that if any of the above libraries is not installed in standard +locations on your system, the \ref Gadget-Makefile "Makefile" provided with +the code may need slight adjustments. Similarly, compiler options, +particularly with respect to optimisations, may need adjustment to the +C-compiler that is used. Finally, the \ref Gadget-Makefile "Makefile" +contains a number of compile-time options that need to be set appropriately +for the type of simulation that is simulated. + +The provided makefile is compatible with GNU-make, i.e. typing \b make or +\b gmake should then build the executable Gadget2. If your site +does not have GNU-make, get it, or write your own makefile. + +\section howtorun Running the code + +In order to start the simulation code, a \ref parameterfile "parameterfile" +needs to be specified. An additional optional numerical parameter can be +used to signal whether a continuation from a set of restart files, or from +a snapshot file, is desired. A typical command to start the code looks like +the following: \n \n + + mpirun -np 8 ./Gadget2 [restartflag] \n \n + +This would start the code using 8 processors, assuming that the parallel +environment uses the mpirun command to start MPI +applications. Depending on the operating system, other commands may be +required for this task, e.g. poe on IBM/AIX machines. Note that +the code can in principle be started using an arbitrary number of +processors, but the communication algorithms will be most efficient for +powers of 2. It is also possible to use a single processor only, in +which case the code behaves like a serial code, except that GADGET-2 +will still go through some of the overhead induced by the +parallelization algorithms, so the code will not quite reach the same +performance as an optimum serial solution in this case. + + +The optional restartflag can have the values 0, 1, or 2, only. "1" +signals a continuation from restart files, while "2" can be used to restart +from a snapshot file produced by the code. If omitted (equivalent to the +default of "0"), the code starts from initial conditions. + +*/ + + + + + + + + + + + +/*! \page parameterfile Parameterfile of GADGET-2 + +The parameterfile for GADGET-2 is a simple text file, consisting of pairs of +tags and values. For each parameter, a separate line needs to be specified, +first listing the name (tag) of the parameter, and then the assigned value, +separated by whitespace. It is allowed to add further text behind the +assigned parameter value. The order of the parameters is arbitrary, but +each one needs to occur exactly one time, otherwise an error message will +be produced. Empty lines, or lines beginning with a \%-sign, are ignored and +treated as comments. 
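+For illustration, the beginning of a parameterfile could look like the
+following (the tag names are taken from the list below; the values shown
+here are only placeholders and must be adapted to the simulation at hand): \n \n
+
+    InitCondFile       ./ICs/my_ics     \n
+    OutputDir          ./output/        \n
+    SnapshotFileBase   snapshot         \n
+    TimeBegin          0.02             (initial scale factor) \n
+    TimeMax            1.0              (final scale factor)   \n
+    Omega0             0.3              \n
+    OmegaLambda        0.7              \n
+    HubbleParam        0.7              \n
+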
+ + +- \b InitCondFile \n The filename of the initial conditions file. If a + restart from a snapshot with the "2" option is desired, one + needs to specify the snapshot file here. + +- \b OutputDir \n Pathname of the output directory of the code. + +- \b EnergyFile \n Filename of the log-file that contain the energy + statistics. + +- \b InfoFile \n Log-file that contains a list of the timesteps taken. + +- \b TimingsFile \n Log-file with performance metrics of the gravitational + tree computation. + +- \b CpuFile \n Log-file with CPU time consumption in various parts of the + code. + +- \b RestartFile \n Basename of restart-files produced by the code. + +- \b SnapshotFileBase \n Basename of snapshot files produced by the code. + +- \b OutputListFilename \n File with a list of the desired output times. + +- \b TimeLimitCPU \n CPU-time limit for the present submission of the + code. If 85 percent of this time have been reached at the end + of a timestep, the code terminates itself and produces restart + files. + +- \b ResubmitOn \n If set to "1", the code will try to resubmit itself to + the queuing system when an interruption of the run due to the + CPU-time limit occurs. The resubmission itself is done by + executing the program/script given with + ResubmitCommand. + +- \b ResubmitCommand \n The name of a script file or program that is + executed for automatic resubmission of the job to the queuing + system. Note that the file given here needs to be executable. + +- \b ICFormat \n The file format of the initial conditions. Currently, + three different formats are supported, selected by one of the + choices "1", "2", or "3". Format "1" is the traditional + fortran-style unformatted format familiar from + GADGET-1. Format "2" is a variant of this format, where each + block of data is preceeded by a 4-character + block-identifier. Finally, format "3" selects the HDF-5 + format. + +- \b SnapFormat \n Similar as ICFormat, this parameter selects the + file-format of snapshot dumps produced by the code. + +- \b ComovingIntegrationOn \n If set to "1", the code assumes that a + cosmological integration in comoving coordinates is carried + out, otherwise ordinary Newtonian dynamics is assumed. + + +- \b TypeOfTimestepCriterion \n This parameter can in principle be used to + select different kinds of timestep criteria for gravitational + dynamics. However, GADGET-2 presently only supports the + standard criterion "0". + +- \b OutputListOn \n If set to "1", the code tries to read a list of + desired output times from the file given in + OutputListFilename. Otherwise, output times are + generated equally spaced from the values assigned for + TimeOfFirstSnapshot and TimeBetSnapshot. + +- \b PeriodicBoundariesOn \n If set to "1", periodic boundary conditions + are assumed, with a cubical box-size of side-length + BoxSize. Particle coordinates are expected to be in + the range [0,BoxSize[. + + +- \b TimeBegin \n This sets the starting time of a simulation when the code + is started from initial conditions. For cosmological + integrations, the value specified here is taken as the initial + scale factor. + +- \b TimeMax \n This sets the final time for the simulation. The code + normally tries to run until this time is reached. For + cosmological integrations, the value given here is the final + scale factor. + +- \b Omega0 \n Gives the total matter density (in units of the critical + density) at z=0 for cosmological simulations. 
+ +- \b OmegaLambda \n Gives the vacuum energy density at z=0 for cosmological + simulations. + +- \b OmegaBaryon \n Gives the baryon density at z=0 for cosmological + simulations. + +- \b HubbleParam \n This gives the Hubble constant at z=0 in units of 100 + km/sec/Mpc. Note that this parameter has been basically + absorbed into the definition of the internal code units, such + that for gravitational dynamics and adiabatic gas dynamics the + actual value assigned for HubbleParam is not used by + the code. + +- \b BoxSize \n The boxsize for simulations with periodic boundary + conditions. + +- \b TimeOfFirstSnapshot \n The time of the first desired snapshot file in + case a file with output times is not specified. For + cosmological simulations, the value given here is the scale factor + of the first desired output. + +- \b TimeBetSnapshot \n The time interval between two subsequent snapshot + files in case a file with output times is not specified. For + cosmological simulations, this is a multiplicative factor + applied to the time of the last snapshot, such that the + snapshots will have a constant logarithmic spacing in the + scale factor. Otherwise, the parameter is an additive constant + that gives the linear spacing between snapshot times. + +- \b CpuTimeBetRestartFile \n The value specfied here gives the time in + seconds the code will run before it writes regularly produced + restart files. This can be useful to protect against + unexpected interruptions (for example due to a hardware + problem) of a simulation, particularly if it is run for a long + time. It is then possible to resume a simulation from the last + restart file, reducing the potential loss to the elapsed + CPU-time since this was produced. + +- \b TimeBetStatistics \n The code can be asked to measure the total + kinetic, thermal, and potential energy in regular intervals, + and to write the results to the file given in + EnergyFile. The time interval between two such + measurements is given by the parameter + TimeBetStatistics, in an analogous way as with + TimeBetSnapshot. Note that the compile time option + COMPUTE_POTENTIAL_ENERGY needs to be activated to + obtain a measurement of the gravitational potential energy. + +- \b NumFilesPerSnapshot \n The number of separate files requested for each + snapshot dump. Each file of the snapshot will hold the data of + one or several processors, up to all of + them. NumFilesPerSnapshot must hence lie between 1 + and the number of processors used. Distributing a snapshot + onto several files can be done in parallel and may lead to + much better I/O performance, depending on the hardware + configuration. It can also help to avoid problems due to big + files (>2GB) for large simulations. Note that initial + conditions may also be distributed into several files, the + number of which is automatically recognised by the code and + does not have to be equal to NumFilesPerSnapshot (it + may also be larger than the number of processors). + + +- \b NumFilesWrittenInParallel \n The number of files the code may read or + write simultaneously when writing or reading snapshot/restart + files. The value of this parameter must be smaller or equal to + the number of processors. + + +- \b ErrTolIntAccuracy \n This dimensionless parameter controls the + accuracy of the timestep criterion selected by + TypeOfTimestepCriterion. + +- \b CourantFac \n This sets the value of the Courant parameter used in the + determination of the hydrodynamical timestep of SPH particles. 
+ +- \b MaxSizeTimestep \n This gives the maximum timestep a particle may + take. This should be set to a sensible value in order to + protect against too large timesteps for particles with very + small acceleration. For cosmological simulations, the + parameter given here is the maximum allowed step in the + logarithm of the expansion factor. Note that the definition + of MaxSizeTimestep has changed compared to Gadget-1.1 for cosmological simulations. + + +- \b MinSizeTimestep \n If a particle requests a timestep smaller than the + value specified here, the code will normally terminate with a + warning message. If compiled with the + NOSTOP_WHEN_BELOW_MINTIMESTEP option, the code will + instead force the timesteps to be at least as large as + MinSizeTimestep. + +- \b TypeOfOpeningCriterion \n This selects the type of cell-opening + criterion used in the tree walks. A value of `0' results in + standard Barnes & Hut, while `1' selects the relative opening + criterion of GADGET-2. + +- \b ErrTolTheta \n This gives the maximum opening angle if the BH + criterion is used for the tree walk. If the relative opening + criterion is used instead, a first force estimate is computed + using the BH algorithm, which is then recomputed with the + relative opening criterion. + +- \b ErrTolForceAcc \n The accuracy parameter for the relative opening + criterion for the tree walk. + +- \b TreeDomainUpdateFrequency \n The domain decomposition and tree + construction need not necessarily be done every single + timestep. Instead, tree nodes can be dynamically updated, + which is faster. However, the tree walk will become more + expensive since the tree nodes have to "grow" to keep + accomodating all particles they enclose. The parameter + TreeDomainUpdateFrequency controls how often the + domain decomposition is carried out and the tree is + reconstructed from scratch. For example, a value of 0.1 means + that the domain decomposition and the tree are reconstructed + whenever there have been more than 0.1*N force computations + since the last reconstruction, where N is the total particle + number. A value of 0 will reconstruct the tree every timestep. + +- \b MaxRMSDisplacementFac \n This parameter is an additional timestep + criterion for the long-range integration in case the TreePM + algorithm is used. It limits the long-range timestep such that + the rms-displacement of particles per step is at most + MaxRMSDisplacementFac times the mean + particle separation, or the mesh-scale, whichever is smaller. + +- \b DesNumNgb \n This sets the desired number of SPH smoothing neighbours. + +- \b MaxNumNgbDeviation \n This sets the allowed variation of the number of + neighbours around the target value DesNumNgb. + +- \b ArtBulkViscConst \n This sets the value of the artificial viscosity + parameter used by GADGET-2. + + +- \b InitGasTemp \n This sets the initial gas temperature (assuming either + a mean molecular weight corresponding to full ionization or + full neutrality, depending on whether the temperature is above + or below 10^4 K) in Kelvin when initial conditions are + read. However, the gas temperature is only set to a certain + temperature if InitGasTemp>0, and if the temperature + of the gas particles in the initial conditions file is zero, + otherwise the initial gas temperature is left at the value + stored in the IC file. + +- \b MinGasTemp \n A minimum temperature floor imposed by the code. This + may be set to zero. 
+ +- \b PartAllocFactor \n Each processor allocates space for + PartAllocFactor times the average number of particles + per processor. This number needs to be larger than 1 to allow + the simulation to achieve a good work-load balancing, which + requires to trade particle-load balance for work-load + balance. It is good to make PartAllocFactor quite a + bit larger than 1, but values in excess of 3 will typically + not improve performance any more. For a value that is too + small, the code may not be able to succeed in the domain + decomposition and terminate. + +- \b TreeAllocFactor \n To construct the BH-tree for N particles, somewhat + less than N internal tree-nodes are necessary for `normal' + particle distributions. TreeAllocFactor sets the + number of internal tree-nodes allocated in units of the + particle number. By experience, space for 0.65 N internal + nodes is usually fully sufficient, so a value of 0.7 should + put you on the safe side. + +- \b BufferSize \n This specifies the size (in MByte per processor) of a + communication buffer used by the code. + +- \b UnitLength_in_cm \n This sets the internal length unit in cm/h, where + H_0 = 100 h km/sec/Mpc. For example, a choice of 3.085678e21 + sets the length unit to 1.0 kpc/h. + +- \b UnitMass_in_g \n This sets the internal mass unit in g/h, where H_0 = + 100 h km/sec/Mpc. For example, a choice of 1.989e43 sets the + mass unit to 10^10 M_sun/h. + +- \b UnitVelocity_in_cm_per_s \n This sets the internal velocity unit in + cm/sec. For example, a choice of 1e5 sets the velocity unit to + km/sec. Note that the specification of + UnitLength_in_cm, UnitMass_in_g, and + UnitVelocity_in_cm_per_s also determines the internal + unit of time. + +- \b GravityConstantInternal \n The numerical value of the gravitational + constant G in internal units depends on the system of units + you choose. For example, for the choices above, G=43007.1 in + internal units. For GravityConstantInternal=0, the + code calculates the value corresponding to the physical value + of G automatically. However, you might want to set G + yourself. For example, by specifying + GravityConstantInternal=1, + UnitLength_in_cm=1, UnitMass_in_g=1, and + UnitVelocity_in_cm_per_s=1, one obtains a `natural' + system of units. Note that the code will nevertheless try to + use the `correct' value of the Hubble constant in this case, + so you should not set GravityConstantInternal in + cosmological integrations. + +- \b MinGasHsmlFractional \n This parameter sets the minimum allowed SPH + smoothing length in units of the gravitational softening + length of the gas particles. The smoothing length will be + prevented from falling below this value. When this bound is + actually reached, the number of smoothing neighbors will + instead be increased above DesNumNgb. + +- \b SofteningGas \n The Plummer equivalent gravitational softening length + for particle type 0, which are the gas particles. For + cosmological simulations in comoving coordinates, this is + interpreted as a comoving softening length. + +- \b SofteningHalo \n The Plummer equivalent gravitational softening length + for particle type 1. + +- \b SofteningDisk \n The Plummer equivalent gravitational softening length + for particle type 2. + +- \b SofteningBulge \n The Plummer equivalent gravitational softening + length for particle type 3. + +- \b SofteningStars \n The Plummer equivalent gravitational softening + length for particle type 4. 
+ +- \b SofteningBndry \n The Plummer equivalent gravitational softening + length for particle type 5. + + +- \b SofteningGasMaxPhys \n When comoving integration is used, this + parameter gives the maximum physical gravitational softening + length for particle type 0. Depening on the relative settings + of SofteningGas and SofteningGasMaxPhys, the + code will hence switch from a softening constant in comoving + units to one constant in physical units. + +- \b SofteningHaloMaxPhys \n When comoving integration is used, this + parameter gives the maximum physical gravitational softening + length for particle type 1. + +- \b SofteningDiskMaxPhys \n When comoving integration is used, this + parameter gives the maximum physical gravitational softening + length for particle type 2. + +- \b SofteningBulgeMaxPhys \n When comoving integration is used, this + parameter gives the maximum physical gravitational softening + length for particle type 3. + +- \b SofteningStarsMaxPhys \n When comoving integration is used, this + parameter gives the maximum physical gravitational softening + length for particle type 4. + +- \b SofteningBndryMaxPhys \n When comoving integration is used, this + parameter gives the maximum physical gravitational softening + length for particle type 5. + + +*/ + + + + + + + + + + +/*! \page Gadget-Makefile Makefile of GADGET-2 + +A number of features of GADGET-2 are controlled with compile-time options +in the makefile rather than by the parameterfile. This has been done in +order to allow the generation of highly optimised binaries by the compiler, +even when the underlying source code allows for many different ways to run the +code. + +The makefile contains a dummy list of all available compile-time options, +with most of them commented out by default. To activate a certain feature, +the corresponding parameter should be commented in, and given the desired +value, where appropriate. Below, a brief guide to these options is +included. + +Important Note: Whenever one of the compile-time options +described below is modified, a full recompilation of the code may be +necessary. To guarantee that this is done when a simple make is +specified, all source files have been specified in the Makefile as being +dependent on the Makefile itself. Alternatively, one can also issue the +command make clean, which will erase all object files, followed +by make. + +Note that the above technique has the disadvantage that different +simulations may require different binaries of GADGET-2. If several +simulations are run concurrently, there is hence the danger that a +simulation is started/resumed with the `wrong' binary. Note that while +GADGET-2 checks the plausibility of some of the most important code +options, this is not done for all of them. To minimise the risk of using +the wrong executable for a simulation, it is recommended to produce a +separate executable for each simulation that is run. For example, a good +strategy is to make a copy of the whole code together with its makefile in +the output directory of each simulation run, and then to use this copy to +compile the code and to run the simulation. + + +\n +\section secmake1 Basic operation mode of code +- \b PERIODIC \n Set this if you want to have periodic boundary conditions. + +- \b UNEQUALSOFTENINGS \n Set this if you use particles with different + gravitational softening lengths. + +\n +\section secmake2 Things that are always recommended +- \b PEANOHILBERT \n This is a tuning option. 
When set, the code will bring + the particles into Peano-Hilbert order after each domain + decomposition. This improves cache utilisation and performance. + +- \b WALLCLOCK \n If set, a wallclock timer is used by the code to measure + internal time consumption (see cpu-log file). Otherwise, a timer that + measures consumed processor ticks is used. + +\n +\section secmake3 TreePM options +- \b PMGRID=128 \n This enables the TreePM method, i.e. the long-range + force is computed with a PM-algorithm, and the short range force with + the tree. The parameter has to be set to the size of the mesh that + should be used, e.g.~64, 96, 128, etc. The mesh dimensions need not + necessarily be a power of two, but the FFT is fastest for such a + choice. Note: If the simulation is not in a periodic box, then a FFT + method for vacuum boundaries is employed, using a mesh with dimension + twice that specified by PMGRID. + +- \b PLACEHIGHRESREGION=1+8 \n If this option is set (will only work + together with \b PMGRID), then the long range force is computed in two + stages: One Fourier-grid is used to cover the whole simulation volume, + allowing the computation of the large-scale force. A second Fourier + mesh is placed on the region occupied by `high-resolution' particles, + allowing the computation of an intermediate-scale force. Finally, the + force on very small scales is computed by the tree. This procedure can + be useful for `zoom-simulations', where the majority of particles (the + high-res particles) are occupying only a small fraction of the + volume. To activate this option, the parameter needs to be set to an + integer that encodes the particle types that make up the high-res + particles in the form of a bit mask. For example, if types 0, 1, and 4 + are the high-res particles, then the parameter should be set to + PLACEHIGHRESREGION=1+2+16, i.e. to the sum + \f$2^0+2^1+2^4\f$. The spatial region covered by the high-res grid is + determined automatically from the initial conditions. Note: If a + periodic box is used, the high-res zone is not allowed to intersect the box + boundaries. + +- ENLARGEREGION=1.1 \n The spatial region covered by the high-res zone + normally has a fixed size during the simulation, which initially is + set to the smallest region that encompasses all high-res + particles. Normally, the simulation will be interrupted if high-res + particles leave this region in the course of the run. However, by + setting this parameter to a value larger than one, the high-res region + can be expanded on the fly. For example, setting it to 1.4 will enlarge its + side-length by 40% in such an event (it remains centred on the high-res + particles). Hence, with such a setting, the high-res region may expand + or move by a limited amount. If in addition \b SYNCHRONIZATION is + activated, then the code will be able to continue even if high-res + particles leave the initial high-res grid. In this case, the code will + update the size and position of the grid that is placed onto the + high-resolution region automatically. To prevent that this potentially + happens every single PM step, one should nevertheless assign a value + slightly larger than 1 to \b ENLARGEREGION. + +- ASMTH=1.25 \n This can be used to override the value assumed for the + scale that defines the long-range/short-range force-split in the + TreePM algorithm. The default value is 1.25, in mesh-cells. 
+ +- RCUT=4.5 \n This can be used to override the maximum radius in which + the short-range tree-force is evaluated (in case the TreePM algorithm + is used). The default value is 4.5, given in mesh-cells. + +\n +\section secmake4 Single or double precision +- \b DOUBLEPRECISION \n This makes the code store and compute internal + particle data in double precision. Note that output files are + nevertheless written by converting the values that are saved to single + precision. + +- \b DOUBLEPRECISION_FFTW \n If this is set, the code will use the + double-precision version of FTTW, provided the latter has been + explicitly installed with a "d" prefix, and NOTYPEPREFIX_FFTW is not + set. Otherwise the single precision version ("s" prefix) is used. + + +\n +\section secmake5 Time integration options +- \b SYNCHRONIZATION \n When this is set, particles may only increase their + timestep if the new timestep will put them into synchronisation with + the higher time level. This typically means that only on half of the + timesteps of a particle an increase of its step may occur. Especially + for TreePM runs, it is usually advisable to set this option. + +- \b FLEXSTEPS \n This is an alternative to SYNCHRONIZATION. Particle + timesteps are here allowed to be integer multiples of the minimum + timestep that occurs among the particles, which in turn is rounded + down to the nearest power-of-two devision of the total simulated + timespan. This option distributes particles more evenly over + individual system timesteps, particularly once a simulation has run + for a while, and may then result in a reduction of work-load imbalance + losses. + +- \b PSEUDOSYMMETRIC \n When this option is set, the code will try to + `anticipate' timestep changes by extrapolating the change of the + acceleration into the future. This in general improves the long-term + integration behaviour of periodic orbits, because then the adaptive + integration becomes more akin to a strictly time reversible + integrator. Note: This option has no effect if FLEXSTEPS is set. + +- \b NOSTOP_WHEN_BELOW_MINTIMESTEP \n If this is activated, the code will + not terminate when the timestep falls below the value of \b + MinSizeTimestep specified in the parameterfile. This is useful for + runs where one wants to enforce a constant timestep for all + particles. This can be done by activating this option, and by setting + \b MinSizeTimestep and \b MaxSizeTimestep to an equal value. + +- \b NOPMSTEPADJUSTMENT \n When this is set, the long-range timestep for + the PM force computation is always determined by \b MaxSizeTimeStep. + Otherwise, it is set to the minimum of \b MaxSizeTimeStep and the + timestep obtained for the maximum long-range force with an effective + softening scale equal to the PM smoothing-scale. + +\n +\section secmake6 Output options +- \b HAVE_HDF5 \n If this is set, the code will be compiled with support + for input and output in the HDF5 format. You need to have the HDF5 + libraries and headers installed on your computer for this option to + work. The HDF5 format can then be selected as format "3" in Gadget's + parameterfile. + +- \b OUTPUTPOTENTIAL \n This will force the code to compute gravitational + potentials for all particles each time a snapshot file is + generated. These values are then included in the snapshot files. Note + that the computation of the values of the potential costs additional + time. + +- \b OUTPUTACCELERATION \n This will include the physical acceleration of + each particle in snapshot files. 
+ +- \b OUTPUTCHANGEOFENTROPY \n This will include the rate of change of + entropy of gas particles in snapshot files. + +- \b OUTPUTTIMESTEP \n This will include the timesteps actually taken by + each particle in the snapshot files. + +\n +\section secmake7 Things for special behaviour +- \b NOGRAVITY \n This switches off gravity. Makes only sense for pure SPH + simulations in non-expanding space. + +- \b NOTREERND \n If this is not set, the tree construction will succeed + even when there are a few particles at identical locations. This is + done by `rerouting' particles once the node-size has fallen below + \f$10^{-3}\f$ of the softening length. When this option is activated, + this will be suppressed and the tree construction will always fail if + there are particles at extremely close or identical coordinates. + +- \b NOTYPEPREFIX_FFTW \n If this is set, the fftw-header/libraries are + accessed without type prefix (adopting whatever was chosen as default + at compile-time of fftw). Otherwise, the type prefix 'd' for + double-precision is used. + +- \b LONG_X/Y/Z \n These options can be used together with PERIODIC and + NOGRAVITY only. When set, the options define numerical factors that + can be used to distort the periodic simulation cube into a + parallelepiped of arbitrary aspect ratio. This can be useful for + idealized SPH tests. + +- \b TWODIMS \n This effectively switches of one dimension in SPH, + i.e. the code follows only 2d hydrodynamics in the xy-, yz-, or + xz-plane. This only works with NOGRAVITY, and if all coordinates of + the third axis are exactly equal. Can be useful for idealized SPH + tests. + +- \b SPH_BND_PARTICLES \n If this is set, particles with a particle-ID + equal to zero do not receive any SPH acceleration. This can be useful + for idealized SPH tests, where these particles represent fixed + "walls". + +- \b NOVISCOSITYLIMITER \n If this is set, there is no explicit upper + limit on the viscosity. In the default version, this limiter will + try to protect against possible particle `reflections', which could + in principle occur if very poor timestepping is used in the + presence of strong shocks. + +- \b COMPUTE_POTENTIAL_ENERGY \n When this option is set, the code will + compute the gravitational potential energy each time a global + statistics is computed. This can be useful for testing global energy + conservation. + +- \b ISOTHERM_EQS \n This special option makes the gas behave like an + isothermal gas with equation of state \f$ P = c_s^2 \rho \f$. The + sound-speed \f$ c_s \f$ is set by the thermal energy per unit mass in the + intial conditions, i.e. \f$ c_s^2=u \f$. If the value for \f$ u \f$ is + zero, then the initial gas temperature in the parameter file is used to + define the sound speed according to \f$ c_s^2= k\,T/m_p \f$ , where \f$ + m_p \f$ is the proton mass. + +- \b ADAPTIVE_GRAVSOFT_FORGAS \n When this option is set, the gravitational + softening lengths used for gas particles is tied to their SPH smoothing + length. This can be useful for dissipative collapse simulations. The + option requires the setting of UNEQUALSOFTENINGS. + +- \b SELECTIVE_NO_GRAVITY \n This can be used for special computations where + one wants to exclude certain particle types from receiving gravitational + forces. The particle types that are excluded in this fashion are specified + by a bit mask, in the same as for the PLACEHIGHRESREGION option. + +- \b LONGIDS \n If this is set, the code assumes that particle-IDs are + stored as 64-bit long integers. 
This is only really needed if you want + to go beyond ~2 billion particles. + +\n +\section secmake8 Testing and Debugging options +- \b FORCETEST=0.01 \n This can be set to check the force accuracy of the + code, and is only included as a debugging option. The option needs to + be set to a number between 0 and 1 (e.g. 0.01), which specifies the + fraction of randomly chosen particles for which at each timestep + forces by direct summation are computed. The normal tree-forces and + the `correct' direct summation forces are then collected in a file \b + forcetest.txt for later inspection. Note that the simulation itself is + unaffected by this option, but it will of course run much(!) slower, + particularly if FORCETEST*NumPart*NumPart>>NumPart + Note: Particle IDs must be set to numbers >=1 for this + option to work. + +\n +\section secmake9 Glass making +- \b MAKEGLASS=262144 \n This option can be used to generate a glass-like + particle configuration. The value assigned gives the particle load, + which is initially generated as a Poisson sample and then evolved + towards a glass with the sign of gravity reversed + +*/ diff --git a/src/PyGadget/src/main.o b/src/PyGadget/src/main.o new file mode 100644 index 0000000..653bfb3 Binary files /dev/null and b/src/PyGadget/src/main.o differ diff --git a/src/PyGadget/src/ngb.c b/src/PyGadget/src/ngb.c new file mode 100644 index 0000000..8b61606 --- /dev/null +++ b/src/PyGadget/src/ngb.c @@ -0,0 +1,413 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file ngb.c + * \brief neighbour search by means of the tree + * + * This file contains routines for neighbour finding. We use the + * gravity-tree and a range-searching technique to find neighbours. + */ + +#ifdef PERIODIC +static double boxSize, boxHalf; + +#ifdef LONG_X +static double boxSize_X, boxHalf_X; +#else +#define boxSize_X boxSize +#define boxHalf_X boxHalf +#endif +#ifdef LONG_Y +static double boxSize_Y, boxHalf_Y; +#else +#define boxSize_Y boxSize +#define boxHalf_Y boxHalf +#endif +#ifdef LONG_Z +static double boxSize_Z, boxHalf_Z; +#else +#define boxSize_Z boxSize +#define boxHalf_Z boxHalf +#endif +#endif + + +/*! these macros maps a coordinate difference to the nearest periodic + * image + */ + +#define NGB_PERIODIC_X(x) (xtmp=(x),(xtmp>boxHalf_X)?(xtmp-boxSize_X):((xtmp<-boxHalf_X)?(xtmp+boxSize_X):xtmp)) +#define NGB_PERIODIC_Y(x) (xtmp=(x),(xtmp>boxHalf_Y)?(xtmp-boxSize_Y):((xtmp<-boxHalf_Y)?(xtmp+boxSize_Y):xtmp)) +#define NGB_PERIODIC_Z(x) (xtmp=(x),(xtmp>boxHalf_Z)?(xtmp-boxSize_Z):((xtmp<-boxHalf_Z)?(xtmp+boxSize_Z):xtmp)) + + + +/*! This routine finds all neighbours `j' that can interact with the + * particle `i' in the communication buffer. + * + * Note that an interaction can take place if + * \f$ r_{ij} < h_i \f$ OR if \f$ r_{ij} < h_j \f$. + * + * In the range-search this is taken into account, i.e. it is guaranteed that + * all particles are found that fulfil this condition, including the (more + * difficult) second part of it. For this purpose, each node knows the + * maximum h occuring among the particles it represents. 
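 *
 * As a rough usage sketch (an assumed calling pattern, not code taken from
 * this file): for a given search centre pos[] and smoothing length hsml, a
 * caller starts the walk at the root node, All.MaxPart, and repeats the call
 * until the routine signals completion by setting the start node to -1;
 * process_pair() below is a hypothetical placeholder for the per-neighbour
 * work done on the indices returned in Ngblist:
 * \code
 *   int n, numngb, startnode;
 *
 *   startnode = All.MaxPart;
 *   do
 *     {
 *       numngb = ngb_treefind_pairs(pos, hsml, &startnode);
 *       for(n = 0; n < numngb; n++)
 *         process_pair(Ngblist[n]);
 *     }
 *   while(startnode >= 0);
 * \endcode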
+ */ +int ngb_treefind_pairs(FLOAT searchcenter[3], FLOAT hsml, int *startnode) +{ + int k, no, p, numngb; + FLOAT hdiff; + FLOAT searchmin[3], searchmax[3]; + struct NODE *this; + +#ifdef PERIODIC + double xtmp; +#endif + + for(k = 0; k < 3; k++) /* cube-box window */ + { + searchmin[k] = searchcenter[k] - hsml; + searchmax[k] = searchcenter[k] + hsml; + } + + numngb = 0; + no = *startnode; + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + p = no; + no = Nextnode[no]; + + if(P[p].Type > 0) + continue; + + hdiff = SphP[p].Hsml - hsml; + if(hdiff < 0) + hdiff = 0; + +#ifdef PERIODIC + if(NGB_PERIODIC_X(P[p].Pos[0] - searchcenter[0]) < (-hsml - hdiff)) + continue; + if(NGB_PERIODIC_X(P[p].Pos[0] - searchcenter[0]) > (hsml + hdiff)) + continue; + if(NGB_PERIODIC_Y(P[p].Pos[1] - searchcenter[1]) < (-hsml - hdiff)) + continue; + if(NGB_PERIODIC_Y(P[p].Pos[1] - searchcenter[1]) > (hsml + hdiff)) + continue; + if(NGB_PERIODIC_Z(P[p].Pos[2] - searchcenter[2]) < (-hsml - hdiff)) + continue; + if(NGB_PERIODIC_Z(P[p].Pos[2] - searchcenter[2]) > (hsml + hdiff)) + continue; +#else + if(P[p].Pos[0] < (searchmin[0] - hdiff)) + continue; + if(P[p].Pos[0] > (searchmax[0] + hdiff)) + continue; + if(P[p].Pos[1] < (searchmin[1] - hdiff)) + continue; + if(P[p].Pos[1] > (searchmax[1] + hdiff)) + continue; + if(P[p].Pos[2] < (searchmin[2] - hdiff)) + continue; + if(P[p].Pos[2] > (searchmax[2] + hdiff)) + continue; +#endif + Ngblist[numngb++] = p; + + if(numngb == MAX_NGB) + { + printf + ("ThisTask=%d: Need to do a second neighbour loop in hydro-force for (%g|%g|%g) hsml=%g no=%d\n", + ThisTask, searchcenter[0], searchcenter[1], searchcenter[2], hsml, no); + *startnode = no; + return numngb; + } + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + no = Nextnode[no - MaxNodes]; + continue; + } + + this = &Nodes[no]; + hdiff = Extnodes[no].hmax - hsml; + if(hdiff < 0) + hdiff = 0; + + no = this->u.d.sibling; /* in case the node can be discarded */ + +#ifdef PERIODIC + if((NGB_PERIODIC_X(this->center[0] - searchcenter[0]) + 0.5 * this->len) < (-hsml - hdiff)) + continue; + if((NGB_PERIODIC_X(this->center[0] - searchcenter[0]) - 0.5 * this->len) > (hsml + hdiff)) + continue; + if((NGB_PERIODIC_Y(this->center[1] - searchcenter[1]) + 0.5 * this->len) < (-hsml - hdiff)) + continue; + if((NGB_PERIODIC_Y(this->center[1] - searchcenter[1]) - 0.5 * this->len) > (hsml + hdiff)) + continue; + if((NGB_PERIODIC_Z(this->center[2] - searchcenter[2]) + 0.5 * this->len) < (-hsml - hdiff)) + continue; + if((NGB_PERIODIC_Z(this->center[2] - searchcenter[2]) - 0.5 * this->len) > (hsml + hdiff)) + continue; +#else + if((this->center[0] + 0.5 * this->len) < (searchmin[0] - hdiff)) + continue; + if((this->center[0] - 0.5 * this->len) > (searchmax[0] + hdiff)) + continue; + if((this->center[1] + 0.5 * this->len) < (searchmin[1] - hdiff)) + continue; + if((this->center[1] - 0.5 * this->len) > (searchmax[1] + hdiff)) + continue; + if((this->center[2] + 0.5 * this->len) < (searchmin[2] - hdiff)) + continue; + if((this->center[2] - 0.5 * this->len) > (searchmax[2] + hdiff)) + continue; +#endif + no = this->u.d.nextnode; /* ok, we need to open the node */ + } + } + + *startnode = -1; + return numngb; +} + + + +/*! This function returns neighbours with distance <= hsml and returns them in + * Ngblist. Actually, particles in a box of half side length hsml are + * returned, i.e. 
the reduction to a sphere still needs to be done in the + * calling routine. + */ +int ngb_treefind_variable(FLOAT searchcenter[3], FLOAT hsml, int *startnode) +{ + int k, numngb; + int no, p; + struct NODE *this; + FLOAT searchmin[3], searchmax[3]; + +#ifdef PERIODIC + double xtmp; +#endif + + for(k = 0; k < 3; k++) /* cube-box window */ + { + searchmin[k] = searchcenter[k] - hsml; + searchmax[k] = searchcenter[k] + hsml; + } + + numngb = 0; + no = *startnode; + + while(no >= 0) + { + if(no < All.MaxPart) /* single particle */ + { + p = no; + no = Nextnode[no]; + + if(P[p].Type > 0) + continue; + +#ifdef PERIODIC + if(NGB_PERIODIC_X(P[p].Pos[0] - searchcenter[0]) < -hsml) + continue; + if(NGB_PERIODIC_X(P[p].Pos[0] - searchcenter[0]) > hsml) + continue; + if(NGB_PERIODIC_Y(P[p].Pos[1] - searchcenter[1]) < -hsml) + continue; + if(NGB_PERIODIC_Y(P[p].Pos[1] - searchcenter[1]) > hsml) + continue; + if(NGB_PERIODIC_Z(P[p].Pos[2] - searchcenter[2]) < -hsml) + continue; + if(NGB_PERIODIC_Z(P[p].Pos[2] - searchcenter[2]) > hsml) + continue; +#else + if(P[p].Pos[0] < searchmin[0]) + continue; + if(P[p].Pos[0] > searchmax[0]) + continue; + if(P[p].Pos[1] < searchmin[1]) + continue; + if(P[p].Pos[1] > searchmax[1]) + continue; + if(P[p].Pos[2] < searchmin[2]) + continue; + if(P[p].Pos[2] > searchmax[2]) + continue; +#endif + Ngblist[numngb++] = p; + + if(numngb == MAX_NGB) + { + numngb = ngb_clear_buf(searchcenter, hsml, numngb); + if(numngb == MAX_NGB) + { + printf("ThisTask=%d: Need to do a second neighbour loop for (%g|%g|%g) hsml=%g no=%d\n", + ThisTask, searchcenter[0], searchcenter[1], searchcenter[2], hsml, no); + *startnode = no; + return numngb; + } + } + } + else + { + if(no >= All.MaxPart + MaxNodes) /* pseudo particle */ + { + Exportflag[DomainTask[no - (All.MaxPart + MaxNodes)]] = 1; + no = Nextnode[no - MaxNodes]; + continue; + } + + this = &Nodes[no]; + + no = this->u.d.sibling; /* in case the node can be discarded */ +#ifdef PERIODIC + if((NGB_PERIODIC_X(this->center[0] - searchcenter[0]) + 0.5 * this->len) < -hsml) + continue; + if((NGB_PERIODIC_X(this->center[0] - searchcenter[0]) - 0.5 * this->len) > hsml) + continue; + if((NGB_PERIODIC_Y(this->center[1] - searchcenter[1]) + 0.5 * this->len) < -hsml) + continue; + if((NGB_PERIODIC_Y(this->center[1] - searchcenter[1]) - 0.5 * this->len) > hsml) + continue; + if((NGB_PERIODIC_Z(this->center[2] - searchcenter[2]) + 0.5 * this->len) < -hsml) + continue; + if((NGB_PERIODIC_Z(this->center[2] - searchcenter[2]) - 0.5 * this->len) > hsml) + continue; +#else + if((this->center[0] + 0.5 * this->len) < (searchmin[0])) + continue; + if((this->center[0] - 0.5 * this->len) > (searchmax[0])) + continue; + if((this->center[1] + 0.5 * this->len) < (searchmin[1])) + continue; + if((this->center[1] - 0.5 * this->len) > (searchmax[1])) + continue; + if((this->center[2] + 0.5 * this->len) < (searchmin[2])) + continue; + if((this->center[2] - 0.5 * this->len) > (searchmax[2])) + continue; +#endif + no = this->u.d.nextnode; /* ok, we need to open the node */ + } + } + + *startnode = -1; + return numngb; +} + + + + +/*! The buffer for the neighbour list has a finite length MAX_NGB. For a large + * search region, this buffer can get full, in which case this routine can be + * called to eliminate some of the superfluous particles in the "corners" of + * the search box - only the ones in the inscribed sphere need to be kept. 
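 *
 * Concretely, the sphere inscribed in the cubical search box fills only a
 * fraction pi/6 (about 52 per cent) of its volume, so for a roughly uniform
 * particle distribution almost half of the buffered candidates can be
 * removed again by this pruning step.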
+ */ +int ngb_clear_buf(FLOAT searchcenter[3], FLOAT hsml, int numngb) +{ + int i, p; + FLOAT dx, dy, dz, r2; + +#ifdef PERIODIC + double xtmp; +#endif + + for(i = 0; i < numngb; i++) + { + p = Ngblist[i]; +#ifdef PERIODIC + dx = NGB_PERIODIC_X(P[p].Pos[0] - searchcenter[0]); + dy = NGB_PERIODIC_Y(P[p].Pos[1] - searchcenter[1]); + dz = NGB_PERIODIC_Z(P[p].Pos[2] - searchcenter[2]); +#else + dx = P[p].Pos[0] - searchcenter[0]; + dy = P[p].Pos[1] - searchcenter[1]; + dz = P[p].Pos[2] - searchcenter[2]; +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(r2 > hsml * hsml) + { + Ngblist[i] = Ngblist[numngb - 1]; + i--; + numngb--; + } + } + + return numngb; +} + + + +/*! Allocates memory for the neighbour list buffer. + */ +void ngb_treeallocate(int npart) +{ + double totbytes = 0; + size_t bytes; + +#ifdef PERIODIC + boxSize = All.BoxSize; + boxHalf = 0.5 * All.BoxSize; +#ifdef LONG_X + boxHalf_X = boxHalf * LONG_X; + boxSize_X = boxSize * LONG_X; +#endif +#ifdef LONG_Y + boxHalf_Y = boxHalf * LONG_Y; + boxSize_Y = boxSize * LONG_Y; +#endif +#ifdef LONG_Z + boxHalf_Z = boxHalf * LONG_Z; + boxSize_Z = boxSize * LONG_Z; +#endif +#endif + + if(!(Ngblist = malloc(bytes = npart * (long) sizeof(int)))) + { + printf("Failed to allocate %g MB for ngblist array\n", bytes / (1024.0 * 1024.0)); + endrun(78); + } + totbytes += bytes; + + if(ThisTask == 0) + printf("allocated %g Mbyte for ngb search.\n", totbytes / (1024.0 * 1024.0)); +} + + +/*! free memory allocated for neighbour list buffer. + */ +void ngb_treefree(void) +{ + free(Ngblist); +} + +/*! This function constructs the neighbour tree. To this end, we actually need + * to construct the gravitational tree, because we use it now for the + * neighbour search. + */ +void ngb_treebuild(void) +{ + if(ThisTask == 0) + printf("Begin Ngb-tree construction.\n"); + + force_treebuild(N_gas); + + if(ThisTask == 0) + printf("Ngb-Tree contruction finished \n"); +} + diff --git a/src/PyGadget/src/ngb.o b/src/PyGadget/src/ngb.o new file mode 100644 index 0000000..1697a5b Binary files /dev/null and b/src/PyGadget/src/ngb.o differ diff --git a/src/PyGadget/src/peano.c b/src/PyGadget/src/peano.c new file mode 100644 index 0000000..ac86382 --- /dev/null +++ b/src/PyGadget/src/peano.c @@ -0,0 +1,529 @@ +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + +/*! \file peano.c + * \brief Routines to compute a Peano-Hilbert order + * + * This file contains routines to compute Peano-Hilbert keys, and to put the + * particle data into the order of these keys, i.e. into the order of a + * space-filling fractal curve. + */ + + +static struct peano_hilbert_data +{ + peanokey key; + int index; +} + *mp; + +static int *Id; + + +/*! This function puts the particles into Peano-Hilbert order by sorting them + * according to their keys. The latter half already been computed in the + * domain decomposition. Since gas particles need to stay at the beginning of + * the particle list, they are sorted as a separate block. 
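 *
 * (Gas particles have to remain the first N_gas entries of the particle
 * list because SphP[i] is indexed in parallel with P[i] for i < N_gas;
 * this is why the gas block and the collisionless block are permuted
 * independently of each other.)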
+ */ +void peano_hilbert_order(void) +{ + int i; + + if(ThisTask == 0) + printf("begin Peano-Hilbert order...\n"); + + if(N_gas) + { + mp = malloc(sizeof(struct peano_hilbert_data) * N_gas); + Id = malloc(sizeof(int) * N_gas); + + for(i = 0; i < N_gas; i++) + { + mp[i].index = i; + mp[i].key = Key[i]; + } + + qsort(mp, N_gas, sizeof(struct peano_hilbert_data), compare_key); + + for(i = 0; i < N_gas; i++) + Id[mp[i].index] = i; + + reorder_gas(); + + free(Id); + free(mp); + } + + + if(NumPart - N_gas > 0) + { + mp = malloc(sizeof(struct peano_hilbert_data) * (NumPart - N_gas)); + mp -= (N_gas); + + Id = malloc(sizeof(int) * (NumPart - N_gas)); + Id -= (N_gas); + + for(i = N_gas; i < NumPart; i++) + { + mp[i].index = i; + mp[i].key = Key[i]; + } + + qsort(mp + N_gas, NumPart - N_gas, sizeof(struct peano_hilbert_data), compare_key); + + for(i = N_gas; i < NumPart; i++) + Id[mp[i].index] = i; + + reorder_particles(); + + Id += N_gas; + free(Id); + mp += N_gas; + free(mp); + } + + if(ThisTask == 0) + printf("Peano-Hilbert done.\n"); +} + + +#ifdef PY_INTERFACE +/*! This function puts the particles into Peano-Hilbert order by sorting them + * according to their keys. The latter half already been computed in the + * domain decomposition. Since gas particles need to stay at the beginning of + * the particle list, they are sorted as a separate block. + */ +void peano_hilbert_orderQ(void) +{ + int i; + + if(ThisTask == 0) + printf("begin Peano-Hilbert order...\n"); + + if(N_gasQ) + { + mp = malloc(sizeof(struct peano_hilbert_data) * N_gasQ); + Id = malloc(sizeof(int) * N_gasQ); + + for(i = 0; i < N_gasQ; i++) + { + mp[i].index = i; + mp[i].key = Key[i]; + } + + qsort(mp, N_gasQ, sizeof(struct peano_hilbert_data), compare_key); + + for(i = 0; i < N_gasQ; i++) + Id[mp[i].index] = i; + + reorder_gasQ(); + + free(Id); + free(mp); + } + + + if(NumPartQ - N_gasQ > 0) + { + mp = malloc(sizeof(struct peano_hilbert_data) * (NumPartQ - N_gasQ)); + mp -= (N_gasQ); + + Id = malloc(sizeof(int) * (NumPartQ - N_gasQ)); + Id -= (N_gasQ); + + for(i = N_gasQ; i < NumPartQ; i++) + { + mp[i].index = i; + mp[i].key = Key[i]; + } + + qsort(mp + N_gasQ, NumPartQ - N_gasQ, sizeof(struct peano_hilbert_data), compare_key); + + for(i = N_gasQ; i < NumPartQ; i++) + Id[mp[i].index] = i; + + reorder_particlesQ(); + + Id += N_gasQ; + free(Id); + mp += N_gasQ; + free(mp); + } + + if(ThisTask == 0) + printf("Peano-Hilbert done.\n"); +} +#endif + + +/*! This function is a comparison kernel for sorting the Peano-Hilbert keys. + */ +int compare_key(const void *a, const void *b) +{ + if(((struct peano_hilbert_data *) a)->key < (((struct peano_hilbert_data *) b)->key)) + return -1; + + if(((struct peano_hilbert_data *) a)->key > (((struct peano_hilbert_data *) b)->key)) + return +1; + + return 0; +} + + +/*! This function brings the gas particles into the same order as the sorted + * keys. (The sort is first done only on the keys themselves and done + * directly on the gas particles in order to reduce the amount of data that + * needs to be moved in memory. Only once the order is established, the gas + * particles are rearranged, such that each particle has to be moved at most + * once.) 
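 *
 * A small worked example (index values invented for illustration): for
 * Id = {2, 0, 1}, the single cycle started at i=0 writes the old P[0]/SphP[0]
 * into slot 2, the old entry 2 into slot 1, and the old entry 1 into slot 0;
 * afterwards Id[i] == i everywhere, so the remaining iterations of the outer
 * loop do nothing.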
+ */ +void reorder_gas(void) +{ + int i; + struct particle_data Psave, Psource; + struct sph_particle_data SphPsave, SphPsource; + int idsource, idsave, dest; + + for(i = 0; i < N_gas; i++) + { + if(Id[i] != i) + { + Psource = P[i]; + SphPsource = SphP[i]; + + idsource = Id[i]; + dest = Id[i]; + + do + { + Psave = P[dest]; + SphPsave = SphP[dest]; + idsave = Id[dest]; + + P[dest] = Psource; + SphP[dest] = SphPsource; + Id[dest] = idsource; + + if(dest == i) + break; + + Psource = Psave; + SphPsource = SphPsave; + idsource = idsave; + + dest = idsource; + } + while(1); + } + } +} + + +#ifdef PY_INTERFACE +/*! This function brings the gas particles into the same order as the sorted + * keys. (The sort is first done only on the keys themselves and done + * directly on the gas particles in order to reduce the amount of data that + * needs to be moved in memory. Only once the order is established, the gas + * particles are rearranged, such that each particle has to be moved at most + * once.) + */ +void reorder_gasQ(void) +{ + int i; + struct particle_data Psave, Psource; + struct sph_particle_data SphPsave, SphPsource; + int idsource, idsave, dest; + + for(i = 0; i < N_gasQ; i++) + { + if(Id[i] != i) + { + Psource = Q[i]; + SphPsource = SphQ[i]; + + idsource = Id[i]; + dest = Id[i]; + + do + { + Psave = Q[dest]; + SphPsave = SphQ[dest]; + idsave = Id[dest]; + + Q[dest] = Psource; + SphQ[dest] = SphPsource; + Id[dest] = idsource; + + if(dest == i) + break; + + Psource = Psave; + SphPsource = SphPsave; + idsource = idsave; + + dest = idsource; + } + while(1); + } + } +} +#endif + + +/*! This function brings the collisionless particles into the same order as + * the sorted keys. (The sort is first done only on the keys themselves and + * done directly on the particles in order to reduce the amount of data that + * needs to be moved in memory. Only once the order is established, the + * particles are rearranged, such that each particle has to be moved at most + * once.) + */ +void reorder_particles(void) +{ + int i; + struct particle_data Psave, Psource; + int idsource, idsave, dest; + + for(i = N_gas; i < NumPart; i++) + { + if(Id[i] != i) + { + Psource = P[i]; + idsource = Id[i]; + + dest = Id[i]; + + do + { + Psave = P[dest]; + idsave = Id[dest]; + + P[dest] = Psource; + Id[dest] = idsource; + + if(dest == i) + break; + + Psource = Psave; + idsource = idsave; + + dest = idsource; + } + while(1); + } + } +} + + +#ifdef PY_INTERFACE +/*! This function brings the collisionless particles into the same order as + * the sorted keys. (The sort is first done only on the keys themselves and + * done directly on the particles in order to reduce the amount of data that + * needs to be moved in memory. Only once the order is established, the + * particles are rearranged, such that each particle has to be moved at most + * once.) 
+ */ +void reorder_particlesQ(void) +{ + int i; + struct particle_data Psave, Psource; + int idsource, idsave, dest; + + for(i = N_gasQ; i < NumPartQ; i++) + { + if(Id[i] != i) + { + Psource = Q[i]; + idsource = Id[i]; + + dest = Id[i]; + + do + { + Psave = Q[dest]; + idsave = Id[dest]; + + Q[dest] = Psource; + Id[dest] = idsource; + + if(dest == i) + break; + + Psource = Psave; + idsource = idsave; + + dest = idsource; + } + while(1); + } + } +} +#endif + + +static int quadrants[24][2][2][2] = { + /* rotx=0, roty=0-3 */ + {{{0, 7}, {1, 6}}, {{3, 4}, {2, 5}}}, + {{{7, 4}, {6, 5}}, {{0, 3}, {1, 2}}}, + {{{4, 3}, {5, 2}}, {{7, 0}, {6, 1}}}, + {{{3, 0}, {2, 1}}, {{4, 7}, {5, 6}}}, + /* rotx=1, roty=0-3 */ + {{{1, 0}, {6, 7}}, {{2, 3}, {5, 4}}}, + {{{0, 3}, {7, 4}}, {{1, 2}, {6, 5}}}, + {{{3, 2}, {4, 5}}, {{0, 1}, {7, 6}}}, + {{{2, 1}, {5, 6}}, {{3, 0}, {4, 7}}}, + /* rotx=2, roty=0-3 */ + {{{6, 1}, {7, 0}}, {{5, 2}, {4, 3}}}, + {{{1, 2}, {0, 3}}, {{6, 5}, {7, 4}}}, + {{{2, 5}, {3, 4}}, {{1, 6}, {0, 7}}}, + {{{5, 6}, {4, 7}}, {{2, 1}, {3, 0}}}, + /* rotx=3, roty=0-3 */ + {{{7, 6}, {0, 1}}, {{4, 5}, {3, 2}}}, + {{{6, 5}, {1, 2}}, {{7, 4}, {0, 3}}}, + {{{5, 4}, {2, 3}}, {{6, 7}, {1, 0}}}, + {{{4, 7}, {3, 0}}, {{5, 6}, {2, 1}}}, + /* rotx=4, roty=0-3 */ + {{{6, 7}, {5, 4}}, {{1, 0}, {2, 3}}}, + {{{7, 0}, {4, 3}}, {{6, 1}, {5, 2}}}, + {{{0, 1}, {3, 2}}, {{7, 6}, {4, 5}}}, + {{{1, 6}, {2, 5}}, {{0, 7}, {3, 4}}}, + /* rotx=5, roty=0-3 */ + {{{2, 3}, {1, 0}}, {{5, 4}, {6, 7}}}, + {{{3, 4}, {0, 7}}, {{2, 5}, {1, 6}}}, + {{{4, 5}, {7, 6}}, {{3, 2}, {0, 1}}}, + {{{5, 2}, {6, 1}}, {{4, 3}, {7, 0}}} +}; + + +static int rotxmap_table[24] = { 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 0, 1, 2, 3, 17, 18, 19, 16, 23, 20, 21, 22 +}; + +static int rotymap_table[24] = { 1, 2, 3, 0, 16, 17, 18, 19, + 11, 8, 9, 10, 22, 23, 20, 21, 14, 15, 12, 13, 4, 5, 6, 7 +}; + +static int rotx_table[8] = { 3, 0, 0, 2, 2, 0, 0, 1 }; +static int roty_table[8] = { 0, 1, 1, 2, 2, 3, 3, 0 }; + +static int sense_table[8] = { -1, -1, -1, +1, +1, -1, -1, -1 }; + +static int flag_quadrants_inverse = 1; +static char quadrants_inverse_x[24][8]; +static char quadrants_inverse_y[24][8]; +static char quadrants_inverse_z[24][8]; + + +/*! This function computes a Peano-Hilbert key for an integer triplet (x,y,z), + * with x,y,z in the range between 0 and 2^bits-1. + */ +peanokey peano_hilbert_key(int x, int y, int z, int bits) +{ + int i, quad, bitx, bity, bitz; + int mask, rotation, rotx, roty, sense; + peanokey key; + + + mask = 1 << (bits - 1); + key = 0; + rotation = 0; + sense = 1; + + + for(i = 0; i < bits; i++, mask >>= 1) + { + bitx = (x & mask) ? 1 : 0; + bity = (y & mask) ? 1 : 0; + bitz = (z & mask) ? 1 : 0; + + quad = quadrants[rotation][bitx][bity][bitz]; + + key <<= 3; + key += (sense == 1) ? (quad) : (7 - quad); + + rotx = rotx_table[quad]; + roty = roty_table[quad]; + sense *= sense_table[quad]; + + while(rotx > 0) + { + rotation = rotxmap_table[rotation]; + rotx--; + } + + while(roty > 0) + { + rotation = rotymap_table[rotation]; + roty--; + } + } + + return key; +} + + +/*! This function computes for a given Peano-Hilbert key, the inverse, + * i.e. the integer triplet (x,y,z) with a Peano-Hilbert key equal to the + * input key. (This functionality is actually not needed in the present + * code.) 
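 *
 * A minimal round-trip check (illustrative only, not part of the original
 * code): with 3 bits per dimension the coordinates must lie in [0,7], and
 * the inverse recovers the triplet that generated the key, i.e. after the
 * two calls below x2=5, y2=3 and z2=6:
 * \code
 *   int x2, y2, z2;
 *   peanokey key = peano_hilbert_key(5, 3, 6, 3);
 *   peano_hilbert_key_inverse(key, 3, &x2, &y2, &z2);
 * \endcode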
+ */ +void peano_hilbert_key_inverse(peanokey key, int bits, int *x, int *y, int *z) +{ + int i, keypart, bitx, bity, bitz, mask, quad, rotation, shift; + char sense, rotx, roty; + + if(flag_quadrants_inverse) + { + flag_quadrants_inverse = 0; + for(rotation = 0; rotation < 24; rotation++) + for(bitx = 0; bitx < 2; bitx++) + for(bity = 0; bity < 2; bity++) + for(bitz = 0; bitz < 2; bitz++) + { + quad = quadrants[rotation][bitx][bity][bitz]; + quadrants_inverse_x[rotation][quad] = bitx; + quadrants_inverse_y[rotation][quad] = bity; + quadrants_inverse_z[rotation][quad] = bitz; + } + } + + shift = 3 * (bits - 1); + mask = 7 << shift; + + rotation = 0; + sense = 1; + + *x = *y = *z = 0; + + for(i = 0; i < bits; i++, mask >>= 3, shift -= 3) + { + keypart = (key & mask) >> shift; + + quad = (sense == 1) ? (keypart) : (7 - keypart); + + *x = (*x << 1) + quadrants_inverse_x[rotation][quad]; + *y = (*y << 1) + quadrants_inverse_y[rotation][quad]; + *z = (*z << 1) + quadrants_inverse_z[rotation][quad]; + + rotx = rotx_table[quad]; + roty = roty_table[quad]; + sense *= sense_table[quad]; + + while(rotx > 0) + { + rotation = rotxmap_table[rotation]; + rotx--; + } + + while(roty > 0) + { + rotation = rotymap_table[rotation]; + roty--; + } + } +} diff --git a/src/PyGadget/src/peano.o b/src/PyGadget/src/peano.o new file mode 100644 index 0000000..91b44f4 Binary files /dev/null and b/src/PyGadget/src/peano.o differ diff --git a/src/PyGadget/src/pm_nonperiodic.c b/src/PyGadget/src/pm_nonperiodic.c new file mode 100644 index 0000000..9f0543a --- /dev/null +++ b/src/PyGadget/src/pm_nonperiodic.c @@ -0,0 +1,1445 @@ +#include +#include +#include +#include +#include + + +/*! \file pm_nonperiodic.c + * \brief code for non-periodic FFT to compute long-range PM force + */ + + +#ifdef PMGRID +#if !defined (PERIODIC) || defined (PLACEHIGHRESREGION) + +#ifdef NOTYPEPREFIX_FFTW +#include +#else +#ifdef DOUBLEPRECISION_FFTW +#include /* double precision FFTW */ +#else +#include +#endif +#endif + +#include "allvars.h" +#include "proto.h" + +#define GRID (2*PMGRID) +#define GRID2 (2*(GRID/2 + 1)) + + + +static rfftwnd_mpi_plan fft_forward_plan, fft_inverse_plan; + +static int slab_to_task[GRID]; +static int *slabs_per_task; +static int *first_slab_of_task; + +static int *meshmin_list, *meshmax_list; + +static int slabstart_x, nslab_x, slabstart_y, nslab_y; + +static int fftsize, maxfftsize; + +static fftw_real *kernel[2], *rhogrid, *forcegrid, *workspace; +static fftw_complex *fft_of_kernel[2], *fft_of_rhogrid; + +/*! This function determines the particle extension of all particles, and for + * those types selected with PLACEHIGHRESREGION if this is used, and then + * determines the boundaries of the non-periodic FFT-mesh that can be placed + * on this region. Note that a sufficient buffer region at the rim of the + * occupied part of the mesh needs to be reserved in order to allow a correct + * finite differencing using a 4-point formula. In addition, to allow + * non-periodic boundaries, the actual FFT mesh used is twice as large in + * each dimension compared with PMGRID. 
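 *
 * For example (numbers only to illustrate the macros defined above): with
 * PMGRID=128 the non-periodic mesh has GRID = 2*PMGRID = 256 cells per
 * dimension, and the padded extent of the last dimension used for the
 * in-place real-to-complex FFT is GRID2 = 2*(GRID/2 + 1) = 258.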
+ */ +void pm_init_regionsize(void) +{ + double meshinner[2], xmin[2][3], xmax[2][3]; + int i, j, t; + + /* find enclosing rectangle */ + + for(j = 0; j < 3; j++) + { + xmin[0][j] = xmin[1][j] = 1.0e36; + xmax[0][j] = xmax[1][j] = -1.0e36; + } + + for(i = 0; i < NumPart; i++) + for(j = 0; j < 3; j++) + { + t = 0; +#ifdef PLACEHIGHRESREGION + if(((1 << P[i].Type) & (PLACEHIGHRESREGION))) + t = 1; +#endif + if(P[i].Pos[j] > xmax[t][j]) + xmax[t][j] = P[i].Pos[j]; + if(P[i].Pos[j] < xmin[t][j]) + xmin[t][j] = P[i].Pos[j]; + } + + MPI_Allreduce(xmin, All.Xmintot, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + MPI_Allreduce(xmax, All.Xmaxtot, 6, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + + for(j = 0; j < 2; j++) + { + All.TotalMeshSize[j] = All.Xmaxtot[j][0] - All.Xmintot[j][0]; + All.TotalMeshSize[j] = dmax(All.TotalMeshSize[j], All.Xmaxtot[j][1] - All.Xmintot[j][1]); + All.TotalMeshSize[j] = dmax(All.TotalMeshSize[j], All.Xmaxtot[j][2] - All.Xmintot[j][2]); +#ifdef ENLARGEREGION + All.TotalMeshSize[j] *= ENLARGEREGION; +#endif + + /* symmetrize the box onto the center */ + for(i = 0; i < 3; i++) + { + All.Xmintot[j][i] = (All.Xmintot[j][i] + All.Xmaxtot[j][i]) / 2 - All.TotalMeshSize[j] / 2; + All.Xmaxtot[j][i] = All.Xmintot[j][i] + All.TotalMeshSize[j]; + } + } + + /* this will produce enough room for zero-padding and buffer region to + allow finite differencing of the potential */ + + for(j = 0; j < 2; j++) + { + meshinner[j] = All.TotalMeshSize[j]; + All.TotalMeshSize[j] *= 2.001 * (GRID) / ((double) (GRID - 2 - 8)); + } + + /* move lower left corner by two cells to allow finite differencing of the potential by a 4-point function */ + + for(j = 0; j < 2; j++) + for(i = 0; i < 3; i++) + { + All.Corner[j][i] = All.Xmintot[j][i] - 2.0005 * All.TotalMeshSize[j] / GRID; + All.UpperCorner[j][i] = All.Corner[j][i] + (GRID / 2 - 1) * (All.TotalMeshSize[j] / GRID); + } + + +#ifndef PERIODIC + All.Asmth[0] = ASMTH * All.TotalMeshSize[0] / GRID; + All.Rcut[0] = RCUT * All.Asmth[0]; +#endif + +#ifdef PLACEHIGHRESREGION + All.Asmth[1] = ASMTH * All.TotalMeshSize[1] / GRID; + All.Rcut[1] = RCUT * All.Asmth[1]; +#endif + +#ifdef PLACEHIGHRESREGION + if(2 * All.TotalMeshSize[1] / GRID < All.Rcut[0]) + { + All.TotalMeshSize[1] = 2 * (meshinner[1] + 2 * All.Rcut[0]) * (GRID) / ((double) (GRID - 2)); + + for(i = 0; i < 3; i++) + { + All.Corner[1][i] = All.Xmintot[1][i] - 1.0001 * All.Rcut[0]; + All.UpperCorner[1][i] = All.Corner[1][i] + (GRID / 2 - 1) * (All.TotalMeshSize[1] / GRID); + } + + if(2 * All.TotalMeshSize[1] / GRID > All.Rcut[0]) + { + All.TotalMeshSize[1] = 2 * (meshinner[1] + 2 * All.Rcut[0]) * (GRID) / ((double) (GRID - 10)); + + for(i = 0; i < 3; i++) + { + All.Corner[1][i] = All.Xmintot[1][i] - 1.0001 * (All.Rcut[0] + 2 * All.TotalMeshSize[j] / GRID); + All.UpperCorner[1][i] = All.Corner[1][i] + (GRID / 2 - 1) * (All.TotalMeshSize[1] / GRID); + } + } + + All.Asmth[1] = ASMTH * All.TotalMeshSize[1] / GRID; + All.Rcut[1] = RCUT * All.Asmth[1]; + } +#endif + + if(ThisTask == 0) + { +#ifndef PERIODIC + printf("\nAllowed region for isolated PM mesh (coarse):\n"); + printf("(%g|%g|%g) -> (%g|%g|%g) ext=%g totmeshsize=%g meshsize=%g\n\n", + All.Xmintot[0][0], All.Xmintot[0][1], All.Xmintot[0][2], + All.Xmaxtot[0][0], All.Xmaxtot[0][1], All.Xmaxtot[0][2], meshinner[0], All.TotalMeshSize[0], + All.TotalMeshSize[0] / GRID); +#endif +#ifdef PLACEHIGHRESREGION + printf("\nAllowed region for isolated PM mesh (high-res):\n"); + printf("(%g|%g|%g) -> (%g|%g|%g) ext=%g totmeshsize=%g meshsize=%g\n\n", + 
All.Xmintot[1][0], All.Xmintot[1][1], All.Xmintot[1][2], + All.Xmaxtot[1][0], All.Xmaxtot[1][1], All.Xmaxtot[1][2], + meshinner[1], All.TotalMeshSize[1], All.TotalMeshSize[1] / GRID); +#endif + } + +} + +/*! Initialization of the non-periodic PM routines. The plan-files for FFTW + * are created. Finally, the routine to set-up the non-periodic Greens + * function is called. + */ +void pm_init_nonperiodic(void) +{ + int i, slab_to_task_local[GRID]; + double bytes_tot = 0; + size_t bytes; + + /* Set up the FFTW plan files. */ + + fft_forward_plan = rfftw3d_mpi_create_plan(MPI_COMM_WORLD, GRID, GRID, GRID, + FFTW_REAL_TO_COMPLEX, FFTW_ESTIMATE | FFTW_IN_PLACE); + fft_inverse_plan = rfftw3d_mpi_create_plan(MPI_COMM_WORLD, GRID, GRID, GRID, + FFTW_COMPLEX_TO_REAL, FFTW_ESTIMATE | FFTW_IN_PLACE); + + /* Workspace out the ranges on each processor. */ + + rfftwnd_mpi_local_sizes(fft_forward_plan, &nslab_x, &slabstart_x, &nslab_y, &slabstart_y, &fftsize); + + + for(i = 0; i < GRID; i++) + slab_to_task_local[i] = 0; + + for(i = 0; i < nslab_x; i++) + slab_to_task_local[slabstart_x + i] = ThisTask; + + MPI_Allreduce(slab_to_task_local, slab_to_task, GRID, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + slabs_per_task = malloc(NTask * sizeof(int)); + MPI_Allgather(&nslab_x, 1, MPI_INT, slabs_per_task, 1, MPI_INT, MPI_COMM_WORLD); + +#ifndef PERIODIC + if(ThisTask == 0) + { + for(i = 0; i < NTask; i++) + printf("Task=%d FFT-Slabs=%d\n", i, slabs_per_task[i]); + } +#endif + + first_slab_of_task = malloc(NTask * sizeof(int)); + MPI_Allgather(&slabstart_x, 1, MPI_INT, first_slab_of_task, 1, MPI_INT, MPI_COMM_WORLD); + + meshmin_list = malloc(3 * NTask * sizeof(int)); + meshmax_list = malloc(3 * NTask * sizeof(int)); + + MPI_Allreduce(&fftsize, &maxfftsize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + + /* now allocate memory to hold the FFT fields */ + +#if !defined(PERIODIC) + if(!(kernel[0] = (fftw_real *) malloc(bytes = fftsize * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-kernel[0]' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + fft_of_kernel[0] = (fftw_complex *) kernel[0]; +#endif + +#if defined(PLACEHIGHRESREGION) + if(!(kernel[1] = (fftw_real *) malloc(bytes = fftsize * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-kernel[1]' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + fft_of_kernel[1] = (fftw_complex *) kernel[1]; +#endif + + if(ThisTask == 0) + printf("\nAllocated %g MByte for FFT kernel(s).\n\n", bytes_tot / (1024.0 * 1024.0)); + +} + + +/*! This function allocates the workspace needed for the non-periodic FFT + * algorithm. Three fields are used, one for the density/potential fields, + * one to hold the force field obtained by finite differencing, and finally + * an additional workspace which is used both in the parallel FFT itself, and + * as a buffer for the communication algorithm. 
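+ *
+ * Rough per-task memory budget (illustrative only; assumes double-precision
+ * fftw_real, a hypothetical GRID = 256 and 8 tasks, i.e. nslab_x = 32 local
+ * slabs, and approximates fftsize by the slab volume):
+ *
+ *   fftsize   ~ nslab_x * GRID * GRID2 = 32 * 256 * 258 ~ 2.1e6 reals
+ *   rhogrid   : fftsize * 8 bytes ~ 16 MB
+ *   forcegrid, workspace : at least the same again each
+ *
+ * so the three fields together take of order 50 MB per task in this example,
+ * on top of the statically kept kernel(s).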
+ */ +void pm_init_nonperiodic_allocate(int dimprod) +{ + static int first_alloc = 1; + int dimprodmax; + double bytes_tot = 0; + size_t bytes; + + MPI_Allreduce(&dimprod, &dimprodmax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + + if(!(rhogrid = (fftw_real *) malloc(bytes = fftsize * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-rhogrid' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + + fft_of_rhogrid = (fftw_complex *) rhogrid; + + if(!(forcegrid = (fftw_real *) malloc(bytes = imax(fftsize, dimprodmax) * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-forcegrid' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + + if(!(workspace = (fftw_real *) malloc(bytes = imax(maxfftsize, dimprodmax) * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-workspace' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + + if(first_alloc == 1) + { + first_alloc = 0; + if(ThisTask == 0) + printf("\nUsing %g MByte for non-periodic FFT computation.\n\n", bytes_tot / (1024.0 * 1024.0)); + } +} + + +/*! This function frees the memory allocated for the non-periodic FFT + * computation. (With the exception of the Greens function(s), which are kept + * statically in memory for the next force computation.) + */ +void pm_init_nonperiodic_free(void) +{ + /* deallocate memory */ + free(workspace); + free(forcegrid); + free(rhogrid); +} + + +/*! This function sets-up the Greens function for the non-periodic potential + * in real space, and then converts it to Fourier space by means of a FFT. + */ +void pm_setup_nonperiodic_kernel(void) +{ + int i, j, k; + double x, y, z, r, u, fac; + double kx, ky, kz, k2, fx, fy, fz, ff; + int ip; + + /* now set up kernel and its Fourier transform */ + + pm_init_nonperiodic_allocate(0); + +#if !defined(PERIODIC) + for(i = 0; i < fftsize; i++) /* clear local density field */ + kernel[0][i] = 0; + + for(i = slabstart_x; i < (slabstart_x + nslab_x); i++) + for(j = 0; j < GRID; j++) + for(k = 0; k < GRID; k++) + { + x = ((double) i) / GRID; + y = ((double) j) / GRID; + z = ((double) k) / GRID; + + if(x >= 0.5) + x -= 1.0; + if(y >= 0.5) + y -= 1.0; + if(z >= 0.5) + z -= 1.0; + + r = sqrt(x * x + y * y + z * z); + + u = 0.5 * r / (((double) ASMTH) / GRID); + + fac = 1 - erfc(u); + + if(r > 0) + kernel[0][GRID * GRID2 * (i - slabstart_x) + GRID2 * j + k] = -fac / r; + else + kernel[0][GRID * GRID2 * (i - slabstart_x) + GRID2 * j + k] = + -1 / (sqrt(M_PI) * (((double) ASMTH) / GRID)); + } + + /* do the forward transform of the kernel */ + + rfftwnd_mpi(fft_forward_plan, 1, kernel[0], workspace, FFTW_TRANSPOSED_ORDER); +#endif + + +#if defined(PLACEHIGHRESREGION) + for(i = 0; i < fftsize; i++) /* clear local density field */ + kernel[1][i] = 0; + + for(i = slabstart_x; i < (slabstart_x + nslab_x); i++) + for(j = 0; j < GRID; j++) + for(k = 0; k < GRID; k++) + { + x = ((double) i) / GRID; + y = ((double) j) / GRID; + z = ((double) k) / GRID; + + if(x >= 0.5) + x -= 1.0; + if(y >= 0.5) + y -= 1.0; + if(z >= 0.5) + z -= 1.0; + + r = sqrt(x * x + y * y + z * z); + + u = 0.5 * r / (((double) ASMTH) / GRID); + + fac = erfc(u * All.Asmth[1] / All.Asmth[0]) - erfc(u); + + if(r > 0) + kernel[1][GRID * GRID2 * (i - slabstart_x) + GRID2 * j + k] = -fac / r; + else + { + fac = 1 - All.Asmth[1] / All.Asmth[0]; + kernel[1][GRID * GRID2 * (i - slabstart_x) + GRID2 * j + k] = + -fac / (sqrt(M_PI) * (((double) ASMTH) / GRID)); + } + } + + /* do the forward 
transform of the kernel */ + + rfftwnd_mpi(fft_forward_plan, 1, kernel[1], workspace, FFTW_TRANSPOSED_ORDER); +#endif + + /* deconvolve the Greens function twice with the CIC kernel */ + + for(y = slabstart_y; y < slabstart_y + nslab_y; y++) + for(x = 0; x < GRID; x++) + for(z = 0; z < GRID / 2 + 1; z++) + { + if(x > GRID / 2) + kx = x - GRID; + else + kx = x; + if(y > GRID / 2) + ky = y - GRID; + else + ky = y; + if(z > GRID / 2) + kz = z - GRID; + else + kz = z; + + k2 = kx * kx + ky * ky + kz * kz; + + if(k2 > 0) + { + fx = fy = fz = 1; + if(kx != 0) + { + fx = (M_PI * kx) / GRID; + fx = sin(fx) / fx; + } + if(ky != 0) + { + fy = (M_PI * ky) / GRID; + fy = sin(fy) / fy; + } + if(kz != 0) + { + fz = (M_PI * kz) / GRID; + fz = sin(fz) / fz; + } + ff = 1 / (fx * fy * fz); + ff = ff * ff * ff * ff; + + ip = GRID * (GRID / 2 + 1) * (y - slabstart_y) + (GRID / 2 + 1) * x + z; +#if !defined(PERIODIC) + fft_of_kernel[0][ip].re *= ff; + fft_of_kernel[0][ip].im *= ff; +#endif +#if defined(PLACEHIGHRESREGION) + fft_of_kernel[1][ip].re *= ff; + fft_of_kernel[1][ip].im *= ff; +#endif + } + } + /* end deconvolution */ + + pm_init_nonperiodic_free(); +} + + + +/*! Calculates the long-range non-periodic forces using the PM method. The + * potential is Gaussian filtered with Asmth, given in mesh-cell units. The + * potential is finite differenced using a 4-point finite differencing + * formula to obtain the force fields, which are then interpolated to the + * particle positions. We carry out a CIC charge assignment, and compute the + * potenial by Fourier transform methods. The CIC kernel is deconvolved. + */ +int pmforce_nonperiodic(int grnr) +{ + double dx, dy, dz; + double fac, to_slab_fac; + double re, im, acc_dim; + int i, j, slab, level, sendTask, recvTask, flag, flagsum; + int x, y, z, xl, yl, zl, xr, yr, zr, xll, yll, zll, xrr, yrr, zrr, ip, dim; + int slab_x, slab_y, slab_z; + int slab_xx, slab_yy, slab_zz; + int meshmin[3], meshmax[3], sendmin, sendmax, recvmin, recvmax; + int dimx, dimy, dimz, recv_dimx, recv_dimy, recv_dimz; + MPI_Status status; + + if(ThisTask == 0) + printf("Starting non-periodic PM calculation (grid=%d).\n", grnr); + + fac = All.G / pow(All.TotalMeshSize[grnr], 4) * pow(All.TotalMeshSize[grnr] / GRID, 3); /* to get potential */ + fac *= 1 / (2 * All.TotalMeshSize[grnr] / GRID); /* for finite differencing */ + + to_slab_fac = GRID / All.TotalMeshSize[grnr]; + + + /* first, establish the extension of the local patch in GRID (for binning) */ + + for(j = 0; j < 3; j++) + { + meshmin[j] = GRID; + meshmax[j] = 0; + } + + for(i = 0, flag = 0; i < NumPart; i++) + { +#ifdef PLACEHIGHRESREGION + if(grnr == 0 || (grnr == 1 && ((1 << P[i].Type) & (PLACEHIGHRESREGION)))) +#endif + { + for(j = 0; j < 3; j++) + { + if(P[i].Pos[j] < All.Xmintot[grnr][j] || P[i].Pos[j] > All.Xmaxtot[grnr][j]) + { + if(flag == 0) + { + printf + ("Particle Id=%d on task=%d with coordinates (%g|%g|%g) lies outside PM mesh.\nStopping\n", + (int)P[i].ID, ThisTask, P[i].Pos[0], P[i].Pos[1], P[i].Pos[2]); + fflush(stdout); + } + flag++; + break; + } + } + } + + if(flag > 0) + continue; + + if(P[i].Pos[0] >= All.Corner[grnr][0] && P[i].Pos[0] < All.UpperCorner[grnr][0]) + if(P[i].Pos[1] >= All.Corner[grnr][1] && P[i].Pos[1] < All.UpperCorner[grnr][1]) + if(P[i].Pos[2] >= All.Corner[grnr][2] && P[i].Pos[2] < All.UpperCorner[grnr][2]) + { + for(j = 0; j < 3; j++) + { + slab = to_slab_fac * (P[i].Pos[j] - All.Corner[grnr][j]); + + if(slab < meshmin[j]) + meshmin[j] = slab; + + if(slab > meshmax[j]) + meshmax[j] = 
slab; + } + } + } + + + MPI_Allreduce(&flag, &flagsum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + if(flagsum > 0) + { + if(ThisTask == 0) + { + printf("In total %d particles were outside allowed range.\n", flagsum); + fflush(stdout); + } + return 1; /* error - need to return because particle were outside allowed range */ + } + + MPI_Allgather(meshmin, 3, MPI_INT, meshmin_list, 3, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(meshmax, 3, MPI_INT, meshmax_list, 3, MPI_INT, MPI_COMM_WORLD); + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] + 2; + + + force_treefree(); + + pm_init_nonperiodic_allocate((dimx + 4) * (dimy + 4) * (dimz + 4)); + + for(i = 0; i < dimx * dimy * dimz; i++) + workspace[i] = 0; + + for(i = 0; i < NumPart; i++) + { + if(P[i].Pos[0] < All.Corner[grnr][0] || P[i].Pos[0] >= All.UpperCorner[grnr][0]) + continue; + if(P[i].Pos[1] < All.Corner[grnr][1] || P[i].Pos[1] >= All.UpperCorner[grnr][1]) + continue; + if(P[i].Pos[2] < All.Corner[grnr][2] || P[i].Pos[2] >= All.UpperCorner[grnr][2]) + continue; + + slab_x = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]); + dx = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]) - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]); + dy = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]) - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]); + dz = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]) - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + workspace[(slab_x * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_x * dimy + slab_yy) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * dy * (1.0 - dz); + workspace[(slab_x * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * dz; + workspace[(slab_x * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * dy * dz; + + workspace[(slab_xx * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_z] += P[i].Mass * (dx) * dy * (1.0 - dz); + workspace[(slab_xx * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (dx) * (1.0 - dy) * dz; + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (dx) * dy * dz; + } + + + for(i = 0; i < fftsize; i++) /* clear local density field */ + rhogrid[i] = 0; + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + if(recvTask < NTask) + { + /* check how much we have to send */ + sendmin = 2 * GRID; + sendmax = -1; + for(slab_x = meshmin[0]; slab_x < meshmax[0] + 2; slab_x++) + if(slab_to_task[slab_x] == recvTask) + { + if(slab_x < sendmin) + sendmin = slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -1) + sendmin = 0; + + /* check how much we have to receive */ + recvmin = 2 * GRID; + recvmax = -1; + for(slab_x = meshmin_list[3 * recvTask]; slab_x < meshmax_list[3 * recvTask] + 2; slab_x++) + if(slab_to_task[slab_x] == sendTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -1) + recvmin = 0; + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 2; + recv_dimy = meshmax_list[3 * 
recvTask + 1] - meshmin_list[3 * recvTask + 1] + 2; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 2; + + if(level > 0) + { + MPI_Sendrecv(workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, recvTask, + TAG_NONPERIOD_A, forcegrid, + (recvmax - recvmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_NONPERIOD_A, MPI_COMM_WORLD, &status); + } + else + { + memcpy(forcegrid, workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + + for(slab_x = recvmin; slab_x <= recvmax; slab_x++) + { + slab_xx = slab_x - first_slab_of_task[ThisTask]; + + if(slab_xx >= 0 && slab_xx < slabs_per_task[ThisTask]) + { + for(slab_y = meshmin_list[3 * recvTask + 1]; + slab_y <= meshmax_list[3 * recvTask + 1] + 1; slab_y++) + { + slab_yy = slab_y; + + for(slab_z = meshmin_list[3 * recvTask + 2]; + slab_z <= meshmax_list[3 * recvTask + 2] + 1; slab_z++) + { + slab_zz = slab_z; + + rhogrid[GRID * GRID2 * slab_xx + GRID2 * slab_yy + slab_zz] += + forcegrid[((slab_x - recvmin) * recv_dimy + + (slab_y - meshmin_list[3 * recvTask + 1])) * recv_dimz + + (slab_z - meshmin_list[3 * recvTask + 2])]; + } + } + } + } + } + } + } + + + /* Do the FFT of the density field */ + + rfftwnd_mpi(fft_forward_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + + /* multiply with the Fourier transform of the Green's function (kernel) */ + + for(y = 0; y < nslab_y; y++) + for(x = 0; x < GRID; x++) + for(z = 0; z < GRID / 2 + 1; z++) + { + ip = GRID * (GRID / 2 + 1) * y + (GRID / 2 + 1) * x + z; + + re = + fft_of_rhogrid[ip].re * fft_of_kernel[grnr][ip].re - + fft_of_rhogrid[ip].im * fft_of_kernel[grnr][ip].im; + + im = + fft_of_rhogrid[ip].re * fft_of_kernel[grnr][ip].im + + fft_of_rhogrid[ip].im * fft_of_kernel[grnr][ip].re; + + fft_of_rhogrid[ip].re = re; + fft_of_rhogrid[ip].im = im; + } + + /* get the potential by inverse FFT */ + + rfftwnd_mpi(fft_inverse_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + /* Now rhogrid holds the potential */ + /* construct the potential for the local patch */ + + + /* if we have a high-res mesh, establish the extension of the local patch in GRID (for reading out the + * forces) + */ + +#ifdef PLACEHIGHRESREGION + if(grnr == 1) + { + for(j = 0; j < 3; j++) + { + meshmin[j] = GRID; + meshmax[j] = 0; + } + + for(i = 0; i < NumPart; i++) + { + if(!((1 << P[i].Type) & (PLACEHIGHRESREGION))) + continue; + + + if(P[i].Pos[0] >= All.Corner[grnr][0] && P[i].Pos[0] < All.UpperCorner[grnr][0]) + if(P[i].Pos[1] >= All.Corner[grnr][1] && P[i].Pos[1] < All.UpperCorner[grnr][1]) + if(P[i].Pos[2] >= All.Corner[grnr][2] && P[i].Pos[2] < All.UpperCorner[grnr][2]) + { + for(j = 0; j < 3; j++) + { + slab = to_slab_fac * (P[i].Pos[j] - All.Corner[grnr][j]); + + if(slab < meshmin[j]) + meshmin[j] = slab; + + if(slab > meshmax[j]) + meshmax[j] = slab; + } + } + } + + MPI_Allgather(meshmin, 3, MPI_INT, meshmin_list, 3, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(meshmax, 3, MPI_INT, meshmax_list, 3, MPI_INT, MPI_COMM_WORLD); + } +#endif + + dimx = meshmax[0] - meshmin[0] + 6; + dimy = meshmax[1] - meshmin[1] + 6; + dimz = meshmax[2] - meshmin[2] + 6; + + for(j = 0; j < 3; j++) + { + if(meshmin[j] < 2) + endrun(131231); + if(meshmax[j] > GRID / 2 - 3) + endrun(131288); + } + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + 
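+
+      /* Illustrative note (not part of the original code): the XOR pairing
+       * recvTask = ThisTask ^ level lets every task exchange data with exactly
+       * one partner per level, and the pairing is symmetric.  For example,
+       * with NTask = 4 and ThisTask = 2 the partners over levels 0..3 are
+       * 2, 3, 0, 1, while task 3 computes the mirror sequence 3, 2, 1, 0, so
+       * the matching MPI_Sendrecv calls below always line up at each level.
+       */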
+ if(recvTask < NTask) + { + /* check how much we have to send */ + sendmin = 2 * GRID; + sendmax = -GRID; + for(slab_x = meshmin_list[3 * recvTask] - 2; slab_x < meshmax_list[3 * recvTask] + 4; slab_x++) + if(slab_to_task[slab_x] == sendTask) + { + if(slab_x < sendmin) + sendmin = slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -GRID) + sendmin = sendmax + 1; + + + /* check how much we have to receive */ + recvmin = 2 * GRID; + recvmax = -GRID; + for(slab_x = meshmin[0] - 2; slab_x < meshmax[0] + 4; slab_x++) + if(slab_to_task[slab_x] == recvTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -GRID) + recvmin = recvmax + 1; + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 6; + recv_dimy = meshmax_list[3 * recvTask + 1] - meshmin_list[3 * recvTask + 1] + 6; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 6; + + /* prepare what we want to send */ + if(sendmax - sendmin >= 0) + { + for(slab_x = sendmin; slab_x <= sendmax; slab_x++) + { + slab_xx = slab_x - first_slab_of_task[ThisTask]; + + for(slab_y = meshmin_list[3 * recvTask + 1] - 2; + slab_y < meshmax_list[3 * recvTask + 1] + 4; slab_y++) + { + slab_yy = slab_y; + + for(slab_z = meshmin_list[3 * recvTask + 2] - 2; + slab_z < meshmax_list[3 * recvTask + 2] + 4; slab_z++) + { + slab_zz = slab_z; + + forcegrid[((slab_x - sendmin) * recv_dimy + + (slab_y - (meshmin_list[3 * recvTask + 1] - 2))) * recv_dimz + + slab_z - (meshmin_list[3 * recvTask + 2] - 2)] = + rhogrid[GRID * GRID2 * slab_xx + GRID2 * slab_yy + slab_zz]; + } + } + } + } + + if(level > 0) + { + MPI_Sendrecv(forcegrid, + (sendmax - sendmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), + MPI_BYTE, recvTask, TAG_NONPERIOD_B, + workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + (recvmax - recvmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_NONPERIOD_B, MPI_COMM_WORLD, &status); + } + else + { + memcpy(workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + forcegrid, (recvmax - recvmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + } + } + } + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] + 2; + + recv_dimx = meshmax[0] - meshmin[0] + 6; + recv_dimy = meshmax[1] - meshmin[1] + 6; + recv_dimz = meshmax[2] - meshmin[2] + 6; + + + for(dim = 0; dim < 3; dim++) /* Calculate each component of the force. 
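+         * The stencil applied in this loop is the 4-point (O(h^4)) first
+         * derivative; schematically, for the x-component (editorial sketch),
+         *
+         *   a_x = -dphi/dx
+         *       ~ [ (4.0/3)*(phi(x-h) - phi(x+h)) - (1.0/6)*(phi(x-2h) - phi(x+2h)) ] / (2h)
+         *
+         * where h is the mesh spacing; the 1/(2h) factor, together with G and
+         * the mesh normalisation, is already folded into `fac` above.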
*/ + { + /* get the force component by finite differencing the potential */ + /* note: "workspace" now contains the potential for the local patch, plus a suffiently large buffer region */ + + for(x = 0; x < meshmax[0] - meshmin[0] + 2; x++) + for(y = 0; y < meshmax[1] - meshmin[1] + 2; y++) + for(z = 0; z < meshmax[2] - meshmin[2] + 2; z++) + { + xrr = xll = xr = xl = x; + yrr = yll = yr = yl = y; + zrr = zll = zr = zl = z; + + switch (dim) + { + case 0: + xr = x + 1; + xrr = x + 2; + xl = x - 1; + xll = x - 2; + break; + case 1: + yr = y + 1; + yl = y - 1; + yrr = y + 2; + yll = y - 2; + break; + case 2: + zr = z + 1; + zl = z - 1; + zrr = z + 2; + zll = z - 2; + break; + } + + forcegrid[(x * dimy + y) * dimz + z] + = + fac * ((4.0 / 3) * + (workspace[((xl + 2) * recv_dimy + (yl + 2)) * recv_dimz + (zl + 2)] + - workspace[((xr + 2) * recv_dimy + (yr + 2)) * recv_dimz + (zr + 2)]) - + (1.0 / 6) * + (workspace[((xll + 2) * recv_dimy + (yll + 2)) * recv_dimz + (zll + 2)] - + workspace[((xrr + 2) * recv_dimy + (yrr + 2)) * recv_dimz + (zrr + 2)])); + } + + + /* read out the forces */ + + for(i = 0; i < NumPart; i++) + { +#ifdef PLACEHIGHRESREGION + if(grnr == 1) + if(!((1 << P[i].Type) & (PLACEHIGHRESREGION))) + continue; +#endif + slab_x = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]); + dx = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]) - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]); + dy = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]) - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]); + dz = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]) - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + acc_dim = + forcegrid[(slab_x * dimy + slab_y) * dimz + slab_z] * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + acc_dim += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_z] * (1.0 - dx) * dy * (1.0 - dz); + acc_dim += forcegrid[(slab_x * dimy + slab_y) * dimz + slab_zz] * (1.0 - dx) * (1.0 - dy) * dz; + acc_dim += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_zz] * (1.0 - dx) * dy * dz; + + acc_dim += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_z] * (dx) * (1.0 - dy) * (1.0 - dz); + acc_dim += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_z] * (dx) * dy * (1.0 - dz); + acc_dim += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_zz] * (dx) * (1.0 - dy) * dz; + acc_dim += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_zz] * (dx) * dy * dz; + + P[i].GravPM[dim] += acc_dim; + } + } + + pm_init_nonperiodic_free(); + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + + if(ThisTask == 0) + printf("done PM.\n"); + + return 0; +} + + + + +/*! Calculates the long-range non-periodic potential using the PM method. The + * potential is Gaussian filtered with Asmth, given in mesh-cell units. We + * carry out a CIC charge assignment, and compute the potenial by Fourier + * transform methods. The CIC kernel is deconvolved. 
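+ *
+ * For reference (editorial sketch, echoing the assignment loop below): the
+ * cloud-in-cell (CIC) weights used both for the mass assignment and for
+ * reading the potential back are the usual trilinear ones.  With dx, dy, dz
+ * the fractional offsets of a particle inside its cell, the 8 surrounding
+ * grid points receive
+ *
+ *   w(000) = (1-dx)(1-dy)(1-dz),  w(100) = dx(1-dy)(1-dz), ...,  w(111) = dx dy dz,
+ *
+ * and the eight weights always sum to 1; e.g. dx = dy = dz = 0.5 gives a
+ * weight of 0.125 on every neighbour.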
+ */ +int pmpotential_nonperiodic(int grnr) +{ + double dx, dy, dz; + double fac, to_slab_fac; + double re, im, pot; + int i, j, slab, level, sendTask, recvTask, flag, flagsum; + int x, y, z, ip; + int slab_x, slab_y, slab_z; + int slab_xx, slab_yy, slab_zz; + int meshmin[3], meshmax[3], sendmin, sendmax, recvmin, recvmax; + int dimx, dimy, dimz, recv_dimx, recv_dimy, recv_dimz; + MPI_Status status; + + + if(ThisTask == 0) + printf("Starting non-periodic PM-potential calculation.\n"); + + fac = All.G / pow(All.TotalMeshSize[grnr], 4) * pow(All.TotalMeshSize[grnr] / GRID, 3); /* to get potential */ + + to_slab_fac = GRID / All.TotalMeshSize[grnr]; + + /* first, establish the extension of the local patch in GRID (for binning) */ + + for(j = 0; j < 3; j++) + { + meshmin[j] = GRID; + meshmax[j] = 0; + } + + for(i = 0, flag = 0; i < NumPart; i++) + { +#ifdef PLACEHIGHRESREGION + if(grnr == 0 || (grnr == 1 && ((1 << P[i].Type) & (PLACEHIGHRESREGION)))) +#endif + { + for(j = 0; j < 3; j++) + { + if(P[i].Pos[j] < All.Xmintot[grnr][j] || P[i].Pos[j] > All.Xmaxtot[grnr][j]) + { + if(flag == 0) + { + printf + ("Particle Id=%d on task=%d with coordinates (%g|%g|%g) lies outside PM mesh.\nStopping\n", + (int)P[i].ID, ThisTask, P[i].Pos[0], P[i].Pos[1], P[i].Pos[2]); + fflush(stdout); + } + flag++; + break; + } + } + } + + if(flag > 0) + continue; + + if(P[i].Pos[0] >= All.Corner[grnr][0] && P[i].Pos[0] < All.UpperCorner[grnr][0]) + if(P[i].Pos[1] >= All.Corner[grnr][1] && P[i].Pos[1] < All.UpperCorner[grnr][1]) + if(P[i].Pos[2] >= All.Corner[grnr][2] && P[i].Pos[2] < All.UpperCorner[grnr][2]) + { + for(j = 0; j < 3; j++) + { + slab = to_slab_fac * (P[i].Pos[j] - All.Corner[grnr][j]); + + if(slab < meshmin[j]) + meshmin[j] = slab; + + if(slab > meshmax[j]) + meshmax[j] = slab; + } + } + } + + + MPI_Allreduce(&flag, &flagsum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + if(flagsum > 0) + { + if(ThisTask == 0) + { + printf("In total %d particles were outside allowed range.\n", flagsum); + fflush(stdout); + } + return 1; /* error - need to return because particle were outside allowed range */ + } + + + + MPI_Allgather(meshmin, 3, MPI_INT, meshmin_list, 3, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(meshmax, 3, MPI_INT, meshmax_list, 3, MPI_INT, MPI_COMM_WORLD); + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] + 2; + + + force_treefree(); + + pm_init_nonperiodic_allocate((dimx + 4) * (dimy + 4) * (dimz + 4)); + + for(i = 0; i < dimx * dimy * dimz; i++) + workspace[i] = 0; + + for(i = 0; i < NumPart; i++) + { + if(P[i].Pos[0] < All.Corner[grnr][0] || P[i].Pos[0] >= All.UpperCorner[grnr][0]) + continue; + if(P[i].Pos[1] < All.Corner[grnr][1] || P[i].Pos[1] >= All.UpperCorner[grnr][1]) + continue; + if(P[i].Pos[2] < All.Corner[grnr][2] || P[i].Pos[2] >= All.UpperCorner[grnr][2]) + continue; + + slab_x = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]); + dx = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]) - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]); + dy = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]) - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]); + dz = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]) - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + workspace[(slab_x * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_x * dimy + 
slab_yy) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * dy * (1.0 - dz); + workspace[(slab_x * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * dz; + workspace[(slab_x * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * dy * dz; + + workspace[(slab_xx * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_z] += P[i].Mass * (dx) * dy * (1.0 - dz); + workspace[(slab_xx * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (dx) * (1.0 - dy) * dz; + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (dx) * dy * dz; + } + + + for(i = 0; i < fftsize; i++) /* clear local density field */ + rhogrid[i] = 0; + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + if(recvTask < NTask) + { + /* check how much we have to send */ + sendmin = 2 * GRID; + sendmax = -1; + for(slab_x = meshmin[0]; slab_x < meshmax[0] + 2; slab_x++) + if(slab_to_task[slab_x] == recvTask) + { + if(slab_x < sendmin) + sendmin = slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -1) + sendmin = 0; + + /* check how much we have to receive */ + recvmin = 2 * GRID; + recvmax = -1; + for(slab_x = meshmin_list[3 * recvTask]; slab_x < meshmax_list[3 * recvTask] + 2; slab_x++) + if(slab_to_task[slab_x] == sendTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -1) + recvmin = 0; + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 2; + recv_dimy = meshmax_list[3 * recvTask + 1] - meshmin_list[3 * recvTask + 1] + 2; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 2; + + if(level > 0) + { + MPI_Sendrecv(workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, recvTask, + TAG_NONPERIOD_C, forcegrid, + (recvmax - recvmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_NONPERIOD_C, MPI_COMM_WORLD, &status); + } + else + { + memcpy(forcegrid, workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + + for(slab_x = recvmin; slab_x <= recvmax; slab_x++) + { + slab_xx = slab_x - first_slab_of_task[ThisTask]; + + if(slab_xx >= 0 && slab_xx < slabs_per_task[ThisTask]) + { + for(slab_y = meshmin_list[3 * recvTask + 1]; + slab_y <= meshmax_list[3 * recvTask + 1] + 1; slab_y++) + { + slab_yy = slab_y; + + for(slab_z = meshmin_list[3 * recvTask + 2]; + slab_z <= meshmax_list[3 * recvTask + 2] + 1; slab_z++) + { + slab_zz = slab_z; + + rhogrid[GRID * GRID2 * slab_xx + GRID2 * slab_yy + slab_zz] += + forcegrid[((slab_x - recvmin) * recv_dimy + + (slab_y - meshmin_list[3 * recvTask + 1])) * recv_dimz + + (slab_z - meshmin_list[3 * recvTask + 2])]; + } + } + } + } + } + } + } + + + /* Do the FFT of the density field */ + + rfftwnd_mpi(fft_forward_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + + /* multiply with the Fourier transform of the Green's function (kernel) */ + + for(y = 0; y < nslab_y; y++) + for(x = 0; x < GRID; x++) + for(z = 0; z < GRID / 2 + 1; z++) + { + ip = GRID * (GRID / 2 + 1) * y + (GRID / 2 + 1) * x + z; + + re = + fft_of_rhogrid[ip].re * fft_of_kernel[grnr][ip].re - + fft_of_rhogrid[ip].im * 
fft_of_kernel[grnr][ip].im; + + im = + fft_of_rhogrid[ip].re * fft_of_kernel[grnr][ip].im + + fft_of_rhogrid[ip].im * fft_of_kernel[grnr][ip].re; + + fft_of_rhogrid[ip].re = fac * re; + fft_of_rhogrid[ip].im = fac * im; + } + + /* get the potential by inverse FFT */ + + rfftwnd_mpi(fft_inverse_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + /* Now rhogrid holds the potential */ + /* construct the potential for the local patch */ + + + /* if we have a high-res mesh, establish the extension of the local patch in GRID (for reading out the + * forces) + */ + +#ifdef PLACEHIGHRESREGION + if(grnr == 1) + { + for(j = 0; j < 3; j++) + { + meshmin[j] = GRID; + meshmax[j] = 0; + } + + for(i = 0; i < NumPart; i++) + { + if(!((1 << P[i].Type) & (PLACEHIGHRESREGION))) + continue; + + + if(P[i].Pos[0] >= All.Corner[grnr][0] && P[i].Pos[0] < All.UpperCorner[grnr][0]) + if(P[i].Pos[1] >= All.Corner[grnr][1] && P[i].Pos[1] < All.UpperCorner[grnr][1]) + if(P[i].Pos[2] >= All.Corner[grnr][2] && P[i].Pos[2] < All.UpperCorner[grnr][2]) + { + for(j = 0; j < 3; j++) + { + slab = to_slab_fac * (P[i].Pos[j] - All.Corner[grnr][j]); + + if(slab < meshmin[j]) + meshmin[j] = slab; + + if(slab > meshmax[j]) + meshmax[j] = slab; + } + } + } + + MPI_Allgather(meshmin, 3, MPI_INT, meshmin_list, 3, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(meshmax, 3, MPI_INT, meshmax_list, 3, MPI_INT, MPI_COMM_WORLD); + } +#endif + + dimx = meshmax[0] - meshmin[0] + 6; + dimy = meshmax[1] - meshmin[1] + 6; + dimz = meshmax[2] - meshmin[2] + 6; + + for(j = 0; j < 3; j++) + { + if(meshmin[j] < 2) + endrun(131231); + if(meshmax[j] > GRID / 2 - 3) + endrun(131288); + } + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + + if(recvTask < NTask) + { + /* check how much we have to send */ + sendmin = 2 * GRID; + sendmax = -GRID; + for(slab_x = meshmin_list[3 * recvTask] - 2; slab_x < meshmax_list[3 * recvTask] + 4; slab_x++) + if(slab_to_task[slab_x] == sendTask) + { + if(slab_x < sendmin) + sendmin = slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -GRID) + sendmin = sendmax + 1; + + + /* check how much we have to receive */ + recvmin = 2 * GRID; + recvmax = -GRID; + for(slab_x = meshmin[0] - 2; slab_x < meshmax[0] + 4; slab_x++) + if(slab_to_task[slab_x] == recvTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -GRID) + recvmin = recvmax + 1; + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 6; + recv_dimy = meshmax_list[3 * recvTask + 1] - meshmin_list[3 * recvTask + 1] + 6; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 6; + + /* prepare what we want to send */ + if(sendmax - sendmin >= 0) + { + for(slab_x = sendmin; slab_x <= sendmax; slab_x++) + { + slab_xx = slab_x - first_slab_of_task[ThisTask]; + + for(slab_y = meshmin_list[3 * recvTask + 1] - 2; + slab_y < meshmax_list[3 * recvTask + 1] + 4; slab_y++) + { + slab_yy = slab_y; + + for(slab_z = meshmin_list[3 * recvTask + 2] - 2; + slab_z < meshmax_list[3 * recvTask + 2] + 4; slab_z++) + { + slab_zz = slab_z; + + forcegrid[((slab_x - sendmin) * recv_dimy + + (slab_y - (meshmin_list[3 * recvTask + 1] - 2))) * recv_dimz + + slab_z - (meshmin_list[3 * recvTask + 2] - 2)] = + rhogrid[GRID * GRID2 * slab_xx + GRID2 * slab_yy 
+ slab_zz]; + } + } + } + } + + if(level > 0) + { + MPI_Sendrecv(forcegrid, + (sendmax - sendmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), + MPI_BYTE, recvTask, TAG_NONPERIOD_D, + workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + (recvmax - recvmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_NONPERIOD_D, MPI_COMM_WORLD, &status); + } + else + { + memcpy(workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + forcegrid, (recvmax - recvmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + } + } + } + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] + 2; + + recv_dimx = meshmax[0] - meshmin[0] + 6; + recv_dimy = meshmax[1] - meshmin[1] + 6; + recv_dimz = meshmax[2] - meshmin[2] + 6; + + + for(x = 0; x < meshmax[0] - meshmin[0] + 2; x++) + for(y = 0; y < meshmax[1] - meshmin[1] + 2; y++) + for(z = 0; z < meshmax[2] - meshmin[2] + 2; z++) + { + forcegrid[(x * dimy + y) * dimz + z] + = workspace[((x + 2) * recv_dimy + (y + 2)) * recv_dimz + (z + 2)]; + } + + + /* read out the potential */ + + for(i = 0; i < NumPart; i++) + { +#ifdef PLACEHIGHRESREGION + if(grnr == 1) + if(!((1 << P[i].Type) & (PLACEHIGHRESREGION))) + continue; +#endif + slab_x = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]); + dx = to_slab_fac * (P[i].Pos[0] - All.Corner[grnr][0]) - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]); + dy = to_slab_fac * (P[i].Pos[1] - All.Corner[grnr][1]) - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]); + dz = to_slab_fac * (P[i].Pos[2] - All.Corner[grnr][2]) - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + pot = forcegrid[(slab_x * dimy + slab_y) * dimz + slab_z] * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + pot += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_z] * (1.0 - dx) * dy * (1.0 - dz); + pot += forcegrid[(slab_x * dimy + slab_y) * dimz + slab_zz] * (1.0 - dx) * (1.0 - dy) * dz; + pot += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_zz] * (1.0 - dx) * dy * dz; + + pot += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_z] * (dx) * (1.0 - dy) * (1.0 - dz); + pot += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_z] * (dx) * dy * (1.0 - dz); + pot += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_zz] * (dx) * (1.0 - dy) * dz; + pot += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_zz] * (dx) * dy * dz; + + P[i].Potential += pot; + } + + pm_init_nonperiodic_free(); + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + + if(ThisTask == 0) + printf("done PM-potential.\n"); + + return 0; +} + + +#endif +#endif diff --git a/src/PyGadget/src/pm_nonperiodic.o b/src/PyGadget/src/pm_nonperiodic.o new file mode 100644 index 0000000..a0b225d Binary files /dev/null and b/src/PyGadget/src/pm_nonperiodic.o differ diff --git a/src/PyGadget/src/pm_periodic.c b/src/PyGadget/src/pm_periodic.c new file mode 100644 index 0000000..45db15d --- /dev/null +++ b/src/PyGadget/src/pm_periodic.c @@ -0,0 +1,1158 @@ +#include +#include +#include +#include +#include +#include + +/*! 
\file pm_periodic.c + * \brief routines for periodic PM-force computation + */ + +#ifdef PMGRID +#ifdef PERIODIC + +#ifdef NOTYPEPREFIX_FFTW +#include +#else +#ifdef DOUBLEPRECISION_FFTW +#include /* double precision FFTW */ +#else +#include +#endif +#endif + + +#include "allvars.h" +#include "proto.h" + +#define PMGRID2 (2*(PMGRID/2 + 1)) + + + + +static rfftwnd_mpi_plan fft_forward_plan, fft_inverse_plan; + +static int slab_to_task[PMGRID]; +static int *slabs_per_task; +static int *first_slab_of_task; +static int *meshmin_list, *meshmax_list; + +static int slabstart_x, nslab_x, slabstart_y, nslab_y, smallest_slab; + +static int fftsize, maxfftsize; + +static fftw_real *rhogrid, *forcegrid, *workspace; +static fftw_complex *fft_of_rhogrid; + + +static FLOAT to_slab_fac; + + +/*! This routines generates the FFTW-plans to carry out the parallel FFTs + * later on. Some auxiliary variables are also initialized. + */ +void pm_init_periodic(void) +{ + int i; + int slab_to_task_local[PMGRID]; + + All.Asmth[0] = ASMTH * All.BoxSize / PMGRID; + All.Rcut[0] = RCUT * All.Asmth[0]; + + /* Set up the FFTW plan files. */ + + fft_forward_plan = rfftw3d_mpi_create_plan(MPI_COMM_WORLD, PMGRID, PMGRID, PMGRID, + FFTW_REAL_TO_COMPLEX, FFTW_ESTIMATE | FFTW_IN_PLACE); + fft_inverse_plan = rfftw3d_mpi_create_plan(MPI_COMM_WORLD, PMGRID, PMGRID, PMGRID, + FFTW_COMPLEX_TO_REAL, FFTW_ESTIMATE | FFTW_IN_PLACE); + + /* Workspace out the ranges on each processor. */ + + rfftwnd_mpi_local_sizes(fft_forward_plan, &nslab_x, &slabstart_x, &nslab_y, &slabstart_y, &fftsize); + + for(i = 0; i < PMGRID; i++) + slab_to_task_local[i] = 0; + + for(i = 0; i < nslab_x; i++) + slab_to_task_local[slabstart_x + i] = ThisTask; + + MPI_Allreduce(slab_to_task_local, slab_to_task, PMGRID, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + MPI_Allreduce(&nslab_x, &smallest_slab, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD); + + slabs_per_task = malloc(NTask * sizeof(int)); + MPI_Allgather(&nslab_x, 1, MPI_INT, slabs_per_task, 1, MPI_INT, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + for(i = 0; i < NTask; i++) + printf("Task=%d FFT-Slabs=%d\n", i, slabs_per_task[i]); + } + + first_slab_of_task = malloc(NTask * sizeof(int)); + MPI_Allgather(&slabstart_x, 1, MPI_INT, first_slab_of_task, 1, MPI_INT, MPI_COMM_WORLD); + + meshmin_list = malloc(3 * NTask * sizeof(int)); + meshmax_list = malloc(3 * NTask * sizeof(int)); + + + to_slab_fac = PMGRID / All.BoxSize; + + MPI_Allreduce(&fftsize, &maxfftsize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); +} + + +/*! This function allocates the memory neeed to compute the long-range PM + * force. Three fields are used, one to hold the density (and its FFT, and + * then the real-space potential), one to hold the force field obtained by + * finite differencing, and finally a workspace field, which is used both as + * workspace for the parallel FFT, and as buffer for the communication + * algorithm used in the force computation. 
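+ *
+ * A small layout note (sketch, not from the original comment): because the
+ * transform is carried out in place, each mesh row of rhogrid is padded from
+ * PMGRID real values to PMGRID2 = 2*(PMGRID/2+1), so that the same storage
+ * can hold the PMGRID/2+1 complex coefficients after the forward FFT;
+ * fft_of_rhogrid below is simply rhogrid reinterpreted as fftw_complex,
+ * not a separate allocation.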
+ */ +void pm_init_periodic_allocate(int dimprod) +{ + static int first_alloc = 1; + int dimprodmax; + double bytes_tot = 0; + size_t bytes; + + MPI_Allreduce(&dimprod, &dimprodmax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + + /* allocate the memory to hold the FFT fields */ + + if(!(rhogrid = (fftw_real *) malloc(bytes = fftsize * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-rhogrid' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + + + if(!(forcegrid = (fftw_real *) malloc(bytes = imax(fftsize, dimprodmax) * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-forcegrid' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + + if(!(workspace = (fftw_real *) malloc(bytes = imax(maxfftsize, dimprodmax) * sizeof(fftw_real)))) + { + printf("failed to allocate memory for `FFT-workspace' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + bytes_tot += bytes; + + if(first_alloc == 1) + { + first_alloc = 0; + if(ThisTask == 0) + printf("\nAllocated %g MByte for FFT data.\n\n", bytes_tot / (1024.0 * 1024.0)); + } + + fft_of_rhogrid = (fftw_complex *) & rhogrid[0]; +} + + + +/*! This routine frees the space allocated for the parallel FFT algorithm. + */ +void pm_init_periodic_free(void) +{ + /* allocate the memory to hold the FFT fields */ + free(workspace); + free(forcegrid); + free(rhogrid); +} + + + +/*! Calculates the long-range periodic force given the particle positions + * using the PM method. The force is Gaussian filtered with Asmth, given in + * mesh-cell units. We carry out a CIC charge assignment, and compute the + * potenial by Fourier transform methods. The potential is finite differenced + * using a 4-point finite differencing formula, and the forces are + * interpolated tri-linearly to the particle positions. The CIC kernel is + * deconvolved. Note that the particle distribution is not in the slab + * decomposition that is used for the FFT. Instead, overlapping patches + * between local domains and FFT slabs are communicated as needed. 
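+ *
+ * In Fourier space this amounts to multiplying each density mode by
+ * (editorial sketch of what the loop over (kx, ky, kz) below computes,
+ * with sinc(u) = sin(u)/u and k measured in integer grid units)
+ *
+ *   smth(k) = -exp( -k^2 * (2*pi*Asmth/BoxSize)^2 ) / k^2
+ *             * [ sinc(pi*kx/PMGRID) * sinc(pi*ky/PMGRID) * sinc(pi*kz/PMGRID) ]^-4
+ *
+ * i.e. the Gaussian-truncated long-range Green's function times a sinc^-4
+ * factor that deconvolves the CIC kernel twice: once for the mass assignment
+ * and once for the interpolation of the force back to the particles.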
+ */ +void pmforce_periodic(void) +{ + double k2, kx, ky, kz, smth; + double dx, dy, dz; + double fx, fy, fz, ff; + double asmth2, fac, acc_dim; + int i, j, slab, level, sendTask, recvTask; + int x, y, z, xl, yl, zl, xr, yr, zr, xll, yll, zll, xrr, yrr, zrr, ip, dim; + int slab_x, slab_y, slab_z; + int slab_xx, slab_yy, slab_zz; + int meshmin[3], meshmax[3], sendmin, sendmax, recvmin, recvmax; + int rep, ncont, cont_sendmin[2], cont_sendmax[2], cont_recvmin[2], cont_recvmax[2]; + int dimx, dimy, dimz, recv_dimx, recv_dimy, recv_dimz; + MPI_Status status; + + + if(ThisTask == 0) + { + printf("Starting periodic PM calculation.\n"); + fflush(stdout); + } + + + force_treefree(); + + + asmth2 = (2 * M_PI) * All.Asmth[0] / All.BoxSize; + asmth2 *= asmth2; + + fac = All.G / (M_PI * All.BoxSize); /* to get potential */ + fac *= 1 / (2 * All.BoxSize / PMGRID); /* for finite differencing */ + + /* first, establish the extension of the local patch in the PMGRID */ + + for(j = 0; j < 3; j++) + { + meshmin[j] = PMGRID; + meshmax[j] = 0; + } + + for(i = 0; i < NumPart; i++) + { + for(j = 0; j < 3; j++) + { + slab = to_slab_fac * P[i].Pos[j]; + if(slab >= PMGRID) + slab = PMGRID - 1; + + if(slab < meshmin[j]) + meshmin[j] = slab; + + if(slab > meshmax[j]) + meshmax[j] = slab; + } + } + + MPI_Allgather(meshmin, 3, MPI_INT, meshmin_list, 3, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(meshmax, 3, MPI_INT, meshmax_list, 3, MPI_INT, MPI_COMM_WORLD); + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] + 2; + + pm_init_periodic_allocate((dimx + 4) * (dimy + 4) * (dimz + 4)); + + for(i = 0; i < dimx * dimy * dimz; i++) + workspace[i] = 0; + + for(i = 0; i < NumPart; i++) + { + slab_x = to_slab_fac * P[i].Pos[0]; + if(slab_x >= PMGRID) + slab_x = PMGRID - 1; + dx = to_slab_fac * P[i].Pos[0] - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * P[i].Pos[1]; + if(slab_y >= PMGRID) + slab_y = PMGRID - 1; + dy = to_slab_fac * P[i].Pos[1] - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * P[i].Pos[2]; + if(slab_z >= PMGRID) + slab_z = PMGRID - 1; + dz = to_slab_fac * P[i].Pos[2] - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + workspace[(slab_x * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_x * dimy + slab_yy) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * dy * (1.0 - dz); + workspace[(slab_x * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * dz; + workspace[(slab_x * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * dy * dz; + + workspace[(slab_xx * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_z] += P[i].Mass * (dx) * dy * (1.0 - dz); + workspace[(slab_xx * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (dx) * (1.0 - dy) * dz; + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (dx) * dy * dz; + } + + + for(i = 0; i < fftsize; i++) /* clear local density field */ + rhogrid[i] = 0; + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + if(recvTask < NTask) + { + /* check how much we have to send */ + sendmin = 2 * PMGRID; + sendmax = -1; + for(slab_x = meshmin[0]; slab_x < meshmax[0] + 2; slab_x++) + if(slab_to_task[slab_x % PMGRID] == recvTask) + { + if(slab_x < sendmin) + sendmin = 
slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -1) + sendmin = 0; + + /* check how much we have to receive */ + recvmin = 2 * PMGRID; + recvmax = -1; + for(slab_x = meshmin_list[3 * recvTask]; slab_x < meshmax_list[3 * recvTask] + 2; slab_x++) + if(slab_to_task[slab_x % PMGRID] == sendTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -1) + recvmin = 0; + + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 2; + recv_dimy = meshmax_list[3 * recvTask + 1] - meshmin_list[3 * recvTask + 1] + 2; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 2; + + if(level > 0) + { + MPI_Sendrecv(workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, recvTask, + TAG_PERIODIC_A, forcegrid, + (recvmax - recvmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_PERIODIC_A, MPI_COMM_WORLD, &status); + } + else + { + memcpy(forcegrid, workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + + for(slab_x = recvmin; slab_x <= recvmax; slab_x++) + { + slab_xx = (slab_x % PMGRID) - first_slab_of_task[ThisTask]; + + if(slab_xx >= 0 && slab_xx < slabs_per_task[ThisTask]) + { + for(slab_y = meshmin_list[3 * recvTask + 1]; + slab_y <= meshmax_list[3 * recvTask + 1] + 1; slab_y++) + { + slab_yy = slab_y; + if(slab_yy >= PMGRID) + slab_yy -= PMGRID; + + for(slab_z = meshmin_list[3 * recvTask + 2]; + slab_z <= meshmax_list[3 * recvTask + 2] + 1; slab_z++) + { + slab_zz = slab_z; + if(slab_zz >= PMGRID) + slab_zz -= PMGRID; + + rhogrid[PMGRID * PMGRID2 * slab_xx + PMGRID2 * slab_yy + slab_zz] += + forcegrid[((slab_x - recvmin) * recv_dimy + + (slab_y - meshmin_list[3 * recvTask + 1])) * recv_dimz + + (slab_z - meshmin_list[3 * recvTask + 2])]; + } + } + } + } + } + } + } + + /* Do the FFT of the density field */ + + rfftwnd_mpi(fft_forward_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + /* multiply with Green's function for the potential */ + + for(y = slabstart_y; y < slabstart_y + nslab_y; y++) + for(x = 0; x < PMGRID; x++) + for(z = 0; z < PMGRID / 2 + 1; z++) + { + if(x > PMGRID / 2) + kx = x - PMGRID; + else + kx = x; + if(y > PMGRID / 2) + ky = y - PMGRID; + else + ky = y; + if(z > PMGRID / 2) + kz = z - PMGRID; + else + kz = z; + + k2 = kx * kx + ky * ky + kz * kz; + + if(k2 > 0) + { + smth = -exp(-k2 * asmth2) / k2; + + /* do deconvolution */ + + fx = fy = fz = 1; + if(kx != 0) + { + fx = (M_PI * kx) / PMGRID; + fx = sin(fx) / fx; + } + if(ky != 0) + { + fy = (M_PI * ky) / PMGRID; + fy = sin(fy) / fy; + } + if(kz != 0) + { + fz = (M_PI * kz) / PMGRID; + fz = sin(fz) / fz; + } + ff = 1 / (fx * fy * fz); + smth *= ff * ff * ff * ff; + + /* end deconvolution */ + + ip = PMGRID * (PMGRID / 2 + 1) * (y - slabstart_y) + (PMGRID / 2 + 1) * x + z; + fft_of_rhogrid[ip].re *= smth; + fft_of_rhogrid[ip].im *= smth; + } + } + + if(slabstart_y == 0) + fft_of_rhogrid[0].re = fft_of_rhogrid[0].im = 0.0; + + /* Do the FFT to get the potential */ + + rfftwnd_mpi(fft_inverse_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + /* Now rhogrid holds the potential */ + /* construct the potential for the local patch */ + + + dimx = meshmax[0] - meshmin[0] + 6; + dimy = meshmax[1] - meshmin[1] + 6; + dimz = meshmax[2] - 
meshmin[2] + 6; + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + + if(recvTask < NTask) + { + + /* check how much we have to send */ + sendmin = 2 * PMGRID; + sendmax = -PMGRID; + for(slab_x = meshmin_list[3 * recvTask] - 2; slab_x < meshmax_list[3 * recvTask] + 4; slab_x++) + if(slab_to_task[(slab_x + PMGRID) % PMGRID] == sendTask) + { + if(slab_x < sendmin) + sendmin = slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -PMGRID) + sendmin = sendmax + 1; + + + /* check how much we have to receive */ + recvmin = 2 * PMGRID; + recvmax = -PMGRID; + for(slab_x = meshmin[0] - 2; slab_x < meshmax[0] + 4; slab_x++) + if(slab_to_task[(slab_x + PMGRID) % PMGRID] == recvTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -PMGRID) + recvmin = recvmax + 1; + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 6; + recv_dimy = meshmax_list[3 * recvTask + 1] - meshmin_list[3 * recvTask + 1] + 6; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 6; + + ncont = 1; + cont_sendmin[0] = sendmin; + cont_sendmax[0] = sendmax; + cont_sendmin[1] = sendmax + 1; + cont_sendmax[1] = sendmax; + + cont_recvmin[0] = recvmin; + cont_recvmax[0] = recvmax; + cont_recvmin[1] = recvmax + 1; + cont_recvmax[1] = recvmax; + + for(slab_x = sendmin; slab_x <= sendmax; slab_x++) + { + if(slab_to_task[(slab_x + PMGRID) % PMGRID] != ThisTask) + { + /* non-contiguous */ + cont_sendmax[0] = slab_x - 1; + while(slab_to_task[(slab_x + PMGRID) % PMGRID] != ThisTask) + slab_x++; + cont_sendmin[1] = slab_x; + ncont++; + } + } + + for(slab_x = recvmin; slab_x <= recvmax; slab_x++) + { + if(slab_to_task[(slab_x + PMGRID) % PMGRID] != recvTask) + { + /* non-contiguous */ + cont_recvmax[0] = slab_x - 1; + while(slab_to_task[(slab_x + PMGRID) % PMGRID] != recvTask) + slab_x++; + cont_recvmin[1] = slab_x; + if(ncont == 1) + ncont++; + } + } + + + for(rep = 0; rep < ncont; rep++) + { + sendmin = cont_sendmin[rep]; + sendmax = cont_sendmax[rep]; + recvmin = cont_recvmin[rep]; + recvmax = cont_recvmax[rep]; + + /* prepare what we want to send */ + if(sendmax - sendmin >= 0) + { + for(slab_x = sendmin; slab_x <= sendmax; slab_x++) + { + slab_xx = ((slab_x + PMGRID) % PMGRID) - first_slab_of_task[ThisTask]; + + for(slab_y = meshmin_list[3 * recvTask + 1] - 2; + slab_y < meshmax_list[3 * recvTask + 1] + 4; slab_y++) + { + slab_yy = (slab_y + PMGRID) % PMGRID; + + for(slab_z = meshmin_list[3 * recvTask + 2] - 2; + slab_z <= meshmax_list[3 * recvTask + 2] + 4; slab_z++) + { + slab_zz = (slab_z + PMGRID) % PMGRID; + + forcegrid[((slab_x - sendmin) * recv_dimy + + (slab_y - (meshmin_list[3 * recvTask + 1] - 2))) * recv_dimz + + slab_z - (meshmin_list[3 * recvTask + 2] - 2)] = + rhogrid[PMGRID * PMGRID2 * slab_xx + PMGRID2 * slab_yy + slab_zz]; + } + } + } + } + + if(level > 0) + { + MPI_Sendrecv(forcegrid, + (sendmax - sendmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), + MPI_BYTE, recvTask, TAG_PERIODIC_B, + workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + (recvmax - recvmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_PERIODIC_B, MPI_COMM_WORLD, &status); + } + else + { + memcpy(workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + forcegrid, (recvmax - 
recvmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + } + } + } + } + + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] + 2; + + recv_dimx = meshmax[0] - meshmin[0] + 6; + recv_dimy = meshmax[1] - meshmin[1] + 6; + recv_dimz = meshmax[2] - meshmin[2] + 6; + + + for(dim = 0; dim < 3; dim++) /* Calculate each component of the force. */ + { + /* get the force component by finite differencing the potential */ + /* note: "workspace" now contains the potential for the local patch, plus a suffiently large buffer region */ + + for(x = 0; x < meshmax[0] - meshmin[0] + 2; x++) + for(y = 0; y < meshmax[1] - meshmin[1] + 2; y++) + for(z = 0; z < meshmax[2] - meshmin[2] + 2; z++) + { + xrr = xll = xr = xl = x; + yrr = yll = yr = yl = y; + zrr = zll = zr = zl = z; + + switch (dim) + { + case 0: + xr = x + 1; + xrr = x + 2; + xl = x - 1; + xll = x - 2; + break; + case 1: + yr = y + 1; + yl = y - 1; + yrr = y + 2; + yll = y - 2; + break; + case 2: + zr = z + 1; + zl = z - 1; + zrr = z + 2; + zll = z - 2; + break; + } + + forcegrid[(x * dimy + y) * dimz + z] + = + fac * ((4.0 / 3) * + (workspace[((xl + 2) * recv_dimy + (yl + 2)) * recv_dimz + (zl + 2)] + - workspace[((xr + 2) * recv_dimy + (yr + 2)) * recv_dimz + (zr + 2)]) - + (1.0 / 6) * + (workspace[((xll + 2) * recv_dimy + (yll + 2)) * recv_dimz + (zll + 2)] - + workspace[((xrr + 2) * recv_dimy + (yrr + 2)) * recv_dimz + (zrr + 2)])); + } + + /* read out the forces */ + + for(i = 0; i < NumPart; i++) + { + slab_x = to_slab_fac * P[i].Pos[0]; + if(slab_x >= PMGRID) + slab_x = PMGRID - 1; + dx = to_slab_fac * P[i].Pos[0] - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * P[i].Pos[1]; + if(slab_y >= PMGRID) + slab_y = PMGRID - 1; + dy = to_slab_fac * P[i].Pos[1] - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * P[i].Pos[2]; + if(slab_z >= PMGRID) + slab_z = PMGRID - 1; + dz = to_slab_fac * P[i].Pos[2] - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + acc_dim = + forcegrid[(slab_x * dimy + slab_y) * dimz + slab_z] * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + acc_dim += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_z] * (1.0 - dx) * dy * (1.0 - dz); + acc_dim += forcegrid[(slab_x * dimy + slab_y) * dimz + slab_zz] * (1.0 - dx) * (1.0 - dy) * dz; + acc_dim += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_zz] * (1.0 - dx) * dy * dz; + + acc_dim += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_z] * (dx) * (1.0 - dy) * (1.0 - dz); + acc_dim += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_z] * (dx) * dy * (1.0 - dz); + acc_dim += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_zz] * (dx) * (1.0 - dy) * dz; + acc_dim += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_zz] * (dx) * dy * dz; + + P[i].GravPM[dim] = acc_dim; + } + } + + pm_init_periodic_free(); + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + + if(ThisTask == 0) + { + printf("done PM.\n"); + fflush(stdout); + } +} + + +/*! Calculates the long-range potential using the PM method. The potential is + * Gaussian filtered with Asmth, given in mesh-cell units. We carry out a CIC + * charge assignment, and compute the potenial by Fourier transform + * methods. The CIC kernel is deconvolved. 
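+ *
+ * One detail worth noting (editorial sketch of the reasoning): after the
+ * forward FFT the k = 0 mode is explicitly set to zero, so only the density
+ * fluctuations around the mean source the periodic potential; the uniform
+ * background would otherwise have no well-defined periodic solution.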
+ */ +void pmpotential_periodic(void) +{ + double k2, kx, ky, kz, smth; + double dx, dy, dz; + double fx, fy, fz, ff; + double asmth2, fac; + int i, j, slab, level, sendTask, recvTask; + int x, y, z, ip; + int slab_x, slab_y, slab_z; + int slab_xx, slab_yy, slab_zz; + int meshmin[3], meshmax[3], sendmin, sendmax, recvmin, recvmax; + int rep, ncont, cont_sendmin[2], cont_sendmax[2], cont_recvmin[2], cont_recvmax[2]; + int dimx, dimy, dimz, recv_dimx, recv_dimy, recv_dimz; + MPI_Status status; + + if(ThisTask == 0) + { + printf("Starting periodic PM calculation.\n"); + fflush(stdout); + } + + asmth2 = (2 * M_PI) * All.Asmth[0] / All.BoxSize; + asmth2 *= asmth2; + + fac = All.G / (M_PI * All.BoxSize); /* to get potential */ + + force_treefree(); + + /* first, establish the extension of the local patch in the PMGRID */ + + for(j = 0; j < 3; j++) + { + meshmin[j] = PMGRID; + meshmax[j] = 0; + } + + for(i = 0; i < NumPart; i++) + { + for(j = 0; j < 3; j++) + { + slab = to_slab_fac * P[i].Pos[j]; + if(slab >= PMGRID) + slab = PMGRID - 1; + + if(slab < meshmin[j]) + meshmin[j] = slab; + + if(slab > meshmax[j]) + meshmax[j] = slab; + } + } + + MPI_Allgather(meshmin, 3, MPI_INT, meshmin_list, 3, MPI_INT, MPI_COMM_WORLD); + MPI_Allgather(meshmax, 3, MPI_INT, meshmax_list, 3, MPI_INT, MPI_COMM_WORLD); + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] + 2; + + pm_init_periodic_allocate((dimx + 4) * (dimy + 4) * (dimz + 4)); + + for(i = 0; i < dimx * dimy * dimz; i++) + workspace[i] = 0; + + for(i = 0; i < NumPart; i++) + { + slab_x = to_slab_fac * P[i].Pos[0]; + if(slab_x >= PMGRID) + slab_x = PMGRID - 1; + dx = to_slab_fac * P[i].Pos[0] - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * P[i].Pos[1]; + if(slab_y >= PMGRID) + slab_y = PMGRID - 1; + dy = to_slab_fac * P[i].Pos[1] - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * P[i].Pos[2]; + if(slab_z >= PMGRID) + slab_z = PMGRID - 1; + dz = to_slab_fac * P[i].Pos[2] - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + workspace[(slab_x * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_x * dimy + slab_yy) * dimz + slab_z] += P[i].Mass * (1.0 - dx) * dy * (1.0 - dz); + workspace[(slab_x * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * (1.0 - dy) * dz; + workspace[(slab_x * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (1.0 - dx) * dy * dz; + + workspace[(slab_xx * dimy + slab_y) * dimz + slab_z] += P[i].Mass * (dx) * (1.0 - dy) * (1.0 - dz); + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_z] += P[i].Mass * (dx) * dy * (1.0 - dz); + workspace[(slab_xx * dimy + slab_y) * dimz + slab_zz] += P[i].Mass * (dx) * (1.0 - dy) * dz; + workspace[(slab_xx * dimy + slab_yy) * dimz + slab_zz] += P[i].Mass * (dx) * dy * dz; + } + + + for(i = 0; i < fftsize; i++) /* clear local density field */ + rhogrid[i] = 0; + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + if(recvTask < NTask) + { + /* check how much we have to send */ + sendmin = 2 * PMGRID; + sendmax = -1; + for(slab_x = meshmin[0]; slab_x < meshmax[0] + 2; slab_x++) + if(slab_to_task[slab_x % PMGRID] == recvTask) + { + if(slab_x < sendmin) + sendmin = slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -1) + sendmin = 0; + + /* check how much we have to receive */ + recvmin 
= 2 * PMGRID; + recvmax = -1; + for(slab_x = meshmin_list[3 * recvTask]; slab_x < meshmax_list[3 * recvTask] + 2; slab_x++) + if(slab_to_task[slab_x % PMGRID] == sendTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -1) + recvmin = 0; + + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 2; + recv_dimy = meshmax_list[3 * recvTask + 1] - meshmin_list[3 * recvTask + 1] + 2; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 2; + + if(level > 0) + { + MPI_Sendrecv(workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, recvTask, + TAG_PERIODIC_C, forcegrid, + (recvmax - recvmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_PERIODIC_C, MPI_COMM_WORLD, &status); + } + else + { + memcpy(forcegrid, workspace + (sendmin - meshmin[0]) * dimy * dimz, + (sendmax - sendmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + + for(slab_x = recvmin; slab_x <= recvmax; slab_x++) + { + slab_xx = (slab_x % PMGRID) - first_slab_of_task[ThisTask]; + + if(slab_xx >= 0 && slab_xx < slabs_per_task[ThisTask]) + { + for(slab_y = meshmin_list[3 * recvTask + 1]; + slab_y <= meshmax_list[3 * recvTask + 1] + 1; slab_y++) + { + slab_yy = slab_y; + if(slab_yy >= PMGRID) + slab_yy -= PMGRID; + + for(slab_z = meshmin_list[3 * recvTask + 2]; + slab_z <= meshmax_list[3 * recvTask + 2] + 1; slab_z++) + { + slab_zz = slab_z; + if(slab_zz >= PMGRID) + slab_zz -= PMGRID; + + rhogrid[PMGRID * PMGRID2 * slab_xx + PMGRID2 * slab_yy + slab_zz] += + forcegrid[((slab_x - recvmin) * recv_dimy + + (slab_y - meshmin_list[3 * recvTask + 1])) * recv_dimz + + (slab_z - meshmin_list[3 * recvTask + 2])]; + } + } + } + } + } + } + } + + + + /* Do the FFT of the density field */ + + rfftwnd_mpi(fft_forward_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + /* multiply with Green's function for the potential */ + + for(y = slabstart_y; y < slabstart_y + nslab_y; y++) + for(x = 0; x < PMGRID; x++) + for(z = 0; z < PMGRID / 2 + 1; z++) + { + if(x > PMGRID / 2) + kx = x - PMGRID; + else + kx = x; + if(y > PMGRID / 2) + ky = y - PMGRID; + else + ky = y; + if(z > PMGRID / 2) + kz = z - PMGRID; + else + kz = z; + + k2 = kx * kx + ky * ky + kz * kz; + + if(k2 > 0) + { + smth = -exp(-k2 * asmth2) / k2 * fac; + /* do deconvolution */ + fx = fy = fz = 1; + if(kx != 0) + { + fx = (M_PI * kx) / PMGRID; + fx = sin(fx) / fx; + } + if(ky != 0) + { + fy = (M_PI * ky) / PMGRID; + fy = sin(fy) / fy; + } + if(kz != 0) + { + fz = (M_PI * kz) / PMGRID; + fz = sin(fz) / fz; + } + ff = 1 / (fx * fy * fz); + smth *= ff * ff * ff * ff; + /* end deconvolution */ + + ip = PMGRID * (PMGRID / 2 + 1) * (y - slabstart_y) + (PMGRID / 2 + 1) * x + z; + fft_of_rhogrid[ip].re *= smth; + fft_of_rhogrid[ip].im *= smth; + } + } + + if(slabstart_y == 0) + fft_of_rhogrid[0].re = fft_of_rhogrid[0].im = 0.0; + + /* Do the FFT to get the potential */ + + rfftwnd_mpi(fft_inverse_plan, 1, rhogrid, workspace, FFTW_TRANSPOSED_ORDER); + + /* note: "rhogrid" now contains the potential */ + + + + dimx = meshmax[0] - meshmin[0] + 6; + dimy = meshmax[1] - meshmin[1] + 6; + dimz = meshmax[2] - meshmin[2] + 6; + + for(level = 0; level < (1 << PTask); level++) /* note: for level=0, target is the same task */ + { + sendTask = ThisTask; + recvTask = ThisTask ^ level; + + 
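/*
 * Illustrative sketch, not part of the patch: the Green's-function factor
 * applied to each Fourier mode in the loop above, written as a standalone
 * helper.  Assumptions: kx, ky, kz are the integer wave numbers already
 * folded into [-PMGRID/2, PMGRID/2], asmth2 = ((2*pi/BoxSize)*Asmth)^2 and
 * fac = G/(pi*BoxSize) as set at the top of pmpotential_periodic().  The
 * sin(x)/x factors deconvolve the CIC (cloud-in-cell) assignment; the fourth
 * power accounts for the CIC interpolation being applied twice (mass
 * assignment and potential read-out).
 */
#include <math.h>

static double pm_greens_factor(int kx, int ky, int kz,
                               double asmth2, double fac, int pmgrid)
{
  double k2 = (double) kx * kx + (double) ky * ky + (double) kz * kz;
  double fx = 1, fy = 1, fz = 1, ff;

  if(k2 == 0)
    return 0;                        /* the k = 0 mode is zeroed explicitly */

  if(kx) { fx = M_PI * kx / pmgrid; fx = sin(fx) / fx; }
  if(ky) { fy = M_PI * ky / pmgrid; fy = sin(fy) / fy; }
  if(kz) { fz = M_PI * kz / pmgrid; fz = sin(fz) / fz; }

  ff = 1 / (fx * fy * fz);

  /* smoothed 1/k^2 potential kernel times the double CIC deconvolution */
  return -exp(-k2 * asmth2) / k2 * fac * ff * ff * ff * ff;
}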
if(recvTask < NTask) + { + + /* check how much we have to send */ + sendmin = 2 * PMGRID; + sendmax = -PMGRID; + for(slab_x = meshmin_list[3 * recvTask] - 2; slab_x < meshmax_list[3 * recvTask] + 4; slab_x++) + if(slab_to_task[(slab_x + PMGRID) % PMGRID] == sendTask) + { + if(slab_x < sendmin) + sendmin = slab_x; + if(slab_x > sendmax) + sendmax = slab_x; + } + if(sendmax == -PMGRID) + sendmin = sendmax + 1; + + + /* check how much we have to receive */ + recvmin = 2 * PMGRID; + recvmax = -PMGRID; + for(slab_x = meshmin[0] - 2; slab_x < meshmax[0] + 4; slab_x++) + if(slab_to_task[(slab_x + PMGRID) % PMGRID] == recvTask) + { + if(slab_x < recvmin) + recvmin = slab_x; + if(slab_x > recvmax) + recvmax = slab_x; + } + if(recvmax == -PMGRID) + recvmin = recvmax + 1; + + if((recvmax - recvmin) >= 0 || (sendmax - sendmin) >= 0) /* ok, we have a contribution to the slab */ + { + recv_dimx = meshmax_list[3 * recvTask + 0] - meshmin_list[3 * recvTask + 0] + 6; + recv_dimy = meshmax_list[3 * recvTask + 1] - meshmin_list[3 * recvTask + 1] + 6; + recv_dimz = meshmax_list[3 * recvTask + 2] - meshmin_list[3 * recvTask + 2] + 6; + + ncont = 1; + cont_sendmin[0] = sendmin; + cont_sendmax[0] = sendmax; + cont_sendmin[1] = sendmax + 1; + cont_sendmax[1] = sendmax; + + cont_recvmin[0] = recvmin; + cont_recvmax[0] = recvmax; + cont_recvmin[1] = recvmax + 1; + cont_recvmax[1] = recvmax; + + for(slab_x = sendmin; slab_x <= sendmax; slab_x++) + { + if(slab_to_task[(slab_x + PMGRID) % PMGRID] != ThisTask) + { + /* non-contiguous */ + cont_sendmax[0] = slab_x - 1; + while(slab_to_task[(slab_x + PMGRID) % PMGRID] != ThisTask) + slab_x++; + cont_sendmin[1] = slab_x; + ncont++; + } + } + + for(slab_x = recvmin; slab_x <= recvmax; slab_x++) + { + if(slab_to_task[(slab_x + PMGRID) % PMGRID] != recvTask) + { + /* non-contiguous */ + cont_recvmax[0] = slab_x - 1; + while(slab_to_task[(slab_x + PMGRID) % PMGRID] != recvTask) + slab_x++; + cont_recvmin[1] = slab_x; + if(ncont == 1) + ncont++; + } + } + + + for(rep = 0; rep < ncont; rep++) + { + sendmin = cont_sendmin[rep]; + sendmax = cont_sendmax[rep]; + recvmin = cont_recvmin[rep]; + recvmax = cont_recvmax[rep]; + + /* prepare what we want to send */ + if(sendmax - sendmin >= 0) + { + for(slab_x = sendmin; slab_x <= sendmax; slab_x++) + { + slab_xx = ((slab_x + PMGRID) % PMGRID) - first_slab_of_task[ThisTask]; + + for(slab_y = meshmin_list[3 * recvTask + 1] - 2; + slab_y < meshmax_list[3 * recvTask + 1] + 4; slab_y++) + { + slab_yy = (slab_y + PMGRID) % PMGRID; + + for(slab_z = meshmin_list[3 * recvTask + 2] - 2; + slab_z <= meshmax_list[3 * recvTask + 2] + 4; slab_z++) + { + slab_zz = (slab_z + PMGRID) % PMGRID; + + forcegrid[((slab_x - sendmin) * recv_dimy + + (slab_y - (meshmin_list[3 * recvTask + 1] - 2))) * recv_dimz + + slab_z - (meshmin_list[3 * recvTask + 2] - 2)] = + rhogrid[PMGRID * PMGRID2 * slab_xx + PMGRID2 * slab_yy + slab_zz]; + } + } + } + } + + if(level > 0) + { + MPI_Sendrecv(forcegrid, + (sendmax - sendmin + 1) * recv_dimy * recv_dimz * sizeof(fftw_real), + MPI_BYTE, recvTask, TAG_PERIODIC_D, + workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + (recvmax - recvmin + 1) * dimy * dimz * sizeof(fftw_real), MPI_BYTE, + recvTask, TAG_PERIODIC_D, MPI_COMM_WORLD, &status); + } + else + { + memcpy(workspace + (recvmin - (meshmin[0] - 2)) * dimy * dimz, + forcegrid, (recvmax - recvmin + 1) * dimy * dimz * sizeof(fftw_real)); + } + } + } + } + } + + + dimx = meshmax[0] - meshmin[0] + 2; + dimy = meshmax[1] - meshmin[1] + 2; + dimz = meshmax[2] - meshmin[2] 
+ 2; + + recv_dimx = meshmax[0] - meshmin[0] + 6; + recv_dimy = meshmax[1] - meshmin[1] + 6; + recv_dimz = meshmax[2] - meshmin[2] + 6; + + + + for(x = 0; x < meshmax[0] - meshmin[0] + 2; x++) + for(y = 0; y < meshmax[1] - meshmin[1] + 2; y++) + for(z = 0; z < meshmax[2] - meshmin[2] + 2; z++) + { + forcegrid[(x * dimy + y) * dimz + z] = + workspace[((x + 2) * recv_dimy + (y + 2)) * recv_dimz + (z + 2)]; + } + + + /* read out the potential */ + + for(i = 0; i < NumPart; i++) + { + slab_x = to_slab_fac * P[i].Pos[0]; + if(slab_x >= PMGRID) + slab_x = PMGRID - 1; + dx = to_slab_fac * P[i].Pos[0] - slab_x; + slab_x -= meshmin[0]; + slab_xx = slab_x + 1; + + slab_y = to_slab_fac * P[i].Pos[1]; + if(slab_y >= PMGRID) + slab_y = PMGRID - 1; + dy = to_slab_fac * P[i].Pos[1] - slab_y; + slab_y -= meshmin[1]; + slab_yy = slab_y + 1; + + slab_z = to_slab_fac * P[i].Pos[2]; + if(slab_z >= PMGRID) + slab_z = PMGRID - 1; + dz = to_slab_fac * P[i].Pos[2] - slab_z; + slab_z -= meshmin[2]; + slab_zz = slab_z + 1; + + P[i].Potential += + forcegrid[(slab_x * dimy + slab_y) * dimz + slab_z] * (1.0 - dx) * (1.0 - dy) * (1.0 - dz); + P[i].Potential += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_z] * (1.0 - dx) * dy * (1.0 - dz); + P[i].Potential += forcegrid[(slab_x * dimy + slab_y) * dimz + slab_zz] * (1.0 - dx) * (1.0 - dy) * dz; + P[i].Potential += forcegrid[(slab_x * dimy + slab_yy) * dimz + slab_zz] * (1.0 - dx) * dy * dz; + + P[i].Potential += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_z] * (dx) * (1.0 - dy) * (1.0 - dz); + P[i].Potential += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_z] * (dx) * dy * (1.0 - dz); + P[i].Potential += forcegrid[(slab_xx * dimy + slab_y) * dimz + slab_zz] * (dx) * (1.0 - dy) * dz; + P[i].Potential += forcegrid[(slab_xx * dimy + slab_yy) * dimz + slab_zz] * (dx) * dy * dz; + } + + pm_init_periodic_free(); + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + + if(ThisTask == 0) + { + printf("done PM-Potential.\n"); + fflush(stdout); + } +} + +#endif +#endif diff --git a/src/PyGadget/src/pm_periodic.o b/src/PyGadget/src/pm_periodic.o new file mode 100644 index 0000000..b83eee7 Binary files /dev/null and b/src/PyGadget/src/pm_periodic.o differ diff --git a/src/PyGadget/src/potential.c b/src/PyGadget/src/potential.c new file mode 100644 index 0000000..055318b --- /dev/null +++ b/src/PyGadget/src/potential.c @@ -0,0 +1,695 @@ +#include +#include +#include +#include +#include +#include + + +#include "allvars.h" +#include "proto.h" + + +/*! \file potential.c + * \brief Computation of the gravitational potential of particles + */ + + +/*! This function computes the gravitational potential for ALL the particles. + * First, the (short-range) tree potential is computed, and then, if needed, + * the long range PM potential is added. 
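/*
 * Illustrative sketch, not part of the patch: the hypercube communication
 * schedule used by the export loops in compute_potential() below.  For each
 * ngrp in 1..2^PTask-1 the partner of a task is ThisTask ^ ngrp, so every
 * unordered pair of tasks exchanges data exactly once, namely when ngrp
 * equals the XOR of their ranks.  Standalone demo with a made-up task count.
 */
#include <stdio.h>

int main(void)
{
  int ntask = 4, ptask = 2;                /* assume NTask = 4, PTask = 2 */
  int task, ngrp, partner;

  for(ngrp = 1; ngrp < (1 << ptask); ngrp++)
    for(task = 0; task < ntask; task++)
      {
        partner = task ^ ngrp;
        if(partner < ntask && task < partner)   /* list each pair once */
          printf("ngrp=%d: task %d <-> task %d\n", ngrp, task, partner);
      }

  return 0;
}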
+ */ +void compute_potential(void) +{ + int i; + +#ifndef NOGRAVITY + long long ntot, ntotleft; + int j, k, level, sendTask, recvTask; + int ndone; + int maxfill, ngrp, place, nexport; + int *nsend, *noffset, *nsend_local, *nbuffer, *ndonelist, *numlist; + double fac; + double t0, t1, tstart, tend; + MPI_Status status; + double r2; + + t0 = second(); + + if(All.ComovingIntegrationOn) + set_softenings(); + + if(ThisTask == 0) + { + printf("Start computation of potential for all particles...\n"); + fflush(stdout); + } + + + tstart = second(); + if(TreeReconstructFlag) + { + if(ThisTask == 0) + printf("Tree construction.\n"); + + force_treebuild(NumPart); + + TreeReconstructFlag = 0; + + if(ThisTask == 0) + printf("Tree construction done.\n"); + } + tend = second(); + All.CPU_TreeConstruction += timediff(tstart, tend); + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumPart, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + i = 0; /* beginn with this index */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + for(nexport = 0, ndone = 0; i < NumPart && nexport < All.BunchSizeForce - NTask; i++) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; + +#ifndef PMGRID + force_treeevaluate_potential(i, 0); +#else + force_treeevaluate_potential_shortrange(i, 0); +#endif + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + GravDataGet[nexport].u.Pos[k] = P[i].Pos[k]; +#ifdef UNEQUALSOFTENINGS + GravDataGet[nexport].Type = P[i].Type; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(P[i].Type == 0) + GravDataGet[nexport].Soft = SphP[i].Hsml; +#endif +#endif + GravDataGet[nexport].w.OldAcc = P[i].OldAcc; + + GravDataIndexTable[nexport].Task = j; + GravDataIndexTable[nexport].Index = i; + GravDataIndexTable[nexport].SortIndex = nexport; + + nexport++; + nsend_local[j]++; + } + } + } + + qsort(GravDataIndexTable, nexport, sizeof(struct gravdata_index), grav_tree_compare_key); + + for(j = 0; j < nexport; j++) + GravDataIn[j] = GravDataGet[GravDataIndexTable[j].SortIndex]; + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&GravDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_POTENTIAL_A, + &GravDataGet[nbuffer[ThisTask]], + 
nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_POTENTIAL_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + + for(j = 0; j < nbuffer[ThisTask]; j++) + { +#ifndef PMGRID + force_treeevaluate_potential(j, 1); +#else + force_treeevaluate_potential_shortrange(j, 1); +#endif + } + + + /* get the result */ + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&GravDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_POTENTIAL_B, + &GravDataOut[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_POTENTIAL_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + place = GravDataIndexTable[noffset[recvTask] + j].Index; + + P[place].Potential += GravDataOut[j + noffset[recvTask]].u.Potential; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + + /* add correction to exclude self-potential */ + + for(i = 0; i < NumPart; i++) + { + /* remove self-potential */ + P[i].Potential += P[i].Mass / All.SofteningTable[P[i].Type]; + + if(All.ComovingIntegrationOn) + if(All.PeriodicBoundariesOn) + P[i].Potential -= 2.8372975 * pow(P[i].Mass, 2.0 / 3) * + pow(All.Omega0 * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G), 1.0 / 3); + } + + + /* multiply with the gravitational constant */ + + for(i = 0; i < NumPart; i++) + P[i].Potential *= All.G; + + +#ifdef PMGRID + +#ifdef PERIODIC + pmpotential_periodic(); +#ifdef PLACEHIGHRESREGION + i = pmpotential_nonperiodic(1); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); + i = pmpotential_nonperiodic(1); /* try again */ + } + if(i == 1) + endrun(88686); +#endif +#else + i = pmpotential_nonperiodic(0); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); + i = pmpotential_nonperiodic(0); /* try again */ + } + if(i == 1) + endrun(88687); +#ifdef PLACEHIGHRESREGION + i = pmpotential_nonperiodic(1); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + + i = pmpotential_nonperiodic(1); + } + if(i != 0) + endrun(88688); +#endif +#endif + +#endif + + + + if(All.ComovingIntegrationOn) + { +#ifndef PERIODIC + fac = -0.5 * All.Omega0 * All.Hubble * All.Hubble; + + for(i = 0; i < NumPart; i++) + { + for(k = 0, r2 = 0; k < 3; k++) + r2 += P[i].Pos[k] * P[i].Pos[k]; + + P[i].Potential += fac * r2; + } +#endif + } + else + { + fac = 
-0.5 * All.OmegaLambda * All.Hubble * All.Hubble; + if(fac != 0) + { + for(i = 0; i < NumPart; i++) + { + for(k = 0, r2 = 0; k < 3; k++) + r2 += P[i].Pos[k] * P[i].Pos[k]; + + P[i].Potential += fac * r2; + } + } + } + + + if(ThisTask == 0) + { + printf("potential done.\n"); + fflush(stdout); + } + + t1 = second(); + + All.CPU_Potential += timediff(t0, t1); + +#else + for(i = 0; i < NumPart; i++) + P[i].Potential = 0; +#endif +} + + + + + + + +#ifdef PY_INTERFACE + +/*! This function computes the gravitational potential for ALL the particles. + * First, the (short-range) tree potential is computed, and then, if needed, + * the long range PM potential is added. + */ +void compute_potential_sub(void) +{ + int i; + +#ifndef NOGRAVITY + long long ntot, ntotleft; + int j, k, level, sendTask, recvTask; + int ndone; + int maxfill, ngrp, place, nexport; + int *nsend, *noffset, *nsend_local, *nbuffer, *ndonelist, *numlist; + double fac; + double t0, t1, tstart, tend; + MPI_Status status; + double r2; + + t0 = second(); + + if(All.ComovingIntegrationOn) + set_softenings(); + + if(ThisTask == 0) + { + printf("Start computation of potential for all particles...\n"); + fflush(stdout); + } + + +// tstart = second(); +// if(TreeReconstructFlag) +// { +// if(ThisTask == 0) +// printf("Tree construction.\n"); +// +// force_treebuild(NumPart); +// +// TreeReconstructFlag = 0; +// +// if(ThisTask == 0) +// printf("Tree construction done.\n"); +// } +// tend = second(); +// All.CPU_TreeConstruction += timediff(tstart, tend); + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumPartQ, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + i = 0; /* beginn with this index */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + for(nexport = 0, ndone = 0; i < NumPartQ && nexport < All.BunchSizeForce - NTask; i++) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; + +#ifndef PMGRID + force_treeevaluate_potential_sub(i, 0); +#else + force_treeevaluate_potential_shortrange_sub(i, 0); +#endif + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + GravDataGet[nexport].u.Pos[k] = Q[i].Pos[k]; +#ifdef UNEQUALSOFTENINGS + GravDataGet[nexport].Type = Q[i].Type; +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(Q[i].Type == 0) + GravDataGet[nexport].Soft = SphQ[i].Hsml; +#endif +#endif + GravDataGet[nexport].w.OldAcc = Q[i].OldAcc; + + GravDataIndexTable[nexport].Task = j; + GravDataIndexTable[nexport].Index = i; + GravDataIndexTable[nexport].SortIndex = nexport; + + nexport++; + nsend_local[j]++; + } + } + } + + qsort(GravDataIndexTable, nexport, sizeof(struct gravdata_index), grav_tree_compare_key); + + for(j = 0; j < nexport; j++) + GravDataIn[j] = GravDataGet[GravDataIndexTable[j].SortIndex]; + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + for(j = 0; 
j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&GravDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_POTENTIAL_A, + &GravDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), MPI_BYTE, + recvTask, TAG_POTENTIAL_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + + for(j = 0; j < nbuffer[ThisTask]; j++) + { +#ifndef PMGRID + force_treeevaluate_potential_sub(j, 1); +#else + force_treeevaluate_potential_shortrange_sub(j, 1); +#endif + } + + + /* get the result */ + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeForce) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&GravDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_POTENTIAL_B, + &GravDataOut[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct gravdata_in), + MPI_BYTE, recvTask, TAG_POTENTIAL_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + place = GravDataIndexTable[noffset[recvTask] + j].Index; + + Q[place].Potential += GravDataOut[j + noffset[recvTask]].u.Potential; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + + /* add correction to exclude self-potential */ + + for(i = 0; i < NumPartQ; i++) + { + /* remove self-potential */ + Q[i].Potential += Q[i].Mass / All.SofteningTable[Q[i].Type]; + + if(All.ComovingIntegrationOn) + if(All.PeriodicBoundariesOn) + Q[i].Potential -= 2.8372975 * pow(Q[i].Mass, 2.0 / 3) * + pow(All.Omega0 * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G), 1.0 / 3); + } + + + /* multiply with the gravitational constant */ + + for(i = 0; i < NumPartQ; i++) + Q[i].Potential *= All.G; + + +#ifdef PMGRID + +#ifdef PERIODIC + pmpotential_periodic(); +#ifdef PLACEHIGHRESREGION + i = pmpotential_nonperiodic(1); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); + i = pmpotential_nonperiodic(1); /* try again */ + } + if(i == 1) + endrun(88686); +#endif +#else + i = pmpotential_nonperiodic(0); + if(i == 1) /* this is returned if a particle lied 
outside allowed range */ + { + pm_init_regionsize(); + pm_setup_nonperiodic_kernel(); + i = pmpotential_nonperiodic(0); /* try again */ + } + if(i == 1) + endrun(88687); +#ifdef PLACEHIGHRESREGION + i = pmpotential_nonperiodic(1); + if(i == 1) /* this is returned if a particle lied outside allowed range */ + { + pm_init_regionsize(); + + i = pmpotential_nonperiodic(1); + } + if(i != 0) + endrun(88688); +#endif +#endif + +#endif + + + + if(All.ComovingIntegrationOn) + { +#ifndef PERIODIC + fac = -0.5 * All.Omega0 * All.Hubble * All.Hubble; + + for(i = 0; i < NumPartQ; i++) + { + for(k = 0, r2 = 0; k < 3; k++) + r2 += Q[i].Pos[k] * Q[i].Pos[k]; + + Q[i].Potential += fac * r2; + } +#endif + } + else + { + fac = -0.5 * All.OmegaLambda * All.Hubble * All.Hubble; + if(fac != 0) + { + for(i = 0; i < NumPartQ; i++) + { + for(k = 0, r2 = 0; k < 3; k++) + r2 += Q[i].Pos[k] * Q[i].Pos[k]; + + Q[i].Potential += fac * r2; + } + } + } + + + if(ThisTask == 0) + { + printf("potential done.\n"); + fflush(stdout); + } + + t1 = second(); + + All.CPU_Potential += timediff(t0, t1); + +#else + for(i = 0; i < NumPartQ; i++) + Q[i].Potential = 0; +#endif +} + +#endif + diff --git a/src/PyGadget/src/potential.o b/src/PyGadget/src/potential.o new file mode 100644 index 0000000..4a0ca00 Binary files /dev/null and b/src/PyGadget/src/potential.o differ diff --git a/src/PyGadget/src/predict.c b/src/PyGadget/src/predict.c new file mode 100644 index 0000000..6c936a2 --- /dev/null +++ b/src/PyGadget/src/predict.c @@ -0,0 +1,169 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file predict.c + * \brief drift particles by a small time interval + * + * This function contains code to implement a drift operation on all the + * particles, which represents one part of the leapfrog integration scheme. + */ + + +/*! This function drifts all particles from the current time to the future: + * time0 - > time1 + * + * If there is no explicit tree construction in the following timestep, the + * tree nodes are also drifted and updated accordingly. Note: For periodic + * boundary conditions, the mapping of coordinates onto the interval + * [0,All.BoxSize] is only done before the domain decomposition, or for + * outputs to snapshot files. This simplifies dynamic tree updates, and + * allows the domain decomposition to be carried out only every once in a + * while. 
+ */ +void move_particles(int time0, int time1) +{ + int i, j; + double dt_drift, dt_gravkick, dt_hydrokick, dt_entr; + double t0, t1; + + + t0 = second(); + + if(All.ComovingIntegrationOn) + { + dt_drift = get_drift_factor(time0, time1); + dt_gravkick = get_gravkick_factor(time0, time1); + dt_hydrokick = get_hydrokick_factor(time0, time1); + } + else + { + dt_drift = dt_gravkick = dt_hydrokick = (time1 - time0) * All.Timebase_interval; + } + + for(i = 0; i < NumPart; i++) + { + for(j = 0; j < 3; j++) + P[i].Pos[j] += P[i].Vel[j] * dt_drift; + + if(P[i].Type == 0) + { +#ifdef PMGRID + for(j = 0; j < 3; j++) + SphP[i].VelPred[j] += + (P[i].GravAccel[j] + P[i].GravPM[j]) * dt_gravkick + SphP[i].HydroAccel[j] * dt_hydrokick; +#else + for(j = 0; j < 3; j++) + SphP[i].VelPred[j] += P[i].GravAccel[j] * dt_gravkick + SphP[i].HydroAccel[j] * dt_hydrokick; +#endif + SphP[i].Density *= exp(-SphP[i].DivVel * dt_drift); + SphP[i].Hsml *= exp(0.333333333333 * SphP[i].DivVel * dt_drift); + + if(SphP[i].Hsml < All.MinGasHsml) + SphP[i].Hsml = All.MinGasHsml; + + dt_entr = (time1 - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval; + + SphP[i].Pressure = (SphP[i].Entropy + SphP[i].DtEntropy * dt_entr) * pow(SphP[i].Density, GAMMA); + } + } + + /* if domain-decomp and tree are not going to be reconstructed, update dynamically. */ + if(All.NumForcesSinceLastDomainDecomp < All.TotNumPart * All.TreeDomainUpdateFrequency) + { + for(i = 0; i < Numnodestree; i++) + for(j = 0; j < 3; j++) + Nodes[All.MaxPart + i].u.d.s[j] += Extnodes[All.MaxPart + i].vs[j] * dt_drift; + + force_update_len(); + + force_update_pseudoparticles(); + } + + t1 = second(); + + All.CPU_Predict += timediff(t0, t1); +} + + + +/*! This function makes sure that all particle coordinates (Pos) are + * periodically mapped onto the interval [0, BoxSize]. After this function + * has been called, a new domain decomposition should be done, which will + * also force a new tree construction. + */ +#ifdef PERIODIC +void do_box_wrapping(void) +{ + int i, j; + double boxsize[3]; + + for(j = 0; j < 3; j++) + boxsize[j] = All.BoxSize; + +#ifdef LONG_X + boxsize[0] *= LONG_X; +#endif +#ifdef LONG_Y + boxsize[1] *= LONG_Y; +#endif +#ifdef LONG_Z + boxsize[2] *= LONG_Z; +#endif + + for(i = 0; i < NumPart; i++) + for(j = 0; j < 3; j++) + { + while(P[i].Pos[j] < 0) + P[i].Pos[j] += boxsize[j]; + + while(P[i].Pos[j] >= boxsize[j]) + P[i].Pos[j] -= boxsize[j]; + } +} +#endif + +#ifdef PY_INTERFACE +/*! This function makes sure that all particle coordinates (Pos) are + * periodically mapped onto the interval [0, BoxSize]. After this function + * has been called, a new domain decomposition should be done, which will + * also force a new tree construction. 
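/*
 * Illustrative sketch, not part of the patch: the same periodic mapping as
 * the while loops in do_box_wrapping()/do_box_wrappingQ(), expressed with
 * fmod().  It handles coordinates an arbitrary distance outside the box and
 * returns a value in [0, boxsize), up to floating-point rounding at the
 * upper edge.
 */
#include <math.h>

static double wrap_coordinate(double pos, double boxsize)
{
  double x = fmod(pos, boxsize);     /* remainder in (-boxsize, boxsize) */

  if(x < 0)
    x += boxsize;                    /* shift negative remainders into range */

  return x;
}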
+ */ +#ifdef PERIODIC +void do_box_wrappingQ(void) +{ + int i, j; + double boxsize[3]; + + for(j = 0; j < 3; j++) + boxsize[j] = All.BoxSize; + +#ifdef LONG_X + boxsize[0] *= LONG_X; +#endif +#ifdef LONG_Y + boxsize[1] *= LONG_Y; +#endif +#ifdef LONG_Z + boxsize[2] *= LONG_Z; +#endif + + for(i = 0; i < NumPartQ; i++) + for(j = 0; j < 3; j++) + { + while(Q[i].Pos[j] < 0) + Q[i].Pos[j] += boxsize[j]; + + while(Q[i].Pos[j] >= boxsize[j]) + Q[i].Pos[j] -= boxsize[j]; + } +} +#endif +#endif diff --git a/src/PyGadget/src/predict.o b/src/PyGadget/src/predict.o new file mode 100644 index 0000000..83efea9 Binary files /dev/null and b/src/PyGadget/src/predict.o differ diff --git a/src/PyGadget/src/proto.h b/src/PyGadget/src/proto.h new file mode 100644 index 0000000..c59c3fc --- /dev/null +++ b/src/PyGadget/src/proto.h @@ -0,0 +1,258 @@ +/*! \file proto.h + * \brief this file contains all function prototypes of the code + */ + +#ifndef ALLVARS_H +#include "allvars.h" +#endif + +#ifdef HAVE_HDF5 +#include +#endif + +void advance_and_find_timesteps(void); +void allocate_commbuffers(void); +#ifdef PY_INTERFACE +void allocate_commbuffersQ(void); +#endif +void allocate_memory(void); +void begrun(void); +int blockpresent(enum iofields blocknr); +void catch_abort(int sig); +void catch_fatal(int sig); +void check_omega(void); +void close_outputfiles(void); +int compare_key(const void *a, const void *b); +void compute_accelerations(int mode); +void compute_global_quantities_of_system(void); +void compute_potential(void); +int dens_compare_key(const void *a, const void *b); +void density(void); +#ifdef PY_INTERFACE +void density_sub(void); +#endif +void density_decouple(void); +void density_evaluate(int i, int mode); +#ifdef PY_INTERFACE +void density_evaluate_sub(int i, int mode); +#endif +void distribute_file(int nfiles, int firstfile, int firsttask, int lasttask, int *filenr, int *master, int *last); +double dmax(double, double); +double dmin(double, double); +void do_box_wrapping(void); +#ifdef PY_INTERFACE +void do_box_wrapping(void); +#endif + +void domain_Decomposition(void); +int domain_compare_key(const void *a, const void *b); +int domain_compare_key(const void *a, const void *b); +int domain_compare_toplist(const void *a, const void *b); +void domain_countToGo(void); +void domain_decompose(void); +void domain_determineTopTree(void); +void domain_exchangeParticles(int partner, int sphflag, int send_count, int recv_count); +void domain_findExchangeNumbers(int task, int partner, int sphflag, int *send, int *recv); +void domain_findExtent(void); +int domain_findSplit(int cpustart, int ncpu, int first, int last); +void domain_shiftSplit(void); +void domain_sumCost(void); +void domain_topsplit(int node, peanokey startkey); +void domain_topsplit_local(int node, peanokey startkey); + +#ifdef PY_INTERFACE +void domain_DecompositionQ(void); +void domain_decomposeQ(void); +int domain_findSplitQ(int cpustart, int ncpu, int first, int last); +void domain_shiftSplitQ(void); +void domain_findExchangeNumbersQ(int task, int partner, int sphflag, int *send, int *recv); +void domain_exchangeParticlesQ(int partner, int sphflag, int send_count, int recv_count); +void domain_countToGoQ(void); +void domain_walktoptreeQ(int no); +void domain_sumCostQ(void); +void domain_findExtentQ(void); + +void domain_determineTopTreeQ(void); +void domain_topsplit_localQ(int node, peanokey startkey); +void domain_topsplitQ(int node, peanokey startkey); +#endif + + +double drift_integ(double a, void *param); +void dump_particles(void); 
+void empty_read_buffer(enum iofields blocknr, int offset, int pc, int type); +void endrun(int); +void energy_statistics(void); +void every_timestep_stuff(void); + +void ewald_corr(double dx, double dy, double dz, double *fper); +void ewald_force(int ii, int jj, int kk, double x[3], double force[3]); +void ewald_init(void); +double ewald_pot_corr(double dx, double dy, double dz); +double ewald_psi(double x[3]); + +void fill_Tab_IO_Labels(void); +void fill_write_buffer(enum iofields blocknr, int *pindex, int pc, int type); +void find_dt_displacement_constraint(double hfac); +int find_files(char *fname); +int find_next_outputtime(int time); +void find_next_sync_point_and_drift(void); + +void force_create_empty_nodes(int no, int topnode, int bits, int x, int y, int z, int *nodecount, int *nextfree); +void force_exchange_pseudodata(void); +void force_flag_localnodes(void); +void force_insert_pseudo_particles(void); +void force_setupnonrecursive(int no); +void force_treeallocate(int maxnodes, int maxpart); +int force_treebuild(int npart); +int force_treebuild_single(int npart); +int force_treeevaluate(int target, int mode, double *ewaldcountsum); +#ifdef PY_INTERFACE +int force_treeevaluate(int target, int mode, double *ewaldcountsum); +#endif +int force_treeevaluate_direct(int target, int mode); +int force_treeevaluate_ewald_correction(int target, int mode, double pos_x, double pos_y, double pos_z, double aold); +void force_treeevaluate_potential(int target, int type); +void force_treeevaluate_potential_shortrange(int target, int mode); +#ifdef PY_INTERFACE +void force_treeevaluate_potential_sub(int target, int type); +void force_treeevaluate_potential_shortrange_sub(int target, int mode); +#endif +int force_treeevaluate_shortrange(int target, int mode); +#ifdef PY_INTERFACE +int force_treeevaluate_shortrange(int target, int mode); +#endif +void force_treefree(void); +void force_treeupdate_pseudos(void); +void force_update_hmax(void); +void force_update_len(void); +void force_update_node(int no, int flag); +void force_update_node_hmax_local(void); +void force_update_node_hmax_toptree(void); +void force_update_node_len_local(void); +void force_update_node_len_toptree(void); +void force_update_node_recursive(int no, int sib, int father); +void force_update_pseudoparticles(void); +void force_update_size_of_parent_node(int no); + +void free_memory(void); + +int get_bytes_per_blockelement(enum iofields blocknr); +void get_dataset_name(enum iofields blocknr, char *buf); +int get_datatype_in_block(enum iofields blocknr); +double get_drift_factor(int time0, int time1); +double get_gravkick_factor(int time0, int time1); +double get_hydrokick_factor(int time0, int time1); +int get_particles_in_block(enum iofields blocknr, int *typelist); +double get_random_number(int id); +int get_timestep(int p, double *a, int flag); +int get_values_per_blockelement(enum iofields blocknr); + +int grav_tree_compare_key(const void *a, const void *b); +void gravity_forcetest(void); +void gravity_tree(void); +#ifdef PY_INTERFACE +void gravity_tree_sub(void); +#endif +void gravity_tree_shortrange(void); +double gravkick_integ(double a, void *param); + +int hydro_compare_key(const void *a, const void *b); +void hydro_evaluate(int target, int mode); +void hydro_force(void); +double hydrokick_integ(double a, void *param); + +#ifdef PY_INTERFACE +void sph(void); +void sph_evaluate(int target, int mode); +void sph_sub(void); +void sph_evaluate_sub(int target, int mode); + +int sph_compare_key(const void *a, const void *b); 
+#endif + +int imax(int, int); +int imin(int, int); + +void init(void); +void init_drift_table(void); +void init_peano_map(void); + +void long_range_force(void); +void long_range_init(void); +void long_range_init_regionsize(void); +void move_particles(int time0, int time1); +size_t my_fread(void *ptr, size_t size, size_t nmemb, FILE * stream); +size_t my_fwrite(void *ptr, size_t size, size_t nmemb, FILE * stream); + +int ngb_clear_buf(FLOAT searchcenter[3], FLOAT hguess, int numngb); +void ngb_treeallocate(int npart); +void ngb_treebuild(void); +int ngb_treefind_pairs(FLOAT searchcenter[3], FLOAT hsml, int *startnode); +int ngb_treefind_variable(FLOAT searchcenter[3], FLOAT hguess, int *startnode); +void ngb_treefree(void); +void ngb_treesearch(int); +void ngb_treesearch_pairs(int); +void ngb_update_nodes(void); + +void open_outputfiles(void); + +peanokey peano_hilbert_key(int x, int y, int z, int bits); +void peano_hilbert_order(void); +#ifdef PY_INTERFACE +void peano_hilbert_orderQ(void); +#endif +void pm_init_nonperiodic(void); +void pm_init_nonperiodic_allocate(int dimprod); +void pm_init_nonperiodic_free(void); +void pm_init_periodic(void); +void pm_init_periodic_allocate(int dimprod); +void pm_init_periodic_free(void); +void pm_init_regionsize(void); +void pm_setup_nonperiodic_kernel(void); +int pmforce_nonperiodic(int grnr); +void pmforce_periodic(void); +int pmpotential_nonperiodic(int grnr); +void pmpotential_periodic(void); + +double pow(double, double); /* on some old DEC Alphas, the correct prototype for pow() is missing, even when math.h is included */ + +void read_file(char *fname, int readTask, int lastTask); +void read_header_attributes_in_hdf5(char *fname); +void read_ic(char *fname); +int read_outputlist(char *fname); +void read_parameter_file(char *fname); +void readjust_timebase(double TimeMax_old, double TimeMax_new); + +void reorder_gas(void); +void reorder_particles(void); + +#ifdef PY_INTERFACE +void reorder_gasQ(void); +void reorder_particlesQ(void); +#endif + +void restart(int mod); +void run(void); +void savepositions(int num); + +double second(void); + +void seed_glass(void); +void set_random_numbers(void); +void set_softenings(void); +void set_units(void); +#ifdef PY_INTERFACE +void setup_smoothinglengths(void); +#endif +void setup_smoothinglengths_sub(void); +void statistics(void); +void terminate_processes(void); +double timediff(double t0, double t1); + +#ifdef HAVE_HDF5 +void write_header_attributes_in_hdf5(hid_t handle); +#endif +void write_file(char *fname, int readTask, int lastTask); +void write_pid_file(void); + diff --git a/src/PyGadget/src/python_interface.0.c b/src/PyGadget/src/python_interface.0.c new file mode 100644 index 0000000..aa5fa1a --- /dev/null +++ b/src/PyGadget/src/python_interface.0.c @@ -0,0 +1,646 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +#define TO_INT(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_INT) ,0) ) +#define TO_DOUBLE(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_DOUBLE) ,0) ) +#define TO_FLOAT(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_FLOAT) ,0) ) + + + + + + + + +static int Init() + { + + /* main.c */ + + RestartFlag = 0; + + All.CPU_TreeConstruction = All.CPU_TreeWalk = All.CPU_Gravity = All.CPU_Potential = All.CPU_Domain = + All.CPU_Snapshot = All.CPU_Total = All.CPU_CommSum = All.CPU_Imbalance = All.CPU_Hydro = + All.CPU_HydCompWalk = All.CPU_HydCommSumm = 
All.CPU_HydImbalance = + All.CPU_EnsureNgb = All.CPU_Predict = All.CPU_TimeLine = All.CPU_PM = All.CPU_Peano = 0; + + CPUThisRun = 0; + + + + /* from init.c, after read ic */ + int i, j; + double a3; + + + + All.Time = All.TimeBegin; + All.Ti_Current = 0; + + if(All.ComovingIntegrationOn) + { + All.Timebase_interval = (log(All.TimeMax) - log(All.TimeBegin)) / TIMEBASE; + a3 = All.Time * All.Time * All.Time; + } + else + { + All.Timebase_interval = (All.TimeMax - All.TimeBegin) / TIMEBASE; + a3 = 1; + } + + set_softenings(); + + All.NumCurrentTiStep = 0; /* setup some counters */ + All.SnapshotFileCount = 0; + if(RestartFlag == 2) + All.SnapshotFileCount = atoi(All.InitCondFile + strlen(All.InitCondFile) - 3) + 1; + + All.TotNumOfForces = 0; + All.NumForcesSinceLastDomainDecomp = 0; + + if(All.ComovingIntegrationOn) + if(All.PeriodicBoundariesOn == 1) + check_omega(); + + All.TimeLastStatistics = All.TimeBegin - All.TimeBetStatistics; + + if(All.ComovingIntegrationOn) /* change to new velocity variable */ + { + for(i = 0; i < NumPart; i++) + for(j = 0; j < 3; j++) + P[i].Vel[j] *= sqrt(All.Time) * All.Time; + } + + for(i = 0; i < NumPart; i++) /* start-up initialization */ + { + for(j = 0; j < 3; j++) + P[i].GravAccel[j] = 0; +#ifdef PMGRID + for(j = 0; j < 3; j++) + P[i].GravPM[j] = 0; +#endif + P[i].Ti_endstep = 0; + P[i].Ti_begstep = 0; + + P[i].OldAcc = 0; + P[i].GravCost = 1; + P[i].Potential = 0; + } + +#ifdef PMGRID + All.PM_Ti_endstep = All.PM_Ti_begstep = 0; +#endif + +#ifdef FLEXSTEPS + All.PresentMinStep = TIMEBASE; + for(i = 0; i < NumPart; i++) /* start-up initialization */ + { + P[i].FlexStepGrp = (int) (TIMEBASE * get_random_number(P[i].ID)); + } +#endif + + + for(i = 0; i < N_gas; i++) /* initialize sph_properties */ + { + for(j = 0; j < 3; j++) + { + SphP[i].VelPred[j] = P[i].Vel[j]; + SphP[i].HydroAccel[j] = 0; + } + + SphP[i].DtEntropy = 0; + + if(RestartFlag == 0) + { + SphP[i].Hsml = 0; + SphP[i].Density = -1; + } + } + + ngb_treeallocate(MAX_NGB); + + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + + Flag_FullStep = 1; /* to ensure that Peano-Hilber order is done */ + + domain_Decomposition(); /* do initial domain decomposition (gives equal numbers of particles) */ + + ngb_treebuild(); /* will build tree */ + + setup_smoothinglengths(); + + TreeReconstructFlag = 1; + + /* at this point, the entropy variable normally contains the + * internal energy, read in from the initial conditions file, unless the file + * explicitly signals that the initial conditions contain the entropy directly. + * Once the density has been computed, we can convert thermal energy to entropy. + */ +#ifndef ISOTHERM_EQS + if(header.flag_entropy_instead_u == 0) + for(i = 0; i < N_gas; i++) + SphP[i].Entropy = GAMMA_MINUS1 * SphP[i].Entropy / pow(SphP[i].Density / a3, GAMMA_MINUS1); +#endif + + + + return 1; + } + + + + +static void Begrun1() + { + + + struct global_data_all_processes all; + + if(ThisTask == 0) + { + printf("\nThis is Gadget, version `%s'.\n", GADGETVERSION); + printf("\nRunning on %d processors.\n", NTask); + } + + read_parameter_file(ParameterFile); /* ... read in parameters for this run */ + + allocate_commbuffers(); /* ... 
allocate buffer-memory for particle + exchange during force computation */ + set_units(); + + #if defined(PERIODIC) && (!defined(PMGRID) || defined(FORCETEST)) + ewald_init(); + #endif + + open_outputfiles(); + + random_generator = gsl_rng_alloc(gsl_rng_ranlxd1); + gsl_rng_set(random_generator, 42); /* start-up seed */ + + #ifdef PMGRID + long_range_init(); + #endif + + All.TimeLastRestartFile = CPUThisRun; + + if(RestartFlag == 0 || RestartFlag == 2) + { + set_random_numbers(); + + Init(); /* ... read in initial model */ + } + else + { + all = All; /* save global variables. (will be read from restart file) */ + + restart(RestartFlag); /* ... read restart file. Note: This also resets + all variables in the struct `All'. + However, during the run, some variables in the parameter + file are allowed to be changed, if desired. These need to + copied in the way below. + Note: All.PartAllocFactor is treated in restart() separately. + */ + + All.MinSizeTimestep = all.MinSizeTimestep; + All.MaxSizeTimestep = all.MaxSizeTimestep; + All.BufferSize = all.BufferSize; + All.BunchSizeForce = all.BunchSizeForce; + All.BunchSizeDensity = all.BunchSizeDensity; + All.BunchSizeHydro = all.BunchSizeHydro; + All.BunchSizeDomain = all.BunchSizeDomain; + + All.TimeLimitCPU = all.TimeLimitCPU; + All.ResubmitOn = all.ResubmitOn; + All.TimeBetSnapshot = all.TimeBetSnapshot; + All.TimeBetStatistics = all.TimeBetStatistics; + All.CpuTimeBetRestartFile = all.CpuTimeBetRestartFile; + All.ErrTolIntAccuracy = all.ErrTolIntAccuracy; + All.MaxRMSDisplacementFac = all.MaxRMSDisplacementFac; + + All.ErrTolForceAcc = all.ErrTolForceAcc; + + All.TypeOfTimestepCriterion = all.TypeOfTimestepCriterion; + All.TypeOfOpeningCriterion = all.TypeOfOpeningCriterion; + All.NumFilesWrittenInParallel = all.NumFilesWrittenInParallel; + All.TreeDomainUpdateFrequency = all.TreeDomainUpdateFrequency; + + All.SnapFormat = all.SnapFormat; + All.NumFilesPerSnapshot = all.NumFilesPerSnapshot; + All.MaxNumNgbDeviation = all.MaxNumNgbDeviation; + All.ArtBulkViscConst = all.ArtBulkViscConst; + + + All.OutputListOn = all.OutputListOn; + All.CourantFac = all.CourantFac; + + All.OutputListLength = all.OutputListLength; + memcpy(All.OutputListTimes, all.OutputListTimes, sizeof(double) * All.OutputListLength); + + + strcpy(All.ResubmitCommand, all.ResubmitCommand); + strcpy(All.OutputListFilename, all.OutputListFilename); + strcpy(All.OutputDir, all.OutputDir); + strcpy(All.RestartFile, all.RestartFile); + strcpy(All.EnergyFile, all.EnergyFile); + strcpy(All.InfoFile, all.InfoFile); + strcpy(All.CpuFile, all.CpuFile); + strcpy(All.TimingsFile, all.TimingsFile); + strcpy(All.SnapshotFileBase, all.SnapshotFileBase); + + if(All.TimeMax != all.TimeMax) + readjust_timebase(All.TimeMax, all.TimeMax); + } + + } + + +static void Begrun2() + { + + #ifdef PMGRID + long_range_init_regionsize(); + #endif + + if(All.ComovingIntegrationOn) + init_drift_table(); + + if(RestartFlag == 2) + All.Ti_nextoutput = find_next_outputtime(All.Ti_Current + 1); + else + All.Ti_nextoutput = find_next_outputtime(All.Ti_Current); + + + All.TimeLastRestartFile = CPUThisRun; + + } + + + + + + + + + +/************************************************************/ +/* PYTHON INTERFACE */ +/************************************************************/ + + +static PyObject *gadget_Info(PyObject *self, PyObject *args, PyObject *kwds) + { + + printf("I am proc %d among %d procs.\n",ThisTask,NTask); + + return Py_BuildValue("i",1); + } + + + +static PyObject *gadget_InitMPI(PyObject *self, 
PyObject *args, PyObject *kwds) + { + + //MPI_Init(0, 0); /* this is done in mpi4py */ + MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask); + MPI_Comm_size(MPI_COMM_WORLD, &NTask); + + for(PTask = 0; NTask > (1 << PTask); PTask++); + + return Py_BuildValue("i",1); + } + + +static PyObject * gadget_InitDefaultParameters(PyObject* self) + { + /* list of Gadget parameters */ + + All.ComovingIntegrationOn = 0; + All.PeriodicBoundariesOn = 0; + + All.Omega0 = 0; + All.OmegaLambda = 0; + All.OmegaBaryon = 0; + All.HubbleParam = 0; + All.BoxSize = 0; + + All.ErrTolTheta = 0.7; + All.TypeOfOpeningCriterion = 0; + All.ErrTolForceAcc = 0.005; + + All.DesNumNgb = 33; + All.MaxNumNgbDeviation = 3; + + All.PartAllocFactor = 2.0; + All.TreeAllocFactor = 2.0; + All.BufferSize = 30; + + All.MinGasHsmlFractional = 0.25; + + All.SofteningGas = 0.5; + All.SofteningHalo = 0.5; + All.SofteningDisk = 0.5; + All.SofteningBulge = 0.5; + All.SofteningStars = 0.5; + All.SofteningBndry = 0.5; + + All.SofteningGasMaxPhys = 0.5; + All.SofteningHaloMaxPhys = 0.5; + All.SofteningDiskMaxPhys = 0.5; + All.SofteningBulgeMaxPhys = 0.5; + All.SofteningStarsMaxPhys = 0.5; + All.SofteningBndryMaxPhys = 0.5; + + return Py_BuildValue("i",1); + + } + + + + + + + + + + + + + + +static PyObject *gadget_LoadParticles(PyObject *self, PyObject *args, PyObject *kwds) + + + + { + + + int i,j; + size_t bytes; + + PyArrayObject *ntype,*pos,*vel,*mass,*num,*tpe; + + + static char *kwlist[] = {"npart", "pos","vel","mass","num","tpe", NULL}; + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OOOOOO",kwlist,&ntype,&pos,&vel,&mass,&num,&tpe)) + return Py_BuildValue("i",1); + + + + + /* check type */ + if (!(PyArray_Check(pos))) + { + PyErr_SetString(PyExc_ValueError,"aruments 1 must be array."); + return NULL; + } + + /* check type */ + if (!(PyArray_Check(mass))) + { + PyErr_SetString(PyExc_ValueError,"aruments 2 must be array."); + return NULL; + } + + /* check dimension */ + if ( (pos->nd!=2)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 1 must be 2."); + return NULL; + } + + /* check dimension */ + if ( (mass->nd!=1)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 2 must be 1."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[1]!=3)) + { + PyErr_SetString(PyExc_ValueError,"First size of argument must be 3."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[0]!=mass->dimensions[0])) + { + PyErr_SetString(PyExc_ValueError,"Size of argument 1 must be similar to argument 2."); + return NULL; + } + + + /* ensure double */ + ntype = TO_INT(ntype); + pos = TO_FLOAT(pos); + vel = TO_FLOAT(vel); + mass = TO_FLOAT(mass); + num = TO_FLOAT(num); + tpe = TO_FLOAT(tpe); + + + + + + + + /* count number of particles */ + NtypeLocal[0] = *(int*) (ntype->data + 0*(ntype->strides[0])); + NtypeLocal[1] = *(int*) (ntype->data + 1*(ntype->strides[0])); + NtypeLocal[2] = *(int*) (ntype->data + 2*(ntype->strides[0])); + NtypeLocal[3] = *(int*) (ntype->data + 3*(ntype->strides[0])); + NtypeLocal[4] = *(int*) (ntype->data + 4*(ntype->strides[0])); + NtypeLocal[5] = *(int*) (ntype->data + 5*(ntype->strides[0])); + + + NumPart = 0; + N_gas = NtypeLocal[0]; + for (i = 0; i < 6; i++) + NumPart += NtypeLocal[i]; + + MPI_Allreduce(&NumPart, &All.TotNumPart, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(&N_gas, &All.TotN_gas, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + + All.MaxPart = All.PartAllocFactor * (All.TotNumPart / NTask); + All.MaxPartSph = All.PartAllocFactor * (All.TotN_gas / NTask); + 
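/*
 * Illustrative sketch, not part of the patch: the strided-access idiom used
 * just below to read element (i, j) of a 2-D NumPy array that has already
 * been cast to NPY_FLOAT (see the TO_FLOAT macro at the top of this file).
 * It relies on the legacy direct-struct NumPy C API (a->data, a->strides)
 * used throughout this interface; the Python/NumPy headers are assumed to be
 * included at the top of the file.
 */
static float get_float_2d(PyArrayObject *a, int i, int j)
{
  /* strides are given in bytes, so the arithmetic is done on the char* data */
  return *(float *) (a->data + i * a->strides[0] + j * a->strides[1]);
}

/* e.g. P[i].Pos[0] = get_float_2d(pos, i, 0); is equivalent to the explicit
 * expression used in the particle-initialization loop below. */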
All.MinGasHsml = All.MinGasHsmlFractional * All.ForceSoftening[0]; + + + All.BunchSizeDomain = + (All.BufferSize * 1024 * 1024) / (sizeof(struct particle_data) + sizeof(struct sph_particle_data) + + sizeof(peanokey)); + + if(All.BunchSizeDomain & 1) + All.BunchSizeDomain -= 1; /* make sure that All.BunchSizeDomain is even + --> 8-byte alignment of DomainKeyBuf for 64bit processors */ + + + All.BunchSizeForce = + (All.BufferSize * 1024 * 1024) / (sizeof(struct gravdata_index) + 2 * sizeof(struct gravdata_in)); + + if(All.BunchSizeForce & 1) + All.BunchSizeForce -= 1; /* make sure that All.BunchSizeForce is an even number + --> 8-byte alignment for 64bit processors */ + + All.BunchSizeDensity = + (All.BufferSize * 1024 * 1024) / (2 * sizeof(struct densdata_in) + 2 * sizeof(struct densdata_out)); + + + /*********************/ + /* some allocation */ + /*********************/ + + + if(!(CommBuffer = malloc(bytes = All.BufferSize * 1024 * 1024))) + { + printf("failed to allocate memory for `CommBuffer' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(2); + } + + + Exportflag = malloc(NTask * sizeof(char)); + GravDataIndexTable = (struct gravdata_index *) CommBuffer; + GravDataIn = (struct gravdata_in *) (GravDataIndexTable + All.BunchSizeForce); + GravDataGet = GravDataIn + All.BunchSizeForce; + GravDataOut = GravDataIn; + GravDataResult = GravDataGet; + + DensDataIn = (struct densdata_in *) CommBuffer; + DensDataGet = DensDataIn + All.BunchSizeDensity; + DensDataResult = (struct densdata_out *) (DensDataGet + All.BunchSizeDensity); + DensDataPartialResult = DensDataResult + All.BunchSizeDensity; + + + /*********************/ + /* create P */ + /*********************/ + + if(!(P = malloc(bytes = All.MaxPart * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `P' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphP = malloc(bytes = All.MaxPartSph * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphP' (%g MB) %d.\n", bytes / (1024.0 * 1024.0), sizeof(struct sph_particle_data)); + endrun(1); + } + + + /*********************/ + /* init P */ + /*********************/ + + for (i = 0; i < pos->dimensions[0]; i++) + { + P[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + P[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + P[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + P[i].Vel[0] = *(float *) (vel->data + i*(vel->strides[0]) + 0*vel->strides[1]); + P[i].Vel[1] = *(float *) (vel->data + i*(vel->strides[0]) + 1*vel->strides[1]); + P[i].Vel[2] = *(float *) (vel->data + i*(vel->strides[0]) + 2*vel->strides[1]); + P[i].Mass = *(float *) (mass->data + i*(mass->strides[0])); + P[i].ID = *(unsigned int *) (num->data + i*(num->strides[0])); + P[i].Type = *(int *) (tpe->data + i*(tpe->strides[0])); /* this should be changed... */ + //P[i].Active = 1; + } + + + + /*************************************** + * some inits * + /***************************************/ + + RestartFlag = 0; + Begrun(); + + + + + /*************************************** + * init ewald * + /***************************************/ + + //if (All.PeriodicBoundariesOn) + // ewald_init(); + + + + /*************************************** + * domain decomposition construction * + /***************************************/ + + All.NumForcesSinceLastDomainDecomp = 1; /* a changer !!!! */ + All.TreeDomainUpdateFrequency = 0; /* a changer !!!! 
*/ + + allocate_commbuffers(); + domain_Decomposition(); + + + + + + + + + + + + + + + + return Py_BuildValue("i",1); + + } + + + + + + + +/* definition of the method table */ + +static PyMethodDef gadgetMethods[] = { + + {"Info", gadget_Info, METH_VARARGS, + "give some info"}, + + + {"InitMPI", gadget_InitMPI, METH_VARARGS, + "Init MPI"}, + + {"InitDefaultParameters", gadget_InitDefaultParameters, METH_VARARGS, + "Init default parameters"}, + + + {"LoadParticles", gadget_LoadParticles, METH_VARARGS, + "LoadParticles partilces"}, + + + {NULL, NULL, 0, NULL} /* Sentinel */ + }; + + + +void initgadget(void) + { + (void) Py_InitModule("gadget", gadgetMethods); + + import_array(); + } + diff --git a/src/PyGadget/src/python_interface.c b/src/PyGadget/src/python_interface.c new file mode 100644 index 0000000..15a9042 --- /dev/null +++ b/src/PyGadget/src/python_interface.c @@ -0,0 +1,2646 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +#define TO_INT(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_INT) ,0) ) +#define TO_DOUBLE(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_DOUBLE) ,0) ) +#define TO_FLOAT(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_FLOAT) ,0) ) + + + + + + + + +static int Init() + { + + /* main.c */ + + RestartFlag = 0; + + All.CPU_TreeConstruction = All.CPU_TreeWalk = All.CPU_Gravity = All.CPU_Potential = All.CPU_Domain = + All.CPU_Snapshot = All.CPU_Total = All.CPU_CommSum = All.CPU_Imbalance = All.CPU_Hydro = + All.CPU_HydCompWalk = All.CPU_HydCommSumm = All.CPU_HydImbalance = + All.CPU_EnsureNgb = All.CPU_Predict = All.CPU_TimeLine = All.CPU_PM = All.CPU_Peano = 0; + + CPUThisRun = 0; + + + + /* from init.c, after read ic */ + int i, j; + double a3; + + + + All.Time = All.TimeBegin; + All.Ti_Current = 0; + + if(All.ComovingIntegrationOn) + { + All.Timebase_interval = (log(All.TimeMax) - log(All.TimeBegin)) / TIMEBASE; + a3 = All.Time * All.Time * All.Time; + } + else + { + All.Timebase_interval = (All.TimeMax - All.TimeBegin) / TIMEBASE; + a3 = 1; + } + + set_softenings(); + + All.NumCurrentTiStep = 0; /* setup some counters */ + All.SnapshotFileCount = 0; + if(RestartFlag == 2) + All.SnapshotFileCount = atoi(All.InitCondFile + strlen(All.InitCondFile) - 3) + 1; + + All.TotNumOfForces = 0; + All.NumForcesSinceLastDomainDecomp = 0; + + if(All.ComovingIntegrationOn) + if(All.PeriodicBoundariesOn == 1) + check_omega(); + + All.TimeLastStatistics = All.TimeBegin - All.TimeBetStatistics; + + if(All.ComovingIntegrationOn) /* change to new velocity variable */ + { + for(i = 0; i < NumPart; i++) + for(j = 0; j < 3; j++) + P[i].Vel[j] *= sqrt(All.Time) * All.Time; + } + + for(i = 0; i < NumPart; i++) /* start-up initialization */ + { + for(j = 0; j < 3; j++) + P[i].GravAccel[j] = 0; +#ifdef PMGRID + for(j = 0; j < 3; j++) + P[i].GravPM[j] = 0; +#endif + P[i].Ti_endstep = 0; + P[i].Ti_begstep = 0; + + P[i].OldAcc = 0; + P[i].GravCost = 1; + P[i].Potential = 0; + } + +#ifdef PMGRID + All.PM_Ti_endstep = All.PM_Ti_begstep = 0; +#endif + +#ifdef FLEXSTEPS + All.PresentMinStep = TIMEBASE; + for(i = 0; i < NumPart; i++) /* start-up initialization */ + { + P[i].FlexStepGrp = (int) (TIMEBASE * get_random_number(P[i].ID)); + } +#endif + + + for(i = 0; i < N_gas; i++) /* initialize sph_properties */ + { + for(j = 0; j < 3; j++) + { + SphP[i].VelPred[j] = P[i].Vel[j]; + SphP[i].HydroAccel[j] = 0; + } + + SphP[i].DtEntropy = 0; + + if(RestartFlag == 0) + { + 
SphP[i].Hsml = 0; + SphP[i].Density = -1; + } + } + + ngb_treeallocate(MAX_NGB); + + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + + Flag_FullStep = 1; /* to ensure that Peano-Hilber order is done */ + + domain_Decomposition(); /* do initial domain decomposition (gives equal numbers of particles) */ + + ngb_treebuild(); /* will build tree */ + + setup_smoothinglengths(); + + TreeReconstructFlag = 1; + + /* at this point, the entropy variable normally contains the + * internal energy, read in from the initial conditions file, unless the file + * explicitly signals that the initial conditions contain the entropy directly. + * Once the density has been computed, we can convert thermal energy to entropy. + */ +#ifndef ISOTHERM_EQS + if(header.flag_entropy_instead_u == 0) + for(i = 0; i < N_gas; i++) + SphP[i].Entropy = GAMMA_MINUS1 * SphP[i].Entropy / pow(SphP[i].Density / a3, GAMMA_MINUS1); +#endif + + + + return 1; + } + + + + +static void Begrun1() + { + + + struct global_data_all_processes all; + + if(ThisTask == 0) + { + printf("\nThis is pyGadget, version `%s'.\n", GADGETVERSION); + printf("\nRunning on %d processors.\n", NTask); + } + + //read_parameter_file(ParameterFile); /* ... read in parameters for this run */ + + allocate_commbuffers(); /* ... allocate buffer-memory for particle + exchange during force computation */ + set_units(); + +#if defined(PERIODIC) && (!defined(PMGRID) || defined(FORCETEST)) + ewald_init(); +#endif + + //open_outputfiles(); + + random_generator = gsl_rng_alloc(gsl_rng_ranlxd1); + gsl_rng_set(random_generator, 42); /* start-up seed */ + +#ifdef PMGRID + long_range_init(); +#endif + + All.TimeLastRestartFile = CPUThisRun; + + if(RestartFlag == 0 || RestartFlag == 2) + { + set_random_numbers(); + + } + else + { + all = All; /* save global variables. (will be read from restart file) */ + + restart(RestartFlag); /* ... read restart file. Note: This also resets + all variables in the struct `All'. + However, during the run, some variables in the parameter + file are allowed to be changed, if desired. These need to + copied in the way below. + Note: All.PartAllocFactor is treated in restart() separately. 
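+                                         The copy-back below therefore restores only the run-time tunables
+                                         (time-step limits, buffer sizes, output cadence, error tolerances
+                                         and the various output file names) on top of the state read from
+                                         the restart file; if TimeMax was changed, readjust_timebase()
+                                         rescales the integer timeline accordingly.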
+ */ + + All.MinSizeTimestep = all.MinSizeTimestep; + All.MaxSizeTimestep = all.MaxSizeTimestep; + All.BufferSize = all.BufferSize; + All.BunchSizeForce = all.BunchSizeForce; + All.BunchSizeDensity = all.BunchSizeDensity; + All.BunchSizeHydro = all.BunchSizeHydro; + All.BunchSizeDomain = all.BunchSizeDomain; + + All.TimeLimitCPU = all.TimeLimitCPU; + All.ResubmitOn = all.ResubmitOn; + All.TimeBetSnapshot = all.TimeBetSnapshot; + All.TimeBetStatistics = all.TimeBetStatistics; + All.CpuTimeBetRestartFile = all.CpuTimeBetRestartFile; + All.ErrTolIntAccuracy = all.ErrTolIntAccuracy; + All.MaxRMSDisplacementFac = all.MaxRMSDisplacementFac; + + All.ErrTolForceAcc = all.ErrTolForceAcc; + + All.TypeOfTimestepCriterion = all.TypeOfTimestepCriterion; + All.TypeOfOpeningCriterion = all.TypeOfOpeningCriterion; + All.NumFilesWrittenInParallel = all.NumFilesWrittenInParallel; + All.TreeDomainUpdateFrequency = all.TreeDomainUpdateFrequency; + + All.SnapFormat = all.SnapFormat; + All.NumFilesPerSnapshot = all.NumFilesPerSnapshot; + All.MaxNumNgbDeviation = all.MaxNumNgbDeviation; + All.ArtBulkViscConst = all.ArtBulkViscConst; + + + All.OutputListOn = all.OutputListOn; + All.CourantFac = all.CourantFac; + + All.OutputListLength = all.OutputListLength; + memcpy(All.OutputListTimes, all.OutputListTimes, sizeof(double) * All.OutputListLength); + + + strcpy(All.ResubmitCommand, all.ResubmitCommand); + strcpy(All.OutputListFilename, all.OutputListFilename); + strcpy(All.OutputDir, all.OutputDir); + strcpy(All.RestartFile, all.RestartFile); + strcpy(All.EnergyFile, all.EnergyFile); + strcpy(All.InfoFile, all.InfoFile); + strcpy(All.CpuFile, all.CpuFile); + strcpy(All.TimingsFile, all.TimingsFile); + strcpy(All.SnapshotFileBase, all.SnapshotFileBase); + + if(All.TimeMax != all.TimeMax) + readjust_timebase(All.TimeMax, all.TimeMax); + } + + } + + +static void Begrun2() + { + + if(RestartFlag == 0 || RestartFlag == 2) + Init(); /* ... 
read in initial model */ + + + +#ifdef PMGRID + long_range_init_regionsize(); +#endif + + if(All.ComovingIntegrationOn) + init_drift_table(); + + //if(RestartFlag == 2) + // All.Ti_nextoutput = find_next_outputtime(All.Ti_Current + 1); + //else + // All.Ti_nextoutput = find_next_outputtime(All.Ti_Current); + + + All.TimeLastRestartFile = CPUThisRun; + + } + + + + + + + + + +/************************************************************/ +/* PYTHON INTERFACE */ +/************************************************************/ + + +static PyObject *gadget_Info(PyObject *self, PyObject *args, PyObject *kwds) + { + + printf("I am proc %d among %d procs.\n",ThisTask,NTask); + + return Py_BuildValue("i",1); + } + + + +static PyObject *gadget_InitMPI(PyObject *self, PyObject *args, PyObject *kwds) + { + + //MPI_Init(0, 0); /* this is done in mpi4py */ + MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask); + MPI_Comm_size(MPI_COMM_WORLD, &NTask); + + for(PTask = 0; NTask > (1 << PTask); PTask++); + + return Py_BuildValue("i",1); + } + + +static PyObject * gadget_InitDefaultParameters(PyObject* self) + { + + + /* list of Gadget parameters */ + + + /* + + All.InitCondFile ="ICs/cluster_littleendian.dat"; + All.OutputDir ="cluster/"; + + All.EnergyFile ="energy.txt"; + All.InfoFile ="info.txt"; + All.TimingsFile ="timings.txt"; + All.CpuFile ="cpu.txt"; + + All.RestartFile ="restart"; + All.SnapshotFileBase ="snapshot"; + + All.OutputListFilename ="parameterfiles/outputs_lcdm_gas.txt"; + + */ + + + /* CPU time -limit */ + + All.TimeLimitCPU = 36000; /* = 10 hours */ + All.ResubmitOn = 0; + //All.ResubmitCommand = "my-scriptfile"; + + + + + All.ICFormat = 1; + All.SnapFormat = 1; + All.ComovingIntegrationOn = 0; + + All.TypeOfTimestepCriterion = 0; + All.OutputListOn = 0; + All.PeriodicBoundariesOn = 0; + + /* Caracteristics of run */ + + All.TimeBegin = 0.0; /*% Begin of the simulation (z=23)*/ + All.TimeMax = 1.0; + + All.Omega0 = 0; + All.OmegaLambda = 0; + All.OmegaBaryon = 0; + All.HubbleParam = 0; + All.BoxSize = 0; + + + /* Output frequency */ + + All.TimeBetSnapshot = 0.1; + All.TimeOfFirstSnapshot = 0.0; /*% 5 constant steps in log(a) */ + + All.CpuTimeBetRestartFile = 36000.0; /* here in seconds */ + All.TimeBetStatistics = 0.05; + + All.NumFilesPerSnapshot = 1; + All.NumFilesWrittenInParallel = 1; + + + + /* Accuracy of time integration */ + + All.ErrTolIntAccuracy = 0.025; + All.MaxRMSDisplacementFac = 0.2; + All.CourantFac = 0.15; + All.MaxSizeTimestep = 0.03; + All.MinSizeTimestep = 0.0; + + + + + /* Tree algorithm, force accuracy, domain update frequency */ + + All.ErrTolTheta = 0.7; + All.TypeOfOpeningCriterion = 0; + All.ErrTolForceAcc = 0.005; + + + All.TreeDomainUpdateFrequency = 0.1; + + + /* Further parameters of SPH */ + + All.DesNumNgb = 50; + All.MaxNumNgbDeviation = 2; + All.ArtBulkViscConst = 0.8; + All.InitGasTemp = 0; + All.MinGasTemp = 0; + + + /* Memory allocation */ + + All.PartAllocFactor = 2.0; + All.TreeAllocFactor = 2.0; + All.BufferSize = 30; + + + /* System of units */ + + All.UnitLength_in_cm = 3.085678e21; /* 1.0 kpc */ + All.UnitMass_in_g = 1.989e43; /* 1.0e10 solar masses */ + All.UnitVelocity_in_cm_per_s = 1e5; /* 1 km/sec */ + All.GravityConstantInternal = 0; + + + /* Softening lengths */ + + All.MinGasHsmlFractional = 0.25; + + All.SofteningGas = 0.5; + All.SofteningHalo = 0.5; + All.SofteningDisk = 0.5; + All.SofteningBulge = 0.5; + All.SofteningStars = 0.5; + All.SofteningBndry = 0.5; + + All.SofteningGasMaxPhys = 0.5; + All.SofteningHaloMaxPhys = 0.5; + 
All.SofteningDiskMaxPhys = 0.5; + All.SofteningBulgeMaxPhys = 0.5; + All.SofteningStarsMaxPhys = 0.5; + All.SofteningBndryMaxPhys = 0.5; + + + + return Py_BuildValue("i",1); + + } + + + + + + + +static PyObject * gadget_GetParameters() +{ + + PyObject *dict; + PyObject *key; + PyObject *value; + + dict = PyDict_New(); + + /* CPU time -limit */ + + key = PyString_FromString("TimeLimitCPU"); + value = PyFloat_FromDouble(All.TimeLimitCPU); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("ResubmitOn"); + value = PyFloat_FromDouble(All.ResubmitOn); + PyDict_SetItem(dict,key,value); + + //All.ResubmitCommand + + + + key = PyString_FromString("ICFormat"); + value = PyInt_FromLong(All.ICFormat); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SnapFormat"); + value = PyInt_FromLong(All.SnapFormat); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("ComovingIntegrationOn"); + value = PyInt_FromLong(All.ComovingIntegrationOn); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("TypeOfTimestepCriterion"); + value = PyInt_FromLong(All.TypeOfTimestepCriterion); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("OutputListOn"); + value = PyInt_FromLong(All.OutputListOn); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("PeriodicBoundariesOn"); + value = PyInt_FromLong(All.PeriodicBoundariesOn); + PyDict_SetItem(dict,key,value); + + + /* Caracteristics of run */ + + + key = PyString_FromString("TimeBegin"); + value = PyFloat_FromDouble(All.TimeBegin); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("TimeMax"); + value = PyFloat_FromDouble(All.TimeMax); + PyDict_SetItem(dict,key,value); + + + key = PyString_FromString("Omega0"); + value = PyFloat_FromDouble(All.Omega0); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("OmegaLambda"); + value = PyFloat_FromDouble(All.OmegaLambda); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("OmegaBaryon"); + value = PyFloat_FromDouble(All.OmegaBaryon); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("HubbleParam"); + value = PyFloat_FromDouble(All.HubbleParam); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("BoxSize"); + value = PyFloat_FromDouble(All.BoxSize); + PyDict_SetItem(dict,key,value); + + + /* Output frequency */ + + key = PyString_FromString("TimeBetSnapshot"); + value = PyFloat_FromDouble(All.TimeBetSnapshot); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("TimeOfFirstSnapshot"); + value = PyFloat_FromDouble(All.TimeOfFirstSnapshot); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("CpuTimeBetRestartFile"); + value = PyFloat_FromDouble(All.CpuTimeBetRestartFile); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("TimeBetStatistics"); + value = PyFloat_FromDouble(All.TimeBetStatistics); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("NumFilesPerSnapshot"); + value = PyInt_FromLong(All.NumFilesPerSnapshot); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("NumFilesWrittenInParallel"); + value = PyInt_FromLong(All.NumFilesWrittenInParallel); + PyDict_SetItem(dict,key,value); + + + /* Accuracy of time integration */ + + + key = PyString_FromString("ErrTolIntAccuracy"); + value = PyFloat_FromDouble(All.ErrTolIntAccuracy); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("MaxRMSDisplacementFac"); + value = PyFloat_FromDouble(All.MaxRMSDisplacementFac); + PyDict_SetItem(dict,key,value); + + key = 
PyString_FromString("CourantFac"); + value = PyFloat_FromDouble(All.CourantFac); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("MaxSizeTimestep"); + value = PyFloat_FromDouble(All.MaxSizeTimestep); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("MinSizeTimestep"); + value = PyFloat_FromDouble(All.MinSizeTimestep); + PyDict_SetItem(dict,key,value); + + + /* Tree algorithm, force accuracy, domain update frequency */ + + + key = PyString_FromString("ErrTolTheta"); + value = PyFloat_FromDouble(All.ErrTolTheta); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("TypeOfOpeningCriterion"); + value = PyInt_FromLong(All.TypeOfOpeningCriterion); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("ErrTolForceAcc"); + value = PyFloat_FromDouble(All.ErrTolForceAcc); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("TreeDomainUpdateFrequency"); + value = PyFloat_FromDouble(All.TreeDomainUpdateFrequency); + PyDict_SetItem(dict,key,value); + + /* Further parameters of SPH */ + + key = PyString_FromString("DesNumNgb"); + value = PyInt_FromLong(All.DesNumNgb); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("MaxNumNgbDeviation"); + value = PyInt_FromLong(All.MaxNumNgbDeviation); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("ArtBulkViscConst"); + value = PyInt_FromLong(All.ArtBulkViscConst); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("InitGasTemp"); + value = PyInt_FromLong(All.InitGasTemp); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("MinGasTemp"); + value = PyInt_FromLong(All.MinGasTemp); + PyDict_SetItem(dict,key,value); + + /* Memory allocation */ + + key = PyString_FromString("PartAllocFactor"); + value = PyFloat_FromDouble(All.PartAllocFactor); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("TreeAllocFactor"); + value = PyFloat_FromDouble(All.TreeAllocFactor); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("BufferSize"); + value = PyInt_FromLong(All.BufferSize); + PyDict_SetItem(dict,key,value); + + /* System of units */ + + key = PyString_FromString("UnitLength_in_cm"); + value = PyFloat_FromDouble(All.UnitLength_in_cm); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("UnitMass_in_g"); + value = PyFloat_FromDouble(All.UnitMass_in_g); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("UnitVelocity_in_cm_per_s"); + value = PyFloat_FromDouble(All.UnitVelocity_in_cm_per_s); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("GravityConstantInternal"); + value = PyFloat_FromDouble(All.GravityConstantInternal); + PyDict_SetItem(dict,key,value); + + + /* Softening lengths */ + + key = PyString_FromString("MinGasHsmlFractional"); + value = PyFloat_FromDouble(All.MinGasHsmlFractional); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningGas"); + value = PyFloat_FromDouble(All.SofteningGas); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningHalo"); + value = PyFloat_FromDouble(All.SofteningHalo); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningDisk"); + value = PyFloat_FromDouble(All.SofteningDisk); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningBulge"); + value = PyFloat_FromDouble(All.SofteningBulge); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningStars"); + value = PyFloat_FromDouble(All.SofteningStars); + PyDict_SetItem(dict,key,value); + + key = 
PyString_FromString("SofteningBndry"); + value = PyFloat_FromDouble(All.SofteningBndry); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningGasMaxPhys"); + value = PyFloat_FromDouble(All.SofteningGasMaxPhys); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningHaloMaxPhys"); + value = PyFloat_FromDouble(All.SofteningHaloMaxPhys); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningDiskMaxPhys"); + value = PyFloat_FromDouble(All.SofteningDiskMaxPhys); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningBulgeMaxPhys"); + value = PyFloat_FromDouble(All.SofteningBulgeMaxPhys); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningStarsMaxPhys"); + value = PyFloat_FromDouble(All.SofteningStarsMaxPhys); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("SofteningBndryMaxPhys"); + value = PyFloat_FromDouble(All.SofteningBndryMaxPhys); + PyDict_SetItem(dict,key,value); + + /* + key = PyString_FromString("OutputInfo"); + value = PyFloat_FromDouble(All.OutputInfo); + PyDict_SetItem(dict,key,value); + + key = PyString_FromString("PeanoHilbertOrder"); + value = PyFloat_FromDouble(All.PeanoHilbertOrder); + PyDict_SetItem(dict,key,value); + */ + + return Py_BuildValue("O",dict); + + +} + + + +static PyObject * gadget_SetParameters(PyObject *self, PyObject *args) +{ + + PyObject *dict; + PyObject *key; + PyObject *value; + int ivalue; + float fvalue; + double dvalue; + + + /* here, we can have either arguments or dict directly */ + if(PyDict_Check(args)) + { + dict = args; + } + else + { + if (! PyArg_ParseTuple(args, "O",&dict)) + return NULL; + } + + /* check that it is a PyDictObject */ + if(!PyDict_Check(dict)) + { + PyErr_SetString(PyExc_AttributeError, "argument is not a dictionary."); + return NULL; + } + + Py_ssize_t pos=0; + while(PyDict_Next(dict,&pos,&key,&value)) + { + + if(PyString_Check(key)) + { + + + /* CPU time -limit */ + + if(strcmp(PyString_AsString(key), "TimeLimitCPU")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TimeLimitCPU = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "ResubmitOn")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.ResubmitOn = PyFloat_AsDouble(value); + } + + + + + + if(strcmp(PyString_AsString(key), "ICFormat")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.ICFormat = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "SnapFormat")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SnapFormat = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "ComovingIntegrationOn")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.ComovingIntegrationOn = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "TypeOfTimestepCriterion")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TypeOfTimestepCriterion = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "OutputListOn")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.OutputListOn = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "PeriodicBoundariesOn")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.PeriodicBoundariesOn = PyInt_AsLong(value); + } + + + /* Caracteristics of run */ + + if(strcmp(PyString_AsString(key), "TimeBegin")==0) + { + 
if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TimeBegin = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "TimeMax")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TimeMax = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "Omega0")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.Omega0 = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "OmegaLambda")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.OmegaLambda = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "OmegaBaryon")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.OmegaBaryon = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "HubbleParam")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.HubbleParam = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "BoxSize")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.BoxSize = PyFloat_AsDouble(value); + } + + + /* Output frequency */ + + + if(strcmp(PyString_AsString(key), "TimeBetSnapshot")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TimeBetSnapshot = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "TimeOfFirstSnapshot")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TimeOfFirstSnapshot = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "CpuTimeBetRestartFile")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.CpuTimeBetRestartFile = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "TimeBetStatistics")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TimeBetStatistics = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "NumFilesPerSnapshot")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.NumFilesPerSnapshot = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "NumFilesWrittenInParallel")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.NumFilesWrittenInParallel = PyInt_AsLong(value); + } + + + /* Accuracy of time integration */ + + + if(strcmp(PyString_AsString(key), "ErrTolIntAccuracy")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.ErrTolIntAccuracy = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "MaxRMSDisplacementFac")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.MaxRMSDisplacementFac = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "CourantFac")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.CourantFac = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "MaxSizeTimestep")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.MaxSizeTimestep = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "MinSizeTimestep")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.MinSizeTimestep = PyFloat_AsDouble(value); + } + + + /* Tree algorithm, force accuracy, domain update frequency */ + + + if(strcmp(PyString_AsString(key), "ErrTolTheta")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.ErrTolTheta = 
PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "TypeOfOpeningCriterion")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TypeOfOpeningCriterion = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "ErrTolForceAcc")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.ErrTolForceAcc = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "TreeDomainUpdateFrequency")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TreeDomainUpdateFrequency = PyFloat_AsDouble(value); + } + + + /* Further parameters of SPH */ + + + if(strcmp(PyString_AsString(key), "DesNumNgb")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.DesNumNgb = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "MaxNumNgbDeviation")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.MaxNumNgbDeviation = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "ArtBulkViscConst")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.ArtBulkViscConst = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "InitGasTemp")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.InitGasTemp = PyInt_AsLong(value); + } + + if(strcmp(PyString_AsString(key), "MinGasTemp")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.MinGasTemp = PyInt_AsLong(value); + } + + + /* Memory allocation */ + + if(strcmp(PyString_AsString(key), "PartAllocFactor")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.PartAllocFactor = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "TreeAllocFactor")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.TreeAllocFactor = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "BufferSize")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.BufferSize = PyInt_AsLong(value); + } + + /* System of units */ + + if(strcmp(PyString_AsString(key), "UnitLength_in_cm")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.UnitLength_in_cm = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "UnitMass_in_g")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.UnitMass_in_g = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "UnitVelocity_in_cm_per_s")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.UnitVelocity_in_cm_per_s = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "GravityConstantInternal")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.GravityConstantInternal = PyFloat_AsDouble(value); + } + + + /* Softening lengths */ + + if(strcmp(PyString_AsString(key), "MinGasHsmlFractional")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.MinGasHsmlFractional = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningGas")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningGas = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningHalo")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningHalo = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), 
"SofteningDisk")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningDisk = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningBulge")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningBulge = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningStars")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningStars = PyFloat_AsDouble(value); + } + + + if(strcmp(PyString_AsString(key), "SofteningBndry")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningBndry = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningGasMaxPhys")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningGasMaxPhys = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningHaloMaxPhys")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningHaloMaxPhys = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningDiskMaxPhys")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningDiskMaxPhys = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningBulgeMaxPhys")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningBulgeMaxPhys = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningStarsMaxPhys")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningStarsMaxPhys = PyFloat_AsDouble(value); + } + + if(strcmp(PyString_AsString(key), "SofteningBndryMaxPhys")==0) + { + if(PyInt_Check(value)||PyLong_Check(value)||PyFloat_Check(value)) + All.SofteningBndryMaxPhys = PyFloat_AsDouble(value); + } + + + + + + + } + } + + return Py_BuildValue("i",1); +} + + + + + + + + + +static PyObject *gadget_LoadParticles(PyObject *self, PyObject *args, PyObject *kwds) + { + + int i,j; + size_t bytes; + + PyArrayObject *ntype,*pos,*vel,*mass,*num,*tpe; + + + static char *kwlist[] = {"npart", "pos","vel","mass","num","tpe", NULL}; + + //if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OOOOOO",kwlist,&ntype,&pos,&vel,&mass,&num,&tpe)) + // return Py_BuildValue("i",1); + + if (! 
PyArg_ParseTuple(args, "OOOOOO",&ntype,&pos,&vel,&mass,&num,&tpe)) + return Py_BuildValue("i",1); + + + + /* check type */ + if (!(PyArray_Check(pos))) + { + PyErr_SetString(PyExc_ValueError,"aruments 1 must be array."); + return NULL; + } + + /* check type */ + if (!(PyArray_Check(mass))) + { + PyErr_SetString(PyExc_ValueError,"aruments 2 must be array."); + return NULL; + } + + /* check dimension */ + if ( (pos->nd!=2)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 1 must be 2."); + return NULL; + } + + /* check dimension */ + if ( (mass->nd!=1)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 2 must be 1."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[1]!=3)) + { + PyErr_SetString(PyExc_ValueError,"First size of argument must be 3."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[0]!=mass->dimensions[0])) + { + PyErr_SetString(PyExc_ValueError,"Size of argument 1 must be similar to argument 2."); + return NULL; + } + + + /* ensure double */ + ntype = TO_INT(ntype); + pos = TO_FLOAT(pos); + vel = TO_FLOAT(vel); + mass = TO_FLOAT(mass); + num = TO_FLOAT(num); + tpe = TO_FLOAT(tpe); + + + /*************************************** + * some inits * + /***************************************/ + + RestartFlag = 0; + Begrun1(); + + + /*************************************** + + * LOAD PARTILES * + + /***************************************/ + + + + NumPart = 0; + N_gas = *(int*) (ntype->data + 0*(ntype->strides[0])); + for (i = 0; i < 6; i++) + NumPart += *(int*) (ntype->data + i*(ntype->strides[0])); + + + if (NumPart!=pos->dimensions[0]) + { + PyErr_SetString(PyExc_ValueError,"Numpart != pos->dimensions[0]."); + return NULL; + } + + + MPI_Allreduce(&NumPart, &All.TotNumPart, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(&N_gas, &All.TotN_gas, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + + All.MaxPart = All.PartAllocFactor * (All.TotNumPart / NTask); + All.MaxPartSph = All.PartAllocFactor * (All.TotN_gas / NTask); + + /*********************/ + /* allocate P */ + /*********************/ + + if(!(P = malloc(bytes = All.MaxPart * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `P' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphP = malloc(bytes = All.MaxPartSph * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphP' (%g MB) %d.\n", bytes / (1024.0 * 1024.0), sizeof(struct sph_particle_data)); + endrun(1); + } + + + /*********************/ + /* init P */ + /*********************/ + + for (i = 0; i < NumPart; i++) + { + P[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + P[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + P[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + P[i].Vel[0] = *(float *) (vel->data + i*(vel->strides[0]) + 0*vel->strides[1]); + P[i].Vel[1] = *(float *) (vel->data + i*(vel->strides[0]) + 1*vel->strides[1]); + P[i].Vel[2] = *(float *) (vel->data + i*(vel->strides[0]) + 2*vel->strides[1]); + P[i].Mass = *(float *) (mass->data + i*(mass->strides[0])); + P[i].ID = *(unsigned int *) (num->data + i*(num->strides[0])); + P[i].Type = *(int *) (tpe->data + i*(tpe->strides[0])); + //P[i].Active = 1; + } + + + + /*************************************** + + * END LOAD PARTILES * + + /***************************************/ + + + + + /*************************************** + * finish inits * + /***************************************/ + + 
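+  /* Begrun2() completes the start-up that LoadParticles began: with
+     RestartFlag == 0 it runs Init() on the particle data copied above
+     (domain decomposition, tree build, initial smoothing lengths), sets
+     up the PM region size when PMGRID is enabled, and builds the drift
+     table for comoving integration. */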
Begrun2(); + + + + return Py_BuildValue("i",1); + + } + + + + +static PyObject *gadget_LoadParticlesQ(PyObject *self, PyObject *args, PyObject *kwds) + { + + int i,j; + size_t bytes; + + PyArrayObject *ntype,*pos,*vel,*mass,*num,*tpe; + + + static char *kwlist[] = {"npart", "pos","vel","mass","num","tpe", NULL}; + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OOOOOO",kwlist,&ntype,&pos,&vel,&mass,&num,&tpe)) + return Py_BuildValue("i",1); + + + + + /* check type */ + if (!(PyArray_Check(pos))) + { + PyErr_SetString(PyExc_ValueError,"aruments 1 must be array."); + return NULL; + } + + /* check type */ + if (!(PyArray_Check(mass))) + { + PyErr_SetString(PyExc_ValueError,"aruments 2 must be array."); + return NULL; + } + + /* check dimension */ + if ( (pos->nd!=2)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 1 must be 2."); + return NULL; + } + + /* check dimension */ + if ( (mass->nd!=1)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 2 must be 1."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[1]!=3)) + { + PyErr_SetString(PyExc_ValueError,"First size of argument must be 3."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[0]!=mass->dimensions[0])) + { + PyErr_SetString(PyExc_ValueError,"Size of argument 1 must be similar to argument 2."); + return NULL; + } + + + /* ensure double */ + ntype = TO_INT(ntype); + pos = TO_FLOAT(pos); + vel = TO_FLOAT(vel); + mass = TO_FLOAT(mass); + num = TO_FLOAT(num); + tpe = TO_FLOAT(tpe); + + + + /*************************************** + * some inits * + /***************************************/ + + allocate_commbuffersQ(); + + + /*************************************** + + * LOAD PARTILES * + + /***************************************/ + + + + NumPartQ = 0; + N_gasQ = *(int*) (ntype->data + 0*(ntype->strides[0])); + for (i = 0; i < 6; i++) + NumPartQ += *(int*) (ntype->data + i*(ntype->strides[0])); + + + if (NumPartQ!=pos->dimensions[0]) + { + PyErr_SetString(PyExc_ValueError,"NumpartQ != pos->dimensions[0]."); + return NULL; + } + + + MPI_Allreduce(&NumPartQ, &All.TotNumPartQ, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(&N_gasQ, &All.TotN_gasQ, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + + All.MaxPartQ = All.PartAllocFactor * (All.TotNumPartQ / NTask); + All.MaxPartSphQ = All.PartAllocFactor * (All.TotN_gasQ / NTask); + + + /*********************/ + /* allocate Q */ + /*********************/ + + if(!(Q = malloc(bytes = All.MaxPartQ * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `Q' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphQ = malloc(bytes = All.MaxPartSphQ * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphQ' (%g MB) %d.\n", bytes / (1024.0 * 1024.0), sizeof(struct sph_particle_data)); + endrun(1); + } + + + /*********************/ + /* init P */ + /*********************/ + + for (i = 0; i < NumPartQ; i++) + { + Q[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + Q[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + Q[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + Q[i].Vel[0] = *(float *) (vel->data + i*(vel->strides[0]) + 0*vel->strides[1]); + Q[i].Vel[1] = *(float *) (vel->data + i*(vel->strides[0]) + 1*vel->strides[1]); + Q[i].Vel[2] = *(float *) (vel->data + i*(vel->strides[0]) + 2*vel->strides[1]); + Q[i].Mass = *(float *) (mass->data + i*(mass->strides[0])); + Q[i].ID = *(unsigned int 
*) (num->data + i*(num->strides[0])); + Q[i].Type = *(int *) (tpe->data + i*(tpe->strides[0])); + //Q[i].Active = 1; + } + + + + /*************************************** + + * END LOAD PARTILES * + + /***************************************/ + + domain_DecompositionQ(); + + + /*************************************** + * finish inits * + /***************************************/ + + + + + + return Py_BuildValue("i",1); + + } + + + + +static PyObject *gadget_AllPotential(PyObject *self) +{ + compute_potential(); + return Py_BuildValue("i",1); +} + + + +static PyObject * gadget_GetAllPotential(PyObject* self) +{ + + PyArrayObject *pot; + npy_intp ld[1]; + int i; + + ld[0] = NumPart; + + pot = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_FLOAT); + + for (i = 0; i < pot->dimensions[0]; i++) + { + *(float *) (pot->data + i*(pot->strides[0])) = P[i].Potential; + } + + return PyArray_Return(pot); +} + + +static PyObject *gadget_AllAcceleration(PyObject *self) +{ + NumForceUpdate = NumPart; + gravity_tree(); + return Py_BuildValue("i",1); +} + + +static PyObject * gadget_GetAllAcceleration(PyObject* self) +{ + + PyArrayObject *acc; + npy_intp ld[2]; + int i; + + ld[0] = NumPart; + ld[1] = 3; + + acc = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); + + for (i = 0; i < acc->dimensions[0]; i++) + { + *(float *) (acc->data + i*(acc->strides[0]) + 0*acc->strides[1]) = P[i].GravAccel[0]; + *(float *) (acc->data + i*(acc->strides[0]) + 1*acc->strides[1]) = P[i].GravAccel[1]; + *(float *) (acc->data + i*(acc->strides[0]) + 2*acc->strides[1]) = P[i].GravAccel[2]; + } + + return PyArray_Return(acc); +} + + +static PyObject *gadget_GetAllDensities(PyObject* self) +{ + + PyArrayObject *rho; + npy_intp ld[1]; + int i; + + ld[0] = N_gas; + + rho = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_FLOAT); + + for (i = 0; i < rho->dimensions[0]; i++) + { + *(float *) (rho->data + i*(rho->strides[0])) = SphP[i].Density; + } + + return PyArray_Return(rho); +} + +static PyObject *gadget_GetAllHsml(PyObject* self) +{ + + PyArrayObject *hsml; + npy_intp ld[1]; + int i; + + ld[0] = N_gas; + + hsml = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_FLOAT); + + for (i = 0; i < hsml->dimensions[0]; i++) + { + *(float *) (hsml->data + i*(hsml->strides[0])) = SphP[i].Hsml; + } + + return PyArray_Return(hsml); +} + +static PyObject *gadget_GetAllPositions(PyObject* self) +{ + + PyArrayObject *pos; + npy_intp ld[2]; + int i; + + ld[0] = NumPart; + ld[1] = 3; + + pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = P[i].Pos[0]; + *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = P[i].Pos[1]; + *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = P[i].Pos[2]; + } + + return PyArray_Return(pos); + +} + +static PyObject *gadget_GetAllVelocities(PyObject* self) +{ + + PyArrayObject *vel; + npy_intp ld[2]; + int i; + + ld[0] = NumPart; + ld[1] = 3; + + vel = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); + + for (i = 0; i < vel->dimensions[0]; i++) + { + *(float *) (vel->data + i*(vel->strides[0]) + 0*vel->strides[1]) = P[i].Vel[0]; + *(float *) (vel->data + i*(vel->strides[0]) + 1*vel->strides[1]) = P[i].Vel[1]; + *(float *) (vel->data + i*(vel->strides[0]) + 2*vel->strides[1]) = P[i].Vel[2]; + } + + return PyArray_Return(vel); + +} + +static PyObject *gadget_GetAllMasses(PyObject* self) +{ + + PyArrayObject *mass; + npy_intp ld[1]; + int i; + + ld[0] = 
NumPart; + + mass = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_FLOAT); + + for (i = 0; i < mass->dimensions[0]; i++) + { + *(float *) (mass->data + i*(mass->strides[0])) = P[i].Mass; + } + + return PyArray_Return(mass); +} + +static PyObject *gadget_GetAllID(PyObject* self) +{ + + PyArrayObject *id; + npy_intp ld[1]; + int i; + + ld[0] = NumPart; + + id = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_INT); + + for (i = 0; i < id->dimensions[0]; i++) + { + *(float *) (id->data + i*(id->strides[0])) = P[i].ID; + } + + return PyArray_Return(id); +} + + +static PyObject *gadget_GetAllTypes(PyObject* self) +{ + + PyArrayObject *type; + npy_intp ld[1]; + int i; + + ld[0] = NumPart; + + type = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_INT); + + for (i = 0; i < type->dimensions[0]; i++) + { + *(int *) (type->data + i*(type->strides[0])) = P[i].Type; + } + + return PyArray_Return(type); +} + + + + +static PyObject *gadget_GetAllPositionsQ(PyObject* self) +{ + + PyArrayObject *pos; + npy_intp ld[2]; + int i; + + ld[0] = NumPartQ; + ld[1] = 3; + + pos = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = Q[i].Pos[0]; + *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = Q[i].Pos[1]; + *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = Q[i].Pos[2]; + } + + return PyArray_Return(pos); + +} + +static PyObject *gadget_GetAllVelocitiesQ(PyObject* self) +{ + + PyArrayObject *vel; + npy_intp ld[2]; + int i; + + ld[0] = NumPartQ; + ld[1] = 3; + + vel = (PyArrayObject *) PyArray_SimpleNew(2,ld,PyArray_FLOAT); + + for (i = 0; i < vel->dimensions[0]; i++) + { + *(float *) (vel->data + i*(vel->strides[0]) + 0*vel->strides[1]) = Q[i].Vel[0]; + *(float *) (vel->data + i*(vel->strides[0]) + 1*vel->strides[1]) = Q[i].Vel[1]; + *(float *) (vel->data + i*(vel->strides[0]) + 2*vel->strides[1]) = Q[i].Vel[2]; + } + + return PyArray_Return(vel); + +} + +static PyObject *gadget_GetAllMassesQ(PyObject* self) +{ + + PyArrayObject *mass; + npy_intp ld[1]; + int i; + + ld[0] = NumPartQ; + + mass = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_FLOAT); + + for (i = 0; i < mass->dimensions[0]; i++) + { + *(float *) (mass->data + i*(mass->strides[0])) = Q[i].Mass; + } + + return PyArray_Return(mass); +} + +static PyObject *gadget_GetAllIDQ(PyObject* self) +{ + + PyArrayObject *id; + npy_intp ld[1]; + int i; + + ld[0] = NumPartQ; + + id = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_INT); + + for (i = 0; i < id->dimensions[0]; i++) + { + *(float *) (id->data + i*(id->strides[0])) = Q[i].ID; + } + + return PyArray_Return(id); +} + +static PyObject *gadget_GetAllTypesQ(PyObject* self) +{ + + PyArrayObject *type; + npy_intp ld[1]; + int i; + + ld[0] = NumPartQ; + + type = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_INT); + + for (i = 0; i < type->dimensions[0]; i++) + { + *(int *) (type->data + i*(type->strides[0])) = Q[i].Type; + } + + return PyArray_Return(type); +} + + + + + + + +static PyObject *gadget_GetPos(PyObject *self, PyObject *args, PyObject *kwds) +{ + + + + int i,j; + size_t bytes; + + PyArrayObject *pos; + + + + if (! 
PyArg_ParseTuple(args, "O",&pos)) + return PyString_FromString("error : GetPos"); + + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]) = P[i].Pos[0]; + *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]) = P[i].Pos[1]; + *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]) = P[i].Pos[2]; + + } + + //return PyArray_Return(Py_None); + return Py_BuildValue("i",1); + +} + + + + + +static PyObject * gadget_Potential(PyObject* self, PyObject *args) +{ + + + PyArrayObject *pos; + float eps; + + if (! PyArg_ParseTuple(args, "Of",&pos,&eps)) + return PyString_FromString("error"); + + PyArrayObject *pot; + int i; + npy_intp ld[1]; + int input_dimension; + size_t bytes; + + input_dimension =pos->nd; + + if (input_dimension != 2) + PyErr_SetString(PyExc_ValueError,"dimension of first argument must be 2"); + + + pos = TO_FLOAT(pos); + + + /* create a NumPy object */ + ld[0]=pos->dimensions[0]; + pot = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_FLOAT); + + + NumPartQ = pos->dimensions[0]; + All.ForceSofteningQ = eps; + + + if(!(Q = malloc(bytes = NumPartQ * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `Q' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphQ = malloc(bytes = NumPartQ * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphQ' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + + for (i = 0; i < pos->dimensions[0]; i++) + { + Q[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + Q[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + Q[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + Q[i].Type = 0; + Q[i].Mass = 0; + Q[i].Potential = 0; + } + + + compute_potential_sub(); + + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *)(pot->data + i*(pot->strides[0])) = Q[i].Potential; + } + + + free(Q); + free(SphQ); + + return PyArray_Return(pot); +} + + + + +static PyObject * gadget_Acceleration(PyObject* self, PyObject *args) +{ + + + PyArrayObject *pos; + float eps; + + if (! 
PyArg_ParseTuple(args, "Of",&pos,&eps)) + return PyString_FromString("error"); + + PyArrayObject *acc; + int i; + int input_dimension; + size_t bytes; + + input_dimension =pos->nd; + + if (input_dimension != 2) + PyErr_SetString(PyExc_ValueError,"dimension of first argument must be 2"); + + + pos = TO_FLOAT(pos); + + + /* create a NumPy object */ + acc = (PyArrayObject *) PyArray_SimpleNew(pos->nd,pos->dimensions,PyArray_FLOAT); + + + NumPartQ = pos->dimensions[0]; + All.ForceSofteningQ = eps; + + + if(!(Q = malloc(bytes = NumPartQ * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `Q' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphQ = malloc(bytes = NumPartQ * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphQ' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + for (i = 0; i < pos->dimensions[0]; i++) + { + Q[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + Q[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + Q[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + Q[i].Type = 0; + Q[i].Mass = 0; + Q[i].GravAccel[0] = 0; + Q[i].GravAccel[1] = 0; + Q[i].GravAccel[2] = 0; + } + + gravity_tree_sub(); + + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *)(acc->data + i*(acc->strides[0]) + 0*acc->strides[1]) = Q[i].GravAccel[0]; + *(float *)(acc->data + i*(acc->strides[0]) + 1*acc->strides[1]) = Q[i].GravAccel[1]; + *(float *)(acc->data + i*(acc->strides[0]) + 2*acc->strides[1]) = Q[i].GravAccel[2]; + } + + + free(Q); + free(SphQ); + + return PyArray_Return(acc); +} + + + + +static PyObject * gadget_InitHsml(PyObject* self, PyObject *args) +{ + + + PyArrayObject *pos,*hsml; + + if (! 
PyArg_ParseTuple(args, "OO",&pos,&hsml)) + return PyString_FromString("error"); + + int i; + int input_dimension; + size_t bytes; + int ld[1]; + PyArrayObject *vden,*vhsml; + + input_dimension =pos->nd; + + if (input_dimension != 2) + PyErr_SetString(PyExc_ValueError,"dimension of first argument must be 2"); + + if (pos->dimensions[0] != hsml->dimensions[0]) + PyErr_SetString(PyExc_ValueError,"pos and hsml must have the same dimension."); + + + pos = TO_FLOAT(pos); + hsml = TO_FLOAT(hsml); + + /* create a NumPy object */ + ld[0]=pos->dimensions[0]; + vden = (PyArrayObject *) PyArray_SimpleNew(1,pos->dimensions,pos->descr->type_num); + vhsml = (PyArrayObject *) PyArray_SimpleNew(1,pos->dimensions,pos->descr->type_num); + + + + NumPartQ = pos->dimensions[0]; + N_gasQ = NumPartQ; + All.Ti_Current=1; /* need to flag active particles */ + + + if(!(Q = malloc(bytes = NumPartQ * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `Q' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphQ = malloc(bytes = NumPartQ * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphQ' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + for (i = 0; i < pos->dimensions[0]; i++) + { + Q[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + Q[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + Q[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + SphQ[i].Hsml = *(float *) (hsml->data + i*(hsml->strides[0])); + } + + setup_smoothinglengths_sub(); + + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *)(vhsml->data + i*(vhsml->strides[0])) = SphQ[i].Hsml; + *(float *)(vden->data + i*(vden->strides[0])) = SphQ[i].Density; + } + + + free(Q); + free(SphQ); + + return Py_BuildValue("OO",vden,vhsml); +} + + +static PyObject * gadget_Density(PyObject* self, PyObject *args) +{ + + + PyArrayObject *pos,*hsml; + + if (! 
PyArg_ParseTuple(args, "OO",&pos,&hsml)) + return PyString_FromString("error"); + + int i; + int input_dimension; + size_t bytes; + int ld[1]; + PyArrayObject *vden,*vhsml; + + input_dimension =pos->nd; + + if (input_dimension != 2) + PyErr_SetString(PyExc_ValueError,"dimension of first argument must be 2"); + + if (pos->dimensions[0] != hsml->dimensions[0]) + PyErr_SetString(PyExc_ValueError,"pos and hsml must have the same dimension."); + + + pos = TO_FLOAT(pos); + hsml = TO_FLOAT(hsml); + + /* create a NumPy object */ + ld[0]=pos->dimensions[0]; + vden = (PyArrayObject *) PyArray_SimpleNew(1,pos->dimensions,pos->descr->type_num); + vhsml = (PyArrayObject *) PyArray_SimpleNew(1,pos->dimensions,pos->descr->type_num); + + + + NumPartQ = pos->dimensions[0]; + N_gasQ = NumPartQ; + All.Ti_Current=1; /* need to flag active particles */ + + + if(!(Q = malloc(bytes = NumPartQ * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `Q' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphQ = malloc(bytes = NumPartQ * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphQ' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + for (i = 0; i < pos->dimensions[0]; i++) + { + Q[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + Q[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + Q[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + SphQ[i].Hsml = *(float *) (hsml->data + i*(hsml->strides[0])); + } + + density_sub(); + + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *)(vhsml->data + i*(vhsml->strides[0])) = SphQ[i].Hsml; + *(float *)(vden->data + i*(vden->strides[0])) = SphQ[i].Density; + } + + + free(Q); + free(SphQ); + + return Py_BuildValue("OO",vden,vhsml); +} + + + + +static PyObject * gadget_SphEvaluate(PyObject* self, PyObject *args) +{ + + + PyArrayObject *pos,*hsml,*obs; + + if (! 
PyArg_ParseTuple(args, "OOO",&pos,&hsml,&obs)) + return PyString_FromString("error"); + + int i; + int input_dimension; + size_t bytes; + int ld[1]; + PyArrayObject *vobs; + + input_dimension =pos->nd; + + if (input_dimension != 2) + PyErr_SetString(PyExc_ValueError,"dimension of first argument must be 2"); + + if (pos->dimensions[0] != hsml->dimensions[0]) + PyErr_SetString(PyExc_ValueError,"pos and hsml must have the same dimension."); + + + if (obs->nd != 1) + PyErr_SetString(PyExc_ValueError,"dimension of obs must be 1."); + + if (obs->dimensions[0] != NumPart) + PyErr_SetString(PyExc_ValueError,"The size of obs must be NumPart."); + + + pos = TO_FLOAT(pos); + hsml = TO_FLOAT(hsml); + obs = TO_FLOAT(obs); + + /* create a NumPy object */ + ld[0]=pos->dimensions[0]; + vobs = (PyArrayObject *) PyArray_SimpleNew(1,pos->dimensions,pos->descr->type_num); + + + + NumPartQ = pos->dimensions[0]; + N_gasQ = NumPartQ; + All.Ti_Current=1; /* need to flag active particles */ + + + if(!(Q = malloc(bytes = NumPartQ * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `Q' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphQ = malloc(bytes = NumPartQ * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphQ' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + for (i = 0; i < pos->dimensions[0]; i++) + { + Q[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + Q[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + Q[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + SphQ[i].Hsml = *(float *) (hsml->data + i*(hsml->strides[0])); + } + + + /* now, give observable value for P */ + + for (i = 0; i < NumPart; i++) + { + SphP[i].Observable = *(float *) (obs->data + i*(obs->strides[0])); + } + + + sph_sub(); + + + for (i = 0; i < pos->dimensions[0]; i++) + { + *(float *)(vobs->data + i*(vobs->strides[0])) = SphQ[i].Observable; + } + + + free(Q); + free(SphQ); + + return Py_BuildValue("O",vobs); +} + + + +static PyObject * gadget_Ngbs(PyObject* self, PyObject *args) +{ + + + PyArrayObject *pos; + float eps; + + if (! 
PyArg_ParseTuple(args, "Of",&pos,&eps)) + return PyString_FromString("error"); + + PyArrayObject *poss; + int i,j,n,nn; + int input_dimension; + size_t bytes; + int startnode,numngb; + FLOAT searchcenter[3]; + + double dx,dy,dz,r2,eps2; + + input_dimension =pos->nd; + + if (input_dimension != 1) + PyErr_SetString(PyExc_ValueError,"dimension of first argument must be 1"); + + + pos = TO_FLOAT(pos); + eps2 = eps*eps; + + searchcenter[0] = (FLOAT)*(float *) (pos->data + 0*(pos->strides[0])); + searchcenter[1] = (FLOAT)*(float *) (pos->data + 1*(pos->strides[0])); + searchcenter[2] = (FLOAT)*(float *) (pos->data + 2*(pos->strides[0])); + + + startnode = All.MaxPart; + + + + + /* ici, il faut faire une fct qui fonctionne en //, cf hydra --> Exportflag */ + numngb = ngb_treefind_pairs(&searchcenter[0], (FLOAT)eps, &startnode); + + nn=0; + + for(n = 0;n < numngb; n++) + { + j = Ngblist[n]; + + dx = searchcenter[0] - P[j].Pos[0]; + dy = searchcenter[1] - P[j].Pos[1]; + dz = searchcenter[2] - P[j].Pos[2]; + + r2 = dx * dx + dy * dy + dz * dz; + + if (r2<=eps2) + { + printf("%d r=%g\n",nn,sqrt(r2)); + nn++; + } + } + + + + + + + return PyArray_Return(pos); +} + + + + + + + +static PyObject *gadget_LoadParticles2(PyObject *self, PyObject *args, PyObject *kwds) + { + + int i,j; + size_t bytes; + + PyArrayObject *ntype,*pos,*vel,*mass,*num,*tpe; + + + static char *kwlist[] = {"npart", "pos","vel","mass","num","tpe", NULL}; + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OOOOOO",kwlist,&ntype,&pos,&vel,&mass,&num,&tpe)) + return Py_BuildValue("i",1); + + + + + /* check type */ + if (!(PyArray_Check(pos))) + { + PyErr_SetString(PyExc_ValueError,"aruments 1 must be array."); + return NULL; + } + + /* check type */ + if (!(PyArray_Check(mass))) + { + PyErr_SetString(PyExc_ValueError,"aruments 2 must be array."); + return NULL; + } + + /* check dimension */ + if ( (pos->nd!=2)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 1 must be 2."); + return NULL; + } + + /* check dimension */ + if ( (mass->nd!=1)) + { + PyErr_SetString(PyExc_ValueError,"Dimension of argument 2 must be 1."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[1]!=3)) + { + PyErr_SetString(PyExc_ValueError,"First size of argument must be 3."); + return NULL; + } + + /* check size */ + if ( (pos->dimensions[0]!=mass->dimensions[0])) + { + PyErr_SetString(PyExc_ValueError,"Size of argument 1 must be similar to argument 2."); + return NULL; + } + + + /* ensure double */ +// ntype = TO_INT(ntype); +// pos = TO_FLOAT(pos); +// vel = TO_FLOAT(vel); +// mass = TO_FLOAT(mass); +// num = TO_FLOAT(num); +// tpe = TO_FLOAT(tpe); + + + + /*************************************** + * some inits * + /***************************************/ + + RestartFlag = 0; + Begrun1(); + + + /*************************************** + + * LOAD PARTILES * + + /***************************************/ + + + + NumPart = 0; + N_gas = *(int*) (ntype->data + 0*(ntype->strides[0])); + for (i = 0; i < 6; i++) + NumPart += *(int*) (ntype->data + i*(ntype->strides[0])); + + + if (NumPart!=pos->dimensions[0]) + { + PyErr_SetString(PyExc_ValueError,"Numpart != pos->dimensions[0]."); + return NULL; + } + + + MPI_Allreduce(&NumPart, &All.TotNumPart, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(&N_gas, &All.TotN_gas, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + + All.MaxPart = All.PartAllocFactor * (All.TotNumPart / NTask); + All.MaxPartSph = All.PartAllocFactor * (All.TotN_gas / NTask); + + /*********************/ + /* allocate P */ + 
/*********************/ + + if(!(P = malloc(bytes = All.MaxPart * sizeof(struct particle_data)))) + { + printf("failed to allocate memory for `P' (%g MB).\n", bytes / (1024.0 * 1024.0)); + endrun(1); + } + + if(!(SphP = malloc(bytes = All.MaxPartSph * sizeof(struct sph_particle_data)))) + { + printf("failed to allocate memory for `SphP' (%g MB) %d.\n", bytes / (1024.0 * 1024.0), sizeof(struct sph_particle_data)); + endrun(1); + } + + + /*********************/ + /* init P */ + /*********************/ + + float * fpt; + + for (i = 0; i < NumPart; i++) + { + + //P[i].Pos[0] = *(float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + //P[i].Pos[1] = *(float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + //P[i].Pos[2] = *(float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + + //&P[i].Pos[0] = (float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + //&P[i].Pos[1] = (float *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]); + //&P[i].Pos[2] = (float *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]); + + fpt = (float *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]); + + + + + P[i].Vel[0] = *(float *) (vel->data + i*(vel->strides[0]) + 0*vel->strides[1]); + P[i].Vel[1] = *(float *) (vel->data + i*(vel->strides[0]) + 1*vel->strides[1]); + P[i].Vel[2] = *(float *) (vel->data + i*(vel->strides[0]) + 2*vel->strides[1]); + P[i].Mass = *(float *) (mass->data + i*(mass->strides[0])); + P[i].ID = *(unsigned int *) (num->data + i*(num->strides[0])); + P[i].Type = *(int *) (tpe->data + i*(tpe->strides[0])); + //P[i].Active = 1; + } + + + + /*************************************** + + * END LOAD PARTILES * + + /***************************************/ + + + + + /*************************************** + * finish inits * + /***************************************/ + + Begrun2(); + + + + return Py_BuildValue("i",1); + + } + + + + + + + + + + +/* definition of the method table */ + +static PyMethodDef gadgetMethods[] = { + + {"Info", (PyCFunction)gadget_Info, METH_VARARGS, + "give some info"}, + + + {"InitMPI", (PyCFunction)gadget_InitMPI, METH_VARARGS, + "Init MPI"}, + + {"InitDefaultParameters", (PyCFunction)gadget_InitDefaultParameters, METH_VARARGS, + "Init default parameters"}, + + {"GetParameters", (PyCFunction)gadget_GetParameters, METH_VARARGS, + "get gadget parameters"}, + + {"SetParameters", (PyCFunction)gadget_SetParameters, METH_VARARGS, + "Set gadget parameters"}, + + + + + {"LoadParticles", (PyCFunction)gadget_LoadParticles, METH_VARARGS, + "LoadParticles partilces"}, + + {"LoadParticlesQ", (PyCFunction)gadget_LoadParticlesQ, METH_VARARGS, + "LoadParticles partilces Q"}, + + {"LoadParticles2", (PyCFunction)gadget_LoadParticles2, METH_VARARGS, + "LoadParticles partilces"}, + + + + + {"AllPotential", (PyCFunction)gadget_AllPotential, METH_VARARGS, + "Computes the potential for each particle"}, + + {"AllAcceleration", (PyCFunction)gadget_AllAcceleration, METH_VARARGS, + "Computes the gravitational acceleration for each particle"}, + + + + + + + {"GetAllAcceleration", (PyCFunction)gadget_GetAllAcceleration, METH_VARARGS, + "get the gravitational acceleration for each particle"}, + + {"GetAllPotential", (PyCFunction)gadget_GetAllPotential, METH_VARARGS, + "get the potential for each particle"}, + + {"GetAllDensities", (PyCFunction)gadget_GetAllDensities, METH_VARARGS, + "get the densities for each particle"}, + + {"GetAllHsml", (PyCFunction)gadget_GetAllHsml, METH_VARARGS, + "get the sph smoothing length for each particle"}, + + 
{"GetAllPositions", (PyCFunction)gadget_GetAllPositions, METH_VARARGS, + "get the position for each particle"}, + + {"GetAllVelocities", (PyCFunction)gadget_GetAllVelocities, METH_VARARGS, + "get the velocities for each particle"}, + + {"GetAllMasses", (PyCFunction)gadget_GetAllMasses, METH_VARARGS, + "get the mass for each particle"}, + + {"GetAllID", (PyCFunction)gadget_GetAllID, METH_VARARGS, + "get the ID for each particle"}, + + {"GetAllTypes", (PyCFunction)gadget_GetAllTypes, METH_VARARGS, + "get the type for each particle"}, + + + + + {"GetPos", (PyCFunction)gadget_GetPos, METH_VARARGS, + "get the position for each particle (no memory overhead)"}, + + + {"Potential", (PyCFunction)gadget_Potential, METH_VARARGS, + "get the potential for a givent sample of points"}, + + {"Acceleration", (PyCFunction)gadget_Acceleration, METH_VARARGS, + "get the acceleration for a givent sample of points"}, + + + + + {"InitHsml", (PyCFunction)gadget_InitHsml, METH_VARARGS, + "Init hsml based on the three for a given number of points"}, + + {"Density", (PyCFunction)gadget_Density, METH_VARARGS, + "compute Density based on the three for a given number of points"}, + + {"SphEvaluate", (PyCFunction)gadget_SphEvaluate, METH_VARARGS, + "compute mean value based on the sph convolution for a given number of points"}, + + + + + + + {"Ngbs", (PyCFunction)gadget_Ngbs, METH_VARARGS, + "return the position of the neighbors for a given point"}, + + + + + {"GetAllPositionsQ", (PyCFunction)gadget_GetAllPositionsQ, METH_VARARGS, + "get the position for each particle Q"}, + + {"GetAllVelocitiesQ", (PyCFunction)gadget_GetAllVelocitiesQ, METH_VARARGS, + "get the velocities for each particle Q"}, + + {"GetAllMassesQ", (PyCFunction)gadget_GetAllMassesQ, METH_VARARGS, + "get the mass for each particle Q"}, + + {"GetAllIDQ", (PyCFunction)gadget_GetAllIDQ, METH_VARARGS, + "get the ID for each particle Q"}, + + {"GetAllTypesQ", (PyCFunction)gadget_GetAllTypesQ, METH_VARARGS, + "get the type for each particle Q"}, + + + + + + + + {NULL, NULL, 0, NULL} /* Sentinel */ + }; + + + +void initgadget(void) + { + (void) Py_InitModule("gadget", gadgetMethods); + + import_array(); + } + diff --git a/src/PyGadget/src/python_interface.o b/src/PyGadget/src/python_interface.o new file mode 100644 index 0000000..d31c283 Binary files /dev/null and b/src/PyGadget/src/python_interface.o differ diff --git a/src/PyGadget/src/read_ic.c b/src/PyGadget/src/read_ic.c new file mode 100644 index 0000000..e0cc990 --- /dev/null +++ b/src/PyGadget/src/read_ic.c @@ -0,0 +1,804 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file read_ic.c + * \brief Read initial conditions in one of Gadget's file formats + */ + +/*! This function reads initial conditions, in one of the three possible file + * formats currently supported by Gadget. Note: When a snapshot file is + * started from initial conditions (start-option 0), not all the information + * in the header is used, in particular, the STARTING TIME needs to be set in + * the parameterfile. Also, for gas particles, only the internal energy is + * read, the density and mean molecular weight will be recomputed by the + * code. When InitGasTemp>0 is given, the gas temperature will be initialzed + * to this value assuming a mean colecular weight either corresponding to + * complete neutrality, or full ionization. + * + * However, when the code is started with start-option 2, then all the this + * data in the snapshot files is preserved, i.e. 
this is also the way to + * resume a simulation from a snapshot file in case a regular restart file is + * not available. + */ +void read_ic(char *fname) +{ + int i, num_files, rest_files, ngroups, gr, filenr, masterTask, lastTask, groupMaster; + double u_init; + char buf[500]; + +#ifndef ISOTHERM_EQS + double molecular_weight; +#endif +#ifdef SFR + double original_gas_mass, mass, masstot; +#endif + + NumPart = 0; + N_gas = 0; + All.TotNumPart = 0; + + num_files = find_files(fname); + + rest_files = num_files; + + fill_Tab_IO_Labels(); + + while(rest_files > NTask) + { + sprintf(buf, "%s.%d", fname, ThisTask + (rest_files - NTask)); + if(All.ICFormat == 3) + sprintf(buf, "%s.%d.hdf5", fname, ThisTask + (rest_files - NTask)); + + ngroups = NTask / All.NumFilesWrittenInParallel; + if((NTask % All.NumFilesWrittenInParallel)) + ngroups++; + groupMaster = (ThisTask / ngroups) * ngroups; + + for(gr = 0; gr < ngroups; gr++) + { + if(ThisTask == (groupMaster + gr)) /* ok, it's this processor's turn */ + read_file(buf, ThisTask, ThisTask); + MPI_Barrier(MPI_COMM_WORLD); + } + + rest_files -= NTask; + } + + + if(rest_files > 0) + { + distribute_file(rest_files, 0, 0, NTask - 1, &filenr, &masterTask, &lastTask); + + if(num_files > 1) + { + sprintf(buf, "%s.%d", fname, filenr); + if(All.ICFormat == 3) + sprintf(buf, "%s.%d.hdf5", fname, filenr); + } + else + { + sprintf(buf, "%s", fname); + if(All.ICFormat == 3) + sprintf(buf, "%s.hdf5", fname); + } + + ngroups = rest_files / All.NumFilesWrittenInParallel; + if((rest_files % All.NumFilesWrittenInParallel)) + ngroups++; + + for(gr = 0; gr < ngroups; gr++) + { + if((filenr / All.NumFilesWrittenInParallel) == gr) /* ok, it's this processor's turn */ + read_file(buf, masterTask, lastTask); + MPI_Barrier(MPI_COMM_WORLD); + } + } + + + /* this makes sure that masses are initialized in the case that the mass-block + is completely empty */ + for(i = 0; i < NumPart; i++) + { + if(All.MassTable[P[i].Type] != 0) + P[i].Mass = All.MassTable[P[i].Type]; + } + + if(RestartFlag == 0) + { + if(All.InitGasTemp > 0) + { + u_init = (BOLTZMANN / PROTONMASS) * All.InitGasTemp; + u_init *= All.UnitMass_in_g / All.UnitEnergy_in_cgs; /* unit conversion */ + +#ifdef ISOTHERM_EQS + u_init *= 1.0; +#else + u_init *= (1.0 / GAMMA_MINUS1); + + if(All.InitGasTemp > 1.0e4) /* assuming FULL ionization */ + molecular_weight = 4 / (8 - 5 * (1 - HYDROGEN_MASSFRAC)); + else /* assuming NEUTRAL GAS */ + molecular_weight = 4 / (1 + 3 * HYDROGEN_MASSFRAC); + + u_init /= molecular_weight; +#endif + + for(i = 0; i < N_gas; i++) + { + if(SphP[i].Entropy == 0) + SphP[i].Entropy = u_init; + + /* Note: the coversion to entropy will be done in the function init(), + after the densities have been computed */ + } + } + } + + for(i = 0; i < N_gas; i++) + SphP[i].Entropy = dmax(All.MinEgySpec, SphP[i].Entropy); + + MPI_Barrier(MPI_COMM_WORLD); + + if(ThisTask == 0) + { + printf("reading done.\n"); + fflush(stdout); + } + + if(ThisTask == 0) + { + printf("Total number of particles : %d%09d\n\n", + (int) (All.TotNumPart / 1000000000), (int) (All.TotNumPart % 1000000000)); + fflush(stdout); + } +} + + +/*! This function reads out the buffer that was filled with particle data, and + * stores it at the appropriate place in the particle structures. 
+ */ +void empty_read_buffer(enum iofields blocknr, int offset, int pc, int type) +{ + int n, k; + float *fp; + +#ifdef LONGIDS + long long *ip; +#else + int *ip; +#endif + + fp = CommBuffer; + ip = CommBuffer; + + switch (blocknr) + { + case IO_POS: /* positions */ + for(n = 0; n < pc; n++) + for(k = 0; k < 3; k++) + P[offset + n].Pos[k] = *fp++; + + for(n = 0; n < pc; n++) + P[offset + n].Type = type; /* initialize type here as well */ + break; + + case IO_VEL: /* velocities */ + for(n = 0; n < pc; n++) + for(k = 0; k < 3; k++) + P[offset + n].Vel[k] = *fp++; + break; + + case IO_ID: /* particle ID */ + for(n = 0; n < pc; n++) + P[offset + n].ID = *ip++; + break; + + case IO_MASS: /* particle mass */ + for(n = 0; n < pc; n++) + P[offset + n].Mass = *fp++; + break; + + case IO_U: /* temperature */ + for(n = 0; n < pc; n++) + SphP[offset + n].Entropy = *fp++; + break; + + case IO_RHO: /* density */ + for(n = 0; n < pc; n++) + SphP[offset + n].Density = *fp++; + break; + + + case IO_HSML: /* SPH smoothing length */ + for(n = 0; n < pc; n++) + SphP[offset + n].Hsml = *fp++; + break; + + + + + /* the other input fields (if present) are not needed to define the + initial conditions of the code */ + + case IO_POT: + case IO_ACCEL: + case IO_DTENTR: + case IO_TSTP: + break; + } +} + + + +/*! This function reads a snapshot file and distributes the data it contains + * to tasks 'readTask' to 'lastTask'. + */ +void read_file(char *fname, int readTask, int lastTask) +{ + int blockmaxlen; + int i, n_in_file, n_for_this_task, ntask, pc, offset = 0, task; + int blksize1, blksize2; + MPI_Status status; + FILE *fd = 0; + int nall; + int type; + char label[4]; + int nstart, bytes_per_blockelement, npart, nextblock, typelist[6]; + enum iofields blocknr; + +#ifdef HAVE_HDF5 + char buf[500]; + int rank, pcsum; + hid_t hdf5_file, hdf5_grp[6], hdf5_dataspace_in_file; + hid_t hdf5_datatype, hdf5_dataspace_in_memory, hdf5_dataset; + hsize_t dims[2], count[2], start[2]; +#endif + +#define SKIP {my_fread(&blksize1,sizeof(int),1,fd);} +#define SKIP2 {my_fread(&blksize2,sizeof(int),1,fd);} + + if(ThisTask == readTask) + { + if(All.ICFormat == 1 || All.ICFormat == 2) + { + if(!(fd = fopen(fname, "r"))) + { + printf("can't open file `%s' for reading initial conditions.\n", fname); + endrun(123); + } + + if(All.ICFormat == 2) + { + SKIP; + my_fread(&label, sizeof(char), 4, fd); + my_fread(&nextblock, sizeof(int), 1, fd); + printf("Reading header => '%c%c%c%c' (%d byte)\n", label[0], label[1], label[2], label[3], + nextblock); + SKIP2; + } + + SKIP; + my_fread(&header, sizeof(header), 1, fd); + SKIP2; + + if(blksize1 != 256 || blksize2 != 256) + { + printf("incorrect header format\n"); + fflush(stdout); + endrun(890); + } + } + + +#ifdef HAVE_HDF5 + if(All.ICFormat == 3) + { + read_header_attributes_in_hdf5(fname); + + hdf5_file = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT); + + for(type = 0; type < 6; type++) + { + if(header.npart[type] > 0) + { + sprintf(buf, "/PartType%d", type); + hdf5_grp[type] = H5Gopen(hdf5_file, buf); + } + } + } +#endif + + for(task = readTask + 1; task <= lastTask; task++) + MPI_Ssend(&header, sizeof(header), MPI_BYTE, task, TAG_HEADER, MPI_COMM_WORLD); + } + else + MPI_Recv(&header, sizeof(header), MPI_BYTE, readTask, TAG_HEADER, MPI_COMM_WORLD, &status); + + + if(All.TotNumPart == 0) + { + if(header.num_files <= 1) + for(i = 0; i < 6; i++) + header.npartTotal[i] = header.npart[i]; + + All.TotN_gas = header.npartTotal[0] + (((long long) header.npartTotalHighWord[0]) << 32); + + for(i = 0, 
All.TotNumPart = 0; i < 6; i++) + { + All.TotNumPart += header.npartTotal[i]; + All.TotNumPart += (((long long) header.npartTotalHighWord[i]) << 32); + } + + + for(i = 0; i < 6; i++) + All.MassTable[i] = header.mass[i]; + + All.MaxPart = All.PartAllocFactor * (All.TotNumPart / NTask); /* sets the maximum number of particles that may */ + All.MaxPartSph = All.PartAllocFactor * (All.TotN_gas / NTask); /* sets the maximum number of particles that may + reside on a processor */ + allocate_memory(); + + if(RestartFlag == 2) + All.Time = All.TimeBegin = header.time; + } + + if(ThisTask == readTask) + { + for(i = 0, n_in_file = 0; i < 6; i++) + n_in_file += header.npart[i]; + + printf("\nreading file `%s' on task=%d (contains %d particles.)\n" + "distributing this file to tasks %d-%d\n" + "Type 0 (gas): %8d (tot=%6d%09d) masstab=%g\n" + "Type 1 (halo): %8d (tot=%6d%09d) masstab=%g\n" + "Type 2 (disk): %8d (tot=%6d%09d) masstab=%g\n" + "Type 3 (bulge): %8d (tot=%6d%09d) masstab=%g\n" + "Type 4 (stars): %8d (tot=%6d%09d) masstab=%g\n" + "Type 5 (bndry): %8d (tot=%6d%09d) masstab=%g\n\n", fname, ThisTask, n_in_file, readTask, + lastTask, header.npart[0], (int) (header.npartTotal[0] / 1000000000), + (int) (header.npartTotal[0] % 1000000000), All.MassTable[0], header.npart[1], + (int) (header.npartTotal[1] / 1000000000), (int) (header.npartTotal[1] % 1000000000), + All.MassTable[1], header.npart[2], (int) (header.npartTotal[2] / 1000000000), + (int) (header.npartTotal[2] % 1000000000), All.MassTable[2], header.npart[3], + (int) (header.npartTotal[3] / 1000000000), (int) (header.npartTotal[3] % 1000000000), + All.MassTable[3], header.npart[4], (int) (header.npartTotal[4] / 1000000000), + (int) (header.npartTotal[4] % 1000000000), All.MassTable[4], header.npart[5], + (int) (header.npartTotal[5] / 1000000000), (int) (header.npartTotal[5] % 1000000000), + All.MassTable[5]); + fflush(stdout); + } + + + ntask = lastTask - readTask + 1; + + + /* to collect the gas particles all at the beginning (in case several + snapshot files are read on the current CPU) we move the collisionless + particles such that a gap of the right size is created */ + + for(type = 0, nall = 0; type < 6; type++) + { + n_in_file = header.npart[type]; + + n_for_this_task = n_in_file / ntask; + if((ThisTask - readTask) < (n_in_file % ntask)) + n_for_this_task++; + + nall += n_for_this_task; + } + + memmove(&P[N_gas + nall], &P[N_gas], (NumPart - N_gas) * sizeof(struct particle_data)); + nstart = N_gas; + + + + for(blocknr = 0; blocknr < IO_NBLOCKS; blocknr++) + { + if(blockpresent(blocknr)) + { + if(RestartFlag == 0 && blocknr > IO_U) + continue; /* ignore all other blocks in initial conditions */ + + bytes_per_blockelement = get_bytes_per_blockelement(blocknr); + + blockmaxlen = ((int) (All.BufferSize * 1024 * 1024)) / bytes_per_blockelement; + + npart = get_particles_in_block(blocknr, &typelist[0]); + + if(npart > 0) + { + if(ThisTask == readTask) + { + if(All.ICFormat == 2) + { + SKIP; + my_fread(&label, sizeof(char), 4, fd); + my_fread(&nextblock, sizeof(int), 1, fd); + printf("Reading header => '%c%c%c%c' (%d byte)\n", label[0], label[1], label[2], + label[3], nextblock); + SKIP2; + + if(strncmp(label, Tab_IO_Labels[blocknr], 4) != 0) + { + printf("incorrect block-structure!\n"); + printf("expected '%c%c%c%c' but found '%c%c%c%c'\n", + label[0], label[1], label[2], label[3], + Tab_IO_Labels[blocknr][0], Tab_IO_Labels[blocknr][1], + Tab_IO_Labels[blocknr][2], Tab_IO_Labels[blocknr][3]); + fflush(stdout); + endrun(1890); + } + } + + 
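+ /* Format 1 and 2 files are Fortran-style unformatted records: every block
+ * is framed by a leading and a trailing int-sized length field. SKIP reads
+ * the leading field into blksize1, SKIP2 the trailing one into blksize2,
+ * and the two are compared after each block to catch truncated or
+ * corrupted files ("incorrect block-sizes detected"). */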
if(All.ICFormat == 1 || All.ICFormat == 2) + SKIP; + } + + for(type = 0, offset = 0; type < 6; type++) + { + n_in_file = header.npart[type]; +#ifdef HAVE_HDF5 + pcsum = 0; +#endif + if(typelist[type] == 0) + { + n_for_this_task = n_in_file / ntask; + if((ThisTask - readTask) < (n_in_file % ntask)) + n_for_this_task++; + + offset += n_for_this_task; + } + else + { + for(task = readTask; task <= lastTask; task++) + { + n_for_this_task = n_in_file / ntask; + if((task - readTask) < (n_in_file % ntask)) + n_for_this_task++; + + if(task == ThisTask) + if(NumPart + n_for_this_task > All.MaxPart) + { + printf("too many particles\n"); + endrun(1313); + } + + + do + { + pc = n_for_this_task; + + if(pc > blockmaxlen) + pc = blockmaxlen; + + if(ThisTask == readTask) + { + if(All.ICFormat == 1 || All.ICFormat == 2) + my_fread(CommBuffer, bytes_per_blockelement, pc, fd); +#ifdef HAVE_HDF5 + if(All.ICFormat == 3) + { + get_dataset_name(blocknr, buf); + hdf5_dataset = H5Dopen(hdf5_grp[type], buf); + + dims[0] = header.npart[type]; + dims[1] = get_values_per_blockelement(blocknr); + if(dims[1] == 1) + rank = 1; + else + rank = 2; + + hdf5_dataspace_in_file = H5Screate_simple(rank, dims, NULL); + + dims[0] = pc; + hdf5_dataspace_in_memory = H5Screate_simple(rank, dims, NULL); + + start[0] = pcsum; + start[1] = 0; + + count[0] = pc; + count[1] = get_values_per_blockelement(blocknr); + pcsum += pc; + + H5Sselect_hyperslab(hdf5_dataspace_in_file, H5S_SELECT_SET, + start, NULL, count, NULL); + + switch (get_datatype_in_block(blocknr)) + { + case 0: + hdf5_datatype = H5Tcopy(H5T_NATIVE_UINT); + break; + case 1: + hdf5_datatype = H5Tcopy(H5T_NATIVE_FLOAT); + break; + case 2: + hdf5_datatype = H5Tcopy(H5T_NATIVE_UINT64); + break; + } + + H5Dread(hdf5_dataset, hdf5_datatype, hdf5_dataspace_in_memory, + hdf5_dataspace_in_file, H5P_DEFAULT, CommBuffer); + + H5Tclose(hdf5_datatype); + H5Sclose(hdf5_dataspace_in_memory); + H5Sclose(hdf5_dataspace_in_file); + H5Dclose(hdf5_dataset); + } +#endif + } + + if(ThisTask == readTask && task != readTask) + MPI_Ssend(CommBuffer, bytes_per_blockelement * pc, MPI_BYTE, task, TAG_PDATA, + MPI_COMM_WORLD); + + if(ThisTask != readTask && task == ThisTask) + MPI_Recv(CommBuffer, bytes_per_blockelement * pc, MPI_BYTE, readTask, + TAG_PDATA, MPI_COMM_WORLD, &status); + + if(ThisTask == task) + { + empty_read_buffer(blocknr, nstart + offset, pc, type); + + offset += pc; + } + + n_for_this_task -= pc; + } + while(n_for_this_task > 0); + } + } + } + if(ThisTask == readTask) + { + if(All.ICFormat == 1 || All.ICFormat == 2) + { + SKIP2; + if(blksize1 != blksize2) + { + printf("incorrect block-sizes detected!\n"); + printf("Task=%d blocknr=%d blksize1=%d blksize2=%d\n", ThisTask, blocknr, + blksize1, blksize2); + fflush(stdout); + endrun(1889); + } + } + } + } + } + } + + + for(type = 0; type < 6; type++) + { + n_in_file = header.npart[type]; + + n_for_this_task = n_in_file / ntask; + if((ThisTask - readTask) < (n_in_file % ntask)) + n_for_this_task++; + + NumPart += n_for_this_task; + + if(type == 0) + N_gas += n_for_this_task; + } + + if(ThisTask == readTask) + { + if(All.ICFormat == 1 || All.ICFormat == 2) + fclose(fd); +#ifdef HAVE_HDF5 + if(All.ICFormat == 3) + { + for(type = 5; type >= 0; type--) + if(header.npart[type] > 0) + H5Gclose(hdf5_grp[type]); + H5Fclose(hdf5_file); + } +#endif + } +} + + + + +/*! This function determines onto how many files a given snapshot is + * distributed. 
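+ * For a hypothetical base name "snap_005" it first probes the multi-file
+ * name "snap_005.0" and then falls back to the single-file name "snap_005";
+ * with All.ICFormat == 3 the ".hdf5" suffix is appended to both candidates.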
+ */ +int find_files(char *fname) +{ + FILE *fd; + char buf[200], buf1[200]; + int dummy; + + sprintf(buf, "%s.%d", fname, 0); + sprintf(buf1, "%s", fname); + + if(All.ICFormat == 3) + { + sprintf(buf, "%s.%d.hdf5", fname, 0); + sprintf(buf1, "%s.hdf5", fname); + } + +#ifndef HAVE_HDF5 + if(All.ICFormat == 3) + { + if(ThisTask == 0) + printf("Code wasn't compiled with HDF5 support enabled!\n"); + endrun(0); + } +#endif + + header.num_files = 0; + + if(ThisTask == 0) + { + if((fd = fopen(buf, "r"))) + { + if(All.ICFormat == 1 || All.ICFormat == 2) + { + if(All.ICFormat == 2) + { + fread(&dummy, sizeof(dummy), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + } + + fread(&dummy, sizeof(dummy), 1, fd); + fread(&header, sizeof(header), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + } + fclose(fd); + +#ifdef HAVE_HDF5 + if(All.ICFormat == 3) + read_header_attributes_in_hdf5(buf); +#endif + } + } + + MPI_Bcast(&header, sizeof(header), MPI_BYTE, 0, MPI_COMM_WORLD); + + if(header.num_files > 0) + return header.num_files; + + if(ThisTask == 0) + { + if((fd = fopen(buf1, "r"))) + { + if(All.ICFormat == 1 || All.ICFormat == 2) + { + if(All.ICFormat == 2) + { + fread(&dummy, sizeof(dummy), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + } + + fread(&dummy, sizeof(dummy), 1, fd); + fread(&header, sizeof(header), 1, fd); + fread(&dummy, sizeof(dummy), 1, fd); + } + fclose(fd); + +#ifdef HAVE_HDF5 + if(All.ICFormat == 3) + read_header_attributes_in_hdf5(buf1); +#endif + header.num_files = 1; + } + } + + MPI_Bcast(&header, sizeof(header), MPI_BYTE, 0, MPI_COMM_WORLD); + + if(header.num_files > 0) + return header.num_files; + + if(ThisTask == 0) + { + printf("\nCan't find initial conditions file."); + printf("neither as '%s'\nnor as '%s'\n", buf, buf1); + fflush(stdout); + } + + endrun(0); + return 0; +} + + + +/*! This function assigns a certain number of files to processors, such that + * each processor is exactly assigned to one file, and the number of cpus per + * file is as homogenous as possible. The number of files may at most be + * equal to the number of processors. + */ +void distribute_file(int nfiles, int firstfile, int firsttask, int lasttask, int *filenr, int *master, + int *last) +{ + int ntask, filesleft, filesright, tasksleft, tasksright; + + if(nfiles > 1) + { + ntask = lasttask - firsttask + 1; + + filesleft = (((double) (ntask / 2)) / ntask) * nfiles; + if(filesleft <= 0) + filesleft = 1; + if(filesleft >= nfiles) + filesleft = nfiles - 1; + + filesright = nfiles - filesleft; + + tasksleft = ntask / 2; + tasksright = ntask - tasksleft; + + distribute_file(filesleft, firstfile, firsttask, firsttask + tasksleft - 1, filenr, master, last); + distribute_file(filesright, firstfile + filesleft, firsttask + tasksleft, lasttask, filenr, master, + last); + } + else + { + if(ThisTask >= firsttask && ThisTask <= lasttask) + { + *filenr = firstfile; + *master = firsttask; + *last = lasttask; + } + } +} + + +/*! This function reads the header information in case the HDF5 file format is + * used. 
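+ * The header is expected in the "/Header" group, with attributes such as
+ * NumPart_ThisFile, NumPart_Total, NumPart_Total_HighWord, MassTable, Time,
+ * NumFilesPerSnapshot and Flag_Entropy_ICs, as read below.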
+ */ +#ifdef HAVE_HDF5 +void read_header_attributes_in_hdf5(char *fname) +{ + hid_t hdf5_file, hdf5_headergrp, hdf5_attribute; + + + hdf5_file = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT); + hdf5_headergrp = H5Gopen(hdf5_file, "/Header"); + + + hdf5_attribute = H5Aopen_name(hdf5_headergrp, "NumPart_ThisFile"); + H5Aread(hdf5_attribute, H5T_NATIVE_INT, header.npart); + H5Aclose(hdf5_attribute); + + hdf5_attribute = H5Aopen_name(hdf5_headergrp, "NumPart_Total"); + H5Aread(hdf5_attribute, H5T_NATIVE_UINT, header.npartTotal); + H5Aclose(hdf5_attribute); + + hdf5_attribute = H5Aopen_name(hdf5_headergrp, "NumPart_Total_HighWord"); + H5Aread(hdf5_attribute, H5T_NATIVE_UINT, header.npartTotalHighWord); + H5Aclose(hdf5_attribute); + + hdf5_attribute = H5Aopen_name(hdf5_headergrp, "MassTable"); + H5Aread(hdf5_attribute, H5T_NATIVE_DOUBLE, header.mass); + H5Aclose(hdf5_attribute); + + hdf5_attribute = H5Aopen_name(hdf5_headergrp, "Time"); + H5Aread(hdf5_attribute, H5T_NATIVE_DOUBLE, &header.time); + H5Aclose(hdf5_attribute); + + hdf5_attribute = H5Aopen_name(hdf5_headergrp, "NumFilesPerSnapshot"); + H5Aread(hdf5_attribute, H5T_NATIVE_INT, &header.num_files); + H5Aclose(hdf5_attribute); + + hdf5_attribute = H5Aopen_name(hdf5_headergrp, "Flag_Entropy_ICs"); + H5Aread(hdf5_attribute, H5T_NATIVE_INT, &header.flag_entropy_instead_u); + H5Aclose(hdf5_attribute); + + H5Gclose(hdf5_headergrp); + H5Fclose(hdf5_file); +} +#endif diff --git a/src/PyGadget/src/read_ic.o b/src/PyGadget/src/read_ic.o new file mode 100644 index 0000000..0fcc556 Binary files /dev/null and b/src/PyGadget/src/read_ic.o differ diff --git a/src/PyGadget/src/restart.c b/src/PyGadget/src/restart.c new file mode 100644 index 0000000..2a5e498 --- /dev/null +++ b/src/PyGadget/src/restart.c @@ -0,0 +1,306 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + +/*! \file restart.c + * \brief Code for reading and writing restart files + */ + +static FILE *fd; + +static void in(int *x, int modus); +static void byten(void *x, size_t n, int modus); + + +/*! This function reads or writes the restart files. Each processor writes + * its own restart file, with the I/O being done in parallel. To avoid + * congestion of the disks you can tell the program to restrict the number of + * files that are simultaneously written to NumFilesWrittenInParallel. + * + * If modus>0 the restart()-routine reads, if modus==0 it writes a restart + * file. 
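+ * The identical sequence of byten()/in() calls defines the file layout for
+ * both directions: restart(0) is invoked from run() to dump the current
+ * state, and (sketch, assuming the usual start-up path) restart(1) would be
+ * used when resuming, replaying the same calls in read mode.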
+ */ +void restart(int modus) +{ + char buf[200], buf_bak[200], buf_mv[500]; + double save_PartAllocFactor, save_TreeAllocFactor; + int i, nprocgroup, masterTask, groupTask, old_MaxPart, old_MaxNodes; + struct global_data_all_processes all_task0; + + + sprintf(buf, "%s%s.%d", All.OutputDir, All.RestartFile, ThisTask); + sprintf(buf_bak, "%s%s.%d.bak", All.OutputDir, All.RestartFile, ThisTask); + sprintf(buf_mv, "mv %s %s", buf, buf_bak); + + + if((NTask < All.NumFilesWrittenInParallel)) + { + printf + ("Fatal error.\nNumber of processors must be a smaller or equal than `NumFilesWrittenInParallel'.\n"); + endrun(2131); + } + + nprocgroup = NTask / All.NumFilesWrittenInParallel; + + if((NTask % All.NumFilesWrittenInParallel)) + { + nprocgroup++; + } + + masterTask = (ThisTask / nprocgroup) * nprocgroup; + + for(groupTask = 0; groupTask < nprocgroup; groupTask++) + { + if(ThisTask == (masterTask + groupTask)) /* ok, it's this processor's turn */ + { + if(modus) + { + if(!(fd = fopen(buf, "r"))) + { + printf("Restart file '%s' not found.\n", buf); + endrun(7870); + } + } + else + { + system(buf_mv); /* move old restart files to .bak files */ + + if(!(fd = fopen(buf, "w"))) + { + printf("Restart file '%s' cannot be opened.\n", buf); + endrun(7878); + } + } + + + save_PartAllocFactor = All.PartAllocFactor; + save_TreeAllocFactor = All.TreeAllocFactor; + + /* common data */ + byten(&All, sizeof(struct global_data_all_processes), modus); + + if(ThisTask == 0 && modus > 0) + all_task0 = All; + + if(modus > 0 && groupTask == 0) /* read */ + { + MPI_Bcast(&all_task0, sizeof(struct global_data_all_processes), MPI_BYTE, 0, MPI_COMM_WORLD); + } + + old_MaxPart = All.MaxPart; + old_MaxNodes = All.TreeAllocFactor * All.MaxPart; + + if(modus) /* read */ + { + if(All.PartAllocFactor != save_PartAllocFactor) + { + All.PartAllocFactor = save_PartAllocFactor; + All.MaxPart = All.PartAllocFactor * (All.TotNumPart / NTask); + All.MaxPartSph = All.PartAllocFactor * (All.TotN_gas / NTask); + save_PartAllocFactor = -1; + } + + if(All.TreeAllocFactor != save_TreeAllocFactor) + { + All.TreeAllocFactor = save_TreeAllocFactor; + save_TreeAllocFactor = -1; + } + + if(all_task0.Time != All.Time) + { + printf("The restart file on task=%d is not consistent with the one on task=0\n", ThisTask); + fflush(stdout); + endrun(16); + } + + allocate_memory(); + } + + in(&NumPart, modus); + + if(NumPart > All.MaxPart) + { + printf + ("it seems you have reduced(!) 'PartAllocFactor' below the value of %g needed to load the restart file.\n", + NumPart / (((double) All.TotNumPart) / NTask)); + printf("fatal error\n"); + endrun(22); + } + + /* Particle data */ + byten(&P[0], NumPart * sizeof(struct particle_data), modus); + + in(&N_gas, modus); + + if(N_gas > 0) + { + if(N_gas > All.MaxPartSph) + { + printf + ("SPH: it seems you have reduced(!) 'PartAllocFactor' below the value of %g needed to load the restart file.\n", + N_gas / (((double) All.TotN_gas) / NTask)); + printf("fatal error\n"); + endrun(222); + } + /* Sph-Particle data */ + byten(&SphP[0], N_gas * sizeof(struct sph_particle_data), modus); + } + + /* write state of random number generator */ + byten(gsl_rng_state(random_generator), gsl_rng_size(random_generator), modus); + + + /* now store relevant data for tree */ + + if(modus) /* read */ + { + ngb_treeallocate(MAX_NGB); + + force_treeallocate(All.TreeAllocFactor * All.MaxPart, All.MaxPart); + } + + + in(&Numnodestree, modus); + + if(Numnodestree > MaxNodes) + { + printf + ("Tree storage: it seems you have reduced(!) 
'PartAllocFactor' below the value needed to load the restart file (task=%d). " + "Numnodestree=%d MaxNodes=%d\n", ThisTask, Numnodestree, MaxNodes); + endrun(221); + } + + byten(Nodes_base, Numnodestree * sizeof(struct NODE), modus); + byten(Extnodes_base, Numnodestree * sizeof(struct extNODE), modus); + + byten(Father, NumPart * sizeof(int), modus); + + byten(Nextnode, NumPart * sizeof(int), modus); + byten(Nextnode + All.MaxPart, MAXTOPNODES * sizeof(int), modus); + + byten(DomainStartList, NTask * sizeof(int), modus); + byten(DomainEndList, NTask * sizeof(int), modus); + byten(DomainTask, MAXTOPNODES * sizeof(int), modus); + byten(DomainNodeIndex, MAXTOPNODES * sizeof(int), modus); + byten(DomainTreeNodeLen, MAXTOPNODES * sizeof(FLOAT), modus); + byten(DomainHmax, MAXTOPNODES * sizeof(FLOAT), modus); + byten(DomainMoment, MAXTOPNODES * sizeof(struct DomainNODE), modus); + + byten(DomainCorner, 3 * sizeof(double), modus); + byten(DomainCenter, 3 * sizeof(double), modus); + byten(&DomainLen, sizeof(double), modus); + byten(&DomainFac, sizeof(double), modus); + byten(&DomainMyStart, sizeof(int), modus); + byten(&DomainMyLast, sizeof(int), modus); + + if(modus) /* read */ + if(All.PartAllocFactor != save_PartAllocFactor || All.TreeAllocFactor != save_TreeAllocFactor) + { + for(i = 0; i < NumPart; i++) + Father[i] += (All.MaxPart - old_MaxPart); + + for(i = 0; i < NumPart; i++) + if(Nextnode[i] >= old_MaxPart) + { + if(Nextnode[i] >= old_MaxPart + old_MaxNodes) + Nextnode[i] += (All.MaxPart - old_MaxPart) + (MaxNodes - old_MaxPart); + else + Nextnode[i] += (All.MaxPart - old_MaxPart); + } + + for(i = 0; i < Numnodestree; i++) + { + if(Nodes_base[i].u.d.sibling >= old_MaxPart) + { + if(Nodes_base[i].u.d.sibling >= old_MaxPart + old_MaxNodes) + Nodes_base[i].u.d.sibling += + (All.MaxPart - old_MaxPart) + (MaxNodes - old_MaxNodes); + else + Nodes_base[i].u.d.sibling += (All.MaxPart - old_MaxPart); + } + + if(Nodes_base[i].u.d.father >= old_MaxPart) + { + if(Nodes_base[i].u.d.father >= old_MaxPart + old_MaxNodes) + Nodes_base[i].u.d.father += (All.MaxPart - old_MaxPart) + (MaxNodes - old_MaxNodes); + else + Nodes_base[i].u.d.father += (All.MaxPart - old_MaxPart); + } + + if(Nodes_base[i].u.d.nextnode >= old_MaxPart) + { + if(Nodes_base[i].u.d.nextnode >= old_MaxPart + old_MaxNodes) + Nodes_base[i].u.d.nextnode += + (All.MaxPart - old_MaxPart) + (MaxNodes - old_MaxNodes); + else + Nodes_base[i].u.d.nextnode += (All.MaxPart - old_MaxPart); + } + } + + for(i = 0; i < MAXTOPNODES; i++) + if(Nextnode[i + All.MaxPart] >= old_MaxPart) + { + if(Nextnode[i + All.MaxPart] >= old_MaxPart + old_MaxNodes) + Nextnode[i + All.MaxPart] += (All.MaxPart - old_MaxPart) + (MaxNodes - old_MaxNodes); + else + Nextnode[i + All.MaxPart] += (All.MaxPart - old_MaxPart); + } + + for(i = 0; i < MAXTOPNODES; i++) + if(DomainNodeIndex[i] >= old_MaxPart) + { + if(DomainNodeIndex[i] >= old_MaxPart + old_MaxNodes) + DomainNodeIndex[i] += (All.MaxPart - old_MaxPart) + (MaxNodes - old_MaxNodes); + else + DomainNodeIndex[i] += (All.MaxPart - old_MaxPart); + } + } + + fclose(fd); + } + else /* wait inside the group */ + { + if(modus > 0 && groupTask == 0) /* read */ + { + MPI_Bcast(&all_task0, sizeof(struct global_data_all_processes), MPI_BYTE, 0, MPI_COMM_WORLD); + } + } + + MPI_Barrier(MPI_COMM_WORLD); + } +} + + + +/*! reads/writes n bytes in restart routine + */ +void byten(void *x, size_t n, int modus) +{ + if(modus) + my_fread(x, n, 1, fd); + else + my_fwrite(x, n, 1, fd); +} + + +/*! 
reads/writes one `int' variable in restart routine + */ +void in(int *x, int modus) +{ + if(modus) + my_fread(x, 1, sizeof(int), fd); + else + my_fwrite(x, 1, sizeof(int), fd); +} diff --git a/src/PyGadget/src/restart.o b/src/PyGadget/src/restart.o new file mode 100644 index 0000000..93cbe33 Binary files /dev/null and b/src/PyGadget/src/restart.o differ diff --git a/src/PyGadget/src/run.c b/src/PyGadget/src/run.c new file mode 100644 index 0000000..4fbb417 --- /dev/null +++ b/src/PyGadget/src/run.c @@ -0,0 +1,433 @@ +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + +/*! \file run.c + * \brief iterates over timesteps, main loop + */ + +/*! This routine contains the main simulation loop that iterates over single + * timesteps. The loop terminates when the cpu-time limit is reached, when a + * `stop' file is found in the output directory, or when the simulation ends + * because we arrived at TimeMax. + */ +void run(void) +{ + FILE *fd; + int stopflag = 0; + char stopfname[200], contfname[200]; + double t0, t1; + + + sprintf(stopfname, "%sstop", All.OutputDir); + sprintf(contfname, "%scont", All.OutputDir); + unlink(contfname); + + do /* main loop */ + { + t0 = second(); + + find_next_sync_point_and_drift(); /* find next synchronization point and drift particles to this time. + * If needed, this function will also write an output file + * at the desired time. + */ + + every_timestep_stuff(); /* write some info to log-files */ + + + domain_Decomposition(); /* do domain decomposition if needed */ + + + compute_accelerations(0); /* compute accelerations for + * the particles that are to be advanced + */ + + /* check whether we want a full energy statistics */ + if((All.Time - All.TimeLastStatistics) >= All.TimeBetStatistics) + { +#ifdef COMPUTE_POTENTIAL_ENERGY + compute_potential(); +#endif + energy_statistics(); /* compute and output energy statistics */ + All.TimeLastStatistics += All.TimeBetStatistics; + } + + advance_and_find_timesteps(); /* 'kick' active particles in + * momentum space and compute new + * timesteps for them + */ + All.NumCurrentTiStep++; + + /* Check whether we need to interrupt the run */ + if(ThisTask == 0) + { + /* Is the stop-file present? If yes, interrupt the run. */ + if((fd = fopen(stopfname, "r"))) + { + fclose(fd); + stopflag = 1; + unlink(stopfname); + } + + /* are we running out of CPU-time ? If yes, interrupt run. */ + if(CPUThisRun > 0.85 * All.TimeLimitCPU) + { + printf("reaching time-limit. stopping.\n"); + stopflag = 2; + } + } + + MPI_Bcast(&stopflag, 1, MPI_INT, 0, MPI_COMM_WORLD); + + if(stopflag) + { + restart(0); /* write restart file */ + MPI_Barrier(MPI_COMM_WORLD); + + if(stopflag == 2 && ThisTask == 0) + { + if((fd = fopen(contfname, "w"))) + fclose(fd); + } + + if(stopflag == 2 && All.ResubmitOn && ThisTask == 0) + { + close_outputfiles(); + system(All.ResubmitCommand); + } + return; + } + + /* is it time to write a regular restart-file? 
(for security) */ + if(ThisTask == 0) + { + if((CPUThisRun - All.TimeLastRestartFile) >= All.CpuTimeBetRestartFile) + { + All.TimeLastRestartFile = CPUThisRun; + stopflag = 3; + } + else + stopflag = 0; + } + + MPI_Bcast(&stopflag, 1, MPI_INT, 0, MPI_COMM_WORLD); + + if(stopflag == 3) + { + restart(0); /* write an occasional restart file */ + stopflag = 0; + } + + t1 = second(); + + All.CPU_Total += timediff(t0, t1); + CPUThisRun += timediff(t0, t1); + } + while(All.Ti_Current < TIMEBASE && All.Time <= All.TimeMax); + + restart(0); + + savepositions(All.SnapshotFileCount++); /* write a last snapshot + * file at final time (will + * be overwritten if + * All.TimeMax is increased + * and the run is continued) + */ +} + + +/*! This function finds the next synchronization point of the system (i.e. the + * earliest point of time any of the particles needs a force computation), + * and drifts the system to this point of time. If the system drifts over + * the desired time of a snapshot file, the function will drift to this + * moment, generate an output, and then resume the drift. + */ +void find_next_sync_point_and_drift(void) +{ + int n, min, min_glob, flag, *temp; + double timeold; + double t0, t1; + + t0 = second(); + + timeold = All.Time; + + for(n = 1, min = P[0].Ti_endstep; n < NumPart; n++) + if(min > P[n].Ti_endstep) + min = P[n].Ti_endstep; + + MPI_Allreduce(&min, &min_glob, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD); + + /* We check whether this is a full step where all particles are synchronized */ + flag = 1; + for(n = 0; n < NumPart; n++) + if(P[n].Ti_endstep > min_glob) + flag = 0; + + MPI_Allreduce(&flag, &Flag_FullStep, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD); + +#ifdef PMGRID + if(min_glob >= All.PM_Ti_endstep) + { + min_glob = All.PM_Ti_endstep; + Flag_FullStep = 1; + } +#endif + + /* Determine 'NumForceUpdate', i.e. the number of particles on this processor that are going to be active */ + for(n = 0, NumForceUpdate = 0; n < NumPart; n++) + { + if(P[n].Ti_endstep == min_glob) +#ifdef SELECTIVE_NO_GRAVITY + if(!((1 << P[n].Type) & (SELECTIVE_NO_GRAVITY))) +#endif + NumForceUpdate++; + } + + /* note: NumForcesSinceLastDomainDecomp has type "long long" */ + temp = malloc(NTask * sizeof(int)); + MPI_Allgather(&NumForceUpdate, 1, MPI_INT, temp, 1, MPI_INT, MPI_COMM_WORLD); + for(n = 0; n < NTask; n++) + All.NumForcesSinceLastDomainDecomp += temp[n]; + free(temp); + + + + t1 = second(); + + All.CPU_Predict += timediff(t0, t1); + + while(min_glob >= All.Ti_nextoutput && All.Ti_nextoutput >= 0) + { + move_particles(All.Ti_Current, All.Ti_nextoutput); + + All.Ti_Current = All.Ti_nextoutput; + + if(All.ComovingIntegrationOn) + All.Time = All.TimeBegin * exp(All.Ti_Current * All.Timebase_interval); + else + All.Time = All.TimeBegin + All.Ti_Current * All.Timebase_interval; + +#ifdef OUTPUTPOTENTIAL + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + domain_Decomposition(); + compute_potential(); +#endif + savepositions(All.SnapshotFileCount++); /* write snapshot file */ + + All.Ti_nextoutput = find_next_outputtime(All.Ti_nextoutput + 1); + } + + move_particles(All.Ti_Current, min_glob); + + All.Ti_Current = min_glob; + + if(All.ComovingIntegrationOn) + All.Time = All.TimeBegin * exp(All.Ti_Current * All.Timebase_interval); + else + All.Time = All.TimeBegin + All.Ti_Current * All.Timebase_interval; + + All.TimeStep = All.Time - timeold; +} + + + +/*! 
this function returns the next output time that is equal or larger to + * ti_curr + */ +int find_next_outputtime(int ti_curr) +{ + int i, ti, ti_next, iter = 0; + double next, time; + + ti_next = -1; + + if(All.OutputListOn) + { + for(i = 0; i < All.OutputListLength; i++) + { + time = All.OutputListTimes[i]; + + if(time >= All.TimeBegin && time <= All.TimeMax) + { + if(All.ComovingIntegrationOn) + ti = log(time / All.TimeBegin) / All.Timebase_interval; + else + ti = (time - All.TimeBegin) / All.Timebase_interval; + + if(ti >= ti_curr) + { + if(ti_next == -1) + ti_next = ti; + + if(ti_next > ti) + ti_next = ti; + } + } + } + } + else + { + if(All.ComovingIntegrationOn) + { + if(All.TimeBetSnapshot <= 1.0) + { + printf("TimeBetSnapshot > 1.0 required for your simulation.\n"); + endrun(13123); + } + } + else + { + if(All.TimeBetSnapshot <= 0.0) + { + printf("TimeBetSnapshot > 0.0 required for your simulation.\n"); + endrun(13123); + } + } + + time = All.TimeOfFirstSnapshot; + + iter = 0; + + while(time < All.TimeBegin) + { + if(All.ComovingIntegrationOn) + time *= All.TimeBetSnapshot; + else + time += All.TimeBetSnapshot; + + iter++; + + if(iter > 1000000) + { + printf("Can't determine next output time.\n"); + endrun(110); + } + } + + while(time <= All.TimeMax) + { + if(All.ComovingIntegrationOn) + ti = log(time / All.TimeBegin) / All.Timebase_interval; + else + ti = (time - All.TimeBegin) / All.Timebase_interval; + + if(ti >= ti_curr) + { + ti_next = ti; + break; + } + + if(All.ComovingIntegrationOn) + time *= All.TimeBetSnapshot; + else + time += All.TimeBetSnapshot; + + iter++; + + if(iter > 1000000) + { + printf("Can't determine next output time.\n"); + endrun(111); + } + } + } + + if(ti_next == -1) + { + ti_next = 2 * TIMEBASE; /* this will prevent any further output */ + + if(ThisTask == 0) + printf("\nThere is no valid time for a further snapshot file.\n"); + } + else + { + if(All.ComovingIntegrationOn) + next = All.TimeBegin * exp(ti_next * All.Timebase_interval); + else + next = All.TimeBegin + ti_next * All.Timebase_interval; + + if(ThisTask == 0) + printf("\nSetting next time for snapshot file to Time_next= %g\n\n", next); + } + + return ti_next; +} + + + + +/*! This routine writes one line for every timestep to two log-files. In + * FdInfo, we just list the timesteps that have been done, while in FdCPU the + * cumulative cpu-time consumption in various parts of the code is stored. 
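+ * An FdInfo entry looks like (illustrative values, comoving case):
+ * Begin Step 42, Time: 0.25, Redshift: 3, Systemstep: 0.0005, Dloga: 0.002
+ * while FdCPU gets a "Step ..." line followed by 18 cumulative timing
+ * columns (Total, Gravity, Hydro, Domain, Potential, ...).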
+ */ +void every_timestep_stuff(void) +{ + double z; + + if(ThisTask == 0) + { + if(All.ComovingIntegrationOn) + { + z = 1.0 / (All.Time) - 1; + fprintf(FdInfo, "\nBegin Step %d, Time: %g, Redshift: %g, Systemstep: %g, Dloga: %g\n", + All.NumCurrentTiStep, All.Time, z, All.TimeStep, + log(All.Time) - log(All.Time - All.TimeStep)); + printf("\nBegin Step %d, Time: %g, Redshift: %g, Systemstep: %g, Dloga: %g\n", All.NumCurrentTiStep, + All.Time, z, All.TimeStep, log(All.Time) - log(All.Time - All.TimeStep)); + fflush(FdInfo); + } + else + { + fprintf(FdInfo, "\nBegin Step %d, Time: %g, Systemstep: %g\n", All.NumCurrentTiStep, All.Time, + All.TimeStep); + printf("\nBegin Step %d, Time: %g, Systemstep: %g\n", All.NumCurrentTiStep, All.Time, All.TimeStep); + fflush(FdInfo); + } + + fprintf(FdCPU, "Step %d, Time: %g, CPUs: %d\n", All.NumCurrentTiStep, All.Time, NTask); + + fprintf(FdCPU, + "%10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f %10.2f\n", + All.CPU_Total, All.CPU_Gravity, All.CPU_Hydro, All.CPU_Domain, All.CPU_Potential, + All.CPU_Predict, All.CPU_TimeLine, All.CPU_Snapshot, All.CPU_TreeWalk, All.CPU_TreeConstruction, + All.CPU_CommSum, All.CPU_Imbalance, All.CPU_HydCompWalk, All.CPU_HydCommSumm, + All.CPU_HydImbalance, All.CPU_EnsureNgb, All.CPU_PM, All.CPU_Peano); + fflush(FdCPU); + } + + set_random_numbers(); +} + + +/*! This routine first calls a computation of various global quantities of the + * particle distribution, and then writes some statistics about the energies + * in the various particle components to the file FdEnergy. + */ +void energy_statistics(void) +{ + compute_global_quantities_of_system(); + + if(ThisTask == 0) + { + fprintf(FdEnergy, + "%g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g\n", + All.Time, SysState.EnergyInt, SysState.EnergyPot, SysState.EnergyKin, SysState.EnergyIntComp[0], + SysState.EnergyPotComp[0], SysState.EnergyKinComp[0], SysState.EnergyIntComp[1], + SysState.EnergyPotComp[1], SysState.EnergyKinComp[1], SysState.EnergyIntComp[2], + SysState.EnergyPotComp[2], SysState.EnergyKinComp[2], SysState.EnergyIntComp[3], + SysState.EnergyPotComp[3], SysState.EnergyKinComp[3], SysState.EnergyIntComp[4], + SysState.EnergyPotComp[4], SysState.EnergyKinComp[4], SysState.EnergyIntComp[5], + SysState.EnergyPotComp[5], SysState.EnergyKinComp[5], SysState.MassComp[0], + SysState.MassComp[1], SysState.MassComp[2], SysState.MassComp[3], SysState.MassComp[4], + SysState.MassComp[5]); + + fflush(FdEnergy); + } +} diff --git a/src/PyGadget/src/run.o b/src/PyGadget/src/run.o new file mode 100644 index 0000000..9f1cb73 Binary files /dev/null and b/src/PyGadget/src/run.o differ diff --git a/src/PyGadget/src/sph.c b/src/PyGadget/src/sph.c new file mode 100644 index 0000000..a5705cb --- /dev/null +++ b/src/PyGadget/src/sph.c @@ -0,0 +1,1007 @@ +#include +#include +#include +#include +#include +#include +#include "allvars.h" +#include "proto.h" + +/*! \file hydra.c + * \brief Computation of SPH forces and rate of entropy generation + * + * This file contains the "second SPH loop", where the SPH forces are + * computed, and where the rate of change of entropy due to the shock heating + * (via artificial viscosity) is computed. 
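+ * Schematically, the `hfc' accumulation in sph_evaluate() below implements
+ * dv_i/dt = - sum_j m_j [ f_i P_i/rho_i^2 dwk_i + f_j P_j/rho_j^2 dwk_j
+ *                         + 0.5*visc*(dwk_i + dwk_j) ] * rhat_ij,
+ * where f_i, f_j are the DhsmlDensityFactor corrections, dwk_i/dwk_j the
+ * kernel derivatives dW/dr for the two smoothing lengths, and visc the
+ * artificial viscosity with a Balsara-type limiter (f1, f2 built from
+ * DivVel and CurlVel).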
+ */ + + +static double hubble_a, atime, hubble_a2, fac_mu, fac_vsic_fix, a3inv, fac_egy; + +#ifdef PERIODIC +static double boxSize, boxHalf; + +#ifdef LONG_X +static double boxSize_X, boxHalf_X; +#else +#define boxSize_X boxSize +#define boxHalf_X boxHalf +#endif +#ifdef LONG_Y +static double boxSize_Y, boxHalf_Y; +#else +#define boxSize_Y boxSize +#define boxHalf_Y boxHalf +#endif +#ifdef LONG_Z +static double boxSize_Z, boxHalf_Z; +#else +#define boxSize_Z boxSize +#define boxHalf_Z boxHalf +#endif +#endif + + +/*! This is a comparison kernel for a sort routine, which is used to group + * particles that are going to be exported to the same CPU. + */ +int sph_compare_key(const void *a, const void *b) +{ + if(((struct sphdata_in *) a)->Task < (((struct sphdata_in *) b)->Task)) + return -1; + if(((struct sphdata_in *) a)->Task > (((struct sphdata_in *) b)->Task)) + return +1; + return 0; +} + + +/*! This function is the driver routine for the calculation of hydrodynamical + * force and rate of change of entropy due to shock heating for all active + * particles . + */ +void sph(void) +{ + long long ntot, ntotleft; + int i, j, k, n, ngrp, maxfill, source, ndone; + int *nbuffer, *noffset, *nsend_local, *nsend, *numlist, *ndonelist; + int level, sendTask, recvTask, nexport, place; + double soundspeed_i; + double tstart, tend, sumt, sumcomm; + double timecomp = 0, timecommsumm = 0, timeimbalance = 0, sumimbalance; + MPI_Status status; + +#ifdef PERIODIC + boxSize = All.BoxSize; + boxHalf = 0.5 * All.BoxSize; +#ifdef LONG_X + boxHalf_X = boxHalf * LONG_X; + boxSize_X = boxSize * LONG_X; +#endif +#ifdef LONG_Y + boxHalf_Y = boxHalf * LONG_Y; + boxSize_Y = boxSize * LONG_Y; +#endif +#ifdef LONG_Z + boxHalf_Z = boxHalf * LONG_Z; + boxSize_Z = boxSize * LONG_Z; +#endif +#endif + + if(All.ComovingIntegrationOn) + { + /* Factors for comoving integration of hydro */ + hubble_a = All.Omega0 / (All.Time * All.Time * All.Time) + + (1 - All.Omega0 - All.OmegaLambda) / (All.Time * All.Time) + All.OmegaLambda; + + hubble_a = All.Hubble * sqrt(hubble_a); + hubble_a2 = All.Time * All.Time * hubble_a; + + fac_mu = pow(All.Time, 3 * (GAMMA - 1) / 2) / All.Time; + + fac_egy = pow(All.Time, 3 * (GAMMA - 1)); + + fac_vsic_fix = hubble_a * pow(All.Time, 3 * GAMMA_MINUS1); + + a3inv = 1 / (All.Time * All.Time * All.Time); + atime = All.Time; + } + else + hubble_a = hubble_a2 = atime = fac_mu = fac_vsic_fix = a3inv = fac_egy = 1.0; + + + /* `NumSphUpdate' gives the number of particles on this processor that want a force update */ + for(n = 0, NumSphUpdate = 0; n < N_gas; n++) + { + if(P[n].Ti_endstep == All.Ti_Current) + NumSphUpdate++; + } + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumSphUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + + i = 0; /* first particle for this task */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + tstart = second(); + for(nexport = 0, ndone = 0; i < N_gas && nexport < All.BunchSizeHydro - NTask; i++) + if(P[i].Ti_endstep == All.Ti_Current) + { + ndone++; + + for(j = 0; j < 
NTask; j++) + Exportflag[j] = 0; + + sph_evaluate(i, 0); + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + { + HydroDataIn[nexport].Pos[k] = P[i].Pos[k]; + HydroDataIn[nexport].Vel[k] = SphP[i].VelPred[k]; + } + HydroDataIn[nexport].Hsml = SphP[i].Hsml; + HydroDataIn[nexport].Mass = P[i].Mass; + HydroDataIn[nexport].DhsmlDensityFactor = SphP[i].DhsmlDensityFactor; + HydroDataIn[nexport].Density = SphP[i].Density; + HydroDataIn[nexport].Pressure = SphP[i].Pressure; + HydroDataIn[nexport].Timestep = P[i].Ti_endstep - P[i].Ti_begstep; + + /* calculation of F1 */ + soundspeed_i = sqrt(GAMMA * SphP[i].Pressure / SphP[i].Density); + HydroDataIn[nexport].F1 = fabs(SphP[i].DivVel) / + (fabs(SphP[i].DivVel) + SphP[i].CurlVel + + 0.0001 * soundspeed_i / SphP[i].Hsml / fac_mu); + + HydroDataIn[nexport].Index = i; + HydroDataIn[nexport].Task = j; + nexport++; + nsend_local[j]++; + } + } + } + tend = second(); + timecomp += timediff(tstart, tend); + + qsort(HydroDataIn, nexport, sizeof(struct hydrodata_in), sph_compare_key); + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j - 1] + nsend_local[j - 1]; + + tstart = second(); + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + tend = second(); + timeimbalance += timediff(tstart, tend); + + + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeHydro) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&HydroDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct hydrodata_in), MPI_BYTE, + recvTask, TAG_HYDRO_A, + &HydroDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct hydrodata_in), MPI_BYTE, + recvTask, TAG_HYDRO_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + /* now do the imported particles */ + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + sph_evaluate(j, 1); + tend = second(); + timecomp += timediff(tstart, tend); + + /* do a block to measure imbalance */ + tstart = second(); + MPI_Barrier(MPI_COMM_WORLD); + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* get the result */ + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeHydro) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&HydroDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct hydrodata_out), + MPI_BYTE, recvTask, 
TAG_HYDRO_B, + &HydroDataPartialResult[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct hydrodata_out), + MPI_BYTE, recvTask, TAG_HYDRO_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + source = j + noffset[recvTask]; + place = HydroDataIn[source].Index; + + for(k = 0; k < 3; k++) + SphP[place].HydroAccel[k] += HydroDataPartialResult[source].Acc[k]; + + SphP[place].DtEntropy += HydroDataPartialResult[source].DtEntropy; + + if(SphP[place].MaxSignalVel < HydroDataPartialResult[source].MaxSignalVel) + SphP[place].MaxSignalVel = HydroDataPartialResult[source].MaxSignalVel; + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + + + /* do final operations on results */ + tstart = second(); + + for(i = 0; i < N_gas; i++) + if(P[i].Ti_endstep == All.Ti_Current) + { + SphP[i].DtEntropy *= GAMMA_MINUS1 / (hubble_a2 * pow(SphP[i].Density, GAMMA_MINUS1)); +#ifdef SPH_BND_PARTICLES + if(P[i].ID == 0) + { + SphP[i].DtEntropy = 0; + for(k = 0; k < 3; k++) + SphP[i].HydroAccel[k] = 0; + } +#endif + } + + tend = second(); + timecomp += timediff(tstart, tend); + + /* collect some timing information */ + + MPI_Reduce(&timecomp, &sumt, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timecommsumm, &sumcomm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + MPI_Reduce(&timeimbalance, &sumimbalance, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + + if(ThisTask == 0) + { + All.CPU_HydCompWalk += sumt / NTask; + All.CPU_HydCommSumm += sumcomm / NTask; + All.CPU_HydImbalance += sumimbalance / NTask; + } +} + + +/*! This function is the 'core' of the SPH force computation. A target + * particle is specified which may either be local, or reside in the + * communication buffer. 
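+ * For mode == 0 the index `target' refers to the local P[]/SphP[] arrays and
+ * the result is written back to SphP[target]; for mode == 1 it refers to the
+ * import buffer HydroDataGet[] and the result is stored in HydroDataResult[].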
+ */ +void sph_evaluate(int target, int mode) +{ + int j, k, n, timestep, startnode, numngb; + FLOAT *pos, *vel; + FLOAT mass, h_i, dhsmlDensityFactor, rho, pressure, f1, f2; + double acc[3], dtEntropy, maxSignalVel; + double dx, dy, dz, dvx, dvy, dvz; + double h_i2, hinv, hinv4; + double p_over_rho2_i, p_over_rho2_j, soundspeed_i, soundspeed_j; + double hfc, dwk_i, vdotr, vdotr2, visc, mu_ij, rho_ij, vsig; + double h_j, dwk_j, r, r2, u, hfc_visc; + +#ifndef NOVISCOSITYLIMITER + double dt; +#endif + + if(mode == 0) + { + pos = P[target].Pos; + vel = SphP[target].VelPred; + h_i = SphP[target].Hsml; + mass = P[target].Mass; + dhsmlDensityFactor = SphP[target].DhsmlDensityFactor; + rho = SphP[target].Density; + pressure = SphP[target].Pressure; + timestep = P[target].Ti_endstep - P[target].Ti_begstep; + soundspeed_i = sqrt(GAMMA * pressure / rho); + f1 = fabs(SphP[target].DivVel) / + (fabs(SphP[target].DivVel) + SphP[target].CurlVel + + 0.0001 * soundspeed_i / SphP[target].Hsml / fac_mu); + } + else + { + pos = HydroDataGet[target].Pos; + vel = HydroDataGet[target].Vel; + h_i = HydroDataGet[target].Hsml; + mass = HydroDataGet[target].Mass; + dhsmlDensityFactor = HydroDataGet[target].DhsmlDensityFactor; + rho = HydroDataGet[target].Density; + pressure = HydroDataGet[target].Pressure; + timestep = HydroDataGet[target].Timestep; + soundspeed_i = sqrt(GAMMA * pressure / rho); + f1 = HydroDataGet[target].F1; + } + + + /* initialize variables before SPH loop is started */ + acc[0] = acc[1] = acc[2] = dtEntropy = 0; + maxSignalVel = 0; + + p_over_rho2_i = pressure / (rho * rho) * dhsmlDensityFactor; + h_i2 = h_i * h_i; + + /* Now start the actual SPH computation for this particle */ + startnode = All.MaxPart; + do + { + numngb = ngb_treefind_pairs(&pos[0], h_i, &startnode); + + for(n = 0; n < numngb; n++) + { + j = Ngblist[n]; + + dx = pos[0] - P[j].Pos[0]; + dy = pos[1] - P[j].Pos[1]; + dz = pos[2] - P[j].Pos[2]; + +#ifdef PERIODIC /* find the closest image in the given box size */ + if(dx > boxHalf_X) + dx -= boxSize_X; + if(dx < -boxHalf_X) + dx += boxSize_X; + if(dy > boxHalf_Y) + dy -= boxSize_Y; + if(dy < -boxHalf_Y) + dy += boxSize_Y; + if(dz > boxHalf_Z) + dz -= boxSize_Z; + if(dz < -boxHalf_Z) + dz += boxSize_Z; +#endif + r2 = dx * dx + dy * dy + dz * dz; + h_j = SphP[j].Hsml; + if(r2 < h_i2 || r2 < h_j * h_j) + { + r = sqrt(r2); + if(r > 0) + { + p_over_rho2_j = SphP[j].Pressure / (SphP[j].Density * SphP[j].Density); + soundspeed_j = sqrt(GAMMA * p_over_rho2_j * SphP[j].Density); + dvx = vel[0] - SphP[j].VelPred[0]; + dvy = vel[1] - SphP[j].VelPred[1]; + dvz = vel[2] - SphP[j].VelPred[2]; + vdotr = dx * dvx + dy * dvy + dz * dvz; + + if(All.ComovingIntegrationOn) + vdotr2 = vdotr + hubble_a2 * r2; + else + vdotr2 = vdotr; + + if(r2 < h_i2) + { + hinv = 1.0 / h_i; +#ifndef TWODIMS + hinv4 = hinv * hinv * hinv * hinv; +#else + hinv4 = hinv * hinv * hinv / boxSize_Z; +#endif + u = r * hinv; + if(u < 0.5) + dwk_i = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4); + else + dwk_i = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u); + } + else + { + dwk_i = 0; + } + + if(r2 < h_j * h_j) + { + hinv = 1.0 / h_j; +#ifndef TWODIMS + hinv4 = hinv * hinv * hinv * hinv; +#else + hinv4 = hinv * hinv * hinv / boxSize_Z; +#endif + u = r * hinv; + if(u < 0.5) + dwk_j = hinv4 * u * (KERNEL_COEFF_3 * u - KERNEL_COEFF_4); + else + dwk_j = hinv4 * KERNEL_COEFF_6 * (1.0 - u) * (1.0 - u); + } + else + { + dwk_j = 0; + } + + if(soundspeed_i + soundspeed_j > maxSignalVel) + maxSignalVel = soundspeed_i + 
soundspeed_j; + + if(vdotr2 < 0) /* ... artificial viscosity */ + { + mu_ij = fac_mu * vdotr2 / r; /* note: this is negative! */ + + vsig = soundspeed_i + soundspeed_j - 3 * mu_ij; + + if(vsig > maxSignalVel) + maxSignalVel = vsig; + + rho_ij = 0.5 * (rho + SphP[j].Density); + f2 = + fabs(SphP[j].DivVel) / (fabs(SphP[j].DivVel) + SphP[j].CurlVel + + 0.0001 * soundspeed_j / fac_mu / SphP[j].Hsml); + + visc = 0.25 * All.ArtBulkViscConst * vsig * (-mu_ij) / rho_ij * (f1 + f2); + + /* .... end artificial viscosity evaluation */ +#ifndef NOVISCOSITYLIMITER + /* make sure that viscous acceleration is not too large */ + dt = imax(timestep, (P[j].Ti_endstep - P[j].Ti_begstep)) * All.Timebase_interval; + if(dt > 0 && (dwk_i + dwk_j) < 0) + { + visc = dmin(visc, 0.5 * fac_vsic_fix * vdotr2 / + (0.5 * (mass + P[j].Mass) * (dwk_i + dwk_j) * r * dt)); + } +#endif + } + else + visc = 0; + + p_over_rho2_j *= SphP[j].DhsmlDensityFactor; + + hfc_visc = 0.5 * P[j].Mass * visc * (dwk_i + dwk_j) / r; + + hfc = hfc_visc + P[j].Mass * (p_over_rho2_i * dwk_i + p_over_rho2_j * dwk_j) / r; + + acc[0] -= hfc * dx; + acc[1] -= hfc * dy; + acc[2] -= hfc * dz; + dtEntropy += 0.5 * hfc_visc * vdotr2; + } + } + } + } + while(startnode >= 0); + + /* Now collect the result at the right place */ + if(mode == 0) + { + for(k = 0; k < 3; k++) + SphP[target].HydroAccel[k] = acc[k]; + SphP[target].DtEntropy = dtEntropy; + SphP[target].MaxSignalVel = maxSignalVel; + } + else + { + for(k = 0; k < 3; k++) + HydroDataResult[target].Acc[k] = acc[k]; + HydroDataResult[target].DtEntropy = dtEntropy; + HydroDataResult[target].MaxSignalVel = maxSignalVel; + } +} + + + + + + + + + + + + + + + + + + + + + + + + +/*! This function is the driver routine for the calculation of hydrodynamical + * force and rate of change of entropy due to shock heating for all active + * particles . 
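+ * Unlike sph(), this variant loops over the auxiliary particle set Q[]/SphQ[]
+ * (N_gasQ entries), marks every Q particle as active, zeroes the observable
+ * moments ObsMoment0/ObsMoment1 before the loop, and exports through
+ * SphDataIn using All.BunchSizeSph, delegating the pair work to
+ * sph_evaluate_sub().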
+ */ +void sph_sub(void) +{ + long long ntot, ntotleft; + int i, j, k, n, ngrp, maxfill, source, ndone; + int *nbuffer, *noffset, *nsend_local, *nsend, *numlist, *ndonelist; + int level, sendTask, recvTask, nexport, place; + double soundspeed_i; + double tstart, tend, sumt, sumcomm; + double timecomp = 0, timecommsumm = 0, timeimbalance = 0, sumimbalance; + MPI_Status status; + +#ifdef PERIODIC + boxSize = All.BoxSize; + boxHalf = 0.5 * All.BoxSize; +#ifdef LONG_X + boxHalf_X = boxHalf * LONG_X; + boxSize_X = boxSize * LONG_X; +#endif +#ifdef LONG_Y + boxHalf_Y = boxHalf * LONG_Y; + boxSize_Y = boxSize * LONG_Y; +#endif +#ifdef LONG_Z + boxHalf_Z = boxHalf * LONG_Z; + boxSize_Z = boxSize * LONG_Z; +#endif +#endif + + if(All.ComovingIntegrationOn) + { + /* Factors for comoving integration of hydro */ + hubble_a = All.Omega0 / (All.Time * All.Time * All.Time) + + (1 - All.Omega0 - All.OmegaLambda) / (All.Time * All.Time) + All.OmegaLambda; + + hubble_a = All.Hubble * sqrt(hubble_a); + hubble_a2 = All.Time * All.Time * hubble_a; + + fac_mu = pow(All.Time, 3 * (GAMMA - 1) / 2) / All.Time; + + fac_egy = pow(All.Time, 3 * (GAMMA - 1)); + + fac_vsic_fix = hubble_a * pow(All.Time, 3 * GAMMA_MINUS1); + + a3inv = 1 / (All.Time * All.Time * All.Time); + atime = All.Time; + } + else + hubble_a = hubble_a2 = atime = fac_mu = fac_vsic_fix = a3inv = fac_egy = 1.0; + + + /* `NumSphUpdate' gives the number of particles on this processor that want a force update */ + for(n = 0, NumSphUpdate = 0; n < N_gasQ; n++) + { + + SphQ[n].ObsMoment0=0; + SphQ[n].ObsMoment1=0; + + Q[n].Ti_endstep = All.Ti_Current; + if(Q[n].Ti_endstep == All.Ti_Current) + NumSphUpdate++; + } + + numlist = malloc(NTask * sizeof(int) * NTask); + MPI_Allgather(&NumSphUpdate, 1, MPI_INT, numlist, 1, MPI_INT, MPI_COMM_WORLD); + for(i = 0, ntot = 0; i < NTask; i++) + ntot += numlist[i]; + free(numlist); + + + noffset = malloc(sizeof(int) * NTask); /* offsets of bunches in common list */ + nbuffer = malloc(sizeof(int) * NTask); + nsend_local = malloc(sizeof(int) * NTask); + nsend = malloc(sizeof(int) * NTask * NTask); + ndonelist = malloc(sizeof(int) * NTask); + + + i = 0; /* first particle for this task */ + ntotleft = ntot; /* particles left for all tasks together */ + + while(ntotleft > 0) + { + for(j = 0; j < NTask; j++) + nsend_local[j] = 0; + + /* do local particles and prepare export list */ + //tstart = second(); + for(nexport = 0, ndone = 0; i < N_gasQ && nexport < All.BunchSizeSph - NTask; i++) + if(Q[i].Ti_endstep == All.Ti_Current) + { + ndone++; + + for(j = 0; j < NTask; j++) + Exportflag[j] = 0; + + sph_evaluate_sub(i, 0); + + for(j = 0; j < NTask; j++) + { + if(Exportflag[j]) + { + for(k = 0; k < 3; k++) + { + SphDataIn[nexport].Pos[k] = Q[i].Pos[k]; + SphDataIn[nexport].Vel[k] = SphQ[i].VelPred[k]; + } + SphDataIn[nexport].Hsml = SphQ[i].Hsml; + //SphDataIn[nexport].Mass = Q[i].Mass; + //SphDataIn[nexport].DhsmlDensityFactor = SphQ[i].DhsmlDensityFactor; + //SphDataIn[nexport].Density = SphQ[i].Density; + //SphDataIn[nexport].Pressure = SphQ[i].Pressure; + //SphDataIn[nexport].Timestep = Q[i].Ti_endstep - Q[i].Ti_begstep; + + SphDataIn[nexport].ObsMoment0 = Q[i].Mass; + SphDataIn[nexport].ObsMoment1 = Q[i].Mass; + + + SphDataIn[nexport].Index = i; + SphDataIn[nexport].Task = j; + nexport++; + nsend_local[j]++; + } + } + } + //tend = second(); + //timecomp += timediff(tstart, tend); + + qsort(SphDataIn, nexport, sizeof(struct sphdata_in), sph_compare_key); + + for(j = 1, noffset[0] = 0; j < NTask; j++) + noffset[j] = noffset[j 
- 1] + nsend_local[j - 1]; + + //tstart = second(); + + MPI_Allgather(nsend_local, NTask, MPI_INT, nsend, NTask, MPI_INT, MPI_COMM_WORLD); + + //tend = second(); + //timeimbalance += timediff(tstart, tend); + + /* now do the particles that need to be exported */ + + for(level = 1; level < (1 << PTask); level++) + { + //tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeSph) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* get the particles */ + MPI_Sendrecv(&SphDataIn[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct sphdata_in), MPI_BYTE, + recvTask, TAG_HYDRO_A, + &SphDataGet[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct sphdata_in), MPI_BYTE, + recvTask, TAG_HYDRO_A, MPI_COMM_WORLD, &status); + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + /* now do the imported particles */ + tstart = second(); + for(j = 0; j < nbuffer[ThisTask]; j++) + sph_evaluate_sub(j, 1); + tend = second(); + timecomp += timediff(tstart, tend); + + /* do a block to measure imbalance */ + tstart = second(); + MPI_Barrier(MPI_COMM_WORLD); + tend = second(); + timeimbalance += timediff(tstart, tend); + + /* get the result */ + tstart = second(); + for(j = 0; j < NTask; j++) + nbuffer[j] = 0; + for(ngrp = level; ngrp < (1 << PTask); ngrp++) + { + maxfill = 0; + for(j = 0; j < NTask; j++) + { + if((j ^ ngrp) < NTask) + if(maxfill < nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]) + maxfill = nbuffer[j] + nsend[(j ^ ngrp) * NTask + j]; + } + if(maxfill >= All.BunchSizeSph) + break; + + sendTask = ThisTask; + recvTask = ThisTask ^ ngrp; + + if(recvTask < NTask) + { + if(nsend[ThisTask * NTask + recvTask] > 0 || nsend[recvTask * NTask + ThisTask] > 0) + { + /* send the results */ + MPI_Sendrecv(&SphDataResult[nbuffer[ThisTask]], + nsend[recvTask * NTask + ThisTask] * sizeof(struct sphdata_out), + MPI_BYTE, recvTask, TAG_HYDRO_B, + &SphDataPartialResult[noffset[recvTask]], + nsend_local[recvTask] * sizeof(struct sphdata_out), + MPI_BYTE, recvTask, TAG_HYDRO_B, MPI_COMM_WORLD, &status); + + /* add the result to the particles */ + for(j = 0; j < nsend_local[recvTask]; j++) + { + source = j + noffset[recvTask]; + place = SphDataIn[source].Index; + + + SphQ[place].ObsMoment0 += SphDataPartialResult[source].ObsMoment0; + SphQ[place].ObsMoment1 += SphDataPartialResult[source].ObsMoment1; + + + } + } + } + + for(j = 0; j < NTask; j++) + if((j ^ ngrp) < NTask) + nbuffer[j] += nsend[(j ^ ngrp) * NTask + j]; + } + tend = second(); + timecommsumm += timediff(tstart, tend); + + level = ngrp - 1; + } + + MPI_Allgather(&ndone, 1, MPI_INT, ndonelist, 1, MPI_INT, MPI_COMM_WORLD); + for(j = 0; j < NTask; j++) + ntotleft -= ndonelist[j]; + } + + free(ndonelist); + free(nsend); + free(nsend_local); + free(nbuffer); + free(noffset); + + /* do final operations on results */ + //tstart = second(); + + for(i = 0; i < N_gasQ; i++) + if(Q[i].Ti_endstep == All.Ti_Current) + { + SphQ[i].Observable = SphQ[i].ObsMoment1/SphQ[i].ObsMoment0; + } + + +} + + +/*! 
This function is the 'core' of the SPH force computation. A target + * particle is specified which may either be local, or reside in the + * communication buffer. + */ +void sph_evaluate_sub(int target, int mode) +{ + + int j, n, startnode, numngb, numngb_inbox; + double h, h2, fac, hinv, hinv3, hinv4; + double rho, divv, wk, dwk; + double dx, dy, dz, r, r2, u, mass_j; + double dvx, dvy, dvz, rotv[3]; + double weighted_numngb, dhsmlrho; + FLOAT *pos, *vel; + FLOAT mom1,mom0; + + +#ifndef NOVISCOSITYLIMITER + double dt; +#endif + + if(mode == 0) + { + pos = Q[target].Pos; + vel = SphQ[target].VelPred; + h = SphQ[target].Hsml; + mom0 = SphQ[target].ObsMoment0; + mom1 = SphQ[target].ObsMoment1; + } + else + { + pos = SphDataGet[target].Pos; + vel = SphDataGet[target].Vel; + h = SphDataGet[target].Hsml; + mom0 = SphDataGet[target].ObsMoment0; + mom1 = SphDataGet[target].ObsMoment1; + } + + + + h2 = h * h; + hinv = 1.0 / h; +#ifndef TWODIMS + hinv3 = hinv * hinv * hinv; +#else + hinv3 = hinv * hinv / boxSize_Z; +#endif + hinv4 = hinv3 * hinv; + + rho = divv = rotv[0] = rotv[1] = rotv[2] = 0; + weighted_numngb = 0; + dhsmlrho = 0; + + startnode = All.MaxPart; + numngb = 0; + + + do + { + numngb_inbox = ngb_treefind_variable(&pos[0], h, &startnode); + + for(n = 0; n < numngb_inbox; n++) + { + j = Ngblist[n]; + + + dx = pos[0] - P[j].Pos[0]; + dy = pos[1] - P[j].Pos[1]; + dz = pos[2] - P[j].Pos[2]; + +#ifdef PERIODIC /* now find the closest image in the given box size */ + if(dx > boxHalf_X) + dx -= boxSize_X; + if(dx < -boxHalf_X) + dx += boxSize_X; + if(dy > boxHalf_Y) + dy -= boxSize_Y; + if(dy < -boxHalf_Y) + dy += boxSize_Y; + if(dz > boxHalf_Z) + dz -= boxSize_Z; + if(dz < -boxHalf_Z) + dz += boxSize_Z; +#endif + r2 = dx * dx + dy * dy + dz * dz; + + if(r2 < h2) + { + numngb++; + + r = sqrt(r2); + + u = r * hinv; + + if(u < 0.5) + { + wk = hinv3 * (KERNEL_COEFF_1 + KERNEL_COEFF_2 * (u - 1) * u * u); + } + else + { + wk = hinv3 * KERNEL_COEFF_5 * (1.0 - u) * (1.0 - u) * (1.0 - u); + } + + mom1 += P[j].Mass*(SphP[j].Observable)/(SphP[j].Density) * wk; + mom0 += P[j].Mass /(SphP[j].Density) * wk; + + } + } + } + while(startnode >= 0); + + + /* Now collect the result at the right place */ + if(mode == 0) + { + SphQ[target].ObsMoment0 = mom0; + SphQ[target].ObsMoment1 = mom1; + } + else + { + SphDataResult[target].ObsMoment0 = mom0; + SphDataResult[target].ObsMoment1 = mom1; + } +} + + + + + + + + + + + + + + diff --git a/src/PyGadget/src/sph.o b/src/PyGadget/src/sph.o new file mode 100644 index 0000000..b4842c7 Binary files /dev/null and b/src/PyGadget/src/sph.o differ diff --git a/src/PyGadget/src/system.c b/src/PyGadget/src/system.c new file mode 100644 index 0000000..489d9c5 --- /dev/null +++ b/src/PyGadget/src/system.c @@ -0,0 +1,127 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "allvars.h" +#include "proto.h" + + +/*! \file system.c + * \brief contains miscellaneous routines, e.g. elapsed time measurements + */ + + +/*! This routine returns a random number taken from a table of random numbers, + * which is refilled every timestep. This method is used to allow random + * number application to particles independent of the number of processors + * used, and independent of the particular order the particles have. In order + * to work properly, the particle IDs should be set properly to unique + * integer values. + */ +double get_random_number(int id) +{ + return RndTable[(id % RNDTABLE)]; +} + + +/*! 
This routine fills the random number table. + */ +void set_random_numbers(void) +{ + int i; + + for(i = 0; i < RNDTABLE; i++) + RndTable[i] = gsl_rng_uniform(random_generator); +} + + +/*! returns the number of cpu-ticks in seconds that have elapsed, or the + * wall-clock time obtained with MPI_Wtime(). + */ +double second(void) +{ +#ifdef WALLCLOCK + return MPI_Wtime(); +#else + return ((double) clock()) / CLOCKS_PER_SEC; +#endif + + /* note: on AIX and presumably many other 32bit systems, + * clock() has only a resolution of 10ms=0.01sec + */ +} + + +/*! returns the time difference between two measurements obtained with + * second(). The routine takes care of the possible overflow of the tick + * counter on 32bit systems, but depending on the system, this may not always + * work properly. Similarly, in some MPI implementations, the MPI_Wtime() + * function may also overflow, in which case a negative time difference would + * be returned. The routine returns instead a time difference equal to 0. + */ +double timediff(double t0, double t1) +{ + double dt; + + dt = t1 - t0; + + if(dt < 0) /* overflow has occured (for systems with 32bit tick counter) */ + { +#ifdef WALLCLOCK + dt = 0; +#else + dt = t1 + pow(2, 32) / CLOCKS_PER_SEC - t0; +#endif + } + + return dt; +} + + +/*! returns the maximum of two double + */ +double dmax(double x, double y) +{ + if(x > y) + return x; + else + return y; +} + +/*! returns the minimum of two double + */ +double dmin(double x, double y) +{ + if(x < y) + return x; + else + return y; +} + +/*! returns the maximum of two integers + */ +int imax(int x, int y) +{ + if(x > y) + return x; + else + return y; +} + +/*! returns the minimum of two integers + */ +int imin(int x, int y) +{ + if(x < y) + return x; + else + return y; +} diff --git a/src/PyGadget/src/system.o b/src/PyGadget/src/system.o new file mode 100644 index 0000000..7039bb8 Binary files /dev/null and b/src/PyGadget/src/system.o differ diff --git a/src/PyGadget/src/tags.h b/src/PyGadget/src/tags.h new file mode 100644 index 0000000..2a21de3 --- /dev/null +++ b/src/PyGadget/src/tags.h @@ -0,0 +1,33 @@ +/*! \file tags.h + * \brief declares various tags for labelling MPI messages. + */ + +#define TAG_N 10 /*!< Various tags used for labelling MPI messages */ +#define TAG_HEADER 11 +#define TAG_PDATA 12 +#define TAG_SPHDATA 13 +#define TAG_KEY 14 +#define TAG_DMOM 15 +#define TAG_NODELEN 16 +#define TAG_HMAX 17 +#define TAG_GRAV_A 18 +#define TAG_GRAV_B 19 +#define TAG_DIRECT_A 20 +#define TAG_DIRECT_B 21 +#define TAG_HYDRO_A 22 +#define TAG_HYDRO_B 23 +#define TAG_NFORTHISTASK 24 +#define TAG_PERIODIC_A 25 +#define TAG_PERIODIC_B 26 +#define TAG_PERIODIC_C 27 +#define TAG_PERIODIC_D 28 +#define TAG_NONPERIOD_A 29 +#define TAG_NONPERIOD_B 30 +#define TAG_NONPERIOD_C 31 +#define TAG_NONPERIOD_D 32 +#define TAG_POTENTIAL_A 33 +#define TAG_POTENTIAL_B 34 +#define TAG_DENS_A 35 +#define TAG_DENS_B 36 +#define TAG_LOCALN 37 + diff --git a/src/PyGadget/src/timestep.c b/src/PyGadget/src/timestep.c new file mode 100644 index 0000000..bf6ecef --- /dev/null +++ b/src/PyGadget/src/timestep.c @@ -0,0 +1,644 @@ +#include +#include +#include +#include +#include +#include "allvars.h" +#include "proto.h" + +/*! \file timestep.c + * \brief routines for 'kicking' particles in momentum space and assigning new timesteps + */ + +static double fac1, fac2, fac3, hubble_a, atime, a3inv; +static double dt_displacement = 0; + + +/*! This function advances the system in momentum space, i.e. 
it does apply + * the 'kick' operation after the forces have been computed. Additionally, it + * assigns new timesteps to particles. At start-up, a half-timestep is + * carried out, as well as at the end of the simulation. In between, the + * half-step kick that ends the previous timestep and the half-step kick for + * the new timestep are combined into one operation. + */ +void advance_and_find_timesteps(void) +{ + int i, j, no, ti_step, ti_min, tend, tstart; + double dt_entr, dt_entr2, dt_gravkick, dt_hydrokick, dt_gravkick2, dt_hydrokick2, t0, t1; + double minentropy, aphys; + FLOAT dv[3]; + +#ifdef FLEXSTEPS + int ti_grp; +#endif +#if defined(PSEUDOSYMMETRIC) && !defined(FLEXSTEPS) + double apred, prob; + int ti_step2; +#endif +#ifdef PMGRID + double dt_gravkickA, dt_gravkickB; +#endif +#ifdef MAKEGLASS + double disp, dispmax, globmax, dmean, fac, disp2sum, globdisp2sum; +#endif + + t0 = second(); + + if(All.ComovingIntegrationOn) + { + fac1 = 1 / (All.Time * All.Time); + fac2 = 1 / pow(All.Time, 3 * GAMMA - 2); + fac3 = pow(All.Time, 3 * (1 - GAMMA) / 2.0); + hubble_a = All.Omega0 / (All.Time * All.Time * All.Time) + + (1 - All.Omega0 - All.OmegaLambda) / (All.Time * All.Time) + All.OmegaLambda; + + hubble_a = All.Hubble * sqrt(hubble_a); + a3inv = 1 / (All.Time * All.Time * All.Time); + atime = All.Time; + } + else + fac1 = fac2 = fac3 = hubble_a = a3inv = atime = 1; + +#ifdef NOPMSTEPADJUSTMENT + dt_displacement = All.MaxSizeTimestep; +#else + if(Flag_FullStep || dt_displacement == 0) + find_dt_displacement_constraint(hubble_a * atime * atime); +#endif + +#ifdef PMGRID + if(All.ComovingIntegrationOn) + dt_gravkickB = get_gravkick_factor(All.PM_Ti_begstep, All.Ti_Current) - + get_gravkick_factor(All.PM_Ti_begstep, (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2); + else + dt_gravkickB = (All.Ti_Current - (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2) * All.Timebase_interval; + + if(All.PM_Ti_endstep == All.Ti_Current) /* need to do long-range kick */ + { + /* make sure that we reconstruct the domain/tree next time because we don't kick the tree nodes in this case */ + All.NumForcesSinceLastDomainDecomp = 1 + All.TotNumPart * All.TreeDomainUpdateFrequency; + } +#endif + + +#ifdef MAKEGLASS + for(i = 0, dispmax = 0, disp2sum = 0; i < NumPart; i++) + { + for(j = 0; j < 3; j++) + { + P[i].GravPM[j] *= -1; + P[i].GravAccel[j] *= -1; + P[i].GravAccel[j] += P[i].GravPM[j]; + P[i].GravPM[j] = 0; + } + + disp = sqrt(P[i].GravAccel[0] * P[i].GravAccel[0] + + P[i].GravAccel[1] * P[i].GravAccel[1] + P[i].GravAccel[2] * P[i].GravAccel[2]); + + disp *= 2.0 / (3 * All.Hubble * All.Hubble); + + disp2sum += disp * disp; + + if(disp > dispmax) + dispmax = disp; + } + + MPI_Allreduce(&dispmax, &globmax, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + MPI_Allreduce(&disp2sum, &globdisp2sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + + dmean = pow(P[0].Mass / (All.Omega0 * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)), 1.0 / 3); + + if(globmax > dmean) + fac = dmean / globmax; + else + fac = 1.0; + + if(ThisTask == 0) + { + printf("\nglass-making: dmean= %g global disp-maximum= %g rms= %g\n\n", + dmean, globmax, sqrt(globdisp2sum / All.TotNumPart)); + fflush(stdout); + } + + for(i = 0, dispmax = 0; i < NumPart; i++) + { + for(j = 0; j < 3; j++) + { + P[i].Vel[j] = 0; + P[i].Pos[j] += fac * P[i].GravAccel[j] * 2.0 / (3 * All.Hubble * All.Hubble); + P[i].GravAccel[j] = 0; + } + } +#endif + + + + + /* Now assign new timesteps and kick */ + +#ifdef FLEXSTEPS + if((All.Ti_Current % (4 * All.PresentMinStep)) == 0) + 
if(All.PresentMinStep < TIMEBASE) + All.PresentMinStep *= 2; + + for(i = 0; i < NumPart; i++) + { + if(P[i].Ti_endstep == All.Ti_Current) + { + ti_step = get_timestep(i, &aphys, 0); + + /* make it a power 2 subdivision */ + ti_min = TIMEBASE; + while(ti_min > ti_step) + ti_min >>= 1; + ti_step = ti_min; + + if(ti_step < All.PresentMinStep) + All.PresentMinStep = ti_step; + } + } + + ti_step = All.PresentMinStep; + MPI_Allreduce(&ti_step, &All.PresentMinStep, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD); + + if(dt_displacement < All.MaxSizeTimestep) + ti_step = (int) (dt_displacement / All.Timebase_interval); + else + ti_step = (int) (All.MaxSizeTimestep / All.Timebase_interval); + + /* make it a power 2 subdivision */ + ti_min = TIMEBASE; + while(ti_min > ti_step) + ti_min >>= 1; + All.PresentMaxStep = ti_min; + + + if(ThisTask == 0) + printf("Syn Range = %g PresentMinStep = %d PresentMaxStep = %d \n", + (double) All.PresentMaxStep / All.PresentMinStep, All.PresentMinStep, All.PresentMaxStep); + +#endif + + + for(i = 0; i < NumPart; i++) + { + if(P[i].Ti_endstep == All.Ti_Current) + { + ti_step = get_timestep(i, &aphys, 0); + + /* make it a power 2 subdivision */ + ti_min = TIMEBASE; + while(ti_min > ti_step) + ti_min >>= 1; + ti_step = ti_min; + +#ifdef FLEXSTEPS + ti_grp = P[i].FlexStepGrp % All.PresentMaxStep; + ti_grp = (ti_grp / All.PresentMinStep) * All.PresentMinStep; + ti_step = ((P[i].Ti_endstep + ti_grp + ti_step) / ti_step) * ti_step - (P[i].Ti_endstep + ti_grp); +#else + +#ifdef PSEUDOSYMMETRIC + if(P[i].Type != 0) + { + if(P[i].Ti_endstep > P[i].Ti_begstep) + { + apred = aphys + ((aphys - P[i].AphysOld) / (P[i].Ti_endstep - P[i].Ti_begstep)) * ti_step; + if(fabs(apred - aphys) < 0.5 * aphys) + { + ti_step2 = get_timestep(i, &apred, -1); + ti_min = TIMEBASE; + while(ti_min > ti_step2) + ti_min >>= 1; + ti_step2 = ti_min; + + if(ti_step2 < ti_step) + { + get_timestep(i, &apred, ti_step); + prob = + ((apred - aphys) / (aphys - P[i].AphysOld) * (P[i].Ti_endstep - + P[i].Ti_begstep)) / ti_step; + if(prob < get_random_number(P[i].ID)) + ti_step /= 2; + } + else if(ti_step2 > ti_step) + { + get_timestep(i, &apred, 2 * ti_step); + prob = + ((apred - aphys) / (aphys - P[i].AphysOld) * (P[i].Ti_endstep - + P[i].Ti_begstep)) / ti_step; + if(prob < get_random_number(P[i].ID + 1)) + ti_step *= 2; + } + } + } + P[i].AphysOld = aphys; + } +#endif + +#ifdef SYNCHRONIZATION + if(ti_step > (P[i].Ti_endstep - P[i].Ti_begstep)) /* timestep wants to increase */ + { + if(((TIMEBASE - P[i].Ti_endstep) % ti_step) > 0) + ti_step = P[i].Ti_endstep - P[i].Ti_begstep; /* leave at old step */ + } +#endif +#endif /* end of FLEXSTEPS */ + + if(All.Ti_Current == TIMEBASE) /* we here finish the last timestep. 
*/ + ti_step = 0; + + if((TIMEBASE - All.Ti_Current) < ti_step) /* check that we don't run beyond the end */ + ti_step = TIMEBASE - All.Ti_Current; + + tstart = (P[i].Ti_begstep + P[i].Ti_endstep) / 2; /* midpoint of old step */ + tend = P[i].Ti_endstep + ti_step / 2; /* midpoint of new step */ + + if(All.ComovingIntegrationOn) + { + dt_entr = (tend - tstart) * All.Timebase_interval; + dt_entr2 = (tend - P[i].Ti_endstep) * All.Timebase_interval; + dt_gravkick = get_gravkick_factor(tstart, tend); + dt_hydrokick = get_hydrokick_factor(tstart, tend); + dt_gravkick2 = get_gravkick_factor(P[i].Ti_endstep, tend); + dt_hydrokick2 = get_hydrokick_factor(P[i].Ti_endstep, tend); + } + else + { + dt_entr = dt_gravkick = dt_hydrokick = (tend - tstart) * All.Timebase_interval; + dt_gravkick2 = dt_hydrokick2 = dt_entr2 = (tend - P[i].Ti_endstep) * All.Timebase_interval; + } + + P[i].Ti_begstep = P[i].Ti_endstep; + P[i].Ti_endstep = P[i].Ti_begstep + ti_step; + + + /* do the kick */ + + for(j = 0; j < 3; j++) + { + dv[j] = P[i].GravAccel[j] * dt_gravkick; + P[i].Vel[j] += dv[j]; + } + + if(P[i].Type == 0) /* SPH stuff */ + { + for(j = 0; j < 3; j++) + { + dv[j] += SphP[i].HydroAccel[j] * dt_hydrokick; + P[i].Vel[j] += SphP[i].HydroAccel[j] * dt_hydrokick; + + SphP[i].VelPred[j] = + P[i].Vel[j] - dt_gravkick2 * P[i].GravAccel[j] - dt_hydrokick2 * SphP[i].HydroAccel[j]; +#ifdef PMGRID + SphP[i].VelPred[j] += P[i].GravPM[j] * dt_gravkickB; +#endif + } + + /* In case of cooling, we prevent that the entropy (and + hence temperature decreases by more than a factor 0.5 */ + + if(SphP[i].DtEntropy * dt_entr > -0.5 * SphP[i].Entropy) + SphP[i].Entropy += SphP[i].DtEntropy * dt_entr; + else + SphP[i].Entropy *= 0.5; + + if(All.MinEgySpec) + { + minentropy = All.MinEgySpec * GAMMA_MINUS1 / pow(SphP[i].Density * a3inv, GAMMA_MINUS1); + if(SphP[i].Entropy < minentropy) + { + SphP[i].Entropy = minentropy; + SphP[i].DtEntropy = 0; + } + } + + /* In case the timestep increases in the new step, we + make sure that we do not 'overcool' when deriving + predicted temperatures. The maximum timespan over + which prediction can occur is ti_step/2, i.e. from + the middle to the end of the current step */ + + dt_entr = ti_step / 2 * All.Timebase_interval; + if(SphP[i].Entropy + SphP[i].DtEntropy * dt_entr < 0.5 * SphP[i].Entropy) + SphP[i].DtEntropy = -0.5 * SphP[i].Entropy / dt_entr; + } + + + /* if tree is not going to be reconstructed, kick parent nodes dynamically. + */ + if(All.NumForcesSinceLastDomainDecomp < All.TotNumPart * All.TreeDomainUpdateFrequency) + { + no = Father[i]; + while(no >= 0) + { + for(j = 0; j < 3; j++) + Extnodes[no].vs[j] += dv[j] * P[i].Mass / Nodes[no].u.d.mass; + + no = Nodes[no].u.d.father; + } + } + } + } + + + +#ifdef PMGRID + if(All.PM_Ti_endstep == All.Ti_Current) /* need to do long-range kick */ + { + ti_step = TIMEBASE; + while(ti_step > (dt_displacement / All.Timebase_interval)) + ti_step >>= 1; + + if(ti_step > (All.PM_Ti_endstep - All.PM_Ti_begstep)) /* PM-timestep wants to increase */ + { + /* we only increase if an integer number of steps will bring us to the end */ + if(((TIMEBASE - All.PM_Ti_endstep) % ti_step) > 0) + ti_step = All.PM_Ti_endstep - All.PM_Ti_begstep; /* leave at old step */ + } + + if(All.Ti_Current == TIMEBASE) /* we here finish the last timestep. 
*/ + ti_step = 0; + + tstart = (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2; + tend = All.PM_Ti_endstep + ti_step / 2; + + if(All.ComovingIntegrationOn) + dt_gravkick = get_gravkick_factor(tstart, tend); + else + dt_gravkick = (tend - tstart) * All.Timebase_interval; + + All.PM_Ti_begstep = All.PM_Ti_endstep; + All.PM_Ti_endstep = All.PM_Ti_begstep + ti_step; + + if(All.ComovingIntegrationOn) + dt_gravkickB = -get_gravkick_factor(All.PM_Ti_begstep, (All.PM_Ti_begstep + All.PM_Ti_endstep) / 2); + else + dt_gravkickB = + -((All.PM_Ti_begstep + All.PM_Ti_endstep) / 2 - All.PM_Ti_begstep) * All.Timebase_interval; + + for(i = 0; i < NumPart; i++) + { + for(j = 0; j < 3; j++) /* do the kick */ + P[i].Vel[j] += P[i].GravPM[j] * dt_gravkick; + + if(P[i].Type == 0) + { + if(All.ComovingIntegrationOn) + { + dt_gravkickA = get_gravkick_factor(P[i].Ti_begstep, All.Ti_Current) - + get_gravkick_factor(P[i].Ti_begstep, (P[i].Ti_begstep + P[i].Ti_endstep) / 2); + dt_hydrokick = get_hydrokick_factor(P[i].Ti_begstep, All.Ti_Current) - + get_hydrokick_factor(P[i].Ti_begstep, (P[i].Ti_begstep + P[i].Ti_endstep) / 2); + } + else + dt_gravkickA = dt_hydrokick = + (All.Ti_Current - (P[i].Ti_begstep + P[i].Ti_endstep) / 2) * All.Timebase_interval; + + for(j = 0; j < 3; j++) + SphP[i].VelPred[j] = P[i].Vel[j] + + P[i].GravAccel[j] * dt_gravkickA + + SphP[i].HydroAccel[j] * dt_hydrokick + P[i].GravPM[j] * dt_gravkickB; + } + } + } +#endif + + t1 = second(); + All.CPU_TimeLine += timediff(t0, t1); +} + + + + +/*! This function normally (for flag==0) returns the maximum allowed timestep + * of a particle, expressed in terms of the integer mapping that is used to + * represent the total simulated timespan. The physical acceleration is + * returned in `aphys'. The latter is used in conjunction with the + * PSEUDOSYMMETRIC integration option, which also makes of the second + * function of get_timestep. When it is called with a finite timestep for + * flag, it returns the physical acceleration that would lead to this + * timestep, assuming timestep criterion 0. 
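+ *
+ * For reference, criterion 0 as coded below amounts to
+ *
+ *   dt_accel   = sqrt( 2 * ErrTolIntAccuracy * a * eps / |a_phys| )
+ *   dt_courant = 2 * CourantFac * Hsml / MaxSignalVel
+ *
+ * where eps is the gravitational softening of the particle type and
+ * |a_phys| the physical acceleration assembled at the top of the
+ * function (comoving runs carry additional scale-factor terms).  For gas
+ * particles the smaller of the two values is used, and the result is
+ * mapped onto the integer timeline via Timebase_interval.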
+ */ +int get_timestep(int p, /*!< particle index */ + double *aphys, /*!< acceleration (physical units) */ + int flag /*!< either 0 for normal operation, or finite timestep to get corresponding + aphys */ ) +{ + double ax, ay, az, ac, csnd; + double dt = 0, dt_courant = 0, dt_accel; + int ti_step; + +#ifdef CONDUCTION + double dt_cond; +#endif + + if(flag == 0) + { + ax = fac1 * P[p].GravAccel[0]; + ay = fac1 * P[p].GravAccel[1]; + az = fac1 * P[p].GravAccel[2]; + +#ifdef PMGRID + ax += fac1 * P[p].GravPM[0]; + ay += fac1 * P[p].GravPM[1]; + az += fac1 * P[p].GravPM[2]; +#endif + + if(P[p].Type == 0) + { + ax += fac2 * SphP[p].HydroAccel[0]; + ay += fac2 * SphP[p].HydroAccel[1]; + az += fac2 * SphP[p].HydroAccel[2]; + } + + ac = sqrt(ax * ax + ay * ay + az * az); /* this is now the physical acceleration */ + *aphys = ac; + } + else + ac = *aphys; + + if(ac == 0) + ac = 1.0e-30; + + switch (All.TypeOfTimestepCriterion) + { + case 0: + if(flag > 0) + { + dt = flag * All.Timebase_interval; + dt /= hubble_a; /* convert dloga to physical timestep */ + ac = 2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / (dt * dt); + *aphys = ac; + return flag; + } + dt = dt_accel = sqrt(2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / ac); +#ifdef ADAPTIVE_GRAVSOFT_FORGAS + if(P[p].Type == 0) + dt = dt_accel = sqrt(2 * All.ErrTolIntAccuracy * atime * SphP[p].Hsml / 2.8 / ac); +#endif + break; + default: + endrun(888); + break; + } + + if(P[p].Type == 0) + { + csnd = sqrt(GAMMA * SphP[p].Pressure / SphP[p].Density); + + if(All.ComovingIntegrationOn) + dt_courant = 2 * All.CourantFac * All.Time * SphP[p].Hsml / (fac3 * SphP[p].MaxSignalVel); + else + dt_courant = 2 * All.CourantFac * SphP[p].Hsml / SphP[p].MaxSignalVel; + + if(dt_courant < dt) + dt = dt_courant; + } + + /* convert the physical timestep to dloga if needed. Note: If comoving integration has not been selected, + hubble_a=1. 
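+     In a comoving run the integration variable on the timeline is ln(a),
+     so a physical interval dt corresponds to dloga = H(a)*dt; this is why
+     dt is simply multiplied by hubble_a below (hubble_a = 1 otherwise).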
+ */ + dt *= hubble_a; + + if(dt >= All.MaxSizeTimestep) + dt = All.MaxSizeTimestep; + + if(dt >= dt_displacement) + dt = dt_displacement; + + if(dt < All.MinSizeTimestep) + { +#ifndef NOSTOP_WHEN_BELOW_MINTIMESTEP + printf("warning: Timestep wants to be below the limit `MinSizeTimestep'\n"); + + if(P[p].Type == 0) + { + printf + ("Part-ID=%d dt=%g dtc=%g ac=%g xyz=(%g|%g|%g) hsml=%g maxsignalvel=%g dt0=%g eps=%g\n", + (int) P[p].ID, dt, dt_courant * hubble_a, ac, P[p].Pos[0], P[p].Pos[1], P[p].Pos[2], + SphP[p].Hsml, SphP[p].MaxSignalVel, + sqrt(2 * All.ErrTolIntAccuracy * atime * All.SofteningTable[P[p].Type] / ac) * hubble_a, + All.SofteningTable[P[p].Type]); + } + else + { + printf("Part-ID=%d dt=%g ac=%g xyz=(%g|%g|%g)\n", (int) P[p].ID, dt, ac, P[p].Pos[0], P[p].Pos[1], + P[p].Pos[2]); + } + fflush(stdout); + endrun(888); +#endif + dt = All.MinSizeTimestep; + } + + ti_step = dt / All.Timebase_interval; + + if(!(ti_step > 0 && ti_step < TIMEBASE)) + { + printf("\nError: A timestep of size zero was assigned on the integer timeline!\n" + "We better stop.\n" + "Task=%d Part-ID=%d dt=%g tibase=%g ti_step=%d ac=%g xyz=(%g|%g|%g) tree=(%g|%g%g)\n\n", + ThisTask, (int) P[p].ID, dt, All.Timebase_interval, ti_step, ac, + P[p].Pos[0], P[p].Pos[1], P[p].Pos[2], P[p].GravAccel[0], P[p].GravAccel[1], P[p].GravAccel[2]); +#ifdef PMGRID + printf("pm_force=(%g|%g|%g)\n", P[p].GravPM[0], P[p].GravPM[1], P[p].GravPM[2]); +#endif + if(P[p].Type == 0) + printf("hydro-frc=(%g|%g|%g)\n", SphP[p].HydroAccel[0], SphP[p].HydroAccel[1], SphP[p].HydroAccel[2]); + + fflush(stdout); + endrun(818); + } + + return ti_step; +} + + +/*! This function computes an upper limit ('dt_displacement') to the global + * timestep of the system based on the rms velocities of particles. For + * cosmological simulations, the criterion used is that the rms displacement + * should be at most a fraction MaxRMSDisplacementFac of the mean particle + * separation. Note that the latter is estimated using the assigned particle + * masses, separately for each particle type. If comoving integration is not + * used, the function imposes no constraint on the timestep. 
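+ *
+ * In practice, for every type with minimum particle mass m_min the mean
+ * separation is estimated as
+ *
+ *   dmean = ( m_min / (Omega_type * 3*Hubble^2 / (8*pi*G)) )^(1/3)
+ *
+ * (Omega_type = OmegaBaryon for gas, Omega0 - OmegaBaryon otherwise),
+ * and the constraint adopted is
+ *
+ *   dt <= MaxRMSDisplacementFac * hfac * min(dmean, asmth) / v_rms ,
+ *
+ * with hfac = a^2*H(a), v_rms the rms velocity of that type, and asmth
+ * the PM smoothing scale (only relevant when PMGRID is set).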
+ */ +void find_dt_displacement_constraint(double hfac /*!< should be a^2*H(a) */ ) +{ + int i, j, type, *temp; + int count[6]; + long long count_sum[6]; + double v[6], v_sum[6], mim[6], min_mass[6]; + double dt, dmean, asmth = 0; + + dt_displacement = All.MaxSizeTimestep; + + if(All.ComovingIntegrationOn) + { + for(type = 0; type < 6; type++) + { + count[type] = 0; + v[type] = 0; + mim[type] = 1.0e30; + } + + for(i = 0; i < NumPart; i++) + { + v[P[i].Type] += P[i].Vel[0] * P[i].Vel[0] + P[i].Vel[1] * P[i].Vel[1] + P[i].Vel[2] * P[i].Vel[2]; + if(mim[P[i].Type] > P[i].Mass) + mim[P[i].Type] = P[i].Mass; + count[P[i].Type]++; + } + + MPI_Allreduce(v, v_sum, 6, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + MPI_Allreduce(mim, min_mass, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + + temp = malloc(NTask * 6 * sizeof(int)); + MPI_Allgather(count, 6, MPI_INT, temp, 6, MPI_INT, MPI_COMM_WORLD); + for(i = 0; i < 6; i++) + { + count_sum[i] = 0; + for(j = 0; j < NTask; j++) + count_sum[i] += temp[j * 6 + i]; + } + free(temp); + + for(type = 0; type < 6; type++) + { + if(count_sum[type] > 0) + { + if(type == 0) + dmean = + pow(min_mass[type] / (All.OmegaBaryon * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)), + 1.0 / 3); + else + dmean = + pow(min_mass[type] / + ((All.Omega0 - All.OmegaBaryon) * 3 * All.Hubble * All.Hubble / (8 * M_PI * All.G)), + 1.0 / 3); + + dt = All.MaxRMSDisplacementFac * hfac * dmean / sqrt(v_sum[type] / count_sum[type]); + +#ifdef PMGRID + asmth = All.Asmth[0]; +#ifdef PLACEHIGHRESREGION + if(((1 << type) & (PLACEHIGHRESREGION))) + asmth = All.Asmth[1]; +#endif + if(asmth < dmean) + dt = All.MaxRMSDisplacementFac * hfac * asmth / sqrt(v_sum[type] / count_sum[type]); +#endif + + if(ThisTask == 0) + printf("type=%d dmean=%g asmth=%g minmass=%g a=%g sqrt()=%g dlogmax=%g\n", + type, dmean, asmth, min_mass[type], All.Time, sqrt(v_sum[type] / count_sum[type]), dt); + + if(dt < dt_displacement) + dt_displacement = dt; + } + } + + if(ThisTask == 0) + printf("displacement time constraint: %g (%g)\n", dt_displacement, All.MaxSizeTimestep); + } +} diff --git a/src/PyGadget/src/timestep.o b/src/PyGadget/src/timestep.o new file mode 100644 index 0000000..3f9de82 Binary files /dev/null and b/src/PyGadget/src/timestep.o differ diff --git a/src/tessel/tessel/Makefile b/src/tessel/tessel/Makefile index 68c5ae9..6e1f98c 100644 --- a/src/tessel/tessel/Makefile +++ b/src/tessel/tessel/Makefile @@ -1,24 +1,24 @@ PGM = tessel #INCLUDEPY = /home/revaz/local/include/python2.4 -INCLUDEPY = /usr/include/python2.5/ -INCLUDENUMPY = /usr/lib/python2.5/site-packages/numpy/core/include/ -INCLUDENUMARRAY = /usr/lib/python2.5/site-packages/numpy/numarray/ +INCLUDEPY = /usr/include/python2.6/ +INCLUDENUMPY = /usr/lib/python2.6/site-packages/numpy/core/include/ +INCLUDENUMARRAY = /usr/lib/python2.6/site-packages/numpy/numarray/ CFLAGS= -I$(INCLUDEPY) -I$(INCLUDENUMARRAY) -I$(INCLUDENUMPY) $(PGM).so : $(PGM).o gcc -fPIC -shared $(PGM).o -o $(PGM).so $(PGM).o : $(PGM).c gcc -fPIC $(CFLAGS) -c $(PGM).c clean: rm -f *.o *.exe *.so diff --git a/src/tessel/tessel/tessel.c b/src/tessel/tessel/tessel.c index ca8d9bf..282d752 100644 --- a/src/tessel/tessel/tessel.c +++ b/src/tessel/tessel/tessel.c @@ -1,2409 +1,2409 @@ #include #include #include #include #include #define MAX_REAL_NUMBER 1e+37 #define MIN_REAL_NUMBER 1e-37 #define TO_DOUBLE(a) ( (PyArrayObject*) PyArray_CastToType(a, PyArray_DescrFromType(NPY_DOUBLE) ,0) ) -#define MAXNUMTRIANGLES 10000000 +#define MAXNUMTRIANGLES 10000 #define PI 3.1415926535897931 struct 
global_data_all_processes { int MaxPart; /*!< This gives the maxmimum number of particles that can be stored on one processor. */ } All; /*! This structure holds all the information that is * stored for each particle of the simulation. */ struct Point /* struct particle_data */ { double Pos[3]; /*!< particle position at its current time */ double Mass; int IsDone; } *P; /*!< holds particle data on local processor */ struct Triangle { struct Point Pt1[3]; struct Point Pt2[3]; struct Point Pt3[3]; }; struct TriangleInList { int idx; /* index of current triangle (used for checks) */ struct Point* P[3]; /* pointers towards the 3 point */ struct TriangleInList* T[3]; /* pointers towards the 3 triangles */ int idxe[3]; /* index of point in the first triangle, opposite to the common edge */ struct Median* Med[3]; }; struct Median { double a; double b; double c; struct Point Ps; /* starting point of a segment */ struct Point Pe; /* stopping point */ }; /* some global varables */ int nT=0,numTinStack=0; /* number of triangles in the list */ struct TriangleInList Triangles[MAXNUMTRIANGLES]; /* list of triangles */ struct TriangleInList *TStack[MAXNUMTRIANGLES]; /* index of triangles to check */ struct Median MediansList[MAXNUMTRIANGLES][3]; int NumPart; double domainRadius,domainCenter[3]; struct Point Pe[3]; /* edges */ void endrun(int ierr) { int ThisTask=0; if(ierr) { printf("task %d: endrun called with an error level of %d\n\n\n", ThisTask, ierr); fflush(stdout); exit(0); } exit(0); } /*! This routine allocates memory for particle storage, both the * collisionless and the SPH particles. */ void allocate_memory(void) { size_t bytes; double bytes_tot = 0; if(All.MaxPart > 0) { if(!(P = malloc(bytes = All.MaxPart * sizeof(struct Point)))) { printf("failed to allocate memory for `P' (%g MB).\n", bytes / (1024.0 * 1024.0)); endrun(1); } bytes_tot += bytes; printf("\nAllocated %g MByte for particle storage. %d\n\n", bytes_tot / (1024.0 * 1024.0), sizeof(struct Point)); } } void lines_intersections(double a0, double b0, double c0, double a1, double b1, double c1, double *x, double *y) { *x = (c1*b0 - c0*b1)/(a0*b1 - a1*b0); *y = (c1*a0 - c0*a1)/(a1*b0 - a0*b1); } /*! */ struct Triangle TriangleInList2Triangle(struct TriangleInList Tl) { struct Triangle T; T.Pt1->Pos[0] = Tl.P[0]->Pos[0]; T.Pt1->Pos[1] = Tl.P[0]->Pos[1]; T.Pt2->Pos[0] = Tl.P[1]->Pos[0]; T.Pt2->Pos[1] = Tl.P[1]->Pos[1]; T.Pt3->Pos[0] = Tl.P[2]->Pos[0]; T.Pt3->Pos[1] = Tl.P[2]->Pos[1]; return T; } /*! For a set of three points, construct a triangle */ struct Triangle MakeTriangleFromPoints(struct Point Pt1,struct Point Pt2,struct Point Pt3) { struct Triangle T; T.Pt1->Pos[0] = Pt1.Pos[0]; T.Pt1->Pos[1] = Pt1.Pos[1]; T.Pt2->Pos[0] = Pt2.Pos[0]; T.Pt2->Pos[1] = Pt2.Pos[1]; T.Pt3->Pos[0] = Pt3.Pos[0]; T.Pt3->Pos[1] = Pt3.Pos[1]; return T; } /*! For a set of three points, this function computes the 3 medians. 
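 *
 * Despite the name, each `median' computed here is the perpendicular
 * bisector of one triangle edge, stored in implicit form a*x + b*y + c = 0
 * with, for the edge P1-P2,
 *
 *   a = 2*(x2 - x1),   b = 2*(y2 - y1),   c = x1^2 - x2^2 + y1^2 - y2^2 .
 *
 * The pairwise intersections of the three bisectors (Pmm1..Pmm3) coincide
 * with the circumcentre of the triangle, and Pme1..Pme3 are simply the
 * midpoints of the three edges.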
*/ void TriangleMedians(struct Point Pt1,struct Point Pt2,struct Point Pt3,struct Point *Pmm1,struct Point *Pmm2,struct Point *Pmm3,struct Point *Pme1,struct Point *Pme2,struct Point *Pme3) { double ma1,mb1,mc1; double ma2,mb2,mc2; double ma3,mb3,mc3; /* median 0-1 */ ma1 = 2*(Pt2.Pos[0] - Pt1.Pos[0]); mb1 = 2*(Pt2.Pos[1] - Pt1.Pos[1]); mc1 = (Pt1.Pos[0]*Pt1.Pos[0]) - (Pt2.Pos[0]*Pt2.Pos[0]) + (Pt1.Pos[1]*Pt1.Pos[1]) - (Pt2.Pos[1]*Pt2.Pos[1]); /* median 1-2 */ ma2 = 2*(Pt3.Pos[0] - Pt2.Pos[0]); mb2 = 2*(Pt3.Pos[1] - Pt2.Pos[1]); mc2 = (Pt2.Pos[0]*Pt2.Pos[0]) - (Pt3.Pos[0]*Pt3.Pos[0]) + (Pt2.Pos[1]*Pt2.Pos[1]) - (Pt3.Pos[1]*Pt3.Pos[1]); /* median 2-0 */ ma3 = 2*(Pt1.Pos[0] - Pt3.Pos[0]); mb3 = 2*(Pt1.Pos[1] - Pt3.Pos[1]); mc3 = (Pt3.Pos[0]*Pt3.Pos[0]) - (Pt1.Pos[0]*Pt1.Pos[0]) + (Pt3.Pos[1]*Pt3.Pos[1]) - (Pt1.Pos[1]*Pt1.Pos[1]); /* intersection m0-1 -- m1-2 */ Pmm1->Pos[0] = (mc2*mb1 - mc1*mb2)/(ma1*mb2 - ma2*mb1); Pmm1->Pos[1] = (mc2*ma1 - mc1*ma2)/(ma2*mb1 - ma1*mb2); /* intersection m1-2 -- m2-0 */ Pmm2->Pos[0] = (mc2*mb1 - mc1*mb2)/(ma1*mb2 - ma2*mb1); Pmm2->Pos[1] = (mc2*ma1 - mc1*ma2)/(ma2*mb1 - ma1*mb2); /* intersection m2-0 -- m0-1 */ Pmm3->Pos[0] = (mc2*mb1 - mc1*mb2)/(ma1*mb2 - ma2*mb1); Pmm3->Pos[1] = (mc2*ma1 - mc1*ma2)/(ma2*mb1 - ma1*mb2); /* intersection m1-2 -- e1-2 */ Pme1->Pos[0] = 0.5*(Pt1.Pos[0] + Pt2.Pos[0]); Pme1->Pos[1] = 0.5*(Pt1.Pos[1] + Pt2.Pos[1]); /* intersection m2-3 -- e3-1 */ Pme2->Pos[0] = 0.5*(Pt2.Pos[0] + Pt3.Pos[0]); Pme2->Pos[1] = 0.5*(Pt2.Pos[1] + Pt3.Pos[1]); /* intersection m3-1 -- e1-2 */ Pme3->Pos[0] = 0.5*(Pt3.Pos[0] + Pt1.Pos[0]); Pme3->Pos[1] = 0.5*(Pt3.Pos[1] + Pt1.Pos[1]); } /*! For a set of three points, this function computes their cirum-circle. * Its radius is return, while the center is return using pointers. */ double CircumCircleProperties(struct Point Pt1,struct Point Pt2,struct Point Pt3, double *xc, double *yc) { double r; double x21,x32,y21,y32; double x12mx22,y12my22,x22mx32,y22my32; double c1,c2; x21 = Pt2.Pos[0]-Pt1.Pos[0]; x32 = Pt3.Pos[0]-Pt2.Pos[0]; y21 = Pt2.Pos[1]-Pt1.Pos[1]; y32 = Pt3.Pos[1]-Pt2.Pos[1]; x12mx22 = (Pt1.Pos[0]*Pt1.Pos[0])-(Pt2.Pos[0]*Pt2.Pos[0]); y12my22 = (Pt1.Pos[1]*Pt1.Pos[1])-(Pt2.Pos[1]*Pt2.Pos[1]); x22mx32 = (Pt2.Pos[0]*Pt2.Pos[0])-(Pt3.Pos[0]*Pt3.Pos[0]); y22my32 = (Pt2.Pos[1]*Pt2.Pos[1])-(Pt3.Pos[1]*Pt3.Pos[1]); c1 = x12mx22 + y12my22; c2 = x22mx32 + y22my32; *xc = (y32*c1 - y21*c2)/2.0/( x32*y21 - x21*y32 ); *yc = (x32*c1 - x21*c2)/2.0/( x21*y32 - x32*y21 ); r = sqrt( (Pt1.Pos[0]-*xc)*(Pt1.Pos[0]-*xc) + (Pt1.Pos[1]-*yc)*(Pt1.Pos[1]-*yc) ) ; return r; } /*! For a given triangle T, the routine tells if the point P4 is in the circum circle of the triangle or not. 
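 *
 * The test actually used below (the "Volker formula" branch) takes the
 * vertex and point coordinates relative to Pt1 and evaluates the 3x3
 * determinant
 *
 *   | x2-x1  y2-y1  (x2-x1)^2+(y2-y1)^2 |
 *   | x3-x1  y3-y1  (x3-x1)^2+(y3-y1)^2 |
 *   | x4-x1  y4-y1  (x4-x1)^2+(y4-y1)^2 |
 *
 * For the positively oriented triangles used throughout, det < 0 means
 * Pt4 lies inside the circumcircle (return value 1), otherwise 0 is
 * returned.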
*/ int InCircumCircle(struct Triangle T,struct Point Pt4) { double a,b,c; double d,e,f; double g,h,i; double det; /* a = T.Pt1->Pos[0] - Pt4.Pos[0]; b = T.Pt1->Pos[1] - Pt4.Pos[1]; c = (T.Pt1->Pos[0]*T.Pt1->Pos[0] - Pt4.Pos[0]*Pt4.Pos[0]) + (T.Pt1->Pos[1]*T.Pt1->Pos[1] - Pt4.Pos[1]*Pt4.Pos[1]); d = T.Pt2->Pos[0] - Pt4.Pos[0]; e = T.Pt2->Pos[1] - Pt4.Pos[1]; f = (T.Pt2->Pos[0]*T.Pt2->Pos[0] - Pt4.Pos[0]*Pt4.Pos[0]) + (T.Pt2->Pos[1]*T.Pt2->Pos[1] - Pt4.Pos[1]*Pt4.Pos[1]); g = T.Pt3->Pos[0] - Pt4.Pos[0]; h = T.Pt3->Pos[1] - Pt4.Pos[1]; i = (T.Pt3->Pos[0]*T.Pt3->Pos[0] - Pt4.Pos[0]*Pt4.Pos[0]) + (T.Pt3->Pos[1]*T.Pt3->Pos[1] - Pt4.Pos[1]*Pt4.Pos[1]); */ /* Volker Formula */ a = T.Pt2->Pos[0] - T.Pt1->Pos[0]; b = T.Pt2->Pos[1] - T.Pt1->Pos[1]; c = a*a + b*b; d = T.Pt3->Pos[0] - T.Pt1->Pos[0]; e = T.Pt3->Pos[1] - T.Pt1->Pos[1]; f = d*d + e*e; g = Pt4.Pos[0] - T.Pt1->Pos[0]; h = Pt4.Pos[1] - T.Pt1->Pos[1]; i = g*g + h*h; det = a*e*i - a*f*h - b*d*i + b*f*g + c*d*h - c*e*g; if (det<0) return 1; /* inside */ else return 0; /* outside */ } /*! For a given triangle T, the routine tells if the point P4 lie inside the triangle or not. */ int InTriangle(struct Triangle T,struct Point Pt4) { double c1,c2,c3; /* here, we use the cross product */ c1 = (T.Pt2->Pos[0]-T.Pt1->Pos[0])*(Pt4.Pos[1]-T.Pt1->Pos[1]) - (T.Pt2->Pos[1]-T.Pt1->Pos[1])*(Pt4.Pos[0]-T.Pt1->Pos[0]); c2 = (T.Pt3->Pos[0]-T.Pt2->Pos[0])*(Pt4.Pos[1]-T.Pt2->Pos[1]) - (T.Pt3->Pos[1]-T.Pt2->Pos[1])*(Pt4.Pos[0]-T.Pt2->Pos[0]); c3 = (T.Pt1->Pos[0]-T.Pt3->Pos[0])*(Pt4.Pos[1]-T.Pt3->Pos[1]) - (T.Pt1->Pos[1]-T.Pt3->Pos[1])*(Pt4.Pos[0]-T.Pt3->Pos[0]); if ( (c1>0) && (c2>0) && (c3>0) ) /* inside */ return 1; else return 0; } int InTriangleOrOutside(struct Triangle T,struct Point Pt4) { double c1,c2,c3; c1 = (T.Pt2->Pos[0]-T.Pt1->Pos[0])*(Pt4.Pos[1]-T.Pt1->Pos[1]) - (T.Pt2->Pos[1]-T.Pt1->Pos[1])*(Pt4.Pos[0]-T.Pt1->Pos[0]); if (c1<0) return 2; /* to triangle T[2] */ c2 = (T.Pt3->Pos[0]-T.Pt2->Pos[0])*(Pt4.Pos[1]-T.Pt2->Pos[1]) - (T.Pt3->Pos[1]-T.Pt2->Pos[1])*(Pt4.Pos[0]-T.Pt2->Pos[0]); if (c2<0) return 0; /* to triangle T[1] */ c3 = (T.Pt1->Pos[0]-T.Pt3->Pos[0])*(Pt4.Pos[1]-T.Pt3->Pos[1]) - (T.Pt1->Pos[1]-T.Pt3->Pos[1])*(Pt4.Pos[0]-T.Pt3->Pos[0]); if (c3<0) return 1; /* to triangle T[0] */ return -1; /* the point is inside */ } /*! For a given triangle, orient it positively. */ struct Triangle OrientTriangle(struct Triangle T) { double a,b,c,d; double det; struct Point Ptsto; a = T.Pt2->Pos[0] - T.Pt1->Pos[0]; b = T.Pt2->Pos[1] - T.Pt1->Pos[1]; c = T.Pt3->Pos[0] - T.Pt1->Pos[0]; d = T.Pt3->Pos[1] - T.Pt1->Pos[1]; det = (a*d) - (b*c); if (det<0) { Ptsto.Pos[0] = T.Pt1->Pos[0]; Ptsto.Pos[1] = T.Pt1->Pos[1]; T.Pt1->Pos[0] = T.Pt3->Pos[0]; T.Pt1->Pos[1] = T.Pt3->Pos[1]; T.Pt3->Pos[0] = Ptsto.Pos[0]; T.Pt3->Pos[1] = Ptsto.Pos[1]; T = OrientTriangle(T); } return T; } /*! For a given triangle, orient it positively. 
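 *
 * As in OrientTriangle() above, positive (counter-clockwise) orientation
 * is detected from the sign of the 2D cross product
 *
 *   det = (P[1]-P[0]) x (P[2]-P[0]) ;
 *
 * if det < 0 the coordinates of P[0] and P[2] are swapped and the test is
 * repeated recursively.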
*/ struct TriangleInList OrientTriangleInList(struct TriangleInList T) { double a,b,c,d; double det; struct Point Ptsto; a = T.P[1]->Pos[0] - T.P[0]->Pos[0]; b = T.P[1]->Pos[1] - T.P[0]->Pos[1]; c = T.P[2]->Pos[0] - T.P[0]->Pos[0]; d = T.P[2]->Pos[1] - T.P[0]->Pos[1]; det = (a*d) - (b*c); if (det<0) { Ptsto.Pos[0] = T.P[0]->Pos[0]; Ptsto.Pos[1] = T.P[0]->Pos[1]; T.P[0]->Pos[0] = T.P[2]->Pos[0]; T.P[0]->Pos[1] = T.P[2]->Pos[1]; T.P[2]->Pos[0] = Ptsto.Pos[0]; T.P[2]->Pos[1] = Ptsto.Pos[1]; T = OrientTriangleInList(T); } return T; } void FindExtent() { int i,j; double xmin[3], xmax[3],len; /* determine local extension */ for(j = 0; j < 3; j++) { xmin[j] = MAX_REAL_NUMBER; xmax[j] = -MAX_REAL_NUMBER; } for(i = 0; i < NumPart; i++) { for(j = 0; j < 3; j++) { if(xmin[j] > P[i].Pos[j]) xmin[j] = P[i].Pos[j]; if(xmax[j] < P[i].Pos[j]) xmax[j] = P[i].Pos[j]; } } len = 0; for(j = 0; j < 3; j++) { if(xmax[j] - xmin[j] > len) len = xmax[j] - xmin[j]; } for(j = 0; j < 3; j++) domainCenter[j] = xmin[j] + len/2.; domainRadius = len*1.5; printf("domainRadius = %g\n",domainRadius); printf("domainCenter = (%g %g)\n",domainCenter[0],domainCenter[1]); } int FindSegmentInTriangle(struct TriangleInList *T,double v,struct Point P[3]) { double v0,v1,v2; double x0,x1,x2; double y0,y1,y2; double f; double x,y; int iP; /* if the triangle as an edge point, do nothing */ if ( (T->P[0]==&Pe[0]) || (T->P[1]==&Pe[0]) || (T->P[2]==&Pe[0]) ) return 0; /* if the triangle as an edge point, do nothing */ if ( (T->P[0]==&Pe[1]) || (T->P[1]==&Pe[1]) || (T->P[2]==&Pe[1]) ) return 0; /* if the triangle as an edge point, do nothing */ if ( (T->P[0]==&Pe[2]) || (T->P[1]==&Pe[2]) || (T->P[2]==&Pe[2]) ) return 0; iP = 0; v0 = T->P[0]->Mass; v1 = T->P[1]->Mass; v2 = T->P[2]->Mass; //printf("Triangle %d : %g %g %g\n",T->idx,v0,v1,v2); /* we could also use the sign v-v0 * v-v1 ??? */ if (( ((v>v0)&&(vv1)&&(vP[0]->Pos[0]; y0 = T->P[0]->Pos[1]; x1 = T->P[1]->Pos[0]; y1 = T->P[1]->Pos[1]; f = (v-v0)/(v1-v0); P[iP].Pos[0] = f*(x1-x0) + x0; P[iP].Pos[1] = f*(y1-y0) + y0; iP++; } if (( ((v>v1)&&(vv2)&&(vP[1]->Pos[0]; y0 = T->P[1]->Pos[1]; x1 = T->P[2]->Pos[0]; y1 = T->P[2]->Pos[1]; f = (v-v1)/(v2-v1); P[iP].Pos[0] = f*(x1-x0) + x0; P[iP].Pos[1] = f*(y1-y0) + y0; iP++; } if (( ((v>v2)&&(vv0)&&(vP[2]->Pos[0]; y0 = T->P[2]->Pos[1]; x1 = T->P[0]->Pos[0]; y1 = T->P[0]->Pos[1]; f = (v-v2)/(v0-v2); P[iP].Pos[0] = f*(x1-x0) + x0; P[iP].Pos[1] = f*(y1-y0) + y0; iP++; } return iP; } void CheckTriangles(void) { int iT; struct TriangleInList *T,*Te; for (iT=0;iTT[0]; if (Te!=NULL) { if ((Te->T[0]!=NULL)&&(Te->T[0] == T)) { } else if ((Te->T[1]!=NULL)&&(Te->T[1] == T)) { } else if ((Te->T[2]!=NULL)&&(Te->T[2] == T)) { } else { printf("Triangle %d does not point towards %d, while T->T2=%d\n",Te->idx,T->idx,T->T[0]->idx); exit(-1); } } Te = T->T[1]; if (Te!=NULL) { if ((Te->T[0]!=NULL)&&(Te->T[0] == T)) { } else if ((Te->T[1]!=NULL)&&(Te->T[1] == T)) { } else if ((Te->T[2]!=NULL)&&(Te->T[2] == T)) { } else { printf("Triangle %d does not point towards %d, while T->T2=%d\n",Te->idx,T->idx,T->T[1]->idx); exit(-1); } } Te = T->T[2]; if (Te!=NULL) { if ((Te->T[0]!=NULL)&&(Te->T[0] == T)) { } else if ((Te->T[1]!=NULL)&&(Te->T[1] == T)) { } else if ((Te->T[2]!=NULL)&&(Te->T[2] == T)) { } else { printf("Triangle %d does not point towards %d, while T->T2=%d\n",Te->idx,T->idx,T->T[2]->idx); exit(-1); } } } } /*! Flip two triangles. 
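 *
 * This is the classic Delaunay edge flip: the edge shared by T and its
 * neighbour Te is replaced by the edge joining the two opposite vertices,
 * producing the two new triangles T1 and T2; the neighbour pointers T[]
 * and the opposite-point indices idxe[] of the surrounding triangles are
 * rewired accordingly.  On entry the neighbour is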
Te = T.T[i] */ void FlipTriangle(int i,struct TriangleInList *T,struct TriangleInList *Te,struct TriangleInList *T1,struct TriangleInList *T2) { struct TriangleInList Ts1,Ts2; int i0,i1,i2; int j0,j1,j2; int j; Ts1 = *T; /* save the content of the pointed triangle */ Ts2 = *Te; /* save the content of the pointed triangle */ j = T->idxe[i]; /* index of point opposite to i */ i0= i; i1= (i+1) % 3; i2= (i+2) % 3; j0= j; j1= (j+1) % 3; j2= (j+2) % 3; /* triangle 1 */ T1->P[0] = Ts1.P[i0]; T1->P[1] = Ts1.P[i1]; T1->P[2] = Ts2.P[j0]; T1->T[0] = Ts2.T[j1]; T1->T[1] = T2; T1->T[2] = Ts1.T[i2]; T1->idxe[0] = Ts2.idxe[j1]; T1->idxe[1] = 1; T1->idxe[2] = Ts1.idxe[i2]; /* triangle 2 */ T2->P[0] = Ts2.P[j0]; T2->P[1] = Ts2.P[j1]; T2->P[2] = Ts1.P[i0]; T2->T[0] = Ts1.T[i1]; T2->T[1] = T1; T2->T[2] = Ts2.T[j2]; T2->idxe[0] = Ts1.idxe[i1]; T2->idxe[1] = 1; T2->idxe[2] = Ts2.idxe[j2]; /* restore links with adjacents triangles */ if (Ts1.T[i1]!=NULL) { Ts1.T[i1]->T[ Ts1.idxe[i1] ] = T2; Ts1.T[i1]->idxe[ Ts1.idxe[i1] ] = 0; } if (Ts1.T[i2] !=NULL) { Ts1.T[i2]->T[ Ts1.idxe[i2] ] = T1; Ts1.T[i2]->idxe[ Ts1.idxe[i2] ] = 2; } if (Ts2.T[j1] !=NULL) { Ts2.T[j1]->T[ Ts2.idxe[j1] ] = T1; Ts2.T[j1]->idxe[ Ts2.idxe[j1] ] = 0; } if (Ts2.T[j2] !=NULL) { Ts2.T[j2]->T[ Ts2.idxe[j2] ] = T2; Ts2.T[j2]->idxe[ Ts2.idxe[j2] ] = 2; } } void DoTrianglesInStack(void) { struct TriangleInList *T,*Te,*T1,*T2,*Tee; struct TriangleInList Ts1,Ts2; struct Point P; int istack; int idx1,idx2; int i; istack=0; while(numTinStack>0) { int insphere=0; T = TStack[istack]; //printf(" DoInStack T=%d (istack=%d, numTinStack=%d)\n",T->idx,istack,numTinStack); /* find the opposite point of the 3 adjacent triangles */ /*******************/ /* triangle 1 */ /*******************/ i = 0; Te = T->T[i]; if (Te!=NULL) { /* index of opposite point */ P = *Te->P[T->idxe[i]]; insphere = InCircumCircle(TriangleInList2Triangle(*T),P); if (insphere) { //printf("insphere (1)... %g %g %g in T=%d\n",P.Pos[0],P.Pos[1],P.Pos[2],T->idx); /* index of the new triangles */ idx1 = T->idx; idx2 = Te->idx; T1 = &Triangles[idx1]; T2 = &Triangles[idx2]; FlipTriangle(i,T,Te,T1,T2); /* add triangles in stack */ if (numTinStack+1>MAXNUMTRIANGLES) { printf("\nNo more memory !\n"); printf("numTinStack+1=%d > MAXNUMTRIANGLES=%d\n",numTinStack+1,MAXNUMTRIANGLES); printf("You should increase MAXNUMTRIANGLES\n\n"); exit(-1); } TStack[istack ] = T1; TStack[istack+numTinStack] = T2; numTinStack++; continue; } } /*******************/ /* triangle 2 */ /*******************/ i = 1; Te = T->T[i]; if (Te!=NULL) { /* index of opposite point */ P = *Te->P[T->idxe[i]]; insphere = InCircumCircle(TriangleInList2Triangle(*T),P); if (insphere) { //printf("insphere (2)... %g %g %g in T=%d\n",P.Pos[0],P.Pos[1],P.Pos[2],T->idx); /* index of the new triangles */ idx1 = T->idx; idx2 = Te->idx; T1 = &Triangles[idx1]; T2 = &Triangles[idx2]; FlipTriangle(i,T,Te,T1,T2); /* add triangles in stack */ if (numTinStack+1>MAXNUMTRIANGLES) { printf("\nNo more memory !\n"); printf("numTinStack+1=%d > MAXNUMTRIANGLES=%d\n",numTinStack+1,MAXNUMTRIANGLES); printf("You should increase MAXNUMTRIANGLES\n\n"); exit(-1); } TStack[istack ] = T1; TStack[istack+numTinStack] = T2; numTinStack++; continue; } } /*******************/ /* triangle 3 */ /*******************/ i = 2; Te = T->T[i]; if (Te!=NULL) { /* index of opposite point */ P = *Te->P[T->idxe[i]]; insphere = InCircumCircle(TriangleInList2Triangle(*T),P); if (insphere) { //printf("insphere (3)... 
%g %g %g in T=%d\n",P.Pos[0],P.Pos[1],P.Pos[2],T->idx); /* index of the new triangles */ idx1 = T->idx; idx2 = Te->idx; T1 = &Triangles[idx1]; T2 = &Triangles[idx2]; FlipTriangle(i,T,Te,T1,T2); /* add triangles in stack */ if (numTinStack+1>MAXNUMTRIANGLES) { printf("\nNo more memory !\n"); printf("numTinStack+1=%d > MAXNUMTRIANGLES=%d\n",numTinStack+1,MAXNUMTRIANGLES); printf("You should increase MAXNUMTRIANGLES\n\n"); exit(-1); } TStack[istack ] = T1; TStack[istack+numTinStack] = T2; numTinStack++; continue; } } numTinStack--; istack++; //printf("one triangle less...(istack=%d numTinStack=%d)\n",istack,numTinStack); } } void Check(void) { int iT; printf("===========================\n"); for(iT=0;iTPos[0],Triangles[iT].P[0]->Pos[1],Triangles[iT].P[0]->Pos[2]); printf("pt2 %g %g %g\n",Triangles[iT].P[1]->Pos[0],Triangles[iT].P[1]->Pos[1],Triangles[iT].P[1]->Pos[2]); printf("pt3 %g %g %g\n",Triangles[iT].P[2]->Pos[0],Triangles[iT].P[2]->Pos[1],Triangles[iT].P[2]->Pos[2]); if (Triangles[iT].T[0]!=NULL) printf("T1 %d\n",Triangles[iT].T[0]->idx); else printf("T1 x\n"); if (Triangles[iT].T[1]!=NULL) printf("T2 %d\n",Triangles[iT].T[1]->idx); else printf("T2 x\n"); if (Triangles[iT].T[2]!=NULL) printf("T3 %d\n",Triangles[iT].T[2]->idx); else printf("T3 x\n"); } printf("===========================\n"); } /*! Split a triangle in 3, using the point P inside it. Update the global list. */ void SplitTriangle(struct TriangleInList *pT,struct Point *Pt) { struct TriangleInList T,*T0,*T1,*T2,*Te; int idx,idx0,idx1,idx2; T = *pT; /* save the content of the pointed triangle */ idx = T.idx; /* index of the new triangles */ idx0 = idx; idx1 = nT; idx2 = nT+1; /* increment counter */ nT=nT+2; /* check memory */ if (nT>MAXNUMTRIANGLES) { printf("\nNo more memory !\n"); printf("nT=%d > MAXNUMTRIANGLES=%d\n",nT,MAXNUMTRIANGLES); printf("You should increase MAXNUMTRIANGLES\n\n"); exit(-1); } /* create pointers towards the triangles */ T0 = &Triangles[idx0]; T1 = &Triangles[idx1]; T2 = &Triangles[idx2]; /* first */ T0->idx = idx0; T0->P[0] = T.P[0]; T0->P[1] = T.P[1]; T0->P[2] = Pt; /* second */ T1->idx = idx1; T1->P[0] = T.P[1]; T1->P[1] = T.P[2]; T1->P[2] = Pt; /* third */ T2->idx = idx2; T2->P[0] = T.P[2]; T2->P[1] = T.P[0]; T2->P[2] = Pt; /* add adjacents */ T0->T[0] = T1; T0->T[1] = T2; T0->T[2] = T.T[2]; T1->T[0] = T2; T1->T[1] = T0; T1->T[2] = T.T[0]; T2->T[0] = T0; T2->T[1] = T1; T2->T[2] = T.T[1]; /* add ext point */ T0->idxe[0] = 1; T0->idxe[1] = 0; T0->idxe[2] = T.idxe[2]; T1->idxe[0] = 1; T1->idxe[1] = 0; T1->idxe[2] = T.idxe[0]; T2->idxe[0] = 1; T2->idxe[1] = 0; T2->idxe[2] = T.idxe[1]; /* restore links with adgacents triangles */ Te = T0->T[2]; if (Te!=NULL) { Te->T[ T0->idxe[2]] = T0; Te->idxe[T0->idxe[2]] = 2; } Te = T1->T[2]; if (Te!=NULL) { Te->T[ T1->idxe[2]] = T1; Te->idxe[T1->idxe[2]] = 2; } Te = T2->T[2]; if (Te!=NULL) { Te->T[ T2->idxe[2]] = T2; Te->idxe[T2->idxe[2]] = 2; } /* add the new triangles in the stack */ TStack[numTinStack] = T0; numTinStack++; TStack[numTinStack] = T1; numTinStack++; TStack[numTinStack] = T2; numTinStack++; //printf("--> add in stack %d %d %d\n",T0->idx,T1->idx,T2->idx); } int FindTriangle(struct Point *Pt) { int iT; /* find triangle containing the point */ for(iT=0;iTidx,e,T->T[e]->idx); if (e==-1) /* the point is inside */ break; T = T->T[e]; if (T==NULL) { printf("point lie outside the limits.\n"); exit(-1); } } //printf("done with find triangle (T=%d)\n",T->idx); return T->idx; } /*! 
Add a new point in the tesselation */ void AddPoint(struct Point *Pt) { int iT; /* find the triangle that contains the point P */ //iT= FindTriangle(Pt); iT= NewFindTriangle(Pt); /* create the new triangles */ SplitTriangle(&Triangles[iT],Pt); /* test the new triangles and divide and modify if necessary */ DoTrianglesInStack(); /* check */ //CheckTriangles(); } /*! Compute all medians properties (a,b,c) */ void ComputeMediansProperties() { int iT; /* loop over all triangles */ for(iT=0;iTPos[0]; Pt0.Pos[1] = Triangles[iT].P[0]->Pos[1]; Pt1.Pos[0] = Triangles[iT].P[1]->Pos[0]; Pt1.Pos[1] = Triangles[iT].P[1]->Pos[1]; Pt2.Pos[0] = Triangles[iT].P[2]->Pos[0]; Pt2.Pos[1] = Triangles[iT].P[2]->Pos[1]; /* median 0-1 */ MediansList[iT][2].a = 2*(Pt1.Pos[0] - Pt0.Pos[0]); MediansList[iT][2].b = 2*(Pt1.Pos[1] - Pt0.Pos[1]); MediansList[iT][2].c = (Pt0.Pos[0]*Pt0.Pos[0]) - (Pt1.Pos[0]*Pt1.Pos[0]) + (Pt0.Pos[1]*Pt0.Pos[1]) - (Pt1.Pos[1]*Pt1.Pos[1]); /* median 1-2 */ MediansList[iT][0].a = 2*(Pt2.Pos[0] - Pt1.Pos[0]); MediansList[iT][0].b = 2*(Pt2.Pos[1] - Pt1.Pos[1]); MediansList[iT][0].c = (Pt1.Pos[0]*Pt1.Pos[0]) - (Pt2.Pos[0]*Pt2.Pos[0]) + (Pt1.Pos[1]*Pt1.Pos[1]) - (Pt2.Pos[1]*Pt2.Pos[1]); /* median 2-0 */ MediansList[iT][1].a = 2*(Pt0.Pos[0] - Pt2.Pos[0]); MediansList[iT][1].b = 2*(Pt0.Pos[1] - Pt2.Pos[1]); MediansList[iT][1].c = (Pt2.Pos[0]*Pt2.Pos[0]) - (Pt0.Pos[0]*Pt0.Pos[0]) + (Pt2.Pos[1]*Pt2.Pos[1]) - (Pt0.Pos[1]*Pt0.Pos[1]); /* link The triangle with the MediansList */ Triangles[iT].Med[0] = &MediansList[iT][0]; Triangles[iT].Med[1] = &MediansList[iT][1]; Triangles[iT].Med[2] = &MediansList[iT][2]; } } /* compute the intersetions of medians around a point of index p (index of the point in the triangle T) */ void ComputeMediansAroundPoint(struct TriangleInList *Tstart,int iPstart) { /* Tstart : pointer to first triangle iPstart : index of master point relative to triangle Tstart if p = 0: T1 = T0->T[iTn]; pn=1 if p = 1: T1 = T0->T[iTn]; pn=2 if p = 0: T1 = T0->T[iTn]; pn=3 iTn = (p+1) % 3; */ double x,y; struct TriangleInList *T0,*T1; int iP0,iP1; int iT1; struct Point *initialPoint; int iM0,iM1; T0 = Tstart; iP0 = iPstart; initialPoint = T0->P[iP0]; //printf("\n--> rotating around T=%d p=%d\n",T0->idx,iP0); /* rotate around the point */ while (1) { /* next triangle */ iT1= (iP0+1) % 3; T1 = T0->T[iT1]; if (T1==NULL) { //printf("reach an edge\n"); T0->P[iP0]->IsDone=2; //printf("%g %g\n",T0->P[iP0]->Pos[0],T0->P[iP0]->Pos[1]); return; } //printf(" next triangle = %d\n",T1->idx); /* index of point in the triangle */ iP1 = T0->idxe[iT1]; /* index of point opposite to iTn */ iP1 = (iP1+1) % 3; /* next index of point opposite to iTn */ //printf(" initial point=%g %g current point =%g %g iP1=%d\n",initialPoint->Pos[0],initialPoint->Pos[1],T1->P[iP1]->Pos[0],T1->P[iP1]->Pos[1],iP1); /* check */ if (initialPoint!=T1->P[iP1]) { printf(" problem : initial point=%g %g current point =%g %g iP1=%d\n",initialPoint->Pos[0],initialPoint->Pos[1],T1->P[iP1]->Pos[0],T1->P[iP1]->Pos[1],iP1); exit(-1); } /* compute the intersection of the two medians */ iM0 = (iP0+1) % 3; iM1 = (iP1+1) % 3; lines_intersections(T0->Med[iM0]->a,T0->Med[iM0]->b,T0->Med[iM0]->c,T1->Med[iM1]->a,T1->Med[iM1]->b,T1->Med[iM1]->c,&x,&y); /* end point for T0 */ T0->Med[iM0]->Pe.Pos[0] = x; T0->Med[iM0]->Pe.Pos[1] = y; /* start point for T0 */ T1->Med[iM1]->Ps.Pos[0] = x; T1->Med[iM1]->Ps.Pos[1] = y; //printf(" --> T0=%d iM0=%d T1=%d iM1=%d : (%g %g)\n",T0->idx,iM0,T1->idx,iM1,x,y); if (T1==Tstart) /* end of loop */ { //printf(" end of 
loop\n"); break; } T0 = T1; iP0 = iP1; } } /*! Compute all medians intersections and define Ps and Pe */ void ComputeMediansIntersections() { int i,p,iT; for (i=0;iIsDone)) { //printf("in Triangle T %d do point %d\n",iT,p); Triangles[iT].P[p]->IsDone = 1; ComputeMediansAroundPoint(&Triangles[iT],p); } } } } /************************************************************/ /* PYTHON INTERFACE */ /************************************************************/ static PyObject * tessel_TriangleMedians(self, args) PyObject *self; PyObject *args; { PyArrayObject *p1 = NULL; PyArrayObject *p2 = NULL; PyArrayObject *p3 = NULL; struct Point Pt1,Pt2,Pt3; struct Point Pmm1,Pmm2,Pmm3,Pme1,Pme2,Pme3; if (!PyArg_ParseTuple(args,"OOO",&p1,&p2,&p3)) return NULL; /* check type */ if (!(PyArray_Check(p1) && PyArray_Check(p2) && PyArray_Check(p3))) { PyErr_SetString(PyExc_ValueError,"aruments are not all arrays."); return NULL; } /* check dimension */ if ( (p1->nd!=1) || (p2->nd!=1) || (p3->nd!=1) ) { PyErr_SetString(PyExc_ValueError,"Dimension of arguments must be 1."); return NULL; } /* check size */ if ( (p1->dimensions[0]!=3) || (p2->dimensions[0]!=3) || (p3->dimensions[0]!=3) ) { PyErr_SetString(PyExc_ValueError,"Size of arguments must be 3."); return NULL; } /* ensure double */ p1 = TO_DOUBLE(p1); p2 = TO_DOUBLE(p2); p3 = TO_DOUBLE(p3); Pt1.Pos[0] = *(double *) (p1->data + 0*(p1->strides[0])); Pt1.Pos[1] = *(double *) (p1->data + 1*(p1->strides[0])); Pt2.Pos[0] = *(double *) (p2->data + 0*(p2->strides[0])); Pt2.Pos[1] = *(double *) (p2->data + 1*(p2->strides[0])); Pt3.Pos[0] = *(double *) (p3->data + 0*(p3->strides[0])); Pt3.Pos[1] = *(double *) (p3->data + 1*(p3->strides[0])); TriangleMedians(Pt1,Pt2,Pt3,&Pmm1,&Pmm2,&Pmm3,&Pme1,&Pme2,&Pme3); /* create the outputs */ PyArrayObject *aPmm1,*aPmm2,*aPmm3,*aPme1,*aPme2,*aPme3; npy_intp ld[1]; ld[0]=3; aPmm1 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE); aPmm2 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE); aPmm3 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE); aPme1 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE); aPme2 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE); aPme3 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE); *(double *) (aPmm1->data + 0*(aPmm1->strides[0])) = Pmm1.Pos[0]; *(double *) (aPmm1->data + 1*(aPmm1->strides[0])) = Pmm1.Pos[1]; *(double *) (aPmm1->data + 2*(aPmm1->strides[0])) = 0; *(double *) (aPmm2->data + 0*(aPmm2->strides[0])) = Pmm2.Pos[0]; *(double *) (aPmm2->data + 1*(aPmm2->strides[0])) = Pmm2.Pos[1]; *(double *) (aPmm2->data + 2*(aPmm2->strides[0])) = 0; *(double *) (aPmm3->data + 0*(aPmm3->strides[0])) = Pmm3.Pos[0]; *(double *) (aPmm3->data + 1*(aPmm3->strides[0])) = Pmm3.Pos[1]; *(double *) (aPmm3->data + 2*(aPmm3->strides[0])) = 0; *(double *) (aPme1->data + 0*(aPme1->strides[0])) = Pme1.Pos[0]; *(double *) (aPme1->data + 1*(aPme1->strides[0])) = Pme1.Pos[1]; *(double *) (aPme1->data + 2*(aPme1->strides[0])) = 0; *(double *) (aPme2->data + 0*(aPme2->strides[0])) = Pme2.Pos[0]; *(double *) (aPme2->data + 1*(aPme2->strides[0])) = Pme2.Pos[1]; *(double *) (aPme2->data + 2*(aPme2->strides[0])) = 0; *(double *) (aPme3->data + 0*(aPme3->strides[0])) = Pme3.Pos[0]; *(double *) (aPme3->data + 1*(aPme3->strides[0])) = Pme3.Pos[1]; *(double *) (aPme3->data + 2*(aPme3->strides[0])) = 0; return Py_BuildValue("(OOOOOO)",aPmm1,aPmm2,aPmm3,aPme1,aPme2,aPme3); } static PyObject * tessel_CircumCircleProperties(self, args) PyObject *self; PyObject *args; { PyArrayObject *p1 = 
static PyObject *
tessel_CircumCircleProperties(self, args)
  PyObject *self;
  PyObject *args;
{
  PyArrayObject *p1 = NULL;
  PyArrayObject *p2 = NULL;
  PyArrayObject *p3 = NULL;
  struct Point Pt1,Pt2,Pt3;
  double xc,yc,r;

  if (!PyArg_ParseTuple(args,"OOO",&p1,&p2,&p3))
    return NULL;

  /* check type */
  if (!(PyArray_Check(p1) && PyArray_Check(p2) && PyArray_Check(p3)))
  {
    PyErr_SetString(PyExc_ValueError,"arguments are not all arrays.");
    return NULL;
  }

  /* check dimension */
  if ( (p1->nd!=1) || (p2->nd!=1) || (p3->nd!=1) )
  {
    PyErr_SetString(PyExc_ValueError,"Dimension of arguments must be 1.");
    return NULL;
  }

  /* check size */
  if ( (p1->dimensions[0]!=3) || (p2->dimensions[0]!=3) || (p3->dimensions[0]!=3) )
  {
    PyErr_SetString(PyExc_ValueError,"Size of arguments must be 3.");
    return NULL;
  }

  /* ensure double */
  p1 = TO_DOUBLE(p1);
  p2 = TO_DOUBLE(p2);
  p3 = TO_DOUBLE(p3);

  Pt1.Pos[0] = *(double *) (p1->data + 0*(p1->strides[0]));
  Pt1.Pos[1] = *(double *) (p1->data + 1*(p1->strides[0]));
  Pt2.Pos[0] = *(double *) (p2->data + 0*(p2->strides[0]));
  Pt2.Pos[1] = *(double *) (p2->data + 1*(p2->strides[0]));
  Pt3.Pos[0] = *(double *) (p3->data + 0*(p3->strides[0]));
  Pt3.Pos[1] = *(double *) (p3->data + 1*(p3->strides[0]));

  r = CircumCircleProperties(Pt1,Pt2,Pt3,&xc,&yc);

  return Py_BuildValue("(ddd)",r,xc,yc);
}

static PyObject *
tessel_InTriangle(self, args)
  PyObject *self;
  PyObject *args;
{
  PyArrayObject *p1 = NULL;
  PyArrayObject *p2 = NULL;
  PyArrayObject *p3 = NULL;
  PyArrayObject *p4 = NULL;
  struct Point Pt1,Pt2,Pt3,Pt4;
  struct Triangle T;
  int b;

  if (!PyArg_ParseTuple(args,"OOOO",&p1,&p2,&p3,&p4))
    return NULL;

  /* check type */
  if (!(PyArray_Check(p1) && PyArray_Check(p2) && PyArray_Check(p3) && PyArray_Check(p4)))
  {
    PyErr_SetString(PyExc_ValueError,"arguments are not all arrays.");
    return NULL;
  }

  /* check dimension */
  if ( (p1->nd!=1) || (p2->nd!=1) || (p3->nd!=1) || (p4->nd!=1))
  {
    PyErr_SetString(PyExc_ValueError,"Dimension of arguments must be 1.");
    return NULL;
  }

  /* check size */
  if ( (p1->dimensions[0]!=3) || (p2->dimensions[0]!=3) || (p3->dimensions[0]!=3) || (p4->dimensions[0]!=3))
  {
    PyErr_SetString(PyExc_ValueError,"Size of arguments must be 3.");
    return NULL;
  }

  /* ensure double */
  p1 = TO_DOUBLE(p1);
  p2 = TO_DOUBLE(p2);
  p3 = TO_DOUBLE(p3);
  p4 = TO_DOUBLE(p4);

  Pt1.Pos[0] = *(double *) (p1->data + 0*(p1->strides[0]));
  Pt1.Pos[1] = *(double *) (p1->data + 1*(p1->strides[0]));
  Pt2.Pos[0] = *(double *) (p2->data + 0*(p2->strides[0]));
  Pt2.Pos[1] = *(double *) (p2->data + 1*(p2->strides[0]));
  Pt3.Pos[0] = *(double *) (p3->data + 0*(p3->strides[0]));
  Pt3.Pos[1] = *(double *) (p3->data + 1*(p3->strides[0]));
  Pt4.Pos[0] = *(double *) (p4->data + 0*(p4->strides[0]));
  Pt4.Pos[1] = *(double *) (p4->data + 1*(p4->strides[0]));

  T = MakeTriangleFromPoints(Pt1,Pt2,Pt3);
  T = OrientTriangle(T);

  b = InTriangle(T,Pt4);

  return Py_BuildValue("i",b);
}

static PyObject *
tessel_InTriangleOrOutside(self, args)
  PyObject *self;
  PyObject *args;
{
  PyArrayObject *p1 = NULL;
  PyArrayObject *p2 = NULL;
  PyArrayObject *p3 = NULL;
  PyArrayObject *p4 = NULL;
  struct Point Pt1,Pt2,Pt3,Pt4;
  struct Triangle T;
  int b;

  if (!PyArg_ParseTuple(args,"OOOO",&p1,&p2,&p3,&p4))
    return NULL;

  /* check type */
  if (!(PyArray_Check(p1) && PyArray_Check(p2) && PyArray_Check(p3) && PyArray_Check(p4)))
  {
    PyErr_SetString(PyExc_ValueError,"arguments are not all arrays.");
    return NULL;
  }

  /* check dimension */
  if ( (p1->nd!=1) || (p2->nd!=1) || (p3->nd!=1) || (p4->nd!=1))
  {
    PyErr_SetString(PyExc_ValueError,"Dimension of arguments must be 1.");
    return NULL;
  }

  /* check size */
  if ( (p1->dimensions[0]!=3) || (p2->dimensions[0]!=3) || (p3->dimensions[0]!=3) || (p4->dimensions[0]!=3))
  {
PyErr_SetString(PyExc_ValueError,"Size of arguments must be 3."); return NULL; } /* ensure double */ p1 = TO_DOUBLE(p1); p2 = TO_DOUBLE(p2); p3 = TO_DOUBLE(p3); p3 = TO_DOUBLE(p3); Pt1.Pos[0] = *(double *) (p1->data + 0*(p1->strides[0])); Pt1.Pos[1] = *(double *) (p1->data + 1*(p1->strides[0])); Pt2.Pos[0] = *(double *) (p2->data + 0*(p2->strides[0])); Pt2.Pos[1] = *(double *) (p2->data + 1*(p2->strides[0])); Pt3.Pos[0] = *(double *) (p3->data + 0*(p3->strides[0])); Pt3.Pos[1] = *(double *) (p3->data + 1*(p3->strides[0])); Pt4.Pos[0] = *(double *) (p4->data + 0*(p4->strides[0])); Pt4.Pos[1] = *(double *) (p4->data + 1*(p4->strides[0])); T = MakeTriangleFromPoints(Pt1,Pt2,Pt3); T = OrientTriangle(T); b = InTriangleOrOutside(T,Pt4); return Py_BuildValue("i",b); } static PyObject * tessel_InCircumCircle(self, args) PyObject *self; PyObject *args; { PyArrayObject *p1 = NULL; PyArrayObject *p2 = NULL; PyArrayObject *p3 = NULL; PyArrayObject *p4 = NULL; struct Point Pt1,Pt2,Pt3,Pt4; struct Triangle T; int b; if (!PyArg_ParseTuple(args,"OOOO",&p1,&p2,&p3,&p4)) return NULL; /* check type */ if (!(PyArray_Check(p1) && PyArray_Check(p2) && PyArray_Check(p3) && PyArray_Check(p4))) { PyErr_SetString(PyExc_ValueError,"aruments are not all arrays."); return NULL; } /* check dimension */ if ( (p1->nd!=1) || (p2->nd!=1) || (p3->nd!=1) || (p4->nd!=1)) { PyErr_SetString(PyExc_ValueError,"Dimension of arguments must be 1."); return NULL; } /* check size */ if ( (p1->dimensions[0]!=3) || (p2->dimensions[0]!=3) || (p3->dimensions[0]!=3) || (p4->dimensions[0]!=3)) { PyErr_SetString(PyExc_ValueError,"Size of arguments must be 3."); return NULL; } /* ensure double */ p1 = TO_DOUBLE(p1); p2 = TO_DOUBLE(p2); p3 = TO_DOUBLE(p3); p3 = TO_DOUBLE(p3); Pt1.Pos[0] = *(double *) (p1->data + 0*(p1->strides[0])); Pt1.Pos[1] = *(double *) (p1->data + 1*(p1->strides[0])); Pt2.Pos[0] = *(double *) (p2->data + 0*(p2->strides[0])); Pt2.Pos[1] = *(double *) (p2->data + 1*(p2->strides[0])); Pt3.Pos[0] = *(double *) (p3->data + 0*(p3->strides[0])); Pt3.Pos[1] = *(double *) (p3->data + 1*(p3->strides[0])); Pt4.Pos[0] = *(double *) (p4->data + 0*(p4->strides[0])); Pt4.Pos[1] = *(double *) (p4->data + 1*(p4->strides[0])); T = MakeTriangleFromPoints(Pt1,Pt2,Pt3); T = OrientTriangle(T); b = InCircumCircle(T,Pt4); return Py_BuildValue("i",b); } static PyObject * tessel_ConstructDelaunay(self, args) PyObject *self; PyObject *args; { PyArrayObject *pos = NULL; PyArrayObject *mass = NULL; int i,j; if (!PyArg_ParseTuple(args,"OO",&pos,&mass)) return NULL; /* check type */ if (!(PyArray_Check(pos))) { PyErr_SetString(PyExc_ValueError,"aruments 1 must be array."); return NULL; } /* check type */ if (!(PyArray_Check(mass))) { PyErr_SetString(PyExc_ValueError,"aruments 2 must be array."); return NULL; } /* check dimension */ if ( (pos->nd!=2)) { PyErr_SetString(PyExc_ValueError,"Dimension of argument 1 must be 2."); return NULL; } /* check dimension */ if ( (mass->nd!=1)) { PyErr_SetString(PyExc_ValueError,"Dimension of argument 2 must be 1."); return NULL; } /* check size */ if ( (pos->dimensions[1]!=3)) { PyErr_SetString(PyExc_ValueError,"First size of argument must be 3."); return NULL; } /* check size */ if ( (pos->dimensions[0]!=mass->dimensions[0])) { PyErr_SetString(PyExc_ValueError,"Size of argument 1 must be similar to argument 2."); return NULL; } /* ensure double */ pos = TO_DOUBLE(pos); mass = TO_DOUBLE(mass); NumPart = pos->dimensions[0]; /* add first triangle */ /* init */ All.MaxPart = NumPart; /* allocate memory */ 
  allocate_memory();

  /* init P : loop over all points */
  for (i=0;i<NumPart;i++)
  {
    P[i].Pos[0] = *(double *) (pos->data + i*(pos->strides[0]) + 0*pos->strides[1]);
    P[i].Pos[1] = *(double *) (pos->data + i*(pos->strides[0]) + 1*pos->strides[1]);
    P[i].Pos[2] = *(double *) (pos->data + i*(pos->strides[0]) + 2*pos->strides[1]);
    P[i].Mass   = *(double *) (mass->data + i*(mass->strides[0]) );
  }

  /* find domain extent */
  FindExtent();

  /* set edges */
  for (j=0;j<3;j++)
  {
    Pe[j].Pos[0] = domainCenter[0] + domainRadius * cos(2./3.*PI*j);
    Pe[j].Pos[1] = domainCenter[1] + domainRadius * sin(2./3.*PI*j);
    Pe[j].Pos[2] = 0;
    Pe[j].Mass   = 0;
  }

  /* Triangle list */
  Triangles[0].idx = 0;

  Triangles[0].P[0] = &Pe[0];
  Triangles[0].P[1] = &Pe[1];
  Triangles[0].P[2] = &Pe[2];

  Triangles[0].T[0] = NULL;
  Triangles[0].T[1] = NULL;
  Triangles[0].T[2] = NULL;

  Triangles[0].idxe[0] = -1;
  Triangles[0].idxe[1] = 1;
  Triangles[0].idxe[2] = -1;

  nT++;

  OrientTriangleInList(Triangles[0]);

  /* loop over all points */
  for (i=0;i<pos->dimensions[0];i++)
  {
    AddPoint(&P[i]);
  }

  /* check */
  CheckTriangles();

  return Py_BuildValue("i",1);
}

static PyObject *
tessel_GetTriangles(self, args)
  PyObject *self;
  PyObject *args;
{
  PyObject *OutputList;
  PyObject *OutputDict;
  PyArrayObject *tri = NULL;
  npy_intp dim[2];
  int iT;

  OutputList = PyList_New(0);

  /* loop over all triangles */
  for (iT=0;iT<nT;iT++)
  {
    /* a 3x3 array holding the coordinates of the three vertices */
    dim[0] = 3;
    dim[1] = 3;
    tri = (PyArrayObject *) PyArray_SimpleNew(2,dim,PyArray_DOUBLE);

    *(double *) (tri->data + 0*(tri->strides[0]) + 0*tri->strides[1]) = Triangles[iT].P[0]->Pos[0];
    *(double *) (tri->data + 0*(tri->strides[0]) + 1*tri->strides[1]) = Triangles[iT].P[0]->Pos[1];
    *(double *) (tri->data + 0*(tri->strides[0]) + 2*tri->strides[1]) = 0;

    *(double *) (tri->data + 1*(tri->strides[0]) + 0*tri->strides[1]) = Triangles[iT].P[1]->Pos[0];
    *(double *) (tri->data + 1*(tri->strides[0]) + 1*tri->strides[1]) = Triangles[iT].P[1]->Pos[1];
    *(double *) (tri->data + 1*(tri->strides[0]) + 2*tri->strides[1]) = 0;

    *(double *) (tri->data + 2*(tri->strides[0]) + 0*tri->strides[1]) = Triangles[iT].P[2]->Pos[0];
    *(double *) (tri->data + 2*(tri->strides[0]) + 1*tri->strides[1]) = Triangles[iT].P[2]->Pos[1];
    *(double *) (tri->data + 2*(tri->strides[0]) + 2*tri->strides[1]) = 0;

    OutputDict = PyDict_New();
    PyDict_SetItem(OutputDict,PyString_FromString("id"),PyInt_FromLong(Triangles[iT].idx) );
    PyDict_SetItem(OutputDict,PyString_FromString("coord"),(PyObject*)tri);

    PyList_Append(OutputList, OutputDict );
  }

  return Py_BuildValue("O",OutputList);
}
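For clarity, a small sketch of how the triangle list returned by the wrapper above might be consumed from Python; it assumes ConstructDelaunay() has already been called on some point set (a fuller example follows at the end of this file), and the variable names are illustrative only:

    import numpy as np
    import tessel

    # each entry returned by GetTriangles() is a dict with the triangle
    # index under "id" and a 3x3 array of vertex coordinates under "coord"
    triangles = tessel.GetTriangles()
    ids    = [t["id"] for t in triangles]
    coords = [np.asarray(t["coord"]) for t in triangles]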
static PyObject *
tessel_ComputeIsoContours(self, args)
  PyObject *self;
  PyObject *args;
{
  double val;
  int iT;
  struct Point P[3];
  int nP,iP;
  PyObject *OutputXList;
  PyObject *OutputYList;

  if (!PyArg_ParseTuple(args,"d",&val))
    return NULL;

  OutputXList = PyList_New(0);
  OutputYList = PyList_New(0);

  /* loop over all triangles; for each triangle the crossing points of the
     level val are stored in P[0..2] and counted in nP */
  for(iT=0;iT<nT;iT++)
  {
    if (nP>0)
      switch(nP)
      {
        case 1:
          printf("we are in trouble here (ComputeIsoContours)\n");
          exit(-1);
          break;

        case 2:
          PyList_Append(OutputXList, PyFloat_FromDouble(P[0].Pos[0]));
          PyList_Append(OutputXList, PyFloat_FromDouble(P[1].Pos[0]));
          PyList_Append(OutputYList, PyFloat_FromDouble(P[0].Pos[1]));
          PyList_Append(OutputYList, PyFloat_FromDouble(P[1].Pos[1]));
          break;

        case 3:
          PyList_Append(OutputXList, PyFloat_FromDouble(P[0].Pos[0]));
          PyList_Append(OutputXList, PyFloat_FromDouble(P[1].Pos[0]));
          PyList_Append(OutputXList, PyFloat_FromDouble(P[2].Pos[0]));
          PyList_Append(OutputXList, PyFloat_FromDouble(P[0].Pos[0]));
          PyList_Append(OutputYList, PyFloat_FromDouble(P[0].Pos[1]));
          PyList_Append(OutputYList, PyFloat_FromDouble(P[1].Pos[1]));
          PyList_Append(OutputYList, PyFloat_FromDouble(P[2].Pos[1]));
          PyList_Append(OutputYList, PyFloat_FromDouble(P[0].Pos[1]));
          break;
      }
  }

  return Py_BuildValue("(O,O)",OutputXList,OutputYList);
}

static PyObject *
tessel_GetVoronoi(self, args)
  PyObject *self;
  PyObject *args;
{
  double val;
  int iT;
  int Tloc;
  struct Point Pt1,Pt2,Pt3;
  int nP,iP;
  struct Point Pmm1,Pmm2,Pmm3,Pme1,Pme2,Pme3;
  PyArrayObject *aPmm1,*aPmm2,*aPmm3,*aPme1,*aPme2,*aPme3;
  npy_intp ld[1];
  PyObject *OutputList;
  PyObject *SegmentList;

  OutputList = PyList_New(0);

  /* create the outputs */
  ld[0]=3;

  ComputeMediansProperties();
  ComputeMediansIntersections();

  /* loop over all triangles */
  for(iT=0;iT<nT;iT++)
  {
    /* skip triangles whose cells are incomplete */
    if ( Triangles[iT].P[0]->IsDone==2)
    {
      //printf("T=%d P %d (%g %g) incomplete\n",Triangles[iT].idx,Triangles[iT].P[0]->Pos[0],Triangles[iT].P[0]->Pos[1]);
      continue;
    }

    if ( Triangles[iT].P[1]->IsDone==2)
    {
      //printf("T=%d P %d (%g %g) incomplete\n",Triangles[iT].idx,Triangles[iT].P[1]->Pos[0],Triangles[iT].P[1]->Pos[1]);
      continue;
    }

    if ( Triangles[iT].P[2]->IsDone==2)
    {
      //printf("T=%d P %d (%g %g) incomplete\n",Triangles[iT].idx,Triangles[iT].P[2]->Pos[0],Triangles[iT].P[2]->Pos[1]);
      continue;
    }

    aPmm1 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
    *(double *) (aPmm1->data + 0*(aPmm1->strides[0])) = Triangles[iT].Med[0]->Ps.Pos[0];
    *(double *) (aPmm1->data + 1*(aPmm1->strides[0])) = Triangles[iT].Med[0]->Ps.Pos[1];
    *(double *) (aPmm1->data + 2*(aPmm1->strides[0])) = 0;

    aPme1 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
    *(double *) (aPme1->data + 0*(aPme1->strides[0])) = Triangles[iT].Med[0]->Pe.Pos[0];
    *(double *) (aPme1->data + 1*(aPme1->strides[0])) = Triangles[iT].Med[0]->Pe.Pos[1];
    *(double *) (aPme1->data + 2*(aPme1->strides[0])) = 0;

    SegmentList = PyList_New(0);
    PyList_Append(SegmentList,(PyObject *)aPmm1);
    PyList_Append(SegmentList,(PyObject *)aPme1);
    PyList_Append(OutputList,SegmentList );

    aPmm2 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
    *(double *) (aPmm2->data + 0*(aPmm2->strides[0])) = Triangles[iT].Med[1]->Ps.Pos[0];
    *(double *) (aPmm2->data + 1*(aPmm2->strides[0])) = Triangles[iT].Med[1]->Ps.Pos[1];
    *(double *) (aPmm2->data + 2*(aPmm2->strides[0])) = 0;

    aPme2 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
    *(double *) (aPme2->data + 0*(aPme2->strides[0])) = Triangles[iT].Med[1]->Pe.Pos[0];
    *(double *) (aPme2->data + 1*(aPme2->strides[0])) = Triangles[iT].Med[1]->Pe.Pos[1];
    *(double *) (aPme2->data + 2*(aPme2->strides[0])) = 0;

    SegmentList = PyList_New(0);
    PyList_Append(SegmentList,(PyObject *)aPmm2);
    PyList_Append(SegmentList,(PyObject *)aPme2);
    PyList_Append(OutputList,SegmentList );

    aPmm3 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
    *(double *) (aPmm3->data + 0*(aPmm3->strides[0])) = Triangles[iT].Med[2]->Ps.Pos[0];
    *(double *) (aPmm3->data + 1*(aPmm3->strides[0])) = Triangles[iT].Med[2]->Ps.Pos[1];
    *(double *) (aPmm3->data + 2*(aPmm3->strides[0])) = 0;

    aPme3 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
    *(double *) (aPme3->data + 0*(aPme3->strides[0])) = Triangles[iT].Med[2]->Pe.Pos[0];
    *(double *) (aPme3->data + 1*(aPme3->strides[0])) = Triangles[iT].Med[2]->Pe.Pos[1];
    *(double *) (aPme3->data + 2*(aPme3->strides[0])) = 0;

    SegmentList = PyList_New(0);
    PyList_Append(SegmentList,(PyObject *)aPmm3);
    PyList_Append(SegmentList,(PyObject *)aPme3);
    PyList_Append(OutputList,SegmentList );
  }

  if (0)
  {
    /* alternative construction, based on the medians of each triangle */
    for(iT=0;iT<nT;iT++)
    {
      Pt1.Pos[0] = Triangles[iT].P[0]->Pos[0];
      Pt1.Pos[1] = Triangles[iT].P[0]->Pos[1];
      Pt2.Pos[0] = Triangles[iT].P[1]->Pos[0];
      Pt2.Pos[1] = Triangles[iT].P[1]->Pos[1];
      Pt3.Pos[0] = Triangles[iT].P[2]->Pos[0];
      Pt3.Pos[1] = Triangles[iT].P[2]->Pos[1];
      TriangleMedians(Pt1,Pt2,Pt3,&Pmm1,&Pmm2,&Pmm3,&Pme1,&Pme2,&Pme3);

      aPmm1 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
      aPmm2 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
      aPmm3 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
      aPme1 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
      aPme2 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);
      aPme3 = (PyArrayObject *) PyArray_SimpleNew(1,ld,PyArray_DOUBLE);

      *(double *) (aPmm1->data + 0*(aPmm1->strides[0])) = Pmm1.Pos[0];
      *(double *) (aPmm1->data + 1*(aPmm1->strides[0])) = Pmm1.Pos[1];
      *(double *) (aPmm1->data + 2*(aPmm1->strides[0])) = 0;
      *(double *) (aPmm2->data + 0*(aPmm2->strides[0])) = Pmm2.Pos[0];
      *(double *) (aPmm2->data + 1*(aPmm2->strides[0])) = Pmm2.Pos[1];
      *(double *) (aPmm2->data + 2*(aPmm2->strides[0])) = 0;
      *(double *) (aPmm3->data + 0*(aPmm3->strides[0])) = Pmm3.Pos[0];
      *(double *) (aPmm3->data + 1*(aPmm3->strides[0])) = Pmm3.Pos[1];
      *(double *) (aPmm3->data + 2*(aPmm3->strides[0])) = 0;
      *(double *) (aPme1->data + 0*(aPme1->strides[0])) = Pme1.Pos[0];
      *(double *) (aPme1->data + 1*(aPme1->strides[0])) = Pme1.Pos[1];
      *(double *) (aPme1->data + 2*(aPme1->strides[0])) = 0;
      *(double *) (aPme2->data + 0*(aPme2->strides[0])) = Pme2.Pos[0];
      *(double *) (aPme2->data + 1*(aPme2->strides[0])) = Pme2.Pos[1];
      *(double *) (aPme2->data + 2*(aPme2->strides[0])) = 0;
      *(double *) (aPme3->data + 0*(aPme3->strides[0])) = Pme3.Pos[0];
      *(double *) (aPme3->data + 1*(aPme3->strides[0])) = Pme3.Pos[1];
      *(double *) (aPme3->data + 2*(aPme3->strides[0])) = 0;

      /* check if the intersection is inside the triangle */
      Tloc = InTriangleOrOutside(TriangleInList2Triangle( Triangles[iT] ),Pmm1);

      /* return  2 : towards triangle T[2]
         return  0 : towards triangle T[1]
         return  1 : towards triangle T[0]
         return -1 : the point is inside          */

      if (Tloc==-1)
      {
        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm1);
        PyList_Append(SegmentList,(PyObject *)aPme1);
        PyList_Append(OutputList,SegmentList );

        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm2);
        PyList_Append(SegmentList,(PyObject *)aPme2);
        PyList_Append(OutputList,SegmentList );

        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm3);
        PyList_Append(SegmentList,(PyObject *)aPme3);
        PyList_Append(OutputList,SegmentList );
      }

      if (Tloc==0)
      {
      }

      if (Tloc==1)
      {
        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm1);
        PyList_Append(SegmentList,(PyObject *)aPme1);
        PyList_Append(OutputList,SegmentList );

        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm2);
        PyList_Append(SegmentList,(PyObject *)aPme2);
        PyList_Append(OutputList,SegmentList );
      }

      if (Tloc==2)
      {
        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm1);
        PyList_Append(SegmentList,(PyObject *)aPme1);
        PyList_Append(OutputList,SegmentList );

        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm2);
        PyList_Append(SegmentList,(PyObject *)aPme2);
        PyList_Append(OutputList,SegmentList );

        SegmentList = PyList_New(0);
        PyList_Append(SegmentList,(PyObject *)aPmm3);
        PyList_Append(SegmentList,(PyObject *)aPme3);
        PyList_Append(OutputList,SegmentList );
      }
    }
  }

  return Py_BuildValue("O",OutputList);
}

static PyObject *
tessel_info(self, args)
  PyObject *self;
  PyObject *args;
{
  int iT,iP,iTe;

  /* loop over all triangles */
  for(iT=0;iT<nT;iT++)
  {
    iP=0;
    printf(" P=%d :%g %g\n",iP,Triangles[iT].P[iP]->Pos[0],Triangles[iT].P[iP]->Pos[1]);
    iP=1;
    printf(" P=%d :%g %g\n",iP,Triangles[iT].P[iP]->Pos[0],Triangles[iT].P[iP]->Pos[1]);
%g\n",iP,Triangles[iT].P[iP]->Pos[0],Triangles[iT].P[iP]->Pos[1]); iTe=0; if (Triangles[iT].T[iTe]!=NULL) printf(" T=%d :%d\n",iTe,Triangles[iT].T[iTe]->idx); else printf(" T=%d :-\n",iTe); iTe=1; if (Triangles[iT].T[iTe]!=NULL) printf(" T=%d :%d\n",iTe,Triangles[iT].T[iTe]->idx); else printf(" T=%d :-\n",iTe); iTe=2; if (Triangles[iT].T[iTe]!=NULL) printf(" T=%d :%d\n",iTe,Triangles[iT].T[iTe]->idx); else printf(" T=%d :-\n",iTe); ; iTe=0; if (Triangles[iT].T[iTe]!=NULL) printf(" Pe=%d :%d\n",iTe,Triangles[iT].idxe[iTe]); else printf(" Pe=%d :-\n",iTe); iTe=1; if (Triangles[iT].T[iTe]!=NULL) printf(" Pe=%d :%d\n",iTe,Triangles[iT].idxe[iTe]); else printf(" Pe=%d :-\n",iTe); iTe=2; if (Triangles[iT].T[iTe]!=NULL) printf(" Pe=%d :%d\n",iTe,Triangles[iT].idxe[iTe]); else printf(" Pe=%d :-\n",iTe); ; printf("\n"); } return Py_BuildValue("i",1); } /*********************************/ /* test */ /*********************************/ static PyObject * tessel_test(self, args) PyObject *self; PyObject *args; { return Py_BuildValue("i",1); } /* definition of the method table */ static PyMethodDef tesselMethods[] = { {"test", tessel_test, METH_VARARGS, "Simple Test"}, {"info", tessel_info, METH_VARARGS, "info on tesselation"}, {"TriangleMedians", tessel_TriangleMedians, METH_VARARGS, "Get Triangle Medians"}, {"CircumCircleProperties", tessel_CircumCircleProperties, METH_VARARGS, "Get Circum Circle Properties"}, {"InTriangle", tessel_InTriangle, METH_VARARGS, "Return if the triangle (P1,P2,P3) contains the point P4"}, {"InTriangleOrOutside", tessel_InTriangleOrOutside, METH_VARARGS, "Return if the triangle (P1,P2,P3) contains the point P4"}, {"InCircumCircle", tessel_InCircumCircle, METH_VARARGS, "Return if the circum circle of the triangle (P1,P2,P3) contains the point P4"}, {"ConstructDelaunay", tessel_ConstructDelaunay, METH_VARARGS, "Construct the Delaunay tesselation for a given sample of points"}, {"GetTriangles", tessel_GetTriangles, METH_VARARGS, "Get the trianles in a list of 3x3 arrays."}, {"ComputeIsoContours", tessel_ComputeIsoContours, METH_VARARGS, "Compute iso-contours."}, {"GetVoronoi", tessel_GetVoronoi, METH_VARARGS, "Get a list of segements corresponding to the voronoi."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; void inittessel(void) { (void) Py_InitModule("tessel", tesselMethods); import_array(); } diff --git a/src/tessel/tessel/tessel.o b/src/tessel/tessel/tessel.o new file mode 100644 index 0000000..98ddc64 Binary files /dev/null and b/src/tessel/tessel/tessel.o differ diff --git a/src/tessel/tessel/tessel.so b/src/tessel/tessel/tessel.so index fc35a72..b4f40ba 100755 Binary files a/src/tessel/tessel/tessel.so and b/src/tessel/tessel/tessel.so differ