
//
// Created by Arnaud Pannatier on 06.05.18.
// Based on the code of:
// - Nicolas Richart <nicolas.richart@epfl.ch>
// - Vincent Keller <vincent.keller@epfl.ch>
// - Vittoria Rezzonico <vittoria.rezzonico@epfl.ch>
// See the files AUTHORS and COPYRIGHT for the corresponding information
//
/* -------------------------------------------------------------------------- */
#include "simulation.h"
#include "reader.h"
/* -------------------------------------------------------------------------- */
#include <cmath>
#include <sstream>
#include <chrono>
Simulation::Simulation(int NX_, double G_, int SIZE_, double TEND_, double DX_, std::string dir_, std::string output_, MPI_Comm communicator_)
  : NX(NX_), NY(NX_), dir(dir_), outputFilename(output_), G(G_), TEND(TEND_), DX(DX_), dt(0), T(0), SIZE(SIZE_) {
// Initialize the communicator
communicator = communicator_;
// retrieve the number of processes and the rank in the process pool
MPI_Comm_rank(communicator, &prank);
MPI_Comm_size(communicator, &psize);
// computation of the local size of the grid; the remainder is spread equally
// across the first processors
local_x = NX / psize + (prank < NX % psize ? 1 : 0);
local_y = NY;
// adding the ghost lines if needed
if (psize > 1)
local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;
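// Example (illustrative): NX = 10, psize = 3 gives a base split of 4/3/3
// rows; with the ghost lines the local sizes become 5/5/4.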
// local size without ghost cells
write_x = local_x;
write_y = local_y;
// removing the ghost lines if needed
if (psize > 1)
write_x -= (prank == 0 || prank == psize - 1) ? 1 : 2;
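// row range (excluding the ghost rows) used when computing mu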
x_start_mu = (prank == 0 ? 0 : 1);
x_end_mu = (prank == psize - 1 ? local_x : local_x - 1);
// computing the offsets of the local grid in the global one
offset_x = (NX / psize) * prank + (prank < NX % psize ? prank : NX % psize);
offset_y = 0;
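// Example (continued): NX = 10, psize = 3 gives offset_x = 0, 4, 7, so the
// interior rows of the three processes cover the global rows 0-3, 4-6, 7-9.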
// resizing the different grids
resizeGrids(local_x, local_y);
// determining the rank of the neighbors
north_prank = (prank == 0 ? MPI_PROC_NULL : prank - 1);
south_prank = (prank == (psize - 1) ? MPI_PROC_NULL : prank + 1);
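// Example (continued): with psize = 3 the (north, south) neighbours are
// (MPI_PROC_NULL, 1), (0, 2) and (1, MPI_PROC_NULL).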
// Some info if needed to debug
// std::cout << prank << " " << NX << " " << NY << " "
// << local_x << " " << local_y << " " << offset_x << " "
// << offset_y << " " << north_prank << " " << south_prank
// << std::endl;
}
void Simulation::readInitialConditionsFromFile () {
// Build the names of the different files needed by the program
std::stringstream filename, filenameH, filenameHU,filenameHV, filenameZdx, filenameZdy;
filename << dir << "Data_nx" << NX << "_" << SIZE << "km_T" << "0.2";
filenameH << filename.str() << "_h.bin";
filenameHU << filename.str() << "_hu.bin";
filenameHV << filename.str() << "_hv.bin";
filenameZdx << filename.str() << "_Zdx.bin";
filenameZdy << filename.str() << "_Zdy.bin";
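// Example (illustrative): dir = "data/", NX = 100, SIZE = 500 would give
// "data/Data_nx100_500km_T0.2_h.bin", and similarly for the other fields.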
// Load initial conditions from the data files
H.current() = Reader::ParallelReadFromColumnMajorFile(filenameH.str(), write_x, write_y, offset_x, communicator);
HU.current() = Reader::ParallelReadFromColumnMajorFile(filenameHU.str(), write_x, write_y, offset_x, communicator);
HV.current() = Reader::ParallelReadFromColumnMajorFile(filenameHV.str(), write_x, write_y, offset_x, communicator);
// Load topography slopes from the data files
Zdx = Reader::ParallelReadFromColumnMajorFile(filenameZdx.str(), write_x, write_y, offset_x, communicator);
Zdy = Reader::ParallelReadFromColumnMajorFile(filenameZdy.str(), write_x, write_y, offset_x, communicator);
std::cout << " Process " << prank+1 << " Initialized ! " << std::endl;
}
/* -------------------------------------------------------------------------- */
std::tuple<double, int> Simulation::compute() {
// Number of steps performed in the loop
int s = 0;
// Timer for performance measurement
double totalTime = 0;
auto begin = std::chrono::high_resolution_clock::now ();
readInitialConditionsFromFile();
while(T < TEND){
s++;
auto start = std::chrono::high_resolution_clock::now ();
// Compute the next time interval dt
compute_mu_and_set_dt ();
//std::cout << " Process " << prank+1 << "dt : " << dt << " Computing T: " << (T+dt) << ". " << (100*(T+dt)/TEND) <<"%" << std::endl;
// Compute the value of H, HU, HV for the next time step
compute_step();
// update time
T = T + dt;
// Compute the time taken by a single step
auto end = std::chrono::high_resolution_clock::now();
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
//std::cout <<" Process " << prank+1 << " Time for a step : " << ms << std::endl;
// Print the information of the timestep on a single line so it does not flood the output
std::cout << "\r" << " Process " << prank+1 << " dt : " << dt << " Computing T: " << T << ". " << (100*T/TEND) << "%" << " --------- Time for a step : " << ms << std::flush;
//std::cout << std::endl << "Step : " << s << " Process " << prank+1 << " dt : " << dt << " Computing T: " << T << ". " << (100*T/TEND) <<"%" << " --------- Time for a step : " << ms << std::flush;
}
// After the computation, store the result in a binary file
writeResultsToFile ();
// Compute the time for the whole computation
auto end = std::chrono::high_resolution_clock::now();
totalTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-begin).count();
// Return the total time and the number of steps
return std::make_tuple(totalTime, s);
}
/* -------------------------------------------------------------------------- */
void Simulation::compute_mu_and_set_dt () {
double Max = 0.0;
double Current;
// References to the grids to avoid repeated calls to the function current()
Grid& cH = H.current();
Grid& cHU = HU.current();
Grid& cHV = HV.current();
// Find the maximum value of mu; no need to take the ghost cells into account when computing the timestep
for (int x = x_start_mu; x < x_end_mu; x++) {
for (int y = 0; y < local_y; y++) {
Current = std::sqrt( std::pow(std::max( std::abs(cHU(x,y)/cH(x,y) - std::sqrt(cH(x,y)*G)),
                                        std::abs(cHU(x,y)/cH(x,y) + std::sqrt(cH(x,y)*G)) ), 2)
                   + std::pow(std::max( std::abs(cHV(x,y)/cH(x,y) - std::sqrt(cH(x,y)*G)),
                                        std::abs(cHV(x,y)/cH(x,y) + std::sqrt(cH(x,y)*G)) ), 2) );
if(Current > Max){
Max = Current;
}
}
}
// Get the maximum value across all processors
MPI_Allreduce(MPI_IN_PLACE, &Max, 1, MPI_DOUBLE, MPI_MAX, communicator);
// Compute the needed quantities
dt = DX/(std::sqrt(2.0)*Max);
C = 0.5*dt/DX;
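// Note: std::max(|u - c|, |u + c|) equals |u| + c, so mu is effectively
// sqrt((|u| + c)^2 + (|v| + c)^2) with wave speed c = sqrt(G*h), and
// dt = DX/(sqrt(2)*Max) is the corresponding CFL-type stability bound.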
// do not step past the end time of the simulation
if(T+dt > TEND){
dt = TEND-T;
}
}
void Simulation::compute_step () {
// As we compute step n+1, swap the buffers so the last computed values become the old ones
H.swap();
HU.swap();
HV.swap();
// Mirror condition on the borders. TODO: check
H.old().applyBoundaryConditions();
HU.old().applyBoundaryConditions();
HV.old().applyBoundaryConditions();
// References to the grids to avoid repeated calls to the function old()
Grid& oH = H.old();
Grid& oHU = HU.old();
Grid& oHV = HV.old();
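// Ghost-row layout: on interior processes, row 0 and row local_x-1 are
// ghost rows (boundary processes have only one); row 1 is sent north and
// row local_x-2 is sent south, while the ghost rows receive the matching
// rows from the neighbours (MPI_PROC_NULL makes the boundary exchanges no-ops).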
// Taking care of communications going up (so receiving from bottom)
// oH
MPI_Sendrecv(&oH(1, 0), local_y, MPI_DOUBLE, north_prank, 0,
             &oH(local_x - 1, 0), local_y, MPI_DOUBLE, south_prank, 0,
             communicator, MPI_STATUS_IGNORE);
// oHV
MPI_Sendrecv(&oHV(1, 0), local_y, MPI_DOUBLE, north_prank, 0,
             &oHV(local_x - 1, 0), local_y, MPI_DOUBLE, south_prank, 0,
             communicator, MPI_STATUS_IGNORE);
// oHU
MPI_Sendrecv(&oHU(1, 0), local_y, MPI_DOUBLE, north_prank, 0,
             &oHU(local_x - 1, 0), local_y, MPI_DOUBLE, south_prank, 0,
             communicator, MPI_STATUS_IGNORE);
// Taking care of communications going down (so receiving from top)
MPI_Sendrecv(&oH(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0,
&oH(0, 0), local_y, MPI_DOUBLE, north_prank, 0,
communicator, MPI_STATUS_IGNORE);
MPI_Sendrecv(&oHU(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0,
&oHU(0, 0), local_y, MPI_DOUBLE, north_prank, 0,
communicator, MPI_STATUS_IGNORE);
MPI_Sendrecv(&oHV(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0,
&oHV(0, 0), local_y, MPI_DOUBLE, north_prank, 0,
communicator, MPI_STATUS_IGNORE);
// compute the values for all x inside the current local grid. TODO: check
for (int x(1); x < local_x-1; x++) {
compute_row (x);
}
// Apply tolerances to avoid numerical issues
H.current().imposeTolerances (0.0, 1e-5, H.current());
HU.current().imposeTolerances (1e-4, 0.0, H.current());
HV.current().imposeTolerances (1e-4, 0.0, H.current());
}
void Simulation::resizeGrids (int nx, int ny) {
// Create new empty grids of the appropriate size
H = DoubleBuffer(nx, ny);
HU = DoubleBuffer(nx, ny);
HV = DoubleBuffer(nx, ny);
Zdx = Grid(nx,ny);
Zdy = Grid(nx,ny);
}
void Simulation::compute_row (int i) {
// References to the grids to avoid repeated calls to the functions old() and current()
Grid& oH = H.old();
Grid& oHU = HU.old();
Grid& oHV = HV.old();
Grid& cH = H.current();
// compute the values for the next timestep
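// A Lax-Friedrichs-type finite-volume update (as read from the stencil
// below): each new value is the average of its four neighbours plus flux
// differences scaled by C = dt/(2*DX); HU and HV also pick up the
// topography source terms -dt*G*h*Zdx and -dt*G*h*Zdy.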
for (int j = 1; j < y()-1; j++) {
H.current()(i,j)  = + 0.25 * ( + oH ( i , j-1 ) + oH ( i , j+1 ) + oH ( i-1 , j ) + oH ( i+1 , j ))
                    + C * ( + oHU( i , j-1 ) - oHU( i , j+1 )
                            + oHV( i-1 , j ) - oHV( i+1 , j ));
HU.current()(i,j) = + 0.25 * ( + oHU( i , j-1 ) + oHU( i , j+1 ) + oHU( i-1 , j ) + oHU( i+1 , j )) - dt * G * cH( i , j ) * Zdx( i , j )
                    + C * ( + std::pow( oHU( i , j-1 ), 2) / oH( i , j-1 ) + 0.5 * G * std::pow( oH( i , j-1 ), 2)
                            - std::pow( oHU( i , j+1 ), 2) / oH( i , j+1 ) - 0.5 * G * std::pow( oH( i , j+1 ), 2) )
                    + C * ( + oHU( i-1 , j ) * oHV( i-1 , j ) / oH( i-1 , j )
                            - oHU( i+1 , j ) * oHV( i+1 , j ) / oH( i+1 , j ) );
HV.current()(i,j) = + 0.25 * ( + oHV( i , j-1 ) + oHV( i , j+1 ) + oHV( i-1 , j ) + oHV( i+1 , j )) - dt * G * cH( i , j ) * Zdy( i , j )
                    + C * ( + std::pow( oHV( i-1 , j ), 2) / oH( i-1 , j ) + 0.5 * G * std::pow( oH( i-1 , j ), 2)
                            - std::pow( oHV( i+1 , j ), 2) / oH( i+1 , j ) - 0.5 * G * std::pow( oH( i+1 , j ), 2) )
                    + C * ( + oHU( i , j-1 ) * oHV( i , j-1 ) / oH( i , j-1 )
                            - oHU( i , j+1 ) * oHV( i , j+1 ) / oH( i , j+1 ) );
}
}
void Simulation::writeResultsToFile() {
// Prepare the grid (Remove the ghost cells)
std::vector<double> woGhost = H.current().RemoveGhostCellsAndPrepareToWrite(psize,prank,local_x,local_y);
// MATLAB is a column-major language
std::vector<double> toPrint = Reader::PrepareVectorForColumnMajor(woGhost, write_x, write_y);
// Parallel writing
Reader::ParallelwriteGridInColumnMajorFile(toPrint.data(), outputFilename, write_x, write_y, offset_x, communicator);
}
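
// Example driver (a minimal sketch; the values and paths below are
// illustrative placeholders, the project's actual main may differ):
//
//   #include "simulation.h"
//   #include <mpi.h>
//   #include <tuple>
//
//   int main(int argc, char** argv) {
//     MPI_Init(&argc, &argv);
//     int nx = 100;      // grid points per side
//     int size = 500;    // domain size in km, must match the data files
//     Simulation sim(nx, 9.81 /* G; units must match the data */, size,
//                    0.2 /* TEND */, double(size)/nx /* DX */,
//                    "data/", "output.bin", MPI_COMM_WORLD);
//     double time; int steps;
//     std::tie(time, steps) = sim.compute();
//     MPI_Finalize();
//     return 0;
//   }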
