diff --git a/main.cpp b/main.cpp
index 4c8e94c..a6aec8c 100644
--- a/main.cpp
+++ b/main.cpp
@@ -1,48 +1,48 @@
 #include <mpi.h>
 #include <iostream>
 #include <tuple>
 #include "simulation.h"
 
 // Define the constants
 #define G 127267.200000000 // Gravity, 9.82*(3.6)^2*1000 in [km / hr^2]
 #define SIZE 500           // Size of map, SIZE*SIZE [km]
 #define NX 2001            // Number of cells in each direction on the grid
 #define TEND 0.2           // Simulation time in hours [hr]
 #define DIR "../MatlabSource/"
 #define OUTPUT "../MatlabSource/OUT.bin"
 
 int main (int argc, char *argv[]) {
     // initialize MPI
     MPI_Init(&argc, &argv);
 
     int prank, psize; // Useful to debug
     MPI_Comm_rank(MPI_COMM_WORLD, &prank);
     MPI_Comm_size(MPI_COMM_WORLD, &psize);
     //printf("I'm process %d out of %d\n", prank+1, psize);
 
     // compute the grid spacing
     double DX = SIZE*1.0/NX;
     //std::cout << G << " " << SIZE << " " << NX << " " << TEND << " dx: " << DX << std::endl;
 
     // Construct the simulation
     Simulation simu(NX, G, SIZE, TEND, DX, DIR, OUTPUT, MPI_COMM_WORLD);
 
     // Compute the simulation
     float totalTime;
     int k;
     std::tie(totalTime, k) = simu.compute();
 
     if(prank == 0) {
         std::cout << "Total time : " << totalTime << " Number of steps : " << k << std::endl;
     }
 
     // Close MPI
     MPI_Finalize();
-}
\ No newline at end of file
+}
diff --git a/simulation.cpp b/simulation.cpp
index ed40ee9..62621aa 100644
--- a/simulation.cpp
+++ b/simulation.cpp
@@ -1,286 +1,288 @@
 //
 // Created by Arnaud Pannatier on 06.05.18.
 // Based on the code of :
 //  - Nicolas Richart
 //  - Vincent Keller
 //  - Vittoria Rezzonico
 // See the files AUTHORS and COPYRIGHT for the concerning information
 //
 /* -------------------------------------------------------------------------- */
 #include "simulation.h"
 #include "reader.h"
 /* -------------------------------------------------------------------------- */
 #include <chrono>
 #include <cmath>
 #include <sstream>
 
 Simulation::Simulation (int NX_, double G_, int SIZE_, double TEND_, double DX_,
                         std::string dir_, std::string output_, MPI_Comm communicator_)
     : NX(NX_), NY(NX_), dir(dir_), outputFilename(output_),
       G(G_), TEND(TEND_), DX(DX_), dt(0), T(0), SIZE(SIZE_) {
     // Initialize the communicator
     communicator = communicator_;
 
     // retrieving the number of procs and the rank in the proc pool
     MPI_Comm_rank(communicator, &prank);
     MPI_Comm_size(communicator, &psize);
 
     // computation of the local size of the grid; the remainder is spread equally
     // over the first processors
     local_x = NX / psize + (prank < NX % psize ? 1 : 0);
     local_y = NY;
 
     // adding the ghost lines if needed
     if (psize > 1)
         local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;
 
     // local size without ghost cells
     write_x = local_x;
     write_y = local_y;
 
     // removing the ghost lines if needed
     if (psize > 1)
         write_x -= (prank == 0 || prank == psize - 1) ? 1 : 2;
 
     x_start_mu = (prank == 0 ? 0 : 1);
     x_end_mu = (prank == psize - 1 ? local_x : local_x - 1);
 
     // computing the offsets of the local grid in the global one
     offset_x = (NX / psize) * prank + (prank < NX % psize ? prank : NX % psize);
     offset_y = 0;
 
     // resizing the different grids
     resizeGrids(local_x, local_y);
 
     // determining the ranks of the neighbors
     north_prank = (prank == 0 ? MPI_PROC_NULL : prank - 1);
     south_prank = (prank == psize - 1 ? MPI_PROC_NULL : prank + 1);
 
     // Some info if needed to debug
     // std::cout << prank << " " << NX << " " << NY << " "
     //           << local_x << " " << local_y << " " << offset_x << " "
     //           << offset_y << " " << north_prank << " " << south_prank
     //           << std::endl;
 };
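 
 // Worked example of the decomposition above (added for clarity, not part of
 // the original code): with NX = 2001 and psize = 4, NX / psize = 500 and
 // NX % psize = 1, so rank 0 owns 501 rows and ranks 1-3 own 500 rows each.
 // With the ghost lines, local_x becomes 502 for ranks 0-2 (the edge ranks get
 // one ghost line, interior ranks two) and 501 for rank 3, and the offsets of
 // the owned blocks in the global grid are 0, 501, 1001 and 1501 respectively.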
 
 void Simulation::readInitialConditionsFromFile () {
     // Build the names of the different data files needed by the program
     std::stringstream filename, filenameH, filenameHU, filenameHV, filenameZdx, filenameZdy;
     filename << dir << "Data_nx" << NX << "_" << SIZE << "km_T" << "0.2";
     filenameH << filename.str() << "_h.bin";
     filenameHU << filename.str() << "_hu.bin";
     filenameHV << filename.str() << "_hv.bin";
     filenameZdx << filename.str() << "_Zdx.bin";
     filenameZdy << filename.str() << "_Zdy.bin";
 
     // Load initial conditions from the data files
     H.current() = Reader::ParallelReadFromColumnMajorFile(filenameH.str(), write_x, write_y, offset_x, MPI_COMM_WORLD);
     HU.current() = Reader::ParallelReadFromColumnMajorFile(filenameHU.str(), write_x, write_y, offset_x, MPI_COMM_WORLD);
     HV.current() = Reader::ParallelReadFromColumnMajorFile(filenameHV.str(), write_x, write_y, offset_x, MPI_COMM_WORLD);
 
     // Load topography slopes from the data files
     Zdx = Reader::ParallelReadFromColumnMajorFile(filenameZdx.str(), write_x, write_y, offset_x, MPI_COMM_WORLD);
     Zdy = Reader::ParallelReadFromColumnMajorFile(filenameZdy.str(), write_x, write_y, offset_x, MPI_COMM_WORLD);
 
     if(prank == psize-1) {
         std::cout << " Process " << prank + 1 << " Initialized ! " << std::endl;
     }
 }
 
 /* -------------------------------------------------------------------------- */
 std::tuple<float, int> Simulation::compute() {
     // Number of steps to go through the loop
     int s = 0;
     // Timer for the performance
     double totalTime = 0;
     auto begin = std::chrono::high_resolution_clock::now();
 
     readInitialConditionsFromFile();
 
     while(T < TEND){
         s++;
         auto start = std::chrono::high_resolution_clock::now();
 
         // Compute the next interval of time dt
         compute_mu_and_set_dt();
         //std::cout << " Process " << prank+1 << " dt : " << dt << " Computing T: " << (T+dt) << ". " << (100*(T+dt)/TEND) << "%" << std::endl;
 
         // Compute the values of H, HU, HV for the next time step
         compute_step();
 
         // update time
         T = T + dt;
 
         // Compute the time taken by a single step
         auto end = std::chrono::high_resolution_clock::now();
         auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
         //std::cout << " Process " << prank+1 << " Time for a step : " << ms << std::endl;
 
         // Print the information about the timestep on a single line so it does not flood the output
-        std::cout << "\r" << " Process " << prank+1 << " dt : " << dt << " Computing T: " << (T+dt) << ". " << (100*(T+dt)/TEND) << "%" << " --------- Time for a step : " << ms << std::flush;
+        if(prank == 0){
+            std::cout << "\r" << " Process " << prank+1 << " dt : " << dt << " Computing T: " << (T+dt) << ". " << (100*(T+dt)/TEND) << "%" << " --------- Time for a step : " << ms << " " << std::flush;
+        }
         //std::cout << std::endl << "Step : " << s << " Process " << prank+1 << " dt : " << dt << " Computing T: " << T << ". " << (100*T/TEND) << "%" << " --------- Time for a step : " << ms << std::flush;
     }
 
     // After the computation store the result in a binary file
     writeResultsToFile();
 
     // Compute the time for the whole computation
     auto end = std::chrono::high_resolution_clock::now();
     totalTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-begin).count();
 
     // Return the total time and the number of steps
     return std::make_tuple(totalTime, s);
 }
 
 /* -------------------------------------------------------------------------- */
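 // Note on the timestep (added for clarity, not part of the original code):
 // the quantity maximised below is the largest wave speed on the grid,
 //   mu = max over cells of sqrt( max(|hu/h - sqrt(G*h)|, |hu/h + sqrt(G*h)|)^2
 //                              + max(|hv/h - sqrt(G*h)|, |hv/h + sqrt(G*h)|)^2 ),
 // and the CFL-type restriction dt = DX / (sqrt(2) * mu) keeps the explicit
 // update in compute_step stable.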
" << (100*T/TEND) <<"%" << " --------- Time for a step : " << ms << std::flush; } //After the computation store the result in a binary file writeResultsToFile (); // Compute the time for the whole computation auto end = std::chrono::high_resolution_clock::now(); totalTime = std::chrono::duration_cast(end-begin).count(); //Return the number of time and the number of steps return std::make_tuple(totalTime, s); } /* -------------------------------------------------------------------------- */ void Simulation::compute_mu_and_set_dt () { double Max = 0.0; double Current; // Reference to the grid to to avoid a big number of call to the function current() Grid& cH = H.current(); Grid& cHU = HU.current(); Grid& cHV = HV.current(); // Find the maximum value for mu, No need to take account of the ghost cell for computing the timesteps for (int x = x_start_mu; x < x_end_mu; x++) { for (int y = 0; y < local_y; y++) { Current = sqrt( +pow(std::max( abs(cHU(x,y)/cH(x,y) - sqrt(cH(x,y)*G)) , abs(cHU(x,y)/cH(x,y) + sqrt(cH(x,y)*G)) ) ,2) +pow(std::max( abs(cHV(x,y)/cH(x,y) - sqrt(cH(x,y)*G)) , abs(cHV(x,y)/cH(x,y) + sqrt(cH(x,y)*G)) ),2)); if(Current > Max){ Max = Current; } } } // Getting the max the value of all the processors together MPI_Allreduce(MPI_IN_PLACE, & Max, 1, MPI_DOUBLE, MPI_MAX, communicator); // Compute the needed quantities. dt = DX/(sqrt(2)*Max); C = 0.5*dt/DX; // don't pass over the endtime of the simulation if(T+dt > TEND){ dt = TEND-T; } } void Simulation::compute_step () { //as we compute n+1, we exchange the old information with the last one computed H.swap(); HU.swap(); HV.swap(); // Mirror condition in border TODO: check H.old().applyBoundaryConditions(); HU.old().applyBoundaryConditions(); HV.old().applyBoundaryConditions(); // Reference to the grid to to avoid a big number of call to the function old() Grid &oH = H.old(); Grid & oHU = HU.old(); Grid & oHV = HV.old(); // Taking care of communications going up (so receiving from bottom) // oH MPI_Sendrecv(&oH(1, 0), local_y, MPI_DOUBLE, north_prank, 0, &oH(local_x - 1, 0), local_y, MPI_DOUBLE, south_prank, 0, communicator, MPI_STATUS_IGNORE); // oHU MPI_Sendrecv(&oHV(1, 0), local_y, MPI_DOUBLE, north_prank, 0, &oHV(local_x - 1, 0), local_y, MPI_DOUBLE, south_prank, 0, communicator, MPI_STATUS_IGNORE); //oHV MPI_Sendrecv(&(oHU(1, 0)), local_y, MPI_DOUBLE, north_prank, 0, &(oHU(local_x - 1, 0)), local_y, MPI_DOUBLE, south_prank, 0, communicator, MPI_STATUS_IGNORE); // Taking care of communications going down (so receiving from top) MPI_Sendrecv(&oH(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0, &oH(0, 0), local_y, MPI_DOUBLE, north_prank, 0, communicator, MPI_STATUS_IGNORE); MPI_Sendrecv(&oHU(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0, &oHU(0, 0), local_y, MPI_DOUBLE, north_prank, 0, communicator, MPI_STATUS_IGNORE); MPI_Sendrecv(&oHV(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0, &oHV(0, 0), local_y, MPI_DOUBLE, north_prank, 0, communicator, MPI_STATUS_IGNORE); // compute the value for all x inside the current local grid TODO : check for (int x(1); x < local_x-1; x++) { compute_row (x); } // Apply to avoid numerical issues H.current().imposeTolerances (0.0, 1e-5, H.current()); HU.current().imposeTolerances (1e-4, 0.0, H.current()); HV.current().imposeTolerances (1e-4, 0.0, H.current()); } void Simulation::resizeGrids (int nx, int ny) { // Create new empty grid of the appropriate size H = DoubleBuffer(nx, ny); HU = DoubleBuffer(nx, ny); HV = DoubleBuffer(nx, ny); Zdx = Grid(nx,ny); Zdy = Grid(nx,ny); } void 
 void Simulation::compute_row (int i) {
     // References to the grids to avoid a large number of calls to the functions old() and current()
     Grid& oH = H.old();
     Grid& oHU = HU.old();
     Grid& oHV = HV.old();
     Grid& cH = H.current();
     Grid& cHU = HU.current();
     Grid& cHV = HV.current();
 
     // compute the values for the next timestep
     // NOTE: the body of this loop was corrupted in the extracted diff; the update
     // below is a reconstruction of the standard Lax-Friedrichs shallow-water step,
     // consistent with the constants (G, C, dt) and the slope grids (Zdx, Zdy)
     // used elsewhere in this file, not a verbatim copy of the original lines.
     for(int j(1); j < local_y-1; j++){
         cH(i,j) = 0.25*( oH(i,j-1) + oH(i,j+1) + oH(i-1,j) + oH(i+1,j) )
                 + C*( oHU(i,j-1) - oHU(i,j+1) + oHV(i-1,j) - oHV(i+1,j) );
 
         cHU(i,j) = 0.25*( oHU(i,j-1) + oHU(i,j+1) + oHU(i-1,j) + oHU(i+1,j) )
                  - dt*G*cH(i,j)*Zdx(i,j)
                  + C*( pow(oHU(i,j-1),2)/oH(i,j-1) + 0.5*G*pow(oH(i,j-1),2)
                      - pow(oHU(i,j+1),2)/oH(i,j+1) - 0.5*G*pow(oH(i,j+1),2) )
                  + C*( oHU(i-1,j)*oHV(i-1,j)/oH(i-1,j)
                      - oHU(i+1,j)*oHV(i+1,j)/oH(i+1,j) );
 
         cHV(i,j) = 0.25*( oHV(i,j-1) + oHV(i,j+1) + oHV(i-1,j) + oHV(i+1,j) )
                  - dt*G*cH(i,j)*Zdy(i,j)
                  + C*( oHU(i,j-1)*oHV(i,j-1)/oH(i,j-1)
                      - oHU(i,j+1)*oHV(i,j+1)/oH(i,j+1) )
                  + C*( pow(oHV(i-1,j),2)/oH(i-1,j) + 0.5*G*pow(oH(i-1,j),2)
                      - pow(oHV(i+1,j),2)/oH(i+1,j) - 0.5*G*pow(oH(i+1,j),2) );
     }
 }
 
 void Simulation::writeResultsToFile () {
     // Remove the ghost cells so that only the rows owned by this process are written
     std::vector<double> woGhost = H.current().RemoveGhostCellsAndPrepareToWrite(psize, prank, local_x, local_y);
 
     // Matlab is a column major language
     std::vector<double> toPrint = Reader::PrepareVectorForColumnMajor(woGhost, write_x, write_y);
 
     // Parallel writing
     Reader::ParallelwriteGridInColumnMajorFile(&toPrint[0], outputFilename, write_x, write_y, offset_x, MPI_COMM_WORLD);
 }