diff --git a/ReadAndWrite.cpp b/ReadAndWrite.cpp
index a3430b6..6875d47 100644
--- a/ReadAndWrite.cpp
+++ b/ReadAndWrite.cpp
@@ -1,40 +1,40 @@
 //
 // Created by Arnaud Pannatier on 02.06.18.
 //
 #include "reader.h"
 #include "grid.h"
 
 int main(){
 /*  Grid A(12,25);
     for(int x(0); x<12; x++){
         for (int y(0); y<25; y++){
             A(x,y) = y+25*x;
             std::cout << A(x,y) << " ";
         }
     }
     std::cout << std::endl;
 
     std::vector<double> ret = A.RemoveGhostCellsAndPrepareToWrite(3,1,12,25);
 
     for(auto x: ret){
         std::cout << x << " ";
     }
 */
     //std::cout << A(0,12) << std::endl;
     //std::cout << A << std::endl;
     //Reader::writeGridInFile (A,"10zeros.bin", 10,10);
 
-    Grid B = Reader::readGridFromFile ("ParallelWriting.bin", 25,25);
+    Grid B = Reader::readGridFromFile ("ParallelWriting4.bin", 25,25);
     std::cout << B << std::endl;
 
     return 0;
 }
\ No newline at end of file
diff --git a/Test.cpp b/Test.cpp
index 97e119b..9231ba9 100644
--- a/Test.cpp
+++ b/Test.cpp
@@ -1,89 +1,114 @@
 //
 // Created by Arnaud Pannatier on 11.05.18.
 //
 #include <mpi.h>
 #include "grid.h"
 #include "reader.h"
 #include <iostream>
 #include <cstdio>
 
 int main(int argc, char *argv[]){
     // initialize MPI
     MPI_Init(&argc, &argv);
 
     int prank, psize;
 
     // Useful for debugging
     MPI_Comm_rank(MPI_COMM_WORLD, &prank);
     MPI_Comm_size(MPI_COMM_WORLD, &psize);
     printf("I'm process %d out of %d\n", prank+1, psize);
 
     int size = 25;
 
     // computation of the local size of the grid; the remainder is spread equally
     // over the first processors
     int local_x = size / psize + (prank < size % psize ? 1 : 0);
     int local_y = size;
 
     // adding the ghost lines if needed
     if (psize > 1)
         local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;
 
     // computing the offsets of the local grid in the global one
     int offset_x = (size / psize) * prank + (prank < size % psize ? prank : size % psize);
     int offset_y = 0;
 
     // resizing the different grids
     int x_start = (prank == 0 ? 0 : 1);
     int x_end = (prank == psize - 1 ? local_x : local_x - 1);
 
     int write_x = local_x;
     int write_y = local_y;
 
     // removing the ghost lines if needed
     if (psize > 1)
         write_x -= (prank == 0 || prank == psize - 1) ? 1 : 2;
 
     //std::cout << " Start : " << (offset_y) + local_y * (offset_x - x_start) << std::endl;
 
-    /*
-    Grid A(local_x,local_y);
-    for(std::size_t x(0);x<local_x;x++){
-        for(std::size_t y(0);y<local_y;y++){
-            A(x,y) = y + local_y*x;
-        }
-    }
-    std::vector<double> toPrint = A.RemoveGhostCellsAndPrepareToWrite(psize,prank,local_x, local_y);
-    std::cout << "To Print [0 ] " << toPrint[0] << std::endl;
+//    Grid A(local_x,local_y);
+//    for(std::size_t x(0);x<local_x;x++){
+//        for(std::size_t y(0);y<local_y;y++){
+//            A(x,y) = y + local_y*x;
+//        }
+//    }
+//    std::vector<double> woGhost = A.RemoveGhostCellsAndPrepareToWrite(psize,prank,local_x, local_y);
+
+    //std::vector<double> toPrint = Reader::PrepareVectorForColumnMajor(woGhost, write_x, write_y);
+
+
+//    if(prank ==0){
+//        for(int x(0); x < local_x;x++){
+//            for(int y(0); y < local_y;y++){
+//                std::cout << A(x,y) << " ";
+//            }
+//            std::cout << std::endl;
+//        }
+//    }
 
     // Close MPI
     MPI_Finalize();
     return 0;
 }
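Note on the decomposition used in Test.cpp (and again in simulation.cpp): the remainder of size/psize is handed out, one row each, to the first size%psize ranks, and interior ranks carry two extra ghost lines while the first and last carry one. A minimal standalone sketch (no MPI needed; `size` and `psize` play the same roles as in the test) that prints the resulting sizes and offsets:

    #include <iostream>

    int main() {
        const int size = 25, psize = 4;          // same values as the 25x25 test grid
        for (int prank = 0; prank < psize; ++prank) {
            int local_x  = size / psize + (prank < size % psize ? 1 : 0);
            int offset_x = (size / psize) * prank + (prank < size % psize ? prank : size % psize);
            int ghosts   = (psize > 1) ? ((prank == 0 || prank == psize - 1) ? 1 : 2) : 0;
            std::cout << "rank " << prank << ": rows " << local_x
                      << " (+" << ghosts << " ghost), offset " << offset_x << '\n';
        }
    }

For size = 25 and psize = 4 this prints 7, 6, 6, 6 owned rows at offsets 0, 7, 13, 19, so the counts and offsets tile the 25 rows exactly.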
diff --git a/double_buffer.h b/double_buffer.h
--- a/double_buffer.h
+++ b/double_buffer.h
 //
 // Created by Arnaud Pannatier on 06.05.18.
 // Based on the code of :
 // - Nicolas Richart
 // - Vincent Keller
 // - Vittoria Rezzonico
 // See the files AUTHORS and COPYRIGHT for the concerning information
 //
 #ifndef PHPCTSUNAMIPROJECT_DOUBLE_BUFFER_H
 #define PHPCTSUNAMIPROJECT_DOUBLE_BUFFER_H
 
 #include <memory>
 #include "grid.h"
 
 class DoubleBuffer {
 public:
     // Constructor
+    DoubleBuffer() = default;
     DoubleBuffer(int m, int n);
 
     // return the current value
     Grid & current();
     // return the old value
     Grid & old();
 
     void swap();
 
-    // TODO : remove useless
-    void resize(int nx,int ny);
 private:
     // Keep pointers to the actual place in memory; ensures that we don't duplicate the memory
     std::unique_ptr<Grid> m_current;
     std::unique_ptr<Grid> m_old;
 };
 
 #endif //PHPCTSUNAMIPROJECT_DOUBLE_BUFFER_H
diff --git a/grid.cpp b/grid.cpp
index cdc2fcb..c55b831 100644
--- a/grid.cpp
+++ b/grid.cpp
@@ -1,205 +1,199 @@
 //
 // Created by Arnaud Pannatier on 06.05.18.
 // Based on the code of :
 // - Nicolas Richart
 // - Vincent Keller
 // - Vittoria Rezzonico
 // See the files AUTHORS and COPYRIGHT for the concerning information
 //
 #include <algorithm>
 #include <cmath>
 #include "grid.h"
 
 // TODO : Clean the order
 Grid::Grid (int m, int n): m_x(m), m_y(n), m_grid(m * n) {}
 
 /* -------------------------------------------------------------------------- */
 void Grid::clear() {
     std::fill(m_grid.begin(), m_grid.end(), 0.);
 }
 
 /* -------------------------------------------------------------------------- */
 int Grid::x() const { return m_x; }
 int Grid::y() const { return m_y; }
 
 /* -------------------------------------------------------------------------- */
 Grid& Grid::operator*= (const double a) {
     for(auto& x:m_grid){
         x = x*a;
     }
     return *this;
 }
 
 Grid& Grid::operator*= (Grid const &b) {
     if(m_x != b.x() || m_y != b.y()){
         throw "The Grid has the wrong size";
     }
     for(int x(0); x<m_x; x++){
         for(int y(0); y<m_y; y++){
             get(x,y) *= b(x,y);
         }
     }
     return *this;
 }
 
 std::vector<double> Grid::RemoveGhostCellsAndPrepareToWrite (int psize, int prank, int local_x, int local_y) {
     if(psize > 1){
         if(prank == 0){
             return std::vector<double> (&m_grid[0], &m_grid[(local_x-1)*local_y]);
         }else if(prank == psize-1){
             return std::vector<double> (&m_grid[local_y], &m_grid[local_x*local_y]);
         }else{
             return std::vector<double> (&m_grid[local_y], &m_grid[(local_x-1)*local_y]);
         }
     }
     return m_grid;
 }
 
 Grid operator* (double a, Grid const &g) {
     Grid ret(g);
     return ret*=a;
 }
 
 Grid operator* (Grid const &a, Grid const &b) {
     Grid ret(a);
     return ret*=b;
 }
 
 Grid operator+ (double a, Grid const &g) {
     Grid ret(g);
     return ret+=a;
 }
 
 Grid operator+ (Grid const &a, Grid const &b) {
     Grid ret(a);
     return ret+=b;
 }
 
 Grid sqrt (Grid const &a) {
     Grid ret(a.x(),a.y());
     for(int x(0); x<a.x(); x++){
         for(int y(0); y<a.y(); y++){
             ret(x,y) = std::sqrt(a(x,y));
         }
     }
     return ret;
 }
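RemoveGhostCellsAndPrepareToWrite relies on the row-major layout (row x occupies m_grid[x*local_y] through m_grid[x*local_y + local_y - 1]), so dropping a ghost row is just trimming local_y entries at one end. A hedged sketch of the same idea on a plain std::vector (function name is illustrative, not from the project):

    #include <vector>

    // Drop the leading ghost row unless we are rank 0, and the trailing one
    // unless we are the last rank; rows are contiguous blocks of local_y doubles.
    std::vector<double> dropGhostRows(const std::vector<double>& grid,
                                      int psize, int prank,
                                      int local_x, int local_y) {
        if (psize == 1) return grid;                      // no ghosts at all
        int first = (prank == 0) ? 0 : 1;                 // skip one leading row
        int last  = (prank == psize - 1) ? local_x : local_x - 1;
        return std::vector<double>(grid.begin() + first * local_y,
                                   grid.begin() + last  * local_y);
    }

This matches the three branches of the member function: rank 0 keeps rows [0, local_x-1), the last rank keeps [1, local_x), and interior ranks keep [1, local_x-1).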
diff --git a/grid.h b/grid.h
--- a/grid.h
+++ b/grid.h
 //
 // Created by Arnaud Pannatier on 06.05.18.
 // Based on the code of :
 // - Nicolas Richart
 // - Vincent Keller
 // - Vittoria Rezzonico
 // See the files AUTHORS and COPYRIGHT for the concerning information
 //
 #ifndef PHPCTSUNAMIPROJECT_GRID_H
 #define PHPCTSUNAMIPROJECT_GRID_H
 
 #include <vector>
 #include <iostream>
 
 class Grid {
 public:
+    Grid() = default;
     Grid(int m, int n);
 
     /// Getter and Const Getter
     inline double & get(int i, int j){ return m_grid[i * m_y + j]; };
     inline const double & get(int i, int j) const { return m_grid[i * m_y + j]; };
 
     /// access the value [i][j] of the grid
     inline double & operator()(int i, int j) { return m_grid[i * m_y + j]; }
     inline const double & operator()(int i, int j) const { return m_grid[i * m_y + j]; }
 
     /// Sequential element-wise operators, never used.
     Grid& operator*= (const double a);
     Grid& operator*= (Grid const& b);
     Grid& operator+= (const double a);
     Grid& operator+= (Grid const& b);
     Grid operator-() const;
     Grid inv() const;
 
     /// set the grid to 0
     void clear();
 
     /// Change the shape of the grid
     void resize(int nx, int ny);
 
     /// Remove ghost cells and return a vector of double
     std::vector<double> RemoveGhostCellsAndPrepareToWrite(int psize, int prank, int local_x, int local_y);
 
-    /// TODO : check the boudary conditions
+    /// Apply the mirror boundary conditions
     void applyBoundaryConditions();
 
     /// Impose the tolerances to avoid numerical issues
     void imposeTolerances(double tol, double value, const Grid&);
 
     /// Get the size of the grid
     int x() const;
     int y() const;
 
     /// print the grid
     friend std::ostream& operator<<(std::ostream& os, const Grid&);
 
 private:
     /// size
     int m_x, m_y;
     /// data
     std::vector<double> m_grid;
 };
 
 /// Overloaded matrix operations; not good for performance, so never used.
 Grid operator*(double a, Grid const& g);
 inline Grid operator*(Grid const& g, double a) { return a*g; }
 Grid operator*(Grid const& a, Grid const& b);
 Grid operator+(double a, Grid const& g);
 inline Grid operator+(Grid const& g,double a){ return a+g; };
 Grid operator+(Grid const& a, Grid const& b);
 inline Grid operator-(double a, Grid const& g){ return a+(-g); };
 inline Grid operator-(Grid const& g,double a){ return g+(-a); };
 inline Grid operator-(Grid const& a, Grid const& b){ return a+(-b); };
 inline Grid operator/(double a, Grid const& g){ return a*g.inv(); };
 inline Grid operator/(Grid const& g,double a){ return g*(1.0/a); };
 inline Grid operator/(Grid const& a, Grid const& b){ return a * b.inv(); };
 
 Grid sqrt(Grid const& a);
 Grid pow(Grid const& a, double p);
 double max(Grid const& a);
 Grid maxAbs(Grid const&, Grid const&);
 
 #endif //PHPCTSUNAMIPROJECT_GRID_H
diff --git a/main.cpp b/main.cpp
index d55f037..71376b9 100644
--- a/main.cpp
+++ b/main.cpp
@@ -1,48 +1,47 @@
 #include <mpi.h>
 #include <iostream>
 #include <tuple>
 #include "simulation.h"
 
 // Define the constants
 #define G 127267.200000000   // Gravity, 9.82*(3.6)^2*1000 in [km / hr^2]
 #define SIZE 500             // Size of map, Size*Size [km]
 #define NX 2001              // Number of cells in each direction on the grid
-#define TEND 0.20            // Simulation time in hours [hr]
+#define TEND 0.002           // Simulation time in hours [hr]
 #define DIR "../MatlabSource/"
 #define OUTPUT "../MatlabSource/OUT.bin"
 
 int main (int argc, char *argv[]) {
-
     // initialize MPI
     MPI_Init(&argc, &argv);
 
     int prank, psize;
 
     // Useful for debugging
     MPI_Comm_rank(MPI_COMM_WORLD, &prank);
     MPI_Comm_size(MPI_COMM_WORLD, &psize);
     printf("I'm process %d out of %d\n", prank+1, psize);
 
     // compute the constant
     double DX = SIZE*1.0/NX;
     //std::cout << G << " " << SIZE << " " << NX << " " << TEND << " dx: "<< DX << std::endl;
 
     // Construct the simulation
     Simulation simu(NX,G,SIZE,TEND,DX, DIR,OUTPUT, MPI_COMM_WORLD);
 
     // Compute the simulation
     float totalTime;
     int k;
     std::tie(totalTime, k) = simu.compute();
 
     std::cout << "Total time : " << totalTime << " Number of steps : " << k << std::endl;
 
     // Close MPI
     MPI_Finalize();
 }
\ No newline at end of file
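The G constant in main.cpp checks out: 9.82 m/s^2 expressed in km/hr^2 is 9.82 * 3600^2 / 1000 = 9.82 * 3.6^2 * 1000 = 9.82 * 12960 = 127267.2, which is exactly the value defined above.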
diff --git a/reader.cpp b/reader.cpp
index 8383f83..9b0cd84 100644
--- a/reader.cpp
+++ b/reader.cpp
@@ -1,160 +1,266 @@
 //
 // Created by Arnaud Pannatier on 06.05.18.
 //
 #include "reader.h"
 #include <fstream>
 #include <limits>
 #include <iostream>
 
-// TODO : check for memory leaks
 Grid Reader::readGridFromFile (std::string filename,int nx, int ny) {
     // Prepare the grid of the good size
     Grid ret = Grid(nx,ny);
-
-    //std::cout << filename << std::endl;
 
     // create the stream to read the file
     std::ifstream file (filename, std::ios::in | std::ios::binary);
 
     if (file.is_open()) {
         // Store all the binary data in memory in a big array of char
         char * memblock;
 
         // Count the size of the file
         file.ignore( std::numeric_limits<std::streamsize>::max() );
         std::streamsize size = file.gcount();
         file.clear();
         //std::cout << size << " -- size " << std::endl;
 
         // prepare the memory block
         memblock = new char[size];
 
         // Read "size" bytes from the file
         file.seekg (0, std::ios::beg);
         file.read (memblock, size);
 
         // Reading is done.
         file.close();
         //std::cout << "the entire file content is in memory" << std::endl;
 
         // Recast the values in memory into the proper format, a big array of double
         double* double_values = reinterpret_cast<double*>(memblock);
 
         // Assign the good values to the right place
         for(int x(0); x<nx; x++){
             for(int y(0); y<ny; y++){
                 ret(x,y) = double_values[x*ny + y];
             }
         }
 
         // Clean the pointer
         delete[] memblock;
     }
 
     return ret;
 }
 
+Grid Reader::ParallelReadFromFile (std::string filename,int nx, int ny, MPI_Comm communicator) {
+    /// Get the parallel information from the communicator
+    int prank, psize;
+    MPI_Comm_rank(communicator, &prank);
+    MPI_Comm_size(communicator, &psize);
+    MPI_Status status;
+
+    /// Prepare the final size with ghost cells
+    int local_x = nx;
+    int local_y = ny;
+
+    /// adding the ghost lines if needed
+    if (psize > 1)
+        local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;
+
+    /// Assign only the non-ghost cells
+    int x_start = (prank == 0 ? 0 : 1);
+    int x_end = (prank == psize - 1 ? local_x : local_x - 1);
+
+    /// Prepare the return grid of the proper size (with room for the ghost cells)
+    Grid ret(local_x, local_y);
+
+    /// Open file with MPI
+    MPI_File file;
+    MPI_File_open( communicator, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file );
+
+    /// Prepare a buffer of the process size
+    double* buffer;
+    buffer = new double[nx*ny];
+
+    /// Read contiguous elements
+    MPI_File_read_ordered (file, buffer,nx*ny, MPI_DOUBLE, &status);
+
+    //std::cout<< "Reading ! " << std::endl;
+
+    /// the data is stored in a flat vector; we just have to read it in the right order
+    int k = 0;
+    for(int x(x_start); x<x_end; x++){
+        for(int y(0); y<local_y; y++){
+            ret(x,y) = buffer[k];
+            k++;
+        }
+    }
+
+    /// Clean the buffer and close the file
+    delete[] buffer;
+    MPI_File_close( &file );
+
+    return ret;
+}
+
+Grid Reader::ParallelReadFromColumnMajorFile (std::string filename,int nx, int ny,int offset, MPI_Comm communicator) {
+    /// Get the parallel information from the communicator
+    int prank, psize;
+    MPI_Comm_rank(communicator, &prank);
+    MPI_Comm_size(communicator, &psize);
+    MPI_Status status;
+
+    /// Prepare the final size with ghost cells
+    int local_x = nx;
+    int local_y = ny;
+
+    /// adding the ghost lines if needed
+    if (psize > 1)
+        local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;
+
+    /// Assign only the non-ghost cells
+    int x_start = (prank == 0 ? 0 : 1);
+    int x_end = (prank == psize - 1 ? local_x : local_x - 1);
+
+    /// Define a new type for reading in column major. Assume that we want to read the following grid in column major with two processes:
+    ///     [1 2 3]
+    ///     [4 5 6]
+    ///     [7 8 9]
+    /// We read ny blocks, each containing nx elements (corresponding to the rows), with ny elements between the starts of consecutive blocks.
+    /// For the second process: ny = 3 blocks, nx = 1 element each, stride ny = 3 -> we read [3,6,9]
+    MPI_Datatype vec;
+    MPI_Type_vector (ny,nx,ny,MPI_DOUBLE,&vec);
+    MPI_Type_commit (&vec);
+
+    /// Open the file with MPI
+    MPI_File file;
+    MPI_File_open( communicator, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file );
+
+    /// Offset for the process
+    MPI_Offset disp = sizeof(double)*offset;
+    /// Set the view for the file with our new datatype
+    MPI_File_set_view(file,disp, MPI_DOUBLE, vec,"native", MPI_INFO_NULL);
+
+    /// Prepare a buffer
+    double* values;
+    values = new double[nx*ny];
+
+    MPI_File_read_all (file, values,nx*ny, MPI_DOUBLE, &status);
+
+    /// Prepare the grid
+    Grid ret(local_x, local_y);
+    /// the data is stored in a flat vector; we just have to read it in the right order
+    int k = 0;
+    for (int y (0); y < ny; y++) {
+        for(int x(x_start); x<x_end; x++){
+            ret(x,y) = values[k];
+            k++;
+        }
+    }
+
+    /// Clean the buffer and close the file
+    delete[] values;
+    MPI_File_close( &file );
+
+    return ret;
+}
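To make the MPI_Type_vector(ny, nx, ny, MPI_DOUBLE) view above concrete: the file holds the global N x N grid in column-major order, and since local_y equals the global N here, the stride ny is the global column length. With a 3x3 grid, two processes, and the second process owning nx = 1 row at offset = 2, the view selects file indices 2, 5, 8, one entry per column, which is the [3,6,9] of the in-code comment. A self-contained sketch under those assumptions (hypothetical file name, run with exactly 2 ranks):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        int prank; MPI_Comm_rank(MPI_COMM_WORLD, &prank);
        const int N = 3, nx = (prank == 0 ? 2 : 1);   // rows owned by this rank
        const int offset = (prank == 0 ? 0 : 2);      // first owned row

        MPI_Datatype vec;                             // N blocks of nx doubles, stride N
        MPI_Type_vector(N, nx, N, MPI_DOUBLE, &vec);
        MPI_Type_commit(&vec);

        MPI_File f;
        MPI_File_open(MPI_COMM_WORLD, "colmajor.bin", MPI_MODE_RDONLY, MPI_INFO_NULL, &f);
        MPI_File_set_view(f, sizeof(double) * offset, MPI_DOUBLE, vec, "native", MPI_INFO_NULL);

        double buf[6];                                // nx*N values per rank
        MPI_File_read_all(f, buf, nx * N, MPI_DOUBLE, MPI_STATUS_IGNORE);
        for (int i = 0; i < nx * N; ++i) printf("rank %d: %g\n", prank, buf[i]);

        MPI_File_close(&f);
        MPI_Type_free(&vec);
        MPI_Finalize();
    }

Rank 0 reads file indices 0,1,3,4,6,7 and rank 1 reads 2,5,8, so together they cover the file exactly once.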
" << std::endl; // Clean the pointers delete double_values; } void Reader::ParallelwriteGridInFile (double* g,std::string filename, int nx, int ny, MPI_Comm communicator) { + //std::cout << std::endl << "Writer " << std::endl; - + /// Get info on the parallel information from communicator int prank, psize; MPI_Comm_rank(communicator, &prank); MPI_Comm_size(communicator, &psize); MPI_Status status; + /// Open file with MPI MPI_File file; - MPI_File_open( communicator, filename.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &file ); + /// Write the contiguous elements MPI_File_write_ordered ( file, g, nx*ny, MPI_DOUBLE, &status ); + /// Clean MPI_File_close( &file ); } -Grid Reader::ParallelReadFromFile (std::string filename,int nx, int ny, MPI_Comm communicator) { - // Get info on the parallel information from communicator + + +void Reader::ParallelwriteGridInColumnMajorFile(double* g,std::string filename, int nx, int ny,int offset, MPI_Comm communicator) { + /// Get info on the parallel information from communicator int prank, psize; MPI_Comm_rank(communicator, &prank); MPI_Comm_size(communicator, &psize); MPI_Status status; - // Prepare the final size with ghost cells - int local_x = nx; - int local_y = ny; - - // adding the ghosts lines if needed - if (psize > 1) - local_x += (prank == 0 || prank == psize - 1) ? 1 : 2; - - // Assign only the non-ghost cells - int x_start = (prank == 0 ? 0 : 1); - int x_end = (prank == psize - 1 ? local_x : local_x - 1); - - Grid ret(local_x, local_y); + /// Define a new type for reading in column major, assume that we want to read the following grid [1 2 3] in column major with two process + /// [4 5 6] + /// [7 8 9] + /// We read ny number of block , each block contains nx elements (corresponding to the row), and with ny elements between the start of each block + /// For the second process : ny = 3, nx = 1, ny = 3 -> we read [3,6,9] + MPI_Datatype vec; + MPI_Type_vector (ny,nx,ny,MPI_DOUBLE,&vec); + MPI_Type_commit (&vec); + /// Open file with MPI MPI_File file; + MPI_File_open( communicator, filename.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &file ); - MPI_File_open( communicator, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file ); + /// Set the offset and the view for the file + MPI_Offset disp = sizeof(double)*offset; + MPI_File_set_view(file,disp, MPI_DOUBLE, vec,"native", MPI_INFO_NULL); - double* buffer; - buffer = new double[nx*ny]; + /// Write in Column major + MPI_File_write_all ( file, g, nx*ny, MPI_DOUBLE, &status ); - MPI_File_read_ordered (file, buffer,nx*ny, MPI_DOUBLE, &status); + /// Clean + MPI_File_close( &file ); - //std::cout<< "Reading ! 
" << std::endl; +} - // the data is stored in a long vector we just have to read it in the right way - int k = 0; - for(int x(x_start); x Reader::PrepareVectorForColumnMajor (std::vector arr, int nx, int ny) { + /// Transpose the local grid for the writing in column major + std::vector ret(ny*nx); + int k =0; + for(int y(0); y #include #include "grid.h" class Reader { public: - // Binary reader + /// Binary reader - Classical approach - row major static Grid readGridFromFile(std::string, int, int); - // Binary writer - static void writeGridInFile(Grid&, std::string, int,int); - static void ParallelwriteGridInFile(double*, std::string, int,int, MPI_Comm); + /// Binary Parallel reader - row major static Grid ParallelReadFromFile(std::string, int, int, MPI_Comm); + /// Binary Parallel reader - column major + static Grid ParallelReadFromColumnMajorFile(std::string, int, int,int, MPI_Comm); + /// Binary writer - Classical approach - row major + static void writeGridInFile(Grid&, std::string, int,int); + /// Binary Parallel Writer - row major + static void ParallelwriteGridInFile(double*, std::string, int,int, MPI_Comm); + /// Binary Parallel Writer - Column major + static void ParallelwriteGridInColumnMajorFile(double*, std::string, int,int,int, MPI_Comm); + /// Transpose the grid for Column major writing + static std::vector PrepareVectorForColumnMajor(std::vector arr, int, int); }; #endif //PHPCTSUNAMIPROJECT_READER_H diff --git a/simulation.cpp b/simulation.cpp index 4964ba3..d5f1ad1 100644 --- a/simulation.cpp +++ b/simulation.cpp @@ -1,306 +1,285 @@ // // Created by Arnaud Pannatier on 06.05.18. // Based on the code of : // - Nicolas Richart // - Vincent Keller // - Vittoria Rezzonico // See the files AUTHORS and COPYRIGHT for the concerning information // /* -------------------------------------------------------------------------- */ #include "simulation.h" #include "reader.h" /* -------------------------------------------------------------------------- */ #include #include #include Simulation::Simulation (int NX_, double G_, int SIZE_, double TEND_, double DX_, std::string dir_, std::string output_, MPI_Comm communicator_) - :NX(NX_), NY(NX_),dir(dir_),outputFilename(output_), G(G_),TEND(TEND_),DX(DX_),dt(0),T(0),nt(0), - SIZE(SIZE_), H(DoubleBuffer(NX,NX)), HU(DoubleBuffer(NX,NX)), HV(DoubleBuffer(NX,NX)), - Zdx(Grid(NX,NX)), Zdy(Grid(NX,NX)){ + :NX(NX_), NY(NX_),dir(dir_),outputFilename(output_), G(G_),TEND(TEND_),DX(DX_),dt(0),T(0), SIZE(SIZE_){ // Initialize the communicator communicator = communicator_; // retrieving the number of proc and the rank in the proc pool MPI_Comm_rank(communicator, &prank); MPI_Comm_size(communicator, &psize); // computation of the local size of the grid the remainder is spread equally // on the first processors local_x = NX / psize + (prank < NX % psize ? 1 : 0); local_y = NY; // adding the ghosts lines if needed if (psize > 1) local_x += (prank == 0 || prank == psize - 1) ? 1 : 2; //local size without ghost cells write_x = local_x; write_y = local_y; // removing the ghosts lines if needed if (psize > 1) write_x -= (prank == 0 || prank == psize - 1) ? 1 : 2; - x_start = (prank == 0 ? 0 : 1); - x_end = (prank == psize - 1 ? local_x : local_x - 1); + x_start_mu = (prank == 0 ? 0 : 1); + x_end_mu = (prank == psize - 1 ? local_x : local_x - 1); + // computing the offsets of the local grid in the global one offset_x = (NX / psize) * prank + (prank < NX % psize ? 
diff --git a/simulation.cpp b/simulation.cpp
index 4964ba3..d5f1ad1 100644
--- a/simulation.cpp
+++ b/simulation.cpp
@@ -1,306 +1,285 @@
 //
 // Created by Arnaud Pannatier on 06.05.18.
 // Based on the code of :
 // - Nicolas Richart
 // - Vincent Keller
 // - Vittoria Rezzonico
 // See the files AUTHORS and COPYRIGHT for the concerning information
 //
 /* -------------------------------------------------------------------------- */
 #include "simulation.h"
 #include "reader.h"
 /* -------------------------------------------------------------------------- */
 #include <sstream>
 #include <chrono>
 #include <cmath>
 
 Simulation::Simulation (int NX_, double G_, int SIZE_, double TEND_, double DX_, std::string dir_, std::string output_, MPI_Comm communicator_)
-    :NX(NX_), NY(NX_),dir(dir_),outputFilename(output_), G(G_),TEND(TEND_),DX(DX_),dt(0),T(0),nt(0),
-     SIZE(SIZE_), H(DoubleBuffer(NX,NX)), HU(DoubleBuffer(NX,NX)), HV(DoubleBuffer(NX,NX)),
-     Zdx(Grid(NX,NX)), Zdy(Grid(NX,NX)){
+    :NX(NX_), NY(NX_),dir(dir_),outputFilename(output_), G(G_),TEND(TEND_),DX(DX_),dt(0),T(0), SIZE(SIZE_){
 
     // Initialize the communicator
     communicator = communicator_;
 
     // retrieving the number of proc and the rank in the proc pool
     MPI_Comm_rank(communicator, &prank);
     MPI_Comm_size(communicator, &psize);
 
     // computation of the local size of the grid; the remainder is spread equally
     // over the first processors
     local_x = NX / psize + (prank < NX % psize ? 1 : 0);
     local_y = NY;
 
     // adding the ghost lines if needed
     if (psize > 1)
         local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;
 
     // local size without ghost cells
     write_x = local_x;
     write_y = local_y;
 
     // removing the ghost lines if needed
     if (psize > 1)
         write_x -= (prank == 0 || prank == psize - 1) ? 1 : 2;
 
-    x_start = (prank == 0 ? 0 : 1);
-    x_end = (prank == psize - 1 ? local_x : local_x - 1);
+    x_start_mu = (prank == 0 ? 0 : 1);
+    x_end_mu = (prank == psize - 1 ? local_x : local_x - 1);
+
     // computing the offsets of the local grid in the global one
     offset_x = (NX / psize) * prank + (prank < NX % psize ? prank : NX % psize);
     offset_y = 0;
 
     // resizing the different grids
-    resizeGrids(local_x, local_y);
 
     // determining the rank of the neighbors
     north_prank = (prank == 0 ? MPI_PROC_NULL : prank - 1);
     south_prank = (prank == (psize - 1) ? MPI_PROC_NULL : prank + 1);
 
     // Some info if needed to debug
-    /*std::cout << prank << " " << NX << " " << NY << " "
-              << local_x << " " << local_y << " " << offset_x << " "
-              << offset_y << " " << north_prank << " " << south_prank
-              << std::endl;*/
+//    std::cout << prank << " " << NX << " " << NY << " "
+//              << local_x << " " << local_y << " " << offset_x << " "
+//              << offset_y << " " << north_prank << " " << south_prank
+//              << std::endl;
 };
 
 void Simulation::readInitialConditionsFromFile () {
     // Creating the filestream that stores the names of the different files needed by the program
     std::stringstream filename, filenameH, filenameHU,filenameHV, filenameZdx, filenameZdy;
 
     filename << dir << "Data_nx" << NX << "_" << SIZE << "km_T" <<"0.2";
     filenameH << filename.str() << "_h.bin";
     filenameHU << filename.str() << "_hu.bin";
     filenameHV << filename.str() << "_hv.bin";
     filenameZdx << filename.str() << "_Zdx.bin";
     filenameZdy << filename.str() << "_Zdy.bin";
 
     // Load initial conditions from data files
-    H.current() = Reader::ParallelReadFromFile(filenameH.str(), write_x,write_y, MPI_COMM_WORLD);
-    HU.current() = Reader::ParallelReadFromFile(filenameHU.str(), write_x,write_y, MPI_COMM_WORLD);
-    HV.current() = Reader::ParallelReadFromFile(filenameHV.str(), write_x,write_y, MPI_COMM_WORLD);
+    H.current() = Reader::ParallelReadFromColumnMajorFile (filenameH.str(), write_x,write_y,offset_x, MPI_COMM_WORLD);
+    HU.current() = Reader::ParallelReadFromColumnMajorFile(filenameHU.str(), write_x,write_y,offset_x, MPI_COMM_WORLD);
+    HV.current() = Reader::ParallelReadFromColumnMajorFile(filenameHV.str(), write_x,write_y,offset_x, MPI_COMM_WORLD);
 
     // Load topography slopes from data files
-    Zdx = Reader::ParallelReadFromFile(filenameZdx.str(), write_x,write_y, MPI_COMM_WORLD);
-    Zdy = Reader::ParallelReadFromFile(filenameZdy.str(), write_x,write_y, MPI_COMM_WORLD);
-
-    // If the process is the first of the last we don't need to initialize a line of ghost cells at the beginning or at the end of the array
-    // Filling the local grid from the full one that is read in the file
-    //for (int x = x_start; x < x_end; x++) {
-    //    for (int y = 0; y < local_y; y++) {
-//            H.current()(x,y) = HRead(offset_x + x - x_start,offset_y + y);
-//            HU.current()(x,y) = HURead(offset_x + x - x_start,offset_y + y);
-//            HV.current()(x,y) = HVRead(offset_x + x - x_start,offset_y + y);
-//
-//            Zdx(x,y) = ZdxRead(offset_x + x - x_start,offset_y + y);
-//            Zdy(x,y) = ZdyRead(offset_x + x - x_start,offset_y + y);
-//        }
-//    }
+    Zdx = Reader::ParallelReadFromColumnMajorFile(filenameZdx.str(), write_x,write_y,offset_x, MPI_COMM_WORLD);
+    Zdy = Reader::ParallelReadFromColumnMajorFile(filenameZdy.str(), write_x,write_y,offset_x, MPI_COMM_WORLD);
+
     std::cout << " Process " << prank+1 << " Initialized ! " << std::endl;
 }
" << std::endl; } /* -------------------------------------------------------------------------- */ std::tuple Simulation::compute() { // Number of steps to go through the loop int s = 0; // Timer for the performance double totalTime = 0; auto begin = std::chrono::high_resolution_clock::now (); readInitialConditionsFromFile(); while(T < TEND){ s++; auto start = std::chrono::high_resolution_clock::now (); // Compute the next interval of time dt compute_mu_and_set_dt (); //std::cout << " Process " << prank+1 << "dt : " << dt << " Computing T: " << (T+dt) << ". " << (100*(T+dt)/TEND) <<"%" << std::endl; // Compute the value of H, HU, HV for the next time step compute_step(); //update time T = T + dt; - // TODO : delete redundant information - nt = nt + 1; - // Compute the time taken by a single step auto end = std::chrono::high_resolution_clock::now(); auto ms = std::chrono::duration_cast(end-start).count(); //std::cout <<" Process " << prank+1 << " Time for a step : " << ms << std::endl; // Compute the information of the timestep on a signle line, does not flood the output std::cout << "\r" << " Process " << prank+1 << " dt : " << dt << " Computing T: " << (T+dt) << ". " << (100*(T+dt)/TEND) <<"%" << " --------- Time for a step : " << ms << std::flush; + //std::cout << std::endl << "Step : " << s << " Process " << prank+1 << " dt : " << dt << " Computing T: " << T << ". " << (100*T/TEND) <<"%" << " --------- Time for a step : " << ms << std::flush; } //After the computation store the result in a binary file writeResultsToFile (); // Compute the time for the whole computation auto end = std::chrono::high_resolution_clock::now(); totalTime = std::chrono::duration_cast(end-begin).count(); //Return the number of time and the number of steps return std::make_tuple(totalTime, s); } - - - /* -------------------------------------------------------------------------- */ - void Simulation::compute_mu_and_set_dt () { double Max = 0.0; double Current; // Reference to the grid to to avoid a big number of call to the function current() Grid& cH = H.current(); Grid& cHU = HU.current(); Grid& cHV = HV.current(); // Find the maximum value for mu, No need to take account of the ghost cell for computing the timesteps - for (int x = x_start; x < x_end; x++) { + for (int x = x_start_mu; x < x_end_mu; x++) { for (int y = 0; y < local_y; y++) { Current = sqrt( +pow(std::max( abs(cHU(x,y)/cH(x,y) - sqrt(cH(x,y)*G)) , abs(cHU(x,y)/cH(x,y) + sqrt(cH(x,y)*G)) ) ,2) +pow(std::max( abs(cHV(x,y)/cH(x,y) - sqrt(cH(x,y)*G)) , abs(cHV(x,y)/cH(x,y) + sqrt(cH(x,y)*G)) ),2)); if(Current > Max){ Max = Current; } } } // Getting the max the value of all the processors together MPI_Allreduce(MPI_IN_PLACE, & Max, 1, MPI_DOUBLE, MPI_MAX, communicator); // Compute the needed quantities. 
 
 void Simulation::compute_step () {
-
-    // Set as we compute n+1, we exchange the old information with the last one computed
+    // As we compute step n+1, swap the old buffers with the last computed ones
     H.swap();
     HU.swap();
     HV.swap();
 
     // Mirror conditions on the border; TODO: check
     H.old().applyBoundaryConditions();
     HU.old().applyBoundaryConditions();
     HV.old().applyBoundaryConditions();
 
     // References to the grids, to avoid repeated calls to old()
     Grid & oH = H.old();
     Grid & oHU = HU.old();
     Grid & oHV = HV.old();
 
     // Taking care of communications going up (so receiving from the bottom)
     // oH
     MPI_Sendrecv(&oH(1, 0), local_y, MPI_DOUBLE, north_prank, 0,
                  &oH(local_x - 1, 0), local_y, MPI_DOUBLE, south_prank, 0,
                  communicator, MPI_STATUS_IGNORE);
     // oHV
     MPI_Sendrecv(&oHV(1, 0), local_y, MPI_DOUBLE, north_prank, 0,
                  &oHV(local_x - 1, 0), local_y, MPI_DOUBLE, south_prank, 0,
                  communicator, MPI_STATUS_IGNORE);
     // oHU
     MPI_Sendrecv(&(oHU(1, 0)), local_y, MPI_DOUBLE, north_prank, 0,
                  &(oHU(local_x - 1, 0)), local_y, MPI_DOUBLE, south_prank, 0,
                  communicator, MPI_STATUS_IGNORE);
 
     // Taking care of communications going down (so receiving from the top)
     MPI_Sendrecv(&oH(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0,
                  &oH(0, 0), local_y, MPI_DOUBLE, north_prank, 0,
                  communicator, MPI_STATUS_IGNORE);
     MPI_Sendrecv(&oHU(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0,
                  &oHU(0, 0), local_y, MPI_DOUBLE, north_prank, 0,
                  communicator, MPI_STATUS_IGNORE);
     MPI_Sendrecv(&oHV(local_x - 2, 0), local_y, MPI_DOUBLE, south_prank, 0,
                  &oHV(0, 0), local_y, MPI_DOUBLE, north_prank, 0,
                  communicator, MPI_STATUS_IGNORE);
 
     // compute the values for all x inside the current local grid; TODO: check
     for (int x(1); x < local_x-1; x++) {
         compute_row (x);
     }
 
     // Apply to avoid numerical issues
     H.current().imposeTolerances (0.0, 1e-5, H.current());
     HU.current().imposeTolerances (1e-4, 0.0, H.current());
     HV.current().imposeTolerances (1e-4, 0.0, H.current());
 }
 
 void Simulation::resizeGrids (int nx, int ny) {
     // Create new empty grids of the appropriate size
     H = DoubleBuffer(nx, ny);
     HU = DoubleBuffer(nx, ny);
     HV = DoubleBuffer(nx, ny);
     Zdx = Grid(nx,ny);
     Zdy = Grid(nx,ny);
 }
 
 void Simulation::compute_row (int i) {
     // References to the grids, to avoid repeated calls to old() or current()
     Grid& oH = H.old();
     Grid& oHU = HU.old();
     Grid& oHV = HV.old();
     Grid& cH = H.current();
 
     // compute the values for the next timestep
     for(int j(1); j<local_y-1; j++){
 
 void Simulation::writeResultsToFile () {
-    std::vector<double> toPrint = H.current ().RemoveGhostCellsAndPrepareToWrite(psize,prank,local_x,local_y);
-
+    std::vector<double> woGhost = H.current().RemoveGhostCellsAndPrepareToWrite(psize,prank,local_x,local_y);
+    // Matlab is a column-major language
+    std::vector<double> toPrint = Reader::PrepareVectorForColumnMajor(woGhost, write_x, write_y);
 
     // Parallel writing
-    Reader::ParallelwriteGridInFile (&toPrint[0],outputFilename, write_x,write_y, MPI_COMM_WORLD);
+    Reader::ParallelwriteGridInColumnMajorFile (&toPrint[0],outputFilename, write_x,write_y,offset_x, MPI_COMM_WORLD);
 }
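The ghost-row exchange in compute_step is the standard halo pattern: every rank sends its first owned row north while receiving its south neighbor's row into the bottom ghost line, then the mirror image southwards, with MPI_PROC_NULL turning the sends/receives at the domain edges into no-ops. A stripped-down sketch for one flattened grid (row-major, local_y doubles per row; names are illustrative):

    #include <mpi.h>
    #include <vector>

    void exchangeHalos(std::vector<double>& g, int local_x, int local_y,
                       int north, int south, MPI_Comm comm) {
        // row i starts at g[i*local_y]; rows 0 and local_x-1 are ghosts
        MPI_Sendrecv(&g[1 * local_y],             local_y, MPI_DOUBLE, north, 0,
                     &g[(local_x - 1) * local_y], local_y, MPI_DOUBLE, south, 0,
                     comm, MPI_STATUS_IGNORE);
        MPI_Sendrecv(&g[(local_x - 2) * local_y], local_y, MPI_DOUBLE, south, 0,
                     &g[0],                       local_y, MPI_DOUBLE, north, 0,
                     comm, MPI_STATUS_IGNORE);
    }

Callers pass north = MPI_PROC_NULL on rank 0 and south = MPI_PROC_NULL on the last rank, exactly as north_prank/south_prank are set in the constructor.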
diff --git a/simulation.h b/simulation.h
index 164f74a..9c74323 100644
--- a/simulation.h
+++ b/simulation.h
@@ -1,95 +1,95 @@
 //
 // Created by Arnaud Pannatier on 06.05.18.
 // Based on the code of :
 // - Nicolas Richart
 // - Vincent Keller
 // - Vittoria Rezzonico
 // See the files AUTHORS and COPYRIGHT for the concerning information
 //
 #ifndef PHPCTSUNAMIPROJECT_SIMULATION_H
 #define PHPCTSUNAMIPROJECT_SIMULATION_H
 
 #include <string>
 #include <tuple>
 #include <mpi.h>
 #include "double_buffer.h"
 #include "grid.h"
 
 class Simulation {
 public:
     Simulation(int NX_,double G_, int SIZE_,double TEND_, double DX_, std::string dir_, std::string output_, MPI_Comm communicator_);
 
     /// Simulation I/O
     void readInitialConditionsFromFile();
     void writeResultsToFile();
 
     /// perform the simulation
     std::tuple<float, int> compute();
 
     inline int x(){ return NX; };
     inline int y(){ return NY; };
 
 protected:
     /// compute one step, fetch the ghost cells and call compute_row
     void compute_step();
 
     /// Sequential computation that computes the next value for the i-th row
     void compute_row (int i);
 
     /// Compute the next time step
     void compute_mu_and_set_dt ();
 
     /// Useful for creating grids of the proper local size
     void resizeGrids(int nx, int ny);
 
 private:
     /// Global problem size
     int NX,NY;
 
     /// Directory of the files where to read the grids and write the results
     std::string dir;
 
     /// Output filename
     std::string outputFilename;
 
     /// Constants, timestep and scalar variables useful for the simulation
-    double G, TEND, DX, dt, T,nt, C;
+    double G, TEND, DX, dt, T, C;
 
     /// Size of the Grid
     int SIZE;
 
     /// Grids that store the actual data for the height and the velocity of the water
     DoubleBuffer H, HV,HU;
 
     /// Topography slopes of the map
     Grid Zdx,Zdy;
 
     /// Local problem size
     int local_x, local_y;
 
     /// Local problem size without ghost cells
     int write_x, write_y;
 
     /// variables for iteration without the ghost cells
-    int x_start, x_end;
+    int x_start_mu, x_end_mu;
 
     /// local offset
     int offset_x, offset_y;
 
     /// local neighbors
     int north_prank, south_prank;
 
     /// proc rank
     int prank;
 
     /// communicator size
     int psize;
 
     /// communicator
     MPI_Comm communicator;
 };
 
 #endif //PHPCTSUNAMIPROJECT_SIMULATION_H