simulation.cc

/* -------------------------------------------------------------------------- */
#include "simulation.hh"
/* -------------------------------------------------------------------------- */
#include <cassert>
#include <cmath>
#include <iostream>
#include <numeric>
#include <mpi.h>
/* -------------------------------------------------------------------------- */
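// Builds a 2D Cartesian process topology and derives, for each rank, the
// local grid size (including ghost layers), the offset of the local block
// within the global grid, and the grid spacing h = 1/N in each direction.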
Simulation::Simulation(int m, int n, MPI_Comm communicator) {
  // retrieve the number of processors and our rank in the processor pool
  MPI_Comm_size(communicator, &m_psize);

  m_dims = {0, 0};
  std::array<int, 2> periods{0, 0};
  MPI_Dims_create(m_psize, m_dims.size(), m_dims.data());
  assert(m_dims[0] * m_dims[1] == m_psize &&
         "MPI_Dims_create produced an invalid decomposition");

  MPI_Cart_create(communicator, m_dims.size(), m_dims.data(), periods.data(), 0,
                  &m_communicator);
  MPI_Comm_rank(m_communicator, &m_prank);
  MPI_Cart_coords(m_communicator, m_prank, m_coords.size(), m_coords.data());

  m_dumper = std::make_unique<DumperBinary>(m_communicator);

  m_global_size = {m, n};
  std::array<int, 2> ghosts{0, 0};
  for (std::size_t i = 0; i < m_local_size.size(); ++i) {
    // compute the local size of the grid; the remainder is spread evenly
    // over the first processors
    m_local_size[i] = m_global_size[i] / m_dims[i] +
                      (m_coords[i] < m_global_size[i] % m_dims[i] ? 1 : 0);

    // add the ghost lines where a neighboring process exists
    if (m_dims[i] > 1)
      ghosts[i] = (m_coords[i] == 0 || m_coords[i] == m_dims[i] - 1) ? 1 : 2;
    m_local_size[i] += ghosts[i];

    // compute the offset of the local grid within the global one
    m_offset[i] = (m_global_size[i] / m_dims[i]) * m_coords[i] +
                  (m_coords[i] < m_global_size[i] % m_dims[i]
                       ? m_coords[i]
                       : m_global_size[i] % m_dims[i]);
    m_h[i] = 1. / m_global_size[i];
  }
#if defined(_DEBUG_MPI)
  for (int i = 0; i < m_psize; ++i) {
    if (m_prank == i)
      std::cerr << m_prank << " (" << m_coords[0] << ", " << m_coords[1] << ")"
                << " - " << m_local_size[0] << "x" << m_local_size[1] << " + "
                << m_offset[0] << "x" << m_offset[1] << " ++ [" << ghosts[0]
                << "x" << ghosts[1] << "]\n";
    MPI_Barrier(m_communicator);
  }
#endif
  // resize the different grids
  m_grids.resize(m_local_size[0], m_local_size[1]);
  m_f.resize(m_local_size[0], m_local_size[1]);
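
  // initializeHaloCommunications presumably sets up the MPI requests for the
  // ghost-layer exchange driven by startSynchronization()/waitReceives()/
  // waitSends() in compute_step(); the Grid implementation is not part of
  // this file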
  m_grids.current().initializeHaloCommunications(m_communicator, ghosts);
  m_grids.old().initializeHaloCommunications(m_communicator, ghosts);
}
/* -------------------------------------------------------------------------- */
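// Fills m_f with the discretized right-hand side f(x, y) = -2 a^2 sin(a x)
// sin(a y) of the Poisson problem. The factor m_h[0] * m_h[1] plays the role
// of h^2 in the 5-point stencil (the two spacings coincide for a square
// grid), so the Jacobi update in compute_step() can subtract m_f(i, j)
// directly.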
void Simulation::set_initial_conditions() {
  std::array<int, 2> start = {0, 0};
  std::array<int, 2> end = m_local_size;
  for (int i = 0; i < 2; ++i) {
    start[i] += (m_coords[i] == 0 ? 0 : 1);
    end[i] -= (m_coords[i] == m_dims[i] - 1 ? 0 : 1);
  }

  float min = 0., max = 0.;
  float a = 5.5 * M_PI;
  for (int i = start[0]; i < end[0]; i++) {
    for (int j = start[1]; j < end[1]; j++) {
      m_f(i, j) =
          -2. * a * a * std::sin(a * (m_offset[0] + i - start[0]) * m_h[0]) *
          std::sin(a * (m_offset[1] + j - start[1]) * m_h[1]) * m_h[0] * m_h[1];
      min = std::min(min, m_f(i, j));
      max = std::max(max, m_f(i, j));
    }
  }

  m_dumper->set_min(min);
  m_dumper->set_max(max);
  m_dumper->dump(m_f, 1);
}
/* -------------------------------------------------------------------------- */
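// Runs Jacobi iterations until the global (squared) L2 norm of the last
// update drops below m_epsilon; set_epsilon() stores the tolerance squared,
// so no square root is needed inside the loop.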
std::tuple<float, int> Simulation::compute() {
  int s = 0;
  float l2 = 0.;
  do {
    l2 = compute_step();
    DEBUG::print(std::to_string(m_prank) + " do_step: " + std::to_string(l2));

    m_grids.swap();

    // sum the per-rank squared norms to get the global one
    MPI_Allreduce(MPI_IN_PLACE, &l2, 1, MPI_FLOAT, MPI_SUM, m_communicator);
    DEBUG::print(std::to_string(m_prank) + " MPI_Allreduce: " +
                 std::to_string(l2) + " / " + std::to_string(m_epsilon));
    ++s;
  } while (l2 > m_epsilon);

  m_dumper->set_min(min());
  m_dumper->set_max(max());
  m_dumper->dump(m_grids.old(), 0);

  return std::make_tuple(std::sqrt(l2), s);
}
/* -------------------------------------------------------------------------- */
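// m_epsilon stores the square of the requested tolerance so that compute()
// can compare it directly against the squared L2 norm of an update;
// epsilon() undoes the squaring for callers.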
void Simulation::set_epsilon(float epsilon) { m_epsilon = epsilon * epsilon; }
/* -------------------------------------------------------------------------- */
float Simulation::epsilon() const { return std::sqrt(m_epsilon); }
/* -------------------------------------------------------------------------- */
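// Row-wise variant of the stencil update; it mirrors the lambda used in
// compute_step() and appears to be kept only as a reference, since nothing
// in this file calls it.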
inline float Simulation::compute_row(int i) {
  float l2 = 0.;
  Grid & u = m_grids.current();
  Grid & uo = m_grids.old();

  for (int j = 1; j < m_local_size[1] - 1; j++) {
    // computation of the new step
    u(i, j) = (uo(i - 1, j) + uo(i + 1, j) + uo(i, j - 1) + uo(i, j + 1) -
               m_f(i, j)) /
              4.;

    // L2 norm of the update
    l2 += (uo(i, j) - u(i, j)) * (uo(i, j) - u(i, j));
  }
  return l2;
}
/* -------------------------------------------------------------------------- */
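// One Jacobi step with communication/computation overlap: start the halo
// exchange, update the interior points that do not touch the ghosts, wait
// for the ghost values, then update the rows and columns adjacent to the
// ghost layers.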
float Simulation::compute_step() {
  float l2 = 0.;

  auto & uo = m_grids.old();
  auto & u = m_grids.current();

  auto _compute = [&](auto && i, auto && j) {
    u(i, j) = (uo(i - 1, j) + uo(i + 1, j) + uo(i, j - 1) + uo(i, j + 1) -
               m_f(i, j)) /
              4.;
    l2 += (uo(i, j) - u(i, j)) * (uo(i, j) - u(i, j));
  };

  // start the persistent halo communications
  uo.startSynchronization();

  // compute the inner points that do not depend on the ghosts
  for (int i = 2; i < m_local_size[0] - 2; i++) {
    for (int j = 2; j < m_local_size[1] - 2; j++) {
      _compute(i, j);
    }
  }

  /// wait to receive the ghosts before using them for the computation
  uo.waitReceives();

  // first and last rows, corner points included (the original loops started
  // at index 2 and left the four corners stale)
  for (int j = 1; j < m_local_size[1] - 1; j++) {
    _compute(1, j);
    _compute(m_local_size[0] - 2, j);
  }
  // first and last columns, corners excluded since the rows above covered them
  for (int i = 2; i < m_local_size[0] - 2; i++) {
    _compute(i, 1);
    _compute(i, m_local_size[1] - 2);
  }

  /// wait to send everything before changing the buffers
  uo.waitSends();

  return l2;
}
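/* -------------------------------------------------------------------------- */
// Folds a binary function over the interior of a grid (the outermost layer,
// whether ghost or physical boundary, is excluded).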
namespace {
template <typename G, typename Func>
auto accumulate_grid(const G & grid, float init, Func && func) {
  auto m = grid.m();
  auto n = grid.n();
  for (decltype(m) i = 1; i < m - 1; i++) {
    for (decltype(n) j = 1; j < n - 1; j++) {
      init = func(init, grid(i, j));
    }
  }
  return init;
}
} // namespace
/* -------------------------------------------------------------------------- */
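// Global minimum over the interior of the old grid, reduced across all ranks.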
float Simulation::min() const {
  const auto & u0 = m_grids.old();
  auto tmp = accumulate_grid(
      u0, u0(1, 1), [](auto && a, auto && b) { return std::min(a, b); });
  MPI_Allreduce(MPI_IN_PLACE, &tmp, 1, MPI_FLOAT, MPI_MIN, m_communicator);
  return tmp;
}
/* -------------------------------------------------------------------------- */
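// Global maximum over the interior of the old grid, reduced across all ranks.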
float Simulation::max() const {
  const auto & u0 = m_grids.old();
  auto tmp = accumulate_grid(
      u0, u0(1, 1), [](auto && a, auto && b) { return std::max(a, b); });
  MPI_Allreduce(MPI_IN_PLACE, &tmp, 1, MPI_FLOAT, MPI_MAX, m_communicator);
  return tmp;
}
/* -------------------------------------------------------------------------- */
