simulation.cc

/* -------------------------------------------------------------------------- */
#include "simulation.hh"
/* -------------------------------------------------------------------------- */
#include <cmath>
#include <iostream>
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
Simulation::Simulation(int m, int n, MPI_Comm communicator)
    : m_global_m(m), m_global_n(n), m_epsilon(1e-7), m_h_m(1. / m),
      m_h_n(1. / n), m_dumper(new DumperBinary(m_grids.old(), communicator)),
      m_communicator(communicator) {

  // retrieving the number of processors and the rank in the processor pool
  MPI_Comm_rank(m_communicator, &m_prank);
  MPI_Comm_size(m_communicator, &m_psize);

  // computation of the local size of the grid; the remainder is spread equally
  // over the first processors
  m_local_m = m / m_psize + (m_prank < m % m_psize ? 1 : 0);
  m_local_n = n;

  // adding the ghost lines if needed
  if (m_psize > 1)
    m_local_m += (m_prank == 0 || m_prank == m_psize - 1) ? 1 : 2;

  // computing the offsets of the local grid in the global one
  m_offset_m = (m / m_psize) * m_prank +
               (m_prank < m % m_psize ? m_prank : m % m_psize);
  m_offset_n = 0;
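  // illustrative example (not computed by the code): with m = 10 and 3
  // processors, ranks 0/1/2 own 4/3/3 rows and m_offset_m is 0/4/7; after
  // adding the ghost lines, m_local_m becomes 5/5/4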
  // resizing the different grids
  m_grids.resize(m_local_m, m_local_n);
  m_f.resize(m_local_m, m_local_n);

  // determining the ranks of the neighbors
  m_north_prank = (m_prank == 0 ? MPI_PROC_NULL : m_prank - 1);
  m_south_prank = (m_prank == (m_psize - 1) ? MPI_PROC_NULL : m_prank + 1);
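  // on the first and last rank one neighbor is MPI_PROC_NULL, so the
  // corresponding persistent sends/receives below complete immediately
  // without communicating; no special case is needed at the domain boundaries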
  auto create_persistent_comm = [&](Grid & g) {
    auto & requests = g.getRequests();
    requests.resize(4);

    // posting the receive requests
    MPI_Recv_init(&g(0, 0), m_local_n, MPI_FLOAT, m_north_prank, 0,
                  m_communicator, &requests[0]);
    MPI_Recv_init(&g(m_local_m - 1, 0), m_local_n, MPI_FLOAT, m_south_prank, 0,
                  m_communicator, &requests[1]);

    // posting the send requests
    MPI_Send_init(&g(1, 0), m_local_n, MPI_FLOAT, m_north_prank, 0,
                  m_communicator, &requests[2]);
    MPI_Send_init(&g(m_local_m - 2, 0), m_local_n, MPI_FLOAT, m_south_prank, 0,
                  m_communicator, &requests[3]);
  };

  create_persistent_comm(m_grids.old());
  create_persistent_comm(m_grids.current());
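  // each grid carries its own set of persistent requests, bound to its own
  // buffers: the grids are swapped after every step, and compute_step()
  // (re)starts the requests of whichever grid is currently the old one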
  // Some info if needed to debug
  // std::cout << m_prank << " " << m_global_m << " " << m_global_n << " "
  //           << m_local_m << " " << m_local_n << " " << m_offset_m << " "
  //           << m_offset_n << " " << m_north_prank << " " << m_south_prank
  //           << std::endl;
}
/* -------------------------------------------------------------------------- */
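// The forcing term below is f(x, y) = -200 pi^2 sin(10 pi x) sin(10 pi y) on
// the unit square. Since the Laplacian of sin(10 pi x) sin(10 pi y) equals
// exactly this f, the analytic solution of laplacian(u) = f is
// u(x, y) = sin(10 pi x) sin(10 pi y), assuming homogeneous Dirichlet boundary
// conditions (zero-initialized grids).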
void Simulation::set_initial_conditions() {
  // the ghost rows are skipped; (i - i_start) maps the local row back to the
  // global row index m_offset_m + (i - i_start)
  int i_start = (m_prank == 0 ? 0 : 1);
  int i_end = (m_prank == m_psize - 1 ? m_local_m : m_local_m - 1);

  for (int i = i_start; i < i_end; i++) {
    for (int j = 0; j < m_local_n; j++) {
      m_f(i, j) = -2. * 100. * M_PI * M_PI *
                  std::sin(10. * M_PI * (m_offset_m + i - i_start) * m_h_m) *
                  std::sin(10. * M_PI * (m_offset_n + j) * m_h_n);
    }
  }
}
/* -------------------------------------------------------------------------- */
std::tuple<float, int> Simulation::compute() {
  int s = 0;
  float l2 = 0;

  do {
    l2 = compute_step();
    m_grids.swap();
    // m_dumper->dump(s);
    ++s;
  } while (l2 > m_epsilon);

  return std::make_tuple(std::sqrt(l2), s);
}
/* -------------------------------------------------------------------------- */
// the tolerance is stored squared so that compute() can compare it directly
// to the squared L2 norm without taking a square root at every step
void Simulation::set_epsilon(float epsilon) { m_epsilon = epsilon * epsilon; }
/* -------------------------------------------------------------------------- */
// note: this returns the squared tolerance as stored by set_epsilon()
float Simulation::epsilon() const { return m_epsilon; }
/* -------------------------------------------------------------------------- */
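// Jacobi update of one row for the five-point finite-difference Poisson
// stencil: u(i, j) = 1/4 * (u_north + u_south + u_west + u_east - h^2 * f(i, j)),
// where h^2 is taken as m_h_m * m_h_n (which matches h^2 only when the spacing
// is the same in both directions). The return value is this row's contribution
// to the squared L2 norm of the update, used as the convergence criterion.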
inline float Simulation::compute_row(int i) {
  float l2 = 0;

  Grid & u = m_grids.current();
  Grid & uo = m_grids.old();

  for (int j = 1; j < m_local_n - 1; j++) {
    // computation of the new step
    u(i, j) = 0.25 * (uo(i - 1, j) + uo(i + 1, j) + uo(i, j - 1) +
                      uo(i, j + 1) - m_f(i, j) * m_h_m * m_h_n);

    // contribution to the squared L2 norm of the update
    l2 += (uo(i, j) - u(i, j)) * (uo(i, j) - u(i, j));
  }

  return l2;
}
/* -------------------------------------------------------------------------- */
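// One Jacobi step with communication/computation overlap: start the persistent
// halo exchange on the old grid, update the interior rows that do not touch
// the ghosts, wait for the receives, update the two rows adjacent to the
// ghosts, wait for the sends, and finally reduce the squared L2 norm over all
// processors.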
float Simulation::compute_step() {
  float l2 = 0.;

  Grid & uo = m_grids.old();
  auto & requests = uo.getRequests();

  // starting the persistent halo exchange on the old grid
  MPI_Startall(requests.size(), requests.data());

  // computing the inner rows that do not depend on the ghosts
  for (int i = 2; i < m_local_m - 2; i++) {
    l2 += compute_row(i);
  }

  // wait to receive the ghosts before using them for the computation
  MPI_Waitall(2, requests.data(), MPI_STATUSES_IGNORE);

  // computing the rows that depend on the ghosts
  l2 += compute_row(1);
  l2 += compute_row(m_local_m - 2);

  // wait for the sends to complete before the buffers can be reused
  MPI_Waitall(2, requests.data() + 2, MPI_STATUSES_IGNORE);

  // summing the values of all the processors together
  MPI_Allreduce(MPI_IN_PLACE, &l2, 1, MPI_FLOAT, MPI_SUM, m_communicator);

  return l2;
}
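/* -------------------------------------------------------------------------- */
// Minimal driver sketch (hypothetical, not part of this file); the grid size
// and tolerance are arbitrary, and simulation.hh is assumed to declare the
// interface used below:
//
//   #include "simulation.hh"
//   #include <mpi.h>
//   #include <tuple>
//
//   int main(int argc, char * argv[]) {
//     MPI_Init(&argc, &argv);
//     {
//       // scoped so the Simulation (and its persistent MPI requests) is
//       // destroyed before MPI_Finalize
//       Simulation simu(512, 512, MPI_COMM_WORLD);
//       simu.set_epsilon(1e-4f);
//       simu.set_initial_conditions();
//
//       float l2;
//       int steps;
//       std::tie(l2, steps) = simu.compute();
//     }
//     MPI_Finalize();
//     return 0;
//   }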
