mpi_interface.hh

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*
* Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne),
* Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
/* -------------------------------------------------------------------------- */
#ifndef MPI_INTERFACE_HH
#define MPI_INTERFACE_HH
/* -------------------------------------------------------------------------- */
#include "static_types.hh"
#include "tamaas.hh"
#include <type_traits>
#ifdef TAMAAS_USE_MPI
#include <mpi.h>
#endif
/* -------------------------------------------------------------------------- */
namespace tamaas {
/* -------------------------------------------------------------------------- */
/// Contains mock MPI functions used as no-op fall-backs in serial (non-MPI) builds
namespace mpi_dummy {
struct comm {
static comm world;
};
struct sequential {
  static void enter() {}
  static void exit() {}
};
struct sequential_guard {
sequential_guard() { sequential::enter(); }
~sequential_guard() { sequential::exit(); }
};
enum class thread : int { single, funneled, serialized, multiple };
inline bool initialized() { return false; }
inline bool finalized() { return false; }
inline int init(int* /*unused*/, char*** /*unused*/) { return 0; }
inline int init_thread(int* /*unused*/, char*** /*unused*/, thread /*unused*/,
thread* provided) {
*provided = thread::funneled;
return 0;
}
inline int finalize() { return 0; }
inline int rank(comm /*unused*/ = comm::world) { return 0; }
inline int size(comm /*unused*/ = comm::world) { return 1; }
template <operation op = operation::plus, typename T>
inline decltype(auto) reduce(T&& val, comm /*unused*/ = comm::world) {
return std::forward<T>(val);
}
template <operation op = operation::plus, typename T>
inline decltype(auto) allreduce(T&& val, comm /*unused*/ = comm::world) {
return std::forward<T>(val);
}
template <typename T>
inline decltype(auto) gather(const T* send, T* recv, int count,
comm /*unused*/ = comm::world) {
if (send == recv)
return;
thrust::copy_n(send, count, recv);
}
template <typename T>
inline decltype(auto) bcast(T* /*unused*/, int /*unused*/,
comm /*unused*/ = comm::world) {}
} // namespace mpi_dummy
/* -------------------------------------------------------------------------- */
#ifdef TAMAAS_USE_MPI
/// Contains thin wrappers around the real MPI functions
namespace mpi_impl {
/// MPI_Comm wrapper
struct comm {
MPI_Comm _comm;
operator MPI_Comm() { return _comm; }
static comm& world();
};
struct sequential {
static void enter() { comm::world()._comm = MPI_COMM_SELF; }
static void exit() { comm::world()._comm = MPI_COMM_WORLD; }
};
struct sequential_guard {
sequential_guard() { sequential::enter(); }
~sequential_guard() { sequential::exit(); }
};
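/// A minimal usage sketch (caller-side, hypothetical names): the guard
/// temporarily switches comm::world() to MPI_COMM_SELF so enclosed
/// collectives run per-process, then restores MPI_COMM_WORLD on scope exit.
///   {
///     mpi::sequential_guard guard;           // world() is MPI_COMM_SELF
///     auto local = mpi::allreduce(partial);  // reduces over a single rank
///   }                                        // world() is MPI_COMM_WORLD again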
/// MPI Thread level
enum class thread : int {
single = MPI_THREAD_SINGLE,
funneled = MPI_THREAD_FUNNELED,
serialized = MPI_THREAD_SERIALIZED,
multiple = MPI_THREAD_MULTIPLE
};
template <typename T>
struct type_trait;
#define TYPE(t, mpi_t) \
template <> \
struct type_trait<t> { \
static constexpr MPI_Datatype value = mpi_t; \
}
TYPE(double, MPI_DOUBLE);
TYPE(int, MPI_INT);
TYPE(unsigned int, MPI_UNSIGNED);
TYPE(long double, MPI_LONG_DOUBLE);
TYPE(long, MPI_LONG);
TYPE(unsigned long, MPI_UNSIGNED_LONG);
TYPE(::thrust::complex<double>, MPI_CXX_DOUBLE_COMPLEX);
TYPE(::thrust::complex<long double>, MPI_CXX_LONG_DOUBLE_COMPLEX);
TYPE(bool, MPI_CXX_BOOL);
#undef TYPE
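/// Example: type_trait maps a C++ scalar type to its MPI datatype handle at
/// compile time, e.g. type_trait<double>::value is MPI_DOUBLE and
/// type_trait<unsigned long>::value is MPI_UNSIGNED_LONG.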
template <operation op>
struct operation_trait;
#define OPERATION(op, mpi_op) \
template <> \
struct operation_trait<operation::op> { \
static constexpr MPI_Op value = mpi_op; \
}
OPERATION(plus, MPI_SUM);
OPERATION(min, MPI_MIN);
OPERATION(max, MPI_MAX);
OPERATION(times, MPI_PROD);
#undef OPERATION
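/// Example: operation_trait selects the MPI reduction handle at compile time,
/// so a call such as allreduce<operation::max>(local_error) (local_error being
/// a hypothetical caller variable) reduces with MPI_MAX.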
inline bool initialized() {
int has_init;
MPI_Initialized(&has_init);
return has_init != 0;
}
inline bool finalized() {
int has_final;
MPI_Finalized(&has_final);
return has_final != 0;
}
inline int init(int* argc, char*** argv) { return MPI_Init(argc, argv); }
inline int init_thread(int* argc, char*** argv, thread required,
thread* provided) {
return MPI_Init_thread(argc, argv, static_cast<int>(required),
reinterpret_cast<int*>(provided));
}
inline int finalize() { return MPI_Finalize(); }
inline int rank(comm communicator = comm::world()) {
int rank;
MPI_Comm_rank(communicator, &rank);
return rank;
}
inline int size(comm communicator = comm::world()) {
int size;
MPI_Comm_size(communicator, &size);
return size;
}
template <operation op = operation::plus, typename T>
inline decltype(auto) reduce(T val, comm communicator = comm::world()) {
  // MPI forbids aliasing the send and receive buffers: reduce into a copy
  T res = val;
  MPI_Reduce(&val, &res, 1, type_trait<T>::value, operation_trait<op>::value, 0,
             communicator);
  return res;
}
template <operation op = operation::plus, typename T,
          typename = std::enable_if_t<std::is_arithmetic<T>::value or
                                      std::is_same<T, Complex>::value>>
inline decltype(auto) allreduce(T val, comm communicator = comm::world()) {
  // MPI_IN_PLACE avoids aliasing the send and receive buffers
  MPI_Allreduce(MPI_IN_PLACE, &val, 1, type_trait<T>::value,
                operation_trait<op>::value, communicator);
  return val;
}
template <operation op = operation::plus, typename DT, typename ST, UInt n>
inline decltype(auto) allreduce(const StaticVector<DT, ST, n>& v,
comm communicator = comm::world()) {
Vector<DT, n> res;
MPI_Allreduce(v.begin(), res.begin(), n, type_trait<DT>::value,
operation_trait<op>::value, communicator);
return res;
}
template <operation op = operation::plus, typename DT, typename ST, UInt n,
UInt m>
inline decltype(auto) allreduce(const StaticMatrix<DT, ST, n, m>& v,
comm communicator = comm::world()) {
Matrix<DT, n, m> res;
MPI_Allreduce(v.begin(), res.begin(), n * m, type_trait<DT>::value,
operation_trait<op>::value, communicator);
return res;
}
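/// A minimal usage sketch for the static-container overloads (names on the
/// caller side are hypothetical):
///   Vector<Real, 3> local_force = compute_local_force();
///   auto total_force = allreduce(local_force);  // element-wise MPI_SUM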
template <typename T>
inline decltype(auto) gather(const T* send, T* recv, int count,
comm communicator = comm::world()) {
MPI_Gather(send, count, type_trait<T>::value, recv, count,
type_trait<T>::value, 0, communicator);
}
template <typename T>
inline decltype(auto) bcast(T* buffer, int count,
comm communicator = comm::world()) {
MPI_Bcast(buffer, count, type_trait<T>::value, 0, communicator);
}
} // namespace mpi_impl
namespace mpi = mpi_impl;
#else
namespace mpi = mpi_dummy;
#endif // TAMAAS_USE_MPI
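/// A minimal end-to-end sketch of the interface (driver-side, hypothetical
/// names); when TAMAAS_USE_MPI is unset, the mpi_dummy fall-back keeps the
/// same calls valid in a serial build:
///   mpi::init(&argc, &argv);
///   Real local = compute_local_part(mpi::rank(), mpi::size());
///   Real total = mpi::allreduce(local);  // operation::plus, i.e. MPI_SUM
///   mpi::bcast(&total, 1);               // root 0 broadcasts its value
///   mpi::finalize();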
} // namespace tamaas
/* -------------------------------------------------------------------------- */
#endif // MPI_INTERFACE_HH
