diff --git a/src/communicator/comm_group.hh b/src/communicator/comm_group.hh
index 23884e8..6ed2537 100644
--- a/src/communicator/comm_group.hh
+++ b/src/communicator/comm_group.hh
@@ -1,180 +1,184 @@
#ifndef __LIBMULTISCALE_COMM_GROUP_HH__
#define __LIBMULTISCALE_COMM_GROUP_HH__
/* -------------------------------------------------------------------------- */
#include "lm_common.hh"
#include "lm_object.hh"
/* -------------------------------------------------------------------------- */
#include <mpi.h>
#include <vector>
/* -------------------------------------------------------------------------- */
__BEGIN_LIBMULTISCALE__
/* -------------------------------------------------------------------------- */
struct Request {
  MPI_Request request;
};
/* -------------------------------------------------------------------------- */
struct MPIProc {
  MPIProc() : sequence_number_recv(0), sequence_number_send(0){};
  UInt getAbsoluteRank() { return absolute_mpi_rank; };

  UInt id;
  UInt mpi_rank;
  UInt sequence_number_recv;
  UInt sequence_number_send;
  UInt absolute_mpi_rank;
};
/* -------------------------------------------------------------------------- */
inline std::ostream &operator<<(std::ostream &os, const MPIProc &proc) {
  os << "proc: " << proc.id << " mpi_rank: " << proc.mpi_rank
     << " sequence_number_recv: " << proc.sequence_number_recv
     << " sequence_number_send: " << proc.sequence_number_send
     << " absolute_mpi_rank: " << proc.absolute_mpi_rank;
  return os;
}
/* -------------------------------------------------------------------------- */
struct CommGroup : public LMObject {
  inline CommGroup(const LMID &id, int color, UInt nb_procs);

  bool operator==(CommGroup &grp) { return (grp.getID() == this->getID()); }
  bool operator!=(CommGroup &grp) { return !(grp == *this); }

  auto getMPIComm() { return mpi_comm; };

  UInt getMyRank() const {
    if (mpi_comm == MPI_COMM_NULL)
      LM_FATAL("Not a member of the group: this call should be guarded");
    int rank_in_group;
    MPI_Comm_rank(mpi_comm, &rank_in_group);
    return rank_in_group;
  }

  // UInt real2GroupRank(UInt real_rank);

  const LMID &getID() const override {
    auto &id = LMObject::getID();
    if (id == "invalid")
      LM_FATAL("Communication group is invalid");
    return id;
  };

  template <typename BufType>
  void allReduce(BufType &&contrib, const std::string &comment, Operator op);
  template <typename T>
  void allReduce(T *contrib, UInt nb, const std::string &comment, Operator op);
  template <typename BufType>
  void reduce(BufType &&contrib, const std::string &comment, Operator op,
              UInt root_rank = 0);
  template <typename T>
  void reduce(T *contrib, UInt nb, const std::string &comment, Operator op,
              UInt root_rank = 0);
  template <typename T>
  void send(T *d, UInt nb, UInt to, const std::string &comment);
  template <typename T>
  Request isend(T *d, UInt nb, UInt to, const std::string &comment);
  template <typename T>
  inline void receive(T *d, UInt nb, UInt from, const std::string &comment);
  template <typename BufType>
  inline void broadcast(BufType &&buf, UInt root, const std::string &comment);
  template <typename T>
  inline void broadcast(T *buf, UInt nb, UInt root, const std::string &comment);
  template <typename T>
  inline void gather(T *sendbuf, UInt nb, T *recvbuf, UInt root,
                     const std::string &comment);
  template <typename BufType>
  inline void gather(BufType &&sendbuf, BufType &&recvbuf, UInt root,
                     const std::string &comment);
  template <typename T>
  inline void scatter(T *sendbuf, T *recvbuf, UInt nb_recv, UInt root,
                      const std::string &comment);
  template <typename BufType>
  inline void scatter(BufType &&sendbuf, BufType &&recvbuf, UInt root,
                      const std::string &comment);
  template <typename T>
  inline void allGatherv(T *send_buffer, UInt send_nb, T *recv_buffer,
                         std::vector<UInt> &recv_counts,
                         const std::string &comment);
  template <typename BufType>
  inline void allGatherv(BufType &&send_buffer, BufType &&recv_buffer,
                         std::vector<UInt> &recv_counts,
                         const std::string &comment);
  template <typename T>
  inline void allGather(T *send_buffer, UInt nb, T *recv_buffer,
                        const std::string &comment);
  template <typename BufType>
  inline void allGather(BufType &&send_buffer, BufType &&recv_buffer,
                        const std::string &comment);

  auto size() const { return processors.size(); };
  auto begin() { return processors.begin(); };
  auto end() { return processors.end(); };
  auto &operator[](UInt i) { return processors[i]; }

  inline bool isInGroup(UInt mpi_rank) const;
  bool amIinGroup() { return is_current_proc_in_group; };

  void synchronize() { MPI_Barrier(mpi_comm); };

  void wait(Request &req) {
    MPI_Status status;
    MPI_Wait(&req.request, &status);
  };

  void printself(std::ostream &os) const override;

  template <typename Vec>
  inline void send(Vec &&d, UInt to, const std::string &comment);
  template <typename Vec>
  inline void receive(Vec &&d, UInt from, const std::string &comment);
  template <typename T>
  inline int probe(UInt from, const std::string &comment);

protected:
  //! the intra communicator of the group
  MPI_Comm mpi_comm;
  //! the mpi group
  MPI_Group mpi_group;
  //! vector of processors
  std::vector<MPIProc> processors;
  //! whether the current processor is a member of the group
  bool is_current_proc_in_group;
};
/* -------------------------------------------------------------------------- */
inline std::ostream &operator<<(std::ostream &os, const CommGroup &group) {
  group.printself(os);
  return os;
}
/* -------------------------------------------------------------------------- */
struct SelfGroup : public CommGroup {
  inline SelfGroup();
};

+struct AllGroup : public CommGroup {
+  inline AllGroup();
+};
+
__END_LIBMULTISCALE__
#endif //__LIBMULTISCALE_COMM_GROUP_HH__
diff --git a/src/communicator/comm_group_inline_impl.hh b/src/communicator/comm_group_inline_impl.hh
index 099e53d..93e2f5b 100644
--- a/src/communicator/comm_group_inline_impl.hh
+++ b/src/communicator/comm_group_inline_impl.hh
@@ -1,533 +1,541 @@
#include "comm_group.hh"
/* -------------------------------------------------------------------------- */
#include <algorithm>
#include <iterator>
#include <sstream>
/* -------------------------------------------------------------------------- */
__BEGIN_LIBMULTISCALE__
/* -------------------------------------------------------------------------- */
template <typename T,
          typename Enable = std::enable_if_t<not std::is_enum<T>::value>>
auto get_mpi_type() {}
/* -------------------------------------------------------------------------- */
#define MPI_TYPE_MAP(__C_TYPE__, __MPI_TYPE__)                                 \
  template <>                                                                  \
  inline auto get_mpi_type<                                                    \
      __C_TYPE__, std::enable_if_t<not std::is_enum<__C_TYPE__>::value>>() {   \
    return __MPI_TYPE__;                                                       \
  }
/* -------------------------------------------------------------------------- */
MPI_TYPE_MAP(Real, MPI_DOUBLE)
MPI_TYPE_MAP(UInt, MPI_INT)
MPI_TYPE_MAP(char, MPI_CHAR)
MPI_TYPE_MAP(MPIProc, Communicator::mpi_type_processor)
#undef MPI_TYPE_MAP
/* -------------------------------------------------------------------------- */
template <typename BufType,
          bool is_arithmetic =
              std::is_arithmetic<typename BufType::value_type>::value>
struct PackedBuffer {
  using value_type = typename BufType::value_type;
  PackedBuffer(BufType &data) : _data(data){};
  auto mpi_type() { return get_mpi_type<value_type>(); };
  auto *data() { return _data.data(); };
  UInt size() { return _data.size(); }
  void resize(UInt sz) { _data.resize(sz); };
  void unpack(){};
  BufType &_data;
};
/* -------------------------------------------------------------------------- */
template <typename T> class ContainerArray;

template <typename T> struct PackedBuffer<ContainerArray<T>, true> {
  using BufType = ContainerArray<T>;
  using value_type = typename BufType::value_type;
  PackedBuffer(BufType &data) : _data(data){};
  auto mpi_type() { return get_mpi_type<value_type>(); };
  auto *data() { return _data.data(); };
  UInt size() { return _data.size(); }
  void resize(UInt sz) { _data.resize(sz / _data.cols(), _data.cols()); };
  void unpack(){};
  BufType &_data;
};
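// Usage sketch for the type map above: get_mpi_type<T>() resolves a C++ scalar
// type to its MPI datatype at compile time, so the wrappers below never spell
// MPI_DOUBLE or MPI_INT by hand. The function name and buffer are hypothetical
// and assume Real is double, as MPI_TYPE_MAP(Real, MPI_DOUBLE) suggests.
inline void type_map_sketch(MPI_Comm comm) {
  std::vector<Real> values(16, 1.);
  // identical to passing MPI_DOUBLE explicitly
  MPI_Bcast(values.data(), static_cast<int>(values.size()),
            get_mpi_type<Real>(), 0, comm);
}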
/* -------------------------------------------------------------------------- */
template <typename T> void encode_to_sstr(T *var, std::stringstream &sstr) {
  char *ptr = reinterpret_cast<char *>(var);
  for (unsigned long i = 0; i < sizeof(T); ++i)
    sstr << ptr[i];
}

template <typename T> void decode_from_sstr(T *var, std::stringstream &sstr) {
  char *ptr = reinterpret_cast<char *>(var);
  for (unsigned long i = 0; i < sizeof(T); ++i)
    sstr >> ptr[i];
}

template <typename BufType> struct PackedBuffer<BufType, false> {
  PackedBuffer(BufType &data) : _data(data) {
    std::stringstream sstr;
    for (UInt i = 0; i < data.size(); ++i) {
      auto &c = _data[i];
      c.pack([&](auto &var) { encode_to_sstr(&var, sstr); });
    }
    auto str = sstr.str();
    std::copy(str.begin(), str.end(), std::back_inserter(pack_data));
  }

  using value_type = char;

  void unpack() {
    std::string str;
    std::copy(pack_data.begin(), pack_data.end(), std::back_inserter(str));
    std::stringstream sstr(str);
    UInt i = 0;
    while (sstr.tellg() < (long)str.size()) {
      auto &c = _data[i];
      c.unpack([&](auto &var) { decode_from_sstr(&var, sstr); });
      ++i;
    }
  }

  char *data() { return pack_data.data(); };
  UInt size() { return pack_data.size(); }
  void resize(UInt sz) { pack_data.resize(sz); }

  BufType &_data;
  std::vector<char> pack_data;
};
/* -------------------------------------------------------------------------- */
template <typename BufType> decltype(auto) make_pack(BufType &&data) {
  return PackedBuffer<std::decay_t<BufType>>(data);
}
/* -------------------------------------------------------------------------- */
inline CommGroup::CommGroup(const LMID &id, int color, UInt nb_procs)
    : LMObject(id) {
  DUMP("creating comm_group: " << id << "(" << std::hex << this << ")",
       DBG_MESSAGE);
  is_current_proc_in_group = color != MPI_UNDEFINED;
  MPI_Comm_split(MPI_COMM_WORLD, color, lm_my_proc_id, &mpi_comm);

  if (mpi_comm != MPI_COMM_NULL) {
    processors.resize(nb_procs);
    MPI_Comm_group(mpi_comm, &mpi_group);
    MPI_Group world_group;
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    std::vector<int> group_ranks(nb_procs);
    for (UInt i = 0; i < nb_procs; ++i)
      group_ranks[i] = i;
    std::vector<int> absolute_ranks(nb_procs);
    MPI_Group_translate_ranks(mpi_group, nb_procs, group_ranks.data(),
                              world_group, absolute_ranks.data());
    for (UInt i = 0; i < nb_procs; ++i) {
      processors[i].absolute_mpi_rank = absolute_ranks[i];
      processors[i].mpi_rank = group_ranks[i];
      processors[i].id = i;
    }
  }

  if (id == "all" or id == "self")
    return;

  auto &all_group = Communicator::getCommunicator().getGroup("all");
  std::vector<MPIProc> tmp_procs(all_group.size());
  if (mpi_comm != MPI_COMM_NULL) {
    auto rank = this->getMyRank();
    auto &tmp [[gnu::unused]] = processors[rank];
    DUMP("local proc: " << tmp.mpi_rank << " " << tmp.absolute_mpi_rank,
         DBG_INFO);
    all_group.allGather(&processors[rank], 1, tmp_procs.data(),
                        "gather proc information about group " + id);
  } else {
    MPIProc tmp;
    tmp.mpi_rank = -1;
    tmp.absolute_mpi_rank = -1;
    DUMP("local proc: " << tmp.mpi_rank << " " << tmp.absolute_mpi_rank,
         DBG_INFO);
    all_group.allGather(&tmp, 1, tmp_procs.data(),
                        "gather proc information about group " + id);
  }

  if (mpi_comm == MPI_COMM_NULL) {
    UInt cpt = 0;
    for (auto &&p : tmp_procs) {
      if (p.mpi_rank == UInt(-1))
        continue;
      DUMP("proc: " << cpt << " " << p.mpi_rank << " " << p.absolute_mpi_rank,
           DBG_INFO);
      processors.push_back(p);
      ++cpt;
    }
  }

  DUMPBYPROC("created comm_group: " << id, DBG_MESSAGE, 0);
  for (auto &&proc : processors) {
    DUMPBYPROC("member " << proc, DBG_MESSAGE, 0);
  }
}
/* -------------------------------------------------------------------------- */
#define CHECK_MEMBERSHIP_MPI_ROUTINE()                                         \
  if (!this->amIinGroup()) {                                                   \
    LM_FATAL("MPI routine cannot be called if not a member of the group: "     \
             << comment);                                                      \
  }
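// Sketch of the object-packing path (PackedBuffer<BufType, false>): an element
// type only needs pack()/unpack() members that visit each field with the given
// callback; encode_to_sstr/decode_from_sstr then stream the raw bytes. The
// Particle type below is hypothetical, for illustration only.
struct Particle {
  Real position[3];
  UInt id;

  template <typename Func> void pack(Func &&f) {
    for (auto &x : position)
      f(x); // each visited field is byte-serialized by the callback
    f(id);
  }
  template <typename Func> void unpack(Func &&f) {
    for (auto &x : position)
      f(x);
    f(id);
  }
};
// A std::vector<Particle> then routes through the non-arithmetic PackedBuffer:
//   auto pack = make_pack(particles); // serializes into pack.pack_data
//   group.send(particles, /*to=*/1, "send particles");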
/* -------------------------------------------------------------------------- */
template <typename T>
inline int CommGroup::probe(UInt from, const std::string &comment) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  MPI_Status status;
  auto &group = *this;
  auto &proc = group[from];
  DUMP("probing receive " << typeid(T).name() << " from " << proc.mpi_rank
                          << " - seq number " << proc.sequence_number_recv
                          << " for " << comment,
       DBG_INFO);
  MPI_Probe(proc.mpi_rank, proc.sequence_number_recv, this->mpi_comm, &status);
  int nb_tmp;
  MPI_Get_count(&status, get_mpi_type<T>(), &nb_tmp);
  return nb_tmp;
}
/* -------------------------------------------------------------------------- */
inline bool CommGroup::isInGroup(UInt mpi_rank [[gnu::unused]]) const {
  if (this->getID() == "all")
    return true;
  if (this->getID() == "none")
    return false;
  DUMP("testing if " << mpi_rank << " is in " << *this, DBG_DETAIL);
  LM_TOIMPLEMENT;
}
/* -------------------------------------------------------------------------- */
template <typename T>
inline void CommGroup::receive(T *d, UInt nb, UInt from,
                               const std::string &comment [[gnu::unused]]) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  MPI_Status status;
  auto &group = *this;
  auto &proc = group[from];
  using btype = std::remove_pointer_t<decltype(d)>;
  DUMP("receiving " << nb << " " << typeid(T).name() << " from "
                    << proc.mpi_rank << " - seq number "
                    << proc.sequence_number_recv << " for " << comment,
       DBG_INFO);
  MPI_Recv(d, nb, get_mpi_type<btype>(), proc.mpi_rank,
           proc.sequence_number_recv, this->mpi_comm, &status);
  DUMP("received " << nb << " " << typeid(T).name() << " from "
                   << proc.mpi_rank << " - seq number "
                   << proc.sequence_number_recv << " for " << comment,
       DBG_INFO);
  ++proc.sequence_number_recv;
}
/* -------------------------------------------------------------------------- */
template <typename Vec>
inline void CommGroup::receive(Vec &&d, UInt from, const std::string &comment) {
  auto unpack = make_pack(d);
  auto nb = probe<typename decltype(unpack)::value_type>(from, comment);
  unpack.resize(nb);
  receive(unpack.data(), unpack.size(), from, comment);
}
/* -------------------------------------------------------------------------- */
template <typename T>
void CommGroup::send(T *d, UInt nb, UInt dest,
                     const std::string &comment [[gnu::unused]]) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  auto &group = *this;
  auto &proc = group[dest];
  using btype = std::remove_pointer_t<decltype(d)>;
  DUMP("sending " << nb << " " << typeid(T).name() << " to " << proc.mpi_rank
                  << " - seq number " << proc.sequence_number_send << " for "
                  << comment,
       DBG_INFO);
  MPI_Send(d, nb, get_mpi_type<btype>(), proc.mpi_rank,
           proc.sequence_number_send, this->mpi_comm);
  DUMP("sent " << nb << " " << typeid(T).name() << " to " << proc.mpi_rank
               << " - seq number " << proc.sequence_number_send << " for "
               << comment,
       DBG_INFO);
  ++proc.sequence_number_send;
}
/* -------------------------------------------------------------------------- */
template <typename T>
Request CommGroup::isend(T *d, UInt nb, UInt dest,
                         const std::string &comment [[gnu::unused]]) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  Request req;
  auto &group = *this;
  auto &proc = group[dest];
  using btype = std::remove_pointer_t<decltype(d)>;
  DUMP("sending " << nb << " " << typeid(T).name() << " to " << proc.mpi_rank
                  << " - seq number " << proc.sequence_number_send << " for "
                  << comment,
       DBG_INFO);
  MPI_Isend(d, nb, get_mpi_type<btype>(), proc.mpi_rank,
            proc.sequence_number_send, this->mpi_comm, &req.request);
  DUMP("sent " << nb << " " << typeid(T).name() << " to " << proc.mpi_rank
               << " - seq number " << proc.sequence_number_send << " for "
               << comment,
       DBG_INFO);
  ++proc.sequence_number_send;
  return req;
}
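// Point-to-point sketch: the per-peer sequence numbers act as MPI tags, so
// successive messages between the same pair stay ordered and matched without
// explicit tags at the call site. The function and buffer are hypothetical.
inline void exchange_sketch(CommGroup &group) {
  std::vector<Real> buf(8, 0.);
  if (group.getMyRank() == 0)
    group.send(buf.data(), buf.size(), /*to=*/1, "exchange sketch");
  else if (group.getMyRank() == 1)
    group.receive(buf.data(), buf.size(), /*from=*/0, "exchange sketch");
}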
/* -------------------------------------------------------------------------- */
template <typename Vec>
inline void CommGroup::send(Vec &&d, UInt to, const std::string &comment) {
  auto pack = make_pack(d);
  send(pack.data(), pack.size(), to, comment);
}
/* -------------------------------------------------------------------------- */
inline auto getMPIOperator(Operator op) {
  MPI_Op mpi_op;
  switch (op) {
  case OP_SUM:
    mpi_op = MPI_SUM;
    break;
  case OP_MAX:
    mpi_op = MPI_MAX;
    break;
  case OP_MIN:
    mpi_op = MPI_MIN;
    break;
  default:
    LM_FATAL("unknown operator " << op);
  }
  return mpi_op;
}
/* -------------------------------------------------------------------------- */
template <typename T>
void CommGroup::reduce(T *contrib, UInt nb, const std::string &comment,
                       Operator op, UInt root_rank) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  MPI_Op mpi_op = getMPIOperator(op);
  std::vector<T> result(nb);
  MPI_Reduce(&contrib[0], result.data(), nb, get_mpi_type<T>(), mpi_op,
             root_rank, this->mpi_comm);
  std::copy(result.begin(), result.end(), contrib);
}
/* -------------------------------------------------------------------------- */
template <typename T>
void CommGroup::allReduce(T *contrib, UInt nb, const std::string &comment,
                          Operator op) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  MPI_Op mpi_op = getMPIOperator(op);
  std::vector<T> result(nb);
  MPI_Allreduce(&contrib[0], &result[0], nb, get_mpi_type<T>(), mpi_op,
                this->mpi_comm);
  std::copy(result.begin(), result.end(), contrib);
}
/* -------------------------------------------------------------------------- */
template <typename BufType>
inline void CommGroup::allGatherv(BufType &&send_buffer, BufType &&recv_buffer,
                                  std::vector<UInt> &recv_counts,
                                  const std::string &comment) {
  allGatherv(send_buffer.data(), send_buffer.size(), recv_buffer.data(),
             recv_counts, comment);
}
/* -------------------------------------------------------------------------- */
template <typename T>
inline void CommGroup::allGatherv(T *send_buffer [[gnu::unused]],
                                  UInt send_nb [[gnu::unused]],
                                  T *recv_buffer [[gnu::unused]],
                                  std::vector<UInt> &recv_counts
                                  [[gnu::unused]],
                                  const std::string &comment [[gnu::unused]]) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  LM_TOIMPLEMENT;
}
/* -------------------------------------------------------------------------- */
template <typename BufType>
inline void CommGroup::allGather(BufType &&send_buffer, BufType &&recv_buffer,
                                 const std::string &comment) {
  recv_buffer.resize(this->size());
  // each member contributes send_buffer.size() elements
  std::vector<UInt> recv_counts(this->size(), send_buffer.size());
  allGatherv(send_buffer.data(), send_buffer.size(), recv_buffer.data(),
             recv_counts, comment);
}
/* -------------------------------------------------------------------------- */
template <typename T>
inline void CommGroup::allGather(T *send_buffer, UInt nb, T *recv_buffer,
                                 const std::string &comment) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  DUMP("Allgather " << nb << " " << typeid(T).name() << " for " << comment,
       DBG_INFO);
  MPI_Allgather(send_buffer, nb, get_mpi_type<T>(), recv_buffer, nb,
                get_mpi_type<T>(), this->mpi_comm);
  DUMP("Allgather done for " << comment, DBG_INFO);
}
/* -------------------------------------------------------------------------- */
template <typename T>
inline void CommGroup::gather(T *sendbuf, UInt nb, T *recvbuf, UInt root,
                              const std::string &comment) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  MPI_Gather(sendbuf, nb, get_mpi_type<T>(), recvbuf, nb, get_mpi_type<T>(),
             root, this->mpi_comm);
}
/* -------------------------------------------------------------------------- */
template <typename BufType>
inline void CommGroup::gather(BufType &&sendbuf, BufType &&recvbuf, UInt root,
                              const std::string &comment) {
  if (this->getMyRank() == root)
    recvbuf.resize(sendbuf.size() * this->size());
  auto pack = make_pack(sendbuf);
  auto unpack = make_pack(recvbuf);
  gather(pack.data(), pack.size(), unpack.data(), root, comment);
  if (this->getMyRank() == root)
    unpack.unpack();
}
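// Reduction sketch: allReduce overwrites the contribution in place with the
// reduced value on every member, mirroring MPI_Allreduce. OP_SUM is one of the
// Operator values handled by getMPIOperator above; the variable is
// illustrative.
inline void reduce_sketch(CommGroup &group) {
  Real local_energy = 1.5; // this processor's contribution
  group.allReduce(&local_energy, 1, "total energy", OP_SUM);
  // local_energy now holds the sum over all members of the group
}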
/* -------------------------------------------------------------------------- */
inline void CommGroup::printself(std::ostream &os) const {
  os << "Communication Group #" << this->getID();
  os << ", mpi_ID: " << this->mpi_comm << ", size: " << this->size();
}
/* -------------------------------------------------------------------------- */
template <typename BufType>
inline void CommGroup::broadcast(BufType &&buf, UInt root,
                                 const std::string &comment) {
  auto pack = make_pack(buf);
  broadcast(pack.data(), pack.size(), root, comment);
  pack.unpack();
}
/* -------------------------------------------------------------------------- */
template <typename T>
inline void CommGroup::broadcast(T *buf, UInt nb, UInt root,
                                 const std::string &comment) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  DUMP("broadcast " << nb << " " << typeid(T).name() << " from " << root
                    << " for " << comment,
       DBG_INFO);
  MPI_Bcast(buf, nb, get_mpi_type<T>(), root, this->mpi_comm);
  DUMP("done broadcast " << nb << " " << typeid(T).name() << " from " << root
                         << " for " << comment,
       DBG_INFO);
}
/* -------------------------------------------------------------------------- */
template <typename T>
inline void CommGroup::scatter(T *sendbuf, T *recvbuf, UInt nb_recv, UInt root,
                               const std::string &comment) {
  CHECK_MEMBERSHIP_MPI_ROUTINE();
  DUMP("scatter " << nb_recv << " " << typeid(T).name() << " from " << root
                  << " for " << comment,
       DBG_INFO);
  MPI_Scatter(sendbuf, nb_recv, get_mpi_type<T>(), recvbuf, nb_recv,
              get_mpi_type<T>(), root, this->mpi_comm);
  DUMP("done scatter " << nb_recv << " " << typeid(T).name() << " from "
                       << root << " for " << comment,
       DBG_INFO);
}
/* -------------------------------------------------------------------------- */
template <typename BufType>
inline void CommGroup::scatter(BufType &&sendbuf, BufType &&recvbuf, UInt root,
                               const std::string &comment) {
  if (this->getMyRank() == root)
    LM_ASSERT(sendbuf.size() == recvbuf.size() * this->size(),
              "buffers do not have a proper size");
  scatter(sendbuf.data(), recvbuf.data(), recvbuf.size(), root, comment);
}
/* -------------------------------------------------------------------------- */
inline SelfGroup::SelfGroup() : CommGroup("self", 1, 1) {
  auto &proc = processors[0];
  proc.absolute_mpi_rank = lm_my_proc_id;
  proc.mpi_rank = 0;
  is_current_proc_in_group = true;
  mpi_comm = MPI_COMM_SELF;
}
/* -------------------------------------------------------------------------- */
+inline AllGroup::AllGroup() : CommGroup("all", 1, lm_world_size) {
+  // record this processor's own entry in the group table
+  auto &proc = processors[lm_my_proc_id];
+  proc.absolute_mpi_rank = lm_my_proc_id;
+  proc.mpi_rank = lm_my_proc_id;
+  is_current_proc_in_group = true;
+  mpi_comm = MPI_COMM_WORLD;
+}
+/* -------------------------------------------------------------------------- */
__END_LIBMULTISCALE__
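// Sketch of the two built-in groups: AllGroup wraps MPI_COMM_WORLD (every rank
// is a member) and SelfGroup wraps MPI_COMM_SELF (a single-rank group), both
// registered by the Communicator constructor below. A broadcast on "all"
// therefore reaches the whole job; the value is illustrative.
inline void builtin_groups_sketch() {
  auto &all = Communicator::getCommunicator().getGroup("all");
  UInt value = (all.getMyRank() == 0) ? 42 : 0;
  all.broadcast(&value, 1, /*root=*/0, "builtin groups sketch");
  // every rank now sees value == 42
}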
diff --git a/src/communicator/lm_communicator.cc b/src/communicator/lm_communicator.cc
index 0b05b91..22c0452 100644
--- a/src/communicator/lm_communicator.cc
+++ b/src/communicator/lm_communicator.cc
@@ -1,178 +1,171 @@
/**
 * @file communicator.cc
 *
 * @author Guillaume Anciaux
 *
 * @date Fri Mar 08 09:36:00 2013
 *
 * @brief Mother class of LM communicators
 *
 * @section LICENSE
 *
 * Copyright (©) 2010-2011 EPFL (Ecole Polytechnique Fédérale de Lausanne)
 * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 *
 * LibMultiScale is free software: you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or (at your
 * option) any later version.
 *
 * LibMultiScale is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with LibMultiScale. If not, see <http://www.gnu.org/licenses/>.
 *
 */
#include "lm_communicator.hh"
#include "comm_group.hh"
#include "id_manager.hh"
#include "lm_common.hh"
/* -------------------------------------------------------------------------- */
__BEGIN_LIBMULTISCALE__
/* -------------------------------------------------------------------------- */
template <>
std::unique_ptr<IDManager<CommGroup>> IDManager<CommGroup>::static_pointer =
    nullptr;
/* -------------------------------------------------------------------------- */
Communicator &Communicator::getCommunicator() {
  if (!static_pointer)
    createCommunicator();
  Communicator &ref = dynamic_cast<Communicator &>(*static_pointer);
  return ref;
}
/* -------------------------------------------------------------------------- */
void Communicator::createCommunicator() { Communicator::getManager(); }
/* -------------------------------------------------------------------------- */
void Communicator::addGroup(const LMID &id, UInt nb_procs) {
  auto free_procs = this->getNumberFreeProcs();
  if (free_procs < nb_procs) {
    LM_FATAL("group " << id << ": while trying to add " << nb_procs
                      << " procs, there are not enough processors for the "
                      << "required topology! (only " << free_procs
                      << " are available)");
  }
  DUMP("free_procs " << free_procs, DBG_INFO);
  DUMP("nb_procs " << nb_procs, DBG_INFO);

  int color;
  if (lm_my_proc_id < free_procs && lm_my_proc_id >= free_procs - nb_procs)
    color = 0;
  else
    color = MPI_UNDEFINED;

  DUMP("my color " << color, DBG_INFO);
  auto new_group = std::make_shared<CommGroup>(id, color, nb_procs);
  this->addObject(new_group);

#ifndef LM_OPTIMIZED
  auto nb_groups = objects.size();
#endif
  DUMP("Building processor group number " << nb_groups, DBG_INFO);
  DUMP("free_procs " << free_procs << " nb_procs " << nb_procs << " diff "
                     << free_procs - nb_procs << " color " << color,
       DBG_ALL);
  DUMP("group pointer (" << nb_groups << ")=" << new_group, DBG_DETAIL);
  MPI_Barrier(MPI_COMM_WORLD);
  DUMP("for proc " << lm_my_proc_id << ", group " << nb_groups << " built",
       DBG_INFO);
-
-  // UInt proc_id = 0;
-  // for (auto &p : *new_group) {
-  //   p.mpi_rank = proc_id + free_procs;
-  //   p.id = proc_id;
-  //   ++proc_id;
-  // }
}
/* -------------------------------------------------------------------------- */
UInt Communicator::getNbGroups() {
  return Communicator::getCommunicator().objects.size();
}
/* -------------------------------------------------------------------------- */
decltype(Communicator::objects) &Communicator::getGroups() {
  return Communicator::getCommunicator().objects;
}
/* -------------------------------------------------------------------------- */
CommGroup &Communicator::getGroup(const LMID &id) {
  if (lm_world_size == 1)
    return Communicator::getCommunicator().getObject("all");
  return Communicator::getCommunicator().getObject(id);
}
/* -------------------------------------------------------------------------- */
UInt Communicator::getNumberFreeProcs() {
  DUMP("world size is: " << lm_world_size, DBG_INFO);
  int used_procs = 0;
  for (auto &group : objects) {
    if (group.first == "all" or group.first == "self")
      continue;
    DUMP("group " << group.first << ": " << group.second->size(), DBG_INFO);
    used_procs += group.second->size();
  }
  return lm_world_size - used_procs;
}
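// Allocation sketch: addGroup carves each new group from the top of the free
// rank range. With 8 ranks (the built-in "all"/"self" groups do not count),
// a hypothetical addGroup("md", 6) takes ranks 2..7, and a following
// addGroup("fe", 2) takes ranks 0..1. The helper mirrors the color
// computation above and is illustrative only.
inline int color_for(UInt rank, UInt free_procs, UInt nb_procs) {
  return (rank < free_procs && rank >= free_procs - nb_procs) ? 0
                                                              : MPI_UNDEFINED;
}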
/* -------------------------------------------------------------------------- */
void Communicator::waitForPendingComs() {
  DUMP("this routine does nothing: at the moment every communication is "
       "synchronous",
       DBG_INFO);
}
/* -------------------------------------------------------------------------- */
template <typename T> void register_mpi_type(MPI_Datatype &type) {
  int err = MPI_Type_contiguous(sizeof(T), MPI_BYTE, &type);
  if (err != MPI_SUCCESS)
    LM_FATAL("could not create the MPI datatype");
  MPI_Type_commit(&type);
}

Communicator::Communicator() {
  register_mpi_type<MPIProc>(Communicator::mpi_type_processor);
-  auto new_group = std::make_shared<CommGroup>("all", 1, lm_world_size);
+  auto new_group = std::make_shared<AllGroup>();
  this->addObject(new_group);
  auto self_group = std::make_shared<SelfGroup>();
  this->addObject(self_group);
}
/* -------------------------------------------------------------------------- */
Communicator::~Communicator() {}
/* -------------------------------------------------------------------------- */
MPI_Datatype Communicator::mpi_type_processor;
/* -------------------------------------------------------------------------- */
__END_LIBMULTISCALE__
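// Datatype note: register_mpi_type<T> commits T as an opaque run of sizeof(T)
// bytes (MPI_Type_contiguous over MPI_BYTE), which is why MPIProc can travel
// through allGather without a field-by-field MPI_Type_create_struct
// description. This assumes T is trivially copyable with the same layout on
// all ranks, e.g.:
//
//   MPI_Datatype my_type;
//   register_mpi_type<MPIProc>(my_type); // commit once, reuse everywhere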