diff --git a/extra_packages/extra-materials b/extra_packages/extra-materials
index dea972981..413cd3016 160000
--- a/extra_packages/extra-materials
+++ b/extra_packages/extra-materials
@@ -1 +1 @@
-Subproject commit dea9729814e7967a4088ac100749be7f3e0ca6cd
+Subproject commit 413cd301657897dbf57f36e0250da810fd81eb7a
diff --git a/extra_packages/igfem b/extra_packages/igfem
index 078a747c6..fcacdece3 160000
--- a/extra_packages/igfem
+++ b/extra_packages/igfem
@@ -1 +1 @@
-Subproject commit 078a747c6f453ab4e35def4a195d98ec34d7a57b
+Subproject commit fcacdece31fc1c406208b3780b50b623939e8656
diff --git a/src/synchronizer/static_communicator_mpi.cc b/src/synchronizer/static_communicator_mpi.cc
index ec568319d..f6f25a63f 100644
--- a/src/synchronizer/static_communicator_mpi.cc
+++ b/src/synchronizer/static_communicator_mpi.cc
@@ -1,493 +1,492 @@
 /**
  * @file   static_communicator_mpi.cc
  *
  * @author Nicolas Richart
  *
  * @date creation: Sun Sep 26 2010
  * @date last modification: Mon Jul 21 2014
  *
  * @brief  StaticCommunicatorMPI implementation
  *
  * @section LICENSE
  *
  * Copyright (©) 2010-2012, 2014 EPFL (Ecole Polytechnique Fédérale de Lausanne)
  * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
  *
  * Akantu is free software: you can redistribute it and/or modify it under the
  * terms of the GNU Lesser General Public License as published by the Free
  * Software Foundation, either version 3 of the License, or (at your option) any
  * later version.
  *
  * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY
  * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
  * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
  * details.
  *
  * You should have received a copy of the GNU Lesser General Public License
  * along with Akantu. If not, see <http://www.gnu.org/licenses/>.
  *
  */

 /* -------------------------------------------------------------------------- */
 #include <mpi.h>
 /* -------------------------------------------------------------------------- */
 #include "static_communicator_mpi.hh"
 #include "mpi_type_wrapper.hh"
 /* -------------------------------------------------------------------------- */

 __BEGIN_AKANTU__

 MPI_Op MPITypeWrapper::synchronizer_operation_to_mpi_op[_so_null + 1] = {
   MPI_SUM, MPI_MIN, MPI_MAX, MPI_PROD,
   MPI_LAND, MPI_BAND,
   MPI_LOR, MPI_BOR,
   MPI_LXOR, MPI_BXOR,
   MPI_MINLOC, MPI_MAXLOC,
   MPI_OP_NULL
 };

 class CommunicationRequestMPI : public CommunicationRequest {
 public:
   CommunicationRequestMPI(UInt source, UInt dest);
   ~CommunicationRequestMPI();
   MPI_Request * getMPIRequest() { return request; };

 private:
   MPI_Request * request;
 };

 /* -------------------------------------------------------------------------- */
 /* Implementation                                                              */
 /* -------------------------------------------------------------------------- */
 CommunicationRequestMPI::CommunicationRequestMPI(UInt source, UInt dest) :
   CommunicationRequest(source, dest) {
   request = new MPI_Request;
 }

 /* -------------------------------------------------------------------------- */
 CommunicationRequestMPI::~CommunicationRequestMPI() {
   delete request;
 }

 /* -------------------------------------------------------------------------- */
 StaticCommunicatorMPI::StaticCommunicatorMPI(int & argc, char ** & argv) :
   RealStaticCommunicator(argc, argv) {
-  int is_initialized = false;
-  MPI_Initialized(&is_initialized);
-  if (!is_initialized)
+  if(argc != 0) {
     MPI_Init(&argc, &argv);
+  }

   mpi_data = new MPITypeWrapper(*this);
   mpi_data->setMPICommunicator(MPI_COMM_WORLD);
 }

 /* -------------------------------------------------------------------------- */
 StaticCommunicatorMPI::~StaticCommunicatorMPI() {
-  // MPI_Finalize();
+  MPI_Finalize();
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::send(T * buffer, Int size, Int receiver, Int tag) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Send(buffer, size, type, receiver, tag, communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Send.");
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::receive(T * buffer, Int size, Int sender, Int tag) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Status status;
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Recv(buffer, size, type, sender, tag, communicator, &status);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Recv.");
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 CommunicationRequest * StaticCommunicatorMPI::asyncSend(T * buffer, Int size,
                                                         Int receiver, Int tag) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   CommunicationRequestMPI * request = new CommunicationRequestMPI(prank, receiver);
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Isend(buffer, size, type, receiver, tag, communicator,
               request->getMPIRequest());
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Isend.");
   return request;
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 CommunicationRequest * StaticCommunicatorMPI::asyncReceive(T * buffer, Int size,
                                                            Int sender, Int tag) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   CommunicationRequestMPI * request = new CommunicationRequestMPI(sender, prank);
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Irecv(buffer, size, type, sender, tag, communicator,
               request->getMPIRequest());
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Irecv.");
   return request;
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::probe(Int sender, Int tag,
                                   CommunicationStatus & status) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Status mpi_status;
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Probe(sender, tag, communicator, &mpi_status);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Probe.");

   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
   int count;
   MPI_Get_count(&mpi_status, type, &count);

   status.setSource(mpi_status.MPI_SOURCE);
   status.setTag(mpi_status.MPI_TAG);
   status.setSize(count);
 }

 /* -------------------------------------------------------------------------- */
 bool StaticCommunicatorMPI::testRequest(CommunicationRequest * request) {
   MPI_Status status;
   int flag;
   CommunicationRequestMPI * req_mpi = static_cast<CommunicationRequestMPI *>(request);
   MPI_Request * req = req_mpi->getMPIRequest();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Test(req, &flag, &status);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Test.");
   return (flag != 0);
 }

 /* -------------------------------------------------------------------------- */
 void StaticCommunicatorMPI::wait(CommunicationRequest * request) {
   MPI_Status status;
   CommunicationRequestMPI * req_mpi = static_cast<CommunicationRequestMPI *>(request);
   MPI_Request * req = req_mpi->getMPIRequest();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Wait(req, &status);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Wait.");
 }

 /* -------------------------------------------------------------------------- */
 void StaticCommunicatorMPI::waitAll(std::vector<CommunicationRequest *> & requests) {
   MPI_Status status;
   std::vector<CommunicationRequest *>::iterator it;
   for(it = requests.begin(); it != requests.end(); ++it) {
     MPI_Request * req = static_cast<CommunicationRequestMPI *>(*it)->getMPIRequest();
 #if !defined(AKANTU_NDEBUG)
     int ret =
 #endif
       MPI_Wait(req, &status);
     AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Wait.");
   }
 }

 /* -------------------------------------------------------------------------- */
 void StaticCommunicatorMPI::barrier() {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Barrier(communicator);
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::reduce(T * values, int nb_values,
                                    const SynchronizerOperation & op, int root) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Reduce(MPI_IN_PLACE, values, nb_values, type,
                MPITypeWrapper::getMPISynchronizerOperation(op),
                root, communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Allreduce.");
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::allReduce(T * values, int nb_values,
                                       const SynchronizerOperation & op) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Allreduce(MPI_IN_PLACE, values, nb_values, type,
                   MPITypeWrapper::getMPISynchronizerOperation(op),
                   communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Allreduce.");
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::allGather(T * values, int nb_values) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Allgather(MPI_IN_PLACE, nb_values, type, values, nb_values, type,
                   communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Allgather.");
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::allGatherV(T * values, int * nb_values) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   int * displs = new int[psize];
   displs[0] = 0;
   for (int i = 1; i < psize; ++i) {
     displs[i] = displs[i-1] + nb_values[i-1];
   }

   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Allgatherv(MPI_IN_PLACE, *nb_values, type, values, nb_values, displs,
                    type, communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Gather.");

   delete [] displs;
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::gather(T * values, int nb_values, int root) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   T * send_buf = NULL, * recv_buf = NULL;
   if(prank == root) {
     send_buf = (T *) MPI_IN_PLACE;
     recv_buf = values;
   } else {
     send_buf = values;
   }

   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Gather(send_buf, nb_values, type, recv_buf, nb_values, type, root,
                communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Gather.");
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::gatherV(T * values, int * nb_values, int root) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   int * displs = NULL;
   if(prank == root) {
     displs = new int[psize];
     displs[0] = 0;
     for (int i = 1; i < psize; ++i) {
       displs[i] = displs[i-1] + nb_values[i-1];
     }
   }

   T * send_buf = NULL, * recv_buf = NULL;
   if(prank == root) {
     send_buf = (T *) MPI_IN_PLACE;
     recv_buf = values;
   } else send_buf = values;

   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Gatherv(send_buf, *nb_values, type, recv_buf, nb_values, displs, type,
                 root, communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Gather.");

   if(prank == root) {
     delete [] displs;
   }
 }

 /* -------------------------------------------------------------------------- */
 template<typename T>
 void StaticCommunicatorMPI::broadcast(T * values, int nb_values, int root) {
   MPI_Comm communicator = mpi_data->getMPICommunicator();
   MPI_Datatype type = MPITypeWrapper::getMPIDatatype<T>();
 #if !defined(AKANTU_NDEBUG)
   int ret =
 #endif
     MPI_Bcast(values, nb_values, type, root, communicator);
   AKANTU_DEBUG_ASSERT(ret == MPI_SUCCESS, "Error in MPI_Gather.");
 }

 /* -------------------------------------------------------------------------- */
 int StaticCommunicatorMPI::getMaxTag() {
   return MPI_TAG_UB;
 }

 /* -------------------------------------------------------------------------- */
 int StaticCommunicatorMPI::getMinTag() {
   return 0;
 }

 /* -------------------------------------------------------------------------- */
 // template<typename T>
 // MPI_Datatype StaticCommunicatorMPI::getMPIDatatype() {
 //   return MPI_DATATYPE_NULL;
 // }

 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<char>() { return MPI_CHAR; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<float>() { return MPI_FLOAT; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<double>() { return MPI_DOUBLE; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<long double>() { return MPI_LONG_DOUBLE; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<int>() { return MPI_INT; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<unsigned int>() { return MPI_UNSIGNED; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<long>() { return MPI_LONG; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<unsigned long>() { return MPI_UNSIGNED_LONG; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<long long>() { return MPI_LONG_LONG; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype<unsigned long long>() { return MPI_UNSIGNED_LONG_LONG; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype< SCMinMaxLoc<double, int> >() { return MPI_DOUBLE_INT; }
 template<>
 MPI_Datatype MPITypeWrapper::getMPIDatatype< SCMinMaxLoc<float, int> >() { return MPI_FLOAT_INT; }

 /* -------------------------------------------------------------------------- */
 /* Template instantiation                                                      */
 /* -------------------------------------------------------------------------- */
 #define AKANTU_MPI_COMM_INSTANTIATE(T)                                  \
   template void StaticCommunicatorMPI::send<T>   (T * buffer, Int size, Int receiver, Int tag); \
   template void StaticCommunicatorMPI::receive<T>(T * buffer, Int size, Int sender, Int tag); \
   template CommunicationRequest * StaticCommunicatorMPI::asyncSend<T>   (T * buffer, Int size, Int receiver, Int tag); \
   template CommunicationRequest * StaticCommunicatorMPI::asyncReceive<T>(T * buffer, Int size, Int sender, Int tag); \
   template void StaticCommunicatorMPI::probe<T>(Int sender, Int tag, CommunicationStatus & status); \
   template void StaticCommunicatorMPI::allGather<T> (T * values, int nb_values); \
   template void StaticCommunicatorMPI::allGatherV<T>(T * values, int * nb_values); \
   template void StaticCommunicatorMPI::gather<T> (T * values, int nb_values, int root); \
   template void StaticCommunicatorMPI::gatherV<T>(T * values, int * nb_values, int root); \
   template void StaticCommunicatorMPI::broadcast<T>(T * values, int nb_values, int root); \
   template void StaticCommunicatorMPI::allReduce<T>(T * values, int nb_values, const SynchronizerOperation & op);

 AKANTU_MPI_COMM_INSTANTIATE(Real);
 AKANTU_MPI_COMM_INSTANTIATE(UInt);
 AKANTU_MPI_COMM_INSTANTIATE(Int);
 AKANTU_MPI_COMM_INSTANTIATE(char);

 template void StaticCommunicatorMPI::send<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * buffer, Int size, Int receiver, Int tag);
 template void StaticCommunicatorMPI::receive<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * buffer, Int size, Int sender, Int tag);
 template CommunicationRequest * StaticCommunicatorMPI::asyncSend<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * buffer, Int size, Int receiver, Int tag);
 template CommunicationRequest * StaticCommunicatorMPI::asyncReceive<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * buffer, Int size, Int sender, Int tag);
 template void StaticCommunicatorMPI::probe<SCMinMaxLoc<Real, int> >(Int sender, Int tag, CommunicationStatus & status);
 template void StaticCommunicatorMPI::allGather<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * values, int nb_values);
 template void StaticCommunicatorMPI::allGatherV<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * values, int * nb_values);
 template void StaticCommunicatorMPI::gather<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * values, int nb_values, int root);
 template void StaticCommunicatorMPI::gatherV<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * values, int * nb_values, int root);
 template void StaticCommunicatorMPI::broadcast<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * values, int nb_values, int root);
 template void StaticCommunicatorMPI::allReduce<SCMinMaxLoc<Real, int> >(SCMinMaxLoc<Real, int> * values, int nb_values, const SynchronizerOperation & op);

 #if AKANTU_INTEGER_SIZE > 4
 AKANTU_MPI_COMM_INSTANTIATE(int);
 #endif

 __END_AKANTU__
diff --git a/test/test_python_interface/test_multiple_init.py b/test/test_python_interface/test_multiple_init.py
index 3e6360a58..7efa1e65f 100644
--- a/test/test_python_interface/test_multiple_init.py
+++ b/test/test_python_interface/test_multiple_init.py
@@ -1,62 +1,56 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # ===============================================================================
 # @file   test_multiple_init.py
 #
 # @author Fabian Barras
 #
 # @date creation: Tue Jan 5 2016
 #
 # @brief  Testing multiple initialize calls through Python
 #
 # @section LICENSE
 #
 # Copyright (©) 2014 EPFL (Ecole Polytechnique Fédérale de Lausanne)
 # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 #
 # Akantu is free software: you can redistribute it and/or modify it under the
 # terms of the GNU Lesser General Public License as published by the Free
 # Software Foundation, either version 3 of the License, or (at your option) any
 # later version.
 #
 # Akantu is distributed in the hope that it will be useful, but WITHOUT ANY
 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 # details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with Akantu. If not, see <http://www.gnu.org/licenses/>.
 #
 # ===============================================================================

 import sys
 import os

-from mpi4py import MPI
-
-comm = MPI.COMM_WORLD
-rank = comm.Get_rank()
-
 sys.path.append(sys.argv[1]+'/python/')
 import akantu as aka

 os.system('gmsh -order 2 -2 -o mesh_dcb_2d.msh mesh_dcb_2d.geo')

 print 'First initialisation'
 aka.initialize('input_test.dat')
 mesh = aka.Mesh(2)
 mesh.read('mesh_dcb_2d.msh')
 model = aka.SolidMechanicsModelCohesive(mesh)
 model.initFull(aka.SolidMechanicsModelCohesiveOptions(aka._static))
 del model
 del mesh
 aka.finalize()

 print 'Second initialisation'
 aka.initialize('input_test.dat')
 mesh = aka.Mesh(2)
 mesh.read('mesh_dcb_2d.msh')
 model = aka.SolidMechanicsModelCohesive(mesh)
 model.initFull(aka.SolidMechanicsModelCohesiveOptions(aka._static))
 del model
 del mesh
 aka.finalize()

-MPI.Finalize()
 print 'All right'
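
The C++ hunk above changes only two things: MPI_Init is now called when the caller forwards command-line arguments (argc != 0) instead of checking MPI_Initialized, and MPI_Finalize is now called unconditionally in the destructor. The sketch below is a minimal, self-contained illustration of that guarded init/finalize pattern in plain MPI; it is not part of the patch and does not use the Akantu API, and the class name GuardedMPI is invented purely for the example.

    // Minimal sketch (assumes a working MPI installation; build with mpicxx).
    #include <mpi.h>

    class GuardedMPI {
    public:
      GuardedMPI(int & argc, char ** & argv) {
        // Mirrors the patched constructor: initialize MPI only when the
        // caller actually forwarded command-line arguments.
        if (argc != 0)
          MPI_Init(&argc, &argv);
      }
      ~GuardedMPI() {
        // Mirrors the patched destructor: finalize when the object is destroyed.
        MPI_Finalize();
      }
    };

    int main(int argc, char ** argv) {
      GuardedMPI guard(argc, argv); // argc >= 1 in main, so MPI_Init runs
      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      return 0;                     // guard's destructor calls MPI_Finalize
    }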