Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F75886917
compute.cc
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Sun, Aug 4, 22:08
Size
12 KB
Mime Type
text/x-c++
Expires
Tue, Aug 6, 22:08 (2 d)
Engine
blob
Format
Raw Data
Handle
19626503
Attached To
rLIBMULTISCALE LibMultiScale
compute.cc
View Options
/**
* @file compute.cc
*
* @author Guillaume Anciaux <guillaume.anciaux@epfl.ch>
* @author Jaehyun Cho <jaehyun.cho@epfl.ch>
* @author Moseley Philip Arthur <philip.moseley@epfl.ch>
*
* @date Wed Jul 09 21:59:47 2014
*
 * @brief Base class of all computes
*
* @section LICENSE
*
* Copyright (©) 2010-2011 EPFL (Ecole Polytechnique Fédérale de Lausanne)
* Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
*
* LibMultiScale is free software: you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License as published by the Free
* Software Foundation, either version 3 of the License, or (at your option) any
* later version.
*
* LibMultiScale is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with LibMultiScale. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "lm_common.hh"
#include "lib_dumper.hh"
#include "lib_filter.hh"
#include "lib_stimulation.hh"
#include "filter_manager.hh"
#include "action_manager.hh"
#include "factory_multiscale_inline_impl.hh"
#include "lib_md.hh"
#include "lib_continuum.hh"
#include "lib_dd.hh"
/* -------------------------------------------------------------------------- */
__BEGIN_LIBMULTISCALE__
/* -------------------------------------------------------------------------- */
template <typename _Input>
void Compute<_Input>::connect(FilterManager & f){
  // Register this compute with the filter manager so that it becomes
  // part of the filter pipeline.
  using self = Compute<_Input>;
  f.create<self>(*this);
}
/* -------------------------------------------------------------------------- */
template <typename _Input>
void Compute<_Input>::connect(ActionManager & f){
  // Register this compute with the action manager (same mechanism as
  // for the filter manager).
  using self = Compute<_Input>;
  f.create<self>(*this);
}
/* -------------------------------------------------------------------------- */
// Specialization for the null component: still registered with the
// filter manager so bookkeeping stays uniform.
template <>
void Compute<ComponentNull>::connect(FilterManager & f){
  using self = Compute<ComponentNull>;
  f.create<self>(*this);
}
/* -------------------------------------------------------------------------- */
// Specialization for the null component: still registered with the
// action manager so bookkeeping stays uniform.
template <>
void Compute<ComponentNull>::connect(ActionManager & f){
  using self = Compute<ComponentNull>;
  f.create<self>(*this);
}
/* -------------------------------------------------------------------------- */
// UInt Compute::computeTotalNbEntries(){
// UInt nb_total_dofs = 0;
// UInt nb_local_dofs = this->nbElem();
// Communicator & comm = Communicator::getCommunicator();
// CommGroup group = this->getCommGroup();
// UInt nb_procs = comm.getNBprocsOnGroup(group);
// UInt my_rank = comm.groupRank(lm_my_proc_id, group);
// std::vector<UInt> nb_local_per_proc(nb_procs);
// comm.gather(&nb_local_dofs,1,&nb_local_per_proc[0],0,group);
// DUMP("Gather done",DBG_INFO);
// if (my_rank == 0){
// for (UInt i = 0 ; i < nb_procs ; ++i){
// DUMP ("nb_local[" << i << "]=" << nb_local_per_proc[i] << " " << nb_local_dofs,DBG_INFO);
// nb_total_dofs += nb_local_per_proc[i];
// }
// }
// return nb_total_dofs;
// }
// /* -------------------------------------------------------------------------- */
// void Compute::reallocBuffers(UInt stride,Cont & cont){
// bool need_reallocate = false;
// if (allocated_size < cont.nbElem() && cont.nbElem() != 0)
// need_reallocate = true;
// if (need_reallocate){
// // allocate the storage arrays
// allocated_size = nb_local_dofs;
// DUMP("(re)allocating for size = " << nb_local_dofs,DBG_INFO);
// if (data != NULL)
// data = (Real*)realloc(data,nb_local_dofs*sizeof(Real)*stride);
// else{
// data = (Real*)malloc(nb_local_dofs*sizeof(Real)*stride);
// memset(data,0,sizeof(Real)*nb_local_dofs*stride);
// }
// }
// if (lm_my_proc_id == 0){
// // switch the pointer to the local data
// data_par_proc[0] = data;
// for (UInt i = 1 ; i < lm_world_size ; ++i){
// if (nb_local_par_proc[i] == 0) continue;
// if (data_par_proc[i] != NULL)
// data_par_proc[i] = (Real *)realloc(data_par_proc[i],sizeof(Real)*nb_local_par_proc[i]*stride);
// else{
// data_par_proc[i] = (Real *)malloc(sizeof(Real)*nb_local_par_proc[i]*stride);
// memset(data_par_proc[i],0,sizeof(Real)*nb_local_par_proc[i]*stride);
// }
// }
// }
// }
/* -------------------------------------------------------------------------- */
/**
 * Sum the local element counts of all processors in this compute's
 * communication group and return the total.
 * NOTE(review): root_rank is never used in this body — confirm whether
 * comm.reduce targets a root internally (result then only valid there)
 * or behaves as an all-reduce; callers in this file use the value on
 * every rank (see gatherAllData).
 */
UInt ComputeInterface::getTotalNbData(UInt root_rank){
Communicator & comm = Communicator::getCommunicator();
CommGroup group = this->getCommGroup();
// local contribution, overwritten in place by the reduction
UInt nb_data = this->nbElem();
comm.reduce(&nb_data,1,group,"reduce total number of data element",OP_SUM);
return nb_data;
}
/* -------------------------------------------------------------------------- */
/**
 * Gather the data of every processor of the communication group onto
 * processor root_rank (rank within the group). Collective: every group
 * member must call it. Returns data_gather, which is only filled on
 * root_rank; other ranks get it back unchanged.
 */
std::vector<Real> & ComputeInterface::gatherAllData(UInt root_rank){
Communicator & comm = Communicator::getCommunicator();
CommGroup group = this->getCommGroup();
//CommGroup group = comm.getCommGroup();
UInt my_rank = comm.groupRank(lm_my_proc_id, group);
UInt nb_data = this->nbElem();
std::vector<UInt> nb_data_per_proc;
#ifndef LM_OPTIMIZED
// debug builds: compute the expected global size for the consistency
// check below, then synchronize so all ranks enter the exchange together
UInt total_data = this->getTotalNbData(root_rank);
comm.synchronize(group);
#endif //LM_OPTIMIZED
// record that a gather took place and who the root was
gather_flag = true;
gather_root_proc = root_rank;
if (root_rank == my_rank){
// prepare the array holding the per-processor element counts
UInt nb_procs = comm.getNBprocsOnGroup(group);
DUMP("receive " << nb_procs << " procs",DBG_INFO);
DUMP("my rank is " << my_rank,DBG_INFO);
nb_data_per_proc.resize(nb_procs);
// collect each rank's count (own count copied directly)
for (UInt p = 0; p < nb_procs; ++p) {
if (p == my_rank) nb_data_per_proc[p] = nb_data;
else {
DUMP("receive from proc " << p,DBG_INFO);
comm.receive(&nb_data_per_proc[p],1,p,group,"gatherData: receive number");
}
}
//compute total size of the gathered data
UInt tot_size = 0;
for (UInt p = 0; p < nb_procs; ++p) {
DUMP("nb_data_per_proc[" << p << "]=" << nb_data_per_proc[p],DBG_INFO);
tot_size += nb_data_per_proc[p];
}
#ifndef LM_OPTIMIZED
LM_ASSERT(total_data == tot_size, "mismatched of global sizes");
#endif //LM_OPTIMIZED
//resize the receiving buffer
data_gather.resize(tot_size);
// receive the data rank by rank, packing each chunk contiguously;
// ranks announcing zero elements send nothing (matches the sender branch)
UInt offset = 0;
for (UInt p = 0; p < nb_procs; ++p) {
//if p is my_rank/root_rank copy the data
if (p == my_rank) {
for (UInt i = 0; i < nb_data; ++i) {
data_gather[offset+i] = (*this)[i];
}
}
// else receive from distant proc
else if (nb_data_per_proc[p]) {
comm.receive(&data_gather[offset],nb_data_per_proc[p],p,group,"gatherData: receive data");
}
//increment the offset
offset += nb_data_per_proc[p];
}
}
else {
DUMP("send my nb_data = " << nb_data,DBG_INFO);
// send the amount of data I have
comm.send(&nb_data,1,root_rank,group,"gatherData: send number");
// if there is data to be sent : do so
if (nb_data != 0)
comm.send(static_cast<std::vector<Real>& >(*this),root_rank,group,"gatherData: send data");
}
return data_gather;
}
/* -------------------------------------------------------------------------- */
/**
 * Point-to-point transfer: move the data held by processor source_rank
 * to processor root_rank (ranks within this compute's communication
 * group). On root_rank the received values end up in data_gather,
 * returned by reference; when source and root coincide the local data
 * itself is returned. Other ranks are not involved.
 */
std::vector<Real> & ComputeInterface::gatherData(UInt source_rank, UInt root_rank){
  Communicator & comm = Communicator::getCommunicator();
  CommGroup group = this->getCommGroup();
  UInt my_rank = comm.groupRank(lm_my_proc_id, group);
  UInt nb_data = this->nbElem();
  if (root_rank == my_rank){
    // nothing to transfer: the requested data is already local
    if (source_rank == root_rank) return *this;
    // first learn how many values the source holds
    comm.receive(&nb_data,1,source_rank,group,"gatherData: receive number");
    if (nb_data != 0) {
      // BUGFIX: size the buffer from the count just received, not from
      // this->nbElem() — the root's own local size is unrelated to the
      // source's and previously produced a wrongly-sized buffer.
      data_gather.resize(nb_data);
      comm.receive(data_gather,source_rank,group,"gatherData: receive data");
    }
  }
  else if (source_rank == my_rank){
    // announce the amount of data, then ship it if non-empty
    comm.send(&nb_data,1,root_rank,group,"gatherData: send number");
    if (nb_data != 0){
      comm.send(static_cast<std::vector<Real>& >(*this),root_rank,group,"gatherData: send data");
    }
  }
  return data_gather;
}
/* -------------------------------------------------------------------------- */
std::vector<Real> & ComputeInterface::allGatherAllData(){
CommGroup group = this->getCommGroup();
UInt localSize = this->nbElem();
Communicator &comm = Communicator::getCommunicator();
MPI_Comm mpiComm = comm.getMpiGroup(group);
UInt np = comm.getNBprocsOnGroup(group);
rcnts.resize(np);
std::vector<int> rcntscnts(np,1), displs(np);
for(UInt i=0; i<np; ++i) displs[i] = i;
MPI_Allgatherv(&localSize, 1, MPI_INT, &rcnts[0],
&rcntscnts[0], &displs[0], MPI_INT, mpiComm);
UInt globalSize = 0;
for(UInt i=0; i<np; ++i){
displs[i] = globalSize;
globalSize += rcnts[i];
}
data_gather.resize(globalSize);
MPI_Allgatherv(&static_cast<std::vector<Real>& >(*this)[0], localSize, MPI_DOUBLE,
&data_gather[0], &rcnts[0], &displs[0], MPI_DOUBLE, mpiComm);
return data_gather;
}
/* -------------------------------------------------------------------------- */
/**
 * Build a named compute plugged on the component d.
 */
template <typename _Input>
Compute<_Input>::Compute(const std::string & name, ComponentLMInterface & d)
    : ComponentLMTemplate<ComponentLMIn<_Input>, ComputeOutput>(d) {
  this->setID(name);
}
/* -------------------------------------------------------------------------- */
/**
 * Build a named compute plugged on the component d, with an explicit
 * spatial dimension.
 */
template <typename _Input>
Compute<_Input>::Compute(const std::string & name, ComponentLMInterface & d, UInt dim)
    : ComponentLMTemplate<ComponentLMIn<_Input>, ComputeOutput>(d, dim) {
  this->setID(name);
}
/* -------------------------------------------------------------------------- */
/**
 * Build a named, unplugged compute with an explicit spatial dimension.
 */
template <typename _Input>
Compute<_Input>::Compute(const std::string & name, UInt dim)
    : ComponentLMTemplate<ComponentLMIn<_Input>, ComputeOutput>(dim) {
  this->setID(name);
}
/* -------------------------------------------------------------------------- */
// Nothing to release explicitly: members clean up through their own
// destructors.
template <typename _Input>
Compute<_Input>::~Compute() = default;
/* -------------------------------------------------------------------------- */
/**
 * Visitor entry point: rebuild this compute from the output of obj,
 * unless the output's release shows we are already up to date.
 */
template <typename _Input>
void Compute<_Input>::visit(ComponentLMOut<_Input> & obj){
  // propagate container layout information first
  this->copyContainerInfo(obj.getOutput());
  // skip the rebuild when the upstream output carries a valid release
  // we have already incorporated
  const bool release_is_valid = (obj.getOutput().getRelease() != UINT_MAX);
  if (release_is_valid && obj.getOutput().getRelease() <= this->getRelease())
    return;
  this->copyReleaseInfo(obj);
  // only processors belonging to the output's communication group build
  Communicator & communicator = Communicator::getCommunicator();
  CommGroup output_group = obj.getOutput().getCommGroup();
  if (communicator.amIinGroup(output_group))
    this->build(obj);
}
/* -------------------------------------------------------------------------- */
// ComponentNull carries no data: visiting it is a no-op.
template <>
void Compute<ComponentNull>::visit(ComponentLMOut<ComponentNull> &){
}
/* -------------------------------------------------------------------------- */
/**
 * Rebuild this compute by dispatching through its plugged input
 * (visitor pattern: the input calls back into visit()).
 */
template <typename _Input>
void Compute<_Input>::build(){
  // without a plugged input there is nothing to traverse
  if (!this->input) {
    DUMP("this filter has no input",DBG_WARNING);
    return;
  }
  this->input->accept(*this);
}
/* -------------------------------------------------------------------------- */
/**
 * Rebuild this compute directly from obj, bypassing the visitor
 * dispatch. Fails fatally when obj has no valid communication group.
 */
template <typename _Input>
void Compute<_Input>::manualBuild(_Input & obj){
  this->copyContainerInfo(obj);
  // already up to date with respect to obj's release? nothing to do
  const bool release_is_valid = (obj.getRelease() != UINT_MAX);
  if (release_is_valid && obj.getRelease() <= this->getRelease())
    return;
  this->copyReleaseInfo(obj);
  Communicator & communicator = Communicator::getCommunicator();
  CommGroup obj_group = obj.getCommGroup();
  // an invalid group id means parallel setup was never performed on obj
  if (obj_group.getID() == CommGroup::id_invalid)
    LM_FATAL("invalid group: this object was not correctly configured for parallelism");
  if (communicator.amIinGroup(obj_group))
    this->build(obj);
}
/* -------------------------------------------------------------------------- */
// ComponentNull carries no data: a manual build is a no-op.
template <>
void Compute<ComponentNull>::manualBuild(ComponentNull &){
}
/* -------------------------------------------------------------------------- */
// Explicit template instantiations ------------------------------------------
// For each model tuple x in a Boost.PP sequence, instantiate Compute over
// that model's subset and point containers (BOOST_PP_TUPLE_ELEM(3,0,x)
// extracts the model type from the 3-element tuple).
#define BOOST_INSTANCIATE_COMPUTE_CLASS_TEMPLATE(r,data,x) \
template class Compute<BOOST_PP_TUPLE_ELEM(3,0,x)::ContainerSubset>; \
template class Compute<BOOST_PP_TUPLE_ELEM(3,0,x)::ContainerPoints>;
// Expand the instantiation macro over every model of the given sequence.
#define DECLARE_COMPUTE_CLASS(list) \
BOOST_PP_SEQ_FOR_EACH(BOOST_INSTANCIATE_COMPUTE_CLASS_TEMPLATE,"",list)
/* -------------------------------------------------------------------------- */
DECLARE_COMPUTE_CLASS(LIST_ATOM_MODEL);
DECLARE_COMPUTE_CLASS(LIST_CONTINUUM_MODEL);
DECLARE_COMPUTE_CLASS(LIST_DD_MODEL);
// Additional direct instantiations for non-model input types.
template class Compute<ComputeInterface>;
template class Compute<ContainerArray<RefPointData> >;
template class Compute<ComponentNull >;
template class Compute<ContainerGenericMesh>;
__END_LIBMULTISCALE__
Event Timeline
Log In to Comment