diff --git a/python/py_model.cc b/python/py_model.cc index ed0dff0cb..d8b90a3f1 100644 --- a/python/py_model.cc +++ b/python/py_model.cc @@ -1,96 +1,96 @@ /* -------------------------------------------------------------------------- */ #include "py_aka_array.hh" /* -------------------------------------------------------------------------- */ #include #include #include /* -------------------------------------------------------------------------- */ #include #include #include /* -------------------------------------------------------------------------- */ namespace py = pybind11; /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ void register_model(py::module & mod) { py::class_(mod, "SparseMatrix") .def("getMatrixType", &SparseMatrix::getMatrixType) .def("size", &SparseMatrix::size) .def("clear", &SparseMatrix::clear) .def("saveProfile", &SparseMatrix::saveProfile) - .def("saveMatrix", &SparseMatrix::saveMatrix) + .def("saveMatrix", &SparseMatrix::saveMatrix); + + py::class_(mod, "SparseMatrixAIJ") + .def("getIRN", &SparseMatrixAIJ::getIRN) + .def("getJCN", &SparseMatrixAIJ::getJCN) + .def("getA", &SparseMatrixAIJ::getA) .def( - "add", [](SparseMatrix & self, UInt i, UInt j) { self.add(i, j); }, + "add", [](SparseMatrixAIJ & self, UInt i, UInt j) { self.add(i, j); }, "Add entry in the profile") .def( "add", - [](SparseMatrix & self, UInt i, UInt j, Real value) { + [](SparseMatrixAIJ & self, UInt i, UInt j, Real value) { self.add(i, j, value); }, "Add the value to the matrix") - .def("__call__", [](const SparseMatrix & self, UInt i, UInt j) { + .def("__call__", [](const SparseMatrixAIJ & self, UInt i, UInt j) { return self(i, j); }); - py::class_(mod, "SparseMatrixAIJ") - .def("getIRN", &SparseMatrixAIJ::getIRN) - .def("getJCN", &SparseMatrixAIJ::getJCN) - .def("getA", &SparseMatrixAIJ::getA); - py::class_(mod, "DOFManager") .def("getMatrix", &DOFManager::getMatrix, py::return_value_policy::reference) .def( "getNewMatrix", [](DOFManager & self, const std::string & name, const std::string & matrix_to_copy_id) -> decltype(auto) { return self.getNewMatrix(name, matrix_to_copy_id); }, py::return_value_policy::reference); py::class_(mod, "NonLinearSolver") .def( "set", [](NonLinearSolver & self, const std::string & id, const Real & val) { if (id == "max_iterations") self.set(id, int(val)); else self.set(id, val); }) .def("set", [](NonLinearSolver & self, const std::string & id, const SolveConvergenceCriteria & val) { self.set(id, val); }); py::class_(mod, "ModelSolver", py::multiple_inheritance()) .def("getNonLinearSolver", (NonLinearSolver & (ModelSolver::*)(const ID &)) & ModelSolver::getNonLinearSolver, py::arg("solver_id") = "", py::return_value_policy::reference) .def("solveStep", [](ModelSolver & self) { self.solveStep(); }) .def("solveStep", [](ModelSolver & self, const ID & solver_id) { self.solveStep(solver_id); }); py::class_(mod, "Model", py::multiple_inheritance()) .def("setBaseName", &Model::setBaseName) .def("getFEEngine", &Model::getFEEngine, py::arg("name") = "", py::return_value_policy::reference) .def("getFEEngineBoundary", &Model::getFEEngine, py::arg("name") = "", py::return_value_policy::reference) .def("addDumpFieldVector", &Model::addDumpFieldVector) .def("addDumpField", &Model::addDumpField) .def("setBaseNameToDumper", &Model::setBaseNameToDumper) .def("addDumpFieldVectorToDumper", &Model::addDumpFieldVectorToDumper) 
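A note on the binding hunk above: the `add` and `__call__` wrappers move from the SparseMatrix binding onto SparseMatrixAIJ, and the two C++ `add` overloads stay exposed through separate lambdas so pybind11 can dispatch on the Python call signature. A minimal standalone sketch of that idiom (toy class and names, not the Akantu bindings):

    // Toy pybind11 module illustrating overload binding through lambdas.
    #include <pybind11/pybind11.h>
    #include <utility>
    #include <vector>
    namespace py = pybind11;

    struct Matrix {
      // stand-ins for the real profile/value storage
      void add(unsigned i, unsigned j) { profile.push_back({i, j}); }
      void add(unsigned i, unsigned j, double value) { add(i, j); sum += value; }
      double operator()(unsigned /*i*/, unsigned /*j*/) const { return sum; }
      std::vector<std::pair<unsigned, unsigned>> profile;
      double sum{0.};
    };

    PYBIND11_MODULE(example, mod) {
      py::class_<Matrix>(mod, "Matrix")
          .def(py::init<>())
          .def(
              "add", [](Matrix & self, unsigned i, unsigned j) { self.add(i, j); },
              "Add entry in the profile")
          .def(
              "add",
              [](Matrix & self, unsigned i, unsigned j, double value) {
                self.add(i, j, value);
              },
              "Add the value to the matrix")
          .def("__call__", [](const Matrix & self, unsigned i, unsigned j) {
            return self(i, j);
          });
    }

Wrapping each overload in its own lambda avoids explicit member-function-pointer casts and keeps the bound signature unambiguous on the Python side.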
.def("addDumpFieldToDumper", &Model::addDumpFieldToDumper) .def("dump", &Model::dump) .def("initNewSolver", &Model::initNewSolver) .def("getDOFManager", &Model::getDOFManager, py::return_value_policy::reference); } } // namespace akantu diff --git a/src/common/aka_common.hh b/src/common/aka_common.hh index dccfa2371..416c76ca5 100644 --- a/src/common/aka_common.hh +++ b/src/common/aka_common.hh @@ -1,643 +1,644 @@ /** * @file aka_common.hh * * @author Guillaume Anciaux * @author Nicolas Richart * * @date creation: Mon Jun 14 2010 * @date last modification: Mon Feb 12 2018 * * @brief common type descriptions for akantu * * @section LICENSE * * Copyright (©) 2010-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * * @section DESCRIPTION * * All common things to be included in the projects files * */ /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_COMMON_HH__ #define __AKANTU_COMMON_HH__ #include "aka_compatibilty_with_cpp_standard.hh" /* -------------------------------------------------------------------------- */ #define __BEGIN_AKANTU_DUMPER__ namespace dumper { #define __END_AKANTU_DUMPER__ } /* -------------------------------------------------------------------------- */ #if defined(WIN32) #define __attribute__(x) #endif /* -------------------------------------------------------------------------- */ #include "aka_config.hh" #include "aka_error.hh" #include "aka_safe_enum.hh" /* -------------------------------------------------------------------------- */ #include #include #include #include #include #include #include /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ /* Constants */ /* -------------------------------------------------------------------------- */ namespace { __attribute__((unused)) constexpr UInt _all_dimensions{ std::numeric_limits::max()}; #ifdef AKANTU_NDEBUG __attribute__((unused)) constexpr Real REAL_INIT_VALUE{0.}; #else __attribute__((unused)) constexpr Real REAL_INIT_VALUE{ std::numeric_limits::quiet_NaN()}; #endif } // namespace /* -------------------------------------------------------------------------- */ /* Common types */ /* -------------------------------------------------------------------------- */ using ID = std::string; using MemoryID = UInt; } // namespace akantu /* -------------------------------------------------------------------------- */ #include "aka_enum_macros.hh" /* -------------------------------------------------------------------------- */ #include "aka_element_classes_info.hh" namespace akantu { /* -------------------------------------------------------------------------- */ /* Mesh/FEM/Model types */ /* -------------------------------------------------------------------------- */ /// small 
help to use names for directions enum SpatialDirection { _x = 0, _y = 1, _z = 2 }; /// enum MeshIOType type of mesh reader/writer enum MeshIOType { _miot_auto, ///< Auto guess of the reader to use based on the extension _miot_gmsh, ///< Gmsh files _miot_gmsh_struct, ///< Gsmh reader with reintpretation of elements has /// structures elements _miot_diana, ///< TNO Diana mesh format _miot_abaqus ///< Abaqus mesh format }; /// enum MeshEventHandlerPriority defines relative order of execution of /// events enum EventHandlerPriority { _ehp_highest = 0, _ehp_mesh = 5, _ehp_fe_engine = 9, _ehp_synchronizer = 10, _ehp_dof_manager = 20, _ehp_model = 94, _ehp_non_local_manager = 100, _ehp_lowest = 100 }; // clang-format off #define AKANTU_MODEL_TYPES \ (model) \ (solid_mechanics_model) \ (solid_mechanics_model_cohesive) \ (heat_transfer_model) \ (structural_mechanics_model) \ (embedded_model) // clang-format on /// enum ModelType defines which type of physics is solved AKANTU_CLASS_ENUM_DECLARE(ModelType, AKANTU_MODEL_TYPES) AKANTU_CLASS_ENUM_OUTPUT_STREAM(ModelType, AKANTU_MODEL_TYPES) AKANTU_CLASS_ENUM_INPUT_STREAM(ModelType, AKANTU_MODEL_TYPES) /// enum AnalysisMethod type of solving method used to solve the equation of /// motion enum AnalysisMethod { _static = 0, _implicit_dynamic = 1, _explicit_lumped_mass = 2, _explicit_lumped_capacity = 2, _explicit_consistent_mass = 3 }; /// enum DOFSupportType defines which kind of dof that can exists enum DOFSupportType { _dst_nodal, _dst_generic }; #if !defined(DOXYGEN) // clang-format off #define AKANTU_NON_LINEAR_SOLVER_TYPES \ (linear) \ (newton_raphson) \ (newton_raphson_modified) \ (lumped) \ (gmres) \ (bfgs) \ (cg) \ (auto) // clang-format on AKANTU_CLASS_ENUM_DECLARE(NonLinearSolverType, AKANTU_NON_LINEAR_SOLVER_TYPES) AKANTU_CLASS_ENUM_OUTPUT_STREAM(NonLinearSolverType, AKANTU_NON_LINEAR_SOLVER_TYPES) AKANTU_CLASS_ENUM_INPUT_STREAM(NonLinearSolverType, AKANTU_NON_LINEAR_SOLVER_TYPES) #else /// Type of non linear resolution available in akantu enum class NonLinearSolverType { _linear, ///< No non linear convergence loop _newton_raphson, ///< Regular Newton-Raphson _newton_raphson_modified, ///< Newton-Raphson with initial tangent _lumped, ///< Case of lumped mass or equivalent matrix _gmres, _bfgs, _cg, _auto ///< This will take a default value that make sense in case of /// model::getNewSolver }; #endif #if !defined(DOXYGEN) // clang-format off #define AKANTU_TIME_STEP_SOLVER_TYPE \ (static) \ (dynamic) \ (dynamic_lumped) \ (not_defined) // clang-format on AKANTU_CLASS_ENUM_DECLARE(TimeStepSolverType, AKANTU_TIME_STEP_SOLVER_TYPE) AKANTU_CLASS_ENUM_OUTPUT_STREAM(TimeStepSolverType, AKANTU_TIME_STEP_SOLVER_TYPE) AKANTU_CLASS_ENUM_INPUT_STREAM(TimeStepSolverType, AKANTU_TIME_STEP_SOLVER_TYPE) #else /// Type of time stepping solver enum class TimeStepSolverType { _static, ///< Static solution _dynamic, ///< Dynamic solver _dynamic_lumped, ///< Dynamic solver with lumped mass _not_defined, ///< For not defined cases }; #endif #if !defined(DOXYGEN) // clang-format off #define AKANTU_INTEGRATION_SCHEME_TYPE \ (pseudo_time) \ (forward_euler) \ (trapezoidal_rule_1) \ (backward_euler) \ (central_difference) \ (fox_goodwin) \ (trapezoidal_rule_2) \ (linear_acceleration) \ (newmark_beta) \ (generalized_trapezoidal) // clang-format on AKANTU_CLASS_ENUM_DECLARE(IntegrationSchemeType, AKANTU_INTEGRATION_SCHEME_TYPE) AKANTU_CLASS_ENUM_OUTPUT_STREAM(IntegrationSchemeType, AKANTU_INTEGRATION_SCHEME_TYPE) AKANTU_CLASS_ENUM_INPUT_STREAM(IntegrationSchemeType, 
AKANTU_INTEGRATION_SCHEME_TYPE) #else /// Type of integration scheme enum class IntegrationSchemeType { _pseudo_time, ///< Pseudo Time _forward_euler, ///< GeneralizedTrapezoidal(0) _trapezoidal_rule_1, ///< GeneralizedTrapezoidal(1/2) _backward_euler, ///< GeneralizedTrapezoidal(1) _central_difference, ///< NewmarkBeta(0, 1/2) _fox_goodwin, ///< NewmarkBeta(1/6, 1/2) _trapezoidal_rule_2, ///< NewmarkBeta(1/2, 1/2) _linear_acceleration, ///< NewmarkBeta(1/3, 1/2) _newmark_beta, ///< generic NewmarkBeta with user defined /// alpha and beta _generalized_trapezoidal ///< generic GeneralizedTrapezoidal with user /// defined alpha }; #endif #if !defined(DOXYGEN) // clang-format off #define AKANTU_SOLVE_CONVERGENCE_CRITERIA \ (residual) \ (solution) \ (residual_mass_wgh) // clang-format on AKANTU_CLASS_ENUM_DECLARE(SolveConvergenceCriteria, AKANTU_SOLVE_CONVERGENCE_CRITERIA) AKANTU_CLASS_ENUM_OUTPUT_STREAM(SolveConvergenceCriteria, AKANTU_SOLVE_CONVERGENCE_CRITERIA) AKANTU_CLASS_ENUM_INPUT_STREAM(SolveConvergenceCriteria, AKANTU_SOLVE_CONVERGENCE_CRITERIA) #else /// enum SolveConvergenceCriteria different convergence criteria enum class SolveConvergenceCriteria { _residual, ///< Use residual to test the convergence _solution, ///< Use solution to test the convergence _residual_mass_wgh ///< Use residual weighted by inv. nodal mass to ///< testb }; #endif /// enum CohesiveMethod type of insertion of cohesive elements enum CohesiveMethod { _intrinsic, _extrinsic }; /// @enum SparseMatrixType type of sparse matrix used enum MatrixType { _unsymmetric, _symmetric, _mt_not_defined }; enum class SizeType { _local, ///<< thie m ans n sizes are local to a processor _global, ///<< the m and m sizes are global accross processors }; /* -------------------------------------------------------------------------- */ /* Ghosts handling */ /* -------------------------------------------------------------------------- */ /// @enum CommunicatorType type of communication method to use enum CommunicatorType { _communicator_mpi, _communicator_dummy }; #if !defined(DOXYGEN) // clang-format off #define AKANTU_SYNCHRONIZATION_TAG \ (whatever) \ (update) \ (ask_nodes) \ (size) \ (smm_mass) \ (smm_for_gradu) \ (smm_boundary) \ (smm_uv) \ (smm_res) \ (smm_init_mat) \ (smm_stress) \ (smmc_facets) \ (smmc_facets_conn) \ (smmc_facets_stress) \ (smmc_damage) \ (giu_global_conn) \ (ce_groups) \ (gm_clusters) \ (htm_temperature) \ (htm_gradient_temperature) \ (htm_phi) \ (htm_gradient_phi) \ (mnl_for_average) \ (mnl_weight) \ (nh_criterion) \ (test) \ (user_1) \ (user_2) \ (material_id) \ (for_dump) \ (cf_nodal) \ (cf_incr) \ (solver_solution) // clang-format on AKANTU_CLASS_ENUM_DECLARE(SynchronizationTag, AKANTU_SYNCHRONIZATION_TAG) AKANTU_CLASS_ENUM_OUTPUT_STREAM(SynchronizationTag, AKANTU_SYNCHRONIZATION_TAG) #else /// @enum SynchronizationTag type of synchronizations enum class SynchronizationTag { //--- Generic tags --- _whatever, _update, _ask_nodes, _size, //--- SolidMechanicsModel tags --- _smm_mass, ///< synchronization of the SolidMechanicsModel.mass _smm_for_gradu, ///< synchronization of the /// SolidMechanicsModel.displacement _smm_boundary, ///< synchronization of the boundary, forces, velocities /// and displacement _smm_uv, ///< synchronization of the nodal velocities and displacement _smm_res, ///< synchronization of the nodal residual _smm_init_mat, ///< synchronization of the data to initialize materials _smm_stress, ///< synchronization of the stresses to compute the ///< internal /// forces _smmc_facets, ///< 
synchronization of facet data to setup facet synch _smmc_facets_conn, ///< synchronization of facet global connectivity _smmc_facets_stress, ///< synchronization of facets' stress to setup ///< facet /// synch _smmc_damage, ///< synchronization of damage // --- GlobalIdsUpdater tags --- _giu_global_conn, ///< synchronization of global connectivities // --- CohesiveElementInserter tags --- _ce_groups, ///< synchronization of cohesive element insertion depending /// on facet groups // --- GroupManager tags --- _gm_clusters, ///< synchronization of clusters // --- HeatTransfer tags --- _htm_temperature, ///< synchronization of the nodal temperature _htm_gradient_temperature, ///< synchronization of the element gradient /// temperature // --- LevelSet tags --- _htm_phi, ///< synchronization of the nodal level set value phi _htm_gradient_phi, ///< synchronization of the element gradient phi //--- Material non local --- _mnl_for_average, ///< synchronization of data to average in non local /// material _mnl_weight, ///< synchronization of data for the weight computations // --- NeighborhoodSynchronization tags --- _nh_criterion, // --- General tags --- _test, ///< Test tag _user_1, ///< tag for user simulations _user_2, ///< tag for user simulations _material_id, ///< synchronization of the material ids _for_dump, ///< everything that needs to be synch before dump // --- Contact & Friction --- _cf_nodal, ///< synchronization of disp, velo, and current position _cf_incr, ///< synchronization of increment // --- Solver tags --- _solver_solution ///< synchronization of the solution obained with the /// PETSc solver }; #endif /// @enum GhostType type of ghost enum GhostType { _not_ghost = 0, _ghost = 1, _casper // not used but a real cute ghost }; /// Define the flag that can be set to a node enum class NodeFlag : std::uint8_t { _normal = 0x00, _distributed = 0x01, _master = 0x03, _slave = 0x05, _pure_ghost = 0x09, _shared_mask = 0x0F, _periodic = 0x10, _periodic_master = 0x30, _periodic_slave = 0x50, _periodic_mask = 0xF0, _local_master_mask = 0xCC, // ~(_master & _periodic_mask) }; inline NodeFlag operator&(const NodeFlag & a, const NodeFlag & b) { using under = std::underlying_type_t; return NodeFlag(under(a) & under(b)); } inline NodeFlag operator|(const NodeFlag & a, const NodeFlag & b) { using under = std::underlying_type_t; return NodeFlag(under(a) | under(b)); } inline NodeFlag & operator|=(NodeFlag & a, const NodeFlag & b) { a = a | b; return a; } inline NodeFlag & operator&=(NodeFlag & a, const NodeFlag & b) { a = a & b; return a; } inline NodeFlag operator~(const NodeFlag & a) { using under = std::underlying_type_t; return NodeFlag(~under(a)); } std::ostream & operator<<(std::ostream & stream, NodeFlag flag); } // namespace akantu AKANTU_ENUM_HASH(GhostType) namespace akantu { /* -------------------------------------------------------------------------- */ struct GhostType_def { using type = GhostType; static const type _begin_ = _not_ghost; static const type _end_ = _casper; }; using ghost_type_t = safe_enum; extern ghost_type_t ghost_types; /// standard output stream operator for GhostType inline std::ostream & operator<<(std::ostream & stream, GhostType type); /* -------------------------------------------------------------------------- */ /* Global defines */ /* -------------------------------------------------------------------------- */ #define AKANTU_MIN_ALLOCATION 2000 #define AKANTU_INDENT ' ' #define AKANTU_INCLUDE_INLINE_IMPL /* 
-------------------------------------------------------------------------- */ #define AKANTU_SET_MACRO(name, variable, type) \ inline void set##name(type variable) { this->variable = variable; } #define AKANTU_GET_MACRO(name, variable, type) \ inline type get##name() const { return variable; } #define AKANTU_GET_MACRO_NOT_CONST(name, variable, type) \ inline type get##name() { return variable; } -#define AKANTU_GET_MACRO_DEREF_PTR(name, ptr) \ +#define AKANTU_GET_MACRO_DEREF_PTR(name, ptr) \ inline decltype(auto) get##name() const { \ if (not ptr) { \ - AKANTU_EXCEPTION("The member " << #ptr << " is not initialized"); \ + AKANTU_EXCEPTION("The member " << #ptr << " is not initialized"); \ } \ return (*ptr); \ } #define AKANTU_GET_MACRO_BY_SUPPORT_TYPE(name, variable, type, support, con) \ inline con Array & get##name( \ const support & el_type, const GhostType & ghost_type = _not_ghost) \ con { \ return variable(el_type, ghost_type); \ } #define AKANTU_GET_MACRO_BY_ELEMENT_TYPE(name, variable, type) \ AKANTU_GET_MACRO_BY_SUPPORT_TYPE(name, variable, type, ElementType, ) #define AKANTU_GET_MACRO_BY_ELEMENT_TYPE_CONST(name, variable, type) \ AKANTU_GET_MACRO_BY_SUPPORT_TYPE(name, variable, type, ElementType, const) #define AKANTU_GET_MACRO_BY_GEOMETRIE_TYPE(name, variable, type) \ AKANTU_GET_MACRO_BY_SUPPORT_TYPE(name, variable, type, GeometricalType, ) #define AKANTU_GET_MACRO_BY_GEOMETRIE_TYPE_CONST(name, variable, type) \ AKANTU_GET_MACRO_BY_SUPPORT_TYPE(name, variable, type, GeometricalType, const) /* -------------------------------------------------------------------------- */ /// initialize the static part of akantu void initialize(int & argc, char **& argv); /// initialize the static part of akantu and read the global input_file void initialize(const std::string & input_file, int & argc, char **& argv); /* -------------------------------------------------------------------------- */ /// finilize correctly akantu and clean the memory void finalize(); /* -------------------------------------------------------------------------- */ /// Read an new input file void readInputFile(const std::string & input_file); /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* string manipulation */ /* -------------------------------------------------------------------------- */ inline std::string to_lower(const std::string & str); /* -------------------------------------------------------------------------- */ inline std::string trim(const std::string & to_trim); inline std::string trim(const std::string & to_trim, char c); /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /// give a string representation of the a human readable size in bit template std::string printMemorySize(UInt size); /* -------------------------------------------------------------------------- */ struct TensorTrait {}; struct TensorProxyTrait {}; } // namespace akantu /* -------------------------------------------------------------------------- */ /* Type traits */ /* -------------------------------------------------------------------------- */ namespace aka { /* ------------------------------------------------------------------------ */ template using is_tensor = std::is_base_of; template using is_tensor_proxy = std::is_base_of; /* ------------------------------------------------------------------------ */ 
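The NodeFlag enum defined earlier in this header is a scoped enum over std::uint8_t with hand-written bitwise operators, so node properties can be combined and masked without implicit integer conversions. A minimal standalone sketch of the same idiom, using made-up flag names and values rather than the Akantu ones:

    // Scoped-enum bit-mask idiom (illustrative values only).
    #include <cstdint>
    #include <iostream>
    #include <type_traits>

    enum class Flag : std::uint8_t {
      _none        = 0x00,
      _distributed = 0x01,
      _master      = 0x02,
      _periodic    = 0x10,
    };

    inline Flag operator|(Flag a, Flag b) {
      using under = std::underlying_type_t<Flag>;
      return Flag(under(a) | under(b));
    }
    inline Flag operator&(Flag a, Flag b) {
      using under = std::underlying_type_t<Flag>;
      return Flag(under(a) & under(b));
    }

    int main() {
      Flag f = Flag::_distributed | Flag::_master;       // compose flags
      bool is_master = (f & Flag::_master) != Flag::_none; // test via masking
      std::cout << std::boolalpha << is_master << '\n';  // true
    }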
template using is_scalar = std::is_arithmetic; /* ------------------------------------------------------------------------ */ template ::value> * = nullptr> bool is_of_type(T && t) { return ( dynamic_cast>::value, std::add_const_t, R>>>(&t) != nullptr); } /* -------------------------------------------------------------------------- */ template bool is_of_type(std::unique_ptr & t) { return ( dynamic_cast::value, std::add_const_t, R>>>( t.get()) != nullptr); } /* ------------------------------------------------------------------------ */ template ::value> * = nullptr> decltype(auto) as_type(T && t) { static_assert( disjunction< std::is_base_of, std::decay_t>, // down-cast std::is_base_of, std::decay_t> // up-cast >::value, - "Type T and R are not valid for a as_type conversion"); + "Type T and R are not valid for a as_type conversion, as_type does not " + "handle side-cast"); return dynamic_cast>::value, std::add_const_t, R>>>(t); } /* -------------------------------------------------------------------------- */ template ::value> * = nullptr> decltype(auto) as_type(T && t) { return &as_type(*t); } /* -------------------------------------------------------------------------- */ template decltype(auto) as_type(const std::shared_ptr & t) { return std::dynamic_pointer_cast(t); } } // namespace aka #include "aka_fwd.hh" namespace akantu { /// get access to the internal argument parser cppargparse::ArgumentParser & getStaticArgumentParser(); /// get access to the internal input file parser Parser & getStaticParser(); /// get access to the user part of the internal input file parser const ParserSection & getUserParser(); #define AKANTU_CURRENT_FUNCTION \ (std::string(__func__) + "():" + std::to_string(__LINE__)) } // namespace akantu #include "aka_common_inline_impl.cc" /* -------------------------------------------------------------------------- */ #if AKANTU_INTEGER_SIZE == 4 #define AKANTU_HASH_COMBINE_MAGIC_NUMBER 0x9e3779b9 #elif AKANTU_INTEGER_SIZE == 8 #define AKANTU_HASH_COMBINE_MAGIC_NUMBER 0x9e3779b97f4a7c13LL #endif namespace std { /** * Hashing function for pairs based on hash_combine from boost The magic * number is coming from the golden number @f[\phi = \frac{1 + \sqrt5}{2}@f] * @f[\frac{2^32}{\phi} = 0x9e3779b9@f] * http://stackoverflow.com/questions/4948780/magic-number-in-boosthash-combine * http://burtleburtle.net/bob/hash/doobs.html */ template struct hash> { hash() = default; size_t operator()(const std::pair & p) const { size_t seed = ah(p.first); return bh(p.second) + AKANTU_HASH_COMBINE_MAGIC_NUMBER + (seed << 6) + (seed >> 2); } private: const hash ah{}; const hash bh{}; }; } // namespace std #endif /* __AKANTU_COMMON_HH__ */ diff --git a/src/model/common/dof_manager/dof_manager_default.cc b/src/model/common/dof_manager/dof_manager_default.cc index 18f34620d..d4d44e50e 100644 --- a/src/model/common/dof_manager/dof_manager_default.cc +++ b/src/model/common/dof_manager/dof_manager_default.cc @@ -1,486 +1,486 @@ /** * @file dof_manager_default.cc * * @author Nicolas Richart * * @date creation: Tue Aug 18 2015 * @date last modification: Thu Feb 08 2018 * * @brief Implementation of the default DOFManager * * @section LICENSE * * Copyright (©) 2015-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, 
or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "dof_manager_default.hh" #include "communicator.hh" #include "dof_synchronizer.hh" #include "element_group.hh" #include "non_linear_solver_default.hh" #include "periodic_node_synchronizer.hh" #include "solver_vector_default.hh" #include "solver_vector_distributed.hh" #include "sparse_matrix_aij.hh" #include "time_step_solver_default.hh" /* -------------------------------------------------------------------------- */ #include #include #include #include /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ DOFManagerDefault::DOFManagerDefault(const ID & id, const MemoryID & memory_id) : DOFManager(id, memory_id), synchronizer(nullptr) { residual = std::make_unique( *this, std::string(id + ":residual")); solution = std::make_unique( *this, std::string(id + ":solution")); data_cache = std::make_unique( *this, std::string(id + ":data_cache")); } /* -------------------------------------------------------------------------- */ DOFManagerDefault::DOFManagerDefault(Mesh & mesh, const ID & id, const MemoryID & memory_id) : DOFManager(mesh, id, memory_id), synchronizer(nullptr) { if (this->mesh->isDistributed()) { this->synchronizer = std::make_unique( *this, this->id + ":dof_synchronizer", this->memory_id); residual = std::make_unique( *this, std::string(id + ":residual")); solution = std::make_unique( *this, std::string(id + ":solution")); data_cache = std::make_unique( *this, std::string(id + ":data_cache")); } else { residual = std::make_unique( *this, std::string(id + ":residual")); solution = std::make_unique( *this, std::string(id + ":solution")); data_cache = std::make_unique( *this, std::string(id + ":data_cache")); } } /* -------------------------------------------------------------------------- */ DOFManagerDefault::~DOFManagerDefault() = default; /* -------------------------------------------------------------------------- */ void DOFManagerDefault::makeConsistentForPeriodicity(const ID & dof_id, SolverVector & array) { auto & dof_data = this->getDOFDataTyped(dof_id); if (dof_data.support_type != _dst_nodal) return; if (not mesh->isPeriodic()) return; this->mesh->getPeriodicNodeSynchronizer() .reduceSynchronizeWithPBCSlaves( aka::as_type(array)); } /* -------------------------------------------------------------------------- */ template void DOFManagerDefault::assembleToGlobalArray( const ID & dof_id, const Array & array_to_assemble, Array & global_array, T scale_factor) { AKANTU_DEBUG_IN(); auto & dof_data = this->getDOFDataTyped(dof_id); AKANTU_DEBUG_ASSERT(dof_data.local_equation_number.size() == array_to_assemble.size() * array_to_assemble.getNbComponent(), "The array to assemble does not have a correct size." 
<< " (" << array_to_assemble.getID() << ")"); if (dof_data.support_type == _dst_nodal and mesh->isPeriodic()) { for (auto && data : zip(dof_data.local_equation_number, dof_data.associated_nodes, make_view(array_to_assemble))) { auto && equ_num = std::get<0>(data); auto && node = std::get<1>(data); auto && arr = std::get<2>(data); global_array(equ_num) += scale_factor * (arr) * (not this->mesh->isPeriodicSlave(node)); } } else { for (auto && data : zip(dof_data.local_equation_number, make_view(array_to_assemble))) { auto && equ_num = std::get<0>(data); auto && arr = std::get<1>(data); global_array(equ_num) += scale_factor * (arr); } } AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::assembleToGlobalArray( const ID & dof_id, const Array & array_to_assemble, SolverVector & global_array_v, Real scale_factor) { assembleToGlobalArray(dof_id, array_to_assemble, - aka::as_type(global_array_v), + dynamic_cast &>(global_array_v), scale_factor); } /* -------------------------------------------------------------------------- */ DOFManagerDefault::DOFDataDefault::DOFDataDefault(const ID & dof_id) : DOFData(dof_id) {} /* -------------------------------------------------------------------------- */ auto DOFManagerDefault::getNewDOFData(const ID & dof_id) -> std::unique_ptr { return std::make_unique(dof_id); } /* -------------------------------------------------------------------------- */ std::tuple DOFManagerDefault::registerDOFsInternal(const ID & dof_id, Array & dofs_array) { auto ret = DOFManager::registerDOFsInternal(dof_id, dofs_array); // update the synchronizer if needed if (this->synchronizer) this->synchronizer->registerDOFs(dof_id); return ret; } /* -------------------------------------------------------------------------- */ SparseMatrix & DOFManagerDefault::getNewMatrix(const ID & id, const MatrixType & matrix_type) { return this->registerSparseMatrix(*this, id, matrix_type); } /* -------------------------------------------------------------------------- */ SparseMatrix & DOFManagerDefault::getNewMatrix(const ID & id, const ID & matrix_to_copy_id) { return this->registerSparseMatrix(id, matrix_to_copy_id); } /* -------------------------------------------------------------------------- */ SolverVector & DOFManagerDefault::getNewLumpedMatrix(const ID & id) { return this->registerLumpedMatrix(*this, id); } /* -------------------------------------------------------------------------- */ SparseMatrixAIJ & DOFManagerDefault::getMatrix(const ID & id) { auto & matrix = DOFManager::getMatrix(id); return aka::as_type(matrix); } /* -------------------------------------------------------------------------- */ NonLinearSolver & DOFManagerDefault::getNewNonLinearSolver(const ID & id, const NonLinearSolverType & type) { switch (type) { #if defined(AKANTU_USE_MUMPS) case NonLinearSolverType::_newton_raphson: /* FALLTHRU */ /* [[fallthrough]]; un-comment when compiler will get it */ case NonLinearSolverType::_newton_raphson_modified: { return this->registerNonLinearSolver( *this, id, type); } case NonLinearSolverType::_linear: { return this->registerNonLinearSolver(*this, id, type); } #endif case NonLinearSolverType::_lumped: { return this->registerNonLinearSolver(*this, id, type); } default: AKANTU_EXCEPTION("The asked type of non linear solver is not supported by " "this dof manager"); } } /* -------------------------------------------------------------------------- */ TimeStepSolver & DOFManagerDefault::getNewTimeStepSolver( 
const ID & id, const TimeStepSolverType & type, NonLinearSolver & non_linear_solver, SolverCallback & solver_callback) { return this->registerTimeStepSolver( *this, id, type, non_linear_solver, solver_callback); } /* -------------------------------------------------------------------------- */ template void DOFManagerDefault::getArrayPerDOFs(const ID & dof_id, const Array & global_array, Array & local_array) const { AKANTU_DEBUG_IN(); const Array & equation_number = this->getLocalEquationsNumbers(dof_id); UInt nb_degree_of_freedoms = equation_number.size(); local_array.resize(nb_degree_of_freedoms / local_array.getNbComponent()); auto loc_it = local_array.begin_reinterpret(nb_degree_of_freedoms); auto equ_it = equation_number.begin(); for (UInt d = 0; d < nb_degree_of_freedoms; ++d, ++loc_it, ++equ_it) { (*loc_it) = global_array(*equ_it); } AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::getArrayPerDOFs(const ID & dof_id, const SolverVector & global_array, Array & local_array) { - getArrayPerDOFs(dof_id, aka::as_type(global_array), + getArrayPerDOFs(dof_id, dynamic_cast &>(global_array), local_array); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::assembleLumpedMatMulVectToResidual( const ID & dof_id, const ID & A_id, const Array & x, Real scale_factor) { const auto & A = aka::as_type(this->getLumpedMatrix(A_id)); auto & cache = aka::as_type(*this->data_cache); cache.clear(); this->assembleToGlobalArray(dof_id, x, cache, scale_factor); for (auto && data : zip(make_view(A), make_view(cache), make_view(this->getResidualArray()))) { const auto & A = std::get<0>(data); const auto & x = std::get<1>(data); auto & r = std::get<2>(data); r += A * x; } } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::assembleElementalMatricesToMatrix( const ID & matrix_id, const ID & dof_id, const Array & elementary_mat, const ElementType & type, const GhostType & ghost_type, const MatrixType & elemental_matrix_type, const Array & filter_elements) { this->addToProfile(matrix_id, dof_id, type, ghost_type); auto & A = getMatrix(matrix_id); DOFManager::assembleElementalMatricesToMatrix_( A, dof_id, elementary_mat, type, ghost_type, elemental_matrix_type, filter_elements); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::assemblePreassembledMatrix( const ID & dof_id_m, const ID & dof_id_n, const ID & matrix_id, const TermsToAssemble & terms) { auto & A = getMatrix(matrix_id); DOFManager::assemblePreassembledMatrix_(A, dof_id_m, dof_id_n, terms); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::assembleMatMulVectToArray(const ID & dof_id, const ID & A_id, const Array & x, Array & array, Real scale_factor) { if (mesh->isDistributed()) { DOFManager::assembleMatMulVectToArray_( dof_id, A_id, x, array, scale_factor); } else { DOFManager::assembleMatMulVectToArray_( dof_id, A_id, x, array, scale_factor); } } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::addToProfile(const ID & matrix_id, const ID & dof_id, const ElementType & type, const GhostType & ghost_type) { AKANTU_DEBUG_IN(); const auto & dof_data = this->getDOFData(dof_id); if (dof_data.support_type != _dst_nodal) return; auto mat_dof = std::make_pair(matrix_id, dof_id); auto type_pair = 
std::make_pair(type, ghost_type); auto prof_it = this->matrix_profiled_dofs.find(mat_dof); if (prof_it != this->matrix_profiled_dofs.end() && std::find(prof_it->second.begin(), prof_it->second.end(), type_pair) != prof_it->second.end()) return; auto nb_degree_of_freedom_per_node = dof_data.dof->getNbComponent(); const auto & equation_number = this->getLocalEquationsNumbers(dof_id); auto & A = this->getMatrix(matrix_id); A.resize(system_size); auto size = A.size(); auto nb_nodes_per_element = Mesh::getNbNodesPerElement(type); const auto & connectivity = this->mesh->getConnectivity(type, ghost_type); auto cbegin = connectivity.begin(nb_nodes_per_element); auto cit = cbegin; auto nb_elements = connectivity.size(); UInt * ge_it = nullptr; if (dof_data.group_support != "__mesh__") { const auto & group_elements = this->mesh->getElementGroup(dof_data.group_support) .getElements(type, ghost_type); ge_it = group_elements.storage(); nb_elements = group_elements.size(); } UInt size_mat = nb_nodes_per_element * nb_degree_of_freedom_per_node; Vector element_eq_nb(size_mat); for (UInt e = 0; e < nb_elements; ++e) { if (ge_it) cit = cbegin + *ge_it; this->extractElementEquationNumber( equation_number, *cit, nb_degree_of_freedom_per_node, element_eq_nb); std::transform( element_eq_nb.storage(), element_eq_nb.storage() + element_eq_nb.size(), element_eq_nb.storage(), [&](auto & local) { return this->localToGlobalEquationNumber(local); }); if (ge_it) ++ge_it; else ++cit; for (UInt i = 0; i < size_mat; ++i) { UInt c_irn = element_eq_nb(i); if (c_irn < size) { for (UInt j = 0; j < size_mat; ++j) { UInt c_jcn = element_eq_nb(j); if (c_jcn < size) { A.add(c_irn, c_jcn); } } } } } this->matrix_profiled_dofs[mat_dof].push_back(type_pair); AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ Array & DOFManagerDefault::getSolutionArray() { return aka::as_type(*this->solution); } /* -------------------------------------------------------------------------- */ const Array & DOFManagerDefault::getResidualArray() const { return aka::as_type(*this->residual); } /* -------------------------------------------------------------------------- */ Array & DOFManagerDefault::getResidualArray() { return aka::as_type(*this->residual); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::onNodesAdded(const Array & nodes_list, const NewNodesEvent & event) { DOFManager::onNodesAdded(nodes_list, event); if (this->synchronizer) this->synchronizer->onNodesAdded(nodes_list); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::resizeGlobalArrays() { DOFManager::resizeGlobalArrays(); this->global_blocked_dofs.resize(this->local_system_size, true); this->previous_global_blocked_dofs.resize(this->local_system_size, true); matrix_profiled_dofs.clear(); } /* -------------------------------------------------------------------------- */ void DOFManagerDefault::updateGlobalBlockedDofs() { DOFManager::updateGlobalBlockedDofs(); if (this->global_blocked_dofs_release == this->previous_global_blocked_dofs_release) return; global_blocked_dofs_uint.resize(local_system_size); global_blocked_dofs_uint.set(false); for (const auto & dof : global_blocked_dofs) { global_blocked_dofs_uint[dof] = true; } } /* -------------------------------------------------------------------------- */ Array & DOFManagerDefault::getBlockedDOFs() { return global_blocked_dofs_uint; } /* 
-------------------------------------------------------------------------- */ const Array & DOFManagerDefault::getBlockedDOFs() const { return global_blocked_dofs_uint; } /* -------------------------------------------------------------------------- */ // register in factory // static bool default_dof_manager_is_registered [[gnu::unused]] = // DefaultDOFManagerFactory::getInstance().registerAllocator( // "default", // [](const ID & id, // const MemoryID & mem_id) -> std::unique_ptr { // return std::make_unique(id, mem_id); // }); static bool dof_manager_is_registered [[gnu::unused]] = DOFManagerFactory::getInstance().registerAllocator( "default", [](Mesh & mesh, const ID & id, const MemoryID & mem_id) -> std::unique_ptr { return std::make_unique(mesh, id, mem_id); }); static bool dof_manager_is_registered_mumps [[gnu::unused]] = DOFManagerFactory::getInstance().registerAllocator( "mumps", [](Mesh & mesh, const ID & id, const MemoryID & mem_id) -> std::unique_ptr { return std::make_unique(mesh, id, mem_id); }); } // namespace akantu diff --git a/src/solver/solver_sparse_matrix.hh b/src/solver/solver_sparse_matrix.hh index 20ea94904..e3e70525b 100644 --- a/src/solver/solver_sparse_matrix.hh +++ b/src/solver/solver_sparse_matrix.hh @@ -1,108 +1,115 @@ /** * @file solver_sparse_matrix.hh * * @author Nicolas Richart * * @date creation ven mai 01 2020 * * @brief A Documented file. * * @section LICENSE * * Copyright (©) 2010-2011 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "aka_common.hh" #include "dof_manager_default.hh" #include "solver_vector.hh" #if defined(AKANTU_PETSC) #include "dof_manager_petsc.hh" #include "sparse_matrix_petsc.hh" #endif /* -------------------------------------------------------------------------- */ #ifndef __SOLVER_SPARSE_MATRIX_H_ #define __SOLVER_SPARSE_MATRIX_H_ namespace aka { template struct dof_manager_type { using type = akantu::DOFManagerDefault; }; #if defined(AKANTU_PETSC) template <> struct dof_manager_type { using type = akantu::DOFManagerPETSc; }; #endif template using dof_manager_t = typename dof_manager_type::type; } // namespace aka namespace akantu { /* -------------------------------------------------------------------------- */ class SolverSparseMatrix { public: SolverSparseMatrix() = default; virtual ~SolverSparseMatrix() = default; virtual void applyBoundary(Real block_val = 1.) = 0; virtual void clearProfile() = 0; virtual void clear() = 0; virtual UInt getRelease() const = 0; virtual void resize() = 0; + /// Equivalent of *gemv in blas + virtual void matVecMul(const Array & x, Array & y, + Real alpha = 1., Real beta = 0.) const = 0; + /// Equivalent of *gemv in blas virtual void matVecMul(const SolverVector & x, SolverVector & y, Real alpha = 1., Real beta = 0.) 
const = 0; }; /* -------------------------------------------------------------------------- */ template > class SolverSparseMatrixTmpl : public SparseMatrix, public SolverSparseMatrix { public: SolverSparseMatrixTmpl(DOFManagerType & dof_manager, const MatrixType & matrix_type, const ID & id = "solver_sparse_matrix"); SolverSparseMatrixTmpl(SolverSparseMatrixTmpl & other, const ID & id = "solver_sparse_matrix_copy"); virtual ~SolverSparseMatrixTmpl() = default; void resize(); /// modify the matrix to "remove" the blocked dof void applyBoundary(Real block_val = 1.) override; inline void clearProfile() override { SparseMatrix::clearProfile(); } inline void clear() override { SparseMatrix::clear(); } inline UInt getRelease() const override { return SparseMatrix::getRelease(); } + void matVecMul(const Array & x, Array & y, + Real alpha = 1., Real beta = 0.) const override; + /// Equivalent of *gemv in blas void matVecMul(const SolverVector & x, SolverVector & y, Real alpha = 1., Real beta = 0.) const override; private: DOFManagerType & dof_manager; }; } // namespace akantu #endif // __SOLVER_SPARSE_MATRIX_H_ diff --git a/src/solver/solver_sparse_matrix_aij.cc b/src/solver/solver_sparse_matrix_aij.cc index 8a82343bf..c42328f48 100644 --- a/src/solver/solver_sparse_matrix_aij.cc +++ b/src/solver/solver_sparse_matrix_aij.cc @@ -1,83 +1,104 @@ /* -------------------------------------------------------------------------- */ #include "solver_sparse_matrix.hh" /* -------------------------------------------------------------------------- */ +#include "dof_synchronizer.hh" #include "solver_vector_default.hh" #include "sparse_matrix_aij.hh" /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ template <> SolverSparseMatrixTmpl:: SolverSparseMatrixTmpl(DOFManagerDefault & dof_manager, const MatrixType & matrix_type, const ID & id) : SparseMatrixAIJ(dof_manager.getCommunicator(), - dof_manager.getPureLocalSystemSize(), - dof_manager.getPureLocalSystemSize(), SizeType::_local, + dof_manager.getSystemSize(), + dof_manager.getSystemSize(), SizeType::_global, matrix_type, id), dof_manager(dof_manager) {} /* -------------------------------------------------------------------------- */ template <> SolverSparseMatrixTmpl:: SolverSparseMatrixTmpl(SolverSparseMatrixAIJ & B, const ID & id) : SparseMatrixAIJ(B, id), dof_manager(B.dof_manager) {} /* -------------------------------------------------------------------------- */ template <> -void SolverSparseMatrixTmpl:: -resize() { +void SolverSparseMatrixTmpl::resize() { this->m = this->n = this->dof_manager.getSystemSize(); this->m_local = this->n_local = this->dof_manager.getPureLocalSystemSize(); } /* -------------------------------------------------------------------------- */ template <> void SolverSparseMatrixTmpl::applyBoundary( Real block_val) { AKANTU_DEBUG_IN(); const auto & blocked_dofs = this->dof_manager.getGlobalBlockedDOFs(); auto begin = blocked_dofs.begin(); auto end = blocked_dofs.end(); auto is_blocked = [&](auto && i) -> bool { auto il = this->dof_manager.globalToLocalEquationNumber(i); return std::binary_search(begin, end, il); }; for (auto && ij_a : zip(this->irn, this->jcn, a)) { UInt ni = std::get<0>(ij_a) - 1; UInt nj = std::get<1>(ij_a) - 1; if (is_blocked(ni) or is_blocked(nj)) { std::get<2>(ij_a) = std::get<0>(ij_a) != std::get<1>(ij_a) ? 0. 
: this->dof_manager.isLocalOrMasterDOF( this->dof_manager.globalToLocalEquationNumber(ni)) ? block_val : 0.; } } this->value_release++; AKANTU_DEBUG_OUT(); } +/* -------------------------------------------------------------------------- */ +template <> +void SolverSparseMatrixTmpl::matVecMul( + const Array & x, Array & y, Real alpha, Real beta) const { + auto && id = [&](auto && i) { + return this->dof_manager.globalToLocalEquationNumber(i); + }; + + SparseMatrixAIJ::matVecMulLocal(x, y, id, id, alpha, beta); + + if (this->dof_manager.hasSynchronizer()) + this->dof_manager.getSynchronizer().reduceSynchronizeArray(y); +} + /* -------------------------------------------------------------------------- */ template <> void SolverSparseMatrixTmpl::matVecMul( const SolverVector & _x, SolverVector & _y, Real alpha, Real beta) const { AKANTU_DEBUG_IN(); auto && x = aka::as_type(_x); auto && y = aka::as_type(_y); - SparseMatrixAIJ::matVecMul(x, y, alpha, beta); + auto && id = [&](auto && i) { + return this->dof_manager.globalToLocalEquationNumber(i); + }; + + SparseMatrixAIJ::matVecMulLocal(x, y, id, id, alpha, beta); + + if (this->dof_manager.hasSynchronizer()) + this->dof_manager.getSynchronizer().reduceSynchronizeArray(y); } } // namespace akantu diff --git a/src/solver/solver_sparse_matrix_petsc.cc b/src/solver/solver_sparse_matrix_petsc.cc index ddeeff159..f000a2bea 100644 --- a/src/solver/solver_sparse_matrix_petsc.cc +++ b/src/solver/solver_sparse_matrix_petsc.cc @@ -1,71 +1,80 @@ #include "dof_manager_petsc.hh" #include "solver_vector_petsc.hh" #include "solver_sparse_matrix.hh" #include "sparse_matrix_petsc.hh" namespace akantu { /* -------------------------------------------------------------------------- */ template <> SolverSparseMatrixTmpl:: SolverSparseMatrixTmpl(DOFManagerPETSc & dof_manager, const MatrixType & matrix_type, const ID & id) : SparseMatrixPETSc(dof_manager.getCommunicator(), dof_manager.getPureLocalSystemSize(), dof_manager.getPureLocalSystemSize(), SizeType::_local, matrix_type, id), dof_manager(dof_manager) { auto & is_ltog_mapping = dof_manager.getISLocalToGlobalMapping(); PETSc_call(MatSetLocalToGlobalMapping, mat, is_ltog_mapping, is_ltog_mapping); } /* -------------------------------------------------------------------------- */ template <> SolverSparseMatrixTmpl:: SolverSparseMatrixTmpl(SolverSparseMatrixPETSc & B, const ID & id) : SparseMatrixPETSc(B, id), dof_manager(B.dof_manager) {} /* --------------------------------------------------------------------------*/ template <> void SolverSparseMatrixTmpl::resize() { this->m_local = dof_manager.getPureLocalSystemSize(); this->n_local = this->m_local; PETSc_call(MatSetSizes, mat, this->m_local, this->n_local, PETSC_DETERMINE, PETSC_DETERMINE); PETSc_call(MatGetSize, mat, &(this->m), &(this->n)); auto & is_ltog_mapping = dof_manager.getISLocalToGlobalMapping(); PETSc_call(MatSetLocalToGlobalMapping, mat, is_ltog_mapping, is_ltog_mapping); } /* -------------------------------------------------------------------------- */ template <> void SolverSparseMatrixTmpl::applyBoundary( Real block_val) { AKANTU_DEBUG_IN(); const auto & blocked_dofs = this->dof_manager.getGlobalBlockedDOFs(); static int c = 0; // saveMatrix("before_blocked_" + std::to_string(c) + ".mtx"); PETSc_call(MatZeroRowsColumnsLocal, mat, blocked_dofs.size(), blocked_dofs.storage(), block_val, nullptr, nullptr); // saveMatrix("after_blocked_" + std::to_string(c) + ".mtx"); ++c; AKANTU_DEBUG_OUT(); } +/* 
-------------------------------------------------------------------------- */ +template <> +void SolverSparseMatrixTmpl::matVecMul( + const Array & _x, Array & _y, Real alpha, Real beta) const { + auto && x = internal::make_petsc_wrapped_vector(_x); + auto && y = internal::make_petsc_wrapped_vector(_y); + SparseMatrixPETSc::matVecMul(x, y, alpha, beta); +} + /* -------------------------------------------------------------------------- */ template <> void SolverSparseMatrixTmpl::matVecMul( const SolverVector & _x, SolverVector & _y, Real alpha, Real beta) const { auto & x = aka::as_type(_x); auto & y = aka::as_type(_y); SparseMatrixPETSc::matVecMul(x, y, alpha, beta); } } // namespace akantu diff --git a/src/solver/solver_vector.hh b/src/solver/solver_vector.hh index 3a3ce4e79..52c9d6b4e 100644 --- a/src/solver/solver_vector.hh +++ b/src/solver/solver_vector.hh @@ -1,135 +1,136 @@ /** * @file solver_vector.hh * * @author Nicolas Richart * * @date creation Tue Jan 01 2019 * * @brief A Documented file. * * @section LICENSE * * Copyright (©) 2010-2011 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . 
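The matVecMul overloads introduced above in solver_sparse_matrix.hh and specialized for SolverSparseMatrixAIJ are documented as the BLAS *gemv operation, i.e. y = alpha * A * x + beta * y, applied to a matrix stored as 1-based (irn, jcn, a) coordinate triplets. A standalone sketch of that operation on plain std::vector storage, ignoring the equation-number remapping and the parallel reduction performed in the diff:

    // y = alpha * A * x + beta * y for a COO ("AIJ") matrix stored as
    // 1-based (irn, jcn, a) triplets. Serial, no symmetric-storage handling.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    void coo_gemv(const std::vector<unsigned> & irn, const std::vector<unsigned> & jcn,
                  const std::vector<double> & a, const std::vector<double> & x,
                  std::vector<double> & y, double alpha = 1., double beta = 0.) {
      for (auto & yi : y)
        yi *= beta;
      for (std::size_t k = 0; k < a.size(); ++k)
        y[irn[k] - 1] += alpha * a[k] * x[jcn[k] - 1]; // 1-based -> 0-based
    }

    int main() {
      // 2x2 matrix [[2, 1], [0, 3]] in coordinate form
      std::vector<unsigned> irn{1, 1, 2}, jcn{1, 2, 2};
      std::vector<double> a{2., 1., 3.}, x{1., 1.}, y{0., 0.};
      coo_gemv(irn, jcn, a, x, y);
      std::cout << y[0] << " " << y[1] << '\n'; // 3 3
    }

In the actual change, the local product goes through matVecMulLocal with a global-to-local equation-number mapping, and the result is then reduced across processes via reduceSynchronizeArray when the DOF manager has a synchronizer.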
* */ /* -------------------------------------------------------------------------- */ #include "aka_array.hh" /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_SOLVER_VECTOR_HH__ #define __AKANTU_SOLVER_VECTOR_HH__ namespace akantu { class DOFManager; } namespace akantu { /* -------------------------------------------------------------------------- */ class SolverVector { public: SolverVector(DOFManager & dof_manager, const ID & id = "solver_vector") : id(id), dof_manager_(dof_manager) {} SolverVector(const SolverVector & vector, const ID & id = "solver_vector") : id(id), dof_manager_(vector.dof_manager_) {} virtual ~SolverVector() = default; // resize the vector to the size of the problem virtual void resize() = 0; // clear the vector virtual void clear() = 0; //virtual operator const Array &() const = 0; virtual Int size() const = 0; virtual Int localSize() const = 0; //virtual SolverVector & operator+(const SolverVector & y) = 0; // virtual SolverVector & operator=(const SolverVector & y) = 0; UInt & release() { return release_; } UInt release() const { return release_; } virtual void printself(std::ostream & stream, int indent = 0) const = 0; protected: ID id; DOFManager & dof_manager_; UInt release_{0}; }; /* -------------------------------------------------------------------------- */ template class SolverVectorTmpl : public SolverVector, public Vector { public: SolverVectorTmpl(DOFManager & dof_manager, const ID & id = "solver_vector"); SolverVectorTmpl(const SolverVectorTmpl & vector, const ID & id = "solver_vector") : SolverVector(vector, id), Vector(vector, id), dof_manager(vector.dof_manager) {} SolverVectorTmpl(const Vector & vector, DOFManager & dof_manager, const ID & id = "solver_vector") : SolverVector(vector, id), Vector(vector, id), dof_manager(dof_manager) { } // resize the vector to the size of the problem void resize() override; // clear the vector void clear() override { Vector::clear(); } //operator const Array &() const override; Int size() const override { return Vector::size(); }; Int localSize() const override; SolverVectorTmpl & operator+(const SolverVectorTmpl & y) { const auto & y_ = aka::as_type(y); Vector::operator+(y_); ++release_; return *this; } SolverVectorTmpl & operator=(const SolverVectorTmpl & y) { const auto & y_ = aka::as_type(y); Vector::operator=(y_); return *this; } void printself(std::ostream & stream, int indent = 0) const override { Vector::printself(stream, indent); } virtual Vector & getGlobalVector(); virtual void setGlobalVector(const Vector & global); + protected: DOFManager & dof_manager; }; /* -------------------------------------------------------------------------- */ inline std::ostream & operator<<(std::ostream & stream, SolverVector & _this) { _this.printself(stream); return stream; } } // namespace akantu #endif /* __AKANTU_SOLVER_VECTOR_HH__ */ diff --git a/src/solver/solver_vector_petsc.cc b/src/solver/solver_vector_petsc.cc index 62b4bb30a..24de80698 100644 --- a/src/solver/solver_vector_petsc.cc +++ b/src/solver/solver_vector_petsc.cc @@ -1,95 +1,95 @@ /** * @file solver_vector_petsc.cc * * @author Nicolas Richart * * @date creation Tue Jan 01 2019 * * @brief A Documented file. 
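SolverVectorTmpl above couples the abstract SolverVector interface with a concrete storage class by inheriting from both the interface and its Vector template parameter, forwarding size() and clear() to the storage. A standalone sketch of that pattern with toy names (not the Akantu types):

    // Interface + storage mixin through a class template.
    #include <iostream>
    #include <vector>

    struct SolverVectorBase {
      virtual ~SolverVectorBase() = default;
      virtual int size() const = 0;
      virtual void clear() = 0;
    };

    // Concrete storage playing the role of the Vector template parameter.
    struct DenseStorage {
      std::vector<double> values;
      int size() const { return int(values.size()); }
      void clear() { values.assign(values.size(), 0.); } // zero the entries
    };

    template <class Storage>
    struct SolverVectorSketch : SolverVectorBase, Storage {
      int size() const override { return Storage::size(); }
      void clear() override { Storage::clear(); }
    };

    int main() {
      SolverVectorSketch<DenseStorage> v;
      v.values = {1., 2., 3.};
      v.clear();
      std::cout << v.size() << '\n'; // 3
    }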
* * @section LICENSE * * Copyright (©) 2010-2011 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "solver_vector_petsc.hh" #include "dof_manager_petsc.hh" #include "mpi_communicator_data.hh" /* -------------------------------------------------------------------------- */ #include #include /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ template <> SolverVectorTmpl::SolverVectorTmpl( DOFManagerPETSc & dof_manager, const ID & id) : SolverVector(dof_manager, id), VectorPETSc(dof_manager.getCommunicator(), dof_manager.getPureLocalSystemSize(), SizeType::_local, id), dof_manager(dof_manager) { auto local_system_size = dof_manager.getPureLocalSystemSize(); VecType vec_type; PETSc_call(VecGetType, x, &vec_type); if (std::string(vec_type) == std::string(VECMPI)) { PetscInt lowest_gidx, highest_gidx; PETSc_call(VecGetOwnershipRange, x, &lowest_gidx, &highest_gidx); std::vector ghost_idx; for (auto && d : arange(local_system_size)) { int gidx = dof_manager.localToGlobalEquationNumber(d); if (gidx != -1) { if ((gidx < lowest_gidx) or (gidx >= highest_gidx)) { ghost_idx.push_back(gidx); } } } PETSc_call(VecMPISetGhost, x, ghost_idx.size(), ghost_idx.data()); } } /* -------------------------------------------------------------------------- */ template <> void SolverVectorTmpl::resize() { // the arrays are destroyed and recreated in the dof manager // resize is so not implemented - AKANTU_TO_IMPLEMENT(); + //AKANTU_TO_IMPLEMENT(); } /* -------------------------------------------------------------------------- */ template <> Int SolverVectorTmpl::localSize() const { return VectorPETSc::local_size(); } /* -------------------------------------------------------------------------- */ template <> void SolverVectorTmpl::setGlobalVector(const VectorPETSc &) { AKANTU_TO_IMPLEMENT(); } /* -------------------------------------------------------------------------- */ template <> VectorPETSc & SolverVectorTmpl::getGlobalVector() { AKANTU_TO_IMPLEMENT(); } } // namespace akantu diff --git a/src/solver/sparse_matrix.cc b/src/solver/sparse_matrix.cc index 3ebb51e9a..90b0a4a41 100644 --- a/src/solver/sparse_matrix.cc +++ b/src/solver/sparse_matrix.cc @@ -1,84 +1,80 @@ /** * @file sparse_matrix.cc * * @author Nicolas Richart * * @date creation: Mon Dec 13 2010 * @date last modification: Wed Nov 08 2017 * * @brief implementation of the SparseMatrix class * * @section LICENSE * * Copyright (©) 2010-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser 
General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include /* -------------------------------------------------------------------------- */ #include "communicator.hh" #include "dof_manager.hh" #include "sparse_matrix.hh" /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ SparseMatrix::SparseMatrix(const Communicator & communicator, const UInt m, - const UInt n, const SizeType & size_type, + const UInt n, const SizeType & /*size_type*/, const MatrixType & matrix_type, const ID & id) : id(id), communicator(communicator), matrix_type(matrix_type), m(m), n(n), nb_non_zero(0) { AKANTU_DEBUG_IN(); - if (size_type == SizeType::_local) { - AKANTU_EXCEPTION("This case is not handled here"); - } - this->nb_proc = communicator.getNbProc(); AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ SparseMatrix::SparseMatrix(const SparseMatrix & matrix, const ID & id) : SparseMatrix(matrix.communicator, matrix.m, matrix.n, SizeType::_global, matrix.matrix_type, id) { nb_non_zero = matrix.nb_non_zero; } /* -------------------------------------------------------------------------- */ SparseMatrix::~SparseMatrix() = default; // /* -------------------------------------------------------------------------- // */ Array & operator*=(SolverVector & vect, const SparseMatrix & mat) { // Array tmp(vect.size(), vect.getNbComponent(), 0.); // mat.matVecMul(vect, tmp); // vect.copy(tmp); // return vect; // } /* -------------------------------------------------------------------------- */ void SparseMatrix::add(const SparseMatrix & B, Real alpha) { B.addMeTo(*this, alpha); } /* -------------------------------------------------------------------------- */ } // namespace akantu diff --git a/src/solver/sparse_matrix_aij.cc b/src/solver/sparse_matrix_aij.cc index 1231e6c28..c6ee276fb 100644 --- a/src/solver/sparse_matrix_aij.cc +++ b/src/solver/sparse_matrix_aij.cc @@ -1,252 +1,219 @@ /** * @file sparse_matrix_aij.cc * * @author Nicolas Richart * * @date creation: Fri Aug 21 2015 * @date last modification: Mon Dec 04 2017 * * @brief Implementation of the AIJ sparse matrix * * @section LICENSE * * Copyright (©) 2015-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. 
If not, see . * */ /* -------------------------------------------------------------------------- */ #include "sparse_matrix_aij.hh" #include "aka_iterators.hh" #include "dof_manager_default.hh" #include "dof_synchronizer.hh" #include "solver_vector_default.hh" #include "terms_to_assemble.hh" /* -------------------------------------------------------------------------- */ #include /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ SparseMatrixAIJ::SparseMatrixAIJ(const Communicator & communicator, const UInt m, const UInt n, const SizeType & size_type, const MatrixType & matrix_type, const ID & id) : SparseMatrix(communicator, m, n, size_type, matrix_type, id), - irn(0, 1, id + ":irn"), jcn(0, 1, id + ":jcn"), - a(0, 1, id + ":a") {} + irn(0, 1, id + ":irn"), jcn(0, 1, id + ":jcn"), a(0, 1, id + ":a") { +} /* -------------------------------------------------------------------------- */ SparseMatrixAIJ::SparseMatrixAIJ(const SparseMatrixAIJ & matrix, const ID & id) - : SparseMatrix(matrix, id), - irn(matrix.irn, id + ":irn"), jcn(matrix.jcn, id + ":jcn"), - a(matrix.a, id + ":a") {} + : SparseMatrix(matrix, id), irn(matrix.irn, id + ":irn"), + jcn(matrix.jcn, id + ":jcn"), a(matrix.a, id + ":a") {} /* -------------------------------------------------------------------------- */ SparseMatrixAIJ::~SparseMatrixAIJ() = default; /* -------------------------------------------------------------------------- */ void SparseMatrixAIJ::saveProfile(const std::string & filename) const { AKANTU_DEBUG_IN(); std::ofstream outfile; outfile.open(filename.c_str()); // write header if (communicator.whoAmI() == 0) { outfile << "%%MatrixMarket matrix coordinate pattern"; if (this->matrix_type == _symmetric) outfile << " symmetric"; else outfile << " general"; outfile << std::endl; - outfile << this->m << " " << this->m << " " << this->nb_non_zero << std::endl; + outfile << this->m << " " << this->m << " " << this->nb_non_zero + << std::endl; } for (auto p : arange(communicator.getNbProc())) { // write content if (communicator.whoAmI() == p) { for (UInt i = 0; i < this->nb_non_zero; ++i) { outfile << this->irn.storage()[i] << " " << this->jcn.storage()[i] << " 1" << std::endl; } } communicator.barrier(); } outfile.close(); AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ void SparseMatrixAIJ::saveMatrix(const std::string & filename) const { AKANTU_DEBUG_IN(); auto & comm = this->communicator; // open and set the properties of the stream std::ofstream outfile; if (0 == comm.whoAmI()) { outfile.open(filename.c_str()); } else { outfile.open(filename.c_str(), std::ios_base::app); } outfile.precision(std::numeric_limits::digits10); // write header decltype(nb_non_zero) nnz = this->nb_non_zero; comm.allReduce(nnz); if (comm.whoAmI() == 0) { outfile << "%%MatrixMarket matrix coordinate real"; if (this->matrix_type == _symmetric) outfile << " symmetric"; else outfile << " general"; outfile << std::endl; outfile << this->m << " " << this->n << " " << nnz << std::endl; } for (auto p : arange(comm.getNbProc())) { // write content if (comm.whoAmI() == p) { for (UInt i = 0; i < this->nb_non_zero; ++i) { outfile << this->irn(i) << " " << this->jcn(i) << " " << this->a(i) << std::endl; } } comm.barrier(); } // time to end outfile.close(); AKANTU_DEBUG_OUT(); } -/* -------------------------------------------------------------------------- */ -void 
SparseMatrixAIJ::matVecMul(const Array & x, Array & y, - Real alpha, Real beta) const { - AKANTU_DEBUG_IN(); - - // y *= beta; - - // auto i_it = this->irn.begin(); - // auto j_it = this->jcn.begin(); - - // auto a_it = this->a.begin(); - // auto a_end = this->a.end(); - - // auto x_it = x.begin_reinterpret(x.size() * x.getNbComponent()); - // auto y_it = y.begin_reinterpret(x.size() * x.getNbComponent()); - - // for (; a_it != a_end; ++i_it, ++j_it, ++a_it) { - // Int i = this->dof_manager.globalToLocalEquationNumber(*i_it - 1); - // Int j = this->dof_manager.globalToLocalEquationNumber(*j_it - 1); - // const Real & A = *a_it; - - // y_it[i] += alpha * A * x_it[j]; - - // if ((this->matrix_type == _symmetric) && (i != j)) - // y_it[j] += alpha * A * x_it[i]; - // } - - // if (this->dof_manager.hasSynchronizer()) - // this->dof_manager.getSynchronizer().reduceSynchronizeArray(y); - - AKANTU_DEBUG_OUT(); -} - /* -------------------------------------------------------------------------- */ void SparseMatrixAIJ::copyContent(const SparseMatrix & matrix) { AKANTU_DEBUG_IN(); const auto & mat = aka::as_type(matrix); AKANTU_DEBUG_ASSERT(nb_non_zero == mat.getNbNonZero(), "The to matrix don't have the same profiles"); a = mat.getA(); this->value_release++; AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ void SparseMatrixAIJ::copyProfile(const SparseMatrix & other) { auto & A = aka::as_type(other); SparseMatrix::clearProfile(); this->irn.copy(A.irn); this->jcn.copy(A.jcn); this->irn_jcn_k.clear(); UInt i, j, k; for (auto && data : enumerate(irn, jcn)) { std::tie(k, i, j) = data; this->irn_jcn_k[this->key(i - 1, j - 1)] = k; } this->nb_non_zero = this->irn.size(); this->a.resize(this->nb_non_zero); this->a.set(0.); this->m = A.m; this->n = A.n; this->profile_release = A.profile_release; this->value_release++; } /* -------------------------------------------------------------------------- */ template void SparseMatrixAIJ::addMeToTemplated(MatrixType & B, Real alpha) const { UInt i, j; Real A_ij; for (auto && tuple : zip(irn, jcn, a)) { std::tie(i, j, A_ij) = tuple; B.add(i - 1, j - 1, alpha * A_ij); } } /* -------------------------------------------------------------------------- */ void SparseMatrixAIJ::addMeTo(SparseMatrix & B, Real alpha) const { if (aka::is_of_type(B)) { this->addMeToTemplated(aka::as_type(B), alpha); } else { // this->addMeToTemplated(*this, alpha); } } /* -------------------------------------------------------------------------- */ void SparseMatrixAIJ::mul(Real alpha) { this->a *= alpha; this->value_release++; } /* -------------------------------------------------------------------------- */ void SparseMatrixAIJ::clear() { a.set(0.); this->value_release++; } } // namespace akantu diff --git a/src/solver/sparse_matrix_aij.hh b/src/solver/sparse_matrix_aij.hh index 2d88c77e3..7808c137b 100644 --- a/src/solver/sparse_matrix_aij.hh +++ b/src/solver/sparse_matrix_aij.hh @@ -1,199 +1,203 @@ /** * @file sparse_matrix_aij.hh * * @author Nicolas Richart * * @date creation: Mon Dec 13 2010 * @date last modification: Wed Nov 08 2017 * * @brief AIJ implementation of the SparseMatrix (this the format used by * Mumps) * * @section LICENSE * * Copyright (©) 2010-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free 
* Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "aka_array.hh" #include "aka_common.hh" #include "sparse_matrix.hh" /* -------------------------------------------------------------------------- */ #include /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_SPARSE_MATRIX_AIJ_HH__ #define __AKANTU_SPARSE_MATRIX_AIJ_HH__ namespace akantu { class DOFManagerDefault; class TermsToAssemble; } // namespace akantu namespace akantu { class SparseMatrixAIJ : public SparseMatrix { /* ------------------------------------------------------------------------ */ /* Constructors/Destructors */ /* ------------------------------------------------------------------------ */ public: SparseMatrixAIJ(const Communicator & communicator, const UInt m, const UInt n, const SizeType & size_type, const MatrixType & matrix_type, const ID & id = "sparse_matrix_aij"); SparseMatrixAIJ(const SparseMatrixAIJ & matrix, const ID & id = "sparse_matrix_aij"); ~SparseMatrixAIJ() override; /* ------------------------------------------------------------------------ */ /* Methods */ /* ------------------------------------------------------------------------ */ public: /// remove the existing profile inline void clearProfile() override; /// add a non-zero element inline UInt add(Int i, Int j); /// set the matrix to 0 void clear() override; /// assemble a local matrix in the sparse one inline void add(Int i, Int j, Real value); /// add a block of values inline void addValues(const Vector & is, const Vector & js, const Matrix & values, MatrixType values_type); /// set the size of the matrix void resize(UInt size) { this->m = size; this->n = size; } /// modify the matrix to "remove" the blocked dof // void applyBoundary(Real block_val = 1.) override; /// save the profil in a file using the MatrixMarket file format void saveProfile(const std::string & filename) const override; /// save the matrix in a file using the MatrixMarket file format void saveMatrix(const std::string & filename) const override; /// copy assuming the profile are the same virtual void copyContent(const SparseMatrix & matrix); /// multiply the matrix by a scalar void mul(Real alpha); /// add matrix *this += B // virtual void add(const SparseMatrix & matrix, Real alpha); - void matVecMul(const Array & x, Array & y, Real alpha = 1., - Real beta = 0.) const; + template + void matVecMulLocal(const Array & x, Array & y, FuncIdX && id_x, + FuncIdY && id_y, Real alpha = 1., Real beta = 0.) const; + + // void matVecMul(const Array & x, Array & y, Real alpha = 1., + // Real beta = 0.) 
const; /// copy the profile of another matrix void copyProfile(const SparseMatrix & other); /* ------------------------------------------------------------------------ */ /// accessor to A_{ij} - if (i, j) not present it returns 0 inline Real operator()(Int i, Int j) const; /// accessor to A_{ij} - if (i, j) not present it fails, (i, j) should be /// first added to the profile inline Real & operator()(Int i, Int j); protected: /// This is the revert of add B += \alpha * *this; void addMeTo(SparseMatrix & B, Real alpha) const override; inline void addSymmetricValuesToSymmetric(const Vector & is, const Vector & js, const Matrix & values); inline void addUnsymmetricValuesToSymmetric(const Vector & is, const Vector & js, const Matrix & values); inline void addValuesToUnsymmetric(const Vector & is, const Vector & js, const Matrix & values); private: /// This is just to inline the addToMatrix function template void addMeToTemplated(MatrixType & B, Real alpha) const; /* ------------------------------------------------------------------------ */ /* Accessors */ /* ------------------------------------------------------------------------ */ public: AKANTU_GET_MACRO(IRN, irn, const Array &); AKANTU_GET_MACRO(JCN, jcn, const Array &); AKANTU_GET_MACRO(A, a, const Array &); /// The release changes at each call of a function that changes the profile, /// it in increasing but could overflow so it should be checked as /// (my_release != release) and not as (my_release < release) AKANTU_GET_MACRO(ProfileRelease, profile_release, UInt); AKANTU_GET_MACRO(ValueRelease, value_release, UInt); UInt getRelease() const { return value_release; } protected: using KeyCOO = std::pair; using coordinate_list_map = std::unordered_map; /// get the pair corresponding to (i, j) inline KeyCOO key(Int i, Int j) const { if (this->matrix_type == _symmetric && (i > j)) return std::make_pair(j, i); return std::make_pair(i, j); } /* ------------------------------------------------------------------------ */ /* Class Members */ /* ------------------------------------------------------------------------ */ protected: /// row indexes Array irn; /// column indexes Array jcn; /// values : A[k] = Matrix[irn[k]][jcn[k]] Array a; /// Profile release UInt profile_release{1}; /// Value release UInt value_release{1}; /// map for (i, j) -> k correspondence coordinate_list_map irn_jcn_k; }; } // namespace akantu /* -------------------------------------------------------------------------- */ /* inline functions */ /* -------------------------------------------------------------------------- */ #include "sparse_matrix_aij_inline_impl.cc" #endif /* __AKANTU_SPARSE_MATRIX_AIJ_HH__ */ diff --git a/src/solver/sparse_matrix_aij_inline_impl.cc b/src/solver/sparse_matrix_aij_inline_impl.cc index f49b9c340..9d20a0bc8 100644 --- a/src/solver/sparse_matrix_aij_inline_impl.cc +++ b/src/solver/sparse_matrix_aij_inline_impl.cc @@ -1,190 +1,224 @@ /** * @file sparse_matrix_aij_inline_impl.cc * * @author Nicolas Richart * * @date creation: Fri Aug 21 2015 * @date last modification: Wed Nov 08 2017 * * @brief Implementation of inline functions of SparseMatrixAIJ * * @section LICENSE * * Copyright (©) 2015-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * 
later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "sparse_matrix_aij.hh" +#include "aka_iterators.hh" +/* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_SPARSE_MATRIX_AIJ_INLINE_IMPL_CC__ #define __AKANTU_SPARSE_MATRIX_AIJ_INLINE_IMPL_CC__ namespace akantu { inline UInt SparseMatrixAIJ::add(Int i, Int j) { KeyCOO jcn_irn = this->key(i, j); auto it = this->irn_jcn_k.find(jcn_irn); if (!(it == this->irn_jcn_k.end())) return it->second; - AKANTU_DEBUG_ASSERT(i + 1 > this->m, "I is outside the matrix [" + AKANTU_DEBUG_ASSERT(i < this->m, "I is outside the matrix [" << i + 1 << " > " << this->m << "]"); - AKANTU_DEBUG_ASSERT(j + 1 > this->n, "J is outside the matrix [" + AKANTU_DEBUG_ASSERT(j < this->n, "J is outside the matrix [" << j + 1 << " > " << this->n << "]"); this->irn.push_back(i + 1); this->jcn.push_back(j + 1); this->a.push_back(0.); this->irn_jcn_k[jcn_irn] = this->nb_non_zero; (this->nb_non_zero)++; this->profile_release++; this->value_release++; return (this->nb_non_zero - 1); } /* -------------------------------------------------------------------------- */ inline void SparseMatrixAIJ::clearProfile() { SparseMatrix::clearProfile(); this->irn_jcn_k.clear(); this->irn.resize(0); this->jcn.resize(0); this->a.resize(0); this->m = 0; this->n = 0; this->nb_non_zero = 0; this->profile_release++; this->value_release++; } +/* -------------------------------------------------------------------------- */ +template +void SparseMatrixAIJ::matVecMulLocal(const Array & x, Array & y, + FuncIdX && id_x, FuncIdY && id_y, + Real alpha, Real beta) const { + AKANTU_DEBUG_IN(); + + y *= beta; + + auto x_it = make_view(x).begin(); + auto y_it = make_view(y).begin(); + + for (auto && data : zip(irn, jcn, a)) { + auto i = id_y(std::get<0>(data) - 1); + auto j = id_x(std::get<1>(data) - 1); + auto && A = std::get<2>(data); + + y_it[i] += alpha * A * x_it[j]; + + if ((this->matrix_type == _symmetric) && (i != j)) + y_it[j] += alpha * A * x_it[i]; + } + + AKANTU_DEBUG_OUT(); +} + /* -------------------------------------------------------------------------- */ inline void SparseMatrixAIJ::add(Int i, Int j, Real value) { UInt idx = this->add(i, j); this->a(idx) += value; this->value_release++; } /* -------------------------------------------------------------------------- */ inline Real SparseMatrixAIJ::operator()(Int i, Int j) const { KeyCOO jcn_irn = this->key(i, j); auto irn_jcn_k_it = this->irn_jcn_k.find(jcn_irn); if (irn_jcn_k_it == this->irn_jcn_k.end()) return 0.; return this->a(irn_jcn_k_it->second); } /* -------------------------------------------------------------------------- */ inline Real & SparseMatrixAIJ::operator()(Int i, Int j) { KeyCOO jcn_irn = this->key(i, j); auto irn_jcn_k_it = this->irn_jcn_k.find(jcn_irn); AKANTU_DEBUG_ASSERT(irn_jcn_k_it != this->irn_jcn_k.end(), "Couple (i,j) = (" << i << "," << j << ") does not exist
in the profile"); // it may change the profile so it is considered as a change this->value_release++; return this->a(irn_jcn_k_it->second); } /* -------------------------------------------------------------------------- */ inline void SparseMatrixAIJ::addSymmetricValuesToSymmetric(const Vector & is, const Vector & js, const Matrix & values) { for (UInt i = 0; i < values.rows(); ++i) { Int c_irn = is(i); if (c_irn < this->m) { for (UInt j = i; j < values.cols(); ++j) { Int c_jcn = js(j); if (c_jcn < this->n) { operator()(c_irn, c_jcn) += values(i, j); } } } } } /* -------------------------------------------------------------------------- */ inline void SparseMatrixAIJ::addUnsymmetricValuesToSymmetric(const Vector & is, const Vector & js, const Matrix & values) { for (UInt i = 0; i < values.rows(); ++i) { Int c_irn = is(i); if (c_irn < this->m) { for (UInt j = 0; j < values.cols(); ++j) { Int c_jcn = js(j); if (c_jcn < this->n) { if (c_jcn >= c_irn) { operator()(c_irn, c_jcn) += values(i, j); } } } } } } /* -------------------------------------------------------------------------- */ inline void SparseMatrixAIJ::addValuesToUnsymmetric(const Vector & is, const Vector & js, const Matrix & values) { for (UInt i = 0; i < values.rows(); ++i) { Int c_irn = is(i); if (c_irn < this->m) { for (UInt j = 0; j < values.cols(); ++j) { Int c_jcn = js(j); if (c_jcn < this->n) { operator()(c_irn, c_jcn) += values(i, j); } } } } } /* -------------------------------------------------------------------------- */ inline void SparseMatrixAIJ::addValues(const Vector & is, const Vector & js, const Matrix & values, MatrixType values_type) { if (getMatrixType() == _symmetric) if (values_type == _symmetric) this->addSymmetricValuesToSymmetric(is, js, values); else this->addUnsymmetricValuesToSymmetric(is, js, values); else this->addValuesToUnsymmetric(is, js, values); } } // namespace akantu #endif /* __AKANTU_SPARSE_MATRIX_AIJ_INLINE_IMPL_CC__ */ diff --git a/src/solver/sparse_matrix_petsc.cc b/src/solver/sparse_matrix_petsc.cc index 266b3a66c..dd64cfc7c 100644 --- a/src/solver/sparse_matrix_petsc.cc +++ b/src/solver/sparse_matrix_petsc.cc @@ -1,235 +1,244 @@ /** * @file sparse_matrix_petsc.cc * * @author Aurelia Isabel Cuba Ramos * * @date creation: Mon Dec 13 2010 * @date last modification: Sat Feb 03 2018 * * @brief Implementation of PETSc matrix class * * @section LICENSE * * Copyright (©) 2010-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "sparse_matrix_petsc.hh" #include "mpi_communicator_data.hh" #include "vector_petsc.hh" /* -------------------------------------------------------------------------- */ namespace akantu { /* -------------------------------------------------------------------------- */ -SparseMatrixPETSc::SparseMatrixPETSc(const Communicator & communicator, const UInt m, - const UInt n, const SizeType & size_type, +SparseMatrixPETSc::SparseMatrixPETSc(const Communicator & communicator, + const UInt m, const UInt n, + const SizeType & size_type, const MatrixType & matrix_type, const ID & id) : SparseMatrix(communicator, m, n, SizeType::_global, matrix_type, id) { AKANTU_DEBUG_IN(); const auto & mpi_data = aka::as_type(communicator.getCommunicatorData()); mpi_comm = mpi_data.getMPICommunicator(); PETSc_call(MatCreate, mpi_comm, &mat); detail::PETScSetName(mat, id); m_local = m; n_local = n; switch (size_type) { case SizeType::_local: { PETSc_call(MatSetSizes, mat, m_local, n_local, PETSC_DETERMINE, PETSC_DETERMINE); break; } case SizeType::_global: { PETSc_call(MatSetSizes, mat, PETSC_DECIDE, PETSC_DECIDE, m, n); break; } } PETSc_call(MatSetFromOptions, mat); PETSc_call(MatSetUp, mat); PETSc_call(MatSetOption, mat, MAT_ROW_ORIENTED, PETSC_TRUE); PETSc_call(MatSetOption, mat, MAT_NEW_NONZERO_LOCATIONS, PETSC_TRUE); if (matrix_type == _symmetric) { PETSc_call(MatSetOption, mat, MAT_SYMMETRIC, PETSC_TRUE); } switch (size_type) { case SizeType::_local: { PETSc_call(MatGetSize, mat, &(this->m), &(this->n)); break; } case SizeType::_global: { PETSc_call(MatGetLocalSize, mat, &m_local, &n_local); break; } } AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ SparseMatrixPETSc::SparseMatrixPETSc(const SparseMatrixPETSc & matrix, const ID & id) : SparseMatrix(matrix, id) { PETSc_call(MatDuplicate, matrix.mat, MAT_COPY_VALUES, &mat); detail::PETScSetName(mat, id); } /* -------------------------------------------------------------------------- */ SparseMatrixPETSc::~SparseMatrixPETSc() { AKANTU_DEBUG_IN(); if (mat) PETSc_call(MatDestroy, &mat); AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ /** * Method to save the nonzero pattern and the values stored at each position * @param filename name of the file in which the information will be stored */ void SparseMatrixPETSc::saveMatrix(const std::string & filename) const { AKANTU_DEBUG_IN(); /// create Petsc viewer PetscViewer viewer; PETSc_call(PetscViewerASCIIOpen, mpi_comm, filename.c_str(), &viewer); PETSc_call(PetscViewerPushFormat, viewer, PETSC_VIEWER_ASCII_MATRIXMARKET); PETSc_call(MatView, mat, viewer); PETSc_call(PetscViewerPopFormat, viewer); PETSc_call(PetscViewerDestroy, &viewer); AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ /// Equivalent of *gemv in blas void SparseMatrixPETSc::matVecMul(const VectorPETSc & x, VectorPETSc & y, Real alpha, Real beta) const { + matVecMul(x.getVec(), y.getVec(), alpha, beta); +} + +void SparseMatrixPETSc::matVecMul(const Vec & x, Vec & y, Real alpha, + Real beta) const { // y = alpha A x + beta y - VectorPETSc w(x, this->id + ":tmp"); + Vec w; + PETSc_call(VecDuplicate, x, &w); + detail::PETScSetName(w, this->id + ":tmp"); // w = A x if (release == 0) { PETSc_call(VecZeroEntries, w); } else { PETSc_call(MatMult, mat, x, w); } if (alpha != 1.)
{ // w = alpha w PETSc_call(VecScale, w, alpha); } // y = w + beta y PETSc_call(VecAYPX, y, beta, w); + PETSc_call(VecDestroy, &w); } /* -------------------------------------------------------------------------- */ void SparseMatrixPETSc::addMeToImpl(SparseMatrixPETSc & B, Real alpha) const { - PETSc_call(MatAXPY, B.mat, alpha, mat, SAME_NONZERO_PATTERN); + if (release != 0) + PETSc_call(MatAXPY, B.mat, alpha, mat, SAME_NONZERO_PATTERN); B.release++; } /* -------------------------------------------------------------------------- */ /** * Method to add another PETSc matrix to this PETSc matrix * @param matrix PETSc matrix to be added * @param alpha the factor specifying how many times the matrix should be added */ void SparseMatrixPETSc::addMeTo(SparseMatrix & B, Real alpha) const { if (aka::is_of_type(B)) { auto & B_petsc = aka::as_type(B); this->addMeToImpl(B_petsc, alpha); } else { AKANTU_TO_IMPLEMENT(); // this->addMeToTemplated(*this, alpha); } } /* -------------------------------------------------------------------------- */ /** * MatSetValues() generally caches the values. The matrix is ready to * use only after MatAssemblyBegin() and MatAssemblyEnd() have been * called. (http://www.mcs.anl.gov/petsc/) */ void SparseMatrixPETSc::applyModifications() { this->beginAssembly(); this->endAssembly(); } /* -------------------------------------------------------------------------- */ void SparseMatrixPETSc::beginAssembly() { PETSc_call(MatAssemblyBegin, mat, MAT_FINAL_ASSEMBLY); } /* -------------------------------------------------------------------------- */ void SparseMatrixPETSc::endAssembly() { PETSc_call(MatAssemblyEnd, mat, MAT_FINAL_ASSEMBLY); PETSc_call(MatSetOption, mat, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE); this->release++; } /* -------------------------------------------------------------------------- */ void SparseMatrixPETSc::copyProfile(const SparseMatrix & other) { auto & A = aka::as_type(other); MatDestroy(&mat); MatDuplicate(A.mat, MAT_DO_NOT_COPY_VALUES, &mat); } /* -------------------------------------------------------------------------- */ void SparseMatrixPETSc::mul(Real alpha) { PETSc_call(MatScale, mat, alpha); this->release++; } /* -------------------------------------------------------------------------- */ void SparseMatrixPETSc::clear() { PETSc_call(MatZeroEntries, mat); this->release++; } /* -------------------------------------------------------------------------- */ void SparseMatrixPETSc::clearProfile() { SparseMatrix::clearProfile(); PETSc_call(MatResetPreallocation, mat); PETSc_call(MatSetOption, mat, MAT_NEW_NONZERO_LOCATIONS, PETSC_TRUE); // PETSc_call(MatSetOption, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE); // PETSc_call(MatSetOption, MAT_NEW_NONZERO_ALLOCATIONS, PETSC_TRUE); // PETSc_call(MatSetOption, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE); clear(); } /* -------------------------------------------------------------------------- */ } // namespace akantu diff --git a/src/solver/sparse_matrix_petsc.hh b/src/solver/sparse_matrix_petsc.hh index 57aa5d169..992e07ec2 100644 --- a/src/solver/sparse_matrix_petsc.hh +++ b/src/solver/sparse_matrix_petsc.hh @@ -1,162 +1,165 @@ /** * @file sparse_matrix_petsc.hh * * @author Aurelia Isabel Cuba Ramos * * @date creation: Fri Jun 18 2010 * @date last modification: Tue Feb 06 2018 * * @brief Interface for PETSc matrices * * @section LICENSE * * Copyright (©) 2010-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or
modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_PETSC_MATRIX_HH__ #define __AKANTU_PETSC_MATRIX_HH__ /* -------------------------------------------------------------------------- */ #include "sparse_matrix.hh" /* -------------------------------------------------------------------------- */ #include /* -------------------------------------------------------------------------- */ namespace akantu { class VectorPETSc; } namespace akantu { class SparseMatrixPETSc : public SparseMatrix { /* ------------------------------------------------------------------------ */ /* Constructors/Destructors */ /* ------------------------------------------------------------------------ */ public: SparseMatrixPETSc(const Communicator & communicator, const UInt m = 0, const UInt n = 0, const SizeType & size_type = SizeType::_local, const MatrixType & matrix_type = _unsymmetric, const ID & id = "sparse_matrix_petsc"); SparseMatrixPETSc(const SparseMatrixPETSc & matrix, const ID & id = "sparse_matrix_petsc"); virtual ~SparseMatrixPETSc(); /* ------------------------------------------------------------------------ */ /* Methods */ /* ------------------------------------------------------------------------ */ public: /// set the matrix to 0 void clear(); void clearProfile() override; /// add a non-zero element to the profile inline UInt add(UInt i, UInt j); /// assemble a local matrix in the sparse one inline void add(UInt i, UInt j, Real value); inline void addLocal(UInt i, UInt j); inline void addLocal(UInt i, UInt j, Real val); template void addLocal(const Rows & rows, const Cols & cols, const Values & vals); /// add a block of values template void addValues(const Rows & is, const Cols & js, const Values & values, MatrixType values_type); /// save the profil in a file using the MatrixMarket file format // void saveProfile(__attribute__((unused)) const std::string &) const // override { // AKANTU_DEBUG_TO_IMPLEMENT(); // } /// save the matrix in a file using the MatrixMarket file format void saveMatrix(const std::string & filename) const; /// multiply the matrix by a coefficient void mul(Real alpha); /// Equivalent of *gemv in blas void matVecMul(const VectorPETSc & x, VectorPETSc & y, Real alpha = 1., Real beta = 0.) const; + void matVecMul(const Vec & x, Vec & y, Real alpha = 1., + Real beta = 0.) 
const; + /// copy the profile of a matrix void copyProfile(const SparseMatrix & other); void applyModifications(); // void resize(); protected: /// This is the revert of add B += \alpha * *this; void addMeTo(SparseMatrix & B, Real alpha) const override; /// This is the specific implementation void addMeToImpl(SparseMatrixPETSc & B, Real alpha) const; void beginAssembly(); void endAssembly(); /* ------------------------------------------------------------------------ */ /* Accessors */ /* ------------------------------------------------------------------------ */ public: /// return the values at potition i, j virtual inline Real operator()(__attribute__((unused)) UInt i, __attribute__((unused)) UInt j) const { AKANTU_TO_IMPLEMENT(); } /// return the values at potition i, j virtual inline Real & operator()(__attribute__((unused)) UInt i, __attribute__((unused)) UInt j) { AKANTU_TO_IMPLEMENT(); } UInt getRelease() const { return release; }; operator Mat &() { return mat; } operator const Mat &() const { return mat; } AKANTU_GET_MACRO(Mat, mat, const Mat &); AKANTU_GET_MACRO_NOT_CONST(Mat, mat, Mat &); /* ------------------------------------------------------------------------ */ /* Class Members */ /* ------------------------------------------------------------------------ */ protected: /// store the PETSc matrix Mat mat; /// matrix release UInt release{0}; /// communicator used MPI_Comm mpi_comm; }; } // namespace akantu #include "sparse_matrix_petsc_tmpl.hh" #endif /* __AKANTU_PETSC_MATRIX_HH__ */ diff --git a/test/test_solver/test_sparse_matrix_product.cc b/test/test_solver/test_sparse_matrix_product.cc index 7bf4e30a9..fe088ee43 100644 --- a/test/test_solver/test_sparse_matrix_product.cc +++ b/test/test_solver/test_sparse_matrix_product.cc @@ -1,122 +1,122 @@ /** * @file test_sparse_matrix_product.cc * * @author Nicolas Richart * * @date creation: Fri Jun 17 2011 * @date last modification: Wed Nov 08 2017 * * @brief test the matrix vector product in parallel * * @section LICENSE * * Copyright (©) 2010-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne) * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include /* -------------------------------------------------------------------------- */ #include "aka_common.hh" #include "dof_synchronizer.hh" #include "element_synchronizer.hh" #include "mesh.hh" #include "mesh_partition_scotch.hh" #include "sparse_matrix_aij.hh" /* -------------------------------------------------------------------------- */ using namespace akantu; /* -------------------------------------------------------------------------- */ int main(int argc, char * argv[]) { initialize(argc, argv); const UInt spatial_dimension = 2; const UInt nb_dof = 2; const auto & comm = Communicator::getStaticCommunicator(); Int psize = comm.getNbProc(); Int prank = comm.whoAmI(); Mesh mesh(spatial_dimension); mesh.read("bar.msh"); mesh.distribute(); UInt nb_nodes = mesh.getNbNodes(); DOFManagerDefault dof_manager(mesh, "test_dof_manager"); Array test_synchronize(nb_nodes, nb_dof, "Test vector"); dof_manager.registerDOFs("test_synchronize", test_synchronize, _dst_nodal); if (prank == 0) std::cout << "Creating a SparseMatrix" << std::endl; - auto & A = dynamic_cast( + auto & A = dynamic_cast( dof_manager.getNewMatrix("A", _symmetric)); Array dof_vector(nb_nodes, nb_dof, "vector"); if (prank == 0) std::cout << "Filling the matrix" << std::endl; for (UInt i = 0; i < nb_nodes * nb_dof; ++i) { if (dof_manager.isLocalOrMasterDOF(i)) A.add(i, i, 2.); } std::stringstream str; str << "Matrix_" << prank << ".mtx"; A.saveMatrix(str.str()); for (UInt n = 0; n < nb_nodes; ++n) { for (UInt d = 0; d < nb_dof; ++d) { dof_vector(n, d) = 1.; } } Array dof_vector_tmp(dof_vector); if (prank == 0) std::cout << "Computing x = A * x" << std::endl; A.matVecMul(dof_vector, dof_vector_tmp); dof_vector.copy(dof_vector_tmp); auto & sync = dynamic_cast(dof_manager).getSynchronizer(); if (prank == 0) std::cout << "Gathering the results on proc 0" << std::endl; if (psize > 1) { if (prank == 0) { Array gathered; sync.gather(dof_vector, gathered); debug::setDebugLevel(dblTest); std::cout << gathered << std::endl; debug::setDebugLevel(dblWarning); } else { sync.gather(dof_vector); } } else { debug::setDebugLevel(dblTest); std::cout << dof_vector << std::endl; debug::setDebugLevel(dblWarning); } finalize(); return 0; }
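
Note on the test above: this patch drops SparseMatrixAIJ::matVecMul (the declaration is commented out in sparse_matrix_aij.hh and the definition removed from sparse_matrix_aij.cc) in favour of the templated matVecMulLocal, while test_sparse_matrix_product.cc keeps calling A.matVecMul(dof_vector, dof_vector_tmp), which appears to have no matching overload anymore. A minimal sketch of how that product could be expressed against the new interface follows; the helper name localProduct and the identity lambdas are illustrative assumptions, not part of this patch.

#include "aka_array.hh"
#include "sparse_matrix_aij.hh"

namespace akantu {
// Sketch only: y = alpha * A * x + beta * y through the new templated
// interface. The lambdas map the 0-based COO row/column indices to positions
// in y and x; the identity mapping assumes matrix and vector numberings
// coincide (serial case). A distributed caller would translate global to
// local equation numbers instead, e.g. through its DOF manager.
inline void localProduct(const SparseMatrixAIJ & A, const Array<Real> & x,
                         Array<Real> & y, Real alpha = 1., Real beta = 0.) {
  auto identity = [](auto && index) { return index; };
  A.matVecMulLocal(x, y, identity, identity, alpha, beta);
}
} // namespace akantu

Passing the index mappings as functors keeps SparseMatrixAIJ independent of the DOF manager (the removed matVecMul called dof_manager.globalToLocalEquationNumber itself), at the price of making every caller responsible for supplying a consistent numbering.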
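
The release counters documented in sparse_matrix_aij.hh are meant to be compared with != rather than <, since they only grow until they wrap around. A small sketch of that usage, where the ValueReleaseWatcher struct and its member names are illustrative assumptions rather than Akantu API:

#include "sparse_matrix_aij.hh"

namespace akantu {
// Sketch: recompute a cached quantity only when the matrix values changed.
// getRelease() returns the value release counter; compare with != because
// the counter may overflow and wrap, which would break an ordering test.
struct ValueReleaseWatcher {
  UInt last_release{UInt(-1)};

  bool needsUpdate(const SparseMatrixAIJ & A) {
    auto release = A.getRelease();
    if (release == last_release)
      return false;
    last_release = release;
    return true;
  }
};
} // namespace akantu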