diff --git a/python/wrap/model.cpp b/python/wrap/model.cpp
index 7131198..01f0442 100644
--- a/python/wrap/model.cpp
+++ b/python/wrap/model.cpp
@@ -1,520 +1,532 @@
/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 *
 * Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
 * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 * Copyright (©) 2020-2022 Lucas Frérot
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */
/* -------------------------------------------------------------------------- */
#include "model.hh"
#include "adhesion_functional.hh"
#include "elastic_functional.hh"
#include "field_container.hh"
#include "functional.hh"
#include "integral_operator.hh"
#include "model_dumper.hh"
#include "model_extensions.hh"
#include "model_factory.hh"
#include "numpy.hh"
#include "residual.hh"
#include "wrap.hh"
#include
#include
#include
/* -------------------------------------------------------------------------- */
namespace tamaas {
namespace wrap {

using namespace py::literals;

struct model_operator_accessor {
  Model& m;

  decltype(auto) get(const std::string& name) {
    return m.getIntegralOperator(name);
  }
};

template
std::unique_ptr> instanciateFromNumpy(numpy& num) {
  std::unique_ptr> result = nullptr;

  switch (num.ndim()) {
  case 2:
    result = std::make_unique>>(num);
    return result;
  case 3:
    result = std::make_unique>>(num);
    return result;
  case 4:
    result = std::make_unique>>(num);
    return result;
  default:
    TAMAAS_EXCEPTION("instanciateFromNumpy expects the last dimension of numpy "
                     "array to be the number of components");
  }
}

void wrapFieldContainer(py::module& mod) {
  py::class_(mod, "FieldContainer")
      .def(py::init<>())
      .def("__getitem__", &FieldContainer::at,
           py::return_value_policy::reference_internal)
      .def(
          "__setitem__",
-         [](FieldContainer& f, std::string key, numpy grid) {
+         [](FieldContainer& f, const std::string& key, numpy grid) {
+           f[key] = instanciateFromNumpy(grid);
+         },
+         py::keep_alive<1, 3>())
+     .def(
+         "__setitem__",
+         [](FieldContainer& f, const std::string& key, numpy grid) {
+           f[key] = instanciateFromNumpy(grid);
+         },
+         py::keep_alive<1, 3>())
+     .def(
+         "__setitem__",
+         [](FieldContainer& f, const std::string& key, numpy grid) {
            f[key] = instanciateFromNumpy(grid);
          },
          py::keep_alive<1, 3>())
      .def(
          "__contains__",
-         [](const FieldContainer& m, std::string key) {
+         [](const FieldContainer& m, const std::string& key) {
            const auto fields = m.fields();
            return std::find(fields.begin(), fields.end(), key) !=
                   fields.end();
          },
          "Test field existence")
      .def(
          "__iter__",
          [](const FieldContainer& m) {
            const auto& fields = m.fields_map();
            return py::make_key_iterator(fields.cbegin(), fields.cend());
          },
          py::keep_alive<0, 1>(), "Iterator on fields")
      /// Deprecated interface
      .def(
          "registerField",
-         [](Model& m, std::string name, numpy field) {
+         [](Model& m, const std::string& name, numpy field) {
            TAMAAS_DEPRECATE("registerField()", "the [] operator");
            m[name] = instanciateFromNumpy(field);
}, "field_name"_a, "field"_a, py::keep_alive<1, 3>()) .def( "getField", - [](const Model& m, std::string name) -> decltype(m.at(name)) { + [](const Model& m, const std::string& name) -> decltype(m.at(name)) { TAMAAS_DEPRECATE("getField()", "the [] operator"); return m.at(name); }, "field_name"_a, py::return_value_policy::reference_internal) .def( "getFields", [](const Model& m) { TAMAAS_DEPRECATE("getFields()", "list(model)"); return m.fields(); }, "Return fields list"); } /// Wrap functional classes void wrapFunctionals(py::module& mod) { py::class_, functional::wrap::PyFunctional> func(mod, "Functional"); func.def(py::init<>()) .def("computeF", &functional::Functional::computeF, "Compute functional value") .def("computeGradF", &functional::Functional::computeGradF, "Compute functional gradient"); py::class_( mod, "ElasticFunctionalPressure", func) .def(py::init&>()); py::class_(mod, "ElasticFunctionalGap", func) .def(py::init&>()); py::class_ adh(mod, "AdhesionFunctional", func); adh.def_property("parameters", &functional::AdhesionFunctional::getParameters, &functional::AdhesionFunctional::setParameters, "Parameters dictionary") .def("setParameters", [](functional::AdhesionFunctional& f, const std::map& m) { TAMAAS_DEPRECATE("setParameters()", "the parameters property"); f.setParameters(m); }); py::class_( mod, "ExponentialAdhesionFunctional", adh, "Potential of the form F = -γ·exp(-g/ρ)") .def(py::init&>(), "surface"_a); py::class_( mod, "MaugisAdhesionFunctional", adh, "Cohesive zone potential F = H(g - ρ)·γ/ρ") .def(py::init&>(), "surface"_a); py::class_( mod, "SquaredExponentialAdhesionFunctional", adh, "Potential of the form F = -γ·exp(-0.5·(g/ρ)²)") .def(py::init&>(), "surface"_a); } /// Wrap IntegralOperator void wrapIntegralOperator(py::module& mod) { - py::class_(mod, "IntegralOperator") + py::class_(mod, "IntegralOperator") .def("apply", [](IntegralOperator& op, numpy input, numpy output) { TAMAAS_DEPRECATE("apply()", "the () operator"); auto in = instanciateFromNumpy(input); auto out = instanciateFromNumpy(output); op.apply(*in, *out); }) .def(TAMAAS_DEPRECATE_ACCESSOR(getModel, IntegralOperator, "model"), py::return_value_policy::reference) .def(TAMAAS_DEPRECATE_ACCESSOR(getKind, IntegralOperator, "kind")) .def(TAMAAS_DEPRECATE_ACCESSOR(getType, IntegralOperator, "type")) .def( "__call__", [](IntegralOperator& op, numpy input, numpy output) { auto in = instanciateFromNumpy(input); auto out = instanciateFromNumpy(output); op.apply(*in, *out); }, "Apply the integral operator") .def("updateFromModel", &IntegralOperator::updateFromModel, "Resets internal persistent variables from the model") .def_property_readonly("kind", &IntegralOperator::getKind) .def_property_readonly("model", &IntegralOperator::getModel) .def_property_readonly("type", &IntegralOperator::getType) .def_property_readonly("shape", &IntegralOperator::matvecShape) .def( "matvec", [](const IntegralOperator& op, numpy X) -> GridBase { GridBaseNumpy x(X); auto y = op.matvec(x); return y; }, py::return_value_policy::move); py::enum_(mod, "integration_method", "Integration method used for the computation " "of volumetric Fourier operators") .value("linear", integration_method::linear, "No approximation error, O(N₁·N₂·N₃) time complexity, may cause " "float overflow/underflow") .value("cutoff", integration_method::cutoff, "Approximation, O(sqrt(N₁²+N₂²)·N₃²) time complexity, no " "overflow/underflow risk"); } /// Wrap BEEngine classes void wrapBEEngine(py::module& mod) { py::class_(mod, "BEEngine") .def("solveNeumann", 
&BEEngine::solveNeumann) .def("solveDirichlet", &BEEngine::solveDirichlet) .def("registerNeumann", &BEEngine::registerNeumann) .def("registerDirichlet", &BEEngine::registerDirichlet) .def(TAMAAS_DEPRECATE_ACCESSOR(getModel, BEEngine, "model"), py::return_value_policy::reference) .def_property_readonly("model", &BEEngine::getModel); } template void wrapModelTypeTrait(py::module& mod) { using trait = model_type_traits; py::class_(mod, trait::repr) .def_property_readonly_static( "dimension", [](const py::object&) { return trait::dimension; }, "Dimension of computational domain") .def_property_readonly_static( "components", [](const py::object&) { return trait::components; }, "Number of components of vector fields") .def_property_readonly_static( "boundary_dimension", [](const py::object&) { return trait::boundary_dimension; }, "Dimension of boundary of computational domain") .def_property_readonly_static( "voigt", [](const py::object&) { return trait::voigt; }, "Number of components of symmetrical tensor fields") .def_property_readonly_static( "indices", [](const py::object&) { return trait::indices; }); } /// Wrap Models void wrapModelClass(py::module& mod) { py::enum_(mod, "model_type") .value("basic_1d", model_type::basic_1d, "Normal contact with 1D interface") .value("basic_2d", model_type::basic_2d, "Normal contact with 2D interface") .value("surface_1d", model_type::surface_1d, "Normal & tangential contact with 1D interface") .value("surface_2d", model_type::surface_2d, "Normal & tangential contact with 2D interface") .value("volume_1d", model_type::volume_1d, "Contact with volumetric representation and 1D interface") .value("volume_2d", model_type::volume_2d, "Contact with volumetric representation and 2D interface"); auto trait_mod = mod.def_submodule("_type_traits"); wrapModelTypeTrait(trait_mod); wrapModelTypeTrait(trait_mod); wrapModelTypeTrait(trait_mod); wrapModelTypeTrait(trait_mod); wrapModelTypeTrait(trait_mod); wrapModelTypeTrait(trait_mod); py::class_(mod, "_model_operator_acessor") .def(py::init()) .def( "__getitem__", [](model_operator_accessor& acc, std::string name) { try { return acc.get(name); } catch (std::out_of_range&) { throw py::key_error(name); } }, py::return_value_policy::reference_internal) .def("__contains__", [](model_operator_accessor& acc, std::string key) { const auto ops = acc.m.getIntegralOperators(); return std::find(ops.begin(), ops.end(), key) != ops.end(); }) .def( "__iter__", [](const model_operator_accessor& acc) { const auto& ops = acc.m.getIntegralOperatorsMap(); return py::make_key_iterator(ops.cbegin(), ops.cend()); }, py::keep_alive<0, 1>()); py::class_(mod, "Model") .def(py::init(py::overload_cast&, const std::vector&>( &ModelFactory::createModel))) .def_property_readonly("type", &Model::getType) .def_property("E", &Model::getYoungModulus, &Model::setYoungModulus, "Young's modulus") .def_property("nu", &Model::getPoissonRatio, &Model::setPoissonRatio, "Poisson's ratio") .def_property_readonly("mu", &Model::getShearModulus, "Shear modulus") .def_property_readonly("E_star", &Model::getHertzModulus, "Contact (Hertz) modulus") .def_property_readonly("be_engine", &Model::getBEEngine, "Boundary element engine") .def( "setElasticity", [](Model& m, Real E, Real nu) { TAMAAS_DEPRECATE("setElasticity()", "the E and nu properties"); m.setElasticity(E, nu); }, "E"_a, "nu"_a) .def(TAMAAS_DEPRECATE_ACCESSOR(getHertzModulus, Model, "E_star")) .def(TAMAAS_DEPRECATE_ACCESSOR(getYoungModulus, Model, "E")) .def(TAMAAS_DEPRECATE_ACCESSOR(getShearModulus, Model, 
"mu")) .def(TAMAAS_DEPRECATE_ACCESSOR(getPoissonRatio, Model, "nu")) .def(TAMAAS_DEPRECATE_ACCESSOR(getTraction, Model, "traction"), py::return_value_policy::reference_internal) .def(TAMAAS_DEPRECATE_ACCESSOR(getDisplacement, Model, "displacement"), py::return_value_policy::reference_internal) .def(TAMAAS_DEPRECATE_ACCESSOR(getSystemSize, Model, "system_size")) .def(TAMAAS_DEPRECATE_ACCESSOR(getDiscretization, Model, "shape")) .def(TAMAAS_DEPRECATE_ACCESSOR(getBoundarySystemSize, Model, "boundary_system_size")) .def(TAMAAS_DEPRECATE_ACCESSOR(getBoundaryDiscretization, Model, "boundary_shape")) .def("solveNeumann", &Model::solveNeumann, "Solve surface tractions -> displacements") .def("solveDirichlet", &Model::solveDirichlet, "Solve surface displacemnts -> tractions") .def("dump", &Model::dump, "Write model data to registered dumpers") .def("addDumper", &Model::addDumper, "dumper"_a, py::keep_alive<1, 2>(), "Register a dumper") .def( "getBEEngine", [](Model& m) -> decltype(m.getBEEngine()) { TAMAAS_DEPRECATE("getBEEngine()", "the be_engine property"); return m.getBEEngine(); }, py::return_value_policy::reference_internal) .def( "getIntegralOperator", [](const Model& m, std::string name) { TAMAAS_DEPRECATE("getIntegralOperator()", "the operators property"); return m.getIntegralOperator(name); }, "operator_name"_a, py::return_value_policy::reference_internal) .def( "applyElasticity", [](Model& model, numpy stress, numpy strain) { auto out = instanciateFromNumpy(stress); auto in = instanciateFromNumpy(strain); model.applyElasticity(*out, *in); }, "Apply Hooke's law") // Python magic functions .def("__repr__", [](const Model& m) { std::stringstream ss; ss << m; return ss.str(); }) .def( "__copy__", [](const Model&) { throw std::runtime_error("__copy__ not implemented"); }, "Shallow copy of model. 
Not implemented.") .def( "__deepcopy__", [](const Model& m, py::dict) { return ModelFactory::createModel(m); }, "Deep copy of model.") .def_property_readonly("boundary_fields", &Model::getBoundaryFields) .def_property_readonly( "operators", [](Model& m) { return model_operator_accessor{m}; }, "Returns a dict-like object allowing access to the model's " "integral " "operators") // More python-like access to model properties .def_property_readonly("shape", &Model::getDiscretization, "Discretization (local in MPI environment)") .def_property_readonly("global_shape", &Model::getGlobalDiscretization, "Global discretization (in MPI environement)") .def_property_readonly("boundary_shape", &Model::getBoundaryDiscretization, "Number of points on boundary") .def_property_readonly("system_size", &Model::getSystemSize, "Size of physical domain") .def_property_readonly("boundary_system_size", &Model::getBoundarySystemSize, "Physical size of surface") .def_property_readonly("traction", (const GridBase& (Model::*)() const) & Model::getTraction, "Surface traction field") .def_property_readonly("displacement", (const GridBase& (Model::*)() const) & Model::getDisplacement, "Displacement field"); py::class_>( mod, "ModelDumper") .def(py::init<>()) .def("dump", &ModelDumper::dump, "model"_a, "Dump model") .def( "__lshift__", [](ModelDumper& dumper, Model& model) { dumper << model; }, "Dump model"); } /// Wrap factory for models void wrapModelFactory(py::module& mod) { py::class_(mod, "ModelFactory") .def_static( "createModel", py::overload_cast&, const std::vector&>( &ModelFactory::createModel), "model_type"_a, "system_size"_a, "global_discretization"_a, R"-(Create a new model of a given type, physical size and *global* discretization. :param model_type: the type of desired model :param system_size: the physical size of the domain in each direction :param global_discretization: number of points in each direction)-") .def_static("createModel", py::overload_cast(&ModelFactory::createModel), "model"_a, "Create a deep copy of a model.") .def_static("createResidual", &ModelFactory::createResidual, "model"_a, "sigma_y"_a, "hardening"_a = 0., R"-(Create an isotropic linear hardening residual. 

:param model: the model on which to define the residual
:param sigma_y: the (von Mises) yield stress
:param hardening: the hardening modulus)-")
      .def_static("registerVolumeOperators",
                  &ModelFactory::registerVolumeOperators, "model"_a,
                  "Register Boussinesq and Mindlin operators to model.")
      .def_static("setIntegrationMethod", &ModelFactory::setIntegrationMethod,
                  "operator"_a, "method"_a, "cutoff"_a,
                  "Set the integration method (linear or cutoff) for a volume "
                  "integral operator");
}

/// Wrap residual class
void wrapResidual(py::module& mod) {
  // TODO adapt to n-dim
  py::class_(mod, "Residual")
      .def(py::init())
      .def("computeResidual",
           [](Residual& res, numpy& x) {
             auto in = instanciateFromNumpy(x);
             res.computeResidual(*in);
           })
      .def("computeStress",
           [](Residual& res, numpy& x) {
             auto in = instanciateFromNumpy(x);
             res.computeStress(*in);
           })
      .def("updateState",
           [](Residual& res, numpy& x) {
             auto in = instanciateFromNumpy(x);
             res.updateState(*in);
           })
      .def("computeResidualDisplacement",
           [](Residual& res, numpy& x) {
             auto in = instanciateFromNumpy(x);
             res.computeResidualDisplacement(*in);
           })
      .def(
          "applyTangent",
          [](Residual& res, numpy& output, numpy& input,
             numpy& current_strain_inc) {
            auto out = instanciateFromNumpy(output);
            auto in = instanciateFromNumpy(input);
            auto inc = instanciateFromNumpy(current_strain_inc);
            res.applyTangent(*out, *in, *inc);
          },
          "output"_a, "input"_a, "current_strain_increment"_a)
      .def("getVector", &Residual::getVector,
           py::return_value_policy::reference_internal)
      .def("getPlasticStrain", &Residual::getPlasticStrain,
           py::return_value_policy::reference_internal)
      .def("getStress", &Residual::getStress,
           py::return_value_policy::reference_internal)
      .def("setIntegrationMethod", &Residual::setIntegrationMethod, "method"_a,
           "cutoff"_a = 1e-12)
      .def_property("yield_stress", &Residual::getYieldStress,
                    &Residual::setYieldStress)
      .def_property("hardening_modulus", &Residual::getHardeningModulus,
                    &Residual::setHardeningModulus)
      .def_property_readonly("model", &Residual::getModel);
}

void wrapModel(py::module& mod) {
  wrapFieldContainer(mod);
  wrapBEEngine(mod);
  wrapModelClass(mod);
  wrapModelFactory(mod);
  wrapFunctionals(mod);
  wrapResidual(mod);
  wrapIntegralOperator(mod);
}

}  // namespace wrap
}  // namespace tamaas
diff --git a/src/core/field_container.hh b/src/model/field_container.hh
similarity index 100%
rename from src/core/field_container.hh
rename to src/model/field_container.hh
diff --git a/src/model/integral_operator.hh b/src/model/integral_operator.hh
index 4cde60e..a879a86 100644
--- a/src/model/integral_operator.hh
+++ b/src/model/integral_operator.hh
@@ -1,96 +1,95 @@
/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 *
 * Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
 * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 * Copyright (©) 2020-2022 Lucas Frérot
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */
/* -------------------------------------------------------------------------- */
#ifndef INTEGRAL_OPERATOR_HH
#define INTEGRAL_OPERATOR_HH
/* -------------------------------------------------------------------------- */
#include "grid_base.hh"
#include "model_type.hh"
+#include "field_container.hh"
/* -------------------------------------------------------------------------- */
namespace tamaas {
/* -------------------------------------------------------------------------- */
class Model;

/**
 * @brief Generic class for integral operators
 */
-class IntegralOperator {
+class IntegralOperator : public FieldContainer {
public:
  /// Kind of operator
  enum kind { neumann, dirichlet, dirac };

public:
  /// Constructor
  IntegralOperator(Model* model) : model(model) {}

- /// Virtual destructor for subclasses
- virtual ~IntegralOperator() = default;

  /// Apply operator on input
  virtual void apply(GridBase& input, GridBase& output) const = 0;

  /// Apply operator on filtered input
  virtual void applyIf(GridBase& input, GridBase& output,
                       const std::function& /*unused*/) const {
    apply(input, output);
  }

  /// Update any data dependent on model parameters
  virtual void updateFromModel() = 0;

  /// Get model
  const Model& getModel() const { return *model; }

  /// Kind
  virtual kind getKind() const = 0;

  /// Type
  virtual model_type getType() const = 0;

  /// Norm
  virtual Real getOperatorNorm() {
    TAMAAS_EXCEPTION("operator does not implement norm");
  }

  /// Dense shape (for Scipy integration)
  virtual std::pair matvecShape() const {
    TAMAAS_EXCEPTION("operator does not implement "
                     "scipy.sparse.linalg.LinearOperator interface");
  }

  /// matvec definition
  virtual GridBase matvec(GridBase& /*x*/) const {
    TAMAAS_EXCEPTION("operator does not implement "
                     "scipy.sparse.linalg.LinearOperator interface");
  }

protected:
  Model* model = nullptr;
};

/* -------------------------------------------------------------------------- */
/* Printing IntegralOperator::kind to string                                  */
/* -------------------------------------------------------------------------- */
std::ostream& operator<<(std::ostream& o, const IntegralOperator::kind& val);

}  // namespace tamaas

#endif  // INTEGRAL_OPERATOR_HH
diff --git a/src/model/model_type.hh b/src/model/model_type.hh
index f7cb958..47eec32 100644
--- a/src/model/model_type.hh
+++ b/src/model/model_type.hh
@@ -1,266 +1,268 @@
/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 *
 * Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
 * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 * Copyright (©) 2020-2022 Lucas Frérot
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */
/* -------------------------------------------------------------------------- */
#ifndef MODEL_TYPE_HH
#define MODEL_TYPE_HH
/* -------------------------------------------------------------------------- */
#include "grid.hh"
#include "grid_base.hh"
#include "static_types.hh"
#include "tamaas.hh"
#include
#include
#include
#include
#include
/* -------------------------------------------------------------------------- */
namespace tamaas {

/// Types for grid dimensions and number of components
enum class model_type {
  basic_1d,    ///< one component line
  basic_2d,    ///< one component surface
  surface_1d,  ///< two components line
  surface_2d,  ///< three components surface
  volume_1d,   ///< two components volume
  volume_2d    ///< three components volume
};

/// Trait class to store physical dimension of domain, of boundary and
/// number of components
template
struct model_type_traits {};

#define MODEL_TYPE_TRAITS_MACRO(type, dim, comp, bdim) \
  template <> \
  struct model_type_traits { \
    static constexpr char repr[]{#type}; \
    static constexpr UInt dimension = dim; \
    static constexpr UInt components = comp; \
    static constexpr UInt boundary_dimension = bdim; \
    static constexpr UInt voigt = voigt_size::value; \
    static const std::vector indices; \
  }

MODEL_TYPE_TRAITS_MACRO(basic_1d, 1, 1, 1);
MODEL_TYPE_TRAITS_MACRO(basic_2d, 2, 1, 2);
MODEL_TYPE_TRAITS_MACRO(surface_1d, 1, 2, 1);
MODEL_TYPE_TRAITS_MACRO(surface_2d, 2, 3, 2);
MODEL_TYPE_TRAITS_MACRO(volume_1d, 2, 2, 1);
MODEL_TYPE_TRAITS_MACRO(volume_2d, 3, 3, 2);
#undef MODEL_TYPE_TRAITS_MACRO

// clang-format off
#ifdef TAMAAS_MODEL_TYPES
#undef TAMAAS_MODEL_TYPES
#endif

#define TAMAAS_MODEL_TYPES \
  (model_type::basic_1d) \
  (model_type::basic_2d) \
  (model_type::surface_1d) \
  (model_type::surface_2d) \
  (model_type::volume_1d) \
  (model_type::volume_2d)
// clang-format on

namespace detail {
/// Convert enum value to a type
template
using model_type_t = std::integral_constant;

/// Convert dim value to a type
template
using dim_t = std::integral_constant;

#define MAKE_MODEL_TYPE(r, x, type) (model_type_t)
/// Enumeration of model types
using model_types_t = std::tuple;
#undef MAKE_MODEL_TYPE

#define MAKE_DIM_TYPE(r, x, dim) (dim_t)
/// Enumeration of dimension types
using dims_t = std::tuple;
#undef MAKE_DIM_TYPE
}  // namespace detail

/// Print function for model_type
inline std::ostream& operator<<(std::ostream& o, const model_type& val) {
  switch (val) {
#define PRINT_MODEL_TYPE(r, data, type) \
  case type: \
    o << model_type_traits::repr; \
    break;
    BOOST_PP_SEQ_FOR_EACH(PRINT_MODEL_TYPE, model_type, TAMAAS_MODEL_TYPES);
#undef PRINT_MODEL_TYPE
  }
  return o;
}
#undef ALLOC_GRID_CASE_MACRO

// Implementing a static dispatch mechanism
// References for a generic static dispatch:
// https://stackoverflow.com/questions/39915986/solutions-for-dynamic-dispatch-on-unrelated-types
// constexpr map: Jason Turner C++ Weekly ep 233
// C++14 limited dispatch (Nicolas Richart):
// akantu/features/eigen::src/common/aka_element_classes_info_inline_impl.hh
// TODO when switching C++17, implement with constexpr map
namespace detail {

/// Specialized static dispatch for all model types
template
constexpr decltype(auto) static_switch_dispatch(
    const model_types_t&, Function&& function, const DynamicType& type,
    DefaultFunction&& default_function, std::index_sequence) {
#define SWITCH_DISPATCH_CASE(r, data, type) \
  case type: { \
    return function(model_type_t{}); \
  }

  switch (type) {
    BOOST_PP_SEQ_FOR_EACH(SWITCH_DISPATCH_CASE, ~, TAMAAS_MODEL_TYPES);
  default:
    return
        default_function(type);
  }
#undef SWITCH_DISPATCH_CASE
}

/// Specialized static dispatch for all dimensions
template
constexpr decltype(auto) static_switch_dispatch(
    const dims_t&, Function&& function, const DynamicType& dim,
    DefaultFunction&& default_function, std::index_sequence) {
#define SWITCH_DISPATCH_CASE(r, data, dim) \
  case dim: { \
    return function(dim_t{}); \
  }

  switch (dim) {
    BOOST_PP_SEQ_FOR_EACH(SWITCH_DISPATCH_CASE, ~, (1)(2)(3));
  default:
    return default_function(dim);
  }
#undef SWITCH_DISPATCH_CASE
}

/// Dispatch to tuple of types with a default case
template
constexpr decltype(auto)
tuple_dispatch_with_default(Function&& function,
                            DefaultFunction&& default_function,
                            const DynamicType& type) {
  return detail::static_switch_dispatch(
      TypeTuple{}, std::forward(function), type,
      std::forward(default_function),
      std::make_index_sequence::value>{});
}

/// Dispatch to tuple of types, error on default
template
constexpr decltype(auto) tuple_dispatch(Function&& function,
                                        const DynamicType& type) {
  return tuple_dispatch_with_default(
      std::forward(function),
      [](auto&& type) -> decltype(function(std::tuple_element_t<0, TypeTuple>{})) {
        TAMAAS_EXCEPTION("Unknown type in static dispatch" << type);
      },
      type);
}
}  // namespace detail

/// Static dispatch lambda to model types
template
constexpr decltype(auto) model_type_dispatch(Function&& function,
                                             model_type type) {
  return detail::tuple_dispatch(std::forward(function), type);
}

/// Static dispatch lambda to dimensions
template
constexpr decltype(auto) dimension_dispatch(Function&& function, UInt dim) {
  return detail::tuple_dispatch(std::forward(function), dim);
}

/// \cond DO_NOT_DOCUMENT
namespace detail {
template
using dim_choice = std::integral_constant<
    UInt, (boundary) ? model_type_traits::boundary_dimension
                     : model_type_traits::dimension>;
}  // namespace detail
/// \endcond

/// Allocate a Grid unique_ptr
-template
-std::unique_ptr::value>>
+template class GridType = Grid,
+         class Container = void>
+std::unique_ptr::value>>
allocateGrid(Container&& n, UInt nc) {
- return std::make_unique::value>>(
+ return std::make_unique::value>>(
      std::begin(n), std::end(n), nc);
}

/// Helper function for grid allocation with model type
template
decltype(auto) allocateGrid(model_type type, Container&& n) {
  return model_type_dispatch(
      [&n](auto&& type) -> std::unique_ptr> {
        constexpr auto mtype = std::decay_t::value;
        return allocateGrid(std::forward(n),
                            model_type_traits::components);
      },
      type);
}

/// Helper function for grid allocation with non-standard components
template
decltype(auto) allocateGrid(model_type type, Container&& n, UInt nc) {
  return model_type_dispatch(
      [&n, nc](auto&& type) -> std::unique_ptr> {
        constexpr auto mtype = std::decay_t::value;
        return allocateGrid(std::forward(n), nc);
      },
      type);
}

}  // namespace tamaas
/* -------------------------------------------------------------------------- */
#endif  // MODEL_TYPE_HH
diff --git a/src/model/westergaard.cpp b/src/model/westergaard.cpp
index 6d33600..2e2e9b6 100644
--- a/src/model/westergaard.cpp
+++ b/src/model/westergaard.cpp
@@ -1,305 +1,308 @@
/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 *
 * Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
 * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 * Copyright (©) 2020-2022 Lucas Frérot
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */
/* -------------------------------------------------------------------------- */
+#include "grid_hermitian.hh"
#include "grid_view.hh"
#include "influence.hh"
#include "loop.hh"
#include "model.hh"
+#include "model_type.hh"
#include "static_types.hh"
#include "westergaard.hh"
#include
#include
/* -------------------------------------------------------------------------- */
namespace tamaas {
/* -------------------------------------------------------------------------- */

template
Westergaard::Westergaard(Model* model)
-   : IntegralOperator(model), influence(), buffer(),
-     engine(FFTEngine::makeEngine()) {
+   : IntegralOperator(model), buffer(), engine(FFTEngine::makeEngine()) {
  // Copy sizes
  auto bdisc = model->getBoundaryDiscretization();
  auto boundary_hermitian_sizes =
      GridHermitian::hermitianDimensions(bdisc);

  constexpr UInt nb_components = trait::components;
  buffer.setNbComponents(nb_components);
  buffer.resize(boundary_hermitian_sizes);

- influence.setNbComponents(nb_components * nb_components);
- influence.resize(boundary_hermitian_sizes);
+
+ influence = allocateGrid(boundary_hermitian_sizes,
+                          nb_components * nb_components);
+ (*this)["influence"] = influence;

  initInfluence();
}

/* -------------------------------------------------------------------------- */
namespace detail {
template
struct boundary_fft_helper {
  template
  static void backwardTransform(FFTEngine& e, Buffer&& buffer, Out&& out) {
    auto view = make_view(out, 0);
    e.backward(view, buffer);
  }
};

template
struct boundary_fft_helper {
  template
  static void backwardTransform(FFTEngine& e, Buffer&& buffer, Out&& out) {
    e.backward(out, buffer);
  }
};
}  // namespace detail

template
template
void Westergaard::fourierApply(Functor func, GridBase& in,
                               GridBase& out) const try {
  auto& i = dynamic_cast&>(in);
  auto& full_out = dynamic_cast&>(out);

  engine->forward(i, buffer);

  /// Applying influence
- func(buffer, influence);
+ func(buffer, *influence);

  /// Backward fourier transform on boundary only
  detail::boundary_fft_helper::backwardTransform(*engine, buffer, full_out);
} catch (const std::bad_cast& e) {
  TAMAAS_EXCEPTION("Neumann and dirichlet types do not match model type");
}

/* -------------------------------------------------------------------------- */
template
template
void Westergaard::initFromFunctor(Functor func) {
  // Compute wavevectors for influence
  auto wavevectors = FFTEngine::template computeFrequencies(
-     influence.sizes());
+     influence->sizes());

  // Get boundary physical size
  auto system_size = this->model->getBoundarySystemSize();
  VectorProxy domain(system_size[0]);

  // Normalize wavevectors
  wavevectors *= 2 * M_PI;
  wavevectors /= domain;

  // Apply functor
  Loop::loop(func, range>(wavevectors),
-            range>(influence));
+            range>(*influence));

  if (mpi::rank() == 0) {
    // Set fundamental mode to zero
-   MatrixProxy mat(influence(0));
+   MatrixProxy mat((*influence)(0));
    mat = 0;
  }
}

/* -------------------------------------------------------------------------- */
/// \cond DO_NOT_DOCUMENT
#define NEUMANN_BASIC(type) \
  template <> \
  void Westergaard::initInfluence() { \
    auto E_star = model->getHertzModulus(); \
    auto basic = [E_star] CUDA_LAMBDA(VectorProxy q, \
                                      MatrixProxy k) { \
      k(0, 0) = 2. / (E_star * q.l2norm()); \
    }; \
    initFromFunctor(basic); \
  }

#define DIRICHLET_BASIC(type) \
  template <> \
  void Westergaard::initInfluence() { \
    auto E_star = model->getHertzModulus(); \
    auto basic = [E_star] CUDA_LAMBDA(VectorProxy q, \
                                      MatrixProxy k) { \
      k(0, 0) = E_star * q.l2norm() / 2; \
    }; \
    initFromFunctor(basic); \
  }

NEUMANN_BASIC(model_type::basic_1d)
NEUMANN_BASIC(model_type::basic_2d)
DIRICHLET_BASIC(model_type::basic_1d)
DIRICHLET_BASIC(model_type::basic_2d)
#undef NEUMANN_BASIC
#undef DIRICHLET_BASIC

/* -------------------------------------------------------------------------- */
template <>
void Westergaard::initInfluence() {
  auto E = model->getYoungModulus();
  auto nu = model->getPoissonRatio();
  const Complex I(0, 1);

  auto surface = [E, nu, I] CUDA_LAMBDA(VectorProxy q_, MatrixProxy F) {
    Real q = q_(0);
    q *= 1. / q_.l2norm();
    F(0, 0) = 2 * (1 + nu) * (1 - nu * q * q);
    F(1, 1) = 2 * (1 - nu * nu);
    F(0, 1) = I * q * (1 + nu) * (1 - 2 * nu);
    F(1, 0) = -F(0, 1);
    F *= 1. / (E * q_.l2norm());
  };

  initFromFunctor(surface);
}

/* -------------------------------------------------------------------------- */
template <>
void Westergaard::initInfluence() {
  auto E = model->getYoungModulus();
  auto nu = model->getPoissonRatio();
  const Complex I(0, 1);

  auto surface = [E, nu, I] CUDA_LAMBDA(VectorProxy q_, MatrixProxy F) {
    Vector q(q_);
    q *= 1 / q_.l2norm();
    F(0, 0) = 2 * (1 + nu) * (1 - nu * q(0) * q(0));
    F(1, 1) = 2 * (1 + nu) * (1 - nu * q(1) * q(1));
    F(2, 2) = 2 * (1 - nu * nu);
    F(0, 1) = F(1, 0) = -q(0) * q(1) * 2 * nu * (1 + nu);
    F(0, 2) = I * q(0) * (1 + nu) * (1. - 2. * nu);
    F(1, 2) = I * q(1) * (1 + nu) * (1. - 2. * nu);
    F(2, 0) = -F(0, 2);
    F(2, 1) = -F(1, 2);
    F *= 1. / (E * q_.l2norm());
  };

  initFromFunctor(surface);
}

/* -------------------------------------------------------------------------- */
template
void Westergaard::initInfluence() {}
/// \endcond

/* ------------------------------------------------------------------------ */
template
void Westergaard::apply(GridBase& input, GridBase& output) const {
  auto apply = [](decltype(buffer)& buffer,
-                 const decltype(influence)& influence) {
+                 const decltype(*influence)& influence) {
    Loop::loop(
        [] CUDA_LAMBDA(VectorProxy i, MatrixProxy F) { i = F * i; },
        range>(buffer), range>(influence));
  };

  fourierApply(apply, input, output);
}

/* -------------------------------------------------------------------------- */
template
Real Westergaard::getOperatorNorm() {
  constexpr UInt comp = model_type_traits::components;
  Real _norm = 0.0;

  _norm = Loop::reduce(
      [] CUDA_LAMBDA(MatrixProxy m) {
        Real n = thrust::norm(m.l2squared());
        return std::isnan(n) ? 0 : n;
      },
-     range>(influence));
+     range>(*influence));

  auto size = model->getSystemSize();
  auto disc = model->getDiscretization();
  auto dim = model_type_traits::dimension;

  /// TODO: why?
  switch (dim) {
  case 1:
    _norm /= (size[0] / disc[0]) * (size[0] / disc[0]);
    break;
  default:
    for (UInt i = 0; i < dim; i++) {
      _norm /= size[i] / disc[i];
    }
  }

  return std::sqrt(_norm);
}

/* -------------------------------------------------------------------------- */
template
std::pair Westergaard::matvecShape() const {
  auto n = model->getBoundaryDiscretization();
  auto N = std::accumulate(n.begin(), n.end(), comp, std::multiplies<>());
  return std::make_pair(N, N);
}

/* -------------------------------------------------------------------------- */
template
GridBase Westergaard::matvec(GridBase& X) const {
  auto n = model->getBoundaryDiscretization();

  // Creating correct input
  Grid x{n, comp, X.view()};

  // Creating correct output
  if (bdim != dim)
    n.insert(n.begin(), 1);
  Grid y(n, comp);

  apply(x, y);
  return std::move(y);
}

/* -------------------------------------------------------------------------- */
/* Template instantiation                                                     */
/* -------------------------------------------------------------------------- */
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
template class Westergaard;
/* -------------------------------------------------------------------------- */
}  // namespace tamaas
/* -------------------------------------------------------------------------- */
diff --git a/src/model/westergaard.hh b/src/model/westergaard.hh
index 1373a17..a91cf10 100644
--- a/src/model/westergaard.hh
+++ b/src/model/westergaard.hh
@@ -1,97 +1,97 @@
/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 *
 * Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
 * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 * Copyright (©) 2020-2022 Lucas Frérot
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */
/* -------------------------------------------------------------------------- */
#ifndef WESTERGAARD_HH
#define WESTERGAARD_HH
/* -------------------------------------------------------------------------- */
#include "fft_engine.hh"
#include "grid_hermitian.hh"
#include "integral_operator.hh"
#include "model_type.hh"
#include "tamaas.hh"
/* -------------------------------------------------------------------------- */
namespace tamaas {

/**
 * @brief Operator based on Westergaard solution and the Discrete Fourier
 * Transform.
 * This class is templated with model type to allow efficient storage of the
 * influence coefficients.
 * The integral operator is only applied to surface pressure/displacements,
 * even for volume models.
 */
template
class Westergaard : public IntegralOperator {
  using trait = model_type_traits;
  static constexpr UInt dim = trait::dimension;
  static constexpr UInt bdim = trait::boundary_dimension;
  static constexpr UInt comp = trait::components;

public:
  /// Constructor: initializes influence coefficients and allocates buffer
  Westergaard(Model* model);

  /// Get influence coefficients
- const GridHermitian& getInfluence() const { return influence; }
+ const GridHermitian& getInfluence() const { return *influence; }

  /// Apply influence coefficients to input
  void apply(GridBase& input, GridBase& output) const override;

  /// Update the influence coefficients
  void updateFromModel() override { initInfluence(); }

  /// Kind
  IntegralOperator::kind getKind() const override { return otype; }

  /// Type
  model_type getType() const override { return mtype; }

  /// Initialize influence coefficients
  void initInfluence();

  template
  void initFromFunctor(Functor func);

  /// Apply a functor in Fourier space
  template
  void fourierApply(Functor func, GridBase& in, GridBase& out) const;

  /// Compute L_2 norm of influence functions
  Real getOperatorNorm() override;

  /// Dense shape
  std::pair matvecShape() const override;

  /// Dense matvec
  GridBase matvec(GridBase& x) const override;

public:
- GridHermitian influence;
+ std::shared_ptr> influence;
  mutable GridHermitian buffer;
  mutable std::unique_ptr engine;
};

}  // namespace tamaas

#endif  // WESTERGAARD_HH
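
Usage sketch (Python). The snippet below is a minimal, hedged illustration of the dict-like FieldContainer interface this patch exposes through the bindings; it assumes the IntegralOperator wrapper picks up the FieldContainer base (as the wrapIntegralOperator change suggests). The field name "my_field" is arbitrary, and which operator names and operator fields (e.g. "influence" for Westergaard) get printed depends on what the model registers.

    import numpy as np
    import tamaas as tm

    # Basic 2D normal-contact model: unit physical size, 64x64 boundary points
    model = tm.ModelFactory.createModel(tm.model_type.basic_2d, [1., 1.], [64, 64])

    # Model fields behave like a dictionary through the FieldContainer bindings
    model["my_field"] = np.zeros(model.boundary_shape)  # __setitem__ registers a field
    assert "my_field" in model                          # __contains__
    print(list(model))                                  # __iter__ over field names

    # Integral operators are dict-like too: since they now inherit FieldContainer,
    # internal data such as the Westergaard influence coefficients appear as
    # named fields of the operator itself.
    for name in model.operators:
        print(name, list(model.operators[name]))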