diff --git a/.clang-tidy b/.clang-tidy index 76c907d..d2fdbe2 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,4 +1,4 @@ -Checks: 'modernize-use-*, performance-*, cppcoreguidelines-*' +Checks: '-*, modernize-use-*, -modernize-use-trailing-return-type*, performance-*, readability-*, -readability-braces-around-statements, -readability-implicit-bool-conversion' AnalyzeTemporaryDtors: false HeaderFilterRegex: 'src/.*' FormatStyle: file diff --git a/src/core/fftw/fftw_engine.hh b/src/core/fftw/fftw_engine.hh index e5a2634..3273aba 100644 --- a/src/core/fftw/fftw_engine.hh +++ b/src/core/fftw/fftw_engine.hh @@ -1,104 +1,104 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef FFTW_ENGINE_HH #define FFTW_ENGINE_HH /* -------------------------------------------------------------------------- */ #include "fft_engine.hh" #include "fftw/interface.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { class FFTWEngine : public FFTEngine { protected: using plan_t = std::pair, fftw::plan>; using complex_t = fftw::helper::complex; /// Perform forward (R2C) transform template void forwardImpl(const Grid& real, GridHermitian& spectral); /// Perform backward (C2R) transform template void backwardImpl(Grid& real, const GridHermitian& spectral); /// Return the plans pair for a given transform signature plan_t& getPlans(key_t key); public: /// Initialize with flags explicit FFTWEngine(unsigned int flags = FFTW_ESTIMATE) noexcept - : _flags(flags), plans() {} + : _flags(flags) {} void forward(const Grid& real, GridHermitian& spectral) override { forwardImpl(real, spectral); } void forward(const Grid& real, GridHermitian& spectral) override { forwardImpl(real, spectral); } void backward(Grid& real, GridHermitian& spectral) override { backwardImpl(real, spectral); } void backward(Grid& real, GridHermitian& spectral) override { backwardImpl(real, spectral); } unsigned int flags() const { return _flags; } /// Cast to FFTW complex type static auto cast(Complex* data) { return reinterpret_cast(data); } static auto cast(const Complex* data) { return const_cast(reinterpret_cast(data)); } protected: unsigned int _flags; ///< FFTW flags std::map plans; ///< plans corresponding to signatures }; /* -------------------------------------------------------------------------- */ template void FFTWEngine::forwardImpl(const Grid& real, GridHermitian& spectral) { auto& plans = getPlans(make_key(real, spectral)); fftw::execute(plans.first, const_cast(real.getInternalData()), cast(spectral.getInternalData())); } template void FFTWEngine::backwardImpl(Grid& real, const GridHermitian& spectral) { auto& plans = getPlans(make_key(real, 
spectral)); fftw::execute(plans.second, cast(spectral.getInternalData()), real.getInternalData()); // Normalize real *= (1. / real.getNbPoints()); } } // namespace tamaas #endif diff --git a/src/core/grid_hermitian.hh b/src/core/grid_hermitian.hh index 8d9375f..1a3a496 100644 --- a/src/core/grid_hermitian.hh +++ b/src/core/grid_hermitian.hh @@ -1,161 +1,159 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef GRID_HERMITIAN_HH #define GRID_HERMITIAN_HH /* -------------------------------------------------------------------------- */ #include "grid.hh" #include "tamaas.hh" #include #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { static complex dummy; /** * @brief Multi-dimensional, multi-component herimitian array * * This class represents an array of hermitian data, meaning it has dimensions * of: n1 * n2 * n3 * ... * (nx / 2 + 1) * * However, it acts as a fully dimensioned array, returning a dummy reference * for data outside its real dimension, allowing one to write full (and * inefficient) loops without really worrying about the reduced dimension. * * It would however be better to just use the true dimensions of the surface * for all intents and purposes, as it saves computation time. */ template class GridHermitian : public Grid, dim> { public: using value_type = complex; /* ------------------------------------------------------------------------ */ /* Constructors */ /* ------------------------------------------------------------------------ */ public: GridHermitian() = default; GridHermitian(const GridHermitian& o) = default; GridHermitian(GridHermitian&& o) noexcept = default; using Grid::Grid; using Grid, dim>::operator=; /* ------------------------------------------------------------------------ */ /* Access operators */ /* ------------------------------------------------------------------------ */ template inline complex& operator()(T1... args); template inline const complex& operator()(T1... args) const; static std::array hermitianDimensions(std::array n) { n[dim - 1] /= 2; n[dim - 1] += 1; return n; } template ::value>> static std::vector hermitianDimensions(std::vector n) { n.back() /= 2; n.back() += 1; return n; } private: template inline void packTuple(UInt* t, UInt i, T1... args) const; template inline void packTuple(UInt* t, UInt i) const; }; /* -------------------------------------------------------------------------- */ /* Inline function definitions */ /* -------------------------------------------------------------------------- */ template template inline void GridHermitian::packTuple(UInt* t, UInt i, T1... 
args) const { *t = i; packTuple(t + 1, args...); } template template inline void GridHermitian::packTuple(UInt* t, UInt i) const { *t = i; } template template inline complex& GridHermitian::operator()(T1... args) { constexpr UInt nargs = sizeof...(T1); static_assert(nargs <= dim + 1, "Too many arguments"); UInt last_index = std::get(std::forward_as_tuple(args...)); if (nargs != 1 && last_index >= this->n[dim - 1]) { std::array tuple = {{0}}; packTuple(tuple.data(), args...); for (UInt i = 0; i < dim; i++) { if (tuple[i] && i != dim - 1) tuple[i] = this->n[i] - tuple[i]; else if (tuple[i] && i == dim - 1) tuple[i] = 2 * (this->n[i] - 1) - tuple[i]; } dummy = conj(this->Grid, dim>::operator()(tuple)); return dummy; } - else return Grid, dim>::operator()(args...); } template template inline const complex& GridHermitian::operator()(T1... args) const { constexpr UInt nargs = sizeof...(T1); static_assert(nargs <= dim + 1, "Too many arguments"); UInt last_index = std::get(std::forward_as_tuple(args...)); if (nargs != 1 && last_index >= this->n[dim - 1]) { std::array tuple = {{0}}; packTuple(tuple.data(), args...); for (UInt i = 0; i < dim; i++) { if (tuple[i] && i != dim - 1) tuple[i] = this->n[i] - tuple[i]; else if (tuple[i] && i == dim - 1) tuple[i] = 2 * (this->n[i] - 1) - tuple[i]; } dummy = conj(this->Grid, dim>::operator()(tuple)); return dummy; } - else return Grid, dim>::operator()(args...); } } // namespace tamaas #endif // GRID_HERMITIAN_HH diff --git a/src/core/loops/loop_utils.hh b/src/core/loops/loop_utils.hh index 78bb12e..5fac93a 100644 --- a/src/core/loops/loop_utils.hh +++ b/src/core/loops/loop_utils.hh @@ -1,103 +1,103 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef LOOP_UTILS_HH #define LOOP_UTILS_HH /* -------------------------------------------------------------------------- */ #include "loop.hh" #include "tamaas.hh" #include #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { namespace detail { template UInt loopSize(Grids&&... grids) { return (only_points) ? std::get<0>(std::forward_as_tuple(grids...)).getNbPoints() : std::get<0>(std::forward_as_tuple(grids...)).dataSize(); } template -void areAllEqual(bool, UInt) {} +void areAllEqual(bool /*unused*/ /*unused*/, UInt /*unused*/ /*unused*/) {} template void areAllEqual(bool result, UInt prev, UInt current) { if (!(result && prev == current)) TAMAAS_EXCEPTION("Cannot loop over ranges that do not have the same size!"); } template void areAllEqual(bool result, UInt prev, UInt current, Sizes... 
rest) { areAllEqual(result && prev == current, current, rest...); } } // namespace detail template void checkLoopSize(Ranges&&... ranges) { detail::areAllEqual(true, (ranges.end() - ranges.begin())...); } namespace detail { template struct reduction_helper; template struct reduction_helper : public thrust::plus { ReturnType init() const { return ReturnType(0); } }; template struct reduction_helper : public thrust::multiplies { ReturnType init() const { return ReturnType(1); } }; template struct reduction_helper : public thrust::minimum { ReturnType init() const { return std::numeric_limits::max(); } }; template struct reduction_helper : public thrust::maximum { ReturnType init() const { return std::numeric_limits::lowest(); } }; } // namespace detail } // namespace tamaas #endif // __LOOP_UTILS_HH diff --git a/src/core/mpi_interface.hh b/src/core/mpi_interface.hh index 46add03..41df35d 100644 --- a/src/core/mpi_interface.hh +++ b/src/core/mpi_interface.hh @@ -1,244 +1,246 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
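/*
 * A minimal standalone sketch of the recursive size check that checkLoopSize /
 * areAllEqual implement above; names are invented here and this is not the
 * library's API. Each pairwise comparison is folded into `result`, and any
 * mismatch aborts before the loop is built.
 */
#include <cstddef>
#include <stdexcept>

// Terminal case: a single remaining size is trivially consistent.
inline void check_all_equal(bool /*result*/, std::size_t /*prev*/) {}

// Recursive case: compare the next size, then fold and recurse.
template <typename... Sizes>
void check_all_equal(bool result, std::size_t prev, std::size_t current,
                     Sizes... rest) {
  if (!(result && prev == current))
    throw std::invalid_argument("ranges do not have the same size");
  check_all_equal(result && prev == current, current, rest...);
}

// Usage: check_all_equal(true, a.size(), b.size(), c.size());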
* */ /* -------------------------------------------------------------------------- */ #ifndef MPI_INTERFACE_HH #define MPI_INTERFACE_HH /* -------------------------------------------------------------------------- */ #include "static_types.hh" #include "tamaas.hh" #include #ifdef TAMAAS_USE_MPI #include #endif /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ /// Contains mock mpi functions namespace mpi_dummy { struct comm { static comm world; }; struct sequential { static void enter(){}; static void exit(){}; }; struct sequential_guard { sequential_guard() { sequential::enter(); } ~sequential_guard() { sequential::exit(); } }; enum class thread : int { single, funneled, serialized, multiple }; inline bool initialized() { return false; } inline bool finalized() { return false; } -inline int init(int*, char***) { return 0; } -inline int init_thread(int*, char***, thread, thread* provided) { +inline int init(int* /*unused*/, char*** /*unused*/) { return 0; } +inline int init_thread(int* /*unused*/, char*** /*unused*/, thread /*unused*/, + thread* provided) { *provided = thread::funneled; return 0; } inline int finalize() { return 0; } -inline int rank(comm = comm::world) { return 0; } -inline int size(comm = comm::world) { return 1; } +inline int rank(comm /*unused*/ = comm::world) { return 0; } +inline int size(comm /*unused*/ = comm::world) { return 1; } template -inline decltype(auto) reduce(T&& val, comm = comm::world) { +inline decltype(auto) reduce(T&& val, comm /*unused*/ = comm::world) { return std::forward(val); } template -inline decltype(auto) allreduce(T&& val, comm = comm::world) { +inline decltype(auto) allreduce(T&& val, comm /*unused*/ = comm::world) { return std::forward(val); } template inline decltype(auto) gather(const T* send, T* recv, int count, - comm = comm::world) { + comm /*unused*/ = comm::world) { if (send == recv) return; thrust::copy_n(send, count, recv); } template -inline decltype(auto) bcast(T*, int, comm = comm::world) {} +inline decltype(auto) bcast(T* /*unused*/, int /*unused*/, + comm /*unused*/ = comm::world) {} } // namespace mpi_dummy /* -------------------------------------------------------------------------- */ #ifdef TAMAAS_USE_MPI /// Contains real mpi functions namespace mpi_impl { /// MPI_Comm wrapper struct comm { MPI_Comm _comm; operator MPI_Comm() { return _comm; } static comm& world(); }; struct sequential { static void enter() { comm::world()._comm = MPI_COMM_SELF; } static void exit() { comm::world()._comm = MPI_COMM_WORLD; } }; struct sequential_guard { sequential_guard() { sequential::enter(); } ~sequential_guard() { sequential::exit(); } }; /// MPI Thread level enum class thread : int { single = MPI_THREAD_SINGLE, funneled = MPI_THREAD_FUNNELED, serialized = MPI_THREAD_SERIALIZED, multiple = MPI_THREAD_MULTIPLE }; template struct type_trait; #define TYPE(t, mpi_t) \ template <> \ struct type_trait { \ static constexpr MPI_Datatype value = mpi_t; \ } TYPE(double, MPI_DOUBLE); TYPE(int, MPI_INT); TYPE(unsigned int, MPI_UNSIGNED); TYPE(long double, MPI_LONG_DOUBLE); TYPE(long, MPI_LONG); TYPE(unsigned long, MPI_UNSIGNED_LONG); TYPE(::thrust::complex, MPI_CXX_DOUBLE_COMPLEX); TYPE(::thrust::complex, MPI_CXX_LONG_DOUBLE_COMPLEX); TYPE(bool, MPI_CXX_BOOL); #undef TYPE template struct operation_trait; #define OPERATION(op, mpi_op) \ template <> \ struct operation_trait { \ static constexpr MPI_Op value = mpi_op; \ 
} OPERATION(plus, MPI_SUM); OPERATION(min, MPI_MIN); OPERATION(max, MPI_MAX); OPERATION(times, MPI_PROD); #undef OPERATION inline bool initialized() { int has_init; MPI_Initialized(&has_init); return has_init != 0; } inline bool finalized() { int has_final; MPI_Finalized(&has_final); return has_final != 0; } inline int init(int* argc, char*** argv) { return MPI_Init(argc, argv); } inline int init_thread(int* argc, char*** argv, thread required, thread* provided) { return MPI_Init_thread(argc, argv, static_cast(required), reinterpret_cast(provided)); } inline int finalize() { return MPI_Finalize(); } inline int rank(comm communicator = comm::world()) { int rank; MPI_Comm_rank(communicator, &rank); return rank; } inline int size(comm communicator = comm::world()) { int size; MPI_Comm_size(communicator, &size); return size; } template inline decltype(auto) reduce(T val, comm communicator = comm::world()) { MPI_Reduce(&val, &val, 1, type_trait::value, operation_trait::value, 0, communicator); return val; } template ::value or std::is_same::value>> inline decltype(auto) allreduce(T val, comm communicator = comm::world()) { MPI_Allreduce(&val, &val, 1, type_trait::value, operation_trait::value, communicator); return val; } template inline decltype(auto) allreduce(const StaticVector& v, comm communicator = comm::world()) { Vector res; MPI_Allreduce(v.begin(), res.begin(), n, type_trait
::value, operation_trait::value, communicator); return res; } template inline decltype(auto) allreduce(const StaticMatrix& v, comm communicator = comm::world()) { Matrix res; MPI_Allreduce(v.begin(), res.begin(), n * m, type_trait
::value, operation_trait::value, communicator); return res; } template inline decltype(auto) gather(const T* send, T* recv, int count, comm communicator = comm::world()) { MPI_Gather(send, count, type_trait::value, recv, count, type_trait::value, 0, communicator); } template inline decltype(auto) bcast(T* buffer, int count, comm communicator = comm::world()) { MPI_Bcast(buffer, count, type_trait::value, 0, communicator); } } // namespace mpi_impl namespace mpi = mpi_impl; #else namespace mpi = mpi_dummy; #endif // TAMAAS_USE_MPI } // namespace tamaas /* -------------------------------------------------------------------------- */ #endif // MPI_INTERFACE_HH diff --git a/src/core/statistics.cpp b/src/core/statistics.cpp index 80ca0bc..d84c453 100644 --- a/src/core/statistics.cpp +++ b/src/core/statistics.cpp @@ -1,213 +1,211 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "statistics.hh" #include "fft_engine.hh" #include "loop.hh" #include "static_types.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { template Real Statistics::computeRMSHeights(Grid& surface) { return std::sqrt(surface.var()); } template Real Statistics::computeSpectralRMSSlope(Grid& surface) { const auto h_size = GridHermitian::hermitianDimensions(surface.sizes()); auto wavevectors = FFTEngine::template computeFrequencies(h_size); wavevectors *= 2 * M_PI; // need q for slopes const auto psd = computePowerSpectrum(surface); const Real rms_slope_mean = Loop::reduce( [] CUDA_LAMBDA(VectorProxy q, const Complex& psd_val) { // Checking if we're in the zone that does not have hermitian symmetry if (std::abs(q.back()) < 1e-15) return q.l2squared() * psd_val.real(); - else - return 2 * q.l2squared() * psd_val.real(); + return 2 * q.l2squared() * psd_val.real(); }, range>(wavevectors), psd); return std::sqrt(rms_slope_mean); } /* -------------------------------------------------------------------------- */ template GridHermitian Statistics::computePowerSpectrum(Grid& surface) { const auto h_size = GridHermitian::hermitianDimensions(surface.sizes()); GridHermitian psd(h_size, surface.getNbComponents()); FFTEngine::makeEngine()->forward(surface, psd); Real factor = 1. 
/ surface.getGlobalNbPoints(); // Squaring the fourier transform of surface and normalizing Loop::loop( [factor] CUDA_LAMBDA(Complex & c) { c *= factor; c *= conj(c); }, psd); return psd; } /* -------------------------------------------------------------------------- */ template Grid Statistics::computeAutocorrelation(Grid& surface) { Grid acf(surface.sizes(), surface.getNbComponents()); auto psd = computePowerSpectrum(surface); FFTEngine::makeEngine()->backward(acf, psd); acf *= acf.getGlobalNbPoints(); return acf; } /* -------------------------------------------------------------------------- */ template Real Statistics::contact(const Grid& tractions, UInt perimeter) { Real points = 0; UInt nc = tractions.getNbComponents(); switch (nc) { case 1: points = Loop::reduce( [] CUDA_LAMBDA(const Real& t) -> Real { return t > 0; }, tractions); break; case 2: points = Loop::reduce( [] CUDA_LAMBDA(VectorProxy t) -> Real { return t.back() > 0; }, range>(tractions)); break; case 3: points = Loop::reduce( [] CUDA_LAMBDA(VectorProxy t) -> Real { return t.back() > 0; }, range>(tractions)); break; default: TAMAAS_EXCEPTION("Invalid number of components in traction"); } auto area = points / tractions.getGlobalNbPoints(); if (dim == 1) perimeter = 0; // Correction from Yastrebov et al. (Trib. Intl., 2017) // 10.1016/j.triboint.2017.04.023 - return area - - (M_PI - 1 + std::log(2)) / (24. * tractions.getGlobalNbPoints()) * perimeter; + return area - (M_PI - 1 + std::log(2)) / + (24. * tractions.getGlobalNbPoints()) * perimeter; } /* -------------------------------------------------------------------------- */ namespace { template class moment_helper { public: moment_helper(const std::array& exp) : exponent(exp) {} CUDA_LAMBDA Complex operator()(VectorProxy q, const Complex& phi) const { Real mul = 1; for (UInt i = 0; i < dim; ++i) mul *= std::pow(q(i), exponent[i]); // Do not duplicate everything from hermitian symmetry if (std::abs(q.back()) < 1e-15) return mul * phi; - else - return 2 * mul * phi; + return 2 * mul * phi; } private: std::array exponent; }; } // namespace template <> std::vector Statistics<1>::computeMoments(Grid& surface) { constexpr UInt dim = 1; std::vector moments(3); const auto psd = computePowerSpectrum(surface); auto wavevectors = FFTEngine::template computeFrequencies(psd.sizes()); // we don't multiply by 2 pi because moments are computed with k moments[0] = Loop::reduce(moment_helper{{{0}}}, range(wavevectors), psd) .real(); moments[1] = Loop::reduce(moment_helper{{{2}}}, range(wavevectors), psd) .real(); moments[2] = Loop::reduce(moment_helper{{{4}}}, range(wavevectors), psd) .real(); return moments; } template <> std::vector Statistics<2>::computeMoments(Grid& surface) { constexpr UInt dim = 2; std::vector moments(3); const auto psd = computePowerSpectrum(surface); auto wavevectors = FFTEngine::template computeFrequencies(psd.sizes()); // we don't multiply by 2 pi because moments are computed with k moments[0] = Loop::reduce(moment_helper{{{0, 0}}}, range(wavevectors), psd) .real(); auto m02 = Loop::reduce(moment_helper{{{0, 2}}}, range(wavevectors), psd) .real(); auto m20 = Loop::reduce(moment_helper{{{2, 0}}}, range(wavevectors), psd) .real(); moments[1] = 0.5 * (m02 + m20); auto m22 = Loop::reduce(moment_helper{{{2, 2}}}, range(wavevectors), psd) .real(); auto m40 = Loop::reduce(moment_helper{{{4, 0}}}, range(wavevectors), psd) .real(); auto m04 = Loop::reduce(moment_helper{{{0, 4}}}, range(wavevectors), psd) .real(); moments[2] = (3 * m22 + m40 + m04) / 3.; return moments; } 
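/*
 * For reference, the reductions assembled above compute directional PSD
 * moments m_pq = sum_k k_x^p k_y^q Phi(k) and combine them isotropically as
 *   m0 = m_00,   m2 = (m_20 + m_02) / 2,   m4 = (3 m_22 + m_40 + m_04) / 3,
 * counting every stored frequency twice except on the q_last = 0 plane, whose
 * conjugate mirrors already lie in the stored half-spectrum. A standalone 1D
 * sketch of that doubling (plain C++, names invented here, not the library's):
 */
#include <cmath>
#include <cstddef>
#include <vector>

// psd[i] holds the PSD at wavenumber k = i (hermitian storage, i = 0..n/2).
double spectral_moment(const std::vector<double>& psd, unsigned order) {
  double m = 0;
  for (std::size_t i = 0; i < psd.size(); ++i) {
    const double weight = (i == 0) ? 1.0 : 2.0;  // mirror frequencies
    m += weight * std::pow(static_cast<double>(i), order) * psd[i];
  }
  return m;  // m0, m2, m4 for order = 0, 2, 4
}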
template struct Statistics<1>; template struct Statistics<2>; } // namespace tamaas diff --git a/src/model/adhesion_functional.hh b/src/model/adhesion_functional.hh index fa4311f..4c45591 100644 --- a/src/model/adhesion_functional.hh +++ b/src/model/adhesion_functional.hh @@ -1,102 +1,99 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef ADHESION_FUNCTIONAL_HH #define ADHESION_FUNCTIONAL_HH /* -------------------------------------------------------------------------- */ #include "functional.hh" #include /* -------------------------------------------------------------------------- */ namespace tamaas { namespace functional { /// Functional class for adhesion energy class AdhesionFunctional : public Functional { protected: /// Constructor - AdhesionFunctional(const GridBase& surface) - : Functional(), surface() { + AdhesionFunctional(const GridBase& surface) { this->surface.wrap(surface); } public: /// Get parameters const std::map& getParameters() const { return parameters; } /// Set parameters void setParameters(std::map other) { parameters = std::move(other); } protected: GridBase surface; std::map parameters; }; /// Exponential adhesion functional class ExponentialAdhesionFunctional : public AdhesionFunctional { public: /// Explicit declaration of constructor for swig ExponentialAdhesionFunctional(const GridBase& surface) : AdhesionFunctional(surface) {} /// Compute the total adhesion energy Real computeF(GridBase& gap, GridBase& pressure) const override; /// Compute the gradient of the adhesion functional void computeGradF(GridBase& gap, GridBase& gradient) const override; }; - /// Constant adhesion functional class MaugisAdhesionFunctional : public AdhesionFunctional { public: /// Explicit declaration of constructor for swig MaugisAdhesionFunctional(const GridBase& surface) : AdhesionFunctional(surface) {} /// Compute the total adhesion energy Real computeF(GridBase& gap, GridBase& pressure) const override; /// Compute the gradient of the adhesion functional void computeGradF(GridBase& gap, GridBase& gradient) const override; }; /// Squared exponential adhesion functional class SquaredExponentialAdhesionFunctional : public AdhesionFunctional { public: /// Explicit declaration of constructor for swig SquaredExponentialAdhesionFunctional(const GridBase& surface) : AdhesionFunctional(surface) {} /// Compute the total adhesion energy Real computeF(GridBase& gap, GridBase& pressure) const override; /// Compute the gradient of the adhesion functional void computeGradF(GridBase& gap, GridBase& gradient) const override; }; - -} // namespace functional +} // namespace functional } // namespace tamaas -#endif // ADHESION_FUNCTIONAL_HH +#endif 
// ADHESION_FUNCTIONAL_HH diff --git a/src/model/elastic_functional.hh b/src/model/elastic_functional.hh index 392a5ef..08e4d6b 100644 --- a/src/model/elastic_functional.hh +++ b/src/model/elastic_functional.hh @@ -1,79 +1,79 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef ELASTIC_FUNCTIONAL_HH #define ELASTIC_FUNCTIONAL_HH /* -------------------------------------------------------------------------- */ #include "functional.hh" #include "model.hh" #include "tamaas.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { namespace functional { /// Generic functional for elastic energy class ElasticFunctional : public Functional { public: ElasticFunctional(const IntegralOperator& op, const GridBase& surface) - : Functional(), op(op), surface(surface) { + : op(op), surface(surface) { const Model& model = op.getModel(); buffer = allocateGrid(op.getType(), model.getBoundaryDiscretization()); } protected: const IntegralOperator& op; const GridBase& surface; std::unique_ptr> mutable buffer; }; /* -------------------------------------------------------------------------- */ /// Functional with pressure as primal field class ElasticFunctionalPressure : public ElasticFunctional { public: using ElasticFunctional::ElasticFunctional; /// Compute functional with input pressure Real computeF(GridBase& pressure, GridBase& dual) const override; /// Compute functional gradient with input pressure void computeGradF(GridBase& pressure, GridBase& gradient) const override; }; /* -------------------------------------------------------------------------- */ /// Functional with gap as primal field class ElasticFunctionalGap : public ElasticFunctional { public: using ElasticFunctional::ElasticFunctional; /// Compute functional with input gap Real computeF(GridBase& gap, GridBase& dual) const override; /// Compute functional gradient with input gap void computeGradF(GridBase& gap, GridBase& gradient) const override; }; } // namespace functional } // namespace tamaas /* -------------------------------------------------------------------------- */ #endif // ELASTIC_FUNCTIONAL_HH diff --git a/src/model/hooke.hh b/src/model/hooke.hh index ef70ea2..9f46125 100644 --- a/src/model/hooke.hh +++ b/src/model/hooke.hh @@ -1,56 +1,56 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 
3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef HOOKE_HH #define HOOKE_HH /* -------------------------------------------------------------------------- */ #include "integral_operator.hh" #include "model_type.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { /** * @brief Applies Hooke's law of elasticity */ template class Hooke : public IntegralOperator { public: using IntegralOperator::IntegralOperator; /// Type of underlying model model_type getType() const override { return type; } /// Hooke's law computes stresses ~ Neumann IntegralOperator::kind getKind() const override { return IntegralOperator::dirac; } /// Does not update void updateFromModel() override {} /// Apply Hooke's tensor - void apply(GridBase& input, GridBase& output) const override; + void apply(GridBase& strain, GridBase& stress) const override; }; } // namespace tamaas #endif // HOOKE_HH diff --git a/src/model/integral_operator.hh b/src/model/integral_operator.hh index fe9c696..aa91177 100644 --- a/src/model/integral_operator.hh +++ b/src/model/integral_operator.hh @@ -1,83 +1,83 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
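/*
 * The constitutive relation the Hooke operator above applies field-wise is the
 * isotropic linear-elastic law sigma = lambda tr(eps) I + 2 mu eps. A plain
 * standalone sketch on a single 3x3 tensor (names and storage invented here,
 * not the library's grid-based implementation):
 */
#include <array>

using Tensor = std::array<std::array<double, 3>, 3>;

Tensor hooke_stress(const Tensor& strain, double E, double nu) {
  const double lambda = E * nu / ((1 + nu) * (1 - 2 * nu));  // Lamé's first
  const double mu = E / (2 * (1 + nu));                      // shear modulus
  const double trace = strain[0][0] + strain[1][1] + strain[2][2];
  Tensor stress{};
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      stress[i][j] = 2 * mu * strain[i][j] + (i == j ? lambda * trace : 0.0);
  return stress;
}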
* */ /* -------------------------------------------------------------------------- */ #ifndef INTEGRAL_OPERATOR_HH #define INTEGRAL_OPERATOR_HH /* -------------------------------------------------------------------------- */ #include "grid_base.hh" #include "model_type.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ class Model; /** * @brief Generic class for integral operators */ class IntegralOperator { public: /// Kind of operator enum kind { neumann, dirichlet, dirac }; public: /// Constructor IntegralOperator(Model* model) : model(model) {} /// Virtual destructor for subclasses virtual ~IntegralOperator() = default; /// Apply operator on input virtual void apply(GridBase& input, GridBase& output) const = 0; /// Apply operator on filtered input virtual void applyIf(GridBase& input, GridBase& output, - std::function) const { + std::function /*unused*/) const { apply(input, output); } /// Update any data dependent on model parameters virtual void updateFromModel() = 0; /// Get model const Model& getModel() const { return *model; } /// Kind virtual kind getKind() const = 0; /// Type virtual model_type getType() const = 0; /// Norm virtual Real getOperatorNorm() { TAMAAS_EXCEPTION("operator does not implement norm"); } protected: Model* model = nullptr; }; /* -------------------------------------------------------------------------- */ /* Printing IntegralOperator::kind to string */ /* -------------------------------------------------------------------------- */ std::ostream& operator<<(std::ostream& o, const IntegralOperator::kind& val); } // namespace tamaas #endif // INTEGRAL_OPERATOR_HH diff --git a/src/model/integration/element.hh b/src/model/integration/element.hh index d61ecc3..a05fe08 100644 --- a/src/model/integration/element.hh +++ b/src/model/integration/element.hh @@ -1,66 +1,65 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef ELEMENT_HH #define ELEMENT_HH /* -------------------------------------------------------------------------- */ #include "tamaas.hh" #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { template class ExponentialElement; template <> struct ExponentialElement<1> { template static constexpr __device__ __host__ expolit::Polynomial shapes() { constexpr expolit::Polynomial z({0, 1}); if (shape == 0) return 0.5 + (-0.5) * z; - else - return 0.5 + 0.5 * z; + return 0.5 + 0.5 * z; } static constexpr __device__ __host__ auto sign(bool upper) { return (upper) ? 
-1 : 1; } template static constexpr __device__ __host__ auto g0(Real q) { constexpr expolit::Polynomial z({0, 1}); return expolit::exp((q * sign(upper)) * z) * shapes(); } template static constexpr __device__ __host__ auto g1(Real q) { constexpr expolit::Polynomial z({0, 1}); return expolit::exp((q * sign(upper)) * z) * (q * z * shapes()); } }; // namespace tamaas } // namespace tamaas #endif // ELEMENT_HH diff --git a/src/model/meta_functional.hh b/src/model/meta_functional.hh index f823c2a..3c8b4dd 100644 --- a/src/model/meta_functional.hh +++ b/src/model/meta_functional.hh @@ -1,58 +1,58 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef META_FUNCTIONAL_HH #define META_FUNCTIONAL_HH #include "functional.hh" #include "tamaas.hh" #include #include namespace tamaas { namespace functional { /// Meta functional that contains list of functionals class MetaFunctional : public Functional { using FunctionalList = std::list>; public: /// Compute functional Real computeF(GridBase& variable, GridBase& dual) const override; /// Compute functional gradient void computeGradF(GridBase& variable, GridBase& gradient) const override; /// Add functional to the list - void addFunctionalTerm(std::shared_ptr term); + void addFunctionalTerm(std::shared_ptr functional); protected: FunctionalList functionals; }; } // namespace functional } // namespace tamaas #endif // META_FUNCTIONAL_HH diff --git a/src/model/model.cpp b/src/model/model.cpp index 1cb28f0..d82d2b8 100644 --- a/src/model/model.cpp +++ b/src/model/model.cpp @@ -1,183 +1,183 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
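/*
 * MetaFunctional above is a composite over functional terms. The sketch below
 * shows that pattern with invented minimal types; summing energies and
 * accumulating gradients is an assumption made here for illustration, since
 * the header only declares the interface.
 */
#include <list>
#include <memory>
#include <vector>

struct Term {
  virtual ~Term() = default;
  virtual double energy(const std::vector<double>& x) const = 0;
  virtual void addGradient(const std::vector<double>& x,
                           std::vector<double>& grad) const = 0;
};

struct CompositeTerm : Term {
  void addTerm(std::shared_ptr<Term> term) { terms.push_back(std::move(term)); }

  double energy(const std::vector<double>& x) const override {
    double e = 0;
    for (const auto& t : terms)
      e += t->energy(x);
    return e;
  }

  void addGradient(const std::vector<double>& x,
                   std::vector<double>& grad) const override {
    for (const auto& t : terms)
      t->addGradient(x, grad);
  }

  std::list<std::shared_ptr<Term>> terms;
};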
* */ /* -------------------------------------------------------------------------- */ #include "model.hh" #include "be_engine.hh" #include "logger.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ void Model::setElasticity(Real E_, Real nu_) { setYoungModulus(E_); setPoissonRatio(nu_); updateOperators(); } /* -------------------------------------------------------------------------- */ void Model::applyElasticity(GridBase& stress, const GridBase& strain) const { operators.at("hooke")->apply(const_cast&>(strain), stress); } /* -------------------------------------------------------------------------- */ GridBase& Model::getTraction() { return getField("traction"); } const GridBase& Model::getTraction() const { return (*this)["traction"]; } /* -------------------------------------------------------------------------- */ GridBase& Model::getDisplacement() { return getField("displacement"); } const GridBase& Model::getDisplacement() const { return (*this)["displacement"]; } /* -------------------------------------------------------------------------- */ const std::vector& Model::getSystemSize() const { return system_size; } /* -------------------------------------------------------------------------- */ const std::vector& Model::getDiscretization() const { return discretization; } /* -------------------------------------------------------------------------- */ void Model::solveNeumann() { engine->registerNeumann(); engine->solveNeumann(getTraction(), getDisplacement()); } void Model::solveDirichlet() { engine->registerDirichlet(); engine->solveDirichlet(getDisplacement(), getTraction()); } /* -------------------------------------------------------------------------- */ IntegralOperator* Model::getIntegralOperator(const std::string& name) const { return operators.at(name).get(); } /* -------------------------------------------------------------------------- */ std::vector Model::getIntegralOperators() const { std::vector keys; keys.reserve(operators.size()); std::transform(operators.begin(), operators.end(), std::back_inserter(keys), [](auto&& pair) { return pair.first; }); return keys; } /* -------------------------------------------------------------------------- */ void Model::updateOperators() { for (auto& op : operators) op.second->updateFromModel(); } /* -------------------------------------------------------------------------- */ void Model::addDumper(std::shared_ptr dumper) { this->dumpers.push_back(std::move(dumper)); } void Model::dump() const { for (auto& dumper : dumpers) if (dumper) dumper->dump(*this); } /* -------------------------------------------------------------------------- */ void Model::registerField(const std::string& name, std::shared_ptr> field) { fields[name] = std::move(field); } const GridBase& Model::getField(const std::string& name) const try { return *fields.at(name); } catch (std::out_of_range& e) { Logger().get(LogLevel::warning) << "Field " << name << " not registered in model\n"; throw e; } GridBase& Model::getField(const std::string& name) try { return *fields.at(name); } catch (std::out_of_range& e) { Logger().get(LogLevel::warning) << "Field " << name << " not registered in model\n"; throw e; } std::vector Model::getFields() const { std::vector keys; keys.reserve(fields.size()); std::transform(fields.begin(), fields.end(), std::back_inserter(keys), [](auto&& pair) { return pair.first; }); return keys; } const GridBase& 
Model::operator[](const std::string& name) const { return getField(name); } GridBase& Model::operator[](const std::string& name) { return getField(name); } /* -------------------------------------------------------------------------- */ std::ostream& operator<<(std::ostream& o, const Model& _this) { o << "Model<" << _this.getType() << "> (E = " << _this.getYoungModulus() << ", nu = " << _this.getPoissonRatio() << ")\n"; auto out_collec = [&o](auto&& collec) { std::for_each(collec.begin(), collec.end() - 1, [&o](const auto& x) { o << x << ", "; }); o << collec.back(); }; // Printing domain size o << " - domain = ["; out_collec(_this.getSystemSize()); o << "]\n"; // Printing discretization o << " - discretization = ["; out_collec(_this.getDiscretization()); o << "]\n"; if (mpi::size() > 1) { o << " - global discretization = ["; out_collec(_this.getGlobalDiscretization()); o << "]\n"; } // Print fields o << " - registered fields = ["; out_collec(_this.getFields()); o << "]\n"; o << " - registered operators = ["; out_collec(_this.getIntegralOperators()); o << "]"; - if (_this.dumpers.size()) + if (not _this.dumpers.empty()) o << "\n - " << _this.dumpers.size() << " registered dumpers"; return o; } /* -------------------------------------------------------------------------- */ } // namespace tamaas diff --git a/src/model/volume_potential.hh b/src/model/volume_potential.hh index d06fbbd..1e1e35a 100644 --- a/src/model/volume_potential.hh +++ b/src/model/volume_potential.hh @@ -1,138 +1,138 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
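/*
 * The field registry implemented above in a nutshell: shared ownership keyed
 * by name, with a warning emitted before the lookup failure propagates. A
 * standalone sketch with invented types; the model itself stores GridBase
 * fields and reports through its own Logger.
 */
#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

using Field = std::vector<double>;

class FieldRegistry {
public:
  void registerField(const std::string& name, std::shared_ptr<Field> field) {
    fields[name] = std::move(field);
  }

  Field& getField(const std::string& name) try {
    return *fields.at(name);
  } catch (std::out_of_range&) {
    std::cerr << "Field " << name << " not registered\n";
    throw;  // warn, then let the caller handle the missing field
  }

  Field& operator[](const std::string& name) { return getField(name); }

private:
  std::map<std::string, std::shared_ptr<Field>> fields;
};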
* */ /* -------------------------------------------------------------------------- */ #ifndef VOLUME_POTENTIAL_HH #define VOLUME_POTENTIAL_HH /* -------------------------------------------------------------------------- */ #include "fft_engine.hh" #include "grid_hermitian.hh" #include "grid_view.hh" #include "integral_operator.hh" #include "logger.hh" #include "model_type.hh" /* -------------------------------------------------------------------------- */ #include /* -------------------------------------------------------------------------- */ namespace tamaas { /// Trait type for component management template struct derivative_traits; template <> struct derivative_traits<0> { template static constexpr UInt source_components = model_type_traits::components; template static constexpr UInt out_components = model_type_traits::components; }; template <> struct derivative_traits<1> { template static constexpr UInt source_components = model_type_traits::voigt; template static constexpr UInt out_components = model_type_traits::components; }; template <> struct derivative_traits<2> { template static constexpr UInt source_components = model_type_traits::voigt; template static constexpr UInt out_components = model_type_traits::voigt; }; /** * @brief Volume potential operator class. Applies the operators for computation * of displacements and strains due to residual/eigen strains */ template class VolumePotential : public IntegralOperator { using trait = model_type_traits; protected: using filter_t = std::function; public: VolumePotential(Model* model); /// Update from model (does nothing) void updateFromModel() override {} /// Kind IntegralOperator::kind getKind() const override { return IntegralOperator::neumann; } /// Type model_type getType() const override; /// Apply to all of the source layers void apply(GridBase& input, GridBase& output) const override { applyIf(input, output, [](UInt) { return true; }); } protected: /// Transform source layer-by-layer void transformSource(GridBase& in, filter_t pred) const; /// Transform all source void transformSource(GridBase& in) const { transformSource(in, [](auto) { return true; }); } /// Transform output layer-by-layer template void transformOutput(Func func, GridBase& out) const; /// Initialize fourier buffers void initialize(UInt source_components, UInt out_components, - UInt source_buffer_size); + UInt out_buffer_size); protected: Grid wavevectors; using BufferType = GridHermitian; mutable std::vector source_buffer; mutable std::vector out_buffer; mutable std::unique_ptr engine; }; /* -------------------------------------------------------------------------- */ /* Template implementation */ /* -------------------------------------------------------------------------- */ template template void VolumePotential::transformOutput(Func func, GridBase& out) const { constexpr UInt dim = trait::dimension; auto& o = dynamic_cast&>(out); // Transforming output for (UInt layer : Loop::range(o.sizes().front())) { auto out_layer = make_view(o, layer); auto& fourier_out_layer = func(out_buffer, layer); engine->backward(out_layer, fourier_out_layer); } } } // namespace tamaas #endif // VOLUME_POTENTIAL_HH diff --git a/src/model/westergaard.hh b/src/model/westergaard.hh index 8cf3d42..ee8f299 100644 --- a/src/model/westergaard.hh +++ b/src/model/westergaard.hh @@ -1,90 +1,90 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des 
Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef WESTERGAARD_HH #define WESTERGAARD_HH /* -------------------------------------------------------------------------- */ #include "grid_hermitian.hh" #include "integral_operator.hh" #include "model_type.hh" #include "fft_engine.hh" #include "tamaas.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { /** * @brief Operator based on Westergaard solution and the Dicrete Fourier * Transform. * This class is templated with model type to allow efficient storage of the * influence coefficients. * The integral operator is only applied to surface pressure/displacements, * even for volume models. */ template class Westergaard : public IntegralOperator { using trait = model_type_traits; static constexpr UInt dim = trait::dimension; static constexpr UInt bdim = trait::boundary_dimension; static constexpr UInt comp = trait::components; public: /// Constuctor: initalizes influence coefficients and allocates buffer Westergaard(Model* model); /// Get influence coefficients const GridHermitian& getInfluence() const { return influence; } /// Apply influence coefficients to input - void apply(GridBase& input, GridBase& ouput) const override; + void apply(GridBase& input, GridBase& output) const override; /// Update the influence coefficients void updateFromModel() override { initInfluence(); } /// Kind IntegralOperator::kind getKind() const override { return otype; } /// Type model_type getType() const override { return mtype; } /// Initialize influence coefficients void initInfluence(); template void initFromFunctor(Functor func); /// Apply a functor in Fourier space template void fourierApply(Functor func, GridBase& in, GridBase& out) const; /// Compute L_2 norm of influence functions Real getOperatorNorm() override; public: GridHermitian influence; mutable GridHermitian buffer; mutable std::unique_ptr engine; }; } // namespace tamaas #endif // WESTERGAARD_HH diff --git a/src/percolation/flood_fill.cpp b/src/percolation/flood_fill.cpp index 3aed09b..4e3d345 100644 --- a/src/percolation/flood_fill.cpp +++ b/src/percolation/flood_fill.cpp @@ -1,257 +1,257 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
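/*
 * The spectral coefficients behind the Westergaard operator above: for an
 * isotropic elastic half-space with periodic boundary conditions, surface
 * pressure and normal displacement are related in Fourier space by
 * u(q) = 2 p(q) / (E* |q|), with E* = E / (1 - nu^2). Only that coefficient is
 * sketched here (names invented); how the library builds and applies its
 * influence grid between forward and backward FFTs is not reproduced.
 */
#include <cmath>

double westergaard_coefficient(double qx, double qy, double E, double nu) {
  const double E_star = E / (1 - nu * nu);  // effective (plane-strain) modulus
  const double q = std::sqrt(qx * qx + qy * qy);
  return q > 0 ? 2.0 / (E_star * q) : 0.0;  // zero mode left untouched here
}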
See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "flood_fill.hh" #include "partitioner.hh" #include #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { Int unsigned_modulo(Int i, UInt n) { return (i % n + n) % n; } template std::array unsigned_modulo(const std::array& t, const std::array& n) { std::array index; for (UInt i = 0; i < dim; ++i) index[i] = unsigned_modulo(t[i], n[i]); return index; } template std::array cast_int(const std::array& a) { std::array r; for (UInt i = 0; i < dim; ++i) r[i] = (Int)a[i]; return r; } template Cluster::Cluster(const Cluster& other) : points(other.points), perimeter(other.perimeter) {} template <> auto Cluster<1>::getNextNeighbors(const std::array& p) { std::vector neighbors(2); Int i = p[0]; neighbors[0] = {i - 1}; neighbors[1] = {i + 1}; return neighbors; } template <> -auto Cluster<1>::getDiagonalNeighbors(const std::array&) { +auto Cluster<1>::getDiagonalNeighbors(const std::array& /*p*/) { return std::vector(); } template <> auto Cluster<2>::getNextNeighbors(const std::array& p) { std::vector neighbors(4); Int i = p[0], j = p[1]; neighbors[0] = {i + 1, j}; neighbors[1] = {i - 1, j}; neighbors[2] = {i, j - 1}; neighbors[3] = {i, j + 1}; return neighbors; } template <> auto Cluster<2>::getDiagonalNeighbors(const std::array& p) { std::vector neighbors(4); Int i = p[0], j = p[1]; neighbors[0] = {i + 1, j + 1}; neighbors[1] = {i - 1, j - 1}; neighbors[2] = {i - 1, j + 1}; neighbors[3] = {i + 1, j - 1}; return neighbors; } template <> auto Cluster<3>::getNextNeighbors(const std::array& p) { std::vector neighbors(6); Int i = p[0], j = p[1], k = p[2]; neighbors[0] = {i + 1, j, k}; neighbors[1] = {i - 1, j, k}; neighbors[2] = {i, j - 1, k}; neighbors[3] = {i, j + 1, k}; neighbors[4] = {i, j, k - 1}; neighbors[5] = {i, j, k + 1}; return neighbors; } template <> auto Cluster<3>::getDiagonalNeighbors(const std::array& p) { std::vector neighbors(20); Int i = p[0], j = p[1], k = p[2]; // 8 corners neighbors[0] = {i + 1, j + 1, k + 1}; neighbors[1] = {i + 1, j + 1, k - 1}; neighbors[2] = {i + 1, j - 1, k + 1}; neighbors[3] = {i + 1, j - 1, k - 1}; neighbors[4] = {i - 1, j + 1, k + 1}; neighbors[5] = {i - 1, j + 1, k - 1}; neighbors[6] = {i - 1, j - 1, k + 1}; neighbors[7] = {i - 1, j - 1, k - 1}; // 4 diagonals in i = 0 neighbors[8] = {i, j + 1, k + 1}; neighbors[9] = {i, j + 1, k - 1}; neighbors[10] = {i, j - 1, k + 1}; neighbors[11] = {i, j - 1, k - 1}; // 4 diagonals in j = 0 neighbors[12] = {i + 1, j, k + 1}; neighbors[13] = {i + 1, j, k - 1}; neighbors[14] = {i - 1, j, k + 1}; neighbors[15] = {i - 1, j, k - 1}; // 4 diagonals in k = 0 neighbors[16] = {i + 1, j + 1, k}; neighbors[17] = {i + 1, j - 1, k}; neighbors[18] = {i - 1, j + 1, k}; neighbors[19] = {i - 1, j - 1, k}; return neighbors; } template std::pair, std::array> Cluster::boundingBox() const { // TODO make sure periodic boundaries are correctly unwrapped std::array mins, maxs; mins.fill(std::numeric_limits::max()); maxs.fill(std::numeric_limits::min()); for (const auto& p : points) for (UInt i = 0; i < dim; ++i) { mins[i] = std::min(mins[i], p[i]); maxs[i] = std::max(maxs[i], p[i]); } return std::make_pair(mins, maxs); } template -Cluster::Cluster(Point start, const Grid& contact, +Cluster::Cluster(Point start, 
const Grid& map, Grid& visited, bool diagonal) { // Visiting stack std::stack> visiting; visiting.push(start); // Contact sizes - const auto& n = contact.sizes(); + const auto& n = map.sizes(); while (not visiting.empty()) { auto p = unsigned_modulo(visiting.top(), n); if (visited(p)) { visiting.pop(); continue; } visited(p) = true; points.push_back(visiting.top()); visiting.pop(); auto process = [&](const std::vector& neighbors, const bool do_perimeter) { for (auto& p : neighbors) { auto index = unsigned_modulo(p, n); - if (not visited(index) and contact(index)) { + if (not visited(index) and map(index)) { visiting.push(cast_int(index)); } - if (do_perimeter and (not contact(index))) + if (do_perimeter and (not map(index))) ++perimeter; } }; process(getNextNeighbors(p), true); if (diagonal) { process(getDiagonalNeighbors(p), false); } } } -std::list> FloodFill::getSegments(const Grid& contact) { - auto n = contact.sizes(); +std::list> FloodFill::getSegments(const Grid& map) { + auto n = map.sizes(); Grid visited(n, 1); visited = false; std::list> clusters; for (UInt i = 0; i < n[0]; ++i) { - if (contact(i) && !visited(i)) { - clusters.emplace_back(std::array{(Int)i}, contact, visited, + if (map(i) && !visited(i)) { + clusters.emplace_back(std::array{(Int)i}, map, visited, false); } } return clusters; } -std::list> FloodFill::getClusters(const Grid& contact, +std::list> FloodFill::getClusters(const Grid& map, bool diagonal) { - auto global_contact = Partitioner<2>::gather(contact); + auto global_contact = Partitioner<2>::gather(map); auto n = global_contact.sizes(); Grid visited(n, 1); visited = false; std::list> clusters; if (mpi::rank() == 0) for (UInt i = 0; i < n[0]; ++i) { for (UInt j = 0; j < n[1]; ++j) { if (global_contact(i, j) && !visited(i, j)) { clusters.emplace_back(std::array{(Int)i, (Int)j}, global_contact, visited, diagonal); } } } return clusters; } -std::list> FloodFill::getVolumes(const Grid& contact, +std::list> FloodFill::getVolumes(const Grid& map, bool diagonal) { - auto n = contact.sizes(); + auto n = map.sizes(); Grid visited(n, 1); visited = false; std::list> clusters; for (UInt i = 0; i < n[0]; ++i) { for (UInt j = 0; j < n[1]; ++j) { for (UInt k = 0; k < n[2]; ++k) { - if (contact(i, j, k) && !visited(i, j, k)) { + if (map(i, j, k) && !visited(i, j, k)) { clusters.emplace_back(std::array{(Int)i, (Int)j, (Int)k}, - contact, visited, diagonal); + map, visited, diagonal); } } } } return clusters; } template class Cluster<1>; template class Cluster<2>; template class Cluster<3>; } // namespace tamaas diff --git a/src/percolation/flood_fill.hh b/src/percolation/flood_fill.hh index 107f238..c64159a 100644 --- a/src/percolation/flood_fill.hh +++ b/src/percolation/flood_fill.hh @@ -1,76 +1,76 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. 
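The cluster constructor above is a stack-based flood fill on a periodic boolean map: indices are wrapped with the positive modulo (i % n + n) % n, cells already visited are skipped, and the perimeter counter grows by one for every nearest neighbour that lies outside the map (diagonal neighbours, when enabled, only extend the cluster, never the perimeter). A condensed standalone 2-D version restricted to nearest neighbours is sketched below, with std::vector standing in for Grid; the names and container layout are illustrative only.

#include <array>
#include <stack>
#include <vector>

// Positive modulo so that negative neighbour indices wrap around (periodic map)
inline int pmod(int i, int n) { return (i % n + n) % n; }

struct Cluster2D {
  std::vector<std::array<int, 2>> points;
  int perimeter = 0;
};

// Stack-based flood fill of the cluster of true cells containing `start`.
// `map` and `visited` are row-major nx x ny images.
Cluster2D floodFill(std::array<int, 2> start, const std::vector<char>& map,
                    std::vector<char>& visited, int nx, int ny) {
  auto at = [&](const std::vector<char>& g, int i, int j) -> char {
    return g[pmod(i, nx) * ny + pmod(j, ny)];
  };

  Cluster2D cluster;
  std::stack<std::array<int, 2>> visiting;
  visiting.push(start);

  while (!visiting.empty()) {
    const auto p = visiting.top();
    visiting.pop();
    const int i = pmod(p[0], nx), j = pmod(p[1], ny);
    if (visited[i * ny + j])
      continue;
    visited[i * ny + j] = 1;
    cluster.points.push_back({i, j});

    const std::array<std::array<int, 2>, 4> neighbors{
        {{i + 1, j}, {i - 1, j}, {i, j + 1}, {i, j - 1}}};
    for (const auto& q : neighbors) {
      if (at(map, q[0], q[1]) && !at(visited, q[0], q[1]))
        visiting.push(q);
      if (!at(map, q[0], q[1]))  // neighbour outside the map: boundary edge
        ++cluster.perimeter;
    }
  }
  return cluster;
}

The 1-D segment and 3-D volume variants follow the same pattern with 2 and 6 nearest neighbours respectively.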
* * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef FLOOD_FILL_H #define FLOOD_FILL_H /* -------------------------------------------------------------------------- */ #include "grid.hh" #include /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ template class Cluster { using Point = std::array; public: /// Constructor Cluster(Point start, const Grid& map, Grid& visited, bool diagonal); /// Copy constructor Cluster(const Cluster& other); /// Default constructor Cluster() = default; /// Get area of cluster UInt getArea() const { return getPoints().size(); } /// Get perimeter of cluster UInt getPerimeter() const { return perimeter; } /// Get contact points const std::list& getPoints() const { return points; } /// Get bounding box std::pair, std::array> boundingBox() const; /// Assign next neighbors auto getNextNeighbors(const std::array& p); /// Assign diagonal neighbors auto getDiagonalNeighbors(const std::array& p); private: std::list points; UInt perimeter = 0; }; /* -------------------------------------------------------------------------- */ class FloodFill { public: - static std::list> getSegments(const Grid& contact); + static std::list> getSegments(const Grid& map); /// Returns a list of clusters from a boolean contact map - static std::list> getClusters(const Grid& contact, + static std::list> getClusters(const Grid& map, bool diagonal); static std::list> getVolumes(const Grid& map, bool diagonal); }; /* -------------------------------------------------------------------------- */ } // namespace tamaas /* -------------------------------------------------------------------------- */ #endif // FLOOD_FILL_H diff --git a/src/solvers/contact_solver.cpp b/src/solvers/contact_solver.cpp index 0c23f99..78f6562 100644 --- a/src/solvers/contact_solver.cpp +++ b/src/solvers/contact_solver.cpp @@ -1,64 +1,64 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "contact_solver.hh" #include "logger.hh" #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { ContactSolver::ContactSolver(Model& model, const GridBase& surface, Real tolerance) - : model(model), surface(), functional(), tolerance(tolerance), + : model(model), tolerance(tolerance), surface_stddev(std::sqrt(surface.var())) { auto discretization = model.getBoundaryDiscretization(); auto n_points = model.getTraction().getNbPoints(); if (n_points != surface.dataSize()) TAMAAS_EXCEPTION("Model size and surface size do not match!"); this->surface.wrap(surface); _gap = allocateGrid(model.getType(), discretization); model.registerField("gap", _gap); } /* -------------------------------------------------------------------------- */ void ContactSolver::printState(UInt iter, Real cost_f, Real error) { // UInt precision = std::cout.precision(); if (iter % dump_frequency) return; Logger().get(LogLevel::info) << std::setw(5) << iter << " " << std::setw(15) << std::scientific << cost_f << " " << std::setw(15) << error << std::endl << std::fixed; } /* -------------------------------------------------------------------------- */ void ContactSolver::addFunctionalTerm( std::shared_ptr func) { functional.addFunctionalTerm(std::move(func)); } } // namespace tamaas /* -------------------------------------------------------------------------- */ diff --git a/src/solvers/dfsane_solver.cpp b/src/solvers/dfsane_solver.cpp index 17526f4..9eda0ca 100644 --- a/src/solvers/dfsane_solver.cpp +++ b/src/solvers/dfsane_solver.cpp @@ -1,167 +1,163 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "dfsane_solver.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ DFSANESolver::DFSANESolver(Residual& residual) : EPSolver(residual), search_direction(residual.getVector().dataSize(), residual.getVector().getNbComponents()), previous_residual(residual.getVector().dataSize(), residual.getVector().getNbComponents()), current_x(residual.getVector().dataSize(), residual.getVector().getNbComponents()), delta_x(residual.getVector().dataSize(), residual.getVector().getNbComponents()), delta_residual(residual.getVector().dataSize(), residual.getVector().getNbComponents()) {} void DFSANESolver::solve() { EPSolver::beforeSolve(); auto& x_prev = *_x; UInt k = 0, nmax = 500; Real F_norm = 0; previous_merits.clear(); _residual.computeResidual(x_prev); previous_residual = _residual.getVector(); const auto F_norm0 = previous_residual.l2norm(); eta = [F_norm0](UInt k) { return F_norm0 / ((1 + k) * (1 + k)); }; const std::pair sigma_extrema{1e-10, 1e10}; Real sigma = 1; do { computeSearchDirection(sigma); lineSearch(eta(k)); // No need for a solution update // it was done automatically in line search sigma = computeSpectralCoeff(sigma_extrema); x_prev = current_x; previous_residual = _residual.getVector(); F_norm = previous_residual.l2norm(); Logger().get(LogLevel::debug) << TAMAAS_DEBUG_MSG(k << ' ' << F_norm); } while (F_norm > getTolerance() and k++ < nmax); if (k >= nmax) TAMAAS_EXCEPTION("DF-SANE did not converge"); Logger().get(LogLevel::info) << "DF-SANE: sucessful convergence (" << k << " iterations, " << getTolerance() << ")\n"; _residual.computeResidualDisplacement(*_x); } Real DFSANESolver::computeSpectralCoeff(const std::pair& bounds) { delta_x = current_x; delta_x -= *_x; delta_residual = _residual.getVector(); delta_residual -= previous_residual; auto trial = delta_x.dot(delta_x) / delta_x.dot(delta_residual); if (std::abs(trial) >= bounds.first and std::abs(trial) <= bounds.second) return trial; - else { - auto F = previous_residual.l2norm(); - - // Rule from Cruz et al. (2006) - if (F > 1) - return 1; - else if (F < 1e-5) - return 1e5; - else - return 1 / F; - } + auto F = previous_residual.l2norm(); + + // Rule from Cruz et al. 
(2006) + if (F > 1) + return 1; + if (F < 1e-5) + return 1e5; + return 1 / F; } void DFSANESolver::computeSearchDirection(Real sigma) { _residual.computeResidual(*_x); search_direction = _residual.getVector(); search_direction *= -sigma; } void DFSANESolver::lineSearch(Real eta_k) { const UInt M = 10; const Real gamma = 1e-4; Real alphap = 1, alpham = 1; const std::pair tau_extrema{0.1, 0.5}; // Merit function (nexp = 2) const auto f = [&](Real alpha, Real sign) { current_x = search_direction; current_x *= alpha * sign; current_x += *_x; _residual.computeResidual(current_x); return _residual.getVector().dot(_residual.getVector()); }; // Manage previous merits const auto fk = _residual.getVector().dot(_residual.getVector()); previous_merits.push_front(fk); if (previous_merits.size() > M) previous_merits.pop_back(); const auto fbar = *std::max_element(previous_merits.begin(), previous_merits.end()); do { Real fp = f(alphap, +1); if (fp < fbar + eta_k - gamma * alphap * alphap * fk) return; Real fm = f(alpham, -1); if (fm < fbar + eta_k - gamma * alpham * alpham * fk) return; alphap = computeAlpha(alphap, fp, fk, tau_extrema); alpham = computeAlpha(alpham, fm, fk, tau_extrema); if (std::isnan(alphap) or std::isnan(alpham)) TAMAAS_EXCEPTION("NaN error in line search"); } while (true); } -Real DFSANESolver::computeAlpha(const Real alpha, Real f, Real fk, +Real DFSANESolver::computeAlpha(Real alpha, Real f, Real fk, const std::pair& bounds) { const auto alphat = alpha * alpha * fk / (f + (2 * alpha - 1) * fk); if (alphat < bounds.first * alpha) return bounds.first * alpha; - else if (alphat > bounds.second * alpha) + if (alphat > bounds.second * alpha) return bounds.second * alpha; - else - return alphat; + return alphat; } } // namespace tamaas diff --git a/src/solvers/dfsane_solver.hh b/src/solvers/dfsane_solver.hh index 6b6624e..456b993 100644 --- a/src/solvers/dfsane_solver.hh +++ b/src/solvers/dfsane_solver.hh @@ -1,70 +1,69 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef DFSANE_SOLVER_HH #define DFSANE_SOLVER_HH /* -------------------------------------------------------------------------- */ #include "ep_solver.hh" #include "grid_base.hh" #include "residual.hh" #include #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ -/** +/** @brief Derivative-free non-linear solver This algorithm is based on W. La Cruz, J. Martínez, and M. Raydan, “Spectral residual method without gradient information for solving large-scale nonlinear systems of equations,” Math. Comp., vol. 75, no. 255, pp. 
1429–1448, 2006, doi: 10.1090/S0025-5718-06-01840-0. The same algorithm is available in scipy.optimize, but this version is robustly parallel by default (i.e. does not depend on BLAS's parallelism and is future-proof for MPI parallelism). */ class DFSANESolver : public EPSolver { // Public interface public: DFSANESolver(Residual& residual); void solve() override; // Algorithm functions protected: Real computeSpectralCoeff(const std::pair& bounds); void computeSearchDirection(Real sigma); void lineSearch(Real eta_k); - Real computeAlpha(const Real alpha, Real f, Real fk, + Real computeAlpha(Real alpha, Real f, Real fk, const std::pair& bounds); - protected: GridBase search_direction, previous_residual, current_x, delta_x, delta_residual; std::deque previous_merits; std::function eta; }; } // namespace tamaas #endif diff --git a/src/solvers/epic.cpp b/src/solvers/epic.cpp index 30c4a0a..3a0d354 100644 --- a/src/solvers/epic.cpp +++ b/src/solvers/epic.cpp @@ -1,151 +1,151 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
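The algorithm needs nothing but residual evaluations: the residual itself, scaled by the spectral coefficient sigma = (dx . dx) / (dx . dF) and safeguarded with the rule of Cruz et al. (2006), is the search direction, and a trial step is accepted by the non-monotone criterion f(x +/- alpha d) <= max(recent merits) + eta_k - gamma alpha^2 f(x), with forcing term eta_k = |F(x_0)| / (1 + k)^2. The standalone sketch below reproduces this structure on an arbitrary two-component linear residual with plain std::vector; it merges the separate alpha+ and alpha- step lengths of the original into one, and none of the names are part of the tamaas classes.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <deque>
#include <vector>

using Vec = std::vector<double>;

double dot(const Vec& a, const Vec& b) {
  double s = 0;
  for (std::size_t i = 0; i < a.size(); ++i)
    s += a[i] * b[i];
  return s;
}

// Small linear test residual F(x) = A x - b; any nonlinear residual works the
// same way, since only residual evaluations are required.
Vec residual(const Vec& x) {
  return {3 * x[0] + x[1] - 1, x[0] + 2 * x[1] - 1};
}

int main() {
  Vec x{0., 0.}, F = residual(x);
  double sigma = 1, fk = dot(F, F);  // fk is the merit |F|^2
  const double gamma = 1e-4, f0 = std::sqrt(fk);
  std::deque<double> merits{fk};  // last M merit values (M = 10)

  for (int k = 0; k < 500 && std::sqrt(fk) > 1e-8; ++k) {
    // Safeguard on the spectral coefficient (rule of Cruz et al., 2006)
    if (!(std::abs(sigma) >= 1e-10 && std::abs(sigma) <= 1e10)) {
      const double Fnorm = std::sqrt(fk);
      sigma = Fnorm > 1 ? 1 : (Fnorm < 1e-5 ? 1e5 : 1 / Fnorm);
    }
    const Vec d{-sigma * F[0], -sigma * F[1]};  // search direction

    // Non-monotone line search with decaying forcing term eta_k
    const double eta = f0 / ((1. + k) * (1. + k));
    const double fbar = *std::max_element(merits.begin(), merits.end());
    double alpha = 1, fn = 0;
    Vec xn, Fn;
    bool accepted = false;
    while (!accepted) {
      for (int side = 0; side < 2 && !accepted; ++side) {
        const double s = (side == 0) ? 1. : -1.;
        Vec xt{x[0] + s * alpha * d[0], x[1] + s * alpha * d[1]};
        Vec Ft = residual(xt);
        fn = dot(Ft, Ft);
        if (fn <= fbar + eta - gamma * alpha * alpha * fk) {
          xn = xt;
          Fn = Ft;
          accepted = true;
        }
      }
      if (!accepted) {  // safeguarded backtracking, clamped to [0.1, 0.5] * alpha
        const double at = alpha * alpha * fk / (fn + (2 * alpha - 1) * fk);
        alpha = std::min(std::max(at, 0.1 * alpha), 0.5 * alpha);
      }
    }

    // Spectral (Barzilai-Borwein type) coefficient for the next iteration
    const Vec dx{xn[0] - x[0], xn[1] - x[1]}, dF{Fn[0] - F[0], Fn[1] - F[1]};
    sigma = dot(dx, dx) / dot(dx, dF);

    x = xn;
    F = Fn;
    fk = fn;
    merits.push_back(fk);
    if (merits.size() > 10)
      merits.pop_front();
  }
  std::printf("x = (%g, %g), |F| = %g\n", x[0], x[1], std::sqrt(fk));
}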
* */ /* -------------------------------------------------------------------------- */ #include "epic.hh" #include "logger.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ EPICSolver::EPICSolver(ContactSolver& csolver, EPSolver& epsolver, Real tolerance, Real relaxation) : csolver(csolver), epsolver(epsolver), tolerance(tolerance), relaxation(relaxation) { auto& model = csolver.getModel(); surface.wrap(csolver.getSurface()); pressure.resize(model.getTraction().getNbPoints()); #define SET_VIEWS(_, __, type) \ case type: { \ setViews(); \ break; \ } switch (model.getType()) { BOOST_PP_SEQ_FOR_EACH(SET_VIEWS, ~, TAMAAS_MODEL_TYPES); } #undef SET_VIEWS } /* -------------------------------------------------------------------------- */ void EPICSolver::solve(std::vector load) { UInt n = 0, nmax = 1000; Real error = 0.; const GridBase initial_surface(surface); Real normalizing_factor = std::sqrt(initial_surface.var()); GridBase previous_residual(*residual_disp); GridBase relaxation_direction(*residual_disp); previous_residual = 0; pressure = *pressure_inc; // set previous pressure to current model pressure do { fixedPoint(relaxation_direction, previous_residual, initial_surface, load); relaxation_direction -= previous_residual; relaxation_direction *= relaxation; error = computeError(*residual_disp, previous_residual, normalizing_factor); previous_residual += relaxation_direction; Logger().get(LogLevel::info) << "[EPIC] error = " << std::scientific << error << std::fixed << std::endl; } while (error > tolerance && n++ < nmax); // computing full pressure pressure += *pressure_inc; // setting the model pressure to full converged pressure *pressure_inc = pressure; // updating plastic state epsolver.updateState(); } /* -------------------------------------------------------------------------- */ void EPICSolver::acceleratedSolve(std::vector load) { UInt n = 0, nmax = 1000; Real error = 0.; const GridBase initial_surface(surface); Real normalizing_factor = std::sqrt(initial_surface.var()); GridBase doubleg(*residual_disp), dg(*residual_disp), d2x(*residual_disp), x(*residual_disp), x_prev(*residual_disp); do { // First compute g(x) and g(g(x)) fixedPoint(dg, x, initial_surface, load); fixedPoint(doubleg, dg, initial_surface, load); // dg contains g(x): compute delta x d2x = dg; d2x -= x; // compute delta g(x) dg *= -1; dg += doubleg; // now we have delta g(x) and delta x, compute delta^2 x d2x *= -1; d2x += dg; // Apply Irons & Truck iteration auto f = dg.dot(d2x) / d2x.dot(d2x); dg *= -f; x = doubleg; x += dg; error = computeError(x, x_prev, normalizing_factor); x_prev = x; Logger().get(LogLevel::info) << "[EPIC] error = " << std::scientific << error << std::fixed << std::endl; } while (error > tolerance && n++ < nmax); // computing full pressure pressure += *pressure_inc; // setting the model pressure to full converged pressure *pressure_inc = pressure; // updating plastic state epsolver.updateState(); } /* -------------------------------------------------------------------------- */ void EPICSolver::fixedPoint(GridBase& result, const GridBase& x, const GridBase& initial_surface, std::vector load) { surface = initial_surface; surface -= x; - csolver.solve(load); + csolver.solve(std::move(load)); *pressure_inc -= pressure; epsolver.solve(); result = *residual_disp; } /* -------------------------------------------------------------------------- */ Real 
EPICSolver::computeError(const GridBase& current, const GridBase& prev, Real factor) const { GridBase error(current); error -= prev; return std::sqrt(error.dot(error)) / factor; } /* -------------------------------------------------------------------------- */ } // namespace tamaas diff --git a/src/solvers/epic.hh b/src/solvers/epic.hh index a372336..35ae12f 100644 --- a/src/solvers/epic.hh +++ b/src/solvers/epic.hh @@ -1,85 +1,83 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef EPIC_HH #define EPIC_HH /* -------------------------------------------------------------------------- */ #include "contact_solver.hh" #include "ep_solver.hh" #include "model.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ class EPICSolver { public: /// Constructor - EPICSolver(ContactSolver& csolver, EPSolver& epsolver, - Real tolerance = 1e-10, Real relaxation = 0.3); + EPICSolver(ContactSolver& csolver, EPSolver& epsolver, Real tolerance = 1e-10, + Real relaxation = 0.3); void solve(std::vector load); void acceleratedSolve(std::vector load); - Real computeError(const GridBase& current, - const GridBase& prev, - Real factor) const; + Real computeError(const GridBase& current, const GridBase& prev, + Real factor) const; - void fixedPoint(GridBase& result, - const GridBase& residual_disp, - const GridBase& initial_surface, - std::vector load); + void fixedPoint(GridBase& result, const GridBase& x, + const GridBase& initial_surface, + std::vector load); template void setViews(); protected: GridBase surface; ///< corrected surface GridBase pressure; ///< current pressure std::unique_ptr> residual_disp; ///< plastic residual disp std::unique_ptr> pressure_inc; ///< pressure increment ContactSolver& csolver; EPSolver& epsolver; Real tolerance, relaxation; }; /* -------------------------------------------------------------------------- */ /* Template impl. 
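acceleratedSolve wraps the same fixed-point map g in the delta-squared extrapolation usually attributed to Irons and Tuck: with dx = g(x) - x, dg = g(g(x)) - g(x) and d2x = dg - dx, the next iterate is g(g(x)) - (dg . d2x) / (d2x . d2x) * dg, which in one dimension reduces to Aitken's classical formula. A small self-contained sketch on an arbitrary linear contraction follows; the map, tolerance and iteration cap are placeholders, and in EPIC the map would be one contact-plus-plasticity sweep.

#include <array>
#include <cmath>
#include <cstdio>

using Vec2 = std::array<double, 2>;

// Placeholder fixed-point map x -> g(x); a simple linear contraction is used
// here so the sketch is self-contained.
Vec2 g(const Vec2& x) {
  return {0.6 * x[0] + 0.2 * x[1] + 1.0, 0.1 * x[0] + 0.5 * x[1] + 1.0};
}

double dot(const Vec2& a, const Vec2& b) { return a[0] * b[0] + a[1] * b[1]; }

int main() {
  Vec2 x{0., 0.};
  for (int n = 0; n < 100; ++n) {
    const Vec2 gx = g(x), ggx = g(gx);
    const Vec2 dx{gx[0] - x[0], gx[1] - x[1]};      // delta x
    const Vec2 dg{ggx[0] - gx[0], ggx[1] - gx[1]};  // delta g(x)
    const Vec2 d2x{dg[0] - dx[0], dg[1] - dx[1]};   // delta^2 x
    const double error = std::sqrt(dot(dx, dx));
    std::printf("%3d %.3e\n", n, error);
    if (error < 1e-12 || dot(d2x, d2x) == 0.)
      break;
    // Irons-Tuck update: x <- g(g(x)) - (dg . d2x) / (d2x . d2x) * dg
    const double f = dot(dg, d2x) / dot(d2x, d2x);
    x = {ggx[0] - f * dg[0], ggx[1] - f * dg[1]};
  }
}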
*/ /* -------------------------------------------------------------------------- */ template void EPICSolver::setViews() { constexpr UInt dim = model_type_traits::dimension; constexpr UInt bdim = model_type_traits::boundary_dimension; constexpr UInt comp = model_type_traits::components; pressure_inc = std::unique_ptr>{new GridView( csolver.getModel().getTraction(), {}, comp - 1)}; residual_disp = std::unique_ptr>{new GridView( csolver.getModel().getDisplacement(), model_type_traits::indices, comp - 1)}; } /* -------------------------------------------------------------------------- */ } // namespace tamaas #endif // EPIC_HH diff --git a/src/solvers/kato.hh b/src/solvers/kato.hh index 2dbac4b..cfaa212 100644 --- a/src/solvers/kato.hh +++ b/src/solvers/kato.hh @@ -1,344 +1,322 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef KATO_HH #define KATO_HH /* -------------------------------------------------------------------------- */ #include "contact_solver.hh" #include "meta_functional.hh" #include "model_type.hh" #include "static_types.hh" #include "tamaas.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { class Kato : public ContactSolver { public: /// Constructor Kato(Model& model, const GridBase& surface, Real tolerance, Real mu); public: /// Solve Real solve(GridBase& p0, UInt proj_iter); /// Solve relaxed problem Real solveRelaxed(GridBase& g0); /// Solve regularized problem Real solveRegularized(GridBase& p0, Real r); /// Compute cost function Real computeCost(bool use_tresca = false); public: /// Template for solve function template Real solveTmpl(GridBase& p0, UInt proj_iter); /// Template for solveRelaxed function template Real solveRelaxedTmpl(GridBase& g0); /// Template for solveRegularized function template Real solveRegularizedTmpl(GridBase& p0, Real r); public: /// Creates surfaceComp form surface template void initSurfaceWithComponents(); /// Compute gradient of functional template void computeGradient(bool use_tresca = false); /// Project pressure on friction cone template void enforcePressureConstraints(GridBase& p0, UInt proj_iter); /// Project on C template void enforcePressureMean(GridBase& p0); /// Project on D template void enforcePressureCoulomb(); /// Project on D (Tresca) template void enforcePressureTresca(); /// Comupte mean value of field template Vector computeMean(GridBase& field); /// Add vector to each point of field template void addUniform(GridBase& field, GridBase& vec); /// Regularization function with factor r (0 -> unregugularized) static __device__ __host__ Real regularize(Real x, Real r); /// Compute grids of dual and primal variables template void 
computeValuesForCost(GridBase& lambda, GridBase& eta, GridBase& p_N, GridBase& p_C); /// Compute dual and primal variables with Tresca friction template void computeValuesForCostTresca(GridBase& lambda, GridBase& eta, GridBase& p_N, GridBase& p_C); /// Compute total displacement template void computeFinalGap(); protected: BEEngine& engine; GridBase* gap = nullptr; GridBase* pressure = nullptr; std::unique_ptr> surfaceComp = nullptr; Real mu = 0; UInt N = 0; // number of points }; /* -------------------------------------------------------------------------- */ template void Kato::computeGradient(bool use_tresca) { engine.solveNeumann(*pressure, *gap); *gap -= *surfaceComp; // Impose zero tangential displacement in non-sliding zone UInt count_static = 0; Vector g_static; using pvector = VectorProxy; const auto mu = this->mu; if (!use_tresca) { // with Coulomb friction count_static = Loop::reduce( [mu] CUDA_LAMBDA(VectorProxy p) -> UInt { VectorProxy p_T(p(0)); Real p_T_norm = p_T.l2norm(); Real p_N = p(comp - 1); - if (0.99 * mu * p_N > p_T_norm) { // non-sliding contact - return 1; - } else { - return 0; - } + return (0.99 * mu * p_N > p_T_norm) ? 1 : 0; }, range(*pressure)); g_static = Loop::reduce( [mu] CUDA_LAMBDA(VectorProxy g, VectorProxy p) -> Vector { VectorProxy p_T(p(0)); Real p_T_norm = p_T.l2norm(); Real p_N = p(comp - 1); - if (0.99 * mu * p_N > p_T_norm) { // non-sliding contact - return g; // to compute mean of g_T - } else { - return 0; - } + if (0.99 * mu * p_N > p_T_norm) + return g; + return 0; }, range(*gap), range(*pressure)); } else { // with Tresca friction count_static = Loop::reduce( [mu] CUDA_LAMBDA(VectorProxy p) -> UInt { VectorProxy p_T(p(0)); Real p_T_norm = p_T.l2norm(); Real p_N = p(comp - 1); - if (0.99 * mu > p_T_norm && p_N > 0) { // non-sliding contact - return 1; - } else { - return 0; - } + return (0.99 * mu > p_T_norm && p_N > 0) ? 1 : 0; }, range(*pressure)); g_static = Loop::reduce( [mu] CUDA_LAMBDA(VectorProxy g, VectorProxy p) -> Vector { VectorProxy p_T(p(0)); Real p_N = p(comp - 1); Real p_T_norm = p_T.l2norm(); - if (0.99 * mu > p_T_norm && p_N > 0) { // non-sliding contact - return g; // to compute mean of g_T - } else { - return 0; - } + if (0.99 * mu > p_T_norm && p_N > 0) + return g; + return 0; }, range(*gap), range(*pressure)); } if (count_static != 0) { g_static /= count_static; // g_static(comp - 1) = 0; } else { // if no sticking zone, mean computed on sliding zone count_static = Loop::reduce( [] CUDA_LAMBDA(VectorProxy p) -> UInt { - if (p(comp - 1) > 0) { - return 1; - } else { - return 0; - } + return (p(comp - 1) > 0) ? 1 : 0; }, range(*pressure)); Real g_N_static = Loop::reduce( - [] CUDA_LAMBDA(VectorProxy g, - VectorProxy p) -> Real { - if (p(comp - 1) > 0) { - return g(comp - 1); - } else { - return 0; - } - }, + [] CUDA_LAMBDA(VectorProxy g, VectorProxy p) + -> Real { return (p(comp - 1) > 0) ? 
g(comp - 1) : 0; }, range(*gap), range(*pressure)); g_static(comp - 1) = g_N_static / count_static; } // Add frictionnal term to functional if (!use_tresca) { // with Coulomb friction Loop::loop( [mu, g_static] CUDA_LAMBDA(VectorProxy g) { g -= g_static; VectorProxy g_T(g(0)); Real g_T_norm = g_T.l2norm(); g(comp - 1) += mu * g_T_norm; // Frictionnal work }, range(*gap)); } else { // with Tresca friction *gap -= g_static; // Loop::stridedLoop( // [this, g_static] CUDA_LAMBDA(VectorProxy&& g, // VectorProxy&& p) { // g -= g_static; // VectorProxy g_T = g(0); // Real g_T_norm = g_T.l2norm(); // Real p_N = p(comp - 1); // if (p_N > 0) // g(comp - 1) += mu/p_N * g_T_norm; // Frictionnal work // }, // *gap, *pressure); } // Loop::stridedLoop( // [this] CUDA_LAMBDA(VectorProxy&& g, VectorProxy&& // p) { // VectorProxy g_T = g(0); // Real g_T_norm = g_T.l2norm(); // VectorProxy g_N = g(comp - 1); // VectorProxy p_T = p(0); // Real p_T_norm = p_T.l2norm(); // VectorProxy p_N = p(comp - 1); // if (p_T_norm != 0) { // g_T = p_T; // g_T *= -g_T_norm / p_T_norm; // } // g_N += mu * g_T_norm; // Frictionnal work // }, // *gap, *pressure); } /* -------------------------------------------------------------------------- */ /** * Projects \f$\vec{p}\f$ on \f$\mathcal{C}\f$ and \f$\mathcal{D}\f$. */ // template // void Kato::enforcePressureConstraints(GridBase& p0, UInt proj_iter) { // for (UInt i = 0; i < proj_iter; i++) { // enforcePressureMean(p0); // enforcePressureCoulomb(); // } // } /* -------------------------------------------------------------------------- */ /** * Projects \f$\vec{p}\f$ on \f$\mathcal{C}\f$ and \f$\mathcal{D}\f$. */ template void Kato::enforcePressureConstraints(GridBase& p0, UInt proj_iter) { for (UInt i = 0; i < proj_iter; i++) { enforcePressureMean(p0); enforcePressureCoulomb(); } } /* -------------------------------------------------------------------------- */ template void Kato::enforcePressureCoulomb() { const auto mu = this->mu; Loop::loop( [mu] CUDA_LAMBDA(VectorProxy p) { VectorProxy p_T(p(0)); Real p_N = p(comp - 1); Real p_T_sqrd = p_T.l2squared(); // Projection normale au cône de friction bool cond1 = (p_N >= 0 && p_T_sqrd <= mu * mu * p_N * p_N); bool cond2 = (p_N <= 0 && p_T_sqrd <= p_N * p_N / mu / mu); if (cond2) { p_T = 0; p(comp - 1) = 0; } else if (!cond1) { Real p_T_norm = std::sqrt(p_T_sqrd); Real k = (p_N + mu * p_T_norm) / (1 + mu * mu); p_T *= k * mu / p_T_norm; p(comp - 1) = k; } }, range>(*pressure)); } /* -------------------------------------------------------------------------- */ /** * Compute mean of the field taking each component separately. 
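enforcePressureCoulomb projects each traction onto the Coulomb friction cone |p_T| <= mu * p_N (the French comment reads "orthogonal projection onto the friction cone"): a traction already inside the cone is left untouched, a traction inside the polar cone is zeroed, and anything else is mapped onto the cone surface through k = (p_N + mu |p_T|) / (1 + mu^2). A single-point sketch with a plain std::array in place of VectorProxy:

#include <array>
#include <cmath>

// Orthogonal projection of a traction p = (p_T1, p_T2, p_N) onto the Coulomb
// friction cone { |p_T| <= mu * p_N }, mirroring the three cases of
// enforcePressureCoulomb.
std::array<double, 3> projectCoulomb(std::array<double, 3> p, double mu) {
  const double pN = p[2];
  const double pT = std::sqrt(p[0] * p[0] + p[1] * p[1]);
  if (pN >= 0 && pT <= mu * pN)   // already admissible
    return p;
  if (pN <= 0 && mu * pT <= -pN)  // inside the polar cone: project to the apex
    return {0., 0., 0.};
  const double k = (pN + mu * pT) / (1 + mu * mu);  // projection onto the surface
  return {p[0] * k * mu / pT, p[1] * k * mu / pT, k};
}

The Tresca variant replaces the pressure-dependent bound mu * p_N by a constant shear threshold, which is why the Tresca branches of computeGradient above test 0.99 * mu against |p_T| directly.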
*/ template Vector Kato::computeMean(GridBase& field) { Vector mean = Loop::reduce( [] CUDA_LAMBDA(VectorProxy f) -> Vector { return f; }, range>(field)); mean /= N; return mean; } /* -------------------------------------------------------------------------- */ template void Kato::addUniform(GridBase& field, GridBase& vec) { VectorProxy _vec(vec(0)); field += _vec; } } // namespace tamaas #endif // KATO_HH diff --git a/src/solvers/kato_saturated.cpp b/src/solvers/kato_saturated.cpp index 540fb0d..fdb3bec 100644 --- a/src/solvers/kato_saturated.cpp +++ b/src/solvers/kato_saturated.cpp @@ -1,232 +1,231 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "kato_saturated.hh" #include "logger.hh" #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ KatoSaturated::KatoSaturated(Model& model, const GridBase& surface, Real tolerance, Real pmax) : PolonskyKeerRey(model, surface, tolerance, PolonskyKeerRey::pressure, PolonskyKeerRey::pressure), pmax(pmax) {} /* -------------------------------------------------------------------------- */ Real KatoSaturated::solve(std::vector load) { GridBase initial_surface = surface; GridBase residual_disp = surface; residual_disp = 0; auto negative = [this](const GridBase& f) { return f.min() < -std::abs(this->tolerance); }; UInt n = 0; do { surface = initial_surface; surface += residual_disp; PolonskyKeerRey::solve(load); // Update the rough surface Loop::loop([] CUDA_LAMBDA(Real & h_pl, Real g) { h_pl += g * (g < 0); }, residual_disp, *this->dual); } while (negative(*this->dual) and n++ < this->max_iterations); surface = initial_surface; *this->displacement_view -= residual_disp; return Real(n >= this->max_iterations); } /* -------------------------------------------------------------------------- */ Real KatoSaturated::meanOnUnsaturated(const GridBase& /*field*/) const { return 0; } Real KatoSaturated::computeSquaredNorm(const GridBase& /*field*/) const { return 1.; } void KatoSaturated::updateSearchDirection(Real /*factor*/) { *this->search_direction = *this->dual; } Real KatoSaturated::computeCriticalStep(Real /*target*/) { // integral_op->apply(*search_direction, *projected_search_direction); // Real num = search_direction->dot(*search_direction); // Real denum = projected_search_direction->dot(*search_direction); // return 0.1 * num / denum; return 1; } bool KatoSaturated::updatePrimal(Real step) { UInt na_num = Loop::reduce( [step] CUDA_LAMBDA(Real & p, const Real& /*q*/, const Real& t) -> UInt { p -= step * t; // Updating primal return 0; // if (p < 0) // p = 
0; // Truncating neg values // if (p == 0 && q < 0) { // If non-admissible state // p -= step * q; // return 1; // } else // return 0; }, *primal, *dual, *search_direction); return na_num == 0; } /* -------------------------------------------------------------------------- */ Real KatoSaturated::computeError() { // We shift the gap by the minimum on unsaturated area const auto pmax = this->pmax; Real shift = Loop::reduce( [pmax] CUDA_LAMBDA(Real p, Real d) { return (p < pmax) ? d : std::numeric_limits::max(); }, *primal, *dual); *dual -= shift; Real norm = 1; // Ignore points that are saturated Real error = Loop::reduce( [pmax] CUDA_LAMBDA(Real p, Real d) { return (p < pmax) ? p * d : 0; }, *primal, *dual); if (std::isnan(error)) TAMAAS_EXCEPTION("Encountered NaN in complementarity error: this may be " "caused by a contact area of a single node."); if (variable_type == pressure) norm = std::abs(primal->sum() * this->surface_stddev); else norm = std::abs(dual->sum() * this->surface_stddev); norm *= primal->getGlobalNbPoints(); return std::abs(error) / norm; } /* -------------------------------------------------------------------------- */ void KatoSaturated::enforceMeanValue(Real mean) { // We want to cancel the difference between saturated alpha + t and the // applied pressure const auto pmax = this->pmax; *primal -= primal->mean(); auto f = [&](Real scale) { Real sum = Loop::reduce( [pmax, scale] CUDA_LAMBDA(Real t) -> Real { t += scale; if (t > pmax) return pmax; - else if (t < 0) + if (t < 0) return 0; - else - return t; + return t; }, *this->primal); sum /= this->primal->getGlobalNbPoints(); return sum - mean; }; if (pmax < mean) TAMAAS_EXCEPTION("Cannot find equilibrium"); // Dichotomy + Secant Newton on f // Initial points Real x_n_2 = -primal->max(), x_n_1 = -primal->min(), x_n = 0.; Real f_n_2 = 0., f_n_1 = 0., f_n = 0.; // Find a not-to-large search interval Logger().get(LogLevel::debug) << TAMAAS_DEBUG_MSG("Searching equilibrium interval"); while (std::signbit(f(x_n_1)) == std::signbit(f(x_n_2))) { x_n_1 *= 10; x_n_2 *= 10; } Logger().get(LogLevel::debug) << TAMAAS_DEBUG_MSG( "Reducing interval [abs(x1/x2) = " << std::abs(x_n_1 / x_n_2) << ']'); UInt n_dic = 10; f_n_1 = f(x_n_1); for (UInt i = 0; i < n_dic; ++i) { x_n = 0.5 * (x_n_1 + x_n_2); f_n = f(x_n); if (std::signbit(f_n) == std::signbit(f_n_1)) { x_n_1 = x_n; f_n_1 = f_n; } else x_n_2 = x_n; } Logger().get(LogLevel::debug) << TAMAAS_DEBUG_MSG("Starting Newton secant [abs(x1/x2) = " << std::abs(x_n_1 / x_n_2) << ']'); // Secant loop do { f_n_2 = f(x_n_2); f_n_1 = f(x_n_1); if (f_n_1 == f_n_2) break; // Avoid nans x_n = x_n_1 - f_n_1 * (x_n_1 - x_n_2) / (f_n_1 - f_n_2); f_n = f(x_n); x_n_2 = x_n_1; x_n_1 = x_n; } while (std::abs(f_n / mean) > 1e-14); // Pressure update Loop::loop( [pmax, x_n] CUDA_LAMBDA(Real & t) { t += x_n; if (t > pmax) t = pmax; else if (t < 0) t = 0.; }, *this->primal); } /* -------------------------------------------------------------------------- */ void KatoSaturated::enforceAdmissibleState() { /// Make dual admissible const auto pmax = this->pmax; Real shift = Loop::reduce( [pmax] CUDA_LAMBDA(Real p, Real d) { return (p < pmax) ? 
d : std::numeric_limits::max(); }, *primal, *dual); *dual -= shift; *displacement_view = *dual; *displacement_view += this->surface; } } // namespace tamaas diff --git a/src/solvers/polonsky_keer_rey.cpp b/src/solvers/polonsky_keer_rey.cpp index c31a40c..27f97de 100644 --- a/src/solvers/polonsky_keer_rey.cpp +++ b/src/solvers/polonsky_keer_rey.cpp @@ -1,300 +1,299 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "polonsky_keer_rey.hh" #include "elastic_functional.hh" #include "logger.hh" #include "loop.hh" #include "model_type.hh" #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { PolonskyKeerRey::PolonskyKeerRey(Model& model, const GridBase& surface, Real tolerance, type variable_type, type constraint_type) : ContactSolver(model, surface, tolerance), variable_type(variable_type), constraint_type(constraint_type) { #define SET_VIEWS(_, __, type) \ case type: { \ setViews(); \ break; \ } switch (model.getType()) { BOOST_PP_SEQ_FOR_EACH(SET_VIEWS, ~, TAMAAS_MODEL_TYPES); } #undef SET_VIEWS search_direction = allocateGrid( operation_type, model.getBoundaryDiscretization()); projected_search_direction = allocateGrid( operation_type, model.getBoundaryDiscretization()); switch (variable_type) { case gap: { model.getBEEngine().registerDirichlet(); primal = gap_view.get(); dual = pressure_view.get(); this->functional.addFunctionalTerm( std::make_shared(*integral_op, this->surface)); break; } case pressure: { model.getBEEngine().registerNeumann(); primal = pressure_view.get(); dual = gap_view.get(); this->functional.addFunctionalTerm( std::make_shared(*integral_op, this->surface)); break; } } } /* -------------------------------------------------------------------------- */ Real PolonskyKeerRey::solve(std::vector target_v) { // Update in case of a surface change between solves this->surface_stddev = std::sqrt(this->surface.var()); // Printing column headers Logger().get(LogLevel::info) << std::setw(5) << "Iter" << " " << std::setw(15) << "Cost_f" << " " << std::setw(15) << "Error" << '\n' << std::fixed; const Real target = target_v.back(); // Setting uniform value if constraint if (constraint_type == variable_type && std::abs(primal->sum()) <= 0) *primal = target; else if (constraint_type == variable_type) *primal *= target / primal->mean(); else if (constraint_type != variable_type) *primal = this->surface_stddev; // Algorithm variables Real Gold = 1, error = 0; UInt n = 0; bool delta = false; *search_direction = 0; do { // Computing functional gradient functional.computeGradF(*primal, *dual); const Real dbar = meanOnUnsaturated(*dual); // Enforcing dual constraint via 
gradient if (constraint_type != variable_type) { *dual += 2 * target + dbar; } else { // Centering dual on primal > 0 *dual -= dbar; } // Computing gradient norm const Real G = computeSquaredNorm(*dual); // Updating search direction (conjugate gradient) updateSearchDirection(delta * G / Gold); Gold = G; // Computing critical step const Real tau = computeCriticalStep(target); // Update primal variable delta = updatePrimal(tau); // We should scale to constraint if (constraint_type == variable_type) enforceMeanValue(target); error = computeError(); const Real cost_f = functional.computeF(*primal, *dual); printState(n, cost_f, error); } while (error > this->tolerance and n++ < this->max_iterations); // Final update of dual variable functional.computeGradF(*primal, *dual); enforceAdmissibleState(); return error; } /* -------------------------------------------------------------------------- */ void PolonskyKeerRey::enforceAdmissibleState() { /// Make dual admissible const Real dual_min = dual->min(); *dual -= dual_min; // Primal is pressure: need to make sure gap is positive if (variable_type == pressure) { *displacement_view = *dual; *displacement_view += this->surface; } // Primal is gap: need to make sure dual is positive (pressure + adhesion) else { *displacement_view = *primal; *displacement_view += this->surface; integral_op->apply(*displacement_view, *pressure_view); *pressure_view -= dual_min; } } /* -------------------------------------------------------------------------- */ /** * Computes \f$ \frac{1}{\mathrm{card}(\{p > 0\})} \sum_{\{p > 0\}}{f_i} \f$ */ Real PolonskyKeerRey::meanOnUnsaturated(const GridBase& field) const { // Sum on unsaturated const Real sum = Loop::reduce( [] CUDA_LAMBDA(Real & p, const Real& f) { return (p > 0) ? f : 0; }, *primal, field); // Number of unsaturated points const UInt n_unsat = Loop::reduce( [] CUDA_LAMBDA(Real & p) -> UInt { return (p > 0); }, *primal); return sum / n_unsat; } /* -------------------------------------------------------------------------- */ /** * Computes \f$ \sum_{\{p > 0\}}{f_i^2} \f$ */ Real PolonskyKeerRey::computeSquaredNorm(const GridBase& field) const { return Loop::reduce( [] CUDA_LAMBDA(Real & p, const Real& f) { return (p > 0) ? f * f : 0; }, *primal, field); } /* -------------------------------------------------------------------------- */ /** * Computes \f$ \tau = \frac{ \sum_{\{p > 0\}}{q_i't_i} }{ \sum_{\{p > 0\}}{r_i' * t_i} } \f$ */ Real PolonskyKeerRey::computeCriticalStep(Real target) { integral_op->apply(*search_direction, *projected_search_direction); const Real rbar = meanOnUnsaturated(*projected_search_direction); if (constraint_type == variable_type) *projected_search_direction -= rbar; else { *projected_search_direction += 2 * target + rbar; } const Real num = Loop::reduce( [] CUDA_LAMBDA(const Real& p, const Real& q, const Real& t) { return (p > 0) ? q * t : 0; }, *primal, *dual, *search_direction); const Real denum = Loop::reduce( [] CUDA_LAMBDA(const Real& p, const Real& r, const Real& t) { return (p > 0) ? r * t : 0; }, *primal, *projected_search_direction, *search_direction); return num / denum; } /* -------------------------------------------------------------------------- */ /** * Update steps: * 1. \f$\mathbf{p} = \mathbf{p} - \tau \mathbf{t} \f$ * 2. Truncate all \f$p\f$ negative * 3. 
For all points in \f$I_\mathrm{na} = \{p = 0 \land q < 0 \}\f$ do \f$p_i = * p_i - \tau q_i\f$ */ bool PolonskyKeerRey::updatePrimal(Real step) { const UInt na_num = Loop::reduce( [step] CUDA_LAMBDA(Real & p, const Real& q, const Real& t) -> UInt { p -= step * t; // Updating primal if (p < 0) p = 0; // Truncating neg values if (p == 0 && q < 0) { // If non-admissible state p -= step * q; return 1; - } else - return 0; + } + return 0; }, *primal, *dual, *search_direction); return na_num == 0; } /* -------------------------------------------------------------------------- */ /** * Error is based on \f$ \sum{p_i q_i} \f$ */ Real PolonskyKeerRey::computeError() { /// Making sure dual respects constraint *dual -= dual->min(); /// Complementarity error const Real error = primal->dot(*dual); if (std::isnan(error)) TAMAAS_EXCEPTION("Encountered NaN in complementarity error: this may be " "caused by a contact area of a single node."); const Real norm = [this]() { if (variable_type == pressure) return std::abs(primal->sum() * this->surface_stddev); - else - return std::abs(dual->sum() * this->surface_stddev); + return std::abs(dual->sum() * this->surface_stddev); }(); return std::abs(error) / (norm * primal->getGlobalNbPoints()); } /* -------------------------------------------------------------------------- */ /** * Do \f$\mathbf{t} = \mathbf{q}' + \delta \frac{R}{R_\mathrm{old}}\mathbf{t} * \f$ */ void PolonskyKeerRey::updateSearchDirection(Real factor) { Loop::loop( [factor] CUDA_LAMBDA(Real & p, Real & q, Real & t) { t = (p > 0) ? q + factor * t : 0; }, *primal, *dual, *search_direction); } /* -------------------------------------------------------------------------- */ void PolonskyKeerRey::enforceMeanValue(Real mean) { *primal *= mean / primal->mean(); } } // namespace tamaas /* -------------------------------------------------------------------------- */ diff --git a/src/solvers/polonsky_keer_tan.cpp b/src/solvers/polonsky_keer_tan.cpp index dd2e466..d013536 100644 --- a/src/solvers/polonsky_keer_tan.cpp +++ b/src/solvers/polonsky_keer_tan.cpp @@ -1,339 +1,333 @@ /* * SPDX-License-Indentifier: AGPL-3.0-or-later * * Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
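The update spelled out in the comment above is compact enough to restate with plain containers: the primal moves along the search direction, negative values are truncated, and points left with zero pressure but a negative dual are pushed back into contact; the return value (no non-admissible point found) is what the solve loop uses, through delta, to keep or reset the conjugate-gradient history. The sketch below is illustrative and not the Grid-based implementation.

#include <cstddef>
#include <vector>

// One primal update of the constrained conjugate gradient described above:
// p <- p - tau * t, truncation of negative pressures, and correction of
// non-admissible points (p = 0 with q < 0). Returns true when no
// non-admissible point was encountered.
bool updatePrimal(std::vector<double>& p, const std::vector<double>& q,
                  const std::vector<double>& t, double tau) {
  std::size_t non_admissible = 0;
  for (std::size_t i = 0; i < p.size(); ++i) {
    p[i] -= tau * t[i];           // update along the search direction
    if (p[i] < 0)
      p[i] = 0;                   // truncate negative values
    if (p[i] == 0 && q[i] < 0) {  // non-admissible: move along the dual instead
      p[i] -= tau * q[i];
      ++non_admissible;
    }
  }
  return non_admissible == 0;
}

The convergence measure that accompanies this update is the complementarity sum of p_i q_i, normalised with the total pressure (or gap) and the surface standard deviation, which vanishes at the exact solution.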
* */ /* -------------------------------------------------------------------------- */ #include "polonsky_keer_tan.hh" #include /* -------------------------------------------------------------------------- */ namespace tamaas { PolonskyKeerTan::PolonskyKeerTan(Model& model, const GridBase& surface, Real tolerance, Real mu) : Kato(model, surface, tolerance, mu) { search_direction = allocateGrid(model.getType(), model.getDiscretization(), model.getTraction().getNbComponents()); search_direction_backup = allocateGrid(model.getType(), model.getDiscretization(), model.getTraction().getNbComponents()); projected_search_direction = allocateGrid(model.getType(), model.getDiscretization(), model.getTraction().getNbComponents()); } /* -------------------------------------------------------------------------- */ Real PolonskyKeerTan::solve(GridBase& p0) { if (p0.getNbPoints() != pressure->getNbComponents()) { TAMAAS_EXCEPTION( "Target mean pressure does not have the right number of components"); } Real cost = 0; switch (model.getType()) { case model_type::surface_1d: cost = solveTmpl(p0); break; case model_type::surface_2d: cost = solveTmpl(p0); break; default: break; } return cost; } /* -------------------------------------------------------------------------- */ Real PolonskyKeerTan::solveTresca(GridBase& p0) { if (p0.getNbPoints() != pressure->getNbComponents()) { TAMAAS_EXCEPTION( "Target mean pressure does not have the right number of components"); } Real cost = 0; switch (model.getType()) { case model_type::surface_1d: cost = solveTmpl(p0, true); break; case model_type::surface_2d: cost = solveTmpl(p0, true); break; default: break; } return cost; } template Real PolonskyKeerTan::solveTmpl(GridBase& p0, bool use_tresca) { // Printing column headers std::cout << std::setw(5) << "Iter" << " " << std::setw(15) << "Cost_f" << " " << std::setw(15) << "Error" << '\n' << std::fixed; constexpr UInt comp = model_type_traits::components; Real cost = 0.0; Real error = 0.0; UInt n = 0; pressure->uniformSetComponents(p0); Real R_old = 1.0; *search_direction = 0.0; do { // Enforce external condition (should be at the end) enforcePressureMean(p0); // Compute functional gradient computeGradient(use_tresca); // Compute search direction Real R = computeSquaredNorm(*gap); *search_direction *= R / R_old; *search_direction += *gap; R_old = R; // Compute step size Real tau = computeStepSize(false); // Update pressure *search_direction *= tau; *pressure -= *search_direction; *search_direction *= model.getSystemSize()[0] / model.getYoungModulus(); // *search_direction *= 1/tau; // this line should *theoreticaly* replace // the one above // Enforce constraints if (!use_tresca) { enforcePressureCoulomb(); } else { enforcePressureTresca(); } cost = computeCost(use_tresca); error = cost / pressure->getNbPoints() / model.getSystemSize()[0] / model.getYoungModulus(); printState(n, cost, error); } while (error > this->tolerance && n++ < this->max_iterations); computeFinalGap(); return error; } // Original algorithm // template // Real PolonskyKeerTan::solveTmpl(GridBase& p0) { // // Printing column headers // std::cout << std::setw(5) << "Iter" // << " " << std::setw(15) << "Cost_f" // << " " << std::setw(15) << "Error" << '\n' // << std::fixed; // constexpr UInt comp = model_type_traits::components; // Real cost = 0.0; // UInt n = 0; // pressure->uniformSetComponents(p0); // Real R_old = 1.0; // Real delta = 0.0; // *search_direction = 0.0; // do { // // Enforce external condition (should be at the end) // 
enforcePressureMean(p0); // // Compute functional gradient // computeGradient(); // Vector gap_mean = computeMean(*gap, true); // for (UInt i = 0; i < comp - 1; i++) gap_mean(i) = 0; // *gap -= gap_mean; // // Compute search direction // Real R = computeSquaredNorm(*gap); // *search_direction *= delta * R / R_old; // *search_direction += *gap; // R_old = R; // truncateSearchDirection(true); // // Compute step size // Real tau = computeStepSize(true); // // Update pressure // *search_direction *= tau; // *pressure -= *search_direction; // // Enforce constraints // enforcePressureCoulomb(); // // Empty set of inadmissible gaps // UInt na_count = Loop::stridedReduce( // [tau] CUDA_LAMBDA(VectorProxy&& p, // VectorProxy&& g) { // if (p(comp - 1) == 0.0 && g(comp - 1) < 0.0) { // Vector _g = g; // _g *= tau; // p -= _g; // return 1; // } else { // return 0; // } // }, // *pressure, *gap); // delta = (na_count > 0) ? 0.0 : 1.0; // // Enforce external condition // // enforcePressureMean(p0); // cost = computeCost(); // printState(n, cost, cost); // } while (cost > this->tolerance && n++ < this->max_iterations); // computeFinalGap(); // return cost; // } /* -------------------------------------------------------------------------- */ template void PolonskyKeerTan::enforcePressureMean(GridBase& p0) { Vector pressure_mean = computeMean(*pressure, false); // *pressure -= pressure_mean; // addUniform(*pressure, p0); // for (UInt i = 0; i < comp; i++) // if (pressure_mean(i) == 0) pressure_mean(i) = 1.0; // *pressure /= pressure_mean; // VectorProxy _p0(p0(0)); // *pressure *= _p0; VectorProxy _p0(p0(0)); Loop::loop( [pressure_mean, _p0] CUDA_LAMBDA(VectorProxy p) { for (UInt i = 0; i < comp - 1; i++) p(i) += _p0(i) - pressure_mean(i); p(comp - 1) *= _p0(comp - 1) / pressure_mean(comp - 1); }, range>(*pressure)); // Loop::stridedLoop( // [pressure_mean, p0] CUDA_LAMBDA(VectorProxy&& p) { // for (UInt i = 0; i < comp - 1; i++) // p(i) *= p0(i) / (pressure_mean(i) != 0 ? pressure_mean(i) : 1); // p(comp - 1) += p0(comp - 1) - pressure_mean(comp - 1); // }, // *pressure); } /* -------------------------------------------------------------------------- */ template Vector PolonskyKeerTan::computeMean(GridBase& field, bool on_c) { UInt count = Loop::reduce( [on_c] CUDA_LAMBDA(VectorProxy p) -> UInt { - if ((!on_c) || p(comp - 1) > 0.0) { - return 1; - } else { - return 0; - } + return ((!on_c) || p(comp - 1) > 0.0) ? 
1 : 0; }, range>(*pressure)); Vector mean = Loop::reduce( [on_c] CUDA_LAMBDA(VectorProxy f, VectorProxy p) -> Vector { - if ((!on_c) || p(comp - 1) > 0.0) { + if ((!on_c) || p(comp - 1) > 0.0) return f; - } else { - return 0; - } + return 0; }, range>(field), range>(*pressure)); mean /= count; return mean; } /* -------------------------------------------------------------------------- */ Real PolonskyKeerTan::computeSquaredNorm(GridBase& field) { return Loop::reduce( [] CUDA_LAMBDA(Real & f) { return f * f; }, field); } /* -------------------------------------------------------------------------- */ template void PolonskyKeerTan::truncateSearchDirection(bool on_c) { if (!on_c) return; Loop::loop( [] CUDA_LAMBDA(VectorProxy t, VectorProxy p) { if (p(comp - 1) == 0.0) t = 0.0; }, range>(*search_direction), range>(*pressure)); } /* -------------------------------------------------------------------------- */ template Real PolonskyKeerTan::computeStepSize(bool on_c) { engine.solveNeumann(*search_direction, *projected_search_direction); Vector r_mean = computeMean(*projected_search_direction, on_c); *projected_search_direction -= r_mean; Real num = Loop::reduce( [] CUDA_LAMBDA(VectorProxy q, VectorProxy t) { return q.dot(t); }, range>(*gap), range>(*search_direction)); Real denum = Loop::reduce( [] CUDA_LAMBDA(VectorProxy r, VectorProxy t) { return r.dot(t); }, range>(*projected_search_direction), range>(*search_direction)); // if (denum == 0) denum = 1.0; return num / denum; } } // namespace tamaas /* -------------------------------------------------------------------------- */
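For reference, the critical step computed by computeStepSize above is the exact line-search step of a quadratic functional: with q the gradient and r the image of the search direction t under the boundary-element operator, tau = (q . t) / (r . t). The sketch below omits the mean-centring of r over the contact set, and `op` is only a stand-in for solveNeumann.

#include <cstddef>
#include <vector>

using Vec = std::vector<double>;

double dot(const Vec& a, const Vec& b) {
  double s = 0;
  for (std::size_t i = 0; i < a.size(); ++i)
    s += a[i] * b[i];
  return s;
}

// Exact line-search step for a quadratic functional:
// tau = (q . t) / (op(t) . t), with op standing in for the Neumann operator.
template <typename Operator>
double stepSize(const Vec& q, const Vec& t, Operator&& op) {
  const Vec r = op(t);
  return dot(q, t) / dot(r, t);
}

The commented-out guard on denum in the source marks where a vanishing denominator would have to be intercepted.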