diff --git a/src/common/aka_types.hh b/src/common/aka_types.hh index c8c19f1a3..62d998fde 100644 --- a/src/common/aka_types.hh +++ b/src/common/aka_types.hh @@ -1,1300 +1,1302 @@ /** * @file aka_types.hh * * @author Nicolas Richart * * @date creation: Thu Feb 17 2011 * @date last modification: Fri Jan 22 2016 * * @brief description of the "simple" types * * @section LICENSE * * Copyright (©) 2010-2012, 2014, 2015 EPFL (Ecole Polytechnique Fédérale de * Lausanne) Laboratory (LSMS - Laboratoire de Simulation en Mécanique des * Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "aka_error.hh" #include "aka_fwd.hh" #include "aka_math.hh" /* -------------------------------------------------------------------------- */ #include #include #include /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_AKA_TYPES_HH__ #define __AKANTU_AKA_TYPES_HH__ namespace akantu { enum NormType { L_1 = 1, L_2 = 2, L_inf = UInt(-1) }; /** * DimHelper is a class to generalize the setup of a dim array from 3 * values. This gives a common interface in the TensorStorage class * independently of its derived inheritance (Vector, Matrix, Tensor3) * @tparam dim */ template struct DimHelper { static inline void setDims(UInt m, UInt n, UInt p, UInt dims[dim]); }; /* -------------------------------------------------------------------------- */ template <> struct DimHelper<1> { static inline void setDims(UInt m, __attribute__((unused)) UInt n, __attribute__((unused)) UInt p, UInt dims[1]) { dims[0] = m; } }; /* -------------------------------------------------------------------------- */ template <> struct DimHelper<2> { static inline void setDims(UInt m, UInt n, __attribute__((unused)) UInt p, UInt dims[2]) { dims[0] = m; dims[1] = n; } }; /* -------------------------------------------------------------------------- */ template <> struct DimHelper<3> { static inline void setDims(UInt m, UInt n, UInt p, UInt dims[3]) { dims[0] = m; dims[1] = n; dims[2] = p; } }; /* -------------------------------------------------------------------------- */ template class TensorStorage; /* -------------------------------------------------------------------------- */ /* Proxy classes */ /* -------------------------------------------------------------------------- */ namespace tensors { template struct is_copyable { enum : bool { value = false }; }; template struct is_copyable { enum : bool { value = true }; }; template struct is_copyable { enum : bool { value = true }; }; template struct is_copyable { enum : bool { value = true }; }; } // namespace tensors /** * @class TensorProxy aka_types.hh * @desc The TensorProxy class is a proxy class to the TensorStorage it handles * the * wrapped case. 
That is to say if an accessor should give access to a Tensor * wrapped on some data, like the Array::iterator they can return a * TensorProxy that will be automatically transformed as a TensorStorage wrapped * on the same data * @tparam T stored type * @tparam ndim order of the tensor * @tparam RetType real derived type */ template class TensorProxy { protected: using RetTypeProxy = typename _RetType::proxy; constexpr TensorProxy(T * data, UInt m, UInt n, UInt p) { DimHelper::setDims(m, n, p, this->n); this->values = data; } #ifndef SWIG template ::value>> explicit TensorProxy(const Other & other) { this->values = other.storage(); for (UInt i = 0; i < ndim; ++i) this->n[i] = other.size(i); } #endif public: using RetType = _RetType; UInt size(UInt i) const { AKANTU_DEBUG_ASSERT(i < ndim, "This tensor has only " << ndim << " dimensions, not " << (i + 1)); return n[i]; } inline UInt size() const { UInt _size = 1; for (UInt d = 0; d < ndim; ++d) _size *= this->n[d]; return _size; } T * storage() const { return values; } #ifndef SWIG template ::value>> inline TensorProxy & operator=(const Other & other) { AKANTU_DEBUG_ASSERT( other.size() == this->size(), "You are trying to copy two tensors with different sizes"); memcpy(this->values, other.storage(), this->size() * sizeof(T)); return *this; } #endif // template ::value>> // inline TensorProxy & operator=(const Other && other) { // AKANTU_DEBUG_ASSERT( // other.size() == this->size(), // "You are trying to copy two tensors with different sizes"); // memcpy(this->values, other.storage(), this->size() * sizeof(T)); // return *this; // } template inline RetTypeProxy & operator*=(const O & o) { RetType(*this) *= o; return static_cast(*this); } template inline RetTypeProxy & operator/=(const O & o) { RetType(*this) /= o; return static_cast(*this); } protected: T * values; UInt n[ndim]; }; /* -------------------------------------------------------------------------- */ template class VectorProxy : public TensorProxy> { using parent = TensorProxy>; using type = Vector; public: constexpr VectorProxy(T * data, UInt n) : parent(data, n, 0, 0) {} template explicit VectorProxy(Other & src) : parent(src) {} /* ---------------------------------------------------------------------- */ template inline VectorProxy & operator=(const Other & other) { parent::operator=(other); return *this; } // inline VectorProxy & operator=(const VectorProxy && other) { // parent::operator=(other); // return *this; // } /* ------------------------------------------------------------------------ */ T & operator()(UInt index) { return this->values[index]; }; const T & operator()(UInt index) const { return this->values[index]; }; }; template class MatrixProxy : public TensorProxy> { using parent = TensorProxy>; using type = Matrix; public: MatrixProxy(T * data, UInt m, UInt n) : parent(data, m, n, 0) {} template explicit MatrixProxy(Other & src) : parent(src) {} /* ---------------------------------------------------------------------- */ template inline MatrixProxy & operator=(const Other & other) { parent::operator=(other); return *this; } }; template class Tensor3Proxy : public TensorProxy> { using parent = TensorProxy>; using type = Tensor3; public: Tensor3Proxy(const T * data, UInt m, UInt n, UInt k) : parent(data, m, n, k) {} Tensor3Proxy(const Tensor3Proxy & src) : parent(src) {} Tensor3Proxy(const Tensor3 & src) : parent(src) {} /* ---------------------------------------------------------------------- */ template inline Tensor3Proxy & operator=(const Other & other) { 
parent::operator=(other); return *this; } }; /* -------------------------------------------------------------------------- */ /* Tensor base class */ /* -------------------------------------------------------------------------- */ template class TensorStorage : public TensorTrait { public: using value_type = T; friend class Array; protected: template void copySize(const TensorType & src) { for (UInt d = 0; d < ndim; ++d) this->n[d] = src.size(d); this->_size = src.size(); } TensorStorage() : values(nullptr) { for (UInt d = 0; d < ndim; ++d) this->n[d] = 0; _size = 0; } TensorStorage(const TensorProxy & proxy) { this->copySize(proxy); this->values = proxy.storage(); this->wrapped = true; } public: TensorStorage(const TensorStorage & src) = delete; TensorStorage(const TensorStorage & src, bool deep_copy) : values(nullptr) { if (deep_copy) this->deepCopy(src); else this->shallowCopy(src); } protected: TensorStorage(UInt m, UInt n, UInt p, const T & def) { static_assert(std::is_trivially_constructible{}, "Cannot create a tensor on non-trivial types"); DimHelper::setDims(m, n, p, this->n); this->computeSize(); this->values = new T[this->_size]; this->set(def); this->wrapped = false; } TensorStorage(T * data, UInt m, UInt n, UInt p) { DimHelper::setDims(m, n, p, this->n); this->computeSize(); this->values = data; this->wrapped = true; } public: /* ------------------------------------------------------------------------ */ template inline void shallowCopy(const TensorType & src) { this->copySize(src); if (!this->wrapped) delete[] this->values; this->values = src.storage(); this->wrapped = true; } /* ------------------------------------------------------------------------ */ template inline void deepCopy(const TensorType & src) { this->copySize(src); if (!this->wrapped) delete[] this->values; static_assert(std::is_trivially_constructible{}, "Cannot create a tensor on non-trivial types"); this->values = new T[this->_size]; static_assert(std::is_trivially_copyable{}, "Cannot copy a tensor on non-trivial types"); memcpy((void *)this->values, (void *)src.storage(), this->_size * sizeof(T)); this->wrapped = false; } virtual ~TensorStorage() { if (!this->wrapped) delete[] this->values; } /* ------------------------------------------------------------------------ */ inline TensorStorage & operator=(const TensorStorage & src) { return this->operator=(dynamic_cast(src)); } /* ------------------------------------------------------------------------ */ inline TensorStorage & operator=(const RetType & src) { if (this != &src) { if (this->wrapped) { static_assert(std::is_trivially_copyable{}, "Cannot copy a tensor on non-trivial types"); // this test is not sufficient for Tensors of order higher than 1 AKANTU_DEBUG_ASSERT(this->_size == src.size(), "Tensors of different size"); memcpy((void *)this->values, (void *)src.storage(), this->_size * sizeof(T)); } else { deepCopy(src); } } return *this; } /* ------------------------------------------------------------------------ */ template inline RetType & operator+=(const TensorStorage & other) { T * a = this->storage(); T * b = other.storage(); AKANTU_DEBUG_ASSERT( _size == other.size(), "The two tensors do not have the same size, they cannot be added"); for (UInt i = 0; i < _size; ++i) *(a++) += *(b++); return *(static_cast(this)); } /* ------------------------------------------------------------------------ */ template inline RetType & operator-=(const TensorStorage & other) { T * a = this->storage(); T * b = other.storage(); AKANTU_DEBUG_ASSERT( _size ==
other.size(), "The two tensors do not have the same size, they cannot be subtracted"); for (UInt i = 0; i < _size; ++i) *(a++) -= *(b++); return *(static_cast(this)); } /* ------------------------------------------------------------------------ */ inline RetType & operator+=(const T & x) { T * a = this->values; for (UInt i = 0; i < _size; ++i) *(a++) += x; return *(static_cast(this)); } /* ------------------------------------------------------------------------ */ inline RetType & operator-=(const T & x) { T * a = this->values; for (UInt i = 0; i < _size; ++i) *(a++) -= x; return *(static_cast(this)); } /* ------------------------------------------------------------------------ */ inline RetType & operator*=(const T & x) { T * a = this->storage(); for (UInt i = 0; i < _size; ++i) *(a++) *= x; return *(static_cast(this)); } /* ---------------------------------------------------------------------- */ inline RetType & operator/=(const T & x) { T * a = this->values; for (UInt i = 0; i < _size; ++i) *(a++) /= x; return *(static_cast(this)); } /// Y = \alpha X + Y inline RetType & aXplusY(const TensorStorage & other, const T & alpha = 1.) { AKANTU_DEBUG_ASSERT( _size == other.size(), "The two tensors do not have the same size, they cannot be combined"); Math::aXplusY(this->_size, alpha, other.storage(), this->storage()); return *(static_cast(this)); } /* ------------------------------------------------------------------------ */ T * storage() const { return values; } UInt size() const { return _size; } UInt size(UInt i) const { AKANTU_DEBUG_ASSERT(i < ndim, "This tensor has only " << ndim << " dimensions, not " << (i + 1)); return n[i]; }; /* ------------------------------------------------------------------------ */ inline void clear() { memset(values, 0, _size * sizeof(T)); }; inline void set(const T & t) { std::fill_n(values, _size, t); }; template inline void copy(const TensorType & other) { AKANTU_DEBUG_ASSERT( _size == other.size(), "The two tensors do not have the same size, they cannot be copied"); memcpy(values, other.storage(), _size * sizeof(T)); } bool isWrapped() const { return this->wrapped; } protected: inline void computeSize() { _size = 1; for (UInt d = 0; d < ndim; ++d) _size *= this->n[d]; } protected: template struct NormHelper { template static R norm(const Ten & ten) { R _norm = 0.; R * it = ten.storage(); R * end = ten.storage() + ten.size(); for (; it < end; ++it) _norm += std::pow(std::abs(*it), norm_type); return std::pow(_norm, 1.
/ norm_type); } }; template struct NormHelper { template static R norm(const Ten & ten) { R _norm = 0.; R * it = ten.storage(); R * end = ten.storage() + ten.size(); for (; it < end; ++it) _norm += std::abs(*it); return _norm; } }; template struct NormHelper { template static R norm(const Ten & ten) { R _norm = 0.; R * it = ten.storage(); R * end = ten.storage() + ten.size(); for (; it < end; ++it) _norm += *it * *it; return sqrt(_norm); } }; template struct NormHelper { template static R norm(const Ten & ten) { R _norm = 0.; R * it = ten.storage(); R * end = ten.storage() + ten.size(); for (; it < end; ++it) _norm = std::max(std::abs(*it), _norm); return _norm; } }; public: /*----------------------------------------------------------------------- */ /// "Entrywise" norm @f[ \|\boldsymbol{T}\|_p = \left( /// \sum_i^{n[0]}\sum_j^{n[1]}\sum_k^{n[2]} |T_{ijk}|^p \right)^{\frac{1}{p}} /// @f] template inline T norm() const { return NormHelper::norm(*this); } protected: UInt n[ndim]; UInt _size; T * values; bool wrapped{false}; }; /* -------------------------------------------------------------------------- */ /* Vector */ /* -------------------------------------------------------------------------- */ template class Vector : public TensorStorage> { using parent = TensorStorage>; public: using value_type = typename parent::value_type; using proxy = VectorProxy; public: Vector() : parent() {} explicit Vector(UInt n, const T & def = T()) : parent(n, 0, 0, def) {} Vector(T * data, UInt n) : parent(data, n, 0, 0) {} Vector(const Vector & src, bool deep_copy = true) : parent(src, deep_copy) {} Vector(const TensorProxy & src) : parent(src) {} Vector(std::initializer_list list) : parent(list.size(), 0, 0, T()) { UInt i = 0; for (auto val : list) { operator()(i++) = val; } } public: ~Vector() override = default; /* ------------------------------------------------------------------------ */ inline Vector & operator=(const Vector & src) { parent::operator=(src); return *this; } /* ------------------------------------------------------------------------ */ inline T & operator()(UInt i) { AKANTU_DEBUG_ASSERT((i < this->n[0]), "Access out of the vector! " << "Index (" << i << ") is out of the vector of size (" << this->n[0] << ")"); return *(this->values + i); } inline const T & operator()(UInt i) const { AKANTU_DEBUG_ASSERT((i < this->n[0]), "Access out of the vector! 
" << "Index (" << i << ") is out of the vector of size (" << this->n[0] << ")"); return *(this->values + i); } inline T & operator[](UInt i) { return this->operator()(i); } inline const T & operator[](UInt i) const { return this->operator()(i); } /* ------------------------------------------------------------------------ */ inline Vector & operator*=(Real x) { return parent::operator*=(x); } inline Vector & operator/=(Real x) { return parent::operator/=(x); } /* ------------------------------------------------------------------------ */ inline Vector & operator*=(const Vector & vect) { T * a = this->storage(); T * b = vect.storage(); for (UInt i = 0; i < this->_size; ++i) *(a++) *= *(b++); return *this; } /* ------------------------------------------------------------------------ */ inline Real dot(const Vector & vect) const { return Math::vectorDot(this->values, vect.storage(), this->_size); } /* ------------------------------------------------------------------------ */ inline Real mean() const { Real mean = 0; T * a = this->storage(); for (UInt i = 0; i < this->_size; ++i) mean += *(a++); return mean / this->_size; } /* ------------------------------------------------------------------------ */ inline Vector & crossProduct(const Vector & v1, const Vector & v2) { AKANTU_DEBUG_ASSERT(this->size() == 3, "crossProduct is only defined in 3D (n=" << this->size() << ")"); AKANTU_DEBUG_ASSERT( this->size() == v1.size() && this->size() == v2.size(), "crossProduct is not a valid operation non matching size vectors"); Math::vectorProduct3(v1.storage(), v2.storage(), this->values); return *this; } inline Vector crossProduct(const Vector & v) { Vector tmp(this->size()); tmp.crossProduct(*this, v); return tmp; } /* ------------------------------------------------------------------------ */ inline void solve(const Matrix & A, const Vector & b) { AKANTU_DEBUG_ASSERT( this->size() == A.rows() && this->_size == A.cols(), "The size of the solution vector mismatches the size of the matrix"); AKANTU_DEBUG_ASSERT( this->_size == b._size, "The rhs vector has a mismatch in size with the matrix"); Math::solve(this->_size, A.storage(), this->values, b.storage()); } /* ------------------------------------------------------------------------ */ template inline void mul(const Matrix & A, const Vector & x, Real alpha = 1.0); /* ------------------------------------------------------------------------ */ inline Real norm() const { return parent::template norm(); } template inline Real norm() const { return parent::template norm(); } /* ------------------------------------------------------------------------ */ inline Vector & normalize() { Real n = norm(); operator/=(n); return *this; } /* ------------------------------------------------------------------------ */ /// norm of (*this - x) inline Real distance(const Vector & y) const { Real * vx = this->values; Real * vy = y.storage(); Real sum_2 = 0; for (UInt i = 0; i < this->_size; ++i, ++vx, ++vy) sum_2 += (*vx - *vy) * (*vx - *vy); return sqrt(sum_2); } /* ------------------------------------------------------------------------ */ inline bool equal(const Vector & v, Real tolerance = Math::getTolerance()) const { T * a = this->storage(); T * b = v.storage(); UInt i = 0; while (i < this->_size && (std::abs(*(a++) - *(b++)) < tolerance)) ++i; return i == this->_size; } /* ------------------------------------------------------------------------ */ inline short compare(const Vector & v, Real tolerance = Math::getTolerance()) const { T * a = this->storage(); T * b = 
v.storage(); for (UInt i(0); i < this->_size; ++i, ++a, ++b) { if (std::abs(*a - *b) > tolerance) return (((*a - *b) > tolerance) ? 1 : -1); } return 0; } /* ------------------------------------------------------------------------ */ inline bool operator==(const Vector & v) const { return equal(v); } inline bool operator!=(const Vector & v) const { return !operator==(v); } inline bool operator<(const Vector & v) const { return compare(v) == -1; } inline bool operator>(const Vector & v) const { return compare(v) == 1; } /* ------------------------------------------------------------------------ */ /// function to print the content of the class virtual void printself(std::ostream & stream, int indent = 0) const { std::string space; for (Int i = 0; i < indent; i++, space += AKANTU_INDENT) ; stream << "["; for (UInt i = 0; i < this->_size; ++i) { if (i != 0) stream << ", "; stream << this->values[i]; } stream << "]"; } // friend class ::akantu::Array; }; using RVector = Vector; /* ------------------------------------------------------------------------ */ template <> inline bool Vector::equal(const Vector & v, __attribute__((unused)) Real tolerance) const { UInt * a = this->storage(); UInt * b = v.storage(); UInt i = 0; while (i < this->_size && (*(a++) == *(b++))) ++i; return i == this->_size; }
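/* -------------------------------------------------------------------------- */
// Editor's sketch (not part of the original header): minimal usage of the
// Vector interface above. The function name `vector_usage_sketch` and the
// variable names are illustrative assumptions, not Akantu API.
inline void vector_usage_sketch() {
  Vector<Real> v{1., 2., 3.};   // initializer-list constructor
  Vector<Real> w(3, 1.);        // 3 entries, all initialized to 1.
  Real d = v.dot(w);            // 1 + 2 + 3 = 6
  Real l2 = v.norm<L_2>();      // sqrt(1 + 4 + 9), same as v.norm()
  v.normalize();                // v /= v.norm()
  Vector<Real> c(3);
  c.crossProduct(v, w);         // only defined for size-3 vectors
  (void)d; (void)l2;
}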
" << "Index (" << i << ", " << j << ") is out of the matrix of size (" << this->n[0] << ", " << this->n[1] << ")"); return *(this->values + i + j * this->n[0]); } /* ------------------------------------------------------------------------ */ inline T & operator()(UInt i, UInt j) { return this->at(i, j); } inline const T & operator()(UInt i, UInt j) const { return this->at(i, j); } /// give a line vector wrapped on the column i inline VectorProxy operator()(UInt j) { AKANTU_DEBUG_ASSERT(j < this->n[1], "Access out of the matrix! " << "You are trying to access the column vector " << j << " in a matrix of size (" << this->n[0] << ", " << this->n[1] << ")"); return VectorProxy(this->values + j * this->n[0], this->n[0]); } inline const VectorProxy operator()(UInt j) const { AKANTU_DEBUG_ASSERT(j < this->n[1], "Access out of the matrix! " << "You are trying to access the column vector " << j << " in a matrix of size (" << this->n[0] << ", " << this->n[1] << ")"); return VectorProxy(this->values + j * this->n[0], this->n[0]); } inline void block(Matrix & block, UInt pos_i, UInt pos_j) { AKANTU_DEBUG_ASSERT(pos_i + block.rows() <= rows(), "The block size or position are not correct"); AKANTU_DEBUG_ASSERT(pos_i + block.cols() <= cols(), "The block size or position are not correct"); for (UInt i = 0; i < block.rows(); ++i) for (UInt j = 0; j < block.cols(); ++j) this->at(i + pos_i, j + pos_j) = block(i, j); } inline Matrix block(UInt pos_i, UInt pos_j, UInt block_rows, UInt block_cols) const { AKANTU_DEBUG_ASSERT(pos_i + block_rows <= rows(), "The block size or position are not correct"); AKANTU_DEBUG_ASSERT(pos_i + block_cols <= cols(), "The block size or position are not correct"); Matrix block(block_rows, block_cols); for (UInt i = 0; i < block_rows; ++i) for (UInt j = 0; j < block_cols; ++j) block(i, j) = this->at(i + pos_i, j + pos_j); return block; } inline T & operator[](UInt idx) { return *(this->values + idx); }; inline const T & operator[](UInt idx) const { return *(this->values + idx); }; /* ---------------------------------------------------------------------- */ inline Matrix operator*(const Matrix & B) { Matrix C(this->rows(), B.cols()); C.mul(*this, B); return C; } /* ----------------------------------------------------------------------- */ inline Matrix & operator*=(const T & x) { return parent::operator*=(x); } inline Matrix & operator*=(const Matrix & B) { Matrix C(*this); this->mul(C, B); return *this; } /* ---------------------------------------------------------------------- */ template inline void mul(const Matrix & A, const Matrix & B, T alpha = 1.0) { UInt k = A.cols(); if (tr_A) k = A.rows(); #ifndef AKANTU_NDEBUG if (tr_B) { AKANTU_DEBUG_ASSERT(k == B.cols(), "matrices to multiply have no fit dimensions"); AKANTU_DEBUG_ASSERT(this->cols() == B.rows(), "matrices to multiply have no fit dimensions"); } else { AKANTU_DEBUG_ASSERT(k == B.rows(), "matrices to multiply have no fit dimensions"); AKANTU_DEBUG_ASSERT(this->cols() == B.cols(), "matrices to multiply have no fit dimensions"); } if (tr_A) { AKANTU_DEBUG_ASSERT(this->rows() == A.cols(), "matrices to multiply have no fit dimensions"); } else { AKANTU_DEBUG_ASSERT(this->rows() == A.rows(), "matrices to multiply have no fit dimensions"); } #endif // AKANTU_NDEBUG Math::matMul(this->rows(), this->cols(), k, alpha, A.storage(), B.storage(), 0., this->storage()); } /* ---------------------------------------------------------------------- */ inline void outerProduct(const Vector & A, const Vector & B) { AKANTU_DEBUG_ASSERT( 
A.size() == this->rows() && B.size() == this->cols(), "A and B are not compatible with the size of the matrix"); for (UInt i = 0; i < this->rows(); ++i) { for (UInt j = 0; j < this->cols(); ++j) { this->values[i + j * this->rows()] += A[i] * B[j]; } } } private: class EigenSorter { public: EigenSorter(const Vector & eigs) : eigs(eigs) {} bool operator()(const UInt & a, const UInt & b) const { return (eigs(a) > eigs(b)); } private: const Vector & eigs; }; public: /* ---------------------------------------------------------------------- */ inline void eig(Vector & eigenvalues, Matrix & eigenvectors) const { AKANTU_DEBUG_ASSERT(this->cols() == this->rows(), "eig is not a valid operation on a rectangular matrix"); AKANTU_DEBUG_ASSERT(eigenvalues.size() == this->cols(), "eigenvalues should be of size " << this->cols() << "."); #ifndef AKANTU_NDEBUG if (eigenvectors.storage() != nullptr) AKANTU_DEBUG_ASSERT((eigenvectors.rows() == eigenvectors.cols()) && (eigenvectors.rows() == this->cols()), "Eigenvectors needs to be a square matrix of size " << this->cols() << " x " << this->cols() << "."); #endif Matrix tmp = *this; Vector tmp_eigs(eigenvalues.size()); Matrix tmp_eig_vects(eigenvectors.rows(), eigenvectors.cols()); if (tmp_eig_vects.rows() == 0 || tmp_eig_vects.cols() == 0) Math::matrixEig(tmp.cols(), tmp.storage(), tmp_eigs.storage()); else Math::matrixEig(tmp.cols(), tmp.storage(), tmp_eigs.storage(), tmp_eig_vects.storage()); Vector perm(eigenvalues.size()); for (UInt i = 0; i < perm.size(); ++i) perm(i) = i; std::sort(perm.storage(), perm.storage() + perm.size(), EigenSorter(tmp_eigs)); for (UInt i = 0; i < perm.size(); ++i) eigenvalues(i) = tmp_eigs(perm(i)); if (tmp_eig_vects.rows() != 0 && tmp_eig_vects.cols() != 0) for (UInt i = 0; i < perm.size(); ++i) { for (UInt j = 0; j < eigenvectors.rows(); ++j) { eigenvectors(j, i) = tmp_eig_vects(j, perm(i)); } } } /* ---------------------------------------------------------------------- */ inline void eig(Vector & eigenvalues) const { Matrix empty; eig(eigenvalues, empty); } /* ---------------------------------------------------------------------- */ inline void eye(T alpha = 1.) { AKANTU_DEBUG_ASSERT(this->cols() == this->rows(), "eye is not a valid operation on a rectangular matrix"); this->clear(); for (UInt i = 0; i < this->cols(); ++i) { this->values[i + i * this->rows()] = alpha; } } /* ---------------------------------------------------------------------- */ static inline Matrix eye(UInt m, T alpha = 1.) { Matrix tmp(m, m); tmp.eye(alpha); return tmp; } /* ---------------------------------------------------------------------- */ inline T trace() const { AKANTU_DEBUG_ASSERT( this->cols() == this->rows(), "trace is not a valid operation on a rectangular matrix"); T trace = 0.; for (UInt i = 0; i < this->rows(); ++i) { trace += this->values[i + i * this->rows()]; } return trace; } /* ---------------------------------------------------------------------- */ inline Matrix transpose() const { Matrix tmp(this->cols(), this->rows()); for (UInt i = 0; i < this->rows(); ++i) { for (UInt j = 0; j < this->cols(); ++j) { tmp(j, i) = operator()(i, j); } } return tmp; } /* ---------------------------------------------------------------------- */ inline void inverse(const Matrix & A) { AKANTU_DEBUG_ASSERT(A.cols() == A.rows(), "inv is not a valid operation on a rectangular matrix"); AKANTU_DEBUG_ASSERT(this->cols() == A.cols(), "the matrix should have the same size as its inverse"); if (this->cols() == 1) *this->values = 1. 
/ *A.storage(); else if (this->cols() == 2) Math::inv2(A.storage(), this->values); else if (this->cols() == 3) Math::inv3(A.storage(), this->values); else Math::inv(this->cols(), A.storage(), this->values); } inline Matrix inverse() { Matrix inv(this->rows(), this->cols()); inv.inverse(*this); return inv; } /* --------------------------------------------------------------------- */ inline T det() const { AKANTU_DEBUG_ASSERT(this->cols() == this->rows(), "det is not a valid operation on a rectangular matrix"); if (this->cols() == 1) return *(this->values); else if (this->cols() == 2) return Math::det2(this->values); else if (this->cols() == 3) return Math::det3(this->values); else return Math::det(this->cols(), this->values); } /* --------------------------------------------------------------------- */ inline T doubleDot(const Matrix & other) const { AKANTU_DEBUG_ASSERT( this->cols() == this->rows(), "doubleDot is not a valid operation on a rectangular matrix"); if (this->cols() == 1) return *(this->values) * *(other.storage()); else if (this->cols() == 2) return Math::matrixDoubleDot22(this->values, other.storage()); else if (this->cols() == 3) return Math::matrixDoubleDot33(this->values, other.storage()); else AKANTU_DEBUG_ERROR("doubleDot is not defined for spatial dimensions" << " other than 1, 2 or 3."); return T(); } /* ---------------------------------------------------------------------- */ /// function to print the content of the class virtual void printself(std::ostream & stream, int indent = 0) const { std::string space; for (Int i = 0; i < indent; i++, space += AKANTU_INDENT) ; stream << "["; for (UInt i = 0; i < this->n[0]; ++i) { if (i != 0) stream << ", "; stream << "["; for (UInt j = 0; j < this->n[1]; ++j) { if (j != 0) stream << ", "; stream << operator()(i, j); } stream << "]"; } stream << "]"; }; };
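/* -------------------------------------------------------------------------- */
// Editor's sketch (assumption, not original code): exercising the Matrix
// interface defined above; `matrix_usage_sketch` is a hypothetical name.
inline void matrix_usage_sketch() {
  Matrix<Real> A{{2., 0.}, {0., 3.}};     // 2x2 from row-wise initializer lists
  Matrix<Real> I = Matrix<Real>::eye(2);  // identity
  Matrix<Real> C(2, 2);
  C.mul<false, false>(A, I);              // C = 1.0 * A * I (flags transpose A/B)
  Real d = A.det();                       // 6, dispatches to Math::det2
  Matrix<Real> Ainv = A.inverse();        // dispatches to Math::inv2 for 2x2
  Vector<Real> evals(2);
  A.eig(evals);                           // eigenvalues, sorted in descending order
  (void)d; (void)Ainv;
}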
/* -------------------------------------------------------------------------- */ template template inline void Vector::mul(const Matrix & A, const Vector & x, Real alpha) { #ifndef AKANTU_NDEBUG UInt n = x.size(); if (tr_A) { AKANTU_DEBUG_ASSERT(n == A.rows(), "matrix and vector to multiply have incompatible dimensions"); AKANTU_DEBUG_ASSERT(this->size() == A.cols(), "matrix and vector to multiply have incompatible dimensions"); } else { AKANTU_DEBUG_ASSERT(n == A.cols(), "matrix and vector to multiply have incompatible dimensions"); AKANTU_DEBUG_ASSERT(this->size() == A.rows(), "matrix and vector to multiply have incompatible dimensions"); } #endif Math::matVectMul(A.rows(), A.cols(), alpha, A.storage(), x.storage(), 0., this->storage()); } /* -------------------------------------------------------------------------- */ template inline std::ostream & operator<<(std::ostream & stream, const Matrix & _this) { _this.printself(stream); return stream; } /* -------------------------------------------------------------------------- */ template inline std::ostream & operator<<(std::ostream & stream, const Vector & _this) { _this.printself(stream); return stream; } /* ------------------------------------------------------------------------ */ /* Tensor3 */ /* ------------------------------------------------------------------------ */ template class Tensor3 : public TensorStorage> { using parent = TensorStorage>; public: using value_type = typename parent::value_type; using proxy = Tensor3Proxy; public: Tensor3() : parent(){}; Tensor3(UInt m, UInt n, UInt p, const T & def = T()) : parent(m, n, p, def) {} Tensor3(T * data, UInt m, UInt n, UInt p) : parent(data, m, n, p) {} Tensor3(const Tensor3 & src, bool deep_copy = true) : parent(src, deep_copy) {} + Tensor3(const proxy & src) : parent(src) {} + public: /* ------------------------------------------------------------------------ */ inline Tensor3 & operator=(const Tensor3 & src) { parent::operator=(src); return *this; } /* ---------------------------------------------------------------------- */ inline T & operator()(UInt i, UInt j, UInt k) { AKANTU_DEBUG_ASSERT( (i < this->n[0]) && (j < this->n[1]) && (k < this->n[2]), "Access out of the tensor3! " << "You are trying to access the element " << "(" << i << ", " << j << ", " << k << ") in a tensor of size (" << this->n[0] << ", " << this->n[1] << ", " << this->n[2] << ")"); return *(this->values + (k * this->n[0] + i) * this->n[1] + j); } inline const T & operator()(UInt i, UInt j, UInt k) const { AKANTU_DEBUG_ASSERT( (i < this->n[0]) && (j < this->n[1]) && (k < this->n[2]), "Access out of the tensor3! " << "You are trying to access the element " << "(" << i << ", " << j << ", " << k << ") in a tensor of size (" << this->n[0] << ", " << this->n[1] << ", " << this->n[2] << ")"); return *(this->values + (k * this->n[0] + i) * this->n[1] + j); } inline MatrixProxy operator()(UInt k) { AKANTU_DEBUG_ASSERT((k < this->n[2]), "Access out of the tensor3! " << "You are trying to access the slice " << k << " in a tensor3 of size (" << this->n[0] << ", " << this->n[1] << ", " << this->n[2] << ")"); return MatrixProxy(this->values + k * this->n[0] * this->n[1], this->n[0], this->n[1]); } inline const MatrixProxy operator()(UInt k) const { AKANTU_DEBUG_ASSERT((k < this->n[2]), "Access out of the tensor3! " << "You are trying to access the slice " << k << " in a tensor3 of size (" << this->n[0] << ", " << this->n[1] << ", " << this->n[2] << ")"); return MatrixProxy(this->values + k * this->n[0] * this->n[1], this->n[0], this->n[1]); } inline MatrixProxy operator[](UInt k) { return MatrixProxy(this->values + k * this->n[0] * this->n[1], this->n[0], this->n[1]); } inline const MatrixProxy operator[](UInt k) const { return MatrixProxy(this->values + k * this->n[0] * this->n[1], this->n[0], this->n[1]); } };
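/* -------------------------------------------------------------------------- */
// Editor's sketch (assumption, not original code): Tensor3 element and slice
// access; writing through the returned MatrixProxy modifies the tensor's storage.
inline void tensor3_usage_sketch() {
  Tensor3<Real> t(2, 3, 4);    // 2x3x4, entries default-initialized to T()
  t(0, 1, 2) = 1.;             // element access (i, j, k)
  Matrix<Real> slice(t(2));    // t(k) is a MatrixProxy on the 2x3 slice k = 2
  slice(0, 0) = 2.;            // wrapped matrix: writes into t's memory
}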
/* -------------------------------------------------------------------------- */ // support operations for the creation of other vectors /* -------------------------------------------------------------------------- */ template Vector operator*(const T & scalar, const Vector & a) { Vector r(a); r *= scalar; return r; } template Vector operator*(const Vector & a, const T & scalar) { Vector r(a); r *= scalar; return r; } template Vector operator/(const Vector & a, const T & scalar) { Vector r(a); r /= scalar; return r; } template Vector operator*(const Vector & a, const Vector & b) { Vector r(a); r *= b; return r; } template Vector operator+(const Vector & a, const Vector & b) { Vector r(a); r += b; return r; } template Vector operator-(const Vector & a, const Vector & b) { Vector r(a); r -= b; return r; } template Vector operator*(const Matrix & A, const Vector & b) { Vector r(b.size()); r.template mul(A, b); return r; } /* -------------------------------------------------------------------------- */ template Matrix operator*(const T & scalar, const Matrix & a) { Matrix r(a); r *= scalar; return r; } template Matrix operator*(const Matrix & a, const T & scalar) { Matrix r(a); r *= scalar; return r; } template Matrix operator/(const Matrix & a, const T & scalar) { Matrix r(a); r /= scalar; return r; } template Matrix operator+(const Matrix & a, const Matrix & b) { Matrix r(a); r += b; return r; } template Matrix operator-(const Matrix & a, const Matrix & b) { Matrix r(a); r -= b; return r; } } // namespace akantu #endif /* __AKANTU_AKA_TYPES_HH__ */
diff --git a/src/fe_engine/shape_functions.hh b/src/fe_engine/shape_functions.hh index 002b9fe7d..62e4953a8 100644 --- a/src/fe_engine/shape_functions.hh +++ b/src/fe_engine/shape_functions.hh @@ -1,214 +1,216 @@ /** * @file shape_functions.hh * * @author Guillaume Anciaux * @author Nicolas Richart * * @date creation: Fri Jun 18 2010 * @date last modification: Thu Oct 22 2015 * * @brief shape function class * * @section LICENSE * * Copyright (©) 2010-2012, 2014, 2015 EPFL (Ecole Polytechnique Fédérale de * Lausanne) Laboratory (LSMS - Laboratoire de Simulation en Mécanique des * Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see <http://www.gnu.org/licenses/>. * */ /* -------------------------------------------------------------------------- */ #include "aka_memory.hh" #include "mesh.hh" /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_SHAPE_FUNCTIONS_HH__ #define __AKANTU_SHAPE_FUNCTIONS_HH__ namespace akantu { /* -------------------------------------------------------------------------- */ class ShapeFunctions : protected Memory { /* ------------------------------------------------------------------------ */ /* Constructors/Destructors */ /* ------------------------------------------------------------------------ */ public: ShapeFunctions(const Mesh & mesh, const ID & id = "shape", const MemoryID & memory_id = 0); ~ShapeFunctions() override = default; /* ------------------------------------------------------------------------ */ /* Methods */ /* ------------------------------------------------------------------------ */ public: /// function to print the content of the class virtual void printself(std::ostream & stream, int indent = 0) const { std::string space; for (Int i = 0; i < indent; i++, space += AKANTU_INDENT) ; stream << space << "Shapes [" << std::endl; integration_points.printself(stream, indent + 1); + shapes.printself(stream, indent + 1); + shapes_derivatives.printself(stream, indent + 1); stream << space << "]" << std::endl; - }; + } /// set the integration points for a given element template void setIntegrationPointsByType(const Matrix & integration_points, const GhostType & ghost_type); /// Build pre-computed matrices for interpolation of field from integration /// points at other given positions (interpolation_points) void initElementalFieldInterpolationFromIntegrationPoints( const ElementTypeMapArray & interpolation_points_coordinates, ElementTypeMapArray & interpolation_points_coordinates_matrices, ElementTypeMapArray & quad_points_coordinates_inv_matrices, const ElementTypeMapArray & quadrature_points_coordinates, const ElementTypeMapArray * element_filter) const;
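  // Editor's note (illustrative, inferred from buildInterpolationMatrix below):
  // for interpolation points with coordinates (x_q, y_q), each row of the
  // precomputed matrix has the form [1, x_q, y_q, ...] up to the requested
  // polynomial order, so values known at the integration points can be fitted
  // once and then cheaply re-evaluated at the interpolation points.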
/// Interpolate field at given position from given values of this field at /// integration points (field), using matrices precomputed with /// initElementalFieldInterpolationFromIntegrationPoints void interpolateElementalFieldFromIntegrationPoints( const ElementTypeMapArray & field, const ElementTypeMapArray & interpolation_points_coordinates_matrices, const ElementTypeMapArray & quad_points_coordinates_inv_matrices, ElementTypeMapArray & result, const GhostType & ghost_type, const ElementTypeMapArray * element_filter) const; protected: /// interpolate nodal values stored by element on the integration points template void interpolateElementalFieldOnIntegrationPoints( const Array & u_el, Array & uq, const GhostType & ghost_type, const Array & shapes, const Array & filter_elements = empty_filter) const; /// gradient of nodal values stored by element on the control points template void gradientElementalFieldOnIntegrationPoints( const Array & u_el, Array & out_nablauq, const GhostType & ghost_type, const Array & shapes_derivatives, const Array & filter_elements) const; protected: /// by-element versions of the non-templated eponymous methods template inline void interpolateElementalFieldFromIntegrationPoints( const Array & field, const Array & interpolation_points_coordinates_matrices, const Array & quad_points_coordinates_inv_matrices, ElementTypeMapArray & result, const GhostType & ghost_type, const Array & element_filter) const; /// Interpolate field at given position from given values of this field at /// integration points (field), using matrices precomputed with /// initElementalFieldInterpolationFromIntegrationPoints template inline void initElementalFieldInterpolationFromIntegrationPoints( const Array & interpolation_points_coordinates, ElementTypeMapArray & interpolation_points_coordinates_matrices, ElementTypeMapArray & quad_points_coordinates_inv_matrices, const Array & quadrature_points_coordinates, const GhostType & ghost_type, const Array & element_filter) const; /// build matrix for the interpolation of field from integration points template inline void buildElementalFieldInterpolationMatrix( const Matrix & coordinates, Matrix & coordMatrix, UInt integration_order = ElementClassProperty::polynomial_degree) const; /// build the so-called interpolation matrix (first column is 1, then the /// other columns are the transposed coordinates) inline void buildInterpolationMatrix(const Matrix & coordinates, Matrix & coordMatrix, UInt integration_order) const; public: virtual void onElementsAdded(const Array &) { AKANTU_DEBUG_TO_IMPLEMENT(); } virtual void onElementsRemoved(const Array &, const ElementTypeMapArray &) { AKANTU_DEBUG_TO_IMPLEMENT(); } /* ------------------------------------------------------------------------ */ /* Accessors */ /* ------------------------------------------------------------------------ */ public: /// get the size of the shapes returned by the element class static inline UInt getShapeSize(const ElementType & type); /// get the size of the shapes derivatives returned by the element class static inline UInt getShapeDerivativesSize(const ElementType & type); inline const Matrix & getIntegrationPoints(const ElementType & type, const GhostType & ghost_type) const { return integration_points(type, ghost_type); } /* ------------------------------------------------------------------------ */ /* Accessors */ /* ------------------------------------------------------------------------ */ public: /// get the shapes vector inline const Array & getShapes(const ElementType & el_type, const GhostType & ghost_type = _not_ghost) const; /// get the shapes derivatives vector inline const Array &
getShapesDerivatives(const ElementType & el_type, const GhostType & ghost_type = _not_ghost) const; /* ------------------------------------------------------------------------ */ /* Class Members */ /* ------------------------------------------------------------------------ */ protected: /// shape functions for all elements ElementTypeMapArray shapes; /// shape functions derivatives for all elements ElementTypeMapArray shapes_derivatives; /// associated mesh const Mesh & mesh; /// shape functions for all elements ElementTypeMap> integration_points; }; /* -------------------------------------------------------------------------- */ /* inline functions */ /* -------------------------------------------------------------------------- */ /// standard output stream operator inline std::ostream & operator<<(std::ostream & stream, const ShapeFunctions & _this) { _this.printself(stream); return stream; } } // namespace akantu #include "shape_functions_inline_impl.cc" #endif /* __AKANTU_SHAPE_FUNCTIONS_HH__ */ diff --git a/src/fe_engine/shape_structural.hh b/src/fe_engine/shape_structural.hh index ec8cb2d61..7f6840b61 100644 --- a/src/fe_engine/shape_structural.hh +++ b/src/fe_engine/shape_structural.hh @@ -1,145 +1,156 @@ /** * @file shape_structural.hh * * @author Fabian Barras * @author Nicolas Richart * * @date creation: Tue Feb 15 2011 * @date last modification: Thu Oct 22 2015 * * @brief shape class for element with different set of shapes functions * * @section LICENSE * * Copyright (©) 2010-2012, 2014, 2015 EPFL (Ecole Polytechnique Fédérale de * Lausanne) Laboratory (LSMS - Laboratoire de Simulation en Mécanique des * Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "shape_functions.hh" /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_SHAPE_STRUCTURAL_HH__ #define __AKANTU_SHAPE_STRUCTURAL_HH__ namespace akantu { template class ShapeStructural : public ShapeFunctions { /* ------------------------------------------------------------------------ */ /* Constructors/Destructors */ /* ------------------------------------------------------------------------ */ public: - using ElementTypeMapMultiReal = ElementTypeMap **>; - ShapeStructural(Mesh & mesh, const ID & id = "shape_structural", - const MemoryID & memory_id = 0); - ~ShapeStructural() override; + ShapeStructural(Mesh & mesh, const ID & id = "shape_structural", + const MemoryID & memory_id = 0) + : ShapeFunctions(mesh, id, memory_id), + rotation_matrices("rotation_matrices", id, memory_id) {} + ~ShapeStructural() override = default; /* ------------------------------------------------------------------------ */ /* Methods */ /* ------------------------------------------------------------------------ */ public: + /// function to print the content of the class + void printself(std::ostream & stream, int indent = 0) const override { + std::string space; + for (Int i = 0; i < indent; i++, space += AKANTU_INDENT) + ; + stream << space << "ShapeStructural [" << std::endl; + rotation_matrices.printself(stream, indent + 1); + ShapeFunctions::printself(stream, indent + 1); + stream << space << "]" << std::endl; + } + /// compute shape functions on given integration points template void computeShapesOnIntegrationPoints( const Array &, const Matrix & integration_points, Array & shapes, const GhostType & ghost_type, const Array & filter_elements = empty_filter) const; /// initialization function for structural elements inline void initShapeFunctions(const Array & nodes, const Matrix & integration_points, const ElementType & type, const GhostType & ghost_type); /// precompute the rotation matrices for the element dofs template void precomputeRotationMatrices(const Array & nodes, const GhostType & ghost_type); /// precompute all shapes on the element integration points from natural /// coordinates template void precomputeShapesOnIntegrationPoints(const Array & nodes, const GhostType & ghost_type); /// precompute all shape derivatives on the element integration points from /// natural coordinates template void precomputeShapeDerivativesOnIntegrationPoints(const Array & nodes, const GhostType & ghost_type); /// interpolate nodal values on the integration points template void interpolateOnIntegrationPoints( const Array & u, Array & uq, UInt nb_degree_of_freedom, const GhostType & ghost_type = _not_ghost, const Array & filter_elements = empty_filter) const; /// compute the gradient of u on the integration points template void gradientOnIntegrationPoints( const Array & u, Array & nablauq, UInt nb_degree_of_freedom, const GhostType & ghost_type = _not_ghost, const Array & filter_elements = empty_filter) const; /// interpolate on physical point template void interpolate(const Vector & /*real_coords*/, UInt /*elem*/, const Matrix & /*nodal_values*/, Vector & /*interpolated*/, const GhostType & /*ghost_type*/) const { AKANTU_DEBUG_TO_IMPLEMENT(); } /// compute the shapes on a provided point template void computeShapes(const Vector & /*real_coords*/, UInt /*elem*/, Vector & /*shapes*/, const GhostType & /*ghost_type*/) const { AKANTU_DEBUG_TO_IMPLEMENT(); } /// compute the shape derivatives on a provided point template void computeShapeDerivatives(const
Matrix & /*real_coords*/, UInt /*elem*/, Tensor3 & /*shapes*/, const GhostType & /*ghost_type*/) const { AKANTU_DEBUG_TO_IMPLEMENT(); } /// multiply a field by shape functions template void fieldTimesShapes(__attribute__((unused)) const Array & field, __attribute__((unused)) Array & field_times_shapes, __attribute__((unused)) const GhostType & ghost_type) const { AKANTU_DEBUG_TO_IMPLEMENT(); } protected: ElementTypeMapArray rotation_matrices; }; } // namespace akantu #include "shape_structural_inline_impl.cc" #endif /* __AKANTU_SHAPE_STRUCTURAL_HH__ */ diff --git a/src/fe_engine/shape_structural_inline_impl.cc b/src/fe_engine/shape_structural_inline_impl.cc index efdbc515c..15e339e85 100644 --- a/src/fe_engine/shape_structural_inline_impl.cc +++ b/src/fe_engine/shape_structural_inline_impl.cc @@ -1,353 +1,354 @@ /** * @file shape_structural_inline_impl.cc * * @author Fabian Barras * @author Nicolas Richart * * @date creation: Mon Dec 13 2010 * @date last modification: Thu Oct 15 2015 * * @brief ShapeStructural inline implementation * * @section LICENSE * * Copyright (©) 2010-2012, 2014, 2015 EPFL (Ecole Polytechnique Fédérale de * Lausanne) Laboratory (LSMS - Laboratoire de Simulation en Mécanique des * Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "mesh_iterators.hh" #include "shape_structural.hh" /* -------------------------------------------------------------------------- */ #ifndef __AKANTU_SHAPE_STRUCTURAL_INLINE_IMPL_CC__ #define __AKANTU_SHAPE_STRUCTURAL_INLINE_IMPL_CC__ namespace akantu { template inline void ShapeStructural::initShapeFunctions( const Array & /* unused */, const Matrix & /* unused */, const ElementType & /* unused */, const GhostType & /* unused */) { AKANTU_DEBUG_TO_IMPLEMENT(); } /* -------------------------------------------------------------------------- */ #define INIT_SHAPE_FUNCTIONS(type) \ setIntegrationPointsByType(integration_points, ghost_type); \ precomputeRotationMatrices(nodes, ghost_type); \ precomputeShapesOnIntegrationPoints(nodes, ghost_type); \ precomputeShapeDerivativesOnIntegrationPoints(nodes, ghost_type); template <> inline void ShapeStructural<_ek_structural>::initShapeFunctions( const Array & nodes, const Matrix & integration_points, const ElementType & type, const GhostType & ghost_type) { AKANTU_BOOST_STRUCTURAL_ELEMENT_SWITCH(INIT_SHAPE_FUNCTIONS); } #undef INIT_SHAPE_FUNCTIONS /* -------------------------------------------------------------------------- */ template <> template void ShapeStructural<_ek_structural>::computeShapesOnIntegrationPoints( const Array & /*nodes*/, const Matrix & integration_points, Array & shapes, const GhostType & ghost_type, const Array & filter_elements) const { /// \TODO this code differs from ShapeLagrangeBase only in the size of N UInt nb_points = integration_points.cols(); UInt nb_element = mesh.getConnectivity(type, ghost_type).size(); shapes.resize(nb_element * nb_points); UInt ndof = ElementClass::getNbDegreeOfFreedom(); #if !defined(AKANTU_NDEBUG) UInt size_of_shapes = ElementClass::getShapeSize(); AKANTU_DEBUG_ASSERT(shapes.getNbComponent() == size_of_shapes, "The shapes array does not have the correct " << "number of components"); #endif auto shapes_it = shapes.begin_reinterpret( ElementClass::getNbNodesPerInterpolationElement(), ndof, nb_points, nb_element); auto shapes_begin = shapes_it; if (filter_elements != empty_filter) { nb_element = filter_elements.size(); } for (UInt elem = 0; elem < nb_element; ++elem) { if (filter_elements != empty_filter) shapes_it = shapes_begin + filter_elements(elem); Tensor3 & N = *shapes_it; ElementClass::computeShapes(integration_points, N); if (filter_elements == empty_filter) ++shapes_it; } } /* -------------------------------------------------------------------------- */ template template void ShapeStructural::precomputeRotationMatrices( const Array & nodes, const GhostType & ghost_type) { AKANTU_DEBUG_IN(); const auto spatial_dimension = mesh.getSpatialDimension(); const auto nb_nodes_per_element = Mesh::getNbNodesPerElement(type); const auto nb_element = mesh.getNbElement(type, ghost_type); const auto nb_dof = ElementClass::getNbDegreeOfFreedom(); if (not this->rotation_matrices.exists(type, ghost_type)) { this->rotation_matrices.alloc(0, nb_dof * nb_dof, type, ghost_type); } auto & rot_matrices = this->rotation_matrices(type, ghost_type); rot_matrices.resize(nb_element); Array x_el(0, spatial_dimension * nb_nodes_per_element); FEEngine::extractNodalToElementField(mesh, nodes, x_el, type, ghost_type); bool has_extra_normal = mesh.hasData("extra_normal", type, ghost_type); Array::vector_iterator extra_normal; if (has_extra_normal) extra_normal = mesh.getData("extra_normal",
type, ghost_type) .begin(spatial_dimension); for (auto && tuple : zip(make_view(x_el, spatial_dimension, nb_nodes_per_element), make_view(rot_matrices, nb_dof, nb_dof))) { // compute the rotation matrix of this element auto & X = std::get<0>(tuple); auto & R = std::get<1>(tuple); if (has_extra_normal) { ElementClass::computeRotationMatrix(R, X, *extra_normal); ++extra_normal; } else { ElementClass::computeRotationMatrix(R, X, Vector()); } } AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ template template void ShapeStructural::precomputeShapesOnIntegrationPoints( const Array & /*nodes*/, const GhostType & ghost_type) { AKANTU_DEBUG_IN(); const auto & natural_coords = integration_points(type, ghost_type); auto nb_nodes_per_element = Mesh::getNbNodesPerElement(type); auto nb_points = integration_points(type, ghost_type).cols(); auto nb_element = mesh.getNbElement(type, ghost_type); auto nb_dof = ElementClass::getNbDegreeOfFreedom(); auto itp_type = FEEngine::getInterpolationType(type); if (not shapes.exists(itp_type, ghost_type)) { auto size_of_shapes = this->getShapeSize(type); this->shapes.alloc(0, size_of_shapes, itp_type, ghost_type); } auto & shapes_ = this->shapes(itp_type, ghost_type); shapes_.resize(nb_element * nb_points); auto shapes_it = shapes_.begin_reinterpret( nb_dof, nb_dof * nb_nodes_per_element, nb_points, nb_element); for (UInt elem = 0; elem < nb_element; ++elem, ++shapes_it) { auto & N = *shapes_it; ElementClass::computeShapes(natural_coords, N); } AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ template template void ShapeStructural::precomputeShapeDerivativesOnIntegrationPoints( const Array & nodes, const GhostType & ghost_type) { AKANTU_DEBUG_IN(); const auto & natural_coords = integration_points(type, ghost_type); const auto spatial_dimension = mesh.getSpatialDimension(); const auto natural_spatial_dimension = ElementClass::getNaturalSpaceDimension(); const auto nb_nodes_per_element = Mesh::getNbNodesPerElement(type); const auto nb_points = natural_coords.cols(); const auto nb_dof = ElementClass::getNbDegreeOfFreedom(); const auto nb_element = mesh.getNbElement(type, ghost_type); const auto nb_stress_components = ElementClass::getNbStressComponents(); auto itp_type = FEEngine::getInterpolationType(type); if (not this->shapes_derivatives.exists(itp_type, ghost_type)) { auto size_of_shapesd = this->getShapeDerivativesSize(type); this->shapes_derivatives.alloc(0, size_of_shapesd, itp_type, ghost_type); } auto & rot_matrices = this->rotation_matrices(type, ghost_type); Array x_el(0, spatial_dimension * nb_nodes_per_element); FEEngine::extractNodalToElementField(mesh, nodes, x_el, type, ghost_type); auto & shapesd = this->shapes_derivatives(itp_type, ghost_type); shapesd.resize(nb_element * nb_points); for (auto && tuple : zip(make_view(x_el, spatial_dimension, nb_nodes_per_element), make_view(shapesd, nb_stress_components, nb_nodes_per_element * nb_dof, nb_points), make_view(rot_matrices, nb_dof, nb_dof))) { // compute shape derivatives auto & X = std::get<0>(tuple); auto & B = std::get<1>(tuple); auto & RDOFs = std::get<2>(tuple); auto R = RDOFs.block(0, 0, spatial_dimension, spatial_dimension); // Rotate to local basis auto x = (R * X).block(0, 0, natural_spatial_dimension, nb_nodes_per_element); Tensor3 dnds(B.size(0), B.size(1), B.size(2)); ElementClass::computeDNDS(natural_coords, dnds); Tensor3 J(x.rows(), natural_coords.rows(), natural_coords.cols());
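    // The two calls below fill J, the per-quadrature-point Jacobian of the
    // natural-to-local mapping (computed from dnds and the rotated coordinates
    // x), and then assemble B, the shape-function derivative operator expressed
    // in the element's local frame.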
ElementClass::computeJMat(dnds, x, J); ElementClass::computeShapeDerivatives(J, dnds, B); } AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ template template void ShapeStructural::interpolateOnIntegrationPoints( const Array & in_u, Array & out_uq, UInt nb_dof, const GhostType & ghost_type, const Array & filter_elements) const { AKANTU_DEBUG_IN(); AKANTU_DEBUG_ASSERT(out_uq.getNbComponent() == nb_dof, "The output array shape is not correct"); auto itp_type = FEEngine::getInterpolationType(type); const auto & shapes_ = shapes(itp_type, ghost_type); auto nb_element = mesh.getNbElement(type, ghost_type); auto nb_nodes_per_element = ElementClass::getNbNodesPerElement(); auto nb_quad_points_per_element = integration_points(type, ghost_type).cols(); Array u_el(0, nb_nodes_per_element * nb_dof); FEEngine::extractNodalToElementField(mesh, in_u, u_el, type, ghost_type, filter_elements); auto nb_quad_points = nb_quad_points_per_element * u_el.size(); out_uq.resize(nb_quad_points); auto out_it = out_uq.begin_reinterpret(nb_dof, 1, nb_quad_points_per_element, u_el.size()); auto shapes_it = shapes_.begin_reinterpret(nb_dof, nb_dof * nb_nodes_per_element, nb_quad_points_per_element, nb_element); auto u_it = u_el.begin_reinterpret(nb_dof * nb_nodes_per_element, 1, nb_quad_points_per_element, u_el.size()); for_each_elements(nb_element, filter_elements, [&](auto && el) { auto & uq = *out_it; const auto & u = *u_it; auto N = Tensor3(shapes_it[el]); for (auto && q : arange(uq.size(2))) { auto uq_q = Matrix(uq(q)); auto u_q = Matrix(u(q)); auto N_q = Matrix(N(q)); uq_q.mul(N_q, u_q); } ++out_it; ++u_it; }); AKANTU_DEBUG_OUT(); } /* -------------------------------------------------------------------------- */ template template void ShapeStructural::gradientOnIntegrationPoints( const Array & in_u, Array & out_nablauq, UInt nb_dof, const GhostType & ghost_type, const Array & filter_elements) const { AKANTU_DEBUG_IN(); auto itp_type = FEEngine::getInterpolationType(type); const auto & shapesd = shapes_derivatives(itp_type, ghost_type); auto nb_element = mesh.getNbElement(type, ghost_type); auto element_dimension = ElementClass::getSpatialDimension(); auto nb_quad_points_per_element = integration_points(type, ghost_type).cols(); auto nb_nodes_per_element = ElementClass::getNbNodesPerElement(); Array u_el(0, nb_nodes_per_element * nb_dof); FEEngine::extractNodalToElementField(mesh, in_u, u_el, type, ghost_type, filter_elements); auto nb_quad_points = nb_quad_points_per_element * u_el.size(); out_nablauq.resize(nb_quad_points); auto out_it = out_nablauq.begin_reinterpret( element_dimension, 1, nb_quad_points_per_element, u_el.size()); auto shapesd_it = shapesd.begin_reinterpret( element_dimension, nb_dof * nb_nodes_per_element, nb_quad_points_per_element, nb_element); auto u_it = u_el.begin_reinterpret(nb_dof * nb_nodes_per_element, 1, nb_quad_points_per_element, u_el.size()); for_each_elements(nb_element, filter_elements, [&](auto && el) { auto & nablau = *out_it; const auto & u = *u_it; auto B = Tensor3(shapesd_it[el]); for (auto && q : arange(nablau.size(2))) { auto nablau_q = Matrix(nablau(q)); auto u_q = Matrix(u(q)); auto B_q = Matrix(B(q)); nablau_q.mul(B_q, u_q); } ++out_it; ++u_it; }); AKANTU_DEBUG_OUT(); } } // namespace akantu #endif /* __AKANTU_SHAPE_STRUCTURAL_INLINE_IMPL_CC__ */ diff --git a/test/test_fe_engine/test_fe_engine_precomputation_bernoulli_2.cc b/test/test_fe_engine/test_fe_engine_precomputation_bernoulli_2.cc index 30644bea7..c955be439 
100644 --- a/test/test_fe_engine/test_fe_engine_precomputation_bernoulli_2.cc +++ b/test/test_fe_engine/test_fe_engine_precomputation_bernoulli_2.cc @@ -1,72 +1,71 @@ /** * @file test_fe_engine_precomputation.cc * * @author Nicolas Richart * * @date creation: Mon Jun 14 2010 * @date last modification: Mon Jul 13 2015 * * @brief test of the fem class * * @section LICENSE * * Copyright (©) 2010-2012, 2014, 2015 EPFL (Ecole Polytechnique Fédérale de * Lausanne) Laboratory (LSMS - Laboratoire de Simulation en Mécanique des * Solides) * * Akantu is free software: you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * Akantu is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public License * along with Akantu. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "fe_engine.hh" #include "shape_structural.hh" #include "integrator_gauss.hh" /* -------------------------------------------------------------------------- */ #include /* -------------------------------------------------------------------------- */ using namespace akantu; int main(int argc, char *argv[]) { akantu::initialize(argc, argv); // debug::setDebugLevel(dblTest); constexpr ElementType type = _bernoulli_beam_2; UInt dim = ElementClass::getSpatialDimension(); Mesh mesh(dim); Vector node = {0, 0}; mesh.getNodes().push_back(node); node = {1, 1}; mesh.getNodes().push_back(node); mesh.addConnectivityType(type); auto & connectivity = mesh.getConnectivity(type); Vector elem = {0, 1}; connectivity.push_back(elem); auto fem = - std::make_unique>( + std::make_unique>( mesh, dim, "test_fem"); fem->initShapeFunctions(); - // std::cout << *fem << std::endl; + std::cout << *fem << std::endl; - // delete fem; - // finalize(); + finalize(); return 0; }
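// Editor's note: with the printself overrides added in this patch, the test's
// `std::cout << *fem` should emit a nested block roughly of the form below
// (exact contents depend on the mesh, element type and debug level):
//   ShapeStructural [
//     rotation_matrices : ...
//     Shapes [
//       integration_points : ...
//       shapes : ...
//       shapes_derivatives : ...
//     ]
//   ]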