diff --git a/CMakeLists.txt b/CMakeLists.txt index bd8320f..2b23796 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,148 +1,154 @@ #==================================================================================================# # # # (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM # # # #==================================================================================================# # initialization # -------------- # required to specify the c++ standard cmake_minimum_required(VERSION 3.0) # required for install include(CMakePackageConfigHelpers) include(GNUInstallDirs) # options option(PKGCONFIG "Build pkg-config" ON) option(BUILD_TESTS "${PROJECT_NAME} test suite" OFF) # project settings # ---------------- # name project(GooseFEM) # file that contains the version information set(parse_version include/GooseFEM/config.h) # header files set(headers include/GooseFEM/config.h include/GooseFEM/GooseFEM.h include/GooseFEM/Mesh.hpp include/GooseFEM/Mesh.h include/GooseFEM/MeshTri3.hpp include/GooseFEM/MeshTri3.h include/GooseFEM/MeshQuad4.hpp include/GooseFEM/MeshQuad4.h include/GooseFEM/MeshHex8.hpp include/GooseFEM/MeshHex8.h include/GooseFEM/Element.hpp include/GooseFEM/Element.h include/GooseFEM/ElementQuad4.hpp include/GooseFEM/ElementQuad4.h include/GooseFEM/ElementQuad4Planar.hpp include/GooseFEM/ElementQuad4Planar.h include/GooseFEM/ElementQuad4Axisymmetric.hpp include/GooseFEM/ElementQuad4Axisymmetric.h include/GooseFEM/ElementHex8.hpp include/GooseFEM/ElementHex8.h include/GooseFEM/Vector.hpp include/GooseFEM/Vector.h include/GooseFEM/VectorPartitioned.hpp include/GooseFEM/VectorPartitioned.h + include/GooseFEM/VectorPartitionedTyings.hpp + include/GooseFEM/VectorPartitionedTyings.h include/GooseFEM/Matrix.hpp include/GooseFEM/Matrix.h include/GooseFEM/MatrixPartitioned.hpp include/GooseFEM/MatrixPartitioned.h + include/GooseFEM/MatrixPartitionedTyings.hpp + include/GooseFEM/MatrixPartitionedTyings.h include/GooseFEM/MatrixDiagonal.hpp include/GooseFEM/MatrixDiagonal.h include/GooseFEM/MatrixDiagonalPartitioned.hpp include/GooseFEM/MatrixDiagonalPartitioned.h include/GooseFEM/Iterate.hpp include/GooseFEM/Iterate.h + include/GooseFEM/TyingsPeriodic.hpp + include/GooseFEM/TyingsPeriodic.h ) # automatically parse the version number file(READ "${parse_version}" version) string(REGEX MATCH "define[ \t]+GOOSEFEM_WORLD_VERSION[ \t]+([0-9]+)" _ "${version}") set(world_version "${CMAKE_MATCH_1}") string(REGEX MATCH "define[ \t]+GOOSEFEM_MAJOR_VERSION[ \t]+([0-9]+)" _ "${version}") set(major_version "${CMAKE_MATCH_1}") string(REGEX MATCH "define[ \t]+GOOSEFEM_MINOR_VERSION[ \t]+([0-9]+)" _ "${version}") set(minor_version "${CMAKE_MATCH_1}") set(GOOSEFEM_VERSION ${world_version}.${major_version}.${minor_version}) # print information to screen message(STATUS "Building ${PROJECT_NAME} v${GOOSEFEM_VERSION}") # paths # ----- set(GOOSEFEM_ROOT_DIR "${CMAKE_INSTALL_PREFIX}") set(GOOSEFEM_INCLUDE_DIR "${CMAKE_INSTALL_PREFIX}/${INCLUDE_INSTALL_DIR}") set(INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}") set(CMAKEPACKAGE_INSTALL_DIR "${CMAKE_INSTALL_DATADIR}/${PROJECT_NAME}") set(PKGCONFIG_INSTALL_DIR "${CMAKE_INSTALL_DATADIR}/pkgconfig") set(fcmake "GooseFEMConfig.cmake") set(fpkg "GooseFEM.pc") # configure CMake # --------------- configure_package_config_file( ${CMAKE_CURRENT_SOURCE_DIR}/GooseFEMConfig.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/GooseFEMConfig.cmake PATH_VARS GOOSEFEM_INCLUDE_DIR GOOSEFEM_ROOT_DIR INSTALL_DESTINATION 
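Note: the regex block above extracts GOOSEFEM_WORLD_VERSION, GOOSEFEM_MAJOR_VERSION and GOOSEFEM_MINOR_VERSION from include/GooseFEM/config.h. For reference, the macros it expects look roughly as follows (a sketch only; the numeric values are placeholders, the actual config.h is not part of this diff):

    // include/GooseFEM/config.h (sketch, placeholder values)
    #define GOOSEFEM_WORLD_VERSION 0   // matched by "define GOOSEFEM_WORLD_VERSION ([0-9]+)"
    #define GOOSEFEM_MAJOR_VERSION 1   // matched by "define GOOSEFEM_MAJOR_VERSION ([0-9]+)"
    #define GOOSEFEM_MINOR_VERSION 0   // matched by "define GOOSEFEM_MINOR_VERSION ([0-9]+)"
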
${CMAKEPACKAGE_INSTALL_DIR} NO_CHECK_REQUIRED_COMPONENTS_MACRO ) # install/build # ------------- # build test cases if(BUILD_TESTS) add_subdirectory(test) endif() # disable pkg-config for native Windows builds if(WIN32 OR CMAKE_HOST_SYSTEM_NAME MATCHES Windows) option(PKGCONFIG "Build pkg-config ${fpkg} file" OFF) endif() # pkg-config if(PKGCONFIG) configure_file(${fpkg}.in ${fpkg} @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${fpkg} DESTINATION ${PKGCONFIG_INSTALL_DIR}) endif() # CMake install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${fcmake} DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}) # header files install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${headers} DESTINATION ${INCLUDE_INSTALL_DIR}) # print information to screen # --------------------------- message(STATUS "") message(STATUS "+---------------------------------------------------------------------------------") message(STATUS "|") message(STATUS "| Use 'make install' to install in") message(STATUS "| ${CMAKE_INSTALL_PREFIX}") if(BUILD_TESTS) message(STATUS "|") message(STATUS "| Use 'make' and './test/test' to run the tests") endif() message(STATUS "|") message(STATUS "| To specify a custom directory call") message(STATUS "| cmake /path/to/${PROJECT_NAME} -DCMAKE_INSTALL_PREFIX=yourprefix") message(STATUS "|") message(STATUS "| For custom paths, add the following line to your '~/.bashrc'") message(STATUS "| export PKG_CONFIG_PATH=${CMAKE_INSTALL_PREFIX}/share/pkgconfig:$PKG_CONFIG_PATH") message(STATUS "|") message(STATUS "+---------------------------------------------------------------------------------") message(STATUS "") diff --git a/docs/examples/Dynamics/laminateElastic/noDamping/CMakeLists.txt b/docs/examples/Dynamics/laminateElastic/noDamping/CMakeLists.txt index d79d331..24c53c3 100644 --- a/docs/examples/Dynamics/laminateElastic/noDamping/CMakeLists.txt +++ b/docs/examples/Dynamics/laminateElastic/noDamping/CMakeLists.txt @@ -1,57 +1,64 @@ cmake_minimum_required(VERSION 3.1) # basic info # ---------- # define a project name project(main) # define empty list of libraries to link set(PROJECT_LIBS "") # copy files to build folder configure_file("plot.py" "plot.py" COPYONLY) # basic compiler options # ---------------------- # set optimization level set(CMAKE_BUILD_TYPE Release) # set C++ standard set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) # set warnings on set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -g") # enable optimisation set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DXTENSOR_USE_XSIMD=ON") +# assertions +if(NOT ASSERT) + add_definitions(-DNDEBUG) +else() + add_definitions(-DXTENSOR_ENABLE_ASSERT=ON) +endif() + # load packages # ------------- # add custom include paths if(NOT "$ENV{INCLUDE_PATH}" STREQUAL "") string(REPLACE ":" ";" INCLUDE_LIST "$ENV{INCLUDE_PATH}") include_directories(${INCLUDE_LIST}) endif() # load HDF5 find_package(HDF5 COMPONENTS CXX REQUIRED) include_directories(${HDF5_INCLUDE_DIRS}) set(PROJECT_LIBS ${PROJECT_LIBS} ${HDF5_LIBS} ${HDF5_LIBRARIES}) # load eigen find_package(PkgConfig) pkg_check_modules(EIGEN3 REQUIRED eigen3) include_directories(SYSTEM ${EIGEN3_INCLUDE_DIRS}) # create executable # ----------------- add_executable(${PROJECT_NAME} main.cpp) target_link_libraries(${PROJECT_NAME} ${PROJECT_LIBS}) diff --git a/docs/examples/Dynamics/laminateElastic/noDamping/main.cpp b/docs/examples/Dynamics/laminateElastic/noDamping/main.cpp index 8fa9ffe..e06f24b 100644 --- 
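Note: the "assertions" block added to the example CMakeLists defines NDEBUG unless the cache variable ASSERT is set, and only then turns on xtensor's own run-time checks via XTENSOR_ENABLE_ASSERT. On the C++ side this is the standard assert() switch; a minimal sketch, independent of GooseFEM:

    // sketch: effect of the ASSERT option on assert() in the examples
    #include <cassert>
    #include <cstdio>

    int main()
    {
        int n = 3;
        // compiled out with the default (-DNDEBUG); active after "cmake .. -DASSERT=ON"
        assert(n > 0);
        std::printf("n = %d\n", n);
        return 0;
    }
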
a/docs/examples/Dynamics/laminateElastic/noDamping/main.cpp +++ b/docs/examples/Dynamics/laminateElastic/noDamping/main.cpp @@ -1,163 +1,170 @@ #include #include -#include +#include // ------------------------------------------------------------------------------------------------- namespace GF = GooseFEM; namespace QD = GooseFEM::Element::Quad4; namespace GM = GMatElastoPlasticQPot::Cartesian2d; // ------------------------------------------------------------------------------------------------- inline double sqdot(const xt::xtensor &M, const xt::xtensor &V) { double out = 0.; for ( size_t i = 0 ; i < M.size() ; ++i ) out += M(i) * V(i) * V(i); return out; } // ------------------------------------------------------------------------------------------------- int main() { // simulation parameters double T = 60. ; // total time double dt = 1.e-2; // time increment size_t nx = 60 ; // number of elements in both directions double gamma = .05 ; // displacement step // get mesh & quadrature GF::Mesh::Quad4::Regular mesh(nx,nx,1.); xt::xtensor coor = mesh.coor(); xt::xtensor conn = mesh.conn(); xt::xtensor dofs = mesh.dofsPeriodic(); GF::Vector vector(conn, dofs); - QD::Quadrature quad(vector.asElement(coor)); + QD::Quadrature quad(vector.AsElement(coor)); size_t nnode = mesh.nnode(); size_t ndim = mesh.ndim(); + size_t nne = mesh.nne(); size_t nelem = mesh.nelem(); size_t nip = quad.nip(); xt::xtensor u = xt::zeros(coor.shape()); xt::xtensor v = xt::zeros(coor.shape()); xt::xtensor a = xt::zeros(coor.shape()); xt::xtensor v_n = xt::zeros(coor.shape()); xt::xtensor a_n = xt::zeros(coor.shape()); xt::xtensor fint = xt::zeros(coor.shape()); xt::xtensor fext = xt::zeros(coor.shape()); xt::xtensor fres = xt::zeros(coor.shape()); + xt::xtensor ue = xt::zeros({nelem, nne, ndim}); + xt::xtensor fe = xt::zeros({nelem, nne, ndim}); + xt::xtensor Eps = xt::zeros({nelem, nip, ndim, ndim}); xt::xtensor Sig = xt::zeros({nelem, nip, ndim, ndim}); // material definition GM::Matrix material({nelem, nip}); xt::xtensor Ihard = xt::zeros({nelem, nip}); xt::xtensor Isoft = xt::ones ({nelem, nip}); for ( size_t e = 0 ; e < nx*nx/4 ; ++e ) for ( size_t q = 0 ; q < nip ; ++q ) Ihard(e,q) = 1; Isoft -= Ihard; material.setElastic(Ihard, 100., 10.); material.setElastic(Isoft, 100., 1.); material.check(); // mass matrix - QD::Quadrature nodalQuad(vector.asElement(coor), QD::Nodal::xi(), QD::Nodal::w()); + QD::Quadrature nodalQuad(vector.AsElement(coor), QD::Nodal::xi(), QD::Nodal::w()); xt::xtensor rho = 1.0 * xt::ones({nelem, nip}); GF::MatrixDiagonal M(conn, dofs); - M.assemble( nodalQuad.int_N_scalar_NT_dV(rho) ); + M.assemble(nodalQuad.Int_N_scalar_NT_dV(rho)); - xt::xtensor mass = M.asDiagonal(); + xt::xtensor mass = M.AsDiagonal(); // update in macroscopic deformation gradient xt::xtensor dFbar = xt::zeros({2,2}); dFbar(0,1) = gamma; for ( size_t n = 0 ; n < nnode ; ++n ) for ( size_t j = 0 ; j < ndim ; ++j ) for ( size_t k = 0 ; k < ndim ; ++k ) u(n,j) += dFbar(j,k) * ( coor(n,k) - coor(0,k) ); // output variables xt::xtensor Epot = xt::zeros({static_cast(T/dt)}); xt::xtensor Ekin = xt::zeros({static_cast(T/dt)}); xt::xtensor t = xt::zeros({static_cast(T/dt)}); + xt::xtensor dV = quad.DV(); + // loop over increments for ( size_t inc = 0 ; inc < static_cast(Epot.size()) ; ++inc ) { // store history xt::noalias(v_n) = v; xt::noalias(a_n) = a; // new displacement xt::noalias(u) = u + dt * v + 0.5 * std::pow(dt,2.) 
* a; - // compute strain/strain, and corresponding internal + // compute strain/strain, and corresponding internal force - quad.symGradN_vector(vector.asElement(u), Eps); + vector.asElement(u, ue); + quad.symGradN_vector(ue, Eps); material.Sig(Eps, Sig); - vector.assembleNode(quad.int_gradN_dot_tensor2_dV(Sig), fint); + quad.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); // compute residual force & solve xt::noalias(fres) = fext - fint; M.solve(fres, a); // new velocity xt::noalias(v) = v_n + .5 * dt * ( a_n + a ); // store output variables - xt::xtensor E = material.energy(Eps); - xt::xtensor dV = quad.dV(); - xt::xtensor V = vector.asDofs(v); + xt::xtensor E = material.energy(Eps); + xt::xtensor V = vector.AsDofs(v); t (inc) = static_cast(inc) * dt; Ekin(inc) = 0.5 * sqdot(mass,V); Epot(inc) = xt::sum(E*dV)[0]; } // write output variables to file HighFive::File file("example.hdf5", HighFive::File::Overwrite); - LowFive::xtensor::dump(file, "/global/Epot",Epot ); - LowFive::xtensor::dump(file, "/global/Ekin",Ekin ); - LowFive::xtensor::dump(file, "/global/t" ,t ); - LowFive::xtensor::dump(file, "/mesh/conn" ,conn ); - LowFive::xtensor::dump(file, "/mesh/coor" ,coor ); - LowFive::xtensor::dump(file, "/mesh/disp" ,u ); + xt::dump(file, "/global/Epot",Epot ); + xt::dump(file, "/global/Ekin",Ekin ); + xt::dump(file, "/global/t" ,t ); + xt::dump(file, "/mesh/conn" ,conn ); + xt::dump(file, "/mesh/coor" ,coor ); + xt::dump(file, "/mesh/disp" ,u ); return 0; } diff --git a/docs/examples/Dynamics/laminateElastic/noDamping/main_velocityVerlet.cpp b/docs/examples/Dynamics/laminateElastic/noDamping/main_velocityVerlet.cpp index 08026e8..edff410 100644 --- a/docs/examples/Dynamics/laminateElastic/noDamping/main_velocityVerlet.cpp +++ b/docs/examples/Dynamics/laminateElastic/noDamping/main_velocityVerlet.cpp @@ -1,187 +1,190 @@ #include #include -#include +#include // ------------------------------------------------------------------------------------------------- namespace GF = GooseFEM; namespace QD = GooseFEM::Element::Quad4; namespace GM = GMatElastoPlasticQPot::Cartesian2d; // ------------------------------------------------------------------------------------------------- inline double sqdot(const xt::xtensor &M, const xt::xtensor &V) { double out = 0.; for ( size_t i = 0 ; i < M.size() ; ++i ) out += M(i) * V(i) * V(i); return out; } // ------------------------------------------------------------------------------------------------- int main() { // simulation parameters double T = 60. 
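Note: throughout the examples the auto-allocating calls are renamed to start with a capital (AsElement, AsDofs, AsDiagonal, Int_N_scalar_NT_dV, DV), while the lower-case variants now write into a pre-allocated output, so no temporaries are created inside the time loop. A minimal sketch of the two call styles, using only GooseFEM calls that appear in this diff (the 3x3 mesh is arbitrary, and the return type of AsElement is assumed to be the [nelem, nne, ndim] "elemvec" used above):

    // sketch: allocating (capitalised) vs. in-place (lower-case) interface
    #include <GooseFEM/GooseFEM.h>

    int main()
    {
        GooseFEM::Mesh::Quad4::Regular mesh(3, 3);
        GooseFEM::Vector vector(mesh.conn(), mesh.dofsPeriodic());

        xt::xtensor<double,2> u = xt::zeros<double>(mesh.coor().shape());

        // allocating: returns a newly allocated "elemvec" [nelem, nne, ndim]
        xt::xtensor<double,3> ue = vector.AsElement(u);

        // in-place: writes into the pre-allocated "elemvec", no allocation per call
        vector.asElement(u, ue);

        return 0;
    }
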
; // total time double dt = 1.e-2; // time increment size_t nx = 60 ; // number of elements in both directions double gamma = .05 ; // displacement step // get mesh & quadrature GF::Mesh::Quad4::Regular mesh(nx,nx,1.); xt::xtensor coor = mesh.coor(); xt::xtensor conn = mesh.conn(); xt::xtensor dofs = mesh.dofsPeriodic(); GF::Vector vector(conn, dofs); - QD::Quadrature quad(vector.asElement(coor)); + QD::Quadrature quad(vector.AsElement(coor)); - size_t nnode = coor.shape()[0]; - size_t ndim = coor.shape()[1]; - size_t nelem = conn.shape()[0]; + size_t nnode = mesh.nnode(); + size_t ndim = mesh.ndim(); + size_t nne = mesh.nne(); + size_t nelem = mesh.nelem(); size_t nip = quad.nip(); - // nodal displacements, velocities, and accelerations (current and last increment) - xt::xtensor u = xt::zeros(coor.shape()); - xt::xtensor v = xt::zeros(coor.shape()); - xt::xtensor a = xt::zeros(coor.shape()); - xt::xtensor v_n = xt::zeros(coor.shape()); - xt::xtensor a_n = xt::zeros(coor.shape()); - - // nodal forces + xt::xtensor u = xt::zeros(coor.shape()); + xt::xtensor v = xt::zeros(coor.shape()); + xt::xtensor a = xt::zeros(coor.shape()); + xt::xtensor v_n = xt::zeros(coor.shape()); + xt::xtensor a_n = xt::zeros(coor.shape()); xt::xtensor fint = xt::zeros(coor.shape()); xt::xtensor fext = xt::zeros(coor.shape()); xt::xtensor fres = xt::zeros(coor.shape()); - // integration point strain and stress - xt::xtensor Eps = xt::zeros({nelem, nip, ndim, ndim}); - xt::xtensor Sig = xt::zeros({nelem, nip, ndim, ndim}); + xt::xtensor ue = xt::zeros({nelem, nne, ndim}); + xt::xtensor fe = xt::zeros({nelem, nne, ndim}); + + xt::xtensor Eps = xt::zeros({nelem, nip, ndim, ndim}); + xt::xtensor Sig = xt::zeros({nelem, nip, ndim, ndim}); // material definition GM::Matrix material({nelem, nip}); xt::xtensor Ihard = xt::zeros({nelem, nip}); xt::xtensor Isoft = xt::ones ({nelem, nip}); for ( size_t e = 0 ; e < nx*nx/4 ; ++e ) for ( size_t q = 0 ; q < nip ; ++q ) Ihard(e,q) = 1; Isoft -= Ihard; material.setElastic(Ihard, 100., 10.); material.setElastic(Isoft, 100., 1.); material.check(); // mass matrix - QD::Quadrature nodalQuad(vector.asElement(coor), QD::Nodal::xi(), QD::Nodal::w()); + QD::Quadrature nodalQuad(vector.AsElement(coor), QD::Nodal::xi(), QD::Nodal::w()); xt::xtensor rho = 1.0 * xt::ones({nelem, nip}); GF::MatrixDiagonal M(conn, dofs); - M.assemble( nodalQuad.int_N_scalar_NT_dV(rho) ); + M.assemble(nodalQuad.Int_N_scalar_NT_dV(rho)); - xt::xtensor mass = M.asDiagonal(); + xt::xtensor mass = M.AsDiagonal(); // update in macroscopic deformation gradient xt::xtensor dFbar = xt::zeros({2,2}); dFbar(0,1) = gamma; for ( size_t n = 0 ; n < nnode ; ++n ) for ( size_t j = 0 ; j < ndim ; ++j ) for ( size_t k = 0 ; k < ndim ; ++k ) u(n,j) += dFbar(j,k) * ( coor(n,k) - coor(0,k) ); // output variables xt::xtensor Epot = xt::zeros({static_cast(T/dt)}); xt::xtensor Ekin = xt::zeros({static_cast(T/dt)}); xt::xtensor t = xt::zeros({static_cast(T/dt)}); + xt::xtensor dV = quad.DV(); + // loop over increments for ( size_t inc = 0 ; inc < static_cast(Epot.size()) ; ++inc ) { // store history xt::noalias(v_n) = v; xt::noalias(a_n) = a; // new displacement xt::noalias(u) = u + dt * v + 0.5 * std::pow(dt,2.) 
* a; - // compute strain/strain, and corresponding force + // compute strain/strain, and corresponding internal force - quad.symGradN_vector(vector.asElement(u), Eps); + vector.asElement(u, ue); + quad.symGradN_vector(ue, Eps); material.Sig(Eps, Sig); - vector.assembleNode(quad.int_gradN_dot_tensor2_dV(Sig), fint); + quad.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); // estimate new velocity xt::noalias(v) = v_n + dt * a_n; // compute residual force & solve xt::noalias(fres) = fext - fint; M.solve(fres, a); // re-estimate new velocity xt::noalias(v) = v_n + .5 * dt * ( a_n + a ); // compute residual force & solve xt::noalias(fres) = fext - fint; M.solve(fres, a); // new velocity xt::noalias(v) = v_n + .5 * dt * ( a_n + a ); // compute residual force & solve xt::noalias(fres) = fext - fint; M.solve(fres, a); // store output variables - xt::xtensor E = material.energy(Eps); - xt::xtensor dV = quad.dV(); - xt::xtensor V = vector.asDofs(v); + xt::xtensor E = material.energy(Eps); + xt::xtensor V = vector.AsDofs(v); t (inc) = static_cast(inc) * dt; Ekin(inc) = 0.5 * sqdot(mass,V); Epot(inc) = xt::sum(E*dV)[0]; } // write output variables to file HighFive::File file("example.hdf5", HighFive::File::Overwrite); - LowFive::xtensor::dump(file, "/global/Epot",Epot ); - LowFive::xtensor::dump(file, "/global/Ekin",Ekin ); - LowFive::xtensor::dump(file, "/global/t" ,t ); - LowFive::xtensor::dump(file, "/mesh/conn" ,conn ); - LowFive::xtensor::dump(file, "/mesh/coor" ,coor ); - LowFive::xtensor::dump(file, "/mesh/disp" ,u ); + xt::dump(file, "/global/Epot",Epot ); + xt::dump(file, "/global/Ekin",Ekin ); + xt::dump(file, "/global/t" ,t ); + xt::dump(file, "/mesh/conn" ,conn ); + xt::dump(file, "/mesh/coor" ,coor ); + xt::dump(file, "/mesh/disp" ,u ); return 0; } diff --git a/docs/examples/Statics/LinearElasticity/CMakeLists.txt b/docs/examples/Statics/LinearElasticity/CMakeLists.txt index d79d331..028001e 100644 --- a/docs/examples/Statics/LinearElasticity/CMakeLists.txt +++ b/docs/examples/Statics/LinearElasticity/CMakeLists.txt @@ -1,57 +1,65 @@ cmake_minimum_required(VERSION 3.1) # basic info # ---------- # define a project name project(main) # define empty list of libraries to link set(PROJECT_LIBS "") # copy files to build folder configure_file("plot.py" "plot.py" COPYONLY) # basic compiler options # ---------------------- # set optimization level set(CMAKE_BUILD_TYPE Release) # set C++ standard set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) # set warnings on set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -g") # enable optimisation set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DXTENSOR_USE_XSIMD=ON") +# assertions +if(NOT ASSERT) + add_definitions(-DNDEBUG) +else() + add_definitions(-DXTENSOR_ENABLE_ASSERT=ON) +endif() + # load packages # ------------- # add custom include paths if(NOT "$ENV{INCLUDE_PATH}" STREQUAL "") string(REPLACE ":" ";" INCLUDE_LIST "$ENV{INCLUDE_PATH}") include_directories(${INCLUDE_LIST}) endif() # load HDF5 find_package(HDF5 COMPONENTS CXX REQUIRED) include_directories(${HDF5_INCLUDE_DIRS}) set(PROJECT_LIBS ${PROJECT_LIBS} ${HDF5_LIBS} ${HDF5_LIBRARIES}) # load eigen find_package(PkgConfig) pkg_check_modules(EIGEN3 REQUIRED eigen3) include_directories(SYSTEM ${EIGEN3_INCLUDE_DIRS}) # create executable # ----------------- add_executable(${PROJECT_NAME} main.cpp) +# add_executable(${PROJECT_NAME} main_manualPartition.cpp) target_link_libraries(${PROJECT_NAME} 
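Note: both dynamics examples integrate M a = fext - fint with velocity Verlet, exactly as spelled out in the loop above: update u from the old v and a, recompute fint, solve for the new a, then update v; main_velocityVerlet.cpp additionally iterates the velocity estimate, which only matters once forces depend on velocity (e.g. with damping). Condensed to one scalar degree of freedom the scheme reads (a sketch with arbitrary m and k; not GooseFEM code):

    // sketch: the velocity-Verlet update used in the examples, for one scalar DOF
    #include <cstdio>

    int main()
    {
        double m = 1.0, k = 1.0;   // mass and linear "internal force" stiffness
        double u = 1.0, v = 0.0;   // initial displacement / velocity
        double a = -k * u / m;     // initial acceleration
        double dt = 1e-2;

        for (int inc = 0; inc < 1000; ++inc)
        {
            double v_n = v;
            double a_n = a;

            u = u + dt * v_n + 0.5 * dt * dt * a_n;  // displacement update
            a = -k * u / m;                          // "M.solve(fres, a)" with fint = k*u
            v = v_n + 0.5 * dt * (a_n + a);          // velocity update
        }

        std::printf("u = %f, v = %f\n", u, v);
        return 0;
    }
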
${PROJECT_LIBS}) diff --git a/docs/examples/Statics/LinearElasticity/main.cpp b/docs/examples/Statics/LinearElasticity/main.cpp index af1ff49..f5ae4f0 100644 --- a/docs/examples/Statics/LinearElasticity/main.cpp +++ b/docs/examples/Statics/LinearElasticity/main.cpp @@ -1,97 +1,147 @@ #include +#include #include -#include +#include int main() { // mesh // ---- + // define mesh + GooseFEM::Mesh::Quad4::Regular mesh(5,5); + // mesh dimensions + + size_t nelem = mesh.nelem(); + size_t nne = mesh.nne(); + size_t ndim = mesh.ndim(); + + // mesh definitions + xt::xtensor coor = mesh.coor(); xt::xtensor conn = mesh.conn(); xt::xtensor dofs = mesh.dofs(); - xt::xtensor disp = xt::zeros(coor.shape()); - xt::xtensor fint = xt::zeros(coor.shape()); - xt::xtensor fext = xt::zeros(coor.shape()); - xt::xtensor fres = xt::zeros(coor.shape()); + + // node sets xt::xtensor nodesLeft = mesh.nodesLeftEdge(); xt::xtensor nodesRight = mesh.nodesRightEdge(); xt::xtensor nodesTop = mesh.nodesTopEdge(); xt::xtensor nodesBottom = mesh.nodesBottomEdge(); // fixed displacements DOFs // ------------------------ - xt::xtensor iip = xt::empty({2*nodesLeft.size()+2*nodesBottom.size()}); + xt::xtensor iip = xt::concatenate(xt::xtuple( + xt::view(dofs, xt::keep(nodesRight ), 0), + xt::view(dofs, xt::keep(nodesTop ), 1), + xt::view(dofs, xt::keep(nodesLeft ), 0), + xt::view(dofs, xt::keep(nodesBottom), 1) + )); - { - size_t i = 0; - for ( auto &n : nodesRight ) { iip(i) = dofs(n,0); ++i; } - for ( auto &n : nodesTop ) { iip(i) = dofs(n,1); ++i; } - for ( auto &n : nodesLeft ) { iip(i) = dofs(n,0); ++i; } - for ( auto &n : nodesBottom ) { iip(i) = dofs(n,1); ++i; } - } + // simulation variables + // -------------------- - // element definition - // ------------------ - - xt::xtensor Eps, Sig; - xt::xtensor C; + // vector definition GooseFEM::VectorPartitioned vector(conn, dofs, iip); - GooseFEM::MatrixPartitioned K (conn, dofs, iip); - GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.asElement(coor)); + // nodal quantities + + xt::xtensor disp = xt::zeros(coor.shape()); + xt::xtensor fint = xt::zeros(coor.shape()); + xt::xtensor fext = xt::zeros(coor.shape()); + xt::xtensor fres = xt::zeros(coor.shape()); + + // element vectors + + xt::xtensor ue = xt::empty({nelem, nne, ndim}); + xt::xtensor fe = xt::empty({nelem, nne, ndim}); + xt::xtensor Ke = xt::empty({nelem, nne*ndim, nne*ndim}); + + // element/material definition + // --------------------------- - GMatLinearElastic::Cartesian3d::Matrix mat(mesh.nelem(), elem.nip(), 1., 1.); + GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.AsElement(coor)); + + size_t nip = elem.nip(); + + GMatLinearElastic::Cartesian3d::Matrix mat(nelem, nip, 1., 1.); // solve // ----- + // allocate tensors + size_t d = 3; + xt::xtensor Eps = xt::empty({nelem, nip, d, d }); + xt::xtensor Sig = xt::empty({nelem, nip, d, d }); + xt::xtensor C = xt::empty({nelem, nip, d, d, d, d}); + + // allocate system matrix + GooseFEM::MatrixPartitioned K(conn, dofs, iip); + // strain - Eps = elem.symGradN_vector(vector.asElement(disp)); + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); // stress & tangent - std::tie(Sig,C) = mat.Tangent(Eps); + mat.Tangent(Eps, Sig, C); // internal force - fint = vector.assembleNode(elem.int_gradN_dot_tensor2_dV(Sig)); + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); // stiffness matrix - K.assemble(elem.int_gradN_dot_tensor4_dot_gradNT_dV(C)); + elem.int_gradN_dot_tensor4_dot_gradNT_dV(C, Ke); + K.assemble(Ke); // set fixed 
displacements - for ( auto &n : nodesRight ) disp(n,0) = 0.1; - for ( auto &n : nodesTop ) disp(n,1) = -0.1; - for ( auto &n : nodesLeft ) disp(n,0) = 0.0; - for ( auto &n : nodesBottom ) disp(n,1) = 0.0; + xt::view(disp, xt::keep(nodesRight ), 0) = +0.1; + xt::view(disp, xt::keep(nodesTop ), 1) = -0.1; + xt::view(disp, xt::keep(nodesLeft ), 0) = 0.0; + xt::view(disp, xt::keep(nodesBottom), 1) = 0.0; - // compute residual + // residual xt::noalias(fres) = fext - fint; // solve K.solve(fres, disp); - // print result - std::cout << disp << std::endl; + // post-process + // ------------ + + // compute strain and stress + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); + mat.Sig(Eps, Sig); + + // internal force + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); + + // apply reaction force + vector.copy_p(fint, fext); + + // residual + xt::noalias(fres) = fext - fint; - // store result + // print residual + std::cout << xt::sum(xt::abs(fres))[0] / xt::sum(xt::abs(fext))[0] << std::endl; - Eps = elem.symGradN_vector(vector.asElement(disp)); - Sig = mat.Sig(Eps); + // average stress per node + xt::xtensor dV = elem.DV(2); + xt::xtensor SigAv = xt::average(Sig, dV, {1}); - xt::xtensor dV = elem.dVtensor(); - xt::xtensor SigBar = xt::average(Sig, dV, {1}); + // write output HighFive::File file("main.h5", HighFive::File::Overwrite); - LowFive::xtensor::dump(file, "/coor", coor); - LowFive::xtensor::dump(file, "/conn", conn); - LowFive::xtensor::dump(file, "/disp", disp); - LowFive::xtensor::dump(file, "/Sig" , SigBar); + xt::dump(file, "/coor", coor); + xt::dump(file, "/conn", conn); + xt::dump(file, "/disp", disp); + xt::dump(file, "/Sig" , SigAv); return 0; } diff --git a/docs/examples/Statics/LinearElasticity/main_manualPartition.cpp b/docs/examples/Statics/LinearElasticity/main_manualPartition.cpp index 2c9eeec..e6188da 100644 --- a/docs/examples/Statics/LinearElasticity/main_manualPartition.cpp +++ b/docs/examples/Statics/LinearElasticity/main_manualPartition.cpp @@ -1,79 +1,164 @@ #include +#include #include +#include int main() { // mesh // ---- + // define mesh + GooseFEM::Mesh::Quad4::Regular mesh(5,5); + // mesh dimensions + + size_t nelem = mesh.nelem(); + size_t nne = mesh.nne(); + size_t ndim = mesh.ndim(); + + // mesh definitions + xt::xtensor coor = mesh.coor(); xt::xtensor conn = mesh.conn(); xt::xtensor dofs = mesh.dofs(); - xt::xtensor disp = xt::zeros(coor.shape()); - xt::xtensor fint = xt::zeros(coor.shape()); - xt::xtensor fext = xt::zeros(coor.shape()); - xt::xtensor fres = xt::zeros(coor.shape()); + + // node sets xt::xtensor nodesLeft = mesh.nodesLeftEdge(); xt::xtensor nodesRight = mesh.nodesRightEdge(); xt::xtensor nodesTop = mesh.nodesTopEdge(); xt::xtensor nodesBottom = mesh.nodesBottomEdge(); - // fixed displacements DOFs & displacements - // ---------------------------------------- - - xt::xtensor iip = xt::empty({2*nodesLeft.size()+2*nodesBottom.size()}); - xt::xtensor u_p = xt::zeros(iip.shape()); + // fixed displacements DOFs + // ------------------------ - { - size_t i = 0; - for ( auto &n : nodesRight ) { iip(i) = dofs(n,0); u_p(i) = 0.1; ++i; } - for ( auto &n : nodesTop ) { iip(i) = dofs(n,1); u_p(i) = -0.1; ++i; } - for ( auto &n : nodesLeft ) { iip(i) = dofs(n,0); u_p(i) = 0.0; ++i; } - for ( auto &n : nodesBottom ) { iip(i) = dofs(n,1); u_p(i) = 0.0; ++i; } - } + xt::xtensor iip = xt::concatenate(xt::xtuple( + xt::view(dofs, xt::keep(nodesRight ), 0), + xt::view(dofs, xt::keep(nodesTop ), 1), + xt::view(dofs, 
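Note: the statics examples prescribe displacements on part of the DOFs ("p") and solve for the remaining unknowns ("u"). The partitioning behind VectorPartitioned/MatrixPartitioned, K.solve above and K.solve_u below is the standard one (the sub-block symbols are notation, not GooseFEM identifiers):

    \begin{bmatrix} K_{uu} & K_{up} \\ K_{pu} & K_{pp} \end{bmatrix}
    \begin{bmatrix} u_u \\ u_p \end{bmatrix}
    =
    \begin{bmatrix} f_u \\ f_p \end{bmatrix}
    \quad\Rightarrow\quad
    u_u = K_{uu}^{-1}\left(f_u - K_{up}\,u_p\right),
    \qquad
    f_p = K_{pu}\,u_u + K_{pp}\,u_p .

The second relation gives the reaction forces on the prescribed DOFs; that is what vector.copy_p(fint, fext) (and vector.asDofs_p in the manually partitioned variant) extracts before the relative residual is printed.
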
xt::keep(nodesLeft ), 0), + xt::view(dofs, xt::keep(nodesBottom), 1) + )); - // element definition - // ------------------ + // simulation variables + // -------------------- - xt::xtensor Eps, Sig; - xt::xtensor C; + // vector definition GooseFEM::VectorPartitioned vector(conn, dofs, iip); - GooseFEM::MatrixPartitioned K (conn, dofs, iip); - GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.asElement(coor)); + // nodal quantities + + xt::xtensor disp = xt::zeros(coor.shape()); + xt::xtensor fint = xt::zeros(coor.shape()); + xt::xtensor fext = xt::zeros(coor.shape()); + xt::xtensor fres = xt::zeros(coor.shape()); + + // DOF values + + xt::xtensor u_u = xt::zeros({vector.nnu()}); + xt::xtensor fres_u = xt::zeros({vector.nnu()}); + xt::xtensor fext_p = xt::zeros({vector.nnp()}); + + // element vectors + + xt::xtensor ue = xt::empty({nelem, nne, ndim}); + xt::xtensor fe = xt::empty({nelem, nne, ndim}); + xt::xtensor Ke = xt::empty({nelem, nne*ndim, nne*ndim}); - GMatLinearElastic::Cartesian3d::Matrix mat(mesh.nelem(), elem.nip(), 1., 1.); + // element/material definition + // --------------------------- + + GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.AsElement(coor)); + + size_t nip = elem.nip(); + + GMatLinearElastic::Cartesian3d::Matrix mat(nelem, nip, 1., 1.); // solve // ----- + // allocate tensors + size_t d = 3; + xt::xtensor Eps = xt::empty({nelem, nip, d, d }); + xt::xtensor Sig = xt::empty({nelem, nip, d, d }); + xt::xtensor C = xt::empty({nelem, nip, d, d, d, d}); + + // allocate system matrix + GooseFEM::MatrixPartitioned K(conn, dofs, iip); + // strain - Eps = elem.symGradN_vector(vector.asElement(disp)); + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); // stress & tangent - std::tie(Sig,C) = mat.Tangent(Eps); + mat.Tangent(Eps, Sig, C); // internal force - fint = vector.assembleNode(elem.int_gradN_dot_tensor2_dV(Sig)); + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); // stiffness matrix - K.assemble(elem.int_gradN_dot_tensor4_dot_gradNT_dV(C)); - - // compute residual + elem.int_gradN_dot_tensor4_dot_gradNT_dV(C, Ke); + K.assemble(Ke); + + // set fixed displacements + xt::xtensor u_p = xt::concatenate(xt::xtuple( + +0.1 * xt::ones({nodesRight .size()}), + -0.1 * xt::ones({nodesTop .size()}), + 0.0 * xt::ones({nodesLeft .size()}), + 0.0 * xt::ones({nodesBottom.size()}) + )); + + // residual xt::noalias(fres) = fext - fint; + // partition + vector.asDofs_u(fres, fres_u); + // solve - xt::xtensor u_u = K.solve_u(vector.asDofs_u(fres), u_p); + K.solve_u(fres_u, u_p, u_u); // assemble to nodal vector - disp = vector.asNode(u_u, u_p); + vector.asNode(u_u, u_p, disp); + + // post-process + // ------------ + + // compute strain and stress + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); + mat.Sig(Eps, Sig); + + // internal force + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); + + // apply reaction force + vector.asDofs_p(fint, fext_p); + + // residual + xt::noalias(fres) = fext - fint; + + // partition + vector.asDofs_u(fres, fres_u); + + // print residual + std::cout << xt::sum(xt::abs(fres_u))[0] / xt::sum(xt::abs(fext_p))[0] << std::endl; + + // average stress per node + xt::xtensor dV = elem.DV(2); + xt::xtensor SigAv = xt::average(Sig, dV, {1}); + + // write output + + HighFive::File file("main.h5", HighFive::File::Overwrite); - // print result - std::cout << disp << std::endl; + xt::dump(file, "/coor", coor); + xt::dump(file, "/conn", conn); + xt::dump(file, "/disp", disp); + 
xt::dump(file, "/Sig" , SigAv); return 0; } diff --git a/docs/examples/Statics/LinearElasticityPeriodic/CMakeLists.txt b/docs/examples/Statics/LinearElasticityPeriodic/CMakeLists.txt index d79d331..24c53c3 100644 --- a/docs/examples/Statics/LinearElasticityPeriodic/CMakeLists.txt +++ b/docs/examples/Statics/LinearElasticityPeriodic/CMakeLists.txt @@ -1,57 +1,64 @@ cmake_minimum_required(VERSION 3.1) # basic info # ---------- # define a project name project(main) # define empty list of libraries to link set(PROJECT_LIBS "") # copy files to build folder configure_file("plot.py" "plot.py" COPYONLY) # basic compiler options # ---------------------- # set optimization level set(CMAKE_BUILD_TYPE Release) # set C++ standard set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) # set warnings on set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -g") # enable optimisation set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DXTENSOR_USE_XSIMD=ON") +# assertions +if(NOT ASSERT) + add_definitions(-DNDEBUG) +else() + add_definitions(-DXTENSOR_ENABLE_ASSERT=ON) +endif() + # load packages # ------------- # add custom include paths if(NOT "$ENV{INCLUDE_PATH}" STREQUAL "") string(REPLACE ":" ";" INCLUDE_LIST "$ENV{INCLUDE_PATH}") include_directories(${INCLUDE_LIST}) endif() # load HDF5 find_package(HDF5 COMPONENTS CXX REQUIRED) include_directories(${HDF5_INCLUDE_DIRS}) set(PROJECT_LIBS ${PROJECT_LIBS} ${HDF5_LIBS} ${HDF5_LIBRARIES}) # load eigen find_package(PkgConfig) pkg_check_modules(EIGEN3 REQUIRED eigen3) include_directories(SYSTEM ${EIGEN3_INCLUDE_DIRS}) # create executable # ----------------- add_executable(${PROJECT_NAME} main.cpp) target_link_libraries(${PROJECT_NAME} ${PROJECT_LIBS}) diff --git a/docs/examples/Statics/LinearElasticityPeriodic/main.cpp b/docs/examples/Statics/LinearElasticityPeriodic/main.cpp index 38438e3..0d0af5c 100644 --- a/docs/examples/Statics/LinearElasticityPeriodic/main.cpp +++ b/docs/examples/Statics/LinearElasticityPeriodic/main.cpp @@ -1,88 +1,171 @@ #include +#include +#include +#include #include -#include +#include int main() { // mesh // ---- + // define mesh + GooseFEM::Mesh::Quad4::Regular mesh(5,5); + // mesh dimensions + + size_t nelem = mesh.nelem(); + size_t nne = mesh.nne(); + size_t ndim = mesh.ndim(); + + // mesh definitions + xt::xtensor coor = mesh.coor(); xt::xtensor conn = mesh.conn(); - xt::xtensor dofs = mesh.dofsPeriodic(); + xt::xtensor dofs = mesh.dofs(); + + // periodicity and fixed displacements DOFs + // ---------------------------------------- + + // add control nodes/DOFs + + xt::xtensor control_dofs = xt::arange(ndim*ndim).reshape({ndim,ndim}); + + control_dofs += xt::amax(dofs)[0] + 1; + + xt::xtensor control_nodes = mesh.nnode() + xt::arange(ndim); + + coor = xt::concatenate(xt::xtuple(coor, xt::zeros({ndim,ndim}))); + + dofs = xt::concatenate(xt::xtuple(dofs, control_dofs)); + + // extract fixed DOFs + + xt::xtensor iip = xt::concatenate(xt::xtuple( + xt::reshape_view(control_dofs, {ndim*ndim}), + xt::reshape_view(xt::view(dofs, xt::keep(mesh.nodesOrigin()), xt::all()), {ndim}) + )); + + // get DOF-tyings, reorganise system + + GooseFEM::Tyings::Periodic tyings(coor, dofs, control_dofs, mesh.nodesPeriodic(), iip); + + dofs = tyings.dofs(); + + // simulation variables + // -------------------- + + // vector definition + + GooseFEM::VectorPartitionedTyings vector(conn, dofs, tyings.Cdu(), tyings.Cdp(), tyings.Cdi()); + + // nodal quantities + xt::xtensor disp = 
xt::zeros(coor.shape()); xt::xtensor fint = xt::zeros(coor.shape()); xt::xtensor fext = xt::zeros(coor.shape()); xt::xtensor fres = xt::zeros(coor.shape()); - // element definition - // ------------------ + // element vectors + + xt::xtensor ue = xt::empty({nelem, nne, ndim}); + xt::xtensor fe = xt::empty({nelem, nne, ndim}); + xt::xtensor Ke = xt::empty({nelem, nne*ndim, nne*ndim}); - xt::xtensor Eps, Sig; - xt::xtensor C; + // element/material definition + // --------------------------- - GooseFEM::Vector vector(conn, dofs); - GooseFEM::Matrix K (conn, dofs); + GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.AsElement(coor)); - GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.asElement(coor)); + size_t nip = elem.nip(); - GMatLinearElastic::Cartesian3d::Matrix mat(mesh.nelem(), elem.nip()); + GMatLinearElastic::Cartesian3d::Matrix mat(nelem, nip); - xt::xtensor Ihard = xt::zeros({mesh.nelem(), elem.nip()}); + xt::xtensor Ihard = xt::zeros({nelem, nip}); - for ( size_t q = 0 ; q < elem.nip() ; ++q ) { - Ihard(0,q) = 1; - Ihard(1,q) = 1; - Ihard(5,q) = 1; - Ihard(6,q) = 1; - } + xt::view(Ihard, xt::keep(0,1,5,6), xt::all()) = 1; - xt::xtensor Isoft = xt::ones({mesh.nelem(), elem.nip()}) - Ihard; + xt::xtensor Isoft = xt::ones({nelem, nip}) - Ihard; mat.set(Isoft,10.,1. ); mat.set(Ihard,10.,10.); // solve // ----- - // introduce affine displacement - for ( size_t n = 0 ; n < coor.shape()[0] ; ++n ) - disp(n,0) += 0.1 * ( coor(n,1) - coor(0,1) ); + // allocate tensors + size_t d = 3; + xt::xtensor Eps = xt::empty({nelem, nip, d, d }); + xt::xtensor Sig = xt::empty({nelem, nip, d, d }); + xt::xtensor C = xt::empty({nelem, nip, d, d, d, d}); + + // allocate system matrix + GooseFEM::MatrixPartitionedTyings K(conn, dofs, tyings.Cdu(), tyings.Cdp()); // strain - Eps = elem.symGradN_vector(vector.asElement(disp)); + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); // stress & tangent - std::tie(Sig,C) = mat.Tangent(Eps); + mat.Tangent(Eps, Sig, C); // internal force - fint = vector.assembleNode(elem.int_gradN_dot_tensor2_dV(Sig)); + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); // stiffness matrix - K.assemble(elem.int_gradN_dot_tensor4_dot_gradNT_dV(C)); + elem.int_gradN_dot_tensor4_dot_gradNT_dV(C, Ke); + K.assemble(Ke); + + // set fixed displacements + disp(control_nodes(0),1) = 0.1; - // compute residual + // residual xt::noalias(fres) = fext - fint; // solve K.solve(fres, disp); - // store result + // post-process + // ------------ + + // compute strain and stress + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); + mat.Sig(Eps, Sig); + + // internal force + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); + + // allocate DOF-list + xt::xtensor Fext = xt::zeros({tyings.nni()}); + xt::xtensor Fint = xt::zeros({tyings.nni()}); + + // internal/external force on DOFs + vector.asDofs_i(fext, Fext); + vector.asDofs_i(fint, Fint); + + // apply reaction force + vector.copy_p(Fint, Fext); + + // print residual + std::cout << xt::sum(xt::abs(Fext-Fint))[0] / xt::sum(xt::abs(Fext))[0] << std::endl; - Eps = elem.symGradN_vector(vector.asElement(disp)); - Sig = mat.Sig(Eps); + // average stress per node + xt::xtensor dV = elem.DV(2); + xt::xtensor SigAv = xt::average(Sig, dV, {1}); - xt::xtensor dV = elem.dVtensor(); - xt::xtensor SigBar = xt::average(Sig, dV, {1}); + // write output HighFive::File file("main.h5", HighFive::File::Overwrite); - LowFive::xtensor::dump(file, "/coor", coor); - LowFive::xtensor::dump(file, 
"/conn", conn); - LowFive::xtensor::dump(file, "/disp", disp); - LowFive::xtensor::dump(file, "/Sig" , SigBar); + xt::dump(file, "/coor", coor); + xt::dump(file, "/conn", conn); + xt::dump(file, "/disp", disp); + xt::dump(file, "/Sig" , SigAv); return 0; } diff --git a/docs/examples/Statics/LinearElasticityPeriodic/plot.py b/docs/examples/Statics/LinearElasticityPeriodic/plot.py index 0bf1209..2ac28f9 100644 --- a/docs/examples/Statics/LinearElasticityPeriodic/plot.py +++ b/docs/examples/Statics/LinearElasticityPeriodic/plot.py @@ -1,51 +1,51 @@ import h5py import matplotlib.pyplot as plt import GooseMPL as gplt import numpy as np plt.style.use(['goose', 'goose-latex']) # open file file = h5py.File('main.h5', 'r') # read fields coor = file['/coor'][...] conn = file['/conn'][...] disp = file['/disp'][...] Sig = file['/Sig' ][...] # extract dimension nelem = conn.shape[0] # tensor products ddot22 = lambda A2,B2: np.einsum('eij ,eji->e ',A2,B2) ddot42 = lambda A4,B2: np.einsum('eijkl,elk->eij ',A4,B2) dyad22 = lambda A2,B2: np.einsum('eij ,ekl->eijkl',A2,B2) # identity tensor (single tensor) i = np.eye(3) # identity tensors (grid) I = np.einsum('ij ,e' , i ,np.ones([nelem])) I4 = np.einsum('ijkl,e->eijkl',np.einsum('il,jk',i,i),np.ones([nelem])) I4rt = np.einsum('ijkl,e->eijkl',np.einsum('ik,jl',i,i),np.ones([nelem])) I4s = (I4+I4rt)/2. II = dyad22(I,I) I4d = I4s-II/3. # compute equivalent stress Sigd = ddot42(I4d, Sig) sigeq = np.sqrt(3./2.*ddot22(Sigd,Sigd)) # plot fig, ax = plt.subplots() -gplt.patch(coor=coor+disp, conn=conn, cindex=sigeq, cmap='jet', axis=ax, clim=(0,0.1)) +gplt.patch(coor=coor+disp, conn=conn, cindex=sigeq, cmap='jet', axis=ax, clim=(0,0.5)) gplt.patch(coor=coor, conn=conn, linestyle='--', axis=ax) plt.show() diff --git a/docs/examples/Statics/LinearElasticityPeriodicPartial/CMakeLists.txt b/docs/examples/Statics/LinearElasticityPeriodicPartial/CMakeLists.txt index d79d331..24c53c3 100644 --- a/docs/examples/Statics/LinearElasticityPeriodicPartial/CMakeLists.txt +++ b/docs/examples/Statics/LinearElasticityPeriodicPartial/CMakeLists.txt @@ -1,57 +1,64 @@ cmake_minimum_required(VERSION 3.1) # basic info # ---------- # define a project name project(main) # define empty list of libraries to link set(PROJECT_LIBS "") # copy files to build folder configure_file("plot.py" "plot.py" COPYONLY) # basic compiler options # ---------------------- # set optimization level set(CMAKE_BUILD_TYPE Release) # set C++ standard set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) # set warnings on set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -g") # enable optimisation set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DXTENSOR_USE_XSIMD=ON") +# assertions +if(NOT ASSERT) + add_definitions(-DNDEBUG) +else() + add_definitions(-DXTENSOR_ENABLE_ASSERT=ON) +endif() + # load packages # ------------- # add custom include paths if(NOT "$ENV{INCLUDE_PATH}" STREQUAL "") string(REPLACE ":" ";" INCLUDE_LIST "$ENV{INCLUDE_PATH}") include_directories(${INCLUDE_LIST}) endif() # load HDF5 find_package(HDF5 COMPONENTS CXX REQUIRED) include_directories(${HDF5_INCLUDE_DIRS}) set(PROJECT_LIBS ${PROJECT_LIBS} ${HDF5_LIBS} ${HDF5_LIBRARIES}) # load eigen find_package(PkgConfig) pkg_check_modules(EIGEN3 REQUIRED eigen3) include_directories(SYSTEM ${EIGEN3_INCLUDE_DIRS}) # create executable # ----------------- add_executable(${PROJECT_NAME} main.cpp) target_link_libraries(${PROJECT_NAME} ${PROJECT_LIBS}) diff --git 
a/docs/examples/Statics/LinearElasticityPeriodicPartial/main.cpp b/docs/examples/Statics/LinearElasticityPeriodicPartial/main.cpp index 883faa8..017508c 100644 --- a/docs/examples/Statics/LinearElasticityPeriodicPartial/main.cpp +++ b/docs/examples/Statics/LinearElasticityPeriodicPartial/main.cpp @@ -1,114 +1,159 @@ #include +#include #include -#include +#include int main() { // mesh // ---- + // define mesh + GooseFEM::Mesh::Quad4::Regular mesh(5,5); + // mesh dimensions + + size_t nelem = mesh.nelem(); + size_t nne = mesh.nne(); + size_t ndim = mesh.ndim(); + + // mesh definitions + xt::xtensor coor = mesh.coor(); xt::xtensor conn = mesh.conn(); xt::xtensor dofs = mesh.dofs(); - xt::xtensor disp = xt::zeros(coor.shape()); - xt::xtensor fint = xt::zeros(coor.shape()); - xt::xtensor fext = xt::zeros(coor.shape()); - xt::xtensor fres = xt::zeros(coor.shape()); + + // node sets xt::xtensor nodesLeft = mesh.nodesLeftOpenEdge(); xt::xtensor nodesRight = mesh.nodesRightOpenEdge(); xt::xtensor nodesTop = mesh.nodesTopEdge(); xt::xtensor nodesBottom = mesh.nodesBottomEdge(); - // periodic and fixed displacements DOFs - // ------------------------------------- + // periodicity and fixed displacements DOFs + // ---------------------------------------- for ( size_t i = 0 ; i < nodesLeft.size() ; ++i ) for ( size_t j = 0 ; j < coor.shape()[1] ; ++j ) dofs(nodesRight(i),j) = dofs(nodesLeft(i),j); dofs = GooseFEM::Mesh::renumber(dofs); - xt::xtensor iip = xt::empty({4*nodesBottom.size()}); + xt::xtensor iip = xt::concatenate(xt::xtuple( + xt::view(dofs, xt::keep(nodesBottom), 0), + xt::view(dofs, xt::keep(nodesBottom), 1), + xt::view(dofs, xt::keep(nodesTop ), 0), + xt::view(dofs, xt::keep(nodesTop ), 1) + )); - { - size_t i = 0; - for ( auto &n : nodesBottom ) { iip(i) = dofs(n,0); ++i; } - for ( auto &n : nodesBottom ) { iip(i) = dofs(n,1); ++i; } - for ( auto &n : nodesTop ) { iip(i) = dofs(n,0); ++i; } - for ( auto &n : nodesTop ) { iip(i) = dofs(n,1); ++i; } - } + // simulation variables + // -------------------- - // element definition - // ------------------ - - xt::xtensor Eps, Sig; - xt::xtensor C; + // vector definition GooseFEM::VectorPartitioned vector(conn, dofs, iip); - GooseFEM::MatrixPartitioned K (conn, dofs, iip); - GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.asElement(coor)); + // nodal quantities + + xt::xtensor disp = xt::zeros(coor.shape()); + xt::xtensor fint = xt::zeros(coor.shape()); + xt::xtensor fext = xt::zeros(coor.shape()); + xt::xtensor fres = xt::zeros(coor.shape()); + + // element vectors + + xt::xtensor ue = xt::empty({nelem, nne, ndim}); + xt::xtensor fe = xt::empty({nelem, nne, ndim}); + xt::xtensor Ke = xt::empty({nelem, nne*ndim, nne*ndim}); + + // element/material definition + // --------------------------- - GMatLinearElastic::Cartesian3d::Matrix mat(mesh.nelem(), elem.nip()); + GooseFEM::Element::Quad4::QuadraturePlanar elem(vector.AsElement(coor)); - xt::xtensor Ihard = xt::zeros({mesh.nelem(), elem.nip()}); + size_t nip = elem.nip(); - for ( size_t q = 0 ; q < elem.nip() ; ++q ) { - Ihard(0,q) = 1; - Ihard(1,q) = 1; - Ihard(5,q) = 1; - Ihard(6,q) = 1; - } + GMatLinearElastic::Cartesian3d::Matrix mat(nelem, nip); - xt::xtensor Isoft = xt::ones({mesh.nelem(), elem.nip()}) - Ihard; + xt::xtensor Ihard = xt::zeros({nelem, nip}); + + xt::view(Ihard, xt::keep(0,1,5,6), xt::all()) = 1; + + xt::xtensor Isoft = xt::ones({nelem, nip}) - Ihard; mat.set(Isoft,10.,1. 
); mat.set(Ihard,10.,10.); // solve // ----- + // allocate tensors + size_t d = 3; + xt::xtensor Eps = xt::empty({nelem, nip, d, d }); + xt::xtensor Sig = xt::empty({nelem, nip, d, d }); + xt::xtensor C = xt::empty({nelem, nip, d, d, d, d}); + + // allocate system matrix + GooseFEM::MatrixPartitioned K(conn, dofs, iip); + // strain - Eps = elem.symGradN_vector(vector.asElement(disp)); + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); // stress & tangent - std::tie(Sig,C) = mat.Tangent(Eps); + mat.Tangent(Eps, Sig, C); // internal force - fint = vector.assembleNode(elem.int_gradN_dot_tensor2_dV(Sig)); + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); // stiffness matrix - K.assemble(elem.int_gradN_dot_tensor4_dot_gradNT_dV(C)); + elem.int_gradN_dot_tensor4_dot_gradNT_dV(C, Ke); + K.assemble(Ke); // set fixed displacements - for ( auto &n : nodesTop ) disp(n,0) = 0.1; + xt::view(disp, xt::keep(nodesTop), xt::keep(0)) = +0.1; - // compute residual + // residual xt::noalias(fres) = fext - fint; // solve K.solve(fres, disp); - // print result - std::cout << disp << std::endl; + // post-process + // ------------ + + // compute strain and stress + vector.asElement(disp, ue); + elem.symGradN_vector(ue, Eps); + mat.Sig(Eps, Sig); + + // internal force + elem.int_gradN_dot_tensor2_dV(Sig, fe); + vector.assembleNode(fe, fint); + + // apply reaction force + vector.copy_p(fint, fext); + + // residual + xt::noalias(fres) = fext - fint; - // store result + // print residual + std::cout << xt::sum(xt::abs(fres))[0] / xt::sum(xt::abs(fext))[0] << std::endl; - Eps = elem.symGradN_vector(vector.asElement(disp)); - Sig = mat.Sig(Eps); + // average stress per node + xt::xtensor dV = elem.DV(2); + xt::xtensor SigAv = xt::average(Sig, dV, {1}); - xt::xtensor dV = elem.dVtensor(); - xt::xtensor SigBar = xt::average(Sig, dV, {1}); + // write output HighFive::File file("main.h5", HighFive::File::Overwrite); - LowFive::xtensor::dump(file, "/coor", coor); - LowFive::xtensor::dump(file, "/conn", conn); - LowFive::xtensor::dump(file, "/disp", disp); - LowFive::xtensor::dump(file, "/Sig" , SigBar); + xt::dump(file, "/coor", coor); + xt::dump(file, "/conn", conn); + xt::dump(file, "/disp", disp); + xt::dump(file, "/Sig" , SigAv); return 0; } diff --git a/include/GooseFEM/Element.h b/include/GooseFEM/Element.h index 44de102..4d658d6 100644 --- a/include/GooseFEM/Element.h +++ b/include/GooseFEM/Element.h @@ -1,45 +1,47 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENT_H #define GOOSEFEM_ELEMENT_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { namespace Element { // ------------------------------------------------------------------------------------------------- -// convert nodal vector [nnode, ndim] to nodal vector stored per element [nelem, nne, ndim] +// Convert nodal vector [nnode, ndim] to nodal vector stored per element [nelem, nne, ndim] inline xt::xtensor asElementVector( - const xt::xtensor &conn, const xt::xtensor &nodevec); + const xt::xtensor& conn, + const xt::xtensor& nodevec); -// assemble nodal vector stored per element [nelem, nne, ndim] to nodal vector [nnode, ndim] +// Assemble nodal vector stored per element [nelem, nne, ndim] to nodal vector [nnode, ndim] inline xt::xtensor assembleNodeVector( - const xt::xtensor &conn, const xt::xtensor &elemvec); + const xt::xtensor& conn, + const xt::xtensor& elemvec); -// check DOFs to leave no holes -template inline bool isSequential(const E &dofs); +// Check that DOFs leave no holes +template inline bool isSequential(const E& dofs); -// check structure of the matrices stored per element [nelem, nne*ndim, nne*ndim] -bool isDiagonal(const xt::xtensor &elemmat); +// Check structure of the matrices stored per element [nelem, nne*ndim, nne*ndim] +bool isDiagonal(const xt::xtensor& elemmat); // ------------------------------------------------------------------------------------------------- }} // namespace ... // ================================================================================================= #include "Element.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/Element.hpp b/include/GooseFEM/Element.hpp index 8f13a67..de511f0 100644 --- a/include/GooseFEM/Element.hpp +++ b/include/GooseFEM/Element.hpp @@ -1,125 +1,111 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENT_HPP #define GOOSEFEM_ELEMENT_HPP // ------------------------------------------------------------------------------------------------- #include "Element.h" // ================================================================================================= namespace GooseFEM { namespace Element { // ------------------------------------------------------------------------------------------------- inline xt::xtensor asElementVector( - const xt::xtensor &conn, const xt::xtensor &nodevec) + const xt::xtensor& conn, + const xt::xtensor& nodevec) { - // extract dimensions - size_t nelem = conn .shape()[0]; // number of elements - size_t nne = conn .shape()[1]; // number of nodes per element - size_t ndim = nodevec.shape()[1]; // number of dimensions + size_t nelem = conn.shape()[0]; + size_t nne = conn.shape()[1]; + size_t ndim = nodevec.shape()[1]; - // allocate output: nodal vectors stored per element xt::xtensor elemvec = xt::empty({nelem, nne, ndim}); - // read from nodal vectors #pragma omp parallel for - for ( size_t e = 0 ; e < nelem ; ++e ) - for ( size_t m = 0 ; m < nne ; ++m ) - for ( size_t i = 0 ; i < ndim ; ++i ) + for (size_t e = 0 ; e < nelem ; ++e) + for (size_t m = 0 ; m < nne ; ++m) + for (size_t i = 0 ; i < ndim ; ++i) elemvec(e,m,i) = nodevec(conn(e,m),i); return elemvec; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor assembleNodeVector( - const xt::xtensor &conn, const xt::xtensor &elemvec) + const xt::xtensor& conn, + const xt::xtensor& elemvec) { - // extract dimensions - size_t nelem = conn.shape()[0]; // number of elements - size_t nne = conn.shape()[1]; // number of nodes per element - size_t ndim = elemvec.shape()[2]; // number of dimensions - size_t nnode = xt::amax(conn)[0]+1; // number of nodes + size_t nelem = conn.shape()[0]; + size_t nne = conn.shape()[1]; + size_t ndim = elemvec.shape()[2]; + size_t nnode = xt::amax(conn)[0]+1; - // check input - assert( elemvec.shape()[0] == nelem ); - assert( elemvec.shape()[1] == nne ); + assert(elemvec.shape()[0] == nelem); + assert(elemvec.shape()[1] == nne ); - // zero-initialize output: nodal vectors xt::xtensor nodevec = xt::zeros({nnode, ndim}); - // assemble from nodal vectors stored per element - for ( size_t e = 0 ; e < nelem ; ++e ) - for ( size_t m = 0 ; m < nne ; ++m ) - for ( size_t i = 0 ; i < ndim ; ++i ) + for (size_t e = 0 ; e < nelem ; ++e) + for (size_t m = 0 ; m < nne ; ++m) + for (size_t i = 0 ; i < ndim ; ++i) nodevec(conn(e,m),i) += elemvec(e,m,i); return nodevec; } // ------------------------------------------------------------------------------------------------- template -inline bool isSequential(const E &dofs) +inline bool isSequential(const E& dofs) { - // number of DOFs size_t ndof = xt::amax(dofs)[0] + 1; - // list to check if all DOFs are present - // - allocate xt::xtensor exists = xt::zeros({ndof}); - // - fill - for ( auto &i : dofs ) exists[i]++; - // check - for ( auto &i : dofs ) + for (auto& i: dofs) + exists[i]++; + + for (auto& i: dofs) if ( exists[i] == 0 ) return false; - // checks passed successfully return true; } // ------------------------------------------------------------------------------------------------- -inline bool isDiagonal(const xt::xtensor &elemmat) +inline bool isDiagonal(const xt::xtensor& elemmat) 
{ - // check input - assert( elemmat.shape()[1] == elemmat.shape()[2] ); + assert(elemmat.shape()[1] == elemmat.shape()[2]); - // get dimensions size_t nelem = elemmat.shape()[0]; size_t N = elemmat.shape()[1]; - // get numerical precision double eps = std::numeric_limits::epsilon(); - // loop over all entries #pragma omp parallel for - for ( size_t e = 0 ; e < nelem ; ++e ) - for ( size_t i = 0 ; i < N ; ++i ) - for ( size_t j = 0 ; j < N ; ++j ) + for (size_t e = 0 ; e < nelem ; ++e) + for (size_t i = 0 ; i < N ; ++i) + for (size_t j = 0 ; j < N ; ++j) if ( i != j ) if ( std::abs(elemmat(e,i,j)) > eps ) return false; - // all checks passed successful return true; } // ------------------------------------------------------------------------------------------------- }} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementHex8.h b/include/GooseFEM/ElementHex8.h index cbdd1e1..83c6067 100644 --- a/include/GooseFEM/ElementHex8.h +++ b/include/GooseFEM/ElementHex8.h @@ -1,163 +1,192 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTHEX8_H #define GOOSEFEM_ELEMENTHEX8_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Hex8 { // ------------------------------------------------------------------------------------------------- -inline double inv(const xt::xtensor_fixed> &A, - xt::xtensor_fixed> &Ainv); +inline double inv( + const xt::xtensor_fixed>& A, + xt::xtensor_fixed>& Ainv); // ------------------------------------------------------------------------------------------------- namespace Gauss { inline size_t nip(); // number of integration points inline xt::xtensor xi(); // integration point coordinates (local coordinates) inline xt::xtensor w(); // integration point weights } // ------------------------------------------------------------------------------------------------- namespace Nodal { inline size_t nip(); // number of integration points inline xt::xtensor xi(); // integration point coordinates (local coordinates) inline xt::xtensor w(); // integration point weights } // ------------------------------------------------------------------------------------------------- class Quadrature { public: - // fixed dimensions: + // Fixed dimensions: // ndim = 3 - number of dimensions // nne = 8 - number of nodes per element // - // naming convention: + // Naming convention: // "elemmat" - matrices stored per element - [nelem, nne*ndim, nne*ndim] // "elemvec" - nodal vectors stored per element - [nelem, nne, ndim] // "qtensor" - integration point tensor - [nelem, nip, ndim, ndim] // "qscalar" - integration point scalar - [nelem, nip] - // constructor: integration point coordinates and weights are optional (default: Gauss) + // Constructor: integration point coordinates and weights are optional (default: Gauss) Quadrature() = default; - Quadrature(const xt::xtensor &x); + Quadrature( + const xt::xtensor& x); - Quadrature(const xt::xtensor &x, const xt::xtensor &xi, const xt::xtensor &w); + Quadrature( + 
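Note: the two free functions cleaned up in Element.hpp above are the element-level gather and scatter-add: asElementVector copies nodal values into per-element storage through the connectivity, and assembleNodeVector sums per-element contributions back onto the nodes. A minimal sketch using these functions (the 2x1 mesh is arbitrary; it is assumed that the umbrella header GooseFEM/GooseFEM.h pulls in Element.h):

    // sketch: gather ("asElementVector") and scatter-add ("assembleNodeVector")
    #include <GooseFEM/GooseFEM.h>
    #include <xtensor/xio.hpp>
    #include <iostream>

    int main()
    {
        // two quadrilaterals sharing one edge: 6 nodes
        GooseFEM::Mesh::Quad4::Regular mesh(2, 1);

        xt::xtensor<double,2> nodevec = xt::ones<double>({mesh.nnode(), mesh.ndim()});

        // gather: elemvec(e,m,i) = nodevec(conn(e,m),i)
        auto elemvec = GooseFEM::Element::asElementVector(mesh.conn(), nodevec);

        // scatter-add: nodes on the shared edge receive the sum of both contributions
        auto nodesum = GooseFEM::Element::assembleNodeVector(mesh.conn(), elemvec);

        std::cout << nodesum << std::endl;
        return 0;
    }
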
const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w); - // update the nodal positions (shape of "x" should match the earlier definition) + // Update the nodal positions (shape of "x" should match the earlier definition) - void update_x(const xt::xtensor &x); + void update_x(const xt::xtensor& x); - // return dimensions + // Return dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t ndim() const; // number of dimension size_t nip() const; // number of integration points - // return integration volume + // Return shape function gradients - void dV(xt::xtensor &qscalar) const; + xt::xtensor gradN() const; - void dV(xt::xtensor &qtensor) const; // same volume for all tensor components + // Return integration volume - // dyadic product "qtensor(i,j) += dNdx(m,i) * elemvec(m,j)", its transpose and its symmetric part + void dV(xt::xtensor& qscalar) const; - void gradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void dV(xt::xtensor& qtensor) const; // same volume for all tensor components - void gradN_vector_T(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void dV(xt::xarray& qtensor) const; // same volume for all tensor components - void symGradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + // Dyadic product (and its transpose and symmetric part) + // qtensor(i,j) += dNdx(m,i) * elemvec(m,j) - // integral of the scalar product "elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV" + void gradN_vector( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void int_N_scalar_NT_dV(const xt::xtensor &qscalar, - xt::xtensor &elemmat) const; + void gradN_vector_T( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - // integral of the dot product "elemvec(m,j) += dNdx(m,i) * qtensor(i,j) * dV" + void symGradN_vector( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const; + // Integral of the scalar product + // elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV - // integral of the dot product "elemmat(m*2+j, n*2+k) += dNdx(m,i) * qtensor(i,j,k,l) * dNdx(n,l) * dV" + void int_N_scalar_NT_dV( + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const; // overwritten - void int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const; + // Integral of the dot product + // elemvec(m,j) += dNdx(m,i) * qtensor(i,j) * dV - // auto allocation of the functions above + void int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const; // overwritten - xt::xtensor dV() const; + // Integral of the dot product + // elemmat(m*2+j, n*2+k) += dNdx(m,i) * qtensor(i,j,k,l) * dNdx(n,l) * dV - xt::xtensor dVtensor() const; + void int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const; // overwritten - xt::xtensor gradN_vector(const xt::xtensor &elemvec) const; + // Auto-allocation of the functions above - xt::xtensor gradN_vector_T(const xt::xtensor &elemvec) const; + xt::xtensor DV() const; - xt::xtensor symGradN_vector(const xt::xtensor &elemvec) const; + xt::xarray DV( + size_t rank) const; - xt::xtensor int_N_scalar_NT_dV(const xt::xtensor &qscalar) const; + xt::xtensor GradN_vector( + const xt::xtensor& elemvec) const; - xt::xtensor int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor) const; + xt::xtensor 
GradN_vector_T( + const xt::xtensor& elemvec) const; - xt::xtensor int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor) const; + xt::xtensor SymGradN_vector( + const xt::xtensor& elemvec) const; + + xt::xtensor Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const; + + xt::xtensor Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const; + + xt::xtensor Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const; private: - // compute "vol" and "dNdx" based on current "x" + // Compute "vol" and "dNdx" based on current "x" void compute_dN(); private: - // dimensions (flexible) + // Dimensions (flexible) size_t m_nelem; // number of elements size_t m_nip; // number of integration points - // dimensions (fixed for this element type) + // Dimensions (fixed for this element type) static const size_t m_nne=8; // number of nodes per element static const size_t m_ndim=3; // number of dimensions + const size_t m_nd=3; // number of dimensions (non-static, sometimes useful) - // data arrays + // Data arrays xt::xtensor m_x; // nodal positions stored per element [nelem, nne, ndim] xt::xtensor m_w; // weight of each integration point [nip] xt::xtensor m_xi; // local coordinate of each integration point [nip, ndim] xt::xtensor m_N; // shape functions [nip, nne] - xt::xtensor m_dNxi; // shape function gradients w.r.t. local coordinate [nip, nne, ndim] - xt::xtensor m_dNx; // shape function gradients w.r.t. global coordinate [nelem, nip, nne, ndim] + xt::xtensor m_dNxi; // shape function grad. w.r.t. local coor. [nip, nne, ndim] + xt::xtensor m_dNx; // shape function grad. w.r.t. global coor. [nelem, nip, nne, ndim] xt::xtensor m_vol; // integration point volume [nelem, nip] }; // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #include "ElementHex8.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementHex8.hpp b/include/GooseFEM/ElementHex8.hpp index 2d044d2..d58c6a4 100644 --- a/include/GooseFEM/ElementHex8.hpp +++ b/include/GooseFEM/ElementHex8.hpp @@ -1,692 +1,729 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
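// Note (not part of the patch): the new member "m_nd" above duplicates the value of the
// static constant "m_ndim". Presumably it exists because DV(rank), further below, pushes the
// number of dimensions into a std::vector: push_back takes its argument by const reference,
// which would ODR-use the in-class-initialized static constant and thus require an
// out-of-class definition, whereas the non-static copy sidesteps that.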
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTHEX8_HPP #define GOOSEFEM_ELEMENTHEX8_HPP // ------------------------------------------------------------------------------------------------- #include "ElementHex8.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Hex8 { // ================================================================================================= -inline double inv(const xt::xtensor_fixed> &A, - xt::xtensor_fixed> &Ainv) +inline double inv( + const xt::xtensor_fixed>& A, + xt::xtensor_fixed>& Ainv) { // compute determinant double det = ( A(0,0) * A(1,1) * A(2,2) + A(0,1) * A(1,2) * A(2,0) + A(0,2) * A(1,0) * A(2,1) ) - ( A(0,2) * A(1,1) * A(2,0) + A(0,1) * A(1,0) * A(2,2) + A(0,0) * A(1,2) * A(2,1) ); // compute inverse Ainv(0,0) = (A(1,1)*A(2,2)-A(1,2)*A(2,1)) / det; Ainv(0,1) = (A(0,2)*A(2,1)-A(0,1)*A(2,2)) / det; Ainv(0,2) = (A(0,1)*A(1,2)-A(0,2)*A(1,1)) / det; Ainv(1,0) = (A(1,2)*A(2,0)-A(1,0)*A(2,2)) / det; Ainv(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0)) / det; Ainv(1,2) = (A(0,2)*A(1,0)-A(0,0)*A(1,2)) / det; Ainv(2,0) = (A(1,0)*A(2,1)-A(1,1)*A(2,0)) / det; Ainv(2,1) = (A(0,1)*A(2,0)-A(0,0)*A(2,1)) / det; Ainv(2,2) = (A(0,0)*A(1,1)-A(0,1)*A(1,0)) / det; return det; } // ================================================================================================= namespace Gauss { // ------------------------------------------------------------------------------------------------- inline size_t nip() { return 8; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor xi() { size_t nip = 8; size_t ndim = 3; xt::xtensor xi = xt::empty({nip,ndim}); xi(0,0) = -1./std::sqrt(3.); xi(0,1) = -1./std::sqrt(3.); xi(0,2) = -1./std::sqrt(3.); xi(1,0) = +1./std::sqrt(3.); xi(1,1) = -1./std::sqrt(3.); xi(1,2) = -1./std::sqrt(3.); xi(2,0) = +1./std::sqrt(3.); xi(2,1) = +1./std::sqrt(3.); xi(2,2) = -1./std::sqrt(3.); xi(3,0) = -1./std::sqrt(3.); xi(3,1) = +1./std::sqrt(3.); xi(3,2) = -1./std::sqrt(3.); xi(4,0) = -1./std::sqrt(3.); xi(4,1) = -1./std::sqrt(3.); xi(4,2) = +1./std::sqrt(3.); xi(5,0) = +1./std::sqrt(3.); xi(5,1) = -1./std::sqrt(3.); xi(5,2) = +1./std::sqrt(3.); xi(6,0) = +1./std::sqrt(3.); xi(6,1) = +1./std::sqrt(3.); xi(6,2) = +1./std::sqrt(3.); xi(7,0) = -1./std::sqrt(3.); xi(7,1) = +1./std::sqrt(3.); xi(7,2) = +1./std::sqrt(3.); return xi; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor w() { size_t nip = 8; xt::xtensor w = xt::empty({nip}); w(0) = 1.; w(1) = 1.; w(2) = 1.; w(3) = 1.; w(4) = 1.; w(5) = 1.; w(6) = 1.; w(7) = 1.; return w; } // ------------------------------------------------------------------------------------------------- } // ================================================================================================= namespace Nodal { // ------------------------------------------------------------------------------------------------- inline size_t nip() { return 8; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor xi() { size_t nip = 8; size_t ndim = 3; xt::xtensor xi = xt::empty({nip,ndim}); xi(0,0) = -1.; xi(0,1) = -1.; xi(0,2) = -1.; xi(1,0) = +1.; xi(1,1) = -1.; xi(1,2) = -1.; xi(2,0) 
= +1.; xi(2,1) = +1.; xi(2,2) = -1.; xi(3,0) = -1.; xi(3,1) = +1.; xi(3,2) = -1.; xi(4,0) = -1.; xi(4,1) = -1.; xi(4,2) = +1.; xi(5,0) = +1.; xi(5,1) = -1.; xi(5,2) = +1.; xi(6,0) = +1.; xi(6,1) = +1.; xi(6,2) = +1.; xi(7,0) = -1.; xi(7,1) = +1.; xi(7,2) = +1.; return xi; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor w() { size_t nip = 8; xt::xtensor w = xt::empty({nip}); w(0) = 1.; w(1) = 1.; w(2) = 1.; w(3) = 1.; w(4) = 1.; w(5) = 1.; w(6) = 1.; w(7) = 1.; return w; } // ------------------------------------------------------------------------------------------------- } // ================================================================================================= -inline Quadrature::Quadrature(const xt::xtensor &x) : +inline Quadrature::Quadrature(const xt::xtensor& x) : Quadrature(x, Gauss::xi(), Gauss::w()) {}; // ------------------------------------------------------------------------------------------------- -inline Quadrature::Quadrature(const xt::xtensor &x, const xt::xtensor &xi, - const xt::xtensor &w) : m_x(x), m_w(w), m_xi(xi) +inline Quadrature::Quadrature( + const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w) : + m_x(x), m_w(w), m_xi(xi) { - // check input - assert( m_x.shape()[1] == m_nne ); - assert( m_x.shape()[2] == m_ndim ); + assert(m_x.shape()[1] == m_nne ); + assert(m_x.shape()[2] == m_ndim); - // extract number of elements and number of integration points m_nelem = m_x.shape()[0]; m_nip = m_w.size(); - // check input - assert( m_xi.shape()[0] == m_nip ); - assert( m_xi.shape()[1] == m_ndim ); - assert( m_w .size() == m_nip ); + assert(m_xi.shape()[0] == m_nip ); + assert(m_xi.shape()[1] == m_ndim); + assert(m_w .size() == m_nip ); - // allocate arrays m_N = xt::empty({ m_nip, m_nne }); m_dNxi = xt::empty({ m_nip, m_nne, m_ndim}); m_dNx = xt::empty({m_nelem, m_nip, m_nne, m_ndim}); m_vol = xt::empty({m_nelem, m_nip }); // shape functions - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { m_N(q,0) = .125 * (1.-m_xi(q,0)) * (1.-m_xi(q,1)) * (1.-m_xi(q,2)); m_N(q,1) = .125 * (1.+m_xi(q,0)) * (1.-m_xi(q,1)) * (1.-m_xi(q,2)); m_N(q,2) = .125 * (1.+m_xi(q,0)) * (1.+m_xi(q,1)) * (1.-m_xi(q,2)); m_N(q,3) = .125 * (1.-m_xi(q,0)) * (1.+m_xi(q,1)) * (1.-m_xi(q,2)); m_N(q,4) = .125 * (1.-m_xi(q,0)) * (1.-m_xi(q,1)) * (1.+m_xi(q,2)); m_N(q,5) = .125 * (1.+m_xi(q,0)) * (1.-m_xi(q,1)) * (1.+m_xi(q,2)); m_N(q,6) = .125 * (1.+m_xi(q,0)) * (1.+m_xi(q,1)) * (1.+m_xi(q,2)); m_N(q,7) = .125 * (1.-m_xi(q,0)) * (1.+m_xi(q,1)) * (1.+m_xi(q,2)); } // shape function gradients in local coordinates - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - dN / dxi_0 m_dNxi(q,0,0) = -.125*(1.-m_xi(q,1))*(1.-m_xi(q,2)); m_dNxi(q,1,0) = +.125*(1.-m_xi(q,1))*(1.-m_xi(q,2)); m_dNxi(q,2,0) = +.125*(1.+m_xi(q,1))*(1.-m_xi(q,2)); m_dNxi(q,3,0) = -.125*(1.+m_xi(q,1))*(1.-m_xi(q,2)); m_dNxi(q,4,0) = -.125*(1.-m_xi(q,1))*(1.+m_xi(q,2)); m_dNxi(q,5,0) = +.125*(1.-m_xi(q,1))*(1.+m_xi(q,2)); m_dNxi(q,6,0) = +.125*(1.+m_xi(q,1))*(1.+m_xi(q,2)); m_dNxi(q,7,0) = -.125*(1.+m_xi(q,1))*(1.+m_xi(q,2)); // - dN / dxi_1 m_dNxi(q,0,1) = -.125*(1.-m_xi(q,0))*(1.-m_xi(q,2)); m_dNxi(q,1,1) = -.125*(1.+m_xi(q,0))*(1.-m_xi(q,2)); m_dNxi(q,2,1) = +.125*(1.+m_xi(q,0))*(1.-m_xi(q,2)); m_dNxi(q,3,1) = +.125*(1.-m_xi(q,0))*(1.-m_xi(q,2)); m_dNxi(q,4,1) = -.125*(1.-m_xi(q,0))*(1.+m_xi(q,2)); m_dNxi(q,5,1) = -.125*(1.+m_xi(q,0))*(1.+m_xi(q,2)); m_dNxi(q,6,1) = 
+.125*(1.+m_xi(q,0))*(1.+m_xi(q,2)); m_dNxi(q,7,1) = +.125*(1.-m_xi(q,0))*(1.+m_xi(q,2)); // - dN / dxi_2 m_dNxi(q,0,2) = -.125*(1.-m_xi(q,0))*(1.-m_xi(q,1)); m_dNxi(q,1,2) = -.125*(1.+m_xi(q,0))*(1.-m_xi(q,1)); m_dNxi(q,2,2) = -.125*(1.+m_xi(q,0))*(1.+m_xi(q,1)); m_dNxi(q,3,2) = -.125*(1.-m_xi(q,0))*(1.+m_xi(q,1)); m_dNxi(q,4,2) = +.125*(1.-m_xi(q,0))*(1.-m_xi(q,1)); m_dNxi(q,5,2) = +.125*(1.+m_xi(q,0))*(1.-m_xi(q,1)); m_dNxi(q,6,2) = +.125*(1.+m_xi(q,0))*(1.+m_xi(q,1)); m_dNxi(q,7,2) = +.125*(1.-m_xi(q,0))*(1.+m_xi(q,1)); } // compute the shape function gradients, based on "x" compute_dN(); } // ------------------------------------------------------------------------------------------------- -inline size_t Quadrature::nelem() const { return m_nelem; }; +inline size_t Quadrature::nelem() const +{ return m_nelem; }; -inline size_t Quadrature::nne() const { return m_nne; }; +inline size_t Quadrature::nne() const +{ return m_nne; }; -inline size_t Quadrature::ndim() const { return m_ndim; }; +inline size_t Quadrature::ndim() const +{ return m_ndim; }; -inline size_t Quadrature::nip() const { return m_nip; }; +inline size_t Quadrature::nip() const +{ return m_nip; }; + +inline xt::xtensor Quadrature::gradN() const +{ return m_dNx; }; // ------------------------------------------------------------------------------------------------- -inline void Quadrature::dV(xt::xtensor &qscalar) const +inline void Quadrature::dV(xt::xtensor& qscalar) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); + assert(qscalar.shape()[0] == m_nelem); + assert(qscalar.shape()[1] == m_nip ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) qscalar(e,q) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::dV(xt::xtensor &qtensor) const +inline void Quadrature::dV(xt::xtensor& qtensor) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) qtensor(e,q,i,j) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::update_x(const xt::xtensor &x) +inline void Quadrature::dV(xt::xarray& qtensor) const { - assert( x.shape()[0] == m_nelem ); - assert( x.shape()[1] == m_nne ); - assert( x.shape()[2] == m_ndim ); - assert( x.size() == m_x.size() ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + + xt::dynamic_shape strides = { + static_cast(m_vol.strides()[0]), + static_cast(m_vol.strides()[1])}; + + for (size_t i = 2; i < qtensor.shape().size(); ++i) + strides.push_back(0); + + qtensor = xt::strided_view(m_vol, qtensor.shape(), std::move(strides), 0ul, xt::layout_type::dynamic); +} + +// 
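// Note (not part of the patch): the new dV(xt::xarray<double>&) overload above broadcasts the
// scalar integration-point volume m_vol [nelem, nip] to a qtensor of arbitrary rank by
// assigning a strided view of m_vol in which every trailing axis gets stride zero, so the
// volumes are not copied into an intermediate array first. A minimal standalone sketch of the
// same idea, assuming only xtensor (sizes and values below are made up for illustration):

#include <cstddef>
#include <iostream>
#include <utility>
#include <xtensor/xarray.hpp>
#include <xtensor/xtensor.hpp>
#include <xtensor/xbuilder.hpp>
#include <xtensor/xstorage.hpp>
#include <xtensor/xstrided_view.hpp>

int main()
{
    // pretend these are the integration-point volumes: shape [nelem = 2, nip = 4]
    xt::xtensor<double,2> vol = {{1., 2., 3., 4.}, {5., 6., 7., 8.}};

    // target of rank 4, e.g. [nelem, nip, ndim, ndim]
    xt::xarray<double> qtensor = xt::zeros<double>({2, 4, 3, 3});

    // true strides for the two leading axes, zero strides for all broadcast axes
    xt::dynamic_shape<std::ptrdiff_t> strides = {
        static_cast<std::ptrdiff_t>(vol.strides()[0]),
        static_cast<std::ptrdiff_t>(vol.strides()[1])};

    for (std::size_t i = 2; i < qtensor.shape().size(); ++i)
        strides.push_back(0);

    qtensor = xt::strided_view(vol, qtensor.shape(), std::move(strides), 0ul, xt::layout_type::dynamic);

    std::cout << qtensor(1, 2, 0, 0) << ", " << qtensor(1, 2, 2, 2) << std::endl; // prints "7, 7"

    return 0;
}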
------------------------------------------------------------------------------------------------- + +inline void Quadrature::update_x(const xt::xtensor& x) +{ + assert(x.shape()[0] == m_nelem ); + assert(x.shape()[1] == m_nne ); + assert(x.shape()[2] == m_ndim ); + assert(x.size() == m_x.size()); - // update positions xt::noalias(m_x) = x; // update the shape function gradients for the new "x" compute_dN(); } // ------------------------------------------------------------------------------------------------- inline void Quadrature::compute_dN() { #pragma omp parallel { // allocate local variables xt::xtensor_fixed> J, Jinv; // loop over all elements (in parallel) #pragma omp for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias nodal positions auto x = xt::adapt(&m_x(e,0,0), xt::xshape()); // loop over integration points - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNxi = xt::adapt(&m_dNxi( q,0,0), xt::xshape()); auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); // - zero-initialize J.fill(0.0); // - Jacobian - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) J(i,j) += dNxi(m,i) * x(m,j); // - determinant and inverse of the Jacobian double Jdet = inv(J, Jinv); // - shape function gradients wrt global coordinates (loops partly unrolled for efficiency) // dNx(m,i) += Jinv(i,j) * dNxi(m,j); - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { dNx(m,0) = Jinv(0,0) * dNxi(m,0) + Jinv(0,1) * dNxi(m,1) + Jinv(0,2) * dNxi(m,2); dNx(m,1) = Jinv(1,0) * dNxi(m,0) + Jinv(1,1) * dNxi(m,1) + Jinv(1,2) * dNxi(m,2); dNx(m,2) = Jinv(2,0) * dNxi(m,0) + Jinv(2,1) * dNxi(m,1) + Jinv(2,2) * dNxi(m,2); } // - integration point volume m_vol(e,q) = m_w(q) * Jdet; } } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::gradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); // zero-initialize qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
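// Note (not part of the patch): compute_dN() above stores the integration-point volume as
// m_vol(e,q) = w(q) * det(J), so for an undistorted cube the eight Gauss-point volumes must
// add up to the exact element volume. A minimal usage sketch, assuming the renamed
// auto-allocating interface of this file and an install that provides the include path
// <GooseFEM/ElementHex8.h>; the edge length is made up:

#include <iostream>
#include <xtensor/xtensor.hpp>
#include <xtensor/xmath.hpp>
#include <GooseFEM/ElementHex8.h>

int main()
{
    double h = 2.0; // edge length of a single cube-shaped element

    // nodal positions [nelem = 1, nne = 8, ndim = 3], numbered as the shape functions above
    xt::xtensor<double,3> x = {{
        {0., 0., 0.}, {h , 0., 0.}, {h , h , 0.}, {0., h , 0.},
        {0., 0., h }, {h , 0., h }, {h , h , h }, {0., h , h }}};

    GooseFEM::Element::Hex8::Quadrature quad(x);

    // sum of dV over the 8 Gauss points: equals h*h*h up to round-off
    std::cout << xt::sum(quad.DV())() << std::endl;

    return 0;
}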
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate dyadic product - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) gradu(i,j) += dNx(m,i) * u(m,j); } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::gradN_vector_T( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); // zero-initialize qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate transpose of dyadic product - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) gradu(j,i) += dNx(m,i) * u(m,j); } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::symGradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); // zero-initialize qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto eps = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate symmetrized dyadic product - for ( size_t m = 0 ; m < m_nne ; ++m ) { - for ( size_t i = 0 ; i < m_ndim ; ++i ) { - for ( size_t j = 0 ; j < m_ndim ; ++j ) { + for (size_t m = 0 ; m < m_nne ; ++m ){ + for (size_t i = 0 ; i < m_ndim ; ++i ){ + for (size_t j = 0 ; j < m_ndim ; ++j ){ eps(i,j) += dNx(m,i) * u(m,j) / 2.; eps(j,i) += dNx(m,i) * u(m,j) / 2.; } } } } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::int_N_scalar_NT_dV( - const xt::xtensor &qscalar, xt::xtensor &elemmat) const + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(qscalar.shape()[0] == m_nelem ); + assert(qscalar.shape()[1] == m_nip ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize: matrix of matrices elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. mass matrix) auto M = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto N = xt::adapt(&m_N(q,0), xt::xshape()); auto& vol = m_vol (e,q); auto& rho = qscalar(e,q); // - evaluate scalar product, for all dimensions, and assemble // M(m*ndim+i,n*ndim+i) += N(m) * scalar * N(n) * dV - for ( size_t m = 0 ; m < m_nne ; ++m ) { - for ( size_t n = 0 ; n < m_nne ; ++n ) { + for (size_t m = 0 ; m < m_nne ; ++m ){ + for (size_t n = 0 ; n < m_nne ; ++n ){ M(m*m_ndim+0, n*m_ndim+0) += N(m) * rho * N(n) * vol; M(m*m_ndim+1, n*m_ndim+1) += N(m) * rho * N(n) * vol; M(m*m_ndim+2, n*m_ndim+2) += N(m) * rho * N(n) * vol; } } } } } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const +inline void Quadrature::int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); // zero-initialize output: matrix of vectors elemvec.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. 
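// Note (not part of the patch): int_N_scalar_NT_dV() above evaluates the consistent
// mass-type integral M(m*ndim+i, n*ndim+i) += N(m) * qscalar * N(n) * dV. Since the shape
// functions sum to one at every integration point, summing all entries of the result gives
// ndim * rho * V for a constant density rho. A compact sketch of that check (density and
// geometry made up), assuming the renamed interface of this file and the include path
// <GooseFEM/ElementHex8.h>:

#include <iostream>
#include <xtensor/xtensor.hpp>
#include <xtensor/xbuilder.hpp>
#include <xtensor/xmath.hpp>
#include <GooseFEM/ElementHex8.h>

int main()
{
    // a single unit cube [nelem = 1, nne = 8, ndim = 3]
    xt::xtensor<double,3> x = {{
        {0., 0., 0.}, {1., 0., 0.}, {1., 1., 0.}, {0., 1., 0.},
        {0., 0., 1.}, {1., 0., 1.}, {1., 1., 1.}, {0., 1., 1.}}};

    GooseFEM::Element::Hex8::Quadrature quad(x);

    // constant density as a qscalar of shape [nelem, nip]
    xt::xtensor<double,2> rho = 950.0 * xt::ones<double>({quad.nelem(), quad.nip()});

    // element mass matrices [nelem, nne*ndim, nne*ndim]
    xt::xtensor<double,3> M = quad.Int_N_scalar_NT_dV(rho);

    std::cout << xt::sum(M)() << std::endl; // 3 * 950 * 1 = 2850, up to round-off

    return 0;
}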
nodal force) auto f = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto sig = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - evaluate dot product, and assemble - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { f(m,0) += ( dNx(m,0) * sig(0,0) + dNx(m,1) * sig(1,0) + dNx(m,2) * sig(2,0) ) * vol; f(m,1) += ( dNx(m,0) * sig(0,1) + dNx(m,1) * sig(1,1) + dNx(m,2) * sig(2,1) ) * vol; f(m,2) += ( dNx(m,0) * sig(0,2) + dNx(m,1) * sig(1,2) + dNx(m,2) * sig(2,2) ) * vol; } } } } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const +inline void Quadrature::int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); - assert( qtensor.shape()[4] == m_ndim ); - assert( qtensor.shape()[5] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); + assert(qtensor.shape()[4] == m_ndim ); + assert(qtensor.shape()[5] == m_ndim ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize output: matrix of vector elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. 
nodal force) auto K = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx(e,q,0,0), xt::xshape()); auto C = xt::adapt(&qtensor(e,q,0,0,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - evaluate dot product, and assemble - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t n = 0 ; n < m_nne ; ++n ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) - for ( size_t k = 0 ; k < m_ndim ; ++k ) - for ( size_t l = 0 ; l < m_ndim ; ++l ) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t n = 0 ; n < m_nne ; ++n) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) + for (size_t k = 0 ; k < m_ndim ; ++k) + for (size_t l = 0 ; l < m_ndim ; ++l) K(m*m_ndim+j, n*m_ndim+k) += dNx(m,i) * C(i,j,k,l) * dNx(n,l) * vol; } } } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::dV() const +inline xt::xtensor Quadrature::DV() const { xt::xtensor out = xt::empty({m_nelem, m_nip}); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::dVtensor() const +inline xt::xarray Quadrature::DV(size_t rank) const { - xt::xtensor out = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); + std::vector shape = {m_nelem, m_nip}; + + for (size_t i = 0; i < rank; ++i) + shape.push_back(m_nd); + + xt::xarray out = xt::empty(shape); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::gradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor Quadrature::GradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); this->gradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::gradN_vector_T(const xt::xtensor &elemvec) const +inline xt::xtensor Quadrature::GradN_vector_T( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); this->gradN_vector_T(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::symGradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor Quadrature::SymGradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); this->symGradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::int_N_scalar_NT_dV( - const xt::xtensor &qscalar) const +inline xt::xtensor Quadrature::Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const { xt::xtensor elemmat = xt::empty({m_nelem, m_nne*m_ndim, m_nne*m_ndim}); this->int_N_scalar_NT_dV(qscalar, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::int_gradN_dot_tensor2_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor Quadrature::Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemvec = 
xt::empty({m_nelem, m_nne, m_ndim}); this->int_gradN_dot_tensor2_dV(qtensor, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::int_gradN_dot_tensor4_dot_gradNT_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor Quadrature::Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemmat = xt::empty({m_nelem, m_ndim*m_nne, m_ndim*m_nne}); this->int_gradN_dot_tensor4_dot_gradNT_dV(qtensor, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementQuad4.h b/include/GooseFEM/ElementQuad4.h index 76f20b3..a0866bf 100644 --- a/include/GooseFEM/ElementQuad4.h +++ b/include/GooseFEM/ElementQuad4.h @@ -1,174 +1,199 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTQUAD4_H #define GOOSEFEM_ELEMENTQUAD4_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Quad4 { // ------------------------------------------------------------------------------------------------- -inline double inv(const xt::xtensor_fixed> &A, - xt::xtensor_fixed> &Ainv); +inline double inv( + const xt::xtensor_fixed>& A, + xt::xtensor_fixed>& Ainv); // ------------------------------------------------------------------------------------------------- namespace Gauss { inline size_t nip(); // number of integration points inline xt::xtensor xi(); // integration point coordinates (local coordinates) inline xt::xtensor w(); // integration point weights } // ------------------------------------------------------------------------------------------------- namespace Nodal { inline size_t nip(); // number of integration points inline xt::xtensor xi(); // integration point coordinates (local coordinates) inline xt::xtensor w(); // integration point weights } // ------------------------------------------------------------------------------------------------- namespace MidPoint { inline size_t nip(); // number of integration points inline xt::xtensor xi(); // integration point coordinates (local coordinates) inline xt::xtensor w(); // integration point weights } // ------------------------------------------------------------------------------------------------- class Quadrature { public: - // fixed dimensions: + // Fixed dimensions: // ndim = 2 - number of dimensions // nne = 4 - number of nodes per element // - // naming convention: + // Naming convention: // "elemmat" - matrices stored per element - [nelem, nne*ndim, nne*ndim] // "elemvec" - nodal vectors stored per element - [nelem, nne, ndim] // "qtensor" - integration point tensor - [nelem, nip, ndim, ndim] // "qscalar" - integration point scalar - [nelem, nip] - // constructor: integration point coordinates and weights are optional (default: Gauss) + // Constructor: integration point coordinates and weights are optional 
(default: Gauss) Quadrature() = default; - Quadrature(const xt::xtensor &x); + Quadrature( + const xt::xtensor& x); - Quadrature(const xt::xtensor &x, const xt::xtensor &xi, const xt::xtensor &w); + Quadrature( + const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w); - // update the nodal positions (shape of "x" should match the earlier definition) + // Update the nodal positions (shape of "x" should match the earlier definition) - void update_x(const xt::xtensor &x); + void update_x(const xt::xtensor& x); - // return dimensions + // Return dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t ndim() const; // number of dimension size_t nip() const; // number of integration points - // return shape function gradients + // Return shape function gradients xt::xtensor gradN() const; - // return integration volume + // Return integration volume - void dV(xt::xtensor &qscalar) const; + void dV(xt::xtensor& qscalar) const; - void dV(xt::xtensor &qtensor) const; // same volume for all tensor components + void dV(xt::xtensor& qtensor) const; // same volume for all tensor components - // dyadic product "qtensor(i,j) += dNdx(m,i) * elemvec(m,j)", its transpose and its symmetric part + void dV(xt::xarray& qtensor) const; // same volume for all tensor components - void gradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + // Dyadic product (and its transpose and symmetric part) + // qtensor(i,j) += dNdx(m,i) * elemvec(m,j) - void gradN_vector_T(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void gradN_vector( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void symGradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void gradN_vector_T( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - // integral of the scalar product "elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV" + void symGradN_vector( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void int_N_scalar_NT_dV(const xt::xtensor &qscalar, - xt::xtensor &elemmat) const; + // Integral of the scalar product + // elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV - // integral of the dot product "elemvec(m,j) += dNdx(m,i) * qtensor(i,j) * dV" + void int_N_scalar_NT_dV( + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const; // overwritten - void int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const; + // Integral of the dot product + // elemvec(m,j) += dNdx(m,i) * qtensor(i,j) * dV - // integral of the dot product "elemmat(m*2+j, n*2+k) += dNdx(m,i) * qtensor(i,j,k,l) * dNdx(n,l) * dV" + void int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const; // overwritten - void int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const; + // Integral of the dot product + // elemmat(m*2+j, n*2+k) += dNdx(m,i) * qtensor(i,j,k,l) * dNdx(n,l) * dV - // auto allocation of the functions above + void int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const; // overwritten - xt::xtensor dV() const; + // Auto-allocation of the functions above - xt::xtensor dVtensor() const; + xt::xtensor DV() const; - xt::xtensor gradN_vector(const xt::xtensor &elemvec) const; + xt::xarray DV( + size_t rank) const; - xt::xtensor gradN_vector_T(const xt::xtensor &elemvec) const; + xt::xtensor GradN_vector( + const 
xt::xtensor& elemvec) const; - xt::xtensor symGradN_vector(const xt::xtensor &elemvec) const; + xt::xtensor GradN_vector_T( + const xt::xtensor& elemvec) const; - xt::xtensor int_N_scalar_NT_dV(const xt::xtensor &qscalar) const; + xt::xtensor SymGradN_vector( + const xt::xtensor& elemvec) const; - xt::xtensor int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor) const; + xt::xtensor Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const; - xt::xtensor int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor) const; + xt::xtensor Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const; + + xt::xtensor Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const; private: - // compute "vol" and "dNdx" based on current "x" + // Compute "vol" and "dNdx" based on current "x" void compute_dN(); private: - // dimensions (flexible) + // Dimensions (flexible) size_t m_nelem; // number of elements size_t m_nip; // number of integration points - // dimensions (fixed for this element type) + // Dimensions (fixed for this element type) static const size_t m_nne=4; // number of nodes per element static const size_t m_ndim=2; // number of dimensions + const size_t m_nd=2; // number of dimensions (non-static, sometimes useful) - // data arrays + // Data arrays xt::xtensor m_x; // nodal positions stored per element [nelem, nne, ndim] xt::xtensor m_w; // weight of each integration point [nip] xt::xtensor m_xi; // local coordinate of each integration point [nip, ndim] xt::xtensor m_N; // shape functions [nip, nne] - xt::xtensor m_dNxi; // shape function gradients w.r.t. local coordinate [nip, nne, ndim] - xt::xtensor m_dNx; // shape function gradients w.r.t. global coordinate [nelem, nip, nne, ndim] + xt::xtensor m_dNxi; // shape function grad. w.r.t. local coor. [nip, nne, ndim] + xt::xtensor m_dNx; // shape function grad. w.r.t. global coor. [nelem, nip, nne, ndim] xt::xtensor m_vol; // integration point volume [nelem, nip] }; // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #include "ElementQuad4.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementQuad4.hpp b/include/GooseFEM/ElementQuad4.hpp index 81e3aa7..0c31a2e 100644 --- a/include/GooseFEM/ElementQuad4.hpp +++ b/include/GooseFEM/ElementQuad4.hpp @@ -1,676 +1,711 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
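// Note (not part of the patch): a self-contained sketch of the typical call sequence of the
// Quad4 quadrature declared above: nodal displacements -> strain per integration point ->
// stress -> nodal internal forces. The "constitutive" step is plain xtensor arithmetic
// (sigma = 2 * mu * eps, with a made-up mu), not a GooseFEM API; the include path
// <GooseFEM/ElementQuad4.h> is assumed:

#include <iostream>
#include <xtensor/xtensor.hpp>
#include <xtensor/xio.hpp>
#include <GooseFEM/ElementQuad4.h>

int main()
{
    // a single unit square [nelem = 1, nne = 4, ndim = 2]
    xt::xtensor<double,3> x = {{{0., 0.}, {1., 0.}, {1., 1.}, {0., 1.}}};

    GooseFEM::Element::Quad4::Quadrature quad(x);

    // nodal displacements per element: simple shear u_x = 0.1 * y
    xt::xtensor<double,3> ue = {{{0., 0.}, {0., 0.}, {0.1, 0.}, {0.1, 0.}}};

    // strain per integration point [nelem, nip, ndim, ndim]: eps_xy = eps_yx = 0.05 everywhere
    xt::xtensor<double,4> Eps = quad.SymGradN_vector(ue);

    // a made-up linear "stress", only to keep the sketch short
    double mu = 1.0;
    xt::xtensor<double,4> Sig = 2.0 * mu * Eps;

    // internal force per element node [nelem, nne, ndim]
    xt::xtensor<double,3> fe = quad.Int_gradN_dot_tensor2_dV(Sig);

    std::cout << Eps << std::endl << fe << std::endl;

    return 0;
}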
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTQUAD4_HPP #define GOOSEFEM_ELEMENTQUAD4_HPP // ------------------------------------------------------------------------------------------------- #include "ElementQuad4.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Quad4 { // ================================================================================================= -inline double inv(const xt::xtensor_fixed> &A, - xt::xtensor_fixed> &Ainv) +inline double inv( + const xt::xtensor_fixed>& A, + xt::xtensor_fixed>& Ainv) { // compute determinant double det = A(0,0) * A(1,1) - A(0,1) * A(1,0); // compute inverse Ainv(0,0) = A(1,1) / det; Ainv(0,1) = -1. * A(0,1) / det; Ainv(1,0) = -1. * A(1,0) / det; Ainv(1,1) = A(0,0) / det; return det; } // ================================================================================================= namespace Gauss { // ------------------------------------------------------------------------------------------------- inline size_t nip() { return 4; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor xi() { size_t nip = 4; size_t ndim = 2; xt::xtensor xi = xt::empty({nip,ndim}); xi(0,0) = -1./std::sqrt(3.); xi(0,1) = -1./std::sqrt(3.); xi(1,0) = +1./std::sqrt(3.); xi(1,1) = -1./std::sqrt(3.); xi(2,0) = +1./std::sqrt(3.); xi(2,1) = +1./std::sqrt(3.); xi(3,0) = -1./std::sqrt(3.); xi(3,1) = +1./std::sqrt(3.); return xi; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor w() { size_t nip = 4; xt::xtensor w = xt::empty({nip}); w(0) = 1.; w(1) = 1.; w(2) = 1.; w(3) = 1.; return w; } // ------------------------------------------------------------------------------------------------- } // ================================================================================================= namespace Nodal { // ------------------------------------------------------------------------------------------------- inline size_t nip() { return 4; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor xi() { size_t nip = 4; size_t ndim = 2; xt::xtensor xi = xt::empty({nip,ndim}); xi(0,0) = -1.; xi(0,1) = -1.; xi(1,0) = +1.; xi(1,1) = -1.; xi(2,0) = +1.; xi(2,1) = +1.; xi(3,0) = -1.; xi(3,1) = +1.; return xi; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor w() { size_t nip = 4; xt::xtensor w = xt::empty({nip}); w(0) = 1.; w(1) = 1.; w(2) = 1.; w(3) = 1.; return w; } // ------------------------------------------------------------------------------------------------- } // ================================================================================================= namespace MidPoint { // ------------------------------------------------------------------------------------------------- inline size_t nip() { return 1; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor xi() { size_t nip = 1; size_t ndim = 2; xt::xtensor xi = xt::empty({nip,ndim}); xi(0,0) = 0.; xi(0,1) = 0.; return xi; } // 
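// Note (not part of the patch): besides the default 2x2 Gauss rule, the Nodal and MidPoint
// rules of this file can be selected through the three-argument constructor; Nodal
// quadrature (integration points on the nodes) is the usual choice when a diagonal mass
// matrix is wanted. A minimal sketch, assuming the include path <GooseFEM/ElementQuad4.h>:

#include <iostream>
#include <xtensor/xtensor.hpp>
#include <GooseFEM/ElementQuad4.h>

int main()
{
    // a single unit square [nelem = 1, nne = 4, ndim = 2]
    xt::xtensor<double,3> x = {{{0., 0.}, {1., 0.}, {1., 1.}, {0., 1.}}};

    namespace Q4 = GooseFEM::Element::Quad4;

    Q4::Quadrature gauss(x);                                      // default: Gauss::xi(), Gauss::w()
    Q4::Quadrature nodal(x, Q4::Nodal::xi(), Q4::Nodal::w());     // integration points on the nodes
    Q4::Quadrature mid(x, Q4::MidPoint::xi(), Q4::MidPoint::w()); // single point at the element centre

    std::cout << gauss.nip() << " " << nodal.nip() << " " << mid.nip() << std::endl; // "4 4 1"

    return 0;
}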
------------------------------------------------------------------------------------------------- inline xt::xtensor w() { size_t nip = 1; xt::xtensor w = xt::empty({nip}); w(0) = 1.; return w; } // ------------------------------------------------------------------------------------------------- } // ================================================================================================= -inline Quadrature::Quadrature(const xt::xtensor &x) : +inline Quadrature::Quadrature(const xt::xtensor& x) : Quadrature(x, Gauss::xi(), Gauss::w()) {}; // ------------------------------------------------------------------------------------------------- -inline Quadrature::Quadrature(const xt::xtensor &x, const xt::xtensor &xi, - const xt::xtensor &w) : m_x(x), m_w(w), m_xi(xi) +inline Quadrature::Quadrature( + const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w) : + m_x(x), m_w(w), m_xi(xi) { - // check input - assert( m_x.shape()[1] == m_nne ); - assert( m_x.shape()[2] == m_ndim ); + assert(m_x.shape()[1] == m_nne ); + assert(m_x.shape()[2] == m_ndim); - // extract number of elements and number of integration points m_nelem = m_x.shape()[0]; m_nip = m_w.size(); - // check input - assert( m_xi.shape()[0] == m_nip ); - assert( m_xi.shape()[1] == m_ndim ); - assert( m_w .size() == m_nip ); + assert(m_xi.shape()[0] == m_nip ); + assert(m_xi.shape()[1] == m_ndim); + assert(m_w .size() == m_nip ); - // allocate arrays m_N = xt::empty({ m_nip, m_nne }); m_dNxi = xt::empty({ m_nip, m_nne, m_ndim}); m_dNx = xt::empty({m_nelem, m_nip, m_nne, m_ndim}); m_vol = xt::empty({m_nelem, m_nip }); // shape functions - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { m_N(q,0) = .25 * (1.-m_xi(q,0)) * (1.-m_xi(q,1)); m_N(q,1) = .25 * (1.+m_xi(q,0)) * (1.-m_xi(q,1)); m_N(q,2) = .25 * (1.+m_xi(q,0)) * (1.+m_xi(q,1)); m_N(q,3) = .25 * (1.-m_xi(q,0)) * (1.+m_xi(q,1)); } // shape function gradients in local coordinates - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - dN / dxi_0 m_dNxi(q,0,0) = -.25*(1.-m_xi(q,1)); m_dNxi(q,1,0) = +.25*(1.-m_xi(q,1)); m_dNxi(q,2,0) = +.25*(1.+m_xi(q,1)); m_dNxi(q,3,0) = -.25*(1.+m_xi(q,1)); // - dN / dxi_1 m_dNxi(q,0,1) = -.25*(1.-m_xi(q,0)); m_dNxi(q,1,1) = -.25*(1.+m_xi(q,0)); m_dNxi(q,2,1) = +.25*(1.+m_xi(q,0)); m_dNxi(q,3,1) = +.25*(1.-m_xi(q,0)); } // compute the shape function gradients, based on "x" compute_dN(); } // ------------------------------------------------------------------------------------------------- -inline size_t Quadrature::nelem() const { return m_nelem; }; +inline size_t Quadrature::nelem() const +{ return m_nelem; }; -inline size_t Quadrature::nne() const { return m_nne; }; +inline size_t Quadrature::nne() const +{ return m_nne; }; -inline size_t Quadrature::ndim() const { return m_ndim; }; +inline size_t Quadrature::ndim() const +{ return m_ndim; }; -inline size_t Quadrature::nip() const { return m_nip; }; +inline size_t Quadrature::nip() const +{ return m_nip; }; -inline xt::xtensor Quadrature::gradN() const { return m_dNx; }; +inline xt::xtensor Quadrature::gradN() const +{ return m_dNx; }; // ------------------------------------------------------------------------------------------------- -inline void Quadrature::dV(xt::xtensor &qscalar) const +inline void Quadrature::dV(xt::xtensor& qscalar) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); + assert(qscalar.shape()[0] == m_nelem); + assert(qscalar.shape()[1] == m_nip ); #pragma 
omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) qscalar(e,q) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::dV(xt::xtensor &qtensor) const +inline void Quadrature::dV(xt::xtensor& qtensor) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) qtensor(e,q,i,j) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::update_x(const xt::xtensor &x) +inline void Quadrature::dV(xt::xarray& qtensor) const { - assert( x.shape()[0] == m_nelem ); - assert( x.shape()[1] == m_nne ); - assert( x.shape()[2] == m_ndim ); - assert( x.size() == m_x.size() ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + + xt::dynamic_shape strides = { + static_cast(m_vol.strides()[0]), + static_cast(m_vol.strides()[1])}; + + for (size_t i = 2; i < qtensor.shape().size(); ++i) + strides.push_back(0); + + qtensor = xt::strided_view(m_vol, qtensor.shape(), std::move(strides), 0ul, xt::layout_type::dynamic); +} + +// ------------------------------------------------------------------------------------------------- + +inline void Quadrature::update_x(const xt::xtensor& x) +{ + assert(x.shape()[0] == m_nelem ); + assert(x.shape()[1] == m_nne ); + assert(x.shape()[2] == m_ndim ); + assert(x.size() == m_x.size()); - // update positions xt::noalias(m_x) = x; // update the shape function gradients for the new "x" compute_dN(); } // ------------------------------------------------------------------------------------------------- inline void Quadrature::compute_dN() { #pragma omp parallel { // allocate local variables xt::xtensor_fixed> J, Jinv; // loop over all elements (in parallel) #pragma omp for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias nodal positions auto x = xt::adapt(&m_x(e,0,0), xt::xshape()); // loop over integration points - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNxi = xt::adapt(&m_dNxi( q,0,0), xt::xshape()); auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); // - Jacobian (loops unrolled for efficiency) // J(i,j) += dNxi(m,i) * x(m,j); J(0,0) = dNxi(0,0)*x(0,0) + dNxi(1,0)*x(1,0) + dNxi(2,0)*x(2,0) + dNxi(3,0)*x(3,0); J(0,1) = dNxi(0,0)*x(0,1) + dNxi(1,0)*x(1,1) + dNxi(2,0)*x(2,1) + dNxi(3,0)*x(3,1); J(1,0) = dNxi(0,1)*x(0,0) + dNxi(1,1)*x(1,0) + dNxi(2,1)*x(2,0) + dNxi(3,1)*x(3,0); J(1,1) = dNxi(0,1)*x(0,1) + dNxi(1,1)*x(1,1) + dNxi(2,1)*x(2,1) + dNxi(3,1)*x(3,1); // - determinant and inverse of the Jacobian double Jdet = inv(J, Jinv); // - shape function gradients wrt global coordinates (loops partly unrolled for efficiency) // dNx(m,i) 
+= Jinv(i,j) * dNxi(m,j); - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { dNx(m,0) = Jinv(0,0) * dNxi(m,0) + Jinv(0,1) * dNxi(m,1); dNx(m,1) = Jinv(1,0) * dNxi(m,0) + Jinv(1,1) * dNxi(m,1); } // - integration point volume m_vol(e,q) = m_w(q) * Jdet; } } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::gradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate dyadic product (loops unrolled for efficiency) // gradu(i,j) += dNx(m,i) * u(m,j) gradu(0,0) = dNx(0,0)*u(0,0) + dNx(1,0)*u(1,0) + dNx(2,0)*u(2,0) + dNx(3,0)*u(3,0); gradu(0,1) = dNx(0,0)*u(0,1) + dNx(1,0)*u(1,1) + dNx(2,0)*u(2,1) + dNx(3,0)*u(3,1); gradu(1,0) = dNx(0,1)*u(0,0) + dNx(1,1)*u(1,0) + dNx(2,1)*u(2,0) + dNx(3,1)*u(3,0); gradu(1,1) = dNx(0,1)*u(0,1) + dNx(1,1)*u(1,1) + dNx(2,1)*u(2,1) + dNx(3,1)*u(3,1); } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::gradN_vector_T( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate transpose of dyadic product (loops unrolled for efficiency) // gradu(j,i) += dNx(m,i) * u(m,j) gradu(0,0) = dNx(0,0)*u(0,0) + dNx(1,0)*u(1,0) + dNx(2,0)*u(2,0) + dNx(3,0)*u(3,0); gradu(1,0) = dNx(0,0)*u(0,1) + dNx(1,0)*u(1,1) + dNx(2,0)*u(2,1) + dNx(3,0)*u(3,1); gradu(0,1) = dNx(0,1)*u(0,0) + dNx(1,1)*u(1,0) + dNx(2,1)*u(2,0) + dNx(3,1)*u(3,0); gradu(1,1) = dNx(0,1)*u(0,1) + dNx(1,1)*u(1,1) + dNx(2,1)*u(2,1) + dNx(3,1)*u(3,1); } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::symGradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto eps = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate symmetrized dyadic product (loops unrolled for efficiency) // gradu(i,j) += dNx(m,i) * u(m,j) // eps (j,i) = 0.5 * ( gradu(i,j) + gradu(j,i) ) eps(0,0) = dNx(0,0)*u(0,0) + dNx(1,0)*u(1,0) + dNx(2,0)*u(2,0) + dNx(3,0)*u(3,0); eps(1,1) = dNx(0,1)*u(0,1) + dNx(1,1)*u(1,1) + dNx(2,1)*u(2,1) + dNx(3,1)*u(3,1); eps(0,1) = ( dNx(0,0)*u(0,1) + dNx(1,0)*u(1,1) + dNx(2,0)*u(2,1) + dNx(3,0)*u(3,1) + dNx(0,1)*u(0,0) + dNx(1,1)*u(1,0) + dNx(2,1)*u(2,0) + dNx(3,1)*u(3,0) ) * 0.5; eps(1,0) = eps(0,1); } } } // ------------------------------------------------------------------------------------------------- inline void Quadrature::int_N_scalar_NT_dV( - const xt::xtensor &qscalar, xt::xtensor &elemmat) const + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(qscalar.shape()[0] == m_nelem ); + assert(qscalar.shape()[1] == m_nip ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize: matrix of matrices elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. mass matrix) auto M = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto N = xt::adapt(&m_N(q,0), xt::xshape()); auto& vol = m_vol (e,q); auto& rho = qscalar(e,q); // - evaluate scalar product, for all dimensions, and assemble // M(m*ndim+i,n*ndim+i) += N(m) * scalar * N(n) * dV - for ( size_t m = 0 ; m < m_nne ; ++m ) { - for ( size_t n = 0 ; n < m_nne ; ++n ) { + for (size_t m = 0 ; m < m_nne ; ++m ){ + for (size_t n = 0 ; n < m_nne ; ++n ){ M(m*m_ndim+0, n*m_ndim+0) += N(m) * rho * N(n) * vol; M(m*m_ndim+1, n*m_ndim+1) += N(m) * rho * N(n) * vol; } } } } } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const +inline void Quadrature::int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); // zero-initialize output: matrix of vectors elemvec.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for 
(size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. nodal force) auto f = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto sig = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - evaluate dot product, and assemble - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { f(m,0) += ( dNx(m,0) * sig(0,0) + dNx(m,1) * sig(1,0) ) * vol; f(m,1) += ( dNx(m,0) * sig(0,1) + dNx(m,1) * sig(1,1) ) * vol; } } } } // ------------------------------------------------------------------------------------------------- -inline void Quadrature::int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const +inline void Quadrature::int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_ndim ); - assert( qtensor.shape()[3] == m_ndim ); - assert( qtensor.shape()[4] == m_ndim ); - assert( qtensor.shape()[5] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_ndim ); + assert(qtensor.shape()[3] == m_ndim ); + assert(qtensor.shape()[4] == m_ndim ); + assert(qtensor.shape()[5] == m_ndim ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize output: matrix of vector elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. 
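#include <array>
#include <cstddef>

// The dot product assembled above, in generic index form:
// f(m,i) += dNx(m,j) * sig(j,i) * dV (sum over j),
// i.e. one integration point of the internal-force integral.
// Standalone sketch with plain arrays.
void add_gradN_dot_tensor2_dV(
    const std::array<std::array<double,2>,4>& dNx, // shape-function gradients
    const std::array<std::array<double,2>,2>& sig, // stress at the point
    double dV,
    std::array<std::array<double,2>,4>& f)         // nodal forces [nne, ndim]
{
    for (std::size_t m = 0; m < 4; ++m)
        for (std::size_t i = 0; i < 2; ++i)
            for (std::size_t j = 0; j < 2; ++j)
                f[m][i] += dNx[m][j] * sig[j][i] * dV;
}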
nodal force) auto K = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx(e,q,0,0), xt::xshape()); auto C = xt::adapt(&qtensor(e,q,0,0,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - evaluate dot product, and assemble - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t n = 0 ; n < m_nne ; ++n ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) - for ( size_t k = 0 ; k < m_ndim ; ++k ) - for ( size_t l = 0 ; l < m_ndim ; ++l ) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t n = 0 ; n < m_nne ; ++n) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) + for (size_t k = 0 ; k < m_ndim ; ++k) + for (size_t l = 0 ; l < m_ndim ; ++l) K(m*m_ndim+j, n*m_ndim+k) += dNx(m,i) * C(i,j,k,l) * dNx(n,l) * vol; } } } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::dV() const +inline xt::xtensor Quadrature::DV() const { xt::xtensor out = xt::empty({m_nelem, m_nip}); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::dVtensor() const +inline xt::xarray Quadrature::DV(size_t rank) const { - xt::xtensor out = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); + std::vector shape = {m_nelem, m_nip}; + + for (size_t i = 0; i < rank; ++i) + shape.push_back(m_nd); + + xt::xarray out = xt::empty(shape); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::gradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor Quadrature::GradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); this->gradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::gradN_vector_T(const xt::xtensor &elemvec) const +inline xt::xtensor Quadrature::GradN_vector_T( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); this->gradN_vector_T(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::symGradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor Quadrature::SymGradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_ndim, m_ndim}); this->symGradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::int_N_scalar_NT_dV( - const xt::xtensor &qscalar) const +inline xt::xtensor Quadrature::Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const { xt::xtensor elemmat = xt::empty({m_nelem, m_nne*m_ndim, m_nne*m_ndim}); this->int_N_scalar_NT_dV(qscalar, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::int_gradN_dot_tensor2_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor Quadrature::Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemvec = 
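#include <array>
#include <cstddef>

// The six nested loops above in generic form: one integration point of
// K(m*ndim+j, n*ndim+k) += dNx(m,i) * C(i,j,k,l) * dNx(n,l) * dV.
// Standalone sketch with plain arrays (ndim = 2, nne = 4).
void add_gradN_C_gradNT_dV(
    const std::array<std::array<double,2>,4>& dNx, // shape-function gradients
    const double (&C)[2][2][2][2],                 // fourth-order tangent
    double dV,
    std::array<std::array<double,8>,8>& K)         // element matrix
{
    for (std::size_t m = 0; m < 4; ++m)
        for (std::size_t n = 0; n < 4; ++n)
            for (std::size_t i = 0; i < 2; ++i)
                for (std::size_t j = 0; j < 2; ++j)
                    for (std::size_t k = 0; k < 2; ++k)
                        for (std::size_t l = 0; l < 2; ++l)
                            K[m*2 + j][n*2 + k] += dNx[m][i] * C[i][j][k][l] * dNx[n][l] * dV;
}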
xt::empty({m_nelem, m_nne, m_ndim}); this->int_gradN_dot_tensor2_dV(qtensor, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Quadrature::int_gradN_dot_tensor4_dot_gradNT_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor Quadrature::Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemmat = xt::empty({m_nelem, m_ndim*m_nne, m_ndim*m_nne}); this->int_gradN_dot_tensor4_dot_gradNT_dV(qtensor, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementQuad4Axisymmetric.h b/include/GooseFEM/ElementQuad4Axisymmetric.h index 1181566..843f2f8 100644 --- a/include/GooseFEM/ElementQuad4Axisymmetric.h +++ b/include/GooseFEM/ElementQuad4Axisymmetric.h @@ -1,143 +1,167 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTQUAD4AXISYMMETRIC_H #define GOOSEFEM_ELEMENTQUAD4AXISYMMETRIC_H // ------------------------------------------------------------------------------------------------- -#include "GooseFEM.h" +#include "config.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Quad4 { // ------------------------------------------------------------------------------------------------- class QuadratureAxisymmetric { public: - // fixed dimensions: + // Fixed dimensions: // ndim = 2 - number of dimensions // nne = 4 - number of nodes per element // tdim = 3 - number of dimensions of tensors // - // naming convention: + // Naming convention: // "elemmat" - matrices stored per element - [nelem, nne*ndim, nne*ndim] // "elemvec" - nodal vectors stored per element - [nelem, nne, ndim] // "qtensor" - integration point tensor - [nelem, nip, tdim, tdim] // "qscalar" - integration point scalar - [nelem, nip] - // constructor: integration point coordinates and weights are optional (default: Gauss) + // Constructor: integration point coordinates and weights are optional (default: Gauss) QuadratureAxisymmetric() = default; - QuadratureAxisymmetric(const xt::xtensor &x); + QuadratureAxisymmetric( + const xt::xtensor& x); - QuadratureAxisymmetric(const xt::xtensor &x, const xt::xtensor &xi, const xt::xtensor &w); + QuadratureAxisymmetric( + const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w); - // update the nodal positions (shape of "x" should match the earlier definition) + // Update the nodal positions (shape of "x" should match the earlier definition) - void update_x(const xt::xtensor &x); + void update_x(const xt::xtensor& x); - // return dimensions + // Return dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t ndim() const; // number of dimension size_t nip() const; // number of integration points - // return integration volume + // Return integration volume - void dV(xt::xtensor &qscalar) const; + void dV(xt::xtensor& qscalar) const; - void dV(xt::xtensor &qtensor) const; // same volume for all 
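// Illustrative use of the renamed auto-allocating interface above: the
// capitalised methods allocate and return their result, the lower-case
// overloads write into a pre-allocated argument. Variable names such as
// "elem_coor" and "elem_disp" are hypothetical; a sketch only, not checked
// against a full build.
#include <GooseFEM/GooseFEM.h>

void example(
    const xt::xtensor<double,3>& elem_coor,  // nodal positions      [nelem, nne, ndim]
    const xt::xtensor<double,3>& elem_disp)  // nodal displacements  [nelem, nne, ndim]
{
    GooseFEM::Element::Quad4::Quadrature quad(elem_coor);

    auto Eps = quad.SymGradN_vector(elem_disp); // allocated: [nelem, nip, ndim, ndim]
    auto dV  = quad.DV(2);                      // volume broadcast to the same shape

    quad.symGradN_vector(elem_disp, Eps);       // in-place: caller owns the storage
}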
tensor components + void dV(xt::xtensor& qtensor) const; // same volume for all tensor components - // dyadic product "qtensor(i,j) += B(m,i,j,k) * elemvec(m,k)", its transpose and its symmetric part + void dV(xt::xarray& qtensor) const; // same volume for all tensor components - void gradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + // Dyadic product (and its transpose and symmetric part) + // qtensor(i,j) += B(m,i,j,k) * elemvec(m,k) - void gradN_vector_T(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void gradN_vector( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void symGradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void gradN_vector_T( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - // integral of the scalar product "elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV" + void symGradN_vector( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void int_N_scalar_NT_dV(const xt::xtensor &qscalar, - xt::xtensor &elemmat) const; + // Integral of the scalar product + // elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV - // integral of the assembled product "fm = ( Bm^T : qtensor ) dV" + void int_N_scalar_NT_dV( + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const; // overwritten - void int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const; + // Integral of the assembled product + // fm = ( Bm^T : qtensor ) dV - // integral of the assembled product "Kmn = ( Bm^T : qtensor : Bn ) dV + void int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const; // overwritten - void int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const; + // Integral of the assembled product + // Kmn = ( Bm^T : qtensor : Bn ) dV - // auto allocation of the functions above + void int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const; // overwritten - xt::xtensor dV() const; + // Auto-allocation of the functions above - xt::xtensor dVtensor() const; + xt::xtensor DV() const; - xt::xtensor gradN_vector(const xt::xtensor &elemvec) const; + xt::xarray DV( + size_t rank) const; - xt::xtensor gradN_vector_T(const xt::xtensor &elemvec) const; + xt::xtensor GradN_vector( + const xt::xtensor& elemvec) const; - xt::xtensor symGradN_vector(const xt::xtensor &elemvec) const; + xt::xtensor GradN_vector_T( + const xt::xtensor& elemvec) const; - xt::xtensor int_N_scalar_NT_dV(const xt::xtensor &qscalar) const; + xt::xtensor SymGradN_vector( + const xt::xtensor& elemvec) const; - xt::xtensor int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor) const; + xt::xtensor Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const; - xt::xtensor int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor) const; + xt::xtensor Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const; + + xt::xtensor Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const; private: - // compute "vol" and "B" based on current "x" + // Compute "vol" and "B" based on current "x" void compute_dN(); private: - // dimensions (flexible) + // Dimensions (flexible) size_t m_nelem; // number of elements size_t m_nip; // number of integration points - // dimensions (fixed for this element type) + // Dimensions (fixed for this element type) static const size_t m_nne=4; // number of nodes per element static const size_t m_ndim=2; // number of 
dimensions static const size_t m_tdim=3; // number of dimensions of tensors + const size_t m_td=3; // number of dimensions of tensors (non-static, sometimes useful) - // data arrays + // Data arrays xt::xtensor m_x; // nodal positions stored per element [nelem, nne, ndim] xt::xtensor m_w; // weight of each integration point [nip] xt::xtensor m_xi; // local coordinate of each integration point [nip, ndim] xt::xtensor m_N; // shape functions [nip, nne] - xt::xtensor m_dNxi; // shape function gradients w.r.t. local coordinate [nip, nne, ndim] + xt::xtensor m_dNxi; // shape function grad. w.r.t. local coor. [nip, nne, ndim] xt::xtensor m_B; // B-matrix [nelem, nne, tdim, tdim, tdim] xt::xtensor m_vol; // integration point volume [nelem, nip] }; // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #include "ElementQuad4Axisymmetric.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementQuad4Axisymmetric.hpp b/include/GooseFEM/ElementQuad4Axisymmetric.hpp index 84fa1c6..e9300a2 100644 --- a/include/GooseFEM/ElementQuad4Axisymmetric.hpp +++ b/include/GooseFEM/ElementQuad4Axisymmetric.hpp @@ -1,578 +1,609 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTQUADAXISYMMETRIC_HPP #define GOOSEFEM_ELEMENTQUADAXISYMMETRIC_HPP // ------------------------------------------------------------------------------------------------- -#include "GooseFEM.h" +#include "ElementQuad4Axisymmetric.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Quad4 { // ================================================================================================= -inline QuadratureAxisymmetric::QuadratureAxisymmetric(const xt::xtensor &x) : +inline QuadratureAxisymmetric::QuadratureAxisymmetric(const xt::xtensor& x) : QuadratureAxisymmetric(x, Gauss::xi(), Gauss::w()) {} // ------------------------------------------------------------------------------------------------- -inline QuadratureAxisymmetric::QuadratureAxisymmetric(const xt::xtensor &x, - const xt::xtensor &xi, const xt::xtensor &w) : +inline QuadratureAxisymmetric::QuadratureAxisymmetric( + const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w) : m_x(x), m_w(w), m_xi(xi) { - // check input - assert( m_x.shape()[1] == m_nne ); - assert( m_x.shape()[2] == m_ndim ); + assert(m_x.shape()[1] == m_nne ); + assert(m_x.shape()[2] == m_ndim); - // extract number of elements and number of integration points m_nelem = m_x.shape()[0]; m_nip = m_w.size(); - // check input - assert( m_xi.shape()[0] == m_nip ); - assert( m_xi.shape()[1] == m_ndim ); - assert( m_w .size() == m_nip ); + assert(m_xi.shape()[0] == m_nip ); + assert(m_xi.shape()[1] == m_ndim); + assert(m_w .size() == m_nip ); - // allocate arrays m_N = xt::empty({ m_nip, m_nne }); m_dNxi = xt::empty({ m_nip, m_nne, m_ndim }); m_B = xt::empty({m_nelem, m_nip, m_nne, m_tdim, m_tdim, m_tdim}); m_vol = xt::empty({m_nelem, m_nip }); // shape functions - for ( size_t q = 0 ; q < m_nip 
; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { m_N(q,0) = .25 * (1.-m_xi(q,0)) * (1.-m_xi(q,1)); m_N(q,1) = .25 * (1.+m_xi(q,0)) * (1.-m_xi(q,1)); m_N(q,2) = .25 * (1.+m_xi(q,0)) * (1.+m_xi(q,1)); m_N(q,3) = .25 * (1.-m_xi(q,0)) * (1.+m_xi(q,1)); } // shape function gradients in local coordinates - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - dN / dxi_0 m_dNxi(q,0,0) = -.25*(1.-m_xi(q,1)); m_dNxi(q,1,0) = +.25*(1.-m_xi(q,1)); m_dNxi(q,2,0) = +.25*(1.+m_xi(q,1)); m_dNxi(q,3,0) = -.25*(1.+m_xi(q,1)); // - dN / dxi_1 m_dNxi(q,0,1) = -.25*(1.-m_xi(q,0)); m_dNxi(q,1,1) = -.25*(1.+m_xi(q,0)); m_dNxi(q,2,1) = +.25*(1.+m_xi(q,0)); m_dNxi(q,3,1) = +.25*(1.-m_xi(q,0)); } // compute the shape function gradients, based on "x" compute_dN(); } // ------------------------------------------------------------------------------------------------- -inline size_t QuadratureAxisymmetric::nelem() const { return m_nelem; }; +inline size_t QuadratureAxisymmetric::nelem() const +{ return m_nelem; }; -inline size_t QuadratureAxisymmetric::nne() const { return m_nne; }; +inline size_t QuadratureAxisymmetric::nne() const +{ return m_nne; }; -inline size_t QuadratureAxisymmetric::ndim() const { return m_ndim; }; +inline size_t QuadratureAxisymmetric::ndim() const +{ return m_ndim; }; -inline size_t QuadratureAxisymmetric::nip() const { return m_nip; }; +inline size_t QuadratureAxisymmetric::nip() const +{ return m_nip; }; // ------------------------------------------------------------------------------------------------- -inline void QuadratureAxisymmetric::dV(xt::xtensor &qscalar) const +inline void QuadratureAxisymmetric::dV(xt::xtensor& qscalar) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); + assert(qscalar.shape()[0] == m_nelem); + assert(qscalar.shape()[1] == m_nip ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) qscalar(e,q) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void QuadratureAxisymmetric::dV(xt::xtensor &qtensor) const +inline void QuadratureAxisymmetric::dV(xt::xtensor& qtensor) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) - for ( size_t i = 0 ; i < m_tdim ; ++i ) - for ( size_t j = 0 ; j < m_tdim ; ++j ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) + for (size_t i = 0 ; i < m_tdim ; ++i) + for (size_t j = 0 ; j < m_tdim ; ++j) qtensor(e,q,i,j) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void QuadratureAxisymmetric::update_x(const xt::xtensor &x) +inline void QuadratureAxisymmetric::dV(xt::xarray& qtensor) const { - assert( x.shape()[0] == m_nelem ); - assert( x.shape()[1] == m_nne ); - assert( x.shape()[2] == m_ndim ); - assert( x.size() == m_x.size() ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + + xt::dynamic_shape strides = { + 
static_cast(m_vol.strides()[0]), + static_cast(m_vol.strides()[1])}; + + for (size_t i = 2; i < qtensor.shape().size(); ++i) + strides.push_back(0); + + qtensor = xt::strided_view(m_vol, qtensor.shape(), std::move(strides), 0ul, xt::layout_type::dynamic); +} + +// ------------------------------------------------------------------------------------------------- + +inline void QuadratureAxisymmetric::update_x(const xt::xtensor& x) +{ + assert(x.shape()[0] == m_nelem ); + assert(x.shape()[1] == m_nne ); + assert(x.shape()[2] == m_ndim ); + assert(x.size() == m_x.size()); - // update positions xt::noalias(m_x) = x; // update the shape function gradients for the new "x" compute_dN(); } // ------------------------------------------------------------------------------------------------- inline void QuadratureAxisymmetric::compute_dN() { // zero-initialize full B-matrix (most component remain zero, and are not written) m_B.fill(0.0); #pragma omp parallel { // allocate local variables xt::xtensor_fixed> J, Jinv; // loop over all elements (in parallel) #pragma omp for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias nodal positions auto x = xt::adapt(&m_x(e,0,0), xt::xshape()); // loop over integration points - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNxi = xt::adapt(&m_dNxi( q,0,0 ), xt::xshape()); auto B = xt::adapt(&m_B (e,q,0,0,0,0), xt::xshape()); auto N = xt::adapt(&m_N ( q,0 ), xt::xshape()); // - Jacobian (loops unrolled for efficiency) // J(i,j) += dNxi(m,i) * x(m,j); J(0,0) = dNxi(0,0)*x(0,0) + dNxi(1,0)*x(1,0) + dNxi(2,0)*x(2,0) + dNxi(3,0)*x(3,0); J(0,1) = dNxi(0,0)*x(0,1) + dNxi(1,0)*x(1,1) + dNxi(2,0)*x(2,1) + dNxi(3,0)*x(3,1); J(1,0) = dNxi(0,1)*x(0,0) + dNxi(1,1)*x(1,0) + dNxi(2,1)*x(2,0) + dNxi(3,1)*x(3,0); J(1,1) = dNxi(0,1)*x(0,1) + dNxi(1,1)*x(1,1) + dNxi(2,1)*x(2,1) + dNxi(3,1)*x(3,1); // - determinant and inverse of the Jacobian double Jdet = inv(J, Jinv); // - get radius for computation of volume double rq = N(0)*x(0,1) + N(1)*x(1,1) + N(2)*x(2,1) + N(3)*x(3,1); // - compute the B matrix (loops partly unrolled for efficiency) // N.B. "dNx(m,i) += Jinv(i,j) * dNxi(m,j);" for (size_t m = 0; m < m_nne; ++m) { B(m,0,0,0) = Jinv(1,0) * dNxi(m,0) + Jinv(1,1) * dNxi(m,1); // B(m, r, r, r) = dNdx(m,1) B(m,0,2,2) = Jinv(1,0) * dNxi(m,0) + Jinv(1,1) * dNxi(m,1); // B(m, r, z, z) = dNdx(m,1) B(m,1,1,0) = 1./rq * N(m); // B(m, t, t, r) B(m,2,0,0) = Jinv(0,0) * dNxi(m,0) + Jinv(0,1) * dNxi(m,1); // B(m, z, r, r) = dNdx(m,0) B(m,2,2,2) = Jinv(0,0) * dNxi(m,0) + Jinv(0,1) * dNxi(m,1); // B(m, z, z, z) = dNdx(m,0) } // - integration point volume m_vol(e, q) = m_w(q) * Jdet * 2. 
* M_PI * rq; } } } } // ------------------------------------------------------------------------------------------------- inline void QuadratureAxisymmetric::gradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); // zero-initialize (zero components not written below) qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto B = xt::adapt(&m_B (e,q,0,0,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0 ), xt::xshape()); // - evaluate dyadic product (loops unrolled for efficiency) // gradu(i,j) += B(m,i,j,k) * u(m,perm(k)) // (where perm(0) = 1, perm(2) = 0) gradu(0,0) = B(0,0,0,0)*u(0,1) + B(1,0,0,0)*u(1,1) + B(2,0,0,0)*u(2,1) + B(3,0,0,0)*u(3,1); gradu(1,1) = B(0,1,1,0)*u(0,1) + B(1,1,1,0)*u(1,1) + B(2,1,1,0)*u(2,1) + B(3,1,1,0)*u(3,1); gradu(2,2) = B(0,2,2,2)*u(0,0) + B(1,2,2,2)*u(1,0) + B(2,2,2,2)*u(2,0) + B(3,2,2,2)*u(3,0); gradu(0,2) = B(0,0,2,2)*u(0,0) + B(1,0,2,2)*u(1,0) + B(2,0,2,2)*u(2,0) + B(3,0,2,2)*u(3,0); gradu(2,0) = B(0,2,0,0)*u(0,1) + B(1,2,0,0)*u(1,1) + B(2,2,0,0)*u(2,1) + B(3,2,0,0)*u(3,1); } } } // ------------------------------------------------------------------------------------------------- inline void QuadratureAxisymmetric::gradN_vector_T( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); // zero-initialize (zero components not written below) qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
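#include <array>
#include <cmath>

// The integration-point volume assembled at the end of compute_dN() above:
// the radius is interpolated from the nodal coordinates stored in column 1
// of "x", and the usual w * det(J) weight picks up the extra factor 2*pi*r
// of the axisymmetric volume element. Standalone sketch, plain arrays.
double axisymmetric_dV(
    const std::array<double,4>& N,               // shape functions at the point
    const std::array<std::array<double,2>,4>& x, // nodal coordinates [nne, ndim]
    double w, double Jdet)
{
    double r = N[0]*x[0][1] + N[1]*x[1][1] + N[2]*x[2][1] + N[3]*x[3][1];
    return w * Jdet * 2.0 * M_PI * r;
}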
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto B = xt::adapt(&m_B (e,q,0,0,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0 ), xt::xshape()); // - evaluate transpose of dyadic product (loops unrolled for efficiency) // gradu(j,i) += B(m,i,j,k) * u(m,perm(k)) // (where perm(0) = 1, perm(2) = 0) gradu(0,0) = B(0,0,0,0)*u(0,1) + B(1,0,0,0)*u(1,1) + B(2,0,0,0)*u(2,1) + B(3,0,0,0)*u(3,1); gradu(1,1) = B(0,1,1,0)*u(0,1) + B(1,1,1,0)*u(1,1) + B(2,1,1,0)*u(2,1) + B(3,1,1,0)*u(3,1); gradu(2,2) = B(0,2,2,2)*u(0,0) + B(1,2,2,2)*u(1,0) + B(2,2,2,2)*u(2,0) + B(3,2,2,2)*u(3,0); gradu(2,0) = B(0,0,2,2)*u(0,0) + B(1,0,2,2)*u(1,0) + B(2,0,2,2)*u(2,0) + B(3,0,2,2)*u(3,0); gradu(0,2) = B(0,2,0,0)*u(0,1) + B(1,2,0,0)*u(1,1) + B(2,2,0,0)*u(2,1) + B(3,2,0,0)*u(3,1); } } } // ------------------------------------------------------------------------------------------------- - inline void QuadratureAxisymmetric::symGradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); // zero-initialize (zero components not written below) qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto B = xt::adapt(&m_B (e,q,0,0,0,0), xt::xshape()); auto eps = xt::adapt(&qtensor(e,q,0,0 ), xt::xshape()); // - evaluate symmetrized dyadic product (loops unrolled for efficiency) // gradu(j,i) += B(m,i,j,k) * u(m,perm(k)) // eps (j,i) = 0.5 * ( gradu(i,j) + gradu(j,i) ) // (where perm(0) = 1, perm(2) = 0) eps(0,0) = B(0,0,0,0)*u(0,1) + B(1,0,0,0)*u(1,1) + B(2,0,0,0)*u(2,1) + B(3,0,0,0)*u(3,1); eps(1,1) = B(0,1,1,0)*u(0,1) + B(1,1,1,0)*u(1,1) + B(2,1,1,0)*u(2,1) + B(3,1,1,0)*u(3,1); eps(2,2) = B(0,2,2,2)*u(0,0) + B(1,2,2,2)*u(1,0) + B(2,2,2,2)*u(2,0) + B(3,2,2,2)*u(3,0); eps(2,0) = ( B(0,0,2,2)*u(0,0) + B(1,0,2,2)*u(1,0) + B(2,0,2,2)*u(2,0) + B(3,0,2,2)*u(3,0) + B(0,2,0,0)*u(0,1) + B(1,2,0,0)*u(1,1) + B(2,2,0,0)*u(2,1) + B(3,2,0,0)*u(3,1) ) * 0.5; eps(0,2) = eps(2,0); } } } // ------------------------------------------------------------------------------------------------- inline void QuadratureAxisymmetric::int_N_scalar_NT_dV( - const xt::xtensor &qscalar, xt::xtensor &elemmat) const + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(qscalar.shape()[0] == m_nelem ); + assert(qscalar.shape()[1] == m_nip ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize: matrix of matrices elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. 
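#include <array>

// The eps(1,1) component assembled above is the hoop strain of the
// axisymmetric formulation, eps_tt = u_r / r: compute_dN() stores
// B(m,1,1,0) = N(m) / r ("B(m, t, t, r)"), so contracting with the radial
// displacement (column 1 of the element vector) interpolates u_r and divides
// by the radius. Standalone sketch of that single component.
double hoop_strain(
    const std::array<double,4>& N,               // shape functions at the point
    const std::array<std::array<double,2>,4>& u, // nodal displacements [nne, ndim]
    double r)                                    // radius at the point
{
    double ur = N[0]*u[0][1] + N[1]*u[1][1] + N[2]*u[2][1] + N[3]*u[3][1];
    return ur / r;
}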
mass matrix) auto M = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto N = xt::adapt(&m_N(q,0), xt::xshape()); auto& vol = m_vol (e,q); auto& rho = qscalar(e,q); // - evaluate scalar product, for all dimensions, and assemble // M(m*ndim+i,n*ndim+i) += N(m) * scalar * N(n) * dV - for ( size_t m = 0 ; m < m_nne ; ++m ) { - for ( size_t n = 0 ; n < m_nne ; ++n ) { + for (size_t m = 0 ; m < m_nne ; ++m ){ + for (size_t n = 0 ; n < m_nne ; ++n ){ M(m*m_ndim+0, n*m_ndim+0) += N(m) * rho * N(n) * vol; M(m*m_ndim+1, n*m_ndim+1) += N(m) * rho * N(n) * vol; } } } } } // ------------------------------------------------------------------------------------------------- -inline void QuadratureAxisymmetric::int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const +inline void QuadratureAxisymmetric::int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); // zero-initialize output: matrix of vectors elemvec.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. 
nodal force) auto f = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto B = xt::adapt(&m_B(e,q,0,0,0,0), xt::xshape()); auto sig = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - evaluate dot product, and assemble // f(m,i) += B(m,i,j,perm(k)) * sig(i,j) * dV // (where perm(0) = 1, perm(2) = 0) - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { f(m,0) += ( B(m,2,2,2)*sig(2,2) + B(m,0,2,2)*sig(0,2) ) * vol; f(m,1) += ( B(m,0,0,0)*sig(0,0) + B(m,1,1,0)*sig(1,1) + B(m,2,0,0)*sig(2,0) ) * vol; } } } } // ------------------------------------------------------------------------------------------------- -inline void QuadratureAxisymmetric::int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const +inline void QuadratureAxisymmetric::int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); - assert( qtensor.shape()[4] == m_tdim ); - assert( qtensor.shape()[5] == m_tdim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); + assert(qtensor.shape()[4] == m_tdim ); + assert(qtensor.shape()[5] == m_tdim ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize output: matrix of vector elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. 
nodal force) auto K = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto B = xt::adapt(&m_B(e,q,0,0,0,0), xt::xshape()); auto C = xt::adapt(&qtensor(e,q,0,0,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - compute product: // K(m*m_ndim+perm(c), n*m_ndim+perm(f)) = B(m,a,b,c) * C(a,b,d,e) * B(n,e,d,f) * vol; // (where perm(0) = 1, perm(2) = 0) - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { - for ( size_t n = 0 ; n < m_nne ; ++n ) + for (size_t n = 0 ; n < m_nne ; ++n) { K(m*m_ndim+1, n*m_ndim+1) += B(m,0,0,0) * C(0,0,0,0) * B(n,0,0,0) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,0,0,0) * C(0,0,1,1) * B(n,1,1,0) * vol; K(m*m_ndim+1, n*m_ndim+0) += B(m,0,0,0) * C(0,0,2,2) * B(n,2,2,2) * vol; K(m*m_ndim+1, n*m_ndim+0) += B(m,0,0,0) * C(0,0,2,0) * B(n,0,2,2) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,0,0,0) * C(0,0,0,2) * B(n,2,0,0) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,1,1,0) * C(1,1,0,0) * B(n,0,0,0) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,1,1,0) * C(1,1,1,1) * B(n,1,1,0) * vol; K(m*m_ndim+1, n*m_ndim+0) += B(m,1,1,0) * C(1,1,2,2) * B(n,2,2,2) * vol; K(m*m_ndim+1, n*m_ndim+0) += B(m,1,1,0) * C(1,1,2,0) * B(n,0,2,2) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,1,1,0) * C(1,1,0,2) * B(n,2,0,0) * vol; K(m*m_ndim+0, n*m_ndim+1) += B(m,2,2,2) * C(2,2,0,0) * B(n,0,0,0) * vol; K(m*m_ndim+0, n*m_ndim+1) += B(m,2,2,2) * C(2,2,1,1) * B(n,1,1,0) * vol; K(m*m_ndim+0, n*m_ndim+0) += B(m,2,2,2) * C(2,2,2,2) * B(n,2,2,2) * vol; K(m*m_ndim+0, n*m_ndim+0) += B(m,2,2,2) * C(2,2,2,0) * B(n,0,2,2) * vol; K(m*m_ndim+0, n*m_ndim+1) += B(m,2,2,2) * C(2,2,0,2) * B(n,2,0,0) * vol; K(m*m_ndim+0, n*m_ndim+1) += B(m,0,2,2) * C(0,2,0,0) * B(n,0,0,0) * vol; K(m*m_ndim+0, n*m_ndim+1) += B(m,0,2,2) * C(0,2,1,1) * B(n,1,1,0) * vol; K(m*m_ndim+0, n*m_ndim+0) += B(m,0,2,2) * C(0,2,2,2) * B(n,2,2,2) * vol; K(m*m_ndim+0, n*m_ndim+0) += B(m,0,2,2) * C(0,2,2,0) * B(n,0,2,2) * vol; K(m*m_ndim+0, n*m_ndim+1) += B(m,0,2,2) * C(0,2,0,2) * B(n,2,0,0) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,2,0,0) * C(2,0,0,0) * B(n,0,0,0) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,2,0,0) * C(2,0,1,1) * B(n,1,1,0) * vol; K(m*m_ndim+1, n*m_ndim+0) += B(m,2,0,0) * C(2,0,2,2) * B(n,2,2,2) * vol; K(m*m_ndim+1, n*m_ndim+0) += B(m,2,0,0) * C(2,0,2,0) * B(n,0,2,2) * vol; K(m*m_ndim+1, n*m_ndim+1) += B(m,2,0,0) * C(2,0,0,2) * B(n,2,0,0) * vol; } } } } } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::dV() const +inline xt::xtensor QuadratureAxisymmetric::DV() const { xt::xtensor out = xt::empty({m_nelem, m_nip}); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::dVtensor() const +inline xt::xarray QuadratureAxisymmetric::DV(size_t rank) const { - xt::xtensor out = xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); + std::vector shape = {m_nelem, m_nip}; + + for (size_t i = 0; i < rank; ++i) + shape.push_back(m_td); + + xt::xarray out = xt::empty(shape); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::gradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor QuadratureAxisymmetric::GradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = 
xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); this->gradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::gradN_vector_T(const xt::xtensor &elemvec) const +inline xt::xtensor QuadratureAxisymmetric::GradN_vector_T( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); this->gradN_vector_T(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::symGradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor QuadratureAxisymmetric::SymGradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); this->symGradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::int_N_scalar_NT_dV( - const xt::xtensor &qscalar) const +inline xt::xtensor QuadratureAxisymmetric::Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const { xt::xtensor elemmat = xt::empty({m_nelem, m_nne*m_ndim, m_nne*m_ndim}); this->int_N_scalar_NT_dV(qscalar, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::int_gradN_dot_tensor2_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor QuadratureAxisymmetric::Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); this->int_gradN_dot_tensor2_dV(qtensor, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadratureAxisymmetric::int_gradN_dot_tensor4_dot_gradNT_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor QuadratureAxisymmetric::Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemmat = xt::empty({m_nelem, m_ndim*m_nne, m_ndim*m_nne}); this->int_gradN_dot_tensor4_dot_gradNT_dV(qtensor, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementQuad4Planar.h b/include/GooseFEM/ElementQuad4Planar.h index f7105eb..6b46d78 100644 --- a/include/GooseFEM/ElementQuad4Planar.h +++ b/include/GooseFEM/ElementQuad4Planar.h @@ -1,146 +1,176 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTQUAD4PLANAR_H #define GOOSEFEM_ELEMENTQUAD4PLANAR_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Quad4 { // ------------------------------------------------------------------------------------------------- class QuadraturePlanar { public: - // fixed dimensions: + // Fixed dimensions: // ndim = 2 - number of dimensions // nne = 4 - number of nodes per element // tdim = 3 - number of dimensions of tensors // - // naming convention: + // Naming convention: // "elemmat" - matrices stored per element - [nelem, nne*ndim, nne*ndim] // "elemvec" - nodal vectors stored per element - [nelem, nne, ndim] // "qtensor" - integration point tensor - [nelem, nip, tdim, tdim] // "qscalar" - integration point scalar - [nelem, nip] - // constructor: integration point coordinates and weights are optional (default: Gauss) + // Constructor: integration point coordinates and weights are optional (default: Gauss) QuadraturePlanar() = default; - QuadraturePlanar(const xt::xtensor &x, double thick=1.); + QuadraturePlanar( + const xt::xtensor& x, + double thick=1.); - QuadraturePlanar(const xt::xtensor &x, const xt::xtensor &xi, const xt::xtensor &w, double thick=1.); + QuadraturePlanar( + const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w, + double thick=1.); - // update the nodal positions (shape of "x" should match the earlier definition) + // Update the nodal positions (shape of "x" should match the earlier definition) - void update_x(const xt::xtensor &x); + void update_x(const xt::xtensor& x); - // return dimensions + // Return dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t ndim() const; // number of dimension size_t nip() const; // number of integration points - // return integration volume + // Return shape function gradients - void dV(xt::xtensor &qscalar) const; + xt::xtensor gradN() const; - void dV(xt::xtensor &qtensor) const; // same volume for all tensor components + // Return integration volume - // dyadic product "qtensor(i,j) += dNdx(m,i) * elemvec(m,j)", its transpose and its symmetric part + void dV(xt::xtensor& qscalar) const; - void gradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void dV(xt::xtensor& qtensor) const; // same volume for all tensor components - void gradN_vector_T(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + void dV(xt::xarray& qtensor) const; // same volume for all tensor components - void symGradN_vector(const xt::xtensor &elemvec, - xt::xtensor &qtensor) const; + // Dyadic product (and its transpose and symmetric part) + // qtensor(i,j) += dNdx(m,i) * elemvec(m,j) - // integral of the scalar product "elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV" + void gradN_vector( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void int_N_scalar_NT_dV(const xt::xtensor &qscalar, - xt::xtensor &elemmat) const; + void gradN_vector_T( + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - // integral of the dot product "elemvec(m,j) += dNdx(m,i) * qtensor(i,j) * dV" + void symGradN_vector( + 
const xt::xtensor& elemvec, + xt::xtensor& qtensor) const; // overwritten - void int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const; + // Integral of the scalar product + // elemmat(m*ndim+i,n*ndim+i) += N(m) * qscalar * N(n) * dV - // integral of the dot product "elemmat(m*2+j, n*2+k) += dNdx(m,i) * qtensor(i,j,k,l) * dNdx(n,l) * dV" + void int_N_scalar_NT_dV( + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const; // overwritten - void int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const; + // Integral of the dot product + // elemvec(m,j) += dNdx(m,i) * qtensor(i,j) * dV - // auto allocation of the functions above + void int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const; // overwritten - xt::xtensor dV() const; + // Integral of the dot product + // elemmat(m*2+j, n*2+k) += dNdx(m,i) * qtensor(i,j,k,l) * dNdx(n,l) * dV - xt::xtensor dVtensor() const; + void int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const; // overwritten - xt::xtensor gradN_vector(const xt::xtensor &elemvec) const; + // Auto-allocation of the functions above - xt::xtensor gradN_vector_T(const xt::xtensor &elemvec) const; + xt::xtensor DV() const; - xt::xtensor symGradN_vector(const xt::xtensor &elemvec) const; + xt::xarray DV( + size_t rank) const; - xt::xtensor int_N_scalar_NT_dV(const xt::xtensor &qscalar) const; + xt::xtensor GradN_vector( + const xt::xtensor& elemvec) const; - xt::xtensor int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor) const; + xt::xtensor GradN_vector_T( + const xt::xtensor& elemvec) const; - xt::xtensor int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor) const; + xt::xtensor SymGradN_vector( + const xt::xtensor& elemvec) const; + + xt::xtensor Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const; + + xt::xtensor Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const; + + xt::xtensor Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const; private: - // compute "vol" and "dNdx" based on current "x" + // Compute "vol" and "dNdx" based on current "x" void compute_dN(); private: - // dimensions (flexible) + // Dimensions (flexible) size_t m_nelem; // number of elements size_t m_nip; // number of integration points - // dimensions (fixed for this element type) + // Dimensions (fixed for this element type) static const size_t m_nne=4; // number of nodes per element static const size_t m_ndim=2; // number of dimensions static const size_t m_tdim=3; // number of dimensions of tensors + const size_t m_td=3; // number of dimensions of tensors (non-static, sometimes useful) - // data arrays + // Data arrays xt::xtensor m_x; // nodal positions stored per element [nelem, nne, ndim] xt::xtensor m_w; // weight of each integration point [nip] xt::xtensor m_xi; // local coordinate of each integration point [nip, ndim] xt::xtensor m_N; // shape functions [nip, nne] - xt::xtensor m_dNxi; // shape function gradients w.r.t. local coordinate [nip, nne, ndim] - xt::xtensor m_dNx; // shape function gradients w.r.t. global coordinate [nelem, nip, nne, ndim] + xt::xtensor m_dNxi; // shape function grad. w.r.t. local coor. [nip, nne, ndim] + xt::xtensor m_dNx; // shape function grad. w.r.t. global coor. 
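// Minimal illustration (hypothetical variable names, a sketch only) of the
// new QuadraturePlanar accessors declared above: the constructor takes an
// out-of-plane thickness, and gradN() exposes the shape-function gradients
// [nelem, nip, nne, ndim] so custom integrals can be assembled outside the
// class.
#include <GooseFEM/GooseFEM.h>

void example_planar(const xt::xtensor<double,3>& elem_coor) // [nelem, nne, ndim]
{
    double thickness = 0.1; // hypothetical value
    GooseFEM::Element::Quad4::QuadraturePlanar quad(elem_coor, thickness);

    auto dNx = quad.gradN(); // [nelem, nip, nne, ndim]
    auto dV  = quad.DV();    // [nelem, nip], includes the thickness
}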
[nelem, nip, nne, ndim] xt::xtensor m_vol; // integration point volume [nelem, nip] - // thickness + // Thickness double m_thick; }; // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #include "ElementQuad4Planar.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/ElementQuad4Planar.hpp b/include/GooseFEM/ElementQuad4Planar.hpp index 108ca47..d8d634c 100644 --- a/include/GooseFEM/ElementQuad4Planar.hpp +++ b/include/GooseFEM/ElementQuad4Planar.hpp @@ -1,529 +1,565 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ELEMENTQUAD4PLANAR_HPP #define GOOSEFEM_ELEMENTQUAD4PLANAR_HPP // ------------------------------------------------------------------------------------------------- #include "ElementQuad4Planar.h" // ================================================================================================= namespace GooseFEM { namespace Element { namespace Quad4 { // ================================================================================================= -inline QuadraturePlanar::QuadraturePlanar(const xt::xtensor &x, double thick) : +inline QuadraturePlanar::QuadraturePlanar(const xt::xtensor& x, double thick) : QuadraturePlanar(x, Gauss::xi(), Gauss::w(), thick) {} // ------------------------------------------------------------------------------------------------- -inline QuadraturePlanar::QuadraturePlanar(const xt::xtensor &x, - const xt::xtensor &xi, const xt::xtensor &w, double thick) : +inline QuadraturePlanar::QuadraturePlanar( + const xt::xtensor& x, + const xt::xtensor& xi, + const xt::xtensor& w, + double thick) : m_x(x), m_w(w), m_xi(xi), m_thick(thick) { - // check input - assert( m_x.shape()[1] == m_nne ); - assert( m_x.shape()[2] == m_ndim ); + assert(m_x.shape()[1] == m_nne ); + assert(m_x.shape()[2] == m_ndim); - // extract number of elements and number of integration points m_nelem = m_x.shape()[0]; m_nip = m_w.size(); - // check input - assert( m_xi.shape()[0] == m_nip ); - assert( m_xi.shape()[1] == m_ndim ); - assert( m_w .size() == m_nip ); + assert(m_xi.shape()[0] == m_nip ); + assert(m_xi.shape()[1] == m_ndim); + assert(m_w .size() == m_nip ); - // allocate arrays m_N = xt::empty({ m_nip, m_nne }); m_dNxi = xt::empty({ m_nip, m_nne, m_ndim}); m_dNx = xt::empty({m_nelem, m_nip, m_nne, m_ndim}); m_vol = xt::empty({m_nelem, m_nip }); // shape functions - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { m_N(q,0) = .25 * (1.-m_xi(q,0)) * (1.-m_xi(q,1)); m_N(q,1) = .25 * (1.+m_xi(q,0)) * (1.-m_xi(q,1)); m_N(q,2) = .25 * (1.+m_xi(q,0)) * (1.+m_xi(q,1)); m_N(q,3) = .25 * (1.-m_xi(q,0)) * (1.+m_xi(q,1)); } // shape function gradients in local coordinates - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - dN / dxi_0 m_dNxi(q,0,0) = -.25*(1.-m_xi(q,1)); m_dNxi(q,1,0) = +.25*(1.-m_xi(q,1)); m_dNxi(q,2,0) = +.25*(1.+m_xi(q,1)); m_dNxi(q,3,0) = -.25*(1.+m_xi(q,1)); // - dN / dxi_1 m_dNxi(q,0,1) = -.25*(1.-m_xi(q,0)); m_dNxi(q,1,1) = -.25*(1.+m_xi(q,0)); m_dNxi(q,2,1) = +.25*(1.+m_xi(q,0)); 
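#include <array>
#include <cassert>
#include <cmath>

// Standalone evaluation of the bilinear Quad4 shape functions used above, at
// an arbitrary local coordinate (xi0, xi1); as a sanity check they sum to one
// (partition of unity). A sketch mirroring the per-integration-point formulas
// in the constructor, not library code.
std::array<double,4> shape_functions(double xi0, double xi1)
{
    std::array<double,4> N = {
        0.25 * (1.0 - xi0) * (1.0 - xi1),
        0.25 * (1.0 + xi0) * (1.0 - xi1),
        0.25 * (1.0 + xi0) * (1.0 + xi1),
        0.25 * (1.0 - xi0) * (1.0 + xi1)};
    assert(std::abs(N[0] + N[1] + N[2] + N[3] - 1.0) < 1e-12);
    return N;
}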
m_dNxi(q,3,1) = +.25*(1.-m_xi(q,0)); } // compute the shape function gradients, based on "x" compute_dN(); } // ------------------------------------------------------------------------------------------------- -inline size_t QuadraturePlanar::nelem() const { return m_nelem; }; +inline size_t QuadraturePlanar::nelem() const +{ return m_nelem; }; -inline size_t QuadraturePlanar::nne() const { return m_nne; }; +inline size_t QuadraturePlanar::nne() const +{ return m_nne; }; -inline size_t QuadraturePlanar::ndim() const { return m_ndim; }; +inline size_t QuadraturePlanar::ndim() const +{ return m_ndim; }; -inline size_t QuadraturePlanar::nip() const { return m_nip; }; +inline size_t QuadraturePlanar::nip() const +{ return m_nip; }; + +inline xt::xtensor QuadraturePlanar::gradN() const +{ return m_dNx; }; // ------------------------------------------------------------------------------------------------- -inline void QuadraturePlanar::dV(xt::xtensor &qscalar) const +inline void QuadraturePlanar::dV(xt::xtensor& qscalar) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); + assert(qscalar.shape()[0] == m_nelem); + assert(qscalar.shape()[1] == m_nip ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) qscalar(e,q) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void QuadraturePlanar::dV(xt::xtensor &qtensor) const +inline void QuadraturePlanar::dV(xt::xtensor& qtensor) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t q = 0 ; q < m_nip ; ++q ) - for ( size_t i = 0 ; i < m_tdim ; ++i ) - for ( size_t j = 0 ; j < m_tdim ; ++j ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t q = 0 ; q < m_nip ; ++q) + for (size_t i = 0 ; i < m_tdim ; ++i) + for (size_t j = 0 ; j < m_tdim ; ++j) qtensor(e,q,i,j) = m_vol(e,q); } // ------------------------------------------------------------------------------------------------- -inline void QuadraturePlanar::update_x(const xt::xtensor &x) +inline void QuadraturePlanar::dV(xt::xarray& qtensor) const { - assert( x.shape()[0] == m_nelem ); - assert( x.shape()[1] == m_nne ); - assert( x.shape()[2] == m_ndim ); - assert( x.size() == m_x.size() ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + + xt::dynamic_shape strides = { + static_cast(m_vol.strides()[0]), + static_cast(m_vol.strides()[1])}; + + for (size_t i = 2; i < qtensor.shape().size(); ++i) + strides.push_back(0); + + qtensor = xt::strided_view(m_vol, qtensor.shape(), std::move(strides), 0ul, xt::layout_type::dynamic); +} + +// ------------------------------------------------------------------------------------------------- + +inline void QuadraturePlanar::update_x(const xt::xtensor& x) +{ + assert(x.shape()[0] == m_nelem ); + assert(x.shape()[1] == m_nne ); + assert(x.shape()[2] == m_ndim ); + assert(x.size() == m_x.size()); - // update positions xt::noalias(m_x) = x; // update the shape function gradients for the new "x" compute_dN(); } // 
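#include <array>
#include <cstddef>
#include <vector>

// What the xt::strided_view in dV(xt::xarray&) above sets up: the [nelem, nip]
// volume array is viewed with extra trailing axes whose strides are zero, so
// every tensor component of one integration point reads the same value.
// Standalone illustration of the indexing, plain arrays only.
double vol_as_qtensor(
    const std::vector<double>& vol, // flat [nelem, nip] volumes, row-major
    std::size_t nip,
    std::size_t e, std::size_t q, std::size_t i, std::size_t j)
{
    const std::array<std::size_t,4> strides = {nip, 1, 0, 0}; // trailing axes: stride 0
    return vol[e*strides[0] + q*strides[1] + i*strides[2] + j*strides[3]];
}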
------------------------------------------------------------------------------------------------- inline void QuadraturePlanar::compute_dN() { #pragma omp parallel { // allocate local variables xt::xtensor_fixed> J, Jinv; // loop over all elements (in parallel) #pragma omp for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias nodal positions auto x = xt::adapt(&m_x(e,0,0), xt::xshape()); // loop over integration points - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNxi = xt::adapt(&m_dNxi( q,0,0), xt::xshape()); auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); // - Jacobian (loops unrolled for efficiency) // J(i,j) += dNxi(m,i) * x(m,j); J(0,0) = dNxi(0,0)*x(0,0) + dNxi(1,0)*x(1,0) + dNxi(2,0)*x(2,0) + dNxi(3,0)*x(3,0); J(0,1) = dNxi(0,0)*x(0,1) + dNxi(1,0)*x(1,1) + dNxi(2,0)*x(2,1) + dNxi(3,0)*x(3,1); J(1,0) = dNxi(0,1)*x(0,0) + dNxi(1,1)*x(1,0) + dNxi(2,1)*x(2,0) + dNxi(3,1)*x(3,0); J(1,1) = dNxi(0,1)*x(0,1) + dNxi(1,1)*x(1,1) + dNxi(2,1)*x(2,1) + dNxi(3,1)*x(3,1); // - determinant and inverse of the Jacobian double Jdet = inv(J, Jinv); // - shape function gradients wrt global coordinates (loops partly unrolled for efficiency) // dNx(m,i) += Jinv(i,j) * dNxi(m,j); - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { dNx(m,0) = Jinv(0,0) * dNxi(m,0) + Jinv(0,1) * dNxi(m,1); dNx(m,1) = Jinv(1,0) * dNxi(m,0) + Jinv(1,1) * dNxi(m,1); } // - integration point volume m_vol(e,q) = m_w(q) * Jdet * m_thick; } } } } // ------------------------------------------------------------------------------------------------- inline void QuadraturePlanar::gradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); // zero-initialize (zero z-components not written below) qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
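#include <array>
#include <cstddef>

// Per integration point, compute_dN() above inverts the 2x2 Jacobian, maps the
// local gradients to global ones, and stores the integration-point volume
// including the thickness. A standalone sketch of those three steps (the
// library's inv() helper is assumed to do the same as the inline inverse here).
void point_gradients(
    const std::array<std::array<double,2>,2>& J,    // Jacobian: J(i,j) = dNxi(m,i)*x(m,j)
    const std::array<std::array<double,2>,4>& dNxi, // local shape-function gradients
    double w, double thick,
    std::array<std::array<double,2>,4>& dNx,        // global gradients (output)
    double& vol)                                    // integration-point volume (output)
{
    double Jdet = J[0][0] * J[1][1] - J[0][1] * J[1][0];

    std::array<std::array<double,2>,2> Jinv = {{
        {{  J[1][1] / Jdet, -J[0][1] / Jdet }},
        {{ -J[1][0] / Jdet,  J[0][0] / Jdet }}}};

    // dNx(m,i) = Jinv(i,j) * dNxi(m,j)
    for (std::size_t m = 0; m < 4; ++m) {
        dNx[m][0] = Jinv[0][0] * dNxi[m][0] + Jinv[0][1] * dNxi[m][1];
        dNx[m][1] = Jinv[1][0] * dNxi[m][0] + Jinv[1][1] * dNxi[m][1];
    }

    vol = w * Jdet * thick;
}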
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate dyadic product (loops unrolled for efficiency) // gradu(i,j) += dNx(m,i) * u(m,j) gradu(0,0) = dNx(0,0)*u(0,0) + dNx(1,0)*u(1,0) + dNx(2,0)*u(2,0) + dNx(3,0)*u(3,0); gradu(0,1) = dNx(0,0)*u(0,1) + dNx(1,0)*u(1,1) + dNx(2,0)*u(2,1) + dNx(3,0)*u(3,1); gradu(1,0) = dNx(0,1)*u(0,0) + dNx(1,1)*u(1,0) + dNx(2,1)*u(2,0) + dNx(3,1)*u(3,0); gradu(1,1) = dNx(0,1)*u(0,1) + dNx(1,1)*u(1,1) + dNx(2,1)*u(2,1) + dNx(3,1)*u(3,1); } } } // ------------------------------------------------------------------------------------------------- inline void QuadraturePlanar::gradN_vector_T( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); // zero-initialize (zero z-components not written below) qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto gradu = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate transpose of dyadic product (loops unrolled for efficiency) // gradu(j,i) += dNx(m,i) * u(m,j) gradu(0,0) = dNx(0,0)*u(0,0) + dNx(1,0)*u(1,0) + dNx(2,0)*u(2,0) + dNx(3,0)*u(3,0); gradu(1,0) = dNx(0,0)*u(0,1) + dNx(1,0)*u(1,1) + dNx(2,0)*u(2,1) + dNx(3,0)*u(3,1); gradu(0,1) = dNx(0,1)*u(0,0) + dNx(1,1)*u(1,0) + dNx(2,1)*u(2,0) + dNx(3,1)*u(3,0); gradu(1,1) = dNx(0,1)*u(0,1) + dNx(1,1)*u(1,1) + dNx(2,1)*u(2,1) + dNx(3,1)*u(3,1); } } } // ------------------------------------------------------------------------------------------------- inline void QuadraturePlanar::symGradN_vector( - const xt::xtensor &elemvec, xt::xtensor &qtensor) const + const xt::xtensor& elemvec, + xt::xtensor& qtensor) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nne ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nne ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); // zero-initialize (zero z-components not written below) qtensor.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias element vector (e.g. 
nodal displacements) auto u = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto eps = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); // - evaluate symmetrized dyadic product (loops unrolled for efficiency) // grad(i,j) += dNx(m,i) * u(m,j) // eps (j,i) = 0.5 * ( grad(i,j) + grad(j,i) ) eps(0,0) = dNx(0,0)*u(0,0) + dNx(1,0)*u(1,0) + dNx(2,0)*u(2,0) + dNx(3,0)*u(3,0); eps(1,1) = dNx(0,1)*u(0,1) + dNx(1,1)*u(1,1) + dNx(2,1)*u(2,1) + dNx(3,1)*u(3,1); eps(0,1) = ( dNx(0,0)*u(0,1) + dNx(1,0)*u(1,1) + dNx(2,0)*u(2,1) + dNx(3,0)*u(3,1) + dNx(0,1)*u(0,0) + dNx(1,1)*u(1,0) + dNx(2,1)*u(2,0) + dNx(3,1)*u(3,0) ) * 0.5; eps(1,0) = eps(0,1); } } } // ------------------------------------------------------------------------------------------------- inline void QuadraturePlanar::int_N_scalar_NT_dV( - const xt::xtensor &qscalar, xt::xtensor &elemmat) const + const xt::xtensor& qscalar, + xt::xtensor& elemmat) const { - assert( qscalar.shape()[0] == m_nelem ); - assert( qscalar.shape()[1] == m_nip ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(qscalar.shape()[0] == m_nelem ); + assert(qscalar.shape()[1] == m_nip ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize: matrix of matrices elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. mass matrix) auto M = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto N = xt::adapt(&m_N(q,0), xt::xshape()); auto& vol = m_vol (e,q); auto& rho = qscalar(e,q); // - evaluate scalar product, for all dimensions, and assemble // M(m*ndim+i,n*ndim+i) += N(m) * scalar * N(n) * dV - for ( size_t m = 0 ; m < m_nne ; ++m ) { - for ( size_t n = 0 ; n < m_nne ; ++n ) { + for (size_t m = 0 ; m < m_nne ; ++m ){ + for (size_t n = 0 ; n < m_nne ; ++n ){ M(m*m_ndim+0, n*m_ndim+0) += N(m) * rho * N(n) * vol; M(m*m_ndim+1, n*m_ndim+1) += N(m) * rho * N(n) * vol; } } } } } // ------------------------------------------------------------------------------------------------- -inline void QuadraturePlanar::int_gradN_dot_tensor2_dV(const xt::xtensor &qtensor, - xt::xtensor &elemvec) const +inline void QuadraturePlanar::int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemvec) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); // zero-initialize output: matrix of vectors elemvec.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; 
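To make the symmetrized dyadic product in `symGradN_vector` concrete: at each integration point the displacement gradient is `grad(i,j) = sum_m dNx(m,i) * u(m,j)` and the small-strain tensor is its symmetric part. A minimal sketch with made-up `dNx` and nodal displacements (not values from the library):

```cpp
#include <array>
#include <cstdio>

// grad(i,j) = sum_m dNx(m,i) * u(m,j);  eps = 0.5 * (grad + grad^T)
int main()
{
    std::array<std::array<double,2>,4> dNx = {{{-0.5,-0.5},{+0.5,-0.5},{+0.5,+0.5},{-0.5,+0.5}}};
    std::array<std::array<double,2>,4> u   = {{{0.,0.},{0.01,0.},{0.01,-0.005},{0.,-0.005}}}; // simple stretch

    double grad[2][2] = {};
    for (int m = 0; m < 4; ++m)
        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 2; ++j)
                grad[i][j] += dNx[m][i] * u[m][j];

    // symmetrize: this is what the unrolled code writes into "eps"
    double eps[2][2];
    for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
            eps[i][j] = 0.5 * (grad[i][j] + grad[j][i]);

    std::printf("eps = [[%g, %g], [%g, %g]]\n", eps[0][0], eps[0][1], eps[1][0], eps[1][1]);
}
```

The unrolled version in the library computes the same four entries directly and exploits `eps(1,0) = eps(0,1)`.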
++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. nodal force) auto f = xt::adapt(&elemvec(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx (e,q,0,0), xt::xshape()); auto sig = xt::adapt(&qtensor(e,q,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - evaluate dot product, and assemble - for ( size_t m = 0 ; m < m_nne ; ++m ) + for (size_t m = 0 ; m < m_nne ; ++m) { f(m,0) += ( dNx(m,0) * sig(0,0) + dNx(m,1) * sig(1,0) ) * vol; f(m,1) += ( dNx(m,0) * sig(0,1) + dNx(m,1) * sig(1,1) ) * vol; } } } } // ------------------------------------------------------------------------------------------------- -inline void QuadraturePlanar::int_gradN_dot_tensor4_dot_gradNT_dV(const xt::xtensor &qtensor, - xt::xtensor &elemmat) const +inline void QuadraturePlanar::int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor, + xt::xtensor& elemmat) const { - assert( qtensor.shape()[0] == m_nelem ); - assert( qtensor.shape()[1] == m_nip ); - assert( qtensor.shape()[2] == m_tdim ); - assert( qtensor.shape()[3] == m_tdim ); - assert( qtensor.shape()[4] == m_tdim ); - assert( qtensor.shape()[5] == m_tdim ); + assert(qtensor.shape()[0] == m_nelem); + assert(qtensor.shape()[1] == m_nip ); + assert(qtensor.shape()[2] == m_tdim ); + assert(qtensor.shape()[3] == m_tdim ); + assert(qtensor.shape()[4] == m_tdim ); + assert(qtensor.shape()[5] == m_tdim ); - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); // zero-initialize output: matrix of vector elemmat.fill(0.0); // loop over all elements (in parallel) #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) + for (size_t e = 0 ; e < m_nelem ; ++e) { // alias (e.g. 
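The quadrature step of `int_gradN_dot_tensor2_dV` above accumulates, per integration point, `f(m,i) += dNx(m,j) * sig(j,i) * dV` into the element vector. A small stand-alone sketch of that contraction; the stress, gradients and volume are illustrative numbers, not library data:

```cpp
#include <array>
#include <cstdio>

// Nodal-force contribution of one integration point: f(m,i) += dNx(m,j) * sig(j,i) * dV
int main()
{
    std::array<std::array<double,2>,4> dNx = {{{-0.5,-0.5},{+0.5,-0.5},{+0.5,+0.5},{-0.5,+0.5}}};
    double sig[2][2] = {{1.0, 0.1}, {0.1, 0.5}};  // symmetric (Cauchy) stress at this point
    double dV = 0.25;                             // weight * Jdet * thickness

    std::array<std::array<double,2>,4> f{};       // element force contribution
    for (int m = 0; m < 4; ++m)
        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 2; ++j)
                f[m][i] += dNx[m][j] * sig[j][i] * dV;

    std::printf("f(0,0) = %g, f(0,1) = %g\n", f[0][0], f[0][1]);
}
```

Summing these contributions over all integration points of an element gives the rows of `elemvec` that the function fills.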
nodal force) auto K = xt::adapt(&elemmat(e,0,0), xt::xshape()); // loop over all integration points in element "e" - for ( size_t q = 0 ; q < m_nip ; ++q ) + for (size_t q = 0 ; q < m_nip ; ++q) { // - alias auto dNx = xt::adapt(&m_dNx(e,q,0,0), xt::xshape()); auto C = xt::adapt(&qtensor(e,q,0,0,0,0), xt::xshape()); auto& vol = m_vol(e,q); // - evaluate dot product, and assemble - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t n = 0 ; n < m_nne ; ++n ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) - for ( size_t k = 0 ; k < m_ndim ; ++k ) - for ( size_t l = 0 ; l < m_ndim ; ++l ) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t n = 0 ; n < m_nne ; ++n) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t j = 0 ; j < m_ndim ; ++j) + for (size_t k = 0 ; k < m_ndim ; ++k) + for (size_t l = 0 ; l < m_ndim ; ++l) K(m*m_ndim+j, n*m_ndim+k) += dNx(m,i) * C(i,j,k,l) * dNx(n,l) * vol; } } } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::dV() const +inline xt::xtensor QuadraturePlanar::DV() const { xt::xtensor out = xt::empty({m_nelem, m_nip}); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::dVtensor() const +inline xt::xarray QuadraturePlanar::DV(size_t rank) const { - xt::xtensor out = xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); + std::vector shape = {m_nelem, m_nip}; + + for (size_t i = 0; i < rank; ++i) + shape.push_back(m_td); + + xt::xarray out = xt::empty(shape); this->dV(out); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::gradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor QuadraturePlanar::GradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); this->gradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::gradN_vector_T(const xt::xtensor &elemvec) const +inline xt::xtensor QuadraturePlanar::GradN_vector_T( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); this->gradN_vector_T(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::symGradN_vector(const xt::xtensor &elemvec) const +inline xt::xtensor QuadraturePlanar::SymGradN_vector( + const xt::xtensor& elemvec) const { xt::xtensor qtensor = xt::empty({m_nelem, m_nip, m_tdim, m_tdim}); this->symGradN_vector(elemvec, qtensor); return qtensor; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::int_N_scalar_NT_dV( - const xt::xtensor &qscalar) const +inline xt::xtensor QuadraturePlanar::Int_N_scalar_NT_dV( + const xt::xtensor& qscalar) const { xt::xtensor elemmat = xt::empty({m_nelem, m_nne*m_ndim, m_nne*m_ndim}); this->int_N_scalar_NT_dV(qscalar, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::int_gradN_dot_tensor2_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor 
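A note on the new rank-dependent `DV(rank)` overload introduced above: instead of a dedicated `dVtensor()`, the shape `[nelem, nip]` is extended with `rank` trailing tensor axes and a dynamic-rank array is returned. The sketch below rebuilds that shape logic under the assumption that the trailing dimension is the tensor dimension (`m_tdim` in the class); the sizes passed in are placeholders.

```cpp
#include <cstddef>
#include <vector>
#include <xtensor/xarray.hpp>
#include <xtensor/xbuilder.hpp>

// Build a shape [nelem, nip, tdim, ..., tdim] with "rank" trailing tensor axes and
// allocate a dynamic-rank array for it (what the auto-allocating DV(rank) returns).
xt::xarray<double> dv_like(size_t nelem, size_t nip, size_t tdim, size_t rank)
{
    std::vector<size_t> shape = {nelem, nip};
    for (size_t i = 0; i < rank; ++i)
        shape.push_back(tdim);            // one tensor axis per rank
    return xt::empty<double>(shape);      // e.g. rank = 2 -> [nelem, nip, tdim, tdim]
}

int main()
{
    auto dV2 = dv_like(10, 4, 2, 2);      // per-point volumes broadcast to 2nd-order tensor shape
    return static_cast<int>(dV2.dimension()) - 4;  // 0 if the rank is as expected
}
```

The remaining capitalised wrappers (`GradN_vector`, `Int_N_scalar_NT_dV`, ...) follow the same pattern: allocate the output, call the in-place overload, return the result.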
QuadraturePlanar::Int_gradN_dot_tensor2_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); this->int_gradN_dot_tensor2_dV(qtensor, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor QuadraturePlanar::int_gradN_dot_tensor4_dot_gradNT_dV( - const xt::xtensor &qtensor) const +inline xt::xtensor QuadraturePlanar::Int_gradN_dot_tensor4_dot_gradNT_dV( + const xt::xtensor& qtensor) const { xt::xtensor elemmat = xt::empty({m_nelem, m_ndim*m_nne, m_ndim*m_nne}); this->int_gradN_dot_tensor4_dot_gradNT_dV(qtensor, elemmat); return elemmat; } // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/GooseFEM.h b/include/GooseFEM/GooseFEM.h index 1ffdf78..07ca2cd 100644 --- a/include/GooseFEM/GooseFEM.h +++ b/include/GooseFEM/GooseFEM.h @@ -1,40 +1,43 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_H #define GOOSEFEM_H // ================================================================================================= #ifdef EIGEN_WORLD_VERSION #define GOOSEFEM_EIGEN #endif // ================================================================================================= -#include "Mesh.h" -#include "MeshTri3.h" -#include "MeshQuad4.h" -#include "MeshHex8.h" #include "Element.h" +#include "ElementHex8.h" #include "ElementQuad4.h" -#include "ElementQuad4Planar.h" #include "ElementQuad4Axisymmetric.h" -#include "ElementHex8.h" -#include "Vector.h" -#include "VectorPartitioned.h" +#include "ElementQuad4Planar.h" +#include "Iterate.h" #include "MatrixDiagonal.h" #include "MatrixDiagonalPartitioned.h" -#include "Iterate.h" +#include "Mesh.h" +#include "MeshHex8.h" +#include "MeshQuad4.h" +#include "MeshTri3.h" +#include "Vector.h" +#include "VectorPartitioned.h" #ifdef GOOSEFEM_EIGEN #include "Matrix.h" #include "MatrixPartitioned.h" +#include "MatrixPartitionedTyings.h" +#include "TyingsPeriodic.h" +#include "VectorPartitionedTyings.h" #endif // ================================================================================================= #endif diff --git a/include/GooseFEM/Iterate.h b/include/GooseFEM/Iterate.h index 5904538..286181a 100644 --- a/include/GooseFEM/Iterate.h +++ b/include/GooseFEM/Iterate.h @@ -1,52 +1,51 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_ITERATE_H #define GOOSEFEM_ITERATE_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { namespace Iterate { // ------------------------------------------------------------------------------------------------- class StopList { -private: - - // list with residuals - std::vector m_res; - public: // constructors StopList(size_t n=1); // reset all residuals to infinity (and change the number of residuals to check) void reset(); void reset(size_t n); // update list of residuals, return true if all residuals are below the tolerance bool stop(double res, double tol); +private: + + // list with residuals + std::vector m_res; }; // ------------------------------------------------------------------------------------------------- }} // namespace ... // ================================================================================================= #include "Iterate.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/Matrix.h b/include/GooseFEM/Matrix.h index 55d645f..43a15d5 100644 --- a/include/GooseFEM/Matrix.h +++ b/include/GooseFEM/Matrix.h @@ -1,112 +1,118 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIX_H #define GOOSEFEM_MATRIX_H // ------------------------------------------------------------------------------------------------- #include "config.h" #include #include #include // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- class Matrix { public: - // constructors + // Constructors Matrix() = default; - Matrix(const xt::xtensor &conn, const xt::xtensor &dofs); - // dimensions + Matrix( + const xt::xtensor &conn, + const xt::xtensor &dofs); + + // Dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t nnode() const; // number of nodes size_t ndim() const; // number of dimensions size_t ndof() const; // number of DOFs // DOF lists xt::xtensor dofs() const; // DOFs - // assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] + // Assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] void assemble(const xt::xtensor &elemmat); - // solve: x = A \ b - // x_u = A_uu \ ( b_u - A_up * x_p ) - // b_p = A_pu * x_u + A_pp * x_p + // Solve + // x_u = A_uu \ ( b_u - A_up * x_p ) - void solve(const xt::xtensor &b, - xt::xtensor &x); + void solve( + const xt::xtensor &b, + xt::xtensor &x); // overwritten - void solve(const xt::xtensor &b, - xt::xtensor &x); + void solve( + const xt::xtensor &b, + xt::xtensor &x); // overwritten - // auto allocation of the functions above + // Auto-allocation of the functions above - xt::xtensor solve(const xt::xtensor &b); + xt::xtensor Solve( + const xt::xtensor &b); - xt::xtensor 
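The `Iterate::StopList` declared above only exposes `reset()` and `stop(res, tol)`; its point is to declare convergence only once a short history of residuals is below the tolerance. Below is a hedged, plausible implementation of that interface in plain C++, written from the declaration alone and not copied from the library:

```cpp
#include <algorithm>
#include <cstddef>
#include <limits>
#include <vector>

// Keep the last "n" residuals; report convergence only when all of them are below tol.
class StopListSketch
{
public:
    explicit StopListSketch(size_t n = 1) { reset(n); }

    void reset()         { std::fill(m_res.begin(), m_res.end(), std::numeric_limits<double>::infinity()); }
    void reset(size_t n) { m_res.assign(n, std::numeric_limits<double>::infinity()); }

    // push the newest residual (dropping the oldest) and check the whole history
    bool stop(double res, double tol)
    {
        std::rotate(m_res.begin(), m_res.begin() + 1, m_res.end());
        m_res.back() = res;
        return std::all_of(m_res.begin(), m_res.end(), [tol](double r) { return r < tol; });
    }

private:
    std::vector<double> m_res;  // history of residuals
};

int main()
{
    StopListSketch residuals(3);
    bool converged = false;
    for (double r : {1.0, 0.5, 1e-7, 1e-8, 1e-9})
        converged = residuals.stop(r, 1e-6);
    return converged ? 0 : 1;   // converged only once the whole history is below tol
}
```

Requiring several consecutive small residuals guards against declaring convergence on a single fortuitous iteration.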
solve(const xt::xtensor &b); + xt::xtensor Solve( + const xt::xtensor &b); private: - // the matrix - Eigen::SparseMatrix m_data; + // The matrix + Eigen::SparseMatrix m_A; - // the matrix to assemble - std::vector> m_trip; + // Matrix entries + std::vector> m_T; - // solver (re-used to solve different RHS) + // Solver (re-used to solve different RHS) Eigen::SimplicialLDLT> m_solver; - // signal changes to data compare to the last inverse - bool m_change=false; + // Signal changes to data compare to the last inverse + bool m_factor=false; - // bookkeeping + // Bookkeeping xt::xtensor m_conn; // connectivity [nelem, nne ] xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] - // dimensions + // Dimensions size_t m_nelem; // number of elements size_t m_nne; // number of nodes per element size_t m_nnode; // number of nodes size_t m_ndim; // number of dimensions size_t m_ndof; // number of DOFs - // compute inverse (automatically evaluated by "solve") + // Compute inverse (automatically evaluated by "solve") void factorize(); - // convert arrays (see VectorPartitioned, which contains public functions) + // Convert arrays (Eigen version of Vector, which contains public functions) Eigen::VectorXd asDofs(const xt::xtensor &nodevec) const; void asNode(const Eigen::VectorXd &dofval, xt::xtensor &nodevec) const; }; // ------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #include "Matrix.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/Matrix.hpp b/include/GooseFEM/Matrix.hpp index c4f97bc..b08e162 100644 --- a/include/GooseFEM/Matrix.hpp +++ b/include/GooseFEM/Matrix.hpp @@ -1,208 +1,196 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
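The renames above (`solve` -> `Solve`, and later `dot` -> `Dot`, `asDiagonal` -> `AsDiagonal`, ...) establish a convention used throughout this changeset: lower-case methods write into a caller-allocated output, capitalised variants allocate the output and return it. A minimal illustration of the pattern with a placeholder class (not GooseFEM code):

```cpp
#include <cstddef>
#include <vector>

struct DiagonalSketch
{
    std::vector<double> inv;  // inverse of a diagonal matrix

    // in-place API: overwrite "x" with A \ b
    void solve(const std::vector<double>& b, std::vector<double>& x) const
    {
        for (size_t i = 0; i < b.size(); ++i)
            x[i] = inv[i] * b[i];
    }

    // auto-allocating API: same operation, but the result is allocated and returned
    std::vector<double> Solve(const std::vector<double>& b) const
    {
        std::vector<double> x(b.size());
        this->solve(b, x);
        return x;
    }
};

int main()
{
    DiagonalSketch A{{0.5, 0.25}};
    std::vector<double> b = {2.0, 4.0};
    auto x = A.Solve(b);   // x = {1.0, 1.0}
    return (x[0] == 1.0 && x[1] == 1.0) ? 0 : 1;
}
```

The in-place overloads avoid allocations in tight loops; the capitalised ones keep user code terse.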
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIX_HPP #define GOOSEFEM_MATRIX_HPP // ------------------------------------------------------------------------------------------------- #include "Matrix.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- -inline Matrix::Matrix(const xt::xtensor &conn, const xt::xtensor &dofs) : +inline Matrix::Matrix( + const xt::xtensor &conn, + const xt::xtensor &dofs) : m_conn(conn), m_dofs(dofs) { - // mesh dimensions m_nelem = m_conn.shape()[0]; m_nne = m_conn.shape()[1]; m_nnode = m_dofs.shape()[0]; m_ndim = m_dofs.shape()[1]; - // dimensions of the system m_ndof = xt::amax(m_dofs)[0] + 1; - // allocate triplet list - m_trip.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_T.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); - // allocate sparse matrices - m_data.resize(m_ndof,m_ndof); + m_A.resize(m_ndof,m_ndof); - // check consistency - assert( xt::amax(m_conn)[0] + 1 == m_nnode ); - assert( m_ndof <= m_nnode * m_ndim ); + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(m_ndof <= m_nnode * m_ndim); } // ------------------------------------------------------------------------------------------------- -inline size_t Matrix::nelem() const { return m_nelem; } +inline size_t Matrix::nelem() const +{ return m_nelem; } -inline size_t Matrix::nne() const { return m_nne; } +inline size_t Matrix::nne() const +{ return m_nne; } -inline size_t Matrix::nnode() const { return m_nnode; } +inline size_t Matrix::nnode() const +{ return m_nnode; } -inline size_t Matrix::ndim() const { return m_ndim; } +inline size_t Matrix::ndim() const +{ return m_ndim; } -inline size_t Matrix::ndof() const { return m_ndof; } +inline size_t Matrix::ndof() const +{ return m_ndof; } -inline xt::xtensor Matrix::dofs() const { return m_dofs; } +inline xt::xtensor Matrix::dofs() const +{ return m_dofs; } // ------------------------------------------------------------------------------------------------- inline void Matrix::factorize() { - // skip for unchanged "m_data" - if ( ! m_change ) return; + if ( ! 
m_factor ) return; - // factorise - m_solver.compute(m_data); + m_solver.compute(m_A); - // reset signal - m_change = false; -} - -// ------------------------------------------------------------------------------------------------- - -inline Eigen::VectorXd Matrix::asDofs(const xt::xtensor &nodevec) const -{ - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - - Eigen::VectorXd dofval(m_ndof,1); - - #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - dofval(m_dofs(m,i)) = nodevec(m,i); - - return dofval; -} - -// ------------------------------------------------------------------------------------------------- - -inline void Matrix::asNode(const Eigen::VectorXd &dofval, xt::xtensor &nodevec) const -{ - assert( static_cast(dofval.size()) == m_ndof ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - - #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - nodevec(m,i) = dofval(m_dofs(m,i)); + m_factor = false; } // ------------------------------------------------------------------------------------------------- inline void Matrix::assemble(const xt::xtensor &elemmat) { - // check input - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); - - // initialize triplet list - m_trip.clear(); - - // assemble to triplet list - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - for ( size_t n = 0 ; n < m_nne ; ++n ) - for ( size_t j = 0 ; j < m_ndim ; ++j ) - m_trip.push_back(Eigen::Triplet( + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); + + m_T.clear(); + + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + for (size_t n = 0 ; n < m_nne ; ++n) + for (size_t j = 0 ; j < m_ndim ; ++j) + m_T.push_back(Eigen::Triplet( m_dofs(m_conn(e,m),i), m_dofs(m_conn(e,n),j), elemmat(e,m*m_ndim+i,n*m_ndim+j) )); - // convert to sparse matrix - m_data.setFromTriplets(m_trip.begin(), m_trip.end()); + m_A.setFromTriplets(m_T.begin(), m_T.end()); - // signal change - m_change = true; + m_factor = true; } // ------------------------------------------------------------------------------------------------- -inline void Matrix::solve(const xt::xtensor &b, - xt::xtensor &x) +inline void Matrix::solve( + const xt::xtensor &b, + xt::xtensor &x) { - // check input - assert( b.shape()[0] == m_nnode ); - assert( b.shape()[1] == m_ndim ); - assert( x.shape()[0] == m_nnode ); - assert( x.shape()[1] == m_ndim ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); - // factorise (if needed) this->factorize(); - // extract dofvals Eigen::VectorXd B = this->asDofs(b); - // solve Eigen::VectorXd X = m_solver.solve(B); - // collect this->asNode(X, x); } // ------------------------------------------------------------------------------------------------- -inline void Matrix::solve(const xt::xtensor &b, - xt::xtensor &x) +inline void Matrix::solve( + const xt::xtensor &b, + xt::xtensor &x) { - // check input - assert( b.size() == m_ndof ); - assert( x.size() == m_ndof ); + assert(b.size() == m_ndof); + assert(x.size() == m_ndof); - // factorise (if needed) 
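The assemble/factorise/solve flow above reduces, stripped of the mesh bookkeeping, to standard Eigen usage: collect element contributions as triplets, build the sparse matrix with `setFromTriplets` (which sums duplicates), and factorise lazily, only when the matrix changed since the last solve. A toy 2-DOF sketch, not taken from the library:

```cpp
#include <vector>
#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>

int main()
{
    const int ndof = 2;
    std::vector<Eigen::Triplet<double>> T;

    // "assemble": push (row, col, value) entries; duplicates are summed by setFromTriplets
    T.emplace_back(0, 0, 4.0);
    T.emplace_back(1, 1, 3.0);
    T.emplace_back(0, 1, 1.0);
    T.emplace_back(1, 0, 1.0);

    Eigen::SparseMatrix<double> A(ndof, ndof);
    A.setFromTriplets(T.begin(), T.end());
    bool changed = true;                               // plays the role of the m_factor flag

    Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
    if (changed) { solver.compute(A); changed = false; }  // lazy "factorize()"

    Eigen::VectorXd b(ndof);
    b << 5.0, 4.0;
    Eigen::VectorXd X = solver.solve(b);               // X solves A X = b
    return (solver.info() == Eigen::Success) ? 0 : 1;
}
```

Keeping the factorisation cached and re-using it for different right-hand sides is exactly what the flag renamed to `m_factor` signals.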
this->factorize(); - // solve for "x" - // - allocate Eigen object Eigen::VectorXd B(m_ndof,1); - // - copy to Eigen objects + std::copy(b.begin(), b.end(), B.data()); - // - solve + Eigen::VectorXd X = m_solver.solve(B); - // - copy from Eigen object + std::copy(X.data(), X.data()+m_ndof, x.begin()); } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Matrix::solve( +inline xt::xtensor Matrix::Solve( const xt::xtensor &b) { xt::xtensor x = xt::empty({m_nnode, m_ndim}); this->solve(b, x); return x; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Matrix::solve( +inline xt::xtensor Matrix::Solve( const xt::xtensor &b) { xt::xtensor x = xt::empty({m_ndof}); this->solve(b, x); return x; } // ------------------------------------------------------------------------------------------------- +inline Eigen::VectorXd Matrix::asDofs(const xt::xtensor &nodevec) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + Eigen::VectorXd dofval(m_ndof,1); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + dofval(m_dofs(m,i)) = nodevec(m,i); + + return dofval; +} + +// ------------------------------------------------------------------------------------------------- + +inline void Matrix::asNode(const Eigen::VectorXd &dofval, xt::xtensor &nodevec) const +{ + assert(static_cast(dofval.size()) == m_ndof); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + nodevec(m,i) = dofval(m_dofs(m,i)); +} + +// ------------------------------------------------------------------------------------------------- + } // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/MatrixDiagonal.h b/include/GooseFEM/MatrixDiagonal.h index dc0f5c0..05f1422 100644 --- a/include/GooseFEM/MatrixDiagonal.h +++ b/include/GooseFEM/MatrixDiagonal.h @@ -1,114 +1,123 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
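The DOF-value overload of `solve()` above moves data between xtensor and Eigen with `std::copy`, since both expose contiguous storage. A small sketch of that interop pattern; the "solve" is replaced by a trivial scaling so the example stays self-contained:

```cpp
#include <algorithm>
#include <Eigen/Dense>
#include <xtensor/xtensor.hpp>
#include <xtensor/xbuilder.hpp>

int main()
{
    xt::xtensor<double,1> b = {1.0, 2.0, 3.0};

    Eigen::VectorXd B(b.size());
    std::copy(b.begin(), b.end(), B.data());               // xtensor -> Eigen

    Eigen::VectorXd X = 2.0 * B;                            // placeholder for m_solver.solve(B)

    xt::xtensor<double,1> x = xt::empty<double>({b.size()});
    std::copy(X.data(), X.data() + X.size(), x.begin());    // Eigen -> xtensor

    return (x(2) == 6.0) ? 0 : 1;
}
```

The node-vector overload does the same through `asDofs`/`asNode`, which scatter and gather per DOF number instead of copying element-wise.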
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIXDIAGONAL_H #define GOOSEFEM_MATRIXDIAGONAL_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- class MatrixDiagonal { public: - // constructors + // Constructors MatrixDiagonal() = default; - MatrixDiagonal(const xt::xtensor &conn, const xt::xtensor &dofs); - // dimensions + MatrixDiagonal( + const xt::xtensor &conn, + const xt::xtensor &dofs); + + // Dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t nnode() const; // number of nodes size_t ndim() const; // number of dimensions size_t ndof() const; // number of DOFs // DOF lists xt::xtensor dofs() const; // DOFs - // set matrix components + // Set matrix components void set(const xt::xtensor &A); // assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] // WARNING: ignores any off-diagonal terms void assemble(const xt::xtensor &elemmat); - // product: b_i = A_ij * x_j + // Dot-product: + // b_i = A_ij * x_j - void dot(const xt::xtensor &x, - xt::xtensor &b) const; + void dot( + const xt::xtensor &x, + xt::xtensor &b) const; - void dot(const xt::xtensor &x, - xt::xtensor &b) const; + void dot( + const xt::xtensor &x, + xt::xtensor &b) const; - // solve: x = A \ b + // Solve: + // x = A \ b - void solve(const xt::xtensor &b, - xt::xtensor &x); + void solve( + const xt::xtensor &b, + xt::xtensor &x); - void solve(const xt::xtensor &b, - xt::xtensor &x); + void solve( + const xt::xtensor &b, + xt::xtensor &x); - // return matrix as diagonal matrix (column) + // Return matrix as diagonal matrix (column) - xt::xtensor asDiagonal() const; + xt::xtensor AsDiagonal() const; - // auto allocation of the functions above + // Auto-allocation of the functions above - xt::xtensor dot(const xt::xtensor &x) const; + xt::xtensor Dot(const xt::xtensor &x) const; - xt::xtensor dot(const xt::xtensor &x) const; + xt::xtensor Dot(const xt::xtensor &x) const; - xt::xtensor solve(const xt::xtensor &b); + xt::xtensor Solve(const xt::xtensor &b); - xt::xtensor solve(const xt::xtensor &b); + xt::xtensor Solve(const xt::xtensor &b); private: - // the diagonal matrix, and its inverse (re-used to solve different RHS) - xt::xtensor m_data; + // The diagonal matrix, and its inverse (re-used to solve different RHS) + xt::xtensor m_A; xt::xtensor m_inv; - // signal changes to data compare to the last inverse - bool m_change=false; + // Signal changes to data compare to the last inverse + bool m_factor=false; - // bookkeeping + // Bookkeeping xt::xtensor m_conn; // connectivity [nelem, nne ] xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] - // dimensions + // Dimensions size_t m_nelem; // number of elements size_t m_nne; // number of nodes per element size_t m_nnode; // number of nodes size_t m_ndim; // number of dimensions size_t m_ndof; // number of DOFs - // compute inverse (automatically evaluated by "solve") + // Compute inverse (automatically evaluated by "solve") void factorize(); }; // ------------------------------------------------------------------------------------------------- } // namespace ... 
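As the header above notes, `MatrixDiagonal::assemble` deliberately ignores off-diagonal terms: the element matrices are assumed diagonal (e.g. a lumped mass matrix), so only their diagonal entries are scattered into a single global vector, and "factorising" is an entry-wise inverse. A plain-C++ sketch with illustrative sizes and numbers:

```cpp
#include <cstddef>
#include <vector>

int main()
{
    const size_t ndof = 4;
    std::vector<double> A(ndof, 0.0);               // the global diagonal ("m_A")

    // one element, 2 nodes x 2 dims -> a diagonal 4x4 element matrix, stored as its diagonal
    std::vector<size_t> elem_dofs = {0, 1, 2, 3};   // global DOF number per local entry
    std::vector<double> elem_diag = {1.0, 1.0, 2.0, 2.0};

    for (size_t i = 0; i < elem_dofs.size(); ++i)
        A[elem_dofs[i]] += elem_diag[i];            // off-diagonal terms are ignored by design

    // "factorize": the inverse of a diagonal matrix is taken entry by entry
    std::vector<double> Ainv(ndof);
    for (size_t d = 0; d < ndof; ++d)
        Ainv[d] = 1.0 / A[d];

    return (Ainv[2] == 0.5) ? 0 : 1;
}
```

With the diagonal stored as a vector, `dot` and `solve` become element-wise multiplications, which is why those loops parallelise trivially with OpenMP.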
// ================================================================================================= #include "MatrixDiagonal.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/MatrixDiagonal.hpp b/include/GooseFEM/MatrixDiagonal.hpp index beed480..c5ca50e 100644 --- a/include/GooseFEM/MatrixDiagonal.hpp +++ b/include/GooseFEM/MatrixDiagonal.hpp @@ -1,221 +1,222 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIXDIAGONAL_HPP #define GOOSEFEM_MATRIXDIAGONAL_HPP // ------------------------------------------------------------------------------------------------- #include "MatrixDiagonal.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- -inline MatrixDiagonal::MatrixDiagonal(const xt::xtensor &conn, - const xt::xtensor &dofs) : m_conn(conn), m_dofs(dofs) +inline MatrixDiagonal::MatrixDiagonal( + const xt::xtensor &conn, + const xt::xtensor &dofs) : + m_conn(conn), m_dofs(dofs) { - // mesh dimensions m_nelem = m_conn.shape()[0]; m_nne = m_conn.shape()[1]; m_nnode = m_dofs.shape()[0]; m_ndim = m_dofs.shape()[1]; - // dimensions of the system m_ndof = xt::amax(m_dofs)[0] + 1; - // allocate matrix and its inverse - m_data = xt::empty({m_ndof}); + m_A = xt::empty({m_ndof}); + m_inv = xt::empty({m_ndof}); - // check consistency - assert( xt::amax(m_conn)[0] + 1 == m_nnode ); - assert( m_ndof <= m_nnode * m_ndim ); + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(m_ndof <= m_nnode * m_ndim); } // ------------------------------------------------------------------------------------------------- -inline size_t MatrixDiagonal::nelem() const { return m_nelem; } +inline size_t MatrixDiagonal::nelem() const +{ return m_nelem; } -inline size_t MatrixDiagonal::nne() const { return m_nne; } +inline size_t MatrixDiagonal::nne() const +{ return m_nne; } -inline size_t MatrixDiagonal::nnode() const { return m_nnode; } +inline size_t MatrixDiagonal::nnode() const +{ return m_nnode; } -inline size_t MatrixDiagonal::ndim() const { return m_ndim; } +inline size_t MatrixDiagonal::ndim() const +{ return m_ndim; } -inline size_t MatrixDiagonal::ndof() const { return m_ndof; } +inline size_t MatrixDiagonal::ndof() const +{ return m_ndof; } -inline xt::xtensor MatrixDiagonal::dofs() const { return m_dofs; } +inline xt::xtensor MatrixDiagonal::dofs() const +{ return m_dofs; } // ------------------------------------------------------------------------------------------------- inline void MatrixDiagonal::factorize() { - // skip for unchanged "m_data" - if ( ! m_change ) return; + if (!m_factor) return; - // invert #pragma omp parallel for - for ( size_t d = 0 ; d < m_ndof ; ++d ) - m_inv(d) = 1. / m_data(d); + for (size_t d = 0 ; d < m_ndof ; ++d) + m_inv(d) = 1. 
/ m_A(d); - // reset signal - m_change = false; + m_factor = false; } // ------------------------------------------------------------------------------------------------- inline void MatrixDiagonal::set(const xt::xtensor &A) { - // check input - assert( A.shape()[0] == m_ndof ); + assert(A.shape()[0] == m_ndof); - // copy - std::copy(A.begin(), A.end(), m_data.begin()); + std::copy(A.begin(), A.end(), m_A.begin()); - // signal change - m_change = true; + m_factor = true; } // ------------------------------------------------------------------------------------------------- inline void MatrixDiagonal::assemble(const xt::xtensor &elemmat) { - // check input - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); - assert( Element::isDiagonal(elemmat) ); - - // zero-initialize matrix - m_data.fill(0.0); - - // assemble - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - m_data(m_dofs(m_conn(e,m),i)) += elemmat(e,m*m_ndim+i,m*m_ndim+i); - - // signal change - m_change = true; + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); + assert(Element::isDiagonal(elemmat)); + + m_A.fill(0.0); + + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + m_A(m_dofs(m_conn(e,m),i)) += elemmat(e,m*m_ndim+i,m*m_ndim+i); + + m_factor = true; } // ------------------------------------------------------------------------------------------------- -inline void MatrixDiagonal::dot(const xt::xtensor &x, - xt::xtensor &b) const +inline void MatrixDiagonal::dot( + const xt::xtensor &x, + xt::xtensor &b) const { - assert( x.shape()[0] == m_nnode ); - assert( x.shape()[1] == m_ndim ); - assert( b.shape()[0] == m_nnode ); - assert( b.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - b(m,i) = m_data(m_dofs(m,i)) * x(m,i); + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + b(m,i) = m_A(m_dofs(m,i)) * x(m,i); } // ------------------------------------------------------------------------------------------------- -inline void MatrixDiagonal::dot(const xt::xtensor &x, - xt::xtensor &b) const +inline void MatrixDiagonal::dot( + const xt::xtensor &x, + xt::xtensor &b) const { - assert( x.size() == m_ndof ); - assert( b.size() == m_ndof ); + assert(x.size() == m_ndof); + assert(b.size() == m_ndof); - xt::noalias(b) = m_data * x; + xt::noalias(b) = m_A * x; } // ------------------------------------------------------------------------------------------------- -inline void MatrixDiagonal::solve(const xt::xtensor &b, - xt::xtensor &x) +inline void MatrixDiagonal::solve( + const xt::xtensor &b, + xt::xtensor &x) { - assert( b.size() == m_ndof ); - assert( x.size() == m_ndof ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); this->factorize(); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) x(m,i) = m_inv(m_dofs(m,i)) * b(m,i); } // 
------------------------------------------------------------------------------------------------- -inline void MatrixDiagonal::solve(const xt::xtensor &b, - xt::xtensor &x) +inline void MatrixDiagonal::solve( + const xt::xtensor &b, + xt::xtensor &x) { - assert( b.size() == m_ndof ); - assert( x.size() == m_ndof ); + assert(b.size() == m_ndof); + assert(x.size() == m_ndof); this->factorize(); xt::noalias(x) = m_inv * b; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonal::asDiagonal() const +inline xt::xtensor MatrixDiagonal::AsDiagonal() const { - return m_data; + return m_A; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonal::dot(const xt::xtensor &x) const +inline xt::xtensor MatrixDiagonal::Dot(const xt::xtensor &x) const { xt::xtensor b = xt::empty({m_nnode, m_ndim}); this->dot(x, b); return b; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonal::dot(const xt::xtensor &x) const +inline xt::xtensor MatrixDiagonal::Dot(const xt::xtensor &x) const { xt::xtensor b = xt::empty({m_ndof}); this->dot(x, b); return b; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonal::solve(const xt::xtensor &b) +inline xt::xtensor MatrixDiagonal::Solve(const xt::xtensor &b) { xt::xtensor x = xt::empty({m_nnode, m_ndim}); this->solve(b, x); return x; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonal::solve(const xt::xtensor &b) +inline xt::xtensor MatrixDiagonal::Solve(const xt::xtensor &b) { xt::xtensor x = xt::empty({m_ndof}); this->solve(b, x); return x; } // ------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/MatrixDiagonalPartitioned.h b/include/GooseFEM/MatrixDiagonalPartitioned.h index e2d854f..8d8f2eb 100644 --- a/include/GooseFEM/MatrixDiagonalPartitioned.h +++ b/include/GooseFEM/MatrixDiagonalPartitioned.h @@ -1,142 +1,191 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIXDIAGONALPARTITIONED_H #define GOOSEFEM_MATRIXDIAGONALPARTITIONED_H // ------------------------------------------------------------------------------------------------- #include "config.h" -#include -#include -#include - // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- class MatrixDiagonalPartitioned { public: - // constructors + // Constructors MatrixDiagonalPartitioned() = default; - MatrixDiagonalPartitioned(const xt::xtensor &conn, const xt::xtensor &dofs, + + MatrixDiagonalPartitioned( + const xt::xtensor &conn, + const xt::xtensor &dofs, const xt::xtensor &iip); - // dimensions + // Dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t nnode() const; // number of nodes size_t ndim() const; // number of dimensions size_t ndof() const; // number of DOFs size_t nnu() const; // number of unknown DOFs size_t nnp() const; // number of prescribed DOFs // DOF lists xt::xtensor dofs() const; // DOFs - xt::xtensor iiu() const; // unknown DOFs + xt::xtensor iiu() const; // unknown DOFs xt::xtensor iip() const; // prescribed DOFs - // assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] + // Assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] // WARNING: ignores any off-diagonal terms void assemble(const xt::xtensor &elemmat); - // product: b_i = A_ij * x_j + // Dot-product: + // b_i = A_ij * x_j + + void dot( + const xt::xtensor &x, + xt::xtensor &b) const; // overwritten + + void dot( + const xt::xtensor &x, + xt::xtensor &b) const; // overwritten + + void dot_u( + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_u) const; // overwritten + + void dot_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_p) const; // overwritten + + // Solve: + // x_u = A_uu \ ( b_u - A_up * x_p ) = A_uu \ b_u + + void solve( + const xt::xtensor &b, + xt::xtensor &x); // modified with "x_u" + + void solve( + const xt::xtensor &b, + xt::xtensor &x); // modified with "x_u" + + void solve_u( + const xt::xtensor &b_u, + const xt::xtensor &x_p, + xt::xtensor &x_u); // overwritten + + // Get right-hand-size for corresponding to the prescribed DOFs: + // b_p = A_pu * x_u + A_pp * x_p = A_pp * x_p - // b = A * x - void dot(const xt::xtensor &x, - xt::xtensor &b) const; + void reaction( + const xt::xtensor &x, + xt::xtensor &b) const; // modified with "b_p" - // b = A * x - void dot(const xt::xtensor &x, - xt::xtensor &b) const; + void reaction( + const xt::xtensor &x, + xt::xtensor &b) const; // modified with "b_p" - // b_u = A_uu * x_u + A_up * x_p == A_uu * x_u - void dot_u(const xt::xtensor &x_u, const xt::xtensor &x_p, - xt::xtensor &b_u) const; + void reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_p) const; // overwritten - // b_p = A_pu * x_u + A_pp * x_p == A_pp * x_p - void dot_p(const xt::xtensor &x_u, const xt::xtensor &x_p, - xt::xtensor &b_p) const; + // Return matrix as diagonal matrix (column) - // solve: x = A \ b - // x_u = A_uu \ ( b_u - A_up * x_p ) == A_uu \ b_u - // b_p = A_pu * x_u + A_pp * x_p == A_pp * x_p + xt::xtensor AsDiagonal() const; - void solve(xt::xtensor &b, - 
xt::xtensor &x); + // Auto-allocation of the functions above - void solve(xt::xtensor &b, - xt::xtensor &x); + xt::xtensor Dot( + const xt::xtensor &x) const; - void solve_u(const xt::xtensor &b_u, const xt::xtensor &x_p, - xt::xtensor &x_u); + xt::xtensor Dot( + const xt::xtensor &x) const; - // return matrix as diagonal matrix (column) + xt::xtensor Dot_u( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const; - xt::xtensor asDiagonal() const; + xt::xtensor Dot_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const; - // auto allocation of the functions above + xt::xtensor Solve( + const xt::xtensor &b, + const xt::xtensor &x); - xt::xtensor dot(const xt::xtensor &x) const; + xt::xtensor Solve( + const xt::xtensor &b, + const xt::xtensor &x); - xt::xtensor dot(const xt::xtensor &x) const; + xt::xtensor Solve_u( + const xt::xtensor &b_u, + const xt::xtensor &x_p); - xt::xtensor dot_u(const xt::xtensor &x_u, const xt::xtensor &x_p) const; + xt::xtensor Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const; - xt::xtensor dot_p(const xt::xtensor &x_u, const xt::xtensor &x_p) const; + xt::xtensor Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const; - xt::xtensor solve_u(const xt::xtensor &b_u, const xt::xtensor &x_p); + xt::xtensor Reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const; private: - // the diagonal matrix, and its inverse (re-used to solve different RHS) - xt::xtensor m_data_uu; - xt::xtensor m_data_pp; + // The diagonal matrix, and its inverse (re-used to solve different RHS) + xt::xtensor m_Auu; + xt::xtensor m_App; xt::xtensor m_inv_uu; - // signal changes to data compare to the last inverse - bool m_change=false; + // Signal changes to data compare to the last inverse + bool m_factor=false; - // bookkeeping + // Bookkeeping xt::xtensor m_conn; // connectivity [nelem, nne ] xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] xt::xtensor m_part; // DOF-numbers per node, renumbered [nnode, ndim] xt::xtensor m_iiu; // DOF-numbers that are unknown [nnu] xt::xtensor m_iip; // DOF-numbers that are prescribed [nnp] - // dimensions + // Dimensions size_t m_nelem; // number of elements size_t m_nne; // number of nodes per element size_t m_nnode; // number of nodes size_t m_ndim; // number of dimensions size_t m_ndof; // number of DOFs size_t m_nnu; // number of unknown DOFs size_t m_nnp; // number of prescribed DOFs - // compute inverse (automatically evaluated by "solve") + // Compute inverse (automatically evaluated by "solve") void factorize(); }; // ------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #include "MatrixDiagonalPartitioned.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/MatrixDiagonalPartitioned.hpp b/include/GooseFEM/MatrixDiagonalPartitioned.hpp index c826e11..231dcc2 100644 --- a/include/GooseFEM/MatrixDiagonalPartitioned.hpp +++ b/include/GooseFEM/MatrixDiagonalPartitioned.hpp @@ -1,343 +1,458 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIXDIAGONALPARTITIONED_HPP #define GOOSEFEM_MATRIXDIAGONALPARTITIONED_HPP // ------------------------------------------------------------------------------------------------- #include "MatrixDiagonalPartitioned.h" +#include "Mesh.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- -inline MatrixDiagonalPartitioned::MatrixDiagonalPartitioned(const xt::xtensor &conn, - const xt::xtensor &dofs, const xt::xtensor &iip) : +inline MatrixDiagonalPartitioned::MatrixDiagonalPartitioned( + const xt::xtensor &conn, + const xt::xtensor &dofs, + const xt::xtensor &iip) : m_conn(conn), m_dofs(dofs), m_iip(iip) { - // mesh dimensions m_nelem = m_conn.shape()[0]; m_nne = m_conn.shape()[1]; m_nnode = m_dofs.shape()[0]; m_ndim = m_dofs.shape()[1]; - // list with unknown DOFs m_iiu = xt::setdiff1d(dofs, iip); - // dimensions of the system m_ndof = xt::amax(m_dofs)[0] + 1; m_nnp = m_iip.size(); m_nnu = m_iiu.size(); - // DOFs per node, such that iiu = arange(nnu), iip = nnu + arange(nnp) m_part = Mesh::Reorder({m_iiu, m_iip}).get(m_dofs); - // allocate matrix and its inverse - m_data_uu = xt::empty({m_nnu}); - m_data_pp = xt::empty({m_nnp}); - m_inv_uu = xt::empty({m_nnu}); + m_Auu = xt::empty({m_nnu}); + m_App = xt::empty({m_nnp}); - // check consistency - assert( xt::amax(m_conn)[0] + 1 == m_nnode ); - assert( xt::amax(m_iip)[0] <= xt::amax(m_dofs)[0] ); - assert( m_ndof <= m_nnode * m_ndim ); + m_inv_uu = xt::empty({m_nnu}); + + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(xt::amax(m_iip)[0] <= xt::amax(m_dofs)[0]); + assert(m_ndof <= m_nnode * m_ndim); } // ------------------------------------------------------------------------------------------------- -inline size_t MatrixDiagonalPartitioned::nelem() const { return m_nelem; } +inline size_t MatrixDiagonalPartitioned::nelem() const +{ return m_nelem; } -inline size_t MatrixDiagonalPartitioned::nne() const { return m_nne; } +inline size_t MatrixDiagonalPartitioned::nne() const +{ return m_nne; } -inline size_t MatrixDiagonalPartitioned::nnode() const { return m_nnode; } +inline size_t MatrixDiagonalPartitioned::nnode() const +{ return m_nnode; } -inline size_t MatrixDiagonalPartitioned::ndim() const { return m_ndim; } +inline size_t MatrixDiagonalPartitioned::ndim() const +{ return m_ndim; } -inline size_t MatrixDiagonalPartitioned::ndof() const { return m_ndof; } +inline size_t MatrixDiagonalPartitioned::ndof() const +{ return m_ndof; } -inline size_t MatrixDiagonalPartitioned::nnu() const { return m_nnu; } +inline size_t MatrixDiagonalPartitioned::nnu() const +{ return m_nnu; } -inline size_t MatrixDiagonalPartitioned::nnp() const { return m_nnp; } +inline size_t MatrixDiagonalPartitioned::nnp() const +{ return m_nnp; } -inline xt::xtensor MatrixDiagonalPartitioned::dofs() const { return m_dofs; } +inline xt::xtensor MatrixDiagonalPartitioned::dofs() const +{ return m_dofs; } -inline xt::xtensor MatrixDiagonalPartitioned::iiu() const { return m_iiu; } +inline xt::xtensor MatrixDiagonalPartitioned::iiu() const +{ return m_iiu; } -inline xt::xtensor MatrixDiagonalPartitioned::iip() const { return m_iip; } +inline xt::xtensor MatrixDiagonalPartitioned::iip() const +{ return m_iip; } // 
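The constructor above builds `m_part` such that, after renumbering, the unknown DOFs are `0..nnu-1` and the prescribed DOFs are `nnu..ndof-1`; that is what `Mesh::Reorder({iiu, iip})` provides, with `iiu = setdiff1d(dofs, iip)`. The following sketch reproduces the renumbering idea in plain C++ on a made-up DOF set:

```cpp
#include <cstddef>
#include <vector>

// Map old DOF numbers to new ones: unknowns first (0..nnu-1), prescribed after (nnu..ndof-1).
int main()
{
    const size_t ndof = 6;
    std::vector<size_t> iip = {1, 4};                  // prescribed DOFs
    std::vector<bool> prescribed(ndof, false);
    for (size_t p : iip) prescribed[p] = true;

    const size_t nnu = ndof - iip.size();
    std::vector<size_t> renum(ndof);
    size_t iu = 0, ip = 0;
    for (size_t d = 0; d < ndof; ++d)
        renum[d] = prescribed[d] ? nnu + ip++ : iu++;  // unknowns first, prescribed after

    // a renumbered index < nnu addresses the "uu" block, >= nnu the "pp" block
    return (renum[1] == 4 && renum[4] == 5 && renum[0] == 0) ? 0 : 1;
}
```

This is why the assembly and dot-product loops below can route each entry with a single comparison `d < nnu`.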
------------------------------------------------------------------------------------------------- inline void MatrixDiagonalPartitioned::factorize() { - // skip for unchanged "m_data" - if ( ! m_change ) return; + if (!m_factor) return; - // invert #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) - m_inv_uu(d) = 1. / m_data_uu(d); + for (size_t d = 0 ; d < m_nnu ; ++d) + m_inv_uu(d) = 1. / m_Auu(d); - // reset signal - m_change = false; + m_factor = false; } // ------------------------------------------------------------------------------------------------- inline void MatrixDiagonalPartitioned::assemble(const xt::xtensor &elemmat) { - // check input - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); - assert( Element::isDiagonal(elemmat) ); - - // zero-initialize matrix - m_data_uu.fill(0.0); - m_data_pp.fill(0.0); - - // assemble - for ( size_t e = 0 ; e < m_nelem ; ++e ) - { - for ( size_t m = 0 ; m < m_nne ; ++m ) - { - for ( size_t i = 0 ; i < m_ndim ; ++i ) - { + assert(elemmat.shape()[0] == m_nelem); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); + assert(Element::isDiagonal(elemmat)); + + m_Auu.fill(0.0); + m_App.fill(0.0); + + for (size_t e = 0 ; e < m_nelem ; ++e) { + for (size_t m = 0 ; m < m_nne ; ++m) { + for (size_t i = 0 ; i < m_ndim ; ++i) { + size_t d = m_part(m_conn(e,m),i); if ( d < m_nnu ) - m_data_uu(d ) += elemmat(e,m*m_ndim+i,m*m_ndim+i); + m_Auu(d ) += elemmat(e,m*m_ndim+i,m*m_ndim+i); else - m_data_pp(d-m_nnu) += elemmat(e,m*m_ndim+i,m*m_ndim+i); + m_App(d-m_nnu) += elemmat(e,m*m_ndim+i,m*m_ndim+i); } } } - // signal change - m_change = true; + m_factor = true; } // ------------------------------------------------------------------------------------------------- -inline void MatrixDiagonalPartitioned::dot(const xt::xtensor &x, - xt::xtensor &b) const +inline void MatrixDiagonalPartitioned::dot( + const xt::xtensor &x, + xt::xtensor &b) const { - assert( x.shape()[0] == m_nnode ); - assert( x.shape()[1] == m_ndim ); - assert( b.shape()[0] == m_nnode ); - assert( b.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - { - for ( size_t i = 0 ; i < m_ndim ; ++i ) - { + for (size_t m = 0 ; m < m_nnode ; ++m) { + for (size_t i = 0 ; i < m_ndim ; ++i) { + size_t d = m_part(m,i); - if ( d < m_nnu ) b(m,i) = m_data_uu(d ) * x(m,i); - else b(m,i) = m_data_pp(d-m_nnu) * x(m,i); + if ( d < m_nnu ) b(m,i) = m_Auu(d ) * x(m,i); + else b(m,i) = m_App(d-m_nnu) * x(m,i); } } } // ------------------------------------------------------------------------------------------------- -inline void MatrixDiagonalPartitioned::dot(const xt::xtensor &x, - xt::xtensor &b) const +inline void MatrixDiagonalPartitioned::dot( + const xt::xtensor &x, + xt::xtensor &b) const { - assert( x.size() == m_ndof ); - assert( b.size() == m_ndof ); + assert(x.size() == m_ndof); + assert(b.size() == m_ndof); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) - b(m_iiu(d)) = m_data_uu(d) * x(m_iiu(d)); + for (size_t d = 0 ; d < m_nnu ; ++d) + b(m_iiu(d)) = m_Auu(d) * x(m_iiu(d)); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) - b(m_iip(d)) = m_data_pp(d) * x(m_iip(d)); + for (size_t d = 0 ; d < m_nnp ; ++d) + b(m_iip(d)) = m_App(d) * x(m_iip(d)); } // 
------------------------------------------------------------------------------------------------- inline void MatrixDiagonalPartitioned::dot_u( - const xt::xtensor &x_u, const xt::xtensor &x_p, - xt::xtensor &b_u) const + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_u) const { UNUSED(x_p); - assert( x_u.size() == m_nnu ); - assert( x_p.size() == m_nnp ); - assert( b_u.size() == m_nnu ); + assert(x_u.size() == m_nnu); + assert(x_p.size() == m_nnp); + assert(b_u.size() == m_nnu); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) - b_u(d) = m_data_uu(d) * x_u(d); + for (size_t d = 0 ; d < m_nnu ; ++d) + b_u(d) = m_Auu(d) * x_u(d); } // ------------------------------------------------------------------------------------------------- inline void MatrixDiagonalPartitioned::dot_p( - const xt::xtensor &x_u, const xt::xtensor &x_p, - xt::xtensor &b_p) const + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_p) const { UNUSED(x_u); - assert( x_u.size() == m_nnu ); - assert( x_p.size() == m_nnp ); - assert( b_p.size() == m_nnp ); + assert(x_u.size() == m_nnu); + assert(x_p.size() == m_nnp); + assert(b_p.size() == m_nnp); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) - b_p(d) = m_data_pp(d) * x_p(d); + for (size_t d = 0 ; d < m_nnp ; ++d) + b_p(d) = m_App(d) * x_p(d); } // ------------------------------------------------------------------------------------------------- -inline void MatrixDiagonalPartitioned::solve(xt::xtensor &b, - xt::xtensor &x) +inline void MatrixDiagonalPartitioned::solve( + const xt::xtensor &b, + xt::xtensor &x) { - assert( b.shape()[0] == m_nnode ); - assert( b.shape()[1] == m_ndim ); - assert( x.shape()[0] == m_nnode ); - assert( x.shape()[1] == m_ndim ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); this->factorize(); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - { - for ( size_t i = 0 ; i < m_ndim ; ++i ) - { - size_t d = m_part(m,i); - - if ( d < m_nnu ) x(m,i) = m_inv_uu (d ) * b(m,i); - else b(m,i) = m_data_pp(d-m_nnu) * x(m,i); - } - } + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if ( m_part(m,i) < m_nnu ) + x(m,i) = m_inv_uu(m_part(m,i)) * b(m,i); } // ------------------------------------------------------------------------------------------------- -inline void MatrixDiagonalPartitioned::solve(xt::xtensor &b, - xt::xtensor &x) +inline void MatrixDiagonalPartitioned::solve( + const xt::xtensor &b, + xt::xtensor &x) { - assert( b.size() == m_ndof ); - assert( x.size() == m_ndof ); + assert(b.size() == m_ndof); + assert(x.size() == m_ndof); this->factorize(); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) + for (size_t d = 0 ; d < m_nnu ; ++d) x(m_iiu(d)) = m_inv_uu(d) * b(m_iiu(d)); - - #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) - b(m_iip(d)) = m_data_pp(d) * x(m_iip(d)); } // ------------------------------------------------------------------------------------------------- inline void MatrixDiagonalPartitioned::solve_u( - const xt::xtensor &b_u, const xt::xtensor &x_p, - xt::xtensor &x_u) + const xt::xtensor &b_u, + const xt::xtensor &x_p, + xt::xtensor &x_u) { UNUSED(x_p); - assert( b_u.shape()[0] == m_nnu ); - assert( x_p.shape()[0] == m_nnp ); - assert( x_u.shape()[0] == m_nnu ); + assert(b_u.shape()[0] == m_nnu); + assert(x_p.shape()[0] == m_nnp); + assert(x_u.shape()[0] == m_nnu); 
this->factorize(); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) + for (size_t d = 0 ; d < m_nnu ; ++d) x_u(d) = m_inv_uu(d) * b_u(d); } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonalPartitioned::asDiagonal() const +inline void MatrixDiagonalPartitioned::reaction( + const xt::xtensor &x, + xt::xtensor &b) const +{ + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) >= m_nnu) + b(m,i) = m_App(m_part(m,i)-m_nnu) * x(m,i); +} + +// ------------------------------------------------------------------------------------------------- + +inline void MatrixDiagonalPartitioned::reaction( + const xt::xtensor &x, + xt::xtensor &b) const +{ + assert(x.size() == m_ndof); + assert(b.size() == m_ndof); + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnp ; ++d) + b(m_iip(d)) = m_App(d) * x(m_iip(d)); +} + +// ------------------------------------------------------------------------------------------------- + +inline void MatrixDiagonalPartitioned::reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_p) const +{ + UNUSED(x_u); + + assert(x_u.shape()[0] == m_nnu); + assert(x_p.shape()[0] == m_nnp); + assert(b_p.shape()[0] == m_nnp); + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnp ; ++d) + b_p(d) = m_App(d) * x_p(d); +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor MatrixDiagonalPartitioned::AsDiagonal() const { xt::xtensor out = xt::zeros({m_ndof}); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) - out(m_iiu(d)) = m_data_uu(d); + for (size_t d = 0 ; d < m_nnu ; ++d) + out(m_iiu(d)) = m_Auu(d); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) - out(m_iip(d)) = m_data_pp(d); + for (size_t d = 0 ; d < m_nnp ; ++d) + out(m_iip(d)) = m_App(d); return out; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonalPartitioned::dot(const xt::xtensor &x) const +inline xt::xtensor MatrixDiagonalPartitioned::Dot( + const xt::xtensor &x) const { xt::xtensor b = xt::empty({m_nnode, m_ndim}); this->dot(x, b); return b; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonalPartitioned::dot(const xt::xtensor &x) const +inline xt::xtensor MatrixDiagonalPartitioned::Dot( + const xt::xtensor &x) const { xt::xtensor b = xt::empty({m_ndof}); this->dot(x, b); return b; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonalPartitioned::dot_u( - const xt::xtensor &x_u, const xt::xtensor &x_p) const +inline xt::xtensor MatrixDiagonalPartitioned::Dot_u( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const { xt::xtensor b_u = xt::empty({m_nnu}); this->dot_u(x_u, x_p, b_u); return b_u; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonalPartitioned::dot_p( - const xt::xtensor &x_u, const xt::xtensor &x_p) const +inline xt::xtensor MatrixDiagonalPartitioned::Dot_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const { 
xt::xtensor b_p = xt::empty({m_nnp}); this->dot_p(x_u, x_p, b_p); return b_p; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixDiagonalPartitioned::solve_u( - const xt::xtensor &b_u, const xt::xtensor &x_p) +inline xt::xtensor MatrixDiagonalPartitioned::Solve( + const xt::xtensor &b, + const xt::xtensor &x) +{ + xt::xtensor out = x; + + this->solve(b, out); + + return out; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor MatrixDiagonalPartitioned::Solve( + const xt::xtensor &b, + const xt::xtensor &x) +{ + xt::xtensor out = x; + + this->solve(b, out); + + return out; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor MatrixDiagonalPartitioned::Solve_u( + const xt::xtensor &b_u, + const xt::xtensor &x_p) { xt::xtensor x_u = xt::empty({m_nnu}); this->solve_u(b_u, x_p, x_u); return x_u; } // ------------------------------------------------------------------------------------------------- +inline xt::xtensor MatrixDiagonalPartitioned::Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const +{ + xt::xtensor out = b; + + this->reaction(x, out); + + return out; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor MatrixDiagonalPartitioned::Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const +{ + xt::xtensor out = b; + + this->reaction(x, out); + + return out; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor MatrixDiagonalPartitioned::Reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const +{ + xt::xtensor b_p = xt::empty({m_nnp}); + + this->reaction_p(x_u, x_p, b_p); + + return b_p; +} + +// ------------------------------------------------------------------------------------------------- + } // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/MatrixPartitioned.h b/include/GooseFEM/MatrixPartitioned.h index d21edc4..381ba97 100644 --- a/include/GooseFEM/MatrixPartitioned.h +++ b/include/GooseFEM/MatrixPartitioned.h @@ -1,140 +1,172 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIXPARTITIONED_H #define GOOSEFEM_MATRIXPARTITIONED_H // ------------------------------------------------------------------------------------------------- #include "config.h" #include #include #include // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- class MatrixPartitioned { public: - // constructors + // Constructors MatrixPartitioned() = default; MatrixPartitioned( const xt::xtensor &conn, const xt::xtensor &dofs, const xt::xtensor &iip); - // dimensions + // Dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t nnode() const; // number of nodes size_t ndim() const; // number of dimensions size_t ndof() const; // number of DOFs size_t nnu() const; // number of unknown DOFs size_t nnp() const; // number of prescribed DOFs // DOF lists xt::xtensor dofs() const; // DOFs xt::xtensor iiu() const; // unknown DOFs xt::xtensor iip() const; // prescribed DOFs - // assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] + // Assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] void assemble(const xt::xtensor &elemmat); - // solve: x = A \ b - // x_u = A_uu \ ( b_u - A_up * x_p ) - // b_p = A_pu * x_u + A_pp * x_p + // Solve: + // x_u = A_uu \ ( b_u - A_up * x_p ) - void solve(xt::xtensor &b, - xt::xtensor &x); + void solve( + const xt::xtensor &b, + xt::xtensor &x); // modified with "x_u" - void solve(xt::xtensor &b, - xt::xtensor &x); + void solve( + const xt::xtensor &b, + xt::xtensor &x); // modified with "x_u" void solve_u( const xt::xtensor &b_u, const xt::xtensor &x_p, - xt::xtensor &x_u); + xt::xtensor &x_u); // overwritten - // auto allocation of the functions above + // Get right-hand-size for corresponding to the prescribed DOFs: + // b_p = A_pu * x_u + A_pp * x_p = A_pp * x_p - xt::xtensor solve_u( + void reaction( + const xt::xtensor &x, + xt::xtensor &b) const; // modified with "b_p" + + void reaction( + const xt::xtensor &x, + xt::xtensor &b) const; // modified with "b_p" + + void reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_p) const; // overwritten + + // Auto-allocation of the functions above + + xt::xtensor Solve( + const xt::xtensor &b, + const xt::xtensor &x); + + xt::xtensor Solve( + const xt::xtensor &b, + const xt::xtensor &x); + + xt::xtensor Solve_u( const xt::xtensor &b_u, const xt::xtensor &x_p); + xt::xtensor Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const; + + xt::xtensor Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const; + + xt::xtensor Reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const; + private: - // the matrix - Eigen::SparseMatrix m_data_uu; - Eigen::SparseMatrix m_data_up; - Eigen::SparseMatrix m_data_pu; - Eigen::SparseMatrix m_data_pp; + // The matrix + Eigen::SparseMatrix m_Auu; + Eigen::SparseMatrix m_Aup; + Eigen::SparseMatrix m_Apu; + Eigen::SparseMatrix m_App; - // the matrix to assemble - std::vector> m_trip_uu; - std::vector> m_trip_up; - std::vector> m_trip_pu; - std::vector> m_trip_pp; + // Matrix entries + std::vector> m_Tuu; + std::vector> m_Tup; + std::vector> m_Tpu; + std::vector> m_Tpp; - // solver (re-used to 
solve different RHS) + // Solver (re-used to solve different RHS) Eigen::SimplicialLDLT> m_solver; - // signal changes to data compare to the last inverse - bool m_change=false; + // Signal changes to data compare to the last inverse + bool m_factor=false; - // bookkeeping + // Bookkeeping xt::xtensor m_conn; // connectivity [nelem, nne ] xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] xt::xtensor m_part; // DOF-numbers per node, renumbered [nnode, ndim] - xt::xtensor m_iiu; // DOF-numbers that are unknown [nnu] - xt::xtensor m_iip; // DOF-numbers that are prescribed [nnp] + xt::xtensor m_iiu; // unknown DOFs [nnu] + xt::xtensor m_iip; // prescribed DOFs [nnp] - // dimensions + // Dimensions size_t m_nelem; // number of elements size_t m_nne; // number of nodes per element size_t m_nnode; // number of nodes size_t m_ndim; // number of dimensions size_t m_ndof; // number of DOFs size_t m_nnu; // number of unknown DOFs size_t m_nnp; // number of prescribed DOFs - // compute inverse (automatically evaluated by "solve") - + // Compute inverse (automatically evaluated by "solve") void factorize(); - // convert arrays (Eigen version of VectorPartitioned, which contains public functions) - - Eigen::VectorXd asDofs_u(const xt::xtensor &dofval) const; - + // Convert arrays (Eigen version of VectorPartitioned, which contains public functions) + Eigen::VectorXd asDofs_u(const xt::xtensor &dofval ) const; Eigen::VectorXd asDofs_u(const xt::xtensor &nodevec) const; - - Eigen::VectorXd asDofs_p(const xt::xtensor &dofval) const; - + Eigen::VectorXd asDofs_p(const xt::xtensor &dofval ) const; Eigen::VectorXd asDofs_p(const xt::xtensor &nodevec) const; }; // ------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #include "MatrixPartitioned.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/MatrixPartitioned.hpp b/include/GooseFEM/MatrixPartitioned.hpp index 82a0837..4992254 100644 --- a/include/GooseFEM/MatrixPartitioned.hpp +++ b/include/GooseFEM/MatrixPartitioned.hpp @@ -1,328 +1,429 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MATRIXPARTITIONED_HPP #define GOOSEFEM_MATRIXPARTITIONED_HPP // ------------------------------------------------------------------------------------------------- #include "MatrixPartitioned.h" +#include "Mesh.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- -inline MatrixPartitioned::MatrixPartitioned(const xt::xtensor &conn, - const xt::xtensor &dofs, const xt::xtensor &iip) : +inline MatrixPartitioned::MatrixPartitioned( + const xt::xtensor &conn, + const xt::xtensor &dofs, + const xt::xtensor &iip) : m_conn(conn), m_dofs(dofs), m_iip(iip) { - // mesh dimensions m_nelem = m_conn.shape()[0]; m_nne = m_conn.shape()[1]; m_nnode = m_dofs.shape()[0]; m_ndim = m_dofs.shape()[1]; - // list with unknown DOFs m_iiu = xt::setdiff1d(dofs, iip); - // dimensions of the system m_ndof = xt::amax(m_dofs)[0] + 1; m_nnp = m_iip.size(); m_nnu = m_iiu.size(); - // DOFs per node, such that iiu = arange(nnu), iip = nnu + arange(nnp) m_part = Mesh::Reorder({m_iiu, m_iip}).get(m_dofs); - // allocate triplet list - m_trip_uu.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); - m_trip_up.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); - m_trip_pu.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); - m_trip_pp.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); - - // allocate sparse matrices - m_data_uu.resize(m_nnu,m_nnu); - m_data_up.resize(m_nnu,m_nnp); - m_data_pu.resize(m_nnp,m_nnu); - m_data_pp.resize(m_nnp,m_nnp); - - // check consistency - assert( xt::amax(m_conn)[0] + 1 == m_nnode ); - assert( xt::amax(m_iip)[0] <= xt::amax(m_dofs)[0] ); - assert( m_ndof <= m_nnode * m_ndim ); + m_Tuu.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tup.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tpu.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tpp.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + + m_Auu.resize(m_nnu,m_nnu); + m_Aup.resize(m_nnu,m_nnp); + m_Apu.resize(m_nnp,m_nnu); + m_App.resize(m_nnp,m_nnp); + + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(xt::amax(m_iip)[0] <= xt::amax(m_dofs)[0]); + assert(m_ndof <= m_nnode * m_ndim); } // ------------------------------------------------------------------------------------------------- -inline size_t MatrixPartitioned::nelem() const { return m_nelem; } +inline size_t MatrixPartitioned::nelem() const +{ return m_nelem; } -inline size_t MatrixPartitioned::nne() const { return m_nne; } +inline size_t MatrixPartitioned::nne() const +{ return m_nne; } -inline size_t MatrixPartitioned::nnode() const { return m_nnode; } +inline size_t MatrixPartitioned::nnode() const +{ return m_nnode; } -inline size_t MatrixPartitioned::ndim() const { return m_ndim; } +inline size_t MatrixPartitioned::ndim() const +{ return m_ndim; } -inline size_t MatrixPartitioned::ndof() const { return m_ndof; } +inline size_t MatrixPartitioned::ndof() const +{ return m_ndof; } -inline size_t MatrixPartitioned::nnu() const { return m_nnu; } +inline size_t MatrixPartitioned::nnu() const +{ return m_nnu; } -inline size_t MatrixPartitioned::nnp() const { return m_nnp; } +inline size_t MatrixPartitioned::nnp() const +{ return m_nnp; } -inline xt::xtensor MatrixPartitioned::dofs() const { return m_dofs; } +inline xt::xtensor MatrixPartitioned::dofs() const +{ 
return m_dofs; } -inline xt::xtensor MatrixPartitioned::iiu() const { return m_iiu; } +inline xt::xtensor MatrixPartitioned::iiu() const +{ return m_iiu; } -inline xt::xtensor MatrixPartitioned::iip() const { return m_iip; } +inline xt::xtensor MatrixPartitioned::iip() const +{ return m_iip; } // ------------------------------------------------------------------------------------------------- inline void MatrixPartitioned::factorize() { - // skip for unchanged "m_data" - if ( ! m_change ) return; + if (!m_factor) return; + - // factorise - m_solver.compute(m_data_uu); + m_solver.compute(m_Auu); - // reset signal - m_change = false; + m_factor = false; } // ------------------------------------------------------------------------------------------------- -inline Eigen::VectorXd MatrixPartitioned::asDofs_u(const xt::xtensor &dofval) const +inline void MatrixPartitioned::assemble(const xt::xtensor &elemmat) { - assert( dofval.size() == m_ndof ); + assert(elemmat.shape()[0] == m_nelem); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); - Eigen::VectorXd dofval_u(m_nnu,1); + m_Tuu.clear(); + m_Tup.clear(); + m_Tpu.clear(); + m_Tpp.clear(); - #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) - dofval_u(d) = dofval(m_iiu(d)); + for (size_t e = 0 ; e < m_nelem ; ++e) { + for (size_t m = 0 ; m < m_nne ; ++m) { + for (size_t i = 0 ; i < m_ndim ; ++i) { - return dofval_u; + size_t di = m_part(m_conn(e,m),i); + + for (size_t n = 0 ; n < m_nne ; ++n) { + for (size_t j = 0 ; j < m_ndim ; ++j) { + + size_t dj = m_part(m_conn(e,n),j); + + if (di < m_nnu and dj < m_nnu) + m_Tuu.push_back(Eigen::Triplet(di ,dj ,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (di < m_nnu) + m_Tup.push_back(Eigen::Triplet(di ,dj-m_nnu,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (dj < m_nnu) + m_Tpu.push_back(Eigen::Triplet(di-m_nnu,dj ,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else + m_Tpp.push_back(Eigen::Triplet(di-m_nnu,dj-m_nnu,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + } + } + } + } + } + + m_Auu.setFromTriplets(m_Tuu.begin(), m_Tuu.end()); + m_Aup.setFromTriplets(m_Tup.begin(), m_Tup.end()); + m_Apu.setFromTriplets(m_Tpu.begin(), m_Tpu.end()); + m_App.setFromTriplets(m_Tpp.begin(), m_Tpp.end()); + + m_factor = true; } // ------------------------------------------------------------------------------------------------- -inline Eigen::VectorXd MatrixPartitioned::asDofs_u(const xt::xtensor &nodevec) const +inline void MatrixPartitioned::solve( + const xt::xtensor &b, + xt::xtensor &x) { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); - Eigen::VectorXd dofval_u(m_nnu,1); + this->factorize(); + + Eigen::VectorXd B_u = this->asDofs_u(b); + Eigen::VectorXd X_p = this->asDofs_p(x); + + Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_Aup * X_p)); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) if ( m_part(m,i) < m_nnu ) - dofval_u(m_part(m,i)) = nodevec(m,i); - - return dofval_u; + x(m,i) = X_u(m_part(m,i)); } // ------------------------------------------------------------------------------------------------- -inline Eigen::VectorXd MatrixPartitioned::asDofs_p(const xt::xtensor &dofval) const +inline void MatrixPartitioned::solve( + const xt::xtensor &b, + 
xt::xtensor &x) { - assert( dofval.size() == m_ndof ); + assert(b.size() == m_ndof); + assert(x.size() == m_ndof); - Eigen::VectorXd dofval_p(m_nnp,1); + this->factorize(); + + Eigen::VectorXd B_u = this->asDofs_u(b); + Eigen::VectorXd X_p = this->asDofs_p(x); + + Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_Aup * X_p)); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) - dofval_p(d) = dofval(m_iip(d)); + for (size_t d = 0 ; d < m_nnu ; ++d) + x(m_iiu(d)) = X_u(d); +} - return dofval_p; +// ------------------------------------------------------------------------------------------------- + +inline void MatrixPartitioned::solve_u( + const xt::xtensor &b_u, + const xt::xtensor &x_p, + xt::xtensor &x_u) +{ + assert(b_u.shape()[0] == m_nnu); + assert(x_p.shape()[0] == m_nnp); + assert(x_u.shape()[0] == m_nnu); + + this->factorize(); + + Eigen::VectorXd B_u(m_nnu,1); + Eigen::VectorXd X_p(m_nnp,1); + + std::copy(b_u.begin(), b_u.end(), B_u.data()); + std::copy(x_p.begin(), x_p.end(), X_p.data()); + + Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_Aup * X_p)); + + std::copy(X_u.data(), X_u.data()+m_nnu, x_u.begin()); } // ------------------------------------------------------------------------------------------------- -inline Eigen::VectorXd MatrixPartitioned::asDofs_p(const xt::xtensor &nodevec) const +inline void MatrixPartitioned::reaction( + const xt::xtensor &x, + xt::xtensor &b) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); - Eigen::VectorXd dofval_p(m_nnp,1); + Eigen::VectorXd X_u = this->asDofs_u(x); + Eigen::VectorXd X_p = this->asDofs_p(x); + + Eigen::VectorXd B_p = m_Apu * X_u + m_App * X_p; #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m,i) >= m_nnu ) - dofval_p(m_part(m,i)-m_nnu) = nodevec(m,i); + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) >= m_nnu) + b(m,i) = B_p(m_part(m,i)-m_nnu); +} - return dofval_p; +// ------------------------------------------------------------------------------------------------- + +inline void MatrixPartitioned::reaction( + const xt::xtensor &x, + xt::xtensor &b) const +{ + assert(x.size() == m_ndof); + assert(b.size() == m_ndof); + + Eigen::VectorXd X_u = this->asDofs_u(x); + Eigen::VectorXd X_p = this->asDofs_p(x); + + Eigen::VectorXd B_p = m_Apu * X_u + m_App * X_p; + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnp ; ++d) + b(m_iip(d)) = B_p(d); } // ------------------------------------------------------------------------------------------------- -inline void MatrixPartitioned::assemble(const xt::xtensor &elemmat) +inline void MatrixPartitioned::reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p, + xt::xtensor &b_p) const { - // check input - assert( elemmat.shape()[0] == m_nelem ); - assert( elemmat.shape()[1] == m_nne*m_ndim ); - assert( elemmat.shape()[2] == m_nne*m_ndim ); - - // initialize triplet list - m_trip_uu.clear(); - m_trip_up.clear(); - m_trip_pu.clear(); - m_trip_pp.clear(); - - // assemble to triplet list - for ( size_t e = 0 ; e < m_nelem ; ++e ) - { - for ( size_t m = 0 ; m < m_nne ; ++m ) - { - for ( size_t i = 0 ; i < m_ndim ; ++i ) - { - size_t di = m_part(m_conn(e,m),i); + assert(x_u.shape()[0] == m_nnu); + assert(x_p.shape()[0] == m_nnp); + 
assert(b_p.shape()[0] == m_nnp); - for ( size_t n = 0 ; n < m_nne ; ++n ) - { - for ( size_t j = 0 ; j < m_ndim ; ++j ) - { - size_t dj = m_part(m_conn(e,n),j); + Eigen::VectorXd X_u(m_nnu,1); + Eigen::VectorXd X_p(m_nnp,1); - if ( di < m_nnu and dj < m_nnu ) - m_trip_uu.push_back(Eigen::Triplet(di ,dj ,elemmat(e,m*m_ndim+i,n*m_ndim+j))); - else if ( di < m_nnu ) - m_trip_up.push_back(Eigen::Triplet(di ,dj-m_nnu,elemmat(e,m*m_ndim+i,n*m_ndim+j))); - else if ( dj < m_nnu ) - m_trip_pu.push_back(Eigen::Triplet(di-m_nnu,dj ,elemmat(e,m*m_ndim+i,n*m_ndim+j))); - else - m_trip_pp.push_back(Eigen::Triplet(di-m_nnu,dj-m_nnu,elemmat(e,m*m_ndim+i,n*m_ndim+j))); - } - } - } - } - } + std::copy(x_u.begin(), x_u.end(), X_u.data()); + std::copy(x_p.begin(), x_p.end(), X_p.data()); - // convert to sparse matrix - m_data_uu.setFromTriplets(m_trip_uu.begin(), m_trip_uu.end()); - m_data_up.setFromTriplets(m_trip_up.begin(), m_trip_up.end()); - m_data_pu.setFromTriplets(m_trip_pu.begin(), m_trip_pu.end()); - m_data_pp.setFromTriplets(m_trip_pp.begin(), m_trip_pp.end()); + Eigen::VectorXd B_p = m_Apu * X_u + m_App * X_p; - // signal change - m_change = true; + std::copy(B_p.data(), B_p.data()+m_nnp, b_p.begin()); } // ------------------------------------------------------------------------------------------------- -inline void MatrixPartitioned::solve(xt::xtensor &b, - xt::xtensor &x) +inline xt::xtensor MatrixPartitioned::Solve( + const xt::xtensor &b, + const xt::xtensor &x) { - // check input - assert( b.shape()[0] == m_nnode ); - assert( b.shape()[1] == m_ndim ); - assert( x.shape()[0] == m_nnode ); - assert( x.shape()[1] == m_ndim ); + xt::xtensor out = x; - // factorise (if needed) - this->factorize(); + this->solve(b, out); - // extract dofvals - Eigen::VectorXd B_u = this->asDofs_u(b); - Eigen::VectorXd X_p = this->asDofs_p(x); + return out; +} - // solve - Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_data_up * X_p)); +// ------------------------------------------------------------------------------------------------- - // right-hand-side for prescribed DOFs - Eigen::VectorXd B_p = m_data_pu * X_u + m_data_pp * X_p; +inline xt::xtensor MatrixPartitioned::Solve( + const xt::xtensor &b, + const xt::xtensor &x) +{ + xt::xtensor out = x; - // collect "x_u" and "b_p" - #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - { - for ( size_t i = 0 ; i < m_ndim ; ++i ) - { - size_t d = m_part(m,i); - - if ( d < m_nnu ) x(m,i) = X_u(d ); - else b(m,i) = B_p(d-m_nnu); - } - } + this->solve(b, out); + + return out; } // ------------------------------------------------------------------------------------------------- -inline void MatrixPartitioned::solve(xt::xtensor &b, - xt::xtensor &x) +inline xt::xtensor MatrixPartitioned::Solve_u( + const xt::xtensor &b_u, + const xt::xtensor &x_p) { - assert( b.size() == m_ndof ); - assert( x.size() == m_ndof ); + xt::xtensor x_u = xt::empty({m_nnu}); - // factorise (if needed) - this->factorize(); + this->solve_u(b_u, x_p, x_u); - // extract dofvals - Eigen::VectorXd B_u = this->asDofs_u(b); - Eigen::VectorXd X_p = this->asDofs_p(x); + return x_u; +} - // solve - Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_data_up * X_p)); +// ------------------------------------------------------------------------------------------------- - // right-hand-side for prescribed DOFs - Eigen::VectorXd B_p = m_data_pu * X_u + m_data_pp * X_p; +inline xt::xtensor MatrixPartitioned::Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const +{ 
+ xt::xtensor out = b; + + this->reaction(x, out); + + return out; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor MatrixPartitioned::Reaction( + const xt::xtensor &x, + const xt::xtensor &b) const +{ + xt::xtensor out = b; + + this->reaction(x, out); + + return out; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor MatrixPartitioned::Reaction_p( + const xt::xtensor &x_u, + const xt::xtensor &x_p) const +{ + xt::xtensor b_p = xt::empty({m_nnp}); + + this->reaction_p(x_u, x_p, b_p); + + return b_p; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitioned::asDofs_u(const xt::xtensor &dofval) const +{ + assert(dofval.size() == m_ndof); + + Eigen::VectorXd dofval_u(m_nnu,1); - // collect "x_u" #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) x(m_iiu(d)) = X_u(d); + for (size_t d = 0 ; d < m_nnu ; ++d) + dofval_u(d) = dofval(m_iiu(d)); + + return dofval_u; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitioned::asDofs_u(const xt::xtensor &nodevec) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + Eigen::VectorXd dofval_u(m_nnu,1); - // collect "b_p" #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) b(m_iip(d)) = B_p(d); + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) < m_nnu) + dofval_u(m_part(m,i)) = nodevec(m,i); + + return dofval_u; } // ------------------------------------------------------------------------------------------------- -inline void MatrixPartitioned::solve_u( - const xt::xtensor &b_u, const xt::xtensor &x_p, - xt::xtensor &x_u) +inline Eigen::VectorXd MatrixPartitioned::asDofs_p(const xt::xtensor &dofval) const { - // check input - assert( b_u.shape()[0] == m_nnu ); - assert( x_p.shape()[0] == m_nnp ); - assert( x_u.shape()[0] == m_nnu ); + assert(dofval.size() == m_ndof); - // factorise (if needed) - this->factorize(); + Eigen::VectorXd dofval_p(m_nnp,1); - // solve for "x_u" - // - allocate Eigen objects - Eigen::VectorXd B_u(m_nnu,1); - Eigen::VectorXd X_p(m_nnp,1); - // - copy to Eigen objects - std::copy(b_u.begin(), b_u.end(), B_u.data()); - std::copy(x_p.begin(), x_p.end(), X_p.data()); - // - solve - Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_data_up * X_p)); - // - copy from Eigen object - std::copy(X_u.data(), X_u.data()+m_nnu, x_u.begin()); + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnp ; ++d) + dofval_p(d) = dofval(m_iip(d)); + + return dofval_p; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor MatrixPartitioned::solve_u( - const xt::xtensor &b_u, const xt::xtensor &x_p) +inline Eigen::VectorXd MatrixPartitioned::asDofs_p(const xt::xtensor &nodevec) const { - xt::xtensor x_u = xt::empty({m_nnu}); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); - this->solve_u(b_u, x_p, x_u); + Eigen::VectorXd dofval_p(m_nnp,1); - return x_u; + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) >= m_nnu) + dofval_p(m_part(m,i)-m_nnu) = nodevec(m,i); + + return dofval_p; } // 
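A minimal sketch of the const-correct Solve/Reaction pair of MatrixPartitioned (illustrative only; the constructor and all member calls are the ones declared above, argument names are made up):

    #include <GooseFEM/GooseFEM.h>

    // sketch: linear static solve K u = f with prescribed displacements on "iip"
    inline void example_partitioned(
        const xt::xtensor<size_t,2>& conn,
        const xt::xtensor<size_t,2>& dofs,
        const xt::xtensor<size_t,1>& iip,
        const xt::xtensor<double,3>& elemmat,  // element stiffness matrices [nelem, nne*ndim, nne*ndim]
        const xt::xtensor<double,2>& fext,     // external force [nnode, ndim]
        xt::xtensor<double,2>& disp)           // displacement: prescribed entries set on input
    {
        GooseFEM::MatrixPartitioned K(conn, dofs, iip);

        K.assemble(elemmat);                  // fills A_uu, A_up, A_pu, A_pp from triplets
        disp = K.Solve(fext, disp);           // u_u = A_uu \ (f_u - A_up * u_p)
        auto freact = K.Reaction(disp, fext); // f_p = A_pu * u_u + A_pp * u_p
    }
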
------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/MatrixPartitionedTyings.h b/include/GooseFEM/MatrixPartitionedTyings.h new file mode 100644 index 0000000..5e07396 --- /dev/null +++ b/include/GooseFEM/MatrixPartitionedTyings.h @@ -0,0 +1,166 @@ +/* ================================================================================================= + +(c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM + +================================================================================================= */ + +#ifndef GOOSEFEM_MATRIXPARTITIONEDTYINGS_H +#define GOOSEFEM_MATRIXPARTITIONEDTYINGS_H + +// ------------------------------------------------------------------------------------------------- + +#include "config.h" + +#include +#include +#include + +// ================================================================================================= + +namespace GooseFEM { + +// ------------------------------------------------------------------------------------------------- + +class MatrixPartitionedTyings +{ +public: + + // Constructors + + MatrixPartitionedTyings() = default; + + MatrixPartitionedTyings( + const xt::xtensor& conn, + const xt::xtensor& dofs, + const Eigen::SparseMatrix& Cdu, + const Eigen::SparseMatrix& Cdp); + + // Dimensions + + size_t nelem() const; // number of elements + size_t nne() const; // number of nodes per element + size_t nnode() const; // number of nodes + size_t ndim() const; // number of dimensions + size_t ndof() const; // number of DOFs + size_t nnu() const; // number of independent, unknown DOFs + size_t nnp() const; // number of independent, prescribed DOFs + size_t nni() const; // number of independent DOFs + size_t nnd() const; // number of dependent DOFs + + // DOF lists + + xt::xtensor dofs() const; // DOFs + xt::xtensor iiu() const; // independent, unknown DOFs + xt::xtensor iip() const; // independent, prescribed DOFs + xt::xtensor iii() const; // independent DOFs + xt::xtensor iid() const; // dependent DOFs + + // Assemble from matrices stored per element [nelem, nne*ndim, nne*ndim] + + void assemble(const xt::xtensor &elemmat); + + // Solve: + // A' = A_ii + K_id * C_di + C_di^T * K_di + C_di^T * K_dd * C_di + // b' = b_i + C_di^T * b_d + // x_u = A'_uu \ ( b'_u - A'_up * x_p ) + // x_i = [x_u, x_p] + // x_d = C_di * x_i + + void solve( + const xt::xtensor &b, + xt::xtensor &x); // modified with "x_u", "x_d" + + void solve( + const xt::xtensor &b, + xt::xtensor &x); // modified with "x_u", "x_d" + + void solve_u( + const xt::xtensor &b_u, + const xt::xtensor &b_d, + const xt::xtensor &x_p, + xt::xtensor &x_u); // overwritten + +private: + + // The matrix + Eigen::SparseMatrix m_Auu; + Eigen::SparseMatrix m_Aup; + Eigen::SparseMatrix m_Apu; + Eigen::SparseMatrix m_App; + Eigen::SparseMatrix m_Aud; + Eigen::SparseMatrix m_Apd; + Eigen::SparseMatrix m_Adu; + Eigen::SparseMatrix m_Adp; + Eigen::SparseMatrix m_Add; + + // The matrix for which the tyings have been applied + Eigen::SparseMatrix m_ACuu; + Eigen::SparseMatrix m_ACup; + Eigen::SparseMatrix m_ACpu; + Eigen::SparseMatrix m_ACpp; + + // Matrix entries + std::vector> m_Tuu; + std::vector> m_Tup; + std::vector> m_Tpu; + std::vector> m_Tpp; + std::vector> m_Tud; + std::vector> m_Tpd; + std::vector> m_Tdu; + std::vector> m_Tdp; + std::vector> m_Tdd; + + // 
Solver (re-used to solve different RHS) + Eigen::SimplicialLDLT> m_solver; + + // Signal changes to data compare to the last inverse + bool m_factor=false; + + // Bookkeeping + xt::xtensor m_conn; // connectivity [nelem, nne ] + xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] + xt::xtensor m_iiu; // unknown DOFs [nnu] + xt::xtensor m_iip; // prescribed DOFs [nnp] + xt::xtensor m_iid; // dependent DOFs [nnd] + + // Dimensions + size_t m_nelem; // number of elements + size_t m_nne; // number of nodes per element + size_t m_nnode; // number of nodes + size_t m_ndim; // number of dimensions + size_t m_ndof; // number of DOFs + size_t m_nnu; // number of independent, unknown DOFs + size_t m_nnp; // number of independent, prescribed DOFs + size_t m_nni; // number of independent DOFs + size_t m_nnd; // number of dependent DOFs + + // Tyings + Eigen::SparseMatrix m_Cdu; + Eigen::SparseMatrix m_Cdp; + Eigen::SparseMatrix m_Cud; + Eigen::SparseMatrix m_Cpd; + + // Compute inverse (automatically evaluated by "solve") + void factorize(); + + // Convert arrays (Eigen version of VectorPartitioned, which contains public functions) + Eigen::VectorXd asDofs_u(const xt::xtensor &dofval ) const; + Eigen::VectorXd asDofs_u(const xt::xtensor &nodevec) const; + Eigen::VectorXd asDofs_p(const xt::xtensor &dofval ) const; + Eigen::VectorXd asDofs_p(const xt::xtensor &nodevec) const; + Eigen::VectorXd asDofs_d(const xt::xtensor &dofval ) const; + Eigen::VectorXd asDofs_d(const xt::xtensor &nodevec) const; + +}; + +// ------------------------------------------------------------------------------------------------- + +} // namespace ... + +// ================================================================================================= + +#include "MatrixPartitionedTyings.hpp" + +// ================================================================================================= + +#endif diff --git a/include/GooseFEM/MatrixPartitionedTyings.hpp b/include/GooseFEM/MatrixPartitionedTyings.hpp new file mode 100644 index 0000000..a7ecfbd --- /dev/null +++ b/include/GooseFEM/MatrixPartitionedTyings.hpp @@ -0,0 +1,394 @@ +/* ================================================================================================= + +(c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM + +================================================================================================= */ + +#ifndef GOOSEFEM_MATRIXPARTITIONEDTYINGS_HPP +#define GOOSEFEM_MATRIXPARTITIONEDTYINGS_HPP + +// ------------------------------------------------------------------------------------------------- + +#include "MatrixPartitionedTyings.h" + +// ================================================================================================= + +namespace GooseFEM { + +// ------------------------------------------------------------------------------------------------- + +inline MatrixPartitionedTyings::MatrixPartitionedTyings( + const xt::xtensor &conn, + const xt::xtensor &dofs, + const Eigen::SparseMatrix& Cdu, + const Eigen::SparseMatrix& Cdp) : + m_conn(conn), + m_dofs(dofs), + m_Cdu(Cdu), + m_Cdp(Cdp) +{ + assert(Cdu.rows() == Cdp.rows()); + + m_nnu = static_cast(m_Cdu.cols()); + m_nnp = static_cast(m_Cdp.cols()); + m_nnd = static_cast(m_Cdp.rows()); + m_nni = m_nnu + m_nnp; + m_ndof = m_nni + m_nnd; + + m_iiu = xt::arange(m_nnu); + m_iip = xt::arange(m_nnp) + m_nnu; + m_iid = xt::arange(m_nnd) + m_nni; + + m_nelem = m_conn.shape()[0]; + m_nne = m_conn.shape()[1]; + m_nnode = m_dofs.shape()[0]; + m_ndim = m_dofs.shape()[1]; + + m_Cud = m_Cdu.transpose(); + m_Cpd = m_Cdp.transpose(); + + m_Tuu.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tup.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tpu.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tpp.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tud.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tpd.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tdu.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tdp.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + m_Tdd.reserve(m_nelem*m_nne*m_ndim*m_nne*m_ndim); + + m_Auu.resize(m_nnu,m_nnu); + m_Aup.resize(m_nnu,m_nnp); + m_Apu.resize(m_nnp,m_nnu); + m_App.resize(m_nnp,m_nnp); + m_Aud.resize(m_nnu,m_nnd); + m_Apd.resize(m_nnp,m_nnd); + m_Adu.resize(m_nnd,m_nnu); + m_Adp.resize(m_nnd,m_nnp); + m_Add.resize(m_nnd,m_nnd); + + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(m_ndof <= m_nnode * m_ndim); + assert(m_ndof == xt::amax(m_dofs)[0] + 1); +} + +// ------------------------------------------------------------------------------------------------- + +inline size_t MatrixPartitionedTyings::nelem() const +{ return m_nelem; } + +inline size_t MatrixPartitionedTyings::nne() const +{ return m_nne; } + +inline size_t MatrixPartitionedTyings::nnode() const +{ return m_nnode; } + +inline size_t MatrixPartitionedTyings::ndim() const +{ return m_ndim; } + +inline size_t MatrixPartitionedTyings::ndof() const +{ return m_ndof; } + +inline size_t MatrixPartitionedTyings::nnu() const +{ return m_nnu; } + +inline size_t MatrixPartitionedTyings::nnp() const +{ return m_nnp; } + +inline xt::xtensor MatrixPartitionedTyings::dofs() const +{ return m_dofs; } + +inline xt::xtensor MatrixPartitionedTyings::iiu() const +{ return m_iiu; } + +inline xt::xtensor MatrixPartitionedTyings::iip() const +{ return m_iip; } + +inline xt::xtensor MatrixPartitionedTyings::iii() const +{ return xt::arange(m_nni); } + +inline xt::xtensor MatrixPartitionedTyings::iid() const +{ return m_iid; } + +// ------------------------------------------------------------------------------------------------- + +inline void MatrixPartitionedTyings::factorize() +{ + if (!m_factor) return; + + m_ACuu = m_Auu + m_Aud * m_Cdu + m_Cud * m_Adu + m_Cud * m_Add * m_Cdu; + m_ACup = m_Aup 
+ m_Aud * m_Cdp + m_Cud * m_Adp + m_Cud * m_Add * m_Cdp; + // m_ACpu = m_Apu + m_Apd * m_Cdu + m_Cpd * m_Adu + m_Cpd * m_Add * m_Cdu; + // m_ACpp = m_App + m_Apd * m_Cdp + m_Cpd * m_Adp + m_Cpd * m_Add * m_Cdp; + + m_solver.compute(m_ACuu); + + m_factor = false; +} + +// ------------------------------------------------------------------------------------------------- + +inline void MatrixPartitionedTyings::assemble(const xt::xtensor &elemmat) +{ + assert(elemmat.shape()[0] == m_nelem ); + assert(elemmat.shape()[1] == m_nne*m_ndim); + assert(elemmat.shape()[2] == m_nne*m_ndim); + + m_Tuu.clear(); + m_Tup.clear(); + m_Tpu.clear(); + m_Tpp.clear(); + m_Tud.clear(); + m_Tpd.clear(); + m_Tdu.clear(); + m_Tdp.clear(); + m_Tdd.clear(); + + for (size_t e = 0 ; e < m_nelem ; ++e) { + for (size_t m = 0 ; m < m_nne ; ++m) { + for (size_t i = 0 ; i < m_ndim ; ++i) { + + size_t di = m_dofs(m_conn(e,m),i); + + for (size_t n = 0 ; n < m_nne ; ++n) { + for (size_t j = 0 ; j < m_ndim ; ++j) { + + size_t dj = m_dofs(m_conn(e,n),j); + + if (di < m_nnu and dj < m_nnu) + m_Tuu.push_back(Eigen::Triplet(di ,dj ,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (di < m_nnu and dj < m_nni) + m_Tup.push_back(Eigen::Triplet(di ,dj-m_nnu,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (di < m_nnu) + m_Tud.push_back(Eigen::Triplet(di ,dj-m_nni,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (di < m_nni and dj < m_nnu) + m_Tpu.push_back(Eigen::Triplet(di-m_nnu,dj ,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (di < m_nni and dj < m_nni) + m_Tpp.push_back(Eigen::Triplet(di-m_nnu,dj-m_nnu,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (di < m_nni) + m_Tpd.push_back(Eigen::Triplet(di-m_nnu,dj-m_nni,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (dj < m_nnu) + m_Tdu.push_back(Eigen::Triplet(di-m_nni,dj ,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else if (dj < m_nni) + m_Tdp.push_back(Eigen::Triplet(di-m_nni,dj-m_nnu,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + else + m_Tdd.push_back(Eigen::Triplet(di-m_nni,dj-m_nni,elemmat(e,m*m_ndim+i,n*m_ndim+j))); + } + } + } + } + } + + m_Auu.setFromTriplets(m_Tuu.begin(), m_Tuu.end()); + m_Aup.setFromTriplets(m_Tup.begin(), m_Tup.end()); + m_Apu.setFromTriplets(m_Tpu.begin(), m_Tpu.end()); + m_App.setFromTriplets(m_Tpp.begin(), m_Tpp.end()); + m_Aud.setFromTriplets(m_Tud.begin(), m_Tud.end()); + m_Apd.setFromTriplets(m_Tpd.begin(), m_Tpd.end()); + m_Adu.setFromTriplets(m_Tdu.begin(), m_Tdu.end()); + m_Adp.setFromTriplets(m_Tdp.begin(), m_Tdp.end()); + m_Add.setFromTriplets(m_Tdd.begin(), m_Tdd.end()); + + m_factor = true; +} + +// ------------------------------------------------------------------------------------------------- + +inline void MatrixPartitionedTyings::solve( + const xt::xtensor &b, + xt::xtensor &x) +{ + assert(b.shape()[0] == m_nnode); + assert(b.shape()[1] == m_ndim ); + assert(x.shape()[0] == m_nnode); + assert(x.shape()[1] == m_ndim ); + + this->factorize(); + + Eigen::VectorXd B_u = this->asDofs_u(b); + Eigen::VectorXd B_d = this->asDofs_d(b); + Eigen::VectorXd X_p = this->asDofs_p(x); + + B_u += m_Cud * B_d; + + Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_ACup * X_p)); + Eigen::VectorXd X_d = m_Cdu * X_u + m_Cdp * X_p; + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) { + for (size_t i = 0 ; i < m_ndim ; ++i) { + if (m_dofs(m,i) < m_nnu) + x(m,i) = X_u(m_dofs(m,i)); + else if (m_dofs(m,i) >= m_nni) + x(m,i) = X_d(m_dofs(m,i)-m_nni); + } + } +} + +// ------------------------------------------------------------------------------------------------- + 
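The condensation applied in "factorize" and used by "solve" below can be checked independently on a toy system; the following self-contained sketch uses only Eigen and made-up numbers (one dependent DOF tied as x_d = x_0, no prescribed DOFs):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::Matrix3d A;                  // full (symmetric) system: DOFs [x_0, x_1, x_d]
        A << 4.0, -1.0, -1.0,
            -1.0,  4.0, -2.0,
            -1.0, -2.0,  4.0;
        Eigen::Vector3d b(1.0, 0.0, 0.5);

        Eigen::Matrix2d    Aii = A.topLeftCorner<2,2>();
        Eigen::Vector2d    Aid = A.topRightCorner<2,1>();
        Eigen::RowVector2d Adi = A.bottomLeftCorner<1,2>();
        double             Add = A(2,2);
        Eigen::RowVector2d Cdi(1.0, 0.0);   // tying: x_d = C_di * x_i = x_0

        // A' = A_ii + A_id * C_di + C_di^T * A_di + C_di^T * A_dd * C_di
        Eigen::Matrix2d Ac = Aii + Aid * Cdi + Cdi.transpose() * Adi + Cdi.transpose() * Add * Cdi;

        // b' = b_i + C_di^T * b_d
        Eigen::Vector2d bc = b.head<2>() + Cdi.transpose() * b(2);

        // x_i = A' \ b' ; the dependent DOF follows from the tying
        Eigen::Vector2d xi = Ac.ldlt().solve(bc);
        double xd = (Cdi * xi).value();

        std::cout << "x_i = " << xi.transpose() << ", x_d = " << xd << std::endl;
    }
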
+inline void MatrixPartitionedTyings::solve( + const xt::xtensor &b, + xt::xtensor &x) +{ + assert(b.size() == m_ndof); + assert(x.size() == m_ndof); + + this->factorize(); + + Eigen::VectorXd B_u = this->asDofs_u(b); + Eigen::VectorXd B_d = this->asDofs_d(b); + Eigen::VectorXd X_p = this->asDofs_p(x); + + Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_ACup * X_p)); + Eigen::VectorXd X_d = m_Cdu * X_u + m_Cdp * X_p; + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnu ; ++d) + x(m_iiu(d)) = X_u(d); + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnd ; ++d) + x(m_iid(d)) = X_d(d); +} + +// ------------------------------------------------------------------------------------------------- + +inline void MatrixPartitionedTyings::solve_u( + const xt::xtensor &b_u, + const xt::xtensor &b_d, + const xt::xtensor &x_p, + xt::xtensor &x_u) +{ + assert(b_u.shape()[0] == m_nnu); + assert(b_d.shape()[0] == m_nnd); + assert(x_p.shape()[0] == m_nnp); + assert(x_u.shape()[0] == m_nnu); + + this->factorize(); + + Eigen::VectorXd B_u(m_nnu,1); + Eigen::VectorXd B_d(m_nnd,1); + Eigen::VectorXd X_p(m_nnp,1); + + std::copy(b_u.begin(), b_u.end(), B_u.data()); + std::copy(b_d.begin(), b_d.end(), B_d.data()); + std::copy(x_p.begin(), x_p.end(), X_p.data()); + + Eigen::VectorXd X_u = m_solver.solve(Eigen::VectorXd(B_u - m_ACup * X_p)); + + std::copy(X_u.data(), X_u.data()+m_nnu, x_u.begin()); +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitionedTyings::asDofs_u( + const xt::xtensor &dofval) const +{ + assert(dofval.size() == m_ndof); + + Eigen::VectorXd dofval_u(m_nnu,1); + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnu ; ++d) + dofval_u(d) = dofval(m_iiu(d)); + + return dofval_u; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitionedTyings::asDofs_u( + const xt::xtensor &nodevec) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + Eigen::VectorXd dofval_u(m_nnu,1); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_dofs(m,i) < m_nnu) + dofval_u(m_dofs(m,i)) = nodevec(m,i); + + return dofval_u; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitionedTyings::asDofs_p( + const xt::xtensor &dofval) const +{ + assert(dofval.size() == m_ndof); + + Eigen::VectorXd dofval_p(m_nnp,1); + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnp ; ++d) + dofval_p(d) = dofval(m_iip(d)); + + return dofval_p; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitionedTyings::asDofs_p( + const xt::xtensor &nodevec) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + Eigen::VectorXd dofval_p(m_nnp,1); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_dofs(m,i) >= m_nnu and m_dofs(m,i) < m_nni) + dofval_p(m_dofs(m,i)-m_nnu) = nodevec(m,i); + + return dofval_p; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitionedTyings::asDofs_d( + const xt::xtensor &dofval) const +{ + assert(dofval.size() == m_ndof); + 
+ Eigen::VectorXd dofval_d(m_nnd,1); + + #pragma omp parallel for + for (size_t d = 0 ; d < m_nnd ; ++d) + dofval_d(d) = dofval(m_iip(d)); + + return dofval_d; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd MatrixPartitionedTyings::asDofs_d( + const xt::xtensor &nodevec) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + Eigen::VectorXd dofval_d(m_nnd,1); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_dofs(m,i) >= m_nni) + dofval_d(m_dofs(m,i)-m_nni) = nodevec(m,i); + + return dofval_d; +} + +// ------------------------------------------------------------------------------------------------- + +} // namespace ... + +// ================================================================================================= + +#endif diff --git a/include/GooseFEM/Mesh.hpp b/include/GooseFEM/Mesh.hpp index 3316018..50b8f18 100644 --- a/include/GooseFEM/Mesh.hpp +++ b/include/GooseFEM/Mesh.hpp @@ -1,192 +1,195 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MESH_HPP #define GOOSEFEM_MESH_HPP // ------------------------------------------------------------------------------------------------- #include "Mesh.h" // ================================================================================================= namespace GooseFEM { namespace Mesh { // ------------------------------------------------------------------------------------------------- inline Renumber::Renumber(const xt::xarray& dofs) { size_t n = xt::amax(dofs)[0]+1; size_t i = 0; xt::xtensor unique = xt::unique(dofs); m_renum = xt::empty({n}); for (auto& j : unique) { m_renum(j) = i; ++i; } } // ------------------------------------------------------------------------------------------------- // apply renumbering, e.g. 
for a matrix: // // out(i,j) = renum(list(i,j)) template T Renumber::apply(const T& list) const { T out = T::from_shape(list.shape()); auto jt = out.begin(); for ( auto it = list.begin() ; it != list.end() ; ++it, ++jt ) *jt = m_renum(*it); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Renumber::get(const xt::xtensor& dofs) const { return this->apply(dofs); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Renumber::index() const { return m_renum; } // ------------------------------------------------------------------------------------------------- inline Reorder::Reorder(const std::initializer_list> args) { size_t n = 0; size_t i = 0; for (auto& arg : args) + { + if (arg.size() == 0) continue; n = std::max(n, xt::amax(arg)[0]+1); + } #ifndef NDEBUG for (auto& arg : args) assert(xt::unique(arg) == xt::sort(arg)); #endif m_renum = xt::empty({n}); for (auto& arg : args) { for (auto& j : arg) { m_renum(j) = i; ++i; } } } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Reorder::get(const xt::xtensor& dofs) const { return this->apply(dofs); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Reorder::index() const { return m_renum; } // ------------------------------------------------------------------------------------------------- // apply renumbering, e.g. for a matrix: // // out(i,j) = renum(list(i,j)) template T Reorder::apply(const T& list) const { T out = T::from_shape(list.shape()); auto jt = out.begin(); for ( auto it = list.begin() ; it != list.end() ; ++it, ++jt ) *jt = m_renum(*it); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor renumber(const xt::xtensor &dofs) { return Renumber(dofs).get(dofs); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor dofs(size_t nnode, size_t ndim) { return xt::reshape_view(xt::arange(nnode*ndim),{nnode,ndim}); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor coordination(const xt::xtensor &conn) { // get number of nodes size_t nnode = xt::amax(conn)[0] + 1; // number of elements connected to each node // - allocate xt::xtensor N = xt::zeros({nnode}); // - fill from connectivity for ( auto it = conn.begin(); it != conn.end(); ++it ) N(*it) += 1; return N; } // ------------------------------------------------------------------------------------------------- inline std::vector> elem2node(const xt::xtensor &conn) { // get coordination per node auto N = coordination(conn); // get number of nodes auto nnode = N.size(); // allocate std::vector> out; // reserve outer size out.resize(nnode); // reserve inner sizes for ( size_t i = 0 ; i < nnode ; ++i ) out[i].reserve(N(i)); // fill for ( size_t e = 0 ; e < conn.shape()[0] ; ++e ) for ( size_t m = 0 ; m < conn.shape()[1] ; ++m ) out[conn(e,m)].push_back(e); return out; } // ------------------------------------------------------------------------------------------------- }} // namespace ... 
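As a small illustration of the "Reorder" helper above (the same construct that MatrixPartitioned uses to build "m_part"), the sketch below renumbers the DOFs such that the unknown DOFs come first; it assumes the umbrella header GooseFEM.h plus xtensor's xio.hpp for printing:

    #include <GooseFEM/GooseFEM.h>
    #include <xtensor/xio.hpp>
    #include <iostream>

    int main()
    {
        xt::xtensor<size_t,2> dofs = {{0, 1}, {2, 3}, {4, 5}};
        xt::xtensor<size_t,1> iip  = {1, 4};        // prescribed DOFs
        xt::xtensor<size_t,1> iiu  = {0, 2, 3, 5};  // unknown DOFs (= setdiff1d(dofs, iip))

        // unknown DOFs are renumbered to 0..3, prescribed DOFs to 4..5
        auto part = GooseFEM::Mesh::Reorder({iiu, iip}).get(dofs);

        std::cout << part << std::endl;  // -> {{0, 4}, {1, 2}, {5, 3}}
    }
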
// ================================================================================================= #endif diff --git a/include/GooseFEM/MeshHex8.hpp b/include/GooseFEM/MeshHex8.hpp index 1fe195e..19f0661 100644 --- a/include/GooseFEM/MeshHex8.hpp +++ b/include/GooseFEM/MeshHex8.hpp @@ -1,2599 +1,2606 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MESHHEX8_HPP #define GOOSEFEM_MESHHEX8_HPP // ------------------------------------------------------------------------------------------------- #include "MeshHex8.h" // ================================================================================================= namespace GooseFEM { namespace Mesh { namespace Hex8 { // ------------------------------------------------------------------------------------------------- inline Regular::Regular(size_t nelx, size_t nely, size_t nelz, double h): m_h(h), m_nelx(nelx), m_nely(nely), m_nelz(nelz) { assert( m_nelx >= 1 ); assert( m_nely >= 1 ); assert( m_nelz >= 1 ); m_nnode = (m_nelx+1) * (m_nely+1) * (m_nelz+1); m_nelem = m_nelx * m_nely * m_nelz ; } // ------------------------------------------------------------------------------------------------- -inline size_t Regular::nelem() const { return m_nelem; } -inline size_t Regular::nnode() const { return m_nnode; } -inline size_t Regular::nne() const { return m_nne; } -inline size_t Regular::ndim() const { return m_ndim; } +inline size_t Regular::nelem() const +{ return m_nelem; } + +inline size_t Regular::nnode() const +{ return m_nnode; } + +inline size_t Regular::nne() const +{ return m_nne; } + +inline size_t Regular::ndim() const +{ return m_ndim; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::coor() const { xt::xtensor out = xt::empty({m_nnode, m_ndim}); xt::xtensor x = xt::linspace(0.0, m_h*static_cast(m_nelx), m_nelx+1); xt::xtensor y = xt::linspace(0.0, m_h*static_cast(m_nely), m_nely+1); xt::xtensor z = xt::linspace(0.0, m_h*static_cast(m_nelz), m_nelz+1); size_t inode = 0; for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) { for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) { for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) { out(inode,0) = x(ix); out(inode,1) = y(iy); out(inode,2) = z(iz); ++inode; } } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::conn() const { xt::xtensor out = xt::empty({m_nelem,m_nne}); size_t ielem = 0; for ( size_t iz = 0 ; iz < m_nelz ; ++iz ) { for ( size_t iy = 0 ; iy < m_nely ; ++iy ) { for ( size_t ix = 0 ; ix < m_nelx ; ++ix ) { out(ielem,0) = (iy )*(m_nelx+1) + (ix ) + (iz )*(m_nely+1)*(m_nelx+1); out(ielem,1) = (iy )*(m_nelx+1) + (ix+1) + (iz )*(m_nely+1)*(m_nelx+1); out(ielem,3) = (iy+1)*(m_nelx+1) + (ix ) + (iz )*(m_nely+1)*(m_nelx+1); out(ielem,2) = (iy+1)*(m_nelx+1) + (ix+1) + (iz )*(m_nely+1)*(m_nelx+1); out(ielem,4) = (iy )*(m_nelx+1) + (ix ) + (iz+1)*(m_nely+1)*(m_nelx+1); out(ielem,5) = (iy )*(m_nelx+1) + (ix+1) + (iz+1)*(m_nely+1)*(m_nelx+1); out(ielem,7) = (iy+1)*(m_nelx+1) + (ix ) + (iz+1)*(m_nely+1)*(m_nelx+1); out(ielem,6) = (iy+1)*(m_nelx+1) + (ix+1) + (iz+1)*(m_nely+1)*(m_nelx+1); ++ielem; } } } return out; } // 
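A minimal sketch of the regular hexahedral mesh generator whose coordinates and connectivity are defined above (illustrative only; all calls are members of Mesh::Hex8::Regular shown in this file):

    #include <GooseFEM/GooseFEM.h>
    #include <iostream>

    int main()
    {
        // 3 x 3 x 3 elements with edge size h = 1.0
        GooseFEM::Mesh::Hex8::Regular mesh(3, 3, 3, 1.0);

        auto coor = mesh.coor();  // nodal coordinates [nnode, 3]
        auto conn = mesh.conn();  // connectivity      [nelem, 8]

        std::cout << coor.shape()[0] << " nodes, " << conn.shape()[0] << " elements" << std::endl;
        std::cout << mesh.nodesFront().size() << " nodes on the front boundary" << std::endl;
    }
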
------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesFront() const { xt::xtensor out = xt::empty({(m_nelx+1)*(m_nely+1)}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(iy*(m_nelx+1)+ix) = iy*(m_nelx+1) + ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBack() const { xt::xtensor out = xt::empty({(m_nelx+1)*(m_nely+1)}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(iy*(m_nelx+1)+ix) = iy*(m_nelx+1) + ix + m_nelz*(m_nely+1)*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesLeft() const { xt::xtensor out = xt::empty({(m_nely+1)*(m_nelz+1)}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iz*(m_nely+1)+iy) = iy*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesRight() const { xt::xtensor out = xt::empty({(m_nely+1)*(m_nelz+1)}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iz*(m_nely+1)+iy) = iy*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottom() const { xt::xtensor out = xt::empty({(m_nelx+1)*(m_nelz+1)}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(iz*(m_nelx+1)+ix) = ix + iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTop() const { xt::xtensor out = xt::empty({(m_nelx+1)*(m_nelz+1)}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(iz*(m_nelx+1)+ix) = ix + m_nely*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesFrontFace() const { xt::xtensor out = xt::empty({(m_nelx-1)*(m_nely-1)}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out((iy-1)*(m_nelx-1)+(ix-1)) = iy*(m_nelx+1) + ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBackFace() const { xt::xtensor out = xt::empty({(m_nelx-1)*(m_nely-1)}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) { for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) { out((iy-1)*(m_nelx-1)+(ix-1)) = iy*(m_nelx+1) + ix + m_nelz*(m_nely+1)*(m_nelx+1); } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesLeftFace() const { xt::xtensor out = xt::empty({(m_nely-1)*(m_nelz-1)}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out((iz-1)*(m_nely-1)+(iy-1)) = iy*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesRightFace() const { xt::xtensor out = 
xt::empty({(m_nely-1)*(m_nelz-1)}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out((iz-1)*(m_nely-1)+(iy-1)) = iy*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomFace() const { xt::xtensor out = xt::empty({(m_nelx-1)*(m_nelz-1)}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out((iz-1)*(m_nelx-1)+(ix-1)) = ix + iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTopFace() const { xt::xtensor out = xt::empty({(m_nelx-1)*(m_nelz-1)}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out((iz-1)*(m_nelx-1)+(ix-1)) = ix + m_nely*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesFrontBottomEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesFrontTopEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = ix + m_nely*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesFrontLeftEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iy) = iy*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesFrontRightEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iy) = iy*(m_nelx+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBackBottomEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = ix + m_nelz*(m_nely+1)*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBackTopEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = m_nely*(m_nelx+1) + ix + m_nelz*(m_nely+1)*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBackLeftEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iy) = iy*(m_nelx+1) + m_nelz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBackRightEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iy) = iy*(m_nelx+1) + m_nelz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomLeftEdge() const { xt::xtensor 
out = xt::empty({m_nelz+1}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) out(iz) = iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomRightEdge() const { xt::xtensor out = xt::empty({m_nelz+1}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) out(iz) = iz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTopLeftEdge() const { xt::xtensor out = xt::empty({m_nelz+1}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) out(iz) = m_nely*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTopRightEdge() const { xt::xtensor out = xt::empty({m_nelz+1}); for ( size_t iz = 0 ; iz < m_nelz+1 ; ++iz ) out(iz) = m_nely*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomFrontEdge() const { return nodesFrontBottomEdge(); } inline xt::xtensor Regular::nodesBottomBackEdge() const { return nodesBackBottomEdge(); } inline xt::xtensor Regular::nodesTopFrontEdge() const { return nodesFrontTopEdge(); } inline xt::xtensor Regular::nodesTopBackEdge() const { return nodesBackTopEdge(); } inline xt::xtensor Regular::nodesLeftBottomEdge() const { return nodesBottomLeftEdge(); } inline xt::xtensor Regular::nodesLeftFrontEdge() const { return nodesFrontLeftEdge(); } inline xt::xtensor Regular::nodesLeftBackEdge() const { return nodesBackLeftEdge(); } inline xt::xtensor Regular::nodesLeftTopEdge() const { return nodesTopLeftEdge(); } inline xt::xtensor Regular::nodesRightBottomEdge() const { return nodesBottomRightEdge(); } inline xt::xtensor Regular::nodesRightTopEdge() const { return nodesTopRightEdge(); } inline xt::xtensor Regular::nodesRightFrontEdge() const { return nodesFrontRightEdge(); } inline xt::xtensor Regular::nodesRightBackEdge() const { return nodesBackRightEdge(); } // ------------------- node-numbers along the front-bottom edge, without corners ------------------- inline xt::xtensor Regular::nodesFrontBottomOpenEdge() const { xt::xtensor out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = ix; return out; } // -------------------- node-numbers along the front-top edge, without corners --------------------- inline xt::xtensor Regular::nodesFrontTopOpenEdge() const { xt::xtensor out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = ix + m_nely*(m_nelx+1); return out; } // -------------------- node-numbers along the front-left edge, without corners -------------------- inline xt::xtensor Regular::nodesFrontLeftOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1); return out; } // ------------------- node-numbers along the front-right edge, without corners -------------------- inline xt::xtensor Regular::nodesFrontRightOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1) + m_nelx; return out; } // ------------------- node-numbers along the back-bottom edge, without corners -------------------- inline xt::xtensor Regular::nodesBackBottomOpenEdge() const { xt::xtensor 
out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = ix + m_nelz*(m_nely+1)*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBackTopOpenEdge() const { xt::xtensor out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = m_nely*(m_nelx+1) + ix + m_nelz*(m_nely+1)*(m_nelx+1); return out; } // -------------------- node-numbers along the back-left edge, without corners --------------------- inline xt::xtensor Regular::nodesBackLeftOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1) + m_nelz*(m_nelx+1)*(m_nely+1); return out; } // -------------------- node-numbers along the back-right edge, without corners -------------------- inline xt::xtensor Regular::nodesBackRightOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1) + m_nelz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------- node-numbers along the bottom-left edge, without corners -------------------- inline xt::xtensor Regular::nodesBottomLeftOpenEdge() const { xt::xtensor out = xt::empty({m_nelz-1}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) out(iz-1) = iz*(m_nelx+1)*(m_nely+1); return out; } // ------------------- node-numbers along the bottom-right edge, without corners ------------------- inline xt::xtensor Regular::nodesBottomRightOpenEdge() const { xt::xtensor out = xt::empty({m_nelz-1}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) out(iz-1) = iz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTopLeftOpenEdge() const { xt::xtensor out = xt::empty({m_nelz-1}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) out(iz-1) = m_nely*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1); return out; } // -------------------- node-numbers along the top-right edge, without corners --------------------- inline xt::xtensor Regular::nodesTopRightOpenEdge() const { xt::xtensor out = xt::empty({m_nelz-1}); for ( size_t iz = 1 ; iz < m_nelz ; ++iz ) out(iz-1) = m_nely*(m_nelx+1) + iz*(m_nelx+1)*(m_nely+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomFrontOpenEdge() const { return nodesFrontBottomOpenEdge(); } inline xt::xtensor Regular::nodesBottomBackOpenEdge() const { return nodesBackBottomOpenEdge(); } inline xt::xtensor Regular::nodesTopFrontOpenEdge() const { return nodesFrontTopOpenEdge(); } inline xt::xtensor Regular::nodesTopBackOpenEdge() const { return nodesBackTopOpenEdge(); } inline xt::xtensor Regular::nodesLeftBottomOpenEdge() const { return nodesBottomLeftOpenEdge(); } inline xt::xtensor Regular::nodesLeftFrontOpenEdge() const { return nodesFrontLeftOpenEdge(); } inline xt::xtensor Regular::nodesLeftBackOpenEdge() const { return nodesBackLeftOpenEdge(); } inline xt::xtensor Regular::nodesLeftTopOpenEdge() const { return nodesTopLeftOpenEdge(); } inline xt::xtensor Regular::nodesRightBottomOpenEdge() const { return nodesBottomRightOpenEdge(); } inline xt::xtensor Regular::nodesRightTopOpenEdge() const { return nodesTopRightOpenEdge(); } inline xt::xtensor Regular::nodesRightFrontOpenEdge() const { return nodesFrontRightOpenEdge(); } inline xt::xtensor 
Regular::nodesRightBackOpenEdge() const { return nodesBackRightOpenEdge(); }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesFrontBottomLeftCorner() const { return 0; }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesFrontBottomRightCorner() const { return m_nelx; }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesFrontTopLeftCorner() const { return m_nely*(m_nelx+1); }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesFrontTopRightCorner() const { return m_nely*(m_nelx+1) + m_nelx; }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesBackBottomLeftCorner() const { return m_nelz*(m_nely+1)*(m_nelx+1); }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesBackBottomRightCorner() const { return m_nelx + m_nelz*(m_nely+1)*(m_nelx+1); }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesBackTopLeftCorner() const { return m_nely*(m_nelx+1) + m_nelz*(m_nely+1)*(m_nelx+1); }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesBackTopRightCorner() const { return m_nely*(m_nelx+1) + m_nelx + m_nelz*(m_nely+1)*(m_nelx+1); }

// -------------------------------------------------------------------------------------------------

inline size_t Regular::nodesFrontLeftBottomCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t Regular::nodesBottomFrontLeftCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t Regular::nodesBottomLeftFrontCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t Regular::nodesLeftFrontBottomCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t Regular::nodesLeftBottomFrontCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t Regular::nodesFrontRightBottomCorner()const { return nodesFrontBottomRightCorner(); }
inline size_t Regular::nodesBottomFrontRightCorner()const { return nodesFrontBottomRightCorner(); }
inline size_t Regular::nodesBottomRightFrontCorner()const { return nodesFrontBottomRightCorner(); }
inline size_t Regular::nodesRightFrontBottomCorner()const { return nodesFrontBottomRightCorner(); }
inline size_t Regular::nodesRightBottomFrontCorner()const { return nodesFrontBottomRightCorner(); }
inline size_t Regular::nodesFrontLeftTopCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t Regular::nodesTopFrontLeftCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t Regular::nodesTopLeftFrontCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t Regular::nodesLeftFrontTopCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t Regular::nodesLeftTopFrontCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t Regular::nodesFrontRightTopCorner() const { return nodesFrontTopRightCorner(); }
inline size_t Regular::nodesTopFrontRightCorner() const { return nodesFrontTopRightCorner(); }
inline size_t Regular::nodesTopRightFrontCorner() const { return nodesFrontTopRightCorner(); }
inline size_t 
Regular::nodesRightFrontTopCorner() const { return nodesFrontTopRightCorner(); } inline size_t Regular::nodesRightTopFrontCorner() const { return nodesFrontTopRightCorner(); } inline size_t Regular::nodesBackLeftBottomCorner() const { return nodesBackBottomLeftCorner(); } inline size_t Regular::nodesBottomBackLeftCorner() const { return nodesBackBottomLeftCorner(); } inline size_t Regular::nodesBottomLeftBackCorner() const { return nodesBackBottomLeftCorner(); } inline size_t Regular::nodesLeftBackBottomCorner() const { return nodesBackBottomLeftCorner(); } inline size_t Regular::nodesLeftBottomBackCorner() const { return nodesBackBottomLeftCorner(); } inline size_t Regular::nodesBackRightBottomCorner() const { return nodesBackBottomRightCorner(); } inline size_t Regular::nodesBottomBackRightCorner() const { return nodesBackBottomRightCorner(); } inline size_t Regular::nodesBottomRightBackCorner() const { return nodesBackBottomRightCorner(); } inline size_t Regular::nodesRightBackBottomCorner() const { return nodesBackBottomRightCorner(); } inline size_t Regular::nodesRightBottomBackCorner() const { return nodesBackBottomRightCorner(); } inline size_t Regular::nodesBackLeftTopCorner() const { return nodesBackTopLeftCorner(); } inline size_t Regular::nodesTopBackLeftCorner() const { return nodesBackTopLeftCorner(); } inline size_t Regular::nodesTopLeftBackCorner() const { return nodesBackTopLeftCorner(); } inline size_t Regular::nodesLeftBackTopCorner() const { return nodesBackTopLeftCorner(); } inline size_t Regular::nodesLeftTopBackCorner() const { return nodesBackTopLeftCorner(); } inline size_t Regular::nodesBackRightTopCorner() const { return nodesBackTopRightCorner(); } inline size_t Regular::nodesTopBackRightCorner() const { return nodesBackTopRightCorner(); } inline size_t Regular::nodesTopRightBackCorner() const { return nodesBackTopRightCorner(); } inline size_t Regular::nodesRightBackTopCorner() const { return nodesBackTopRightCorner(); } inline size_t Regular::nodesRightTopBackCorner() const { return nodesBackTopRightCorner(); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesPeriodic() const { // faces xt::xtensor fro = nodesFrontFace(); xt::xtensor bck = nodesBackFace(); xt::xtensor lft = nodesLeftFace(); xt::xtensor rgt = nodesRightFace(); xt::xtensor bot = nodesBottomFace(); xt::xtensor top = nodesTopFace(); // edges xt::xtensor froBot = nodesFrontBottomOpenEdge(); xt::xtensor froTop = nodesFrontTopOpenEdge(); xt::xtensor froLft = nodesFrontLeftOpenEdge(); xt::xtensor froRgt = nodesFrontRightOpenEdge(); xt::xtensor bckBot = nodesBackBottomOpenEdge(); xt::xtensor bckTop = nodesBackTopOpenEdge(); xt::xtensor bckLft = nodesBackLeftOpenEdge(); xt::xtensor bckRgt = nodesBackRightOpenEdge(); xt::xtensor botLft = nodesBottomLeftOpenEdge(); xt::xtensor botRgt = nodesBottomRightOpenEdge(); xt::xtensor topLft = nodesTopLeftOpenEdge(); xt::xtensor topRgt = nodesTopRightOpenEdge(); // allocate nodal ties // - number of tying per category size_t tface = fro.size() + lft.size() + bot.size(); size_t tedge = 3*froBot.size() + 3*froLft.size() + 3*botLft.size(); size_t tnode = 7; // - allocate xt::xtensor out = xt::empty({tface+tedge+tnode, std::size_t(2)}); // counter size_t i = 0; // tie all corners to one corner out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesFrontBottomRightCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesBackBottomRightCorner(); ++i; out(i,0) = 
nodesFrontBottomLeftCorner(); out(i,1) = nodesBackBottomLeftCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesFrontTopLeftCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesFrontTopRightCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesBackTopRightCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesBackTopLeftCorner(); ++i; // tie all corresponding edges to each other (exclude corners) for ( size_t j = 0 ; j Regular::dofs() const { return GooseFEM::Mesh::dofs(m_nnode,m_ndim); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::dofsPeriodic() const { // DOF-numbers for each component of each node (sequential) xt::xtensor out = GooseFEM::Mesh::dofs(m_nnode,m_ndim); // periodic node-pairs xt::xtensor nodePer = nodesPeriodic(); // eliminate 'dependent' DOFs; renumber "out" to be sequential for the remaining DOFs for ( size_t i = 0 ; i < nodePer.shape()[0] ; ++i ) for ( size_t j = 0 ; j < m_ndim ; ++j ) out(nodePer(i,1),j) = out(nodePer(i,0),j); // renumber "out" to be sequential return GooseFEM::Mesh::renumber(out); } // ------------------------------------------------------------------------------------------------- inline FineLayer::FineLayer(size_t nelx, size_t nely, size_t nelz, double h, size_t nfine): m_h(h) { // basic assumptions assert( nelx >= 1 ); assert( nely >= 1 ); assert( nelz >= 1 ); // store basic info m_Lx = m_h * static_cast(nelx); m_Lz = m_h * static_cast(nelz); // compute element size in y-direction (use symmetry, compute upper half) // ------------------------------------------------------------------------------------------------- // temporary variables size_t nmin, ntot; xt::xtensor nhx = xt::ones({nely}); xt::xtensor nhy = xt::ones({nely}); xt::xtensor nhz = xt::ones({nely}); xt::xtensor refine = -1 * xt::ones ({nely}); // minimum height in y-direction (half of the height because of symmetry) if ( nely % 2 == 0 ) nmin = nely /2; else nmin = (nely +1)/2; // minimum number of fine layers in y-direction (minimum 1, middle layer part of this half) if ( nfine % 2 == 0 ) nfine = nfine /2 + 1; else nfine = (nfine+1)/2; if ( nfine < 1 ) nfine = 1; if ( nfine > nmin ) nfine = nmin; // loop over element layers in y-direction, try to coarsen using these rules: // (1) element size in y-direction <= distance to origin in y-direction // (2) element size in x-(z-)direction should fit the total number of elements in x-(z-)direction // (3) a certain number of layers have the minimum size "1" (are fine) for ( size_t iy = nfine ; ; ) { // initialize current size in y-direction if ( iy == nfine ) ntot = nfine; // check to stop if ( iy >= nely or ntot >= nmin ) { nely = iy; break; } // rules (1,2) satisfied: coarsen in x-direction (and z-direction) if ( 3*nhy(iy) <= ntot and nelx%(3*nhx(iy)) == 0 and ntot+nhy(iy) < nmin ) { // - process refinement in x-direction refine(iy) = 0; nhy (iy) *= 2; auto vnhy = xt::view(nhy, xt::range(iy+1, _)); auto vnhx = xt::view(nhx, xt::range(iy , _)); vnhy *= 3; vnhx *= 3; // - rule (2) satisfied: coarsen next element layer in z-direction if ( iy+1 < nely and ntot+2*nhy(iy) < nmin ) { if ( nelz%(3*nhz(iy+1)) == 0 ) { // - update the number of elements in y-direction ntot += nhy(iy); // - proceed to next element layer in y-direction ++iy; // - process refinement in z-direction refine(iy) = 2; nhy (iy) = nhy(iy-1); auto vnhz = xt::view(nhz, xt::range(iy, _)); vnhz *= 3; } } } // rules (1,2) 
satisfied: coarse in z-direction else if ( 3*nhy(iy) <= ntot and nelz%(3*nhz(iy)) == 0 and ntot+nhy(iy) < nmin ) { // - process refinement in z-direction refine(iy) = 2; nhy (iy) *= 2; auto vnhy = xt::view(nhy, xt::range(iy+1, _)); auto vnhz = xt::view(nhz, xt::range(iy , _)); vnhy *= 3; vnhz *= 3; } // update the number of elements in y-direction ntot += nhy(iy); // proceed to next element layer in y-direction ++iy; // check to stop if ( iy >= nely or ntot >= nmin ) { nely = iy; break; } } // symmetrize, compute full information // ------------------------------------------------------------------------------------------------- // allocate mesh constructor parameters m_nhx = xt::empty({nely*2-1}); m_nhy = xt::empty({nely*2-1}); m_nhz = xt::empty({nely*2-1}); m_refine = xt::empty ({nely*2-1}); m_nelx = xt::empty({nely*2-1}); m_nelz = xt::empty({nely*2-1}); m_nnd = xt::empty({nely*2 }); m_startElem = xt::empty({nely*2-1}); m_startNode = xt::empty({nely*2 }); // fill // - lower half for ( size_t iy = 0 ; iy < nely ; ++iy ) { m_nhx (iy) = nhx (nely-iy-1); m_nhy (iy) = nhy (nely-iy-1); m_nhz (iy) = nhz (nely-iy-1); m_refine(iy) = refine(nely-iy-1); } // - upper half for ( size_t iy = 0 ; iy < nely-1 ; ++iy ) { m_nhx (iy+nely) = nhx (iy+1); m_nhy (iy+nely) = nhy (iy+1); m_nhz (iy+nely) = nhz (iy+1); m_refine(iy+nely) = refine(iy+1); } // update size nely = m_nhx.size(); // compute the number of elements per element layer in y-direction for ( size_t iy = 0 ; iy < nely ; ++iy ) { m_nelx(iy) = nelx / m_nhx(iy); m_nelz(iy) = nelz / m_nhz(iy); } // compute the number of nodes per node layer in y-direction // - bottom half for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) m_nnd(iy) = (m_nelx(iy)+1) * (m_nelz(iy)+1); // - top half for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) m_nnd(iy+1) = (m_nelx(iy)+1) * (m_nelz(iy)+1); // compute mesh dimensions // ------------------------------------------------------------------------------------------------- // initialize m_nnode = 0; m_nelem = 0; m_startNode(0) = 0; // loop over element layers (bottom -> middle, elements become finer) for ( size_t i = 0 ; i < (nely-1)/2 ; ++i ) { // - store the first element of the layer m_startElem(i) = m_nelem; // - add the nodes of this layer if ( m_refine(i) == 0 ) { m_nnode += (3*m_nelx(i)+1) * ( m_nelz(i)+1); } else if ( m_refine(i) == 2 ) { m_nnode += ( m_nelx(i)+1) * (3*m_nelz(i)+1); } else { m_nnode += ( m_nelx(i)+1) * ( m_nelz(i)+1); } // - add the elements of this layer if ( m_refine(i) == 0 ) { m_nelem += (4*m_nelx(i) ) * ( m_nelz(i) ); } else if ( m_refine(i) == 2 ) { m_nelem += ( m_nelx(i) ) * (4*m_nelz(i) ); } else { m_nelem += ( m_nelx(i) ) * ( m_nelz(i) ); } // - store the starting node of the next layer m_startNode(i+1) = m_nnode; } // loop over element layers (middle -> top, elements become coarser) for ( size_t i = (nely-1)/2 ; i < nely ; ++i ) { // - store the first element of the layer m_startElem(i) = m_nelem; // - add the nodes of this layer if ( m_refine(i) == 0 ) { m_nnode += (5*m_nelx(i)+1) * ( m_nelz(i)+1); } else if ( m_refine(i) == 2 ) { m_nnode += ( m_nelx(i)+1) * (5*m_nelz(i)+1); } else { m_nnode += ( m_nelx(i)+1) * ( m_nelz(i)+1); } // - add the elements of this layer if ( m_refine(i) == 0 ) { m_nelem += (4*m_nelx(i) ) * ( m_nelz(i) ); } else if ( m_refine(i) == 2 ) { m_nelem += ( m_nelx(i) ) * (4*m_nelz(i) ); } else { m_nelem += ( m_nelx(i) ) * ( m_nelz(i) ); } // - store the starting node of the next layer m_startNode(i+1) = m_nnode; } // - add the top row of nodes m_nnode += 
(m_nelx(nely-1)+1) * (m_nelz(nely-1)+1); } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nelem() const { return m_nelem; } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nnode() const { return m_nnode; } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nne() const { return m_nne; } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::ndim() const { return m_ndim; } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::shape(size_t i) const { assert( i >= 0 and i <= 2 ); if ( i == 0 ) return xt::amax(m_nelx)[0]; else if ( i == 2 ) return xt::amax(m_nelz)[0]; else return xt::sum (m_nhy )[0]; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::coor() const { // allocate output xt::xtensor out = xt::empty({m_nnode, m_ndim}); // current node, number of element layers size_t inode = 0; size_t nely = static_cast(m_nhy.size()); // y-position of each main node layer (i.e. excluding node layers for refinement/coarsening) // - allocate xt::xtensor y = xt::empty({nely+1}); // - initialize y(0) = 0.0; // - compute for ( size_t iy = 1 ; iy < nely+1 ; ++iy ) y(iy) = y(iy-1) + m_nhy(iy-1) * m_h; // loop over element layers (bottom -> middle) : add bottom layer (+ refinement layer) of nodes // ------------------------------------------------------------------------------------------------- for ( size_t iy = 0 ; ; ++iy ) { // get positions along the x- and z-axis xt::xtensor x = xt::linspace(0.0, m_Lx, m_nelx(iy)+1); xt::xtensor z = xt::linspace(0.0, m_Lz, m_nelz(iy)+1); // add nodes of the bottom layer of this element for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(inode,0) = x(ix); out(inode,1) = y(iy); out(inode,2) = z(iz); ++inode; } } // stop at middle layer if ( iy == (nely-1)/2 ) break; // add extra nodes of the intermediate layer, for refinement in x-direction if ( m_refine(iy) == 0 ) { // - get position offset in x- and y-direction double dx = m_h * static_cast(m_nhx(iy)/3); double dy = m_h * static_cast(m_nhy(iy)/2); // - add nodes of the intermediate layer for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { for ( size_t j = 0 ; j < 2 ; ++j ) { out(inode,0) = x(ix) + dx * static_cast(j+1); out(inode,1) = y(iy) + dy; out(inode,2) = z(iz); ++inode; } } } } // add extra nodes of the intermediate layer, for refinement in z-direction else if ( m_refine(iy) == 2 ) { // - get position offset in y- and z-direction double dz = m_h * static_cast(m_nhz(iy)/3); double dy = m_h * static_cast(m_nhy(iy)/2); // - add nodes of the intermediate layer for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) { for ( size_t j = 0 ; j < 2 ; ++j ) { for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(inode,0) = x(ix); out(inode,1) = y(iy) + dy; out(inode,2) = z(iz) + dz * static_cast(j+1); ++inode; } } } } } // loop over element layers (middle -> top) : add (refinement layer +) top layer of nodes // ------------------------------------------------------------------------------------------------- for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { // get positions along 
the x- and z-axis xt::xtensor x = xt::linspace(0.0, m_Lx, m_nelx(iy)+1); xt::xtensor z = xt::linspace(0.0, m_Lz, m_nelz(iy)+1); // add extra nodes of the intermediate layer, for refinement in x-direction if ( m_refine(iy) == 0 ) { // - get position offset in x- and y-direction double dx = m_h * static_cast(m_nhx(iy)/3); double dy = m_h * static_cast(m_nhy(iy)/2); // - add nodes of the intermediate layer for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { for ( size_t j = 0 ; j < 2 ; ++j ) { out(inode,0) = x(ix) + dx * static_cast(j+1); out(inode,1) = y(iy) + dy; out(inode,2) = z(iz); ++inode; } } } } // add extra nodes of the intermediate layer, for refinement in z-direction else if ( m_refine(iy) == 2 ) { // - get position offset in y- and z-direction double dz = m_h * static_cast(m_nhz(iy)/3); double dy = m_h * static_cast(m_nhy(iy)/2); // - add nodes of the intermediate layer for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) { for ( size_t j = 0 ; j < 2 ; ++j ) { for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(inode,0) = x(ix); out(inode,1) = y(iy) + dy; out(inode,2) = z(iz) + dz * static_cast(j+1); ++inode; } } } } // add nodes of the top layer of this element for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(inode,0) = x(ix ); out(inode,1) = y(iy+1); out(inode,2) = z(iz ); ++inode; } } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::conn() const { // allocate output xt::xtensor out = xt::empty({m_nelem, m_nne}); // current element, number of element layers, starting nodes of each node layer size_t ielem = 0; size_t nely = static_cast(m_nhy.size()); size_t bot,mid,top; // loop over all element layers for ( size_t iy = 0 ; iy < nely ; ++iy ) { // - get: starting nodes of bottom(, middle) and top layer bot = m_startNode(iy ); mid = m_startNode(iy ) + m_nnd(iy); top = m_startNode(iy+1); // - define connectivity: no coarsening/refinement if ( m_refine(iy) == -1 ) { for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { out(ielem,0) = bot + (ix ) + (iz ) * (m_nelx(iy)+1); out(ielem,1) = bot + (ix+1) + (iz ) * (m_nelx(iy)+1); out(ielem,2) = top + (ix+1) + (iz ) * (m_nelx(iy)+1); out(ielem,3) = top + (ix ) + (iz ) * (m_nelx(iy)+1); out(ielem,4) = bot + (ix ) + (iz+1) * (m_nelx(iy)+1); out(ielem,5) = bot + (ix+1) + (iz+1) * (m_nelx(iy)+1); out(ielem,6) = top + (ix+1) + (iz+1) * (m_nelx(iy)+1); out(ielem,7) = top + (ix ) + (iz+1) * (m_nelx(iy)+1); ielem++; } } } // - define connectivity: refinement along the x-direction (below the middle layer) else if ( m_refine(iy) == 0 and iy <= (nely-1)/2 ) { for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { // -- bottom element out(ielem,0) = bot + ( ix ) + (iz ) * ( m_nelx(iy)+1); out(ielem,1) = bot + ( ix+1) + (iz ) * ( m_nelx(iy)+1); out(ielem,2) = mid + (2*ix+1) + (iz ) * (2*m_nelx(iy) ); out(ielem,3) = mid + (2*ix ) + (iz ) * (2*m_nelx(iy) ); out(ielem,4) = bot + ( ix ) + (iz+1) * ( m_nelx(iy)+1); out(ielem,5) = bot + ( ix+1) + (iz+1) * ( m_nelx(iy)+1); out(ielem,6) = mid + (2*ix+1) + (iz+1) * (2*m_nelx(iy) ); out(ielem,7) = mid + (2*ix ) + (iz+1) * (2*m_nelx(iy) ); ielem++; // -- top-right element out(ielem,0) = bot + ( ix+1) + (iz ) * ( m_nelx(iy)+1); out(ielem,1) = top + (3*ix+3) + (iz ) * (3*m_nelx(iy)+1); out(ielem,2) = top + (3*ix+2) + (iz ) * 
(3*m_nelx(iy)+1); out(ielem,3) = mid + (2*ix+1) + (iz ) * (2*m_nelx(iy) ); out(ielem,4) = bot + ( ix+1) + (iz+1) * ( m_nelx(iy)+1); out(ielem,5) = top + (3*ix+3) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,6) = top + (3*ix+2) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,7) = mid + (2*ix+1) + (iz+1) * (2*m_nelx(iy) ); ielem++; // -- top-center element out(ielem,0) = mid + (2*ix ) + (iz ) * (2*m_nelx(iy) ); out(ielem,1) = mid + (2*ix+1) + (iz ) * (2*m_nelx(iy) ); out(ielem,2) = top + (3*ix+2) + (iz ) * (3*m_nelx(iy)+1); out(ielem,3) = top + (3*ix+1) + (iz ) * (3*m_nelx(iy)+1); out(ielem,4) = mid + (2*ix ) + (iz+1) * (2*m_nelx(iy) ); out(ielem,5) = mid + (2*ix+1) + (iz+1) * (2*m_nelx(iy) ); out(ielem,6) = top + (3*ix+2) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,7) = top + (3*ix+1) + (iz+1) * (3*m_nelx(iy)+1); ielem++; // -- top-left element out(ielem,0) = bot + ( ix ) + (iz ) * ( m_nelx(iy)+1); out(ielem,1) = mid + (2*ix ) + (iz ) * (2*m_nelx(iy) ); out(ielem,2) = top + (3*ix+1) + (iz ) * (3*m_nelx(iy)+1); out(ielem,3) = top + (3*ix ) + (iz ) * (3*m_nelx(iy)+1); out(ielem,4) = bot + ( ix ) + (iz+1) * ( m_nelx(iy)+1); out(ielem,5) = mid + (2*ix ) + (iz+1) * (2*m_nelx(iy) ); out(ielem,6) = top + (3*ix+1) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,7) = top + (3*ix ) + (iz+1) * (3*m_nelx(iy)+1); ielem++; } } } // - define connectivity: coarsening along the x-direction (above the middle layer) else if ( m_refine(iy) == 0 and iy > (nely-1)/2 ) { for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { // -- lower-left element out(ielem,0) = bot + (3*ix ) + (iz ) * (3*m_nelx(iy)+1); out(ielem,1) = bot + (3*ix+1) + (iz ) * (3*m_nelx(iy)+1); out(ielem,2) = mid + (2*ix ) + (iz ) * (2*m_nelx(iy) ); out(ielem,3) = top + ( ix ) + (iz ) * ( m_nelx(iy)+1); out(ielem,4) = bot + (3*ix ) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,5) = bot + (3*ix+1) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,6) = mid + (2*ix ) + (iz+1) * (2*m_nelx(iy) ); out(ielem,7) = top + ( ix ) + (iz+1) * ( m_nelx(iy)+1); ielem++; // -- lower-center element out(ielem,0) = bot + (3*ix+1) + (iz ) * (3*m_nelx(iy)+1); out(ielem,1) = bot + (3*ix+2) + (iz ) * (3*m_nelx(iy)+1); out(ielem,2) = mid + (2*ix+1) + (iz ) * (2*m_nelx(iy) ); out(ielem,3) = mid + (2*ix ) + (iz ) * (2*m_nelx(iy) ); out(ielem,4) = bot + (3*ix+1) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,5) = bot + (3*ix+2) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,6) = mid + (2*ix+1) + (iz+1) * (2*m_nelx(iy) ); out(ielem,7) = mid + (2*ix ) + (iz+1) * (2*m_nelx(iy) ); ielem++; // -- lower-right element out(ielem,0) = bot + (3*ix+2) + (iz ) * (3*m_nelx(iy)+1); out(ielem,1) = bot + (3*ix+3) + (iz ) * (3*m_nelx(iy)+1); out(ielem,2) = top + ( ix+1) + (iz ) * ( m_nelx(iy)+1); out(ielem,3) = mid + (2*ix+1) + (iz ) * (2*m_nelx(iy) ); out(ielem,4) = bot + (3*ix+2) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,5) = bot + (3*ix+3) + (iz+1) * (3*m_nelx(iy)+1); out(ielem,6) = top + ( ix+1) + (iz+1) * ( m_nelx(iy)+1); out(ielem,7) = mid + (2*ix+1) + (iz+1) * (2*m_nelx(iy) ); ielem++; // -- upper element out(ielem,0) = mid + (2*ix ) + (iz ) * (2*m_nelx(iy) ); out(ielem,1) = mid + (2*ix+1) + (iz ) * (2*m_nelx(iy) ); out(ielem,2) = top + ( ix+1) + (iz ) * ( m_nelx(iy)+1); out(ielem,3) = top + ( ix ) + (iz ) * ( m_nelx(iy)+1); out(ielem,4) = mid + (2*ix ) + (iz+1) * (2*m_nelx(iy) ); out(ielem,5) = mid + (2*ix+1) + (iz+1) * (2*m_nelx(iy) ); out(ielem,6) = top + ( ix+1) + (iz+1) * ( m_nelx(iy)+1); out(ielem,7) = top + ( ix ) + (iz+1) * ( m_nelx(iy)+1); ielem++; } } } // - define connectivity: refinement 
along the z-direction (below the middle layer) else if ( m_refine(iy) == 2 and iy <= (nely-1)/2 ) { for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { // -- bottom element out(ielem,0) = bot + (ix ) + iz * (m_nelx(iy)+1); out(ielem,1) = bot + (ix ) + ( iz+1) * (m_nelx(iy)+1); out(ielem,2) = bot + (ix+1) + ( iz+1) * (m_nelx(iy)+1); out(ielem,3) = bot + (ix+1) + iz * (m_nelx(iy)+1); out(ielem,4) = mid + (ix ) + 2*iz * (m_nelx(iy)+1); out(ielem,5) = mid + (ix ) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,6) = mid + (ix+1) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,7) = mid + (ix+1) + 2*iz * (m_nelx(iy)+1); ielem++; // -- top-back element out(ielem,0) = mid + (ix ) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,1) = mid + (ix+1) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,2) = top + (ix+1) + (3*iz+2) * (m_nelx(iy)+1); out(ielem,3) = top + (ix ) + (3*iz+2) * (m_nelx(iy)+1); out(ielem,4) = bot + (ix ) + ( iz+1) * (m_nelx(iy)+1); out(ielem,5) = bot + (ix+1) + ( iz+1) * (m_nelx(iy)+1); out(ielem,6) = top + (ix+1) + (3*iz+3) * (m_nelx(iy)+1); out(ielem,7) = top + (ix ) + (3*iz+3) * (m_nelx(iy)+1); ielem++; // -- top-center element out(ielem,0) = mid + (ix ) + (2*iz ) * (m_nelx(iy)+1); out(ielem,1) = mid + (ix+1) + (2*iz ) * (m_nelx(iy)+1); out(ielem,2) = top + (ix+1) + (3*iz+1) * (m_nelx(iy)+1); out(ielem,3) = top + (ix ) + (3*iz+1) * (m_nelx(iy)+1); out(ielem,4) = mid + (ix ) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,5) = mid + (ix+1) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,6) = top + (ix+1) + (3*iz+2) * (m_nelx(iy)+1); out(ielem,7) = top + (ix ) + (3*iz+2) * (m_nelx(iy)+1); ielem++; // -- top-front element out(ielem,0) = bot + (ix ) + ( iz ) * (m_nelx(iy)+1); out(ielem,1) = bot + (ix+1) + ( iz ) * (m_nelx(iy)+1); out(ielem,2) = top + (ix+1) + (3*iz ) * (m_nelx(iy)+1); out(ielem,3) = top + (ix ) + (3*iz ) * (m_nelx(iy)+1); out(ielem,4) = mid + (ix ) + (2*iz ) * (m_nelx(iy)+1); out(ielem,5) = mid + (ix+1) + (2*iz ) * (m_nelx(iy)+1); out(ielem,6) = top + (ix+1) + (3*iz+1) * (m_nelx(iy)+1); out(ielem,7) = top + (ix ) + (3*iz+1) * (m_nelx(iy)+1); ielem++; } } } // - define connectivity: coarsening along the z-direction (above the middle layer) else if ( m_refine(iy) == 2 and iy > (nely-1)/2 ) { for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { // -- bottom-front element out(ielem,0) = bot + (ix ) + (3*iz ) * (m_nelx(iy)+1); out(ielem,1) = bot + (ix+1) + (3*iz ) * (m_nelx(iy)+1); out(ielem,2) = top + (ix+1) + ( iz ) * (m_nelx(iy)+1); out(ielem,3) = top + (ix ) + ( iz ) * (m_nelx(iy)+1); out(ielem,4) = bot + (ix ) + (3*iz+1) * (m_nelx(iy)+1); out(ielem,5) = bot + (ix+1) + (3*iz+1) * (m_nelx(iy)+1); out(ielem,6) = mid + (ix+1) + (2*iz ) * (m_nelx(iy)+1); out(ielem,7) = mid + (ix ) + (2*iz ) * (m_nelx(iy)+1); ielem++; // -- bottom-center element out(ielem,0) = bot + (ix ) + (3*iz+1) * (m_nelx(iy)+1); out(ielem,1) = bot + (ix+1) + (3*iz+1) * (m_nelx(iy)+1); out(ielem,2) = mid + (ix+1) + (2*iz ) * (m_nelx(iy)+1); out(ielem,3) = mid + (ix ) + (2*iz ) * (m_nelx(iy)+1); out(ielem,4) = bot + (ix ) + (3*iz+2) * (m_nelx(iy)+1); out(ielem,5) = bot + (ix+1) + (3*iz+2) * (m_nelx(iy)+1); out(ielem,6) = mid + (ix+1) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,7) = mid + (ix ) + (2*iz+1) * (m_nelx(iy)+1); ielem++; // -- bottom-back element out(ielem,0) = bot + (ix ) + (3*iz+2) * (m_nelx(iy)+1); out(ielem,1) = bot + (ix+1) + (3*iz+2) * (m_nelx(iy)+1); out(ielem,2) = mid + (ix+1) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,3) = mid + (ix ) + (2*iz+1) * 
(m_nelx(iy)+1); out(ielem,4) = bot + (ix ) + (3*iz+3) * (m_nelx(iy)+1); out(ielem,5) = bot + (ix+1) + (3*iz+3) * (m_nelx(iy)+1); out(ielem,6) = top + (ix+1) + ( iz+1) * (m_nelx(iy)+1); out(ielem,7) = top + (ix ) + ( iz+1) * (m_nelx(iy)+1); ielem++; // -- top element out(ielem,0) = mid + (ix ) + (2*iz ) * (m_nelx(iy)+1); out(ielem,1) = mid + (ix+1) + (2*iz ) * (m_nelx(iy)+1); out(ielem,2) = top + (ix+1) + ( iz ) * (m_nelx(iy)+1); out(ielem,3) = top + (ix ) + ( iz ) * (m_nelx(iy)+1); out(ielem,4) = mid + (ix ) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,5) = mid + (ix+1) + (2*iz+1) * (m_nelx(iy)+1); out(ielem,6) = top + (ix+1) + ( iz+1) * (m_nelx(iy)+1); out(ielem,7) = top + (ix ) + ( iz+1) * (m_nelx(iy)+1); ielem++; } } } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::elementsMiddleLayer() const { // number of element layers in y-direction, the index of the middle layer size_t nely = static_cast(m_nhy.size()); size_t iy = (nely-1)/2; xt::xtensor out = xt::empty({m_nelx(iy)*m_nelz(iy)}); for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) for ( size_t iz = 0 ; iz < m_nelz(iy) ; ++iz ) out(ix+iz*m_nelx(iy)) = m_startElem(iy) + ix + iz*m_nelx(iy); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesFront() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 + 1; else n += m_nelx(iy) + 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 + 1; else n += m_nelx(iy) + 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(j) = m_startNode(iy) + ix; ++j; } // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy); ++j; } } // -- top node layer for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(j) = m_startNode(iy+1) + ix; ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBack() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 + 1; else n += m_nelx(iy) + 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 + 1; else n += m_nelx(iy) + 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: 
current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(j) = m_startNode(iy) + ix + (m_nelx(iy)+1)*m_nelz(iy); ++j; } // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy) + 2*m_nelx(iy)*m_nelz(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy) + 2*m_nelx(iy)*m_nelz(iy); ++j; } } // -- top node layer for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(j) = m_startNode(iy+1) + ix + (m_nelx(iy)+1)*m_nelz(iy); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesLeft() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 + 1; else n += m_nelz(iy) + 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 + 1; else n += m_nelz(iy) + 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1); ++j; } // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1) + m_nnd(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1) + m_nnd(iy); ++j; } } // -- top node layer for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { out(j) = m_startNode(iy+1) + iz * (m_nelx(iy)+1); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesRight() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 + 1; else n += m_nelz(iy) + 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 + 1; else n += m_nelz(iy) + 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1) + 
m_nelx(iy); ++j; } // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + m_nnd(iy) + iz * (m_nelx(iy)+1) + m_nelx(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + m_nnd(iy) + iz * (m_nelx(iy)+1) + m_nelx(iy); ++j; } } // -- top node layer for ( size_t iz = 0 ; iz < m_nelz(iy)+1 ; ++iz ) { out(j) = m_startNode(iy+1) + iz * (m_nelx(iy)+1) + m_nelx(iy); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBottom() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // allocate node list xt::xtensor out = xt::empty({m_nnd(nely)}); // counter size_t j = 0; // fill node list for ( size_t ix = 0 ; ix < m_nelx(0)+1 ; ++ix ) { for ( size_t iz = 0 ; iz < m_nelz(0)+1 ; ++iz ) { out(j) = m_startNode(0) + ix + iz * (m_nelx(0)+1); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesTop() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // allocate node list xt::xtensor out = xt::empty({m_nnd(nely)}); // counter size_t j = 0; // fill node list for ( size_t ix = 0 ; ix < m_nelx(nely-1)+1 ; ++ix ) { for ( size_t iz = 0 ; iz < m_nelz(nely-1)+1 ; ++iz ) { out(j) = m_startNode(nely) + ix + iz * (m_nelx(nely-1)+1); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesFrontFace() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 - 1; else n += m_nelx(iy) - 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 - 1; else n += m_nelx(iy) - 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t ix = 1 ; ix < m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix; ++j; } // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy); ++j; } } // -- top node layer for ( size_t ix = 1 ; ix < m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy+1) + ix; ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBackFace() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - 
bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 - 1; else n += m_nelx(iy) - 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) { if ( m_refine(iy) == 0 ) n += m_nelx(iy) * 3 - 1; else n += m_nelx(iy) - 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t ix = 1 ; ix < m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + (m_nelx(iy)+1)*m_nelz(iy); ++j; } // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy) + 2*m_nelx(iy)*m_nelz(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 0 ) { for ( size_t ix = 0 ; ix < 2*m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy) + ix + m_nnd(iy) + 2*m_nelx(iy)*m_nelz(iy); ++j; } } // -- top node layer for ( size_t ix = 1 ; ix < m_nelx(iy) ; ++ix ) { out(j) = m_startNode(iy+1) + ix + (m_nelx(iy)+1)*m_nelz(iy); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesLeftFace() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 - 1; else n += m_nelz(iy) - 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 - 1; else n += m_nelz(iy) - 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t iz = 1 ; iz < m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1); ++j; } // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1) + m_nnd(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1) + m_nnd(iy); ++j; } } // -- top node layer for ( size_t iz = 1 ; iz < m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy+1) + iz * (m_nelx(iy)+1); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesRightFace() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // number of boundary nodes // - initialize size_t n = 0; // - bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 - 1; else n += m_nelz(iy) - 1; } // - top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; 
++iy ) { if ( m_refine(iy) == 2 ) n += m_nelz(iy) * 3 - 1; else n += m_nelz(iy) - 1; } // allocate node-list xt::xtensor out = xt::empty({n}); // initialize counter: current index in the node-list "out" size_t j = 0; // bottom half: bottom node layer (+ middle node layer) for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) { // -- bottom node layer for ( size_t iz = 1 ; iz < m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + iz * (m_nelx(iy)+1) + m_nelx(iy); ++j; } // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + m_nnd(iy) + iz * (m_nelx(iy)+1) + m_nelx(iy); ++j; } } } // top half: (middle node layer +) top node layer for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) { // -- refinement layer if ( m_refine(iy) == 2 ) { for ( size_t iz = 0 ; iz < 2*m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy) + m_nnd(iy) + iz * (m_nelx(iy)+1) + m_nelx(iy); ++j; } } // -- top node layer for ( size_t iz = 1 ; iz < m_nelz(iy) ; ++iz ) { out(j) = m_startNode(iy+1) + iz * (m_nelx(iy)+1) + m_nelx(iy); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBottomFace() const { // allocate node list xt::xtensor out = xt::empty({(m_nelx(0)-1)*(m_nelz(0)-1)}); // counter size_t j = 0; // fill node list for ( size_t ix = 1 ; ix < m_nelx(0) ; ++ix ) { for ( size_t iz = 1 ; iz < m_nelz(0) ; ++iz ) { out(j) = m_startNode(0) + ix + iz * (m_nelx(0)+1); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesTopFace() const { // number of element layers in y-direction size_t nely = static_cast(m_nhy.size()); // allocate node list xt::xtensor out = xt::empty({(m_nelx(nely-1)-1)*(m_nelz(nely-1)-1)}); // counter size_t j = 0; // fill node list for ( size_t ix = 1 ; ix < m_nelx(nely-1) ; ++ix ) { for ( size_t iz = 1 ; iz < m_nelz(nely-1) ; ++iz ) { out(j) = m_startNode(nely) + ix + iz * (m_nelx(nely-1)+1); ++j; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesFrontBottomEdge() const { xt::xtensor out = xt::empty({m_nelx(0)+1}); for ( size_t ix = 0 ; ix < m_nelx(0)+1 ; ++ix ) out(ix) = m_startNode(0) + ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesFrontTopEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelx(nely-1)+1}); for ( size_t ix = 0 ; ix < m_nelx(nely-1)+1 ; ++ix ) out(ix) = m_startNode(nely) + ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesFrontLeftEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely+1}); for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) out(iy) = m_startNode(iy); for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) out(iy+1) = m_startNode(iy+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesFrontRightEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely+1}); for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) out(iy) = m_startNode(iy) + m_nelx(iy); for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) out(iy+1) = 
m_startNode(iy+1) + m_nelx(iy); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBackBottomEdge() const { xt::xtensor out = xt::empty({m_nelx(0)+1}); for ( size_t ix = 0 ; ix < m_nelx(0)+1 ; ++ix ) out(ix) = m_startNode(0) + ix + (m_nelx(0)+1)*(m_nelz(0)); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBackTopEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelx(nely-1)+1}); for ( size_t ix = 0 ; ix < m_nelx(nely-1)+1 ; ++ix ) out(ix) = m_startNode(nely) + ix + (m_nelx(nely-1)+1)*(m_nelz(nely-1)); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBackLeftEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely+1}); for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) out(iy) = m_startNode(iy) + (m_nelx(iy)+1)*(m_nelz(iy)); for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) out(iy+1) = m_startNode(iy+1) + (m_nelx(iy)+1)*(m_nelz(iy)); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBackRightEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely+1}); for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) out(iy) = m_startNode(iy) + m_nelx(iy) + (m_nelx(iy)+1)*(m_nelz(iy)); for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) out(iy+1) = m_startNode(iy+1) + m_nelx(iy) + (m_nelx(iy)+1)*(m_nelz(iy)); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBottomLeftEdge() const { xt::xtensor out = xt::empty({m_nelz(0)+1}); for ( size_t iz = 0 ; iz < m_nelz(0)+1 ; ++iz ) out(iz) = m_startNode(0) + iz * (m_nelx(0)+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBottomRightEdge() const { xt::xtensor out = xt::empty({m_nelz(0)+1}); for ( size_t iz = 0 ; iz < m_nelz(0)+1 ; ++iz ) out(iz) = m_startNode(0) + m_nelx(0) + iz * (m_nelx(0)+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesTopLeftEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelz(nely-1)+1}); for ( size_t iz = 0 ; iz < m_nelz(nely-1)+1 ; ++iz ) out(iz) = m_startNode(nely) + iz * (m_nelx(nely-1)+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesTopRightEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelz(nely-1)+1}); for ( size_t iz = 0 ; iz < m_nelz(nely-1)+1 ; ++iz ) out(iz) = m_startNode(nely) + m_nelx(nely-1) + iz * (m_nelx(nely-1)+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBottomFrontEdge() const { return nodesFrontBottomEdge(); } inline xt::xtensor FineLayer::nodesBottomBackEdge() const { return nodesBackBottomEdge(); } inline xt::xtensor FineLayer::nodesTopFrontEdge() const { return nodesFrontTopEdge(); } inline xt::xtensor FineLayer::nodesTopBackEdge() const { 
return nodesBackTopEdge(); } inline xt::xtensor FineLayer::nodesLeftBottomEdge() const { return nodesBottomLeftEdge(); } inline xt::xtensor FineLayer::nodesLeftFrontEdge() const { return nodesFrontLeftEdge(); } inline xt::xtensor FineLayer::nodesLeftBackEdge() const { return nodesBackLeftEdge(); } inline xt::xtensor FineLayer::nodesLeftTopEdge() const { return nodesTopLeftEdge(); } inline xt::xtensor FineLayer::nodesRightBottomEdge() const { return nodesBottomRightEdge(); } inline xt::xtensor FineLayer::nodesRightTopEdge() const { return nodesTopRightEdge(); } inline xt::xtensor FineLayer::nodesRightFrontEdge() const { return nodesFrontRightEdge(); } inline xt::xtensor FineLayer::nodesRightBackEdge() const { return nodesBackRightEdge(); } // ------------------- node-numbers along the front-bottom edge, without corners ------------------- inline xt::xtensor FineLayer::nodesFrontBottomOpenEdge() const { xt::xtensor out = xt::empty({m_nelx(0)-1}); for ( size_t ix = 1 ; ix < m_nelx(0) ; ++ix ) out(ix-1) = m_startNode(0) + ix; return out; } // -------------------- node-numbers along the front-top edge, without corners --------------------- inline xt::xtensor FineLayer::nodesFrontTopOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelx(nely-1)-1}); for ( size_t ix = 1 ; ix < m_nelx(nely-1) ; ++ix ) out(ix-1) = m_startNode(nely) + ix; return out; } // -------------------- node-numbers along the front-left edge, without corners -------------------- inline xt::xtensor FineLayer::nodesFrontLeftOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely-1}); for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) out(iy-1) = m_startNode(iy); for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) out(iy) = m_startNode(iy+1); return out; } // ------------------- node-numbers along the front-right edge, without corners -------------------- inline xt::xtensor FineLayer::nodesFrontRightOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely-1}); for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) out(iy-1) = m_startNode(iy) + m_nelx(iy); for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) out(iy) = m_startNode(iy+1) + m_nelx(iy); return out; } // ------------------- node-numbers along the back-bottom edge, without corners -------------------- inline xt::xtensor FineLayer::nodesBackBottomOpenEdge() const { xt::xtensor out = xt::empty({m_nelx(0)-1}); for ( size_t ix = 1 ; ix < m_nelx(0) ; ++ix ) out(ix-1) = m_startNode(0) + ix + (m_nelx(0)+1)*(m_nelz(0)); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBackTopOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelx(nely-1)-1}); for ( size_t ix = 1 ; ix < m_nelx(nely-1) ; ++ix ) out(ix-1) = m_startNode(nely) + ix + (m_nelx(nely-1)+1)*(m_nelz(nely-1)); return out; } // -------------------- node-numbers along the back-left edge, without corners --------------------- inline xt::xtensor FineLayer::nodesBackLeftOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely-1}); for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) out(iy-1) = m_startNode(iy) + (m_nelx(iy)+1)*(m_nelz(iy)); for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) out(iy) = m_startNode(iy+1) + (m_nelx(iy)+1)*(m_nelz(iy)); return out; } // -------------------- node-numbers along the back-right edge, without corners 
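// note (added comment): the '...OpenEdge' variants return the same edges as above but exclude the
// two corner nodes (their loops run from 1 to n-1); the periodic tyings below tie the corners separately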
-------------------- inline xt::xtensor FineLayer::nodesBackRightOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely-1}); for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) out(iy-1) = m_startNode(iy) + m_nelx(iy) + (m_nelx(iy)+1)*(m_nelz(iy)); for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) out(iy) = m_startNode(iy+1) + m_nelx(iy) + (m_nelx(iy)+1)*(m_nelz(iy)); return out; } // ------------------- node-numbers along the bottom-left edge, without corners -------------------- inline xt::xtensor FineLayer::nodesBottomLeftOpenEdge() const { xt::xtensor out = xt::empty({m_nelz(0)-1}); for ( size_t iz = 1 ; iz < m_nelz(0) ; ++iz ) out(iz-1) = m_startNode(0) + iz * (m_nelx(0)+1); return out; } // ------------------- node-numbers along the bottom-right edge, without corners ------------------- inline xt::xtensor FineLayer::nodesBottomRightOpenEdge() const { xt::xtensor out = xt::empty({m_nelz(0)-1}); for ( size_t iz = 1 ; iz < m_nelz(0) ; ++iz ) out(iz-1) = m_startNode(0) + m_nelx(0) + iz * (m_nelx(0)+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesTopLeftOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelz(nely-1)-1}); for ( size_t iz = 1 ; iz < m_nelz(nely-1) ; ++iz ) out(iz-1) = m_startNode(nely) + iz * (m_nelx(nely-1)+1); return out; } // -------------------- node-numbers along the top-right edge, without corners --------------------- inline xt::xtensor FineLayer::nodesTopRightOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelz(nely-1)-1}); for ( size_t iz = 1 ; iz < m_nelz(nely-1) ; ++iz ) out(iz-1) = m_startNode(nely) + m_nelx(nely-1) + iz * (m_nelx(nely-1)+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBottomFrontOpenEdge() const { return nodesFrontBottomOpenEdge(); } inline xt::xtensor FineLayer::nodesBottomBackOpenEdge() const { return nodesBackBottomOpenEdge(); } inline xt::xtensor FineLayer::nodesTopFrontOpenEdge() const { return nodesFrontTopOpenEdge(); } inline xt::xtensor FineLayer::nodesTopBackOpenEdge() const { return nodesBackTopOpenEdge(); } inline xt::xtensor FineLayer::nodesLeftBottomOpenEdge() const { return nodesBottomLeftOpenEdge(); } inline xt::xtensor FineLayer::nodesLeftFrontOpenEdge() const { return nodesFrontLeftOpenEdge(); } inline xt::xtensor FineLayer::nodesLeftBackOpenEdge() const { return nodesBackLeftOpenEdge(); } inline xt::xtensor FineLayer::nodesLeftTopOpenEdge() const { return nodesTopLeftOpenEdge(); } inline xt::xtensor FineLayer::nodesRightBottomOpenEdge() const { return nodesBottomRightOpenEdge(); } inline xt::xtensor FineLayer::nodesRightTopOpenEdge() const { return nodesTopRightOpenEdge(); } inline xt::xtensor FineLayer::nodesRightFrontOpenEdge() const { return nodesFrontRightOpenEdge(); } inline xt::xtensor FineLayer::nodesRightBackOpenEdge() const { return nodesBackRightOpenEdge(); } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nodesFrontBottomLeftCorner() const { return m_startNode(0); } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nodesFrontBottomRightCorner() const { return m_startNode(0) + m_nelx(0); } // 
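// note (added comment): each of the eight geometric corners is also exposed under every permutation
// of its three face names (e.g. nodesBottomLeftFrontCorner()); all permutations return the same node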
-------------------------------------------------------------------------------------------------

inline size_t FineLayer::nodesFrontTopLeftCorner() const
{
  size_t nely = static_cast<size_t>(m_nhy.size());
  return m_startNode(nely);
}

// -------------------------------------------------------------------------------------------------

inline size_t FineLayer::nodesFrontTopRightCorner() const
{
  size_t nely = static_cast<size_t>(m_nhy.size());
  return m_startNode(nely) + m_nelx(nely-1);
}

// -------------------------------------------------------------------------------------------------

inline size_t FineLayer::nodesBackBottomLeftCorner() const
{
  return m_startNode(0) + (m_nelx(0)+1)*(m_nelz(0));
}

// -------------------------------------------------------------------------------------------------

inline size_t FineLayer::nodesBackBottomRightCorner() const
{
  return m_startNode(0) + m_nelx(0) + (m_nelx(0)+1)*(m_nelz(0));
}

// -------------------------------------------------------------------------------------------------

inline size_t FineLayer::nodesBackTopLeftCorner() const
{
  size_t nely = static_cast<size_t>(m_nhy.size());
  return m_startNode(nely) + (m_nelx(nely-1)+1)*(m_nelz(nely-1));
}

// -------------------------------------------------------------------------------------------------

inline size_t FineLayer::nodesBackTopRightCorner() const
{
  size_t nely = static_cast<size_t>(m_nhy.size());
  return m_startNode(nely) + m_nelx(nely-1) + (m_nelx(nely-1)+1)*(m_nelz(nely-1));
}

// -------------------------------------------------------------------------------------------------

inline size_t FineLayer::nodesFrontLeftBottomCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t FineLayer::nodesBottomFrontLeftCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t FineLayer::nodesBottomLeftFrontCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t FineLayer::nodesLeftFrontBottomCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t FineLayer::nodesLeftBottomFrontCorner() const { return nodesFrontBottomLeftCorner(); }
inline size_t FineLayer::nodesFrontRightBottomCorner() const { return nodesFrontBottomRightCorner(); }
inline size_t FineLayer::nodesBottomFrontRightCorner() const { return nodesFrontBottomRightCorner(); }
inline size_t FineLayer::nodesBottomRightFrontCorner() const { return nodesFrontBottomRightCorner(); }
inline size_t FineLayer::nodesRightFrontBottomCorner() const { return nodesFrontBottomRightCorner(); }
inline size_t FineLayer::nodesRightBottomFrontCorner() const { return nodesFrontBottomRightCorner(); }
inline size_t FineLayer::nodesFrontLeftTopCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t FineLayer::nodesTopFrontLeftCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t FineLayer::nodesTopLeftFrontCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t FineLayer::nodesLeftFrontTopCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t FineLayer::nodesLeftTopFrontCorner() const { return nodesFrontTopLeftCorner(); }
inline size_t FineLayer::nodesFrontRightTopCorner() const { return nodesFrontTopRightCorner(); }
inline size_t FineLayer::nodesTopFrontRightCorner() const { return nodesFrontTopRightCorner(); }
inline size_t FineLayer::nodesTopRightFrontCorner() const { return nodesFrontTopRightCorner(); }
inline size_t FineLayer::nodesRightFrontTopCorner() const { return nodesFrontTopRightCorner(); }
inline size_t FineLayer::nodesRightTopFrontCorner() const { return nodesFrontTopRightCorner(); }
inline
size_t FineLayer::nodesBackLeftBottomCorner() const { return nodesBackBottomLeftCorner(); } inline size_t FineLayer::nodesBottomBackLeftCorner() const { return nodesBackBottomLeftCorner(); } inline size_t FineLayer::nodesBottomLeftBackCorner() const { return nodesBackBottomLeftCorner(); } inline size_t FineLayer::nodesLeftBackBottomCorner() const { return nodesBackBottomLeftCorner(); } inline size_t FineLayer::nodesLeftBottomBackCorner() const { return nodesBackBottomLeftCorner(); } inline size_t FineLayer::nodesBackRightBottomCorner() const { return nodesBackBottomRightCorner(); } inline size_t FineLayer::nodesBottomBackRightCorner() const { return nodesBackBottomRightCorner(); } inline size_t FineLayer::nodesBottomRightBackCorner() const { return nodesBackBottomRightCorner(); } inline size_t FineLayer::nodesRightBackBottomCorner() const { return nodesBackBottomRightCorner(); } inline size_t FineLayer::nodesRightBottomBackCorner() const { return nodesBackBottomRightCorner(); } inline size_t FineLayer::nodesBackLeftTopCorner() const { return nodesBackTopLeftCorner(); } inline size_t FineLayer::nodesTopBackLeftCorner() const { return nodesBackTopLeftCorner(); } inline size_t FineLayer::nodesTopLeftBackCorner() const { return nodesBackTopLeftCorner(); } inline size_t FineLayer::nodesLeftBackTopCorner() const { return nodesBackTopLeftCorner(); } inline size_t FineLayer::nodesLeftTopBackCorner() const { return nodesBackTopLeftCorner(); } inline size_t FineLayer::nodesBackRightTopCorner() const { return nodesBackTopRightCorner(); } inline size_t FineLayer::nodesTopBackRightCorner() const { return nodesBackTopRightCorner(); } inline size_t FineLayer::nodesTopRightBackCorner() const { return nodesBackTopRightCorner(); } inline size_t FineLayer::nodesRightBackTopCorner() const { return nodesBackTopRightCorner(); } inline size_t FineLayer::nodesRightTopBackCorner() const { return nodesBackTopRightCorner(); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesPeriodic() const { // faces xt::xtensor fro = nodesFrontFace(); xt::xtensor bck = nodesBackFace(); xt::xtensor lft = nodesLeftFace(); xt::xtensor rgt = nodesRightFace(); xt::xtensor bot = nodesBottomFace(); xt::xtensor top = nodesTopFace(); // edges xt::xtensor froBot = nodesFrontBottomOpenEdge(); xt::xtensor froTop = nodesFrontTopOpenEdge(); xt::xtensor froLft = nodesFrontLeftOpenEdge(); xt::xtensor froRgt = nodesFrontRightOpenEdge(); xt::xtensor bckBot = nodesBackBottomOpenEdge(); xt::xtensor bckTop = nodesBackTopOpenEdge(); xt::xtensor bckLft = nodesBackLeftOpenEdge(); xt::xtensor bckRgt = nodesBackRightOpenEdge(); xt::xtensor botLft = nodesBottomLeftOpenEdge(); xt::xtensor botRgt = nodesBottomRightOpenEdge(); xt::xtensor topLft = nodesTopLeftOpenEdge(); xt::xtensor topRgt = nodesTopRightOpenEdge(); // allocate nodal ties // - number of tying per category size_t tface = fro.size() + lft.size() + bot.size(); size_t tedge = 3*froBot.size() + 3*froLft.size() + 3*botLft.size(); size_t tnode = 7; // - allocate xt::xtensor out = xt::empty({tface+tedge+tnode, std::size_t(2)}); // counter size_t i = 0; // tie all corners to one corner out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesFrontBottomRightCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesBackBottomRightCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesBackBottomLeftCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = 
nodesFrontTopLeftCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesFrontTopRightCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesBackTopRightCorner(); ++i; out(i,0) = nodesFrontBottomLeftCorner(); out(i,1) = nodesBackTopLeftCorner(); ++i; // tie all corresponding edges to each other (exclude corners) for ( size_t j = 0 ; j FineLayer::dofs() const { return GooseFEM::Mesh::dofs(m_nnode,m_ndim); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::dofsPeriodic() const { // DOF-numbers for each component of each node (sequential) xt::xtensor out = GooseFEM::Mesh::dofs(m_nnode,m_ndim); // periodic node-pairs xt::xtensor nodePer = nodesPeriodic(); // eliminate 'dependent' DOFs; renumber "out" to be sequential for the remaining DOFs for ( size_t i = 0 ; i < nodePer.shape()[0] ; ++i ) for ( size_t j = 0 ; j < m_ndim ; ++j ) out(nodePer(i,1),j) = out(nodePer(i,0),j); // renumber "out" to be sequential return GooseFEM::Mesh::renumber(out); } // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/MeshQuad4.hpp b/include/GooseFEM/MeshQuad4.hpp index bbbc21d..96b3e54 100644 --- a/include/GooseFEM/MeshQuad4.hpp +++ b/include/GooseFEM/MeshQuad4.hpp @@ -1,900 +1,900 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MESHQUAD4_HPP #define GOOSEFEM_MESHQUAD4_HPP // ------------------------------------------------------------------------------------------------- #include "MeshQuad4.h" // ================================================================================================= namespace GooseFEM { namespace Mesh { namespace Quad4 { // ------------------------------------------------------------------------------------------------- inline Regular::Regular(size_t nelx, size_t nely, double h): m_h(h), m_nelx(nelx), m_nely(nely) { assert( m_nelx >= 1 ); assert( m_nely >= 1 ); m_nnode = (m_nelx+1) * (m_nely+1); m_nelem = m_nelx * m_nely ; } // ------------------------------------------------------------------------------------------------- -inline size_t Regular::nelem() const { return m_nelem; } -inline size_t Regular::nnode() const { return m_nnode; } -inline size_t Regular::nne() const { return m_nne; } -inline size_t Regular::ndim() const { return m_ndim; } +inline size_t Regular::nelem() const +{ return m_nelem; } + +inline size_t Regular::nnode() const +{ return m_nnode; } + +inline size_t Regular::nne() const +{ return m_nne; } + +inline size_t Regular::ndim() const +{ return m_ndim; } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nnodePeriodic() const { return (m_nelx+1) * (m_nely+1) - (m_nely+1) - (m_nelx); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::coor() const { xt::xtensor out = xt::empty({m_nnode, m_ndim}); xt::xtensor x = xt::linspace(0.0, m_h*static_cast(m_nelx), m_nelx+1); xt::xtensor y = xt::linspace(0.0, m_h*static_cast(m_nely), m_nely+1); 
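  // note (added comment): nodes are numbered row-wise, x fastest then y, so node (ix,iy) gets the
  // global number iy*(nelx+1) + ix; the edge/corner helpers below rely on this ordering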
size_t inode = 0; for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) { for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) { out(inode,0) = x(ix); out(inode,1) = y(iy); ++inode; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::conn() const { xt::xtensor out = xt::empty({m_nelem,m_nne}); size_t ielem = 0; for ( size_t iy = 0 ; iy < m_nely ; ++iy ) { for ( size_t ix = 0 ; ix < m_nelx ; ++ix ) { out(ielem,0) = (iy )*(m_nelx+1) + (ix ); out(ielem,1) = (iy )*(m_nelx+1) + (ix+1); out(ielem,3) = (iy+1)*(m_nelx+1) + (ix ); out(ielem,2) = (iy+1)*(m_nelx+1) + (ix+1); ++ielem; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTopEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = ix + m_nely*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesLeftEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iy) = iy*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesRightEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iy) = iy*(m_nelx+1) + m_nelx; return out; } // ---------------------- node-numbers along the bottom edge, without corners ---------------------- inline xt::xtensor Regular::nodesBottomOpenEdge() const { xt::xtensor out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = ix; return out; } // ----------------------- node-numbers along the top edge, without corners ------------------------ inline xt::xtensor Regular::nodesTopOpenEdge() const { xt::xtensor out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = ix + m_nely*(m_nelx+1); return out; } // ----------------------- node-numbers along the left edge, without corners ----------------------- inline xt::xtensor Regular::nodesLeftOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1); return out; } // ---------------------- node-numbers along the right edge, without corners ----------------------- inline xt::xtensor Regular::nodesRightOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nodesBottomLeftCorner() const { return 0; } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nodesBottomRightCorner() const { return m_nelx; } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nodesTopLeftCorner() const { return m_nely*(m_nelx+1); } // 
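// usage sketch (added illustration, not part of this header; it assumes only the public interface
// shown in this file):
//
//   GooseFEM::Mesh::Quad4::Regular mesh(10, 10, 1.0); // 10 x 10 elements with edge size 1.0
//   auto coor = mesh.coor();                          // nodal coordinates, shape [nnode, 2]
//   auto conn = mesh.conn();                          // connectivity, shape [nelem, 4]
//   auto dofs = mesh.dofsPeriodic();                  // DOF numbers with periodic pairs eliminated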
------------------------------------------------------------------------------------------------- inline size_t Regular::nodesTopRightCorner() const { return m_nely*(m_nelx+1) + m_nelx; } // ------------------------------------------------------------------------------------------------- -inline size_t Regular::nodesLeftBottomCorner() const { return nodesBottomLeftCorner(); } -inline size_t Regular::nodesLeftTopCorner() const { return nodesTopLeftCorner(); } -inline size_t Regular::nodesRightBottomCorner() const { return nodesBottomRightCorner(); } -inline size_t Regular::nodesRightTopCorner() const { return nodesTopRightCorner(); } +inline size_t Regular::nodesLeftBottomCorner() const +{ return nodesBottomLeftCorner(); } + +inline size_t Regular::nodesLeftTopCorner() const +{ return nodesTopLeftCorner(); } + +inline size_t Regular::nodesRightBottomCorner() const +{ return nodesBottomRightCorner(); } + +inline size_t Regular::nodesRightTopCorner() const +{ return nodesTopRightCorner(); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesPeriodic() const { // edges (without corners) xt::xtensor bot = nodesBottomOpenEdge(); xt::xtensor top = nodesTopOpenEdge(); xt::xtensor lft = nodesLeftOpenEdge(); xt::xtensor rgt = nodesRightOpenEdge(); // allocate nodal ties // - number of tying per category size_t tedge = bot.size() + lft.size(); size_t tnode = 3; // - allocate xt::xtensor out = xt::empty({tedge+tnode, std::size_t(2)}); // counter size_t i = 0; // tie all corners to one corner out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesBottomRightCorner(); ++i; out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesTopRightCorner(); ++i; out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesTopLeftCorner(); ++i; // tie all corresponding edges to each other for ( size_t j = 0 ; j Regular::dofs() const { return GooseFEM::Mesh::dofs(m_nnode,m_ndim); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::dofsPeriodic() const { // DOF-numbers for each component of each node (sequential) xt::xtensor out = GooseFEM::Mesh::dofs(m_nnode,m_ndim); // periodic node-pairs xt::xtensor nodePer = nodesPeriodic(); // eliminate 'dependent' DOFs; renumber "out" to be sequential for the remaining DOFs for ( size_t i = 0 ; i < nodePer.shape()[0] ; ++i ) for ( size_t j = 0 ; j < m_ndim ; ++j ) out(nodePer(i,1),j) = out(nodePer(i,0),j); // renumber "out" to be sequential return GooseFEM::Mesh::renumber(out); } // ------------------------------------------------------------------------------------------------- inline FineLayer::FineLayer(size_t nelx, size_t nely, double h, size_t nfine): m_h(h) { // basic assumptions assert( nelx >= 1 ); assert( nely >= 1 ); // store basic info m_Lx = m_h * static_cast(nelx); // compute element size in y-direction (use symmetry, compute upper half) // ------------------------------------------------------------------------------------------------- // temporary variables size_t nmin, ntot; xt::xtensor nhx = xt::ones({nely}); xt::xtensor nhy = xt::ones({nely}); xt::xtensor refine = -1 * xt::ones ({nely}); // minimum height in y-direction (half of the height because of symmetry) if ( nely % 2 == 0 ) nmin = nely /2; else nmin = (nely +1)/2; // minimum number of fine layers in y-direction (minimum 1, middle layer part of this half) if ( nfine % 2 == 0 ) nfine = nfine /2 + 1; else nfine = (nfine+1)/2; if ( nfine < 1 ) nfine = 1; if ( nfine 
> nmin ) nfine = nmin; // loop over element layers in y-direction, try to coarsen using these rules: // (1) element size in y-direction <= distance to origin in y-direction // (2) element size in x-direction should fit the total number of elements in x-direction // (3) a certain number of layers have the minimum size "1" (are fine) for ( size_t iy = nfine ; ; ) { // initialize current size in y-direction if ( iy == nfine ) ntot = nfine; // check to stop if ( iy >= nely or ntot >= nmin ) { nely = iy; break; } // rules (1,2) satisfied: coarsen in x-direction if ( 3*nhy(iy) <= ntot and nelx%(3*nhx(iy)) == 0 and ntot+nhy(iy) < nmin ) { refine(iy) = 0; nhy (iy) *= 2; auto vnhy = xt::view(nhy, xt::range(iy+1, _)); auto vnhx = xt::view(nhx, xt::range(iy , _)); vnhy *= 3; vnhx *= 3; } // update the number of elements in y-direction ntot += nhy(iy); // proceed to next element layer in y-direction ++iy; // check to stop if ( iy >= nely or ntot >= nmin ) { nely = iy; break; } } // symmetrize, compute full information // ------------------------------------------------------------------------------------------------- // allocate mesh constructor parameters m_nhx = xt::empty({nely*2-1}); m_nhy = xt::empty({nely*2-1}); m_refine = xt::empty ({nely*2-1}); m_nelx = xt::empty({nely*2-1}); m_nnd = xt::empty({nely*2 }); m_startElem = xt::empty({nely*2-1}); m_startNode = xt::empty({nely*2 }); // fill // - lower half for ( size_t iy = 0 ; iy < nely ; ++iy ) { m_nhx (iy) = nhx (nely-iy-1); m_nhy (iy) = nhy (nely-iy-1); m_refine(iy) = refine(nely-iy-1); } // - upper half for ( size_t iy = 0 ; iy < nely-1 ; ++iy ) { m_nhx (iy+nely) = nhx (iy+1); m_nhy (iy+nely) = nhy (iy+1); m_refine(iy+nely) = refine(iy+1); } // update size nely = m_nhx.size(); // compute the number of elements per element layer in y-direction for ( size_t iy = 0 ; iy < nely ; ++iy ) m_nelx(iy) = nelx / m_nhx(iy); // compute the number of nodes per node layer in y-direction for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) m_nnd(iy ) = m_nelx(iy)+1; for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) m_nnd(iy+1) = m_nelx(iy)+1; // compute mesh dimensions // ----------------------- // initialize m_nnode = 0; m_nelem = 0; m_startNode(0) = 0; // loop over element layers (bottom -> middle, elements become finer) for ( size_t i = 0 ; i < (nely-1)/2 ; ++i ) { // - store the first element of the layer m_startElem(i) = m_nelem; // - add the nodes of this layer if ( m_refine(i) == 0 ) { m_nnode += (3*m_nelx(i)+1); } else { m_nnode += ( m_nelx(i)+1); } // - add the elements of this layer if ( m_refine(i) == 0 ) { m_nelem += (4*m_nelx(i) ); } else { m_nelem += ( m_nelx(i) ); } // - store the starting node of the next layer m_startNode(i+1) = m_nnode; } // loop over element layers (middle -> top, elements become coarser) for ( size_t i = (nely-1)/2 ; i < nely ; ++i ) { // - store the first element of the layer m_startElem(i) = m_nelem; // - add the nodes of this layer if ( m_refine(i) == 0 ) { m_nnode += (5*m_nelx(i)+1); } else { m_nnode += ( m_nelx(i)+1); } // - add the elements of this layer if ( m_refine(i) == 0 ) { m_nelem += (4*m_nelx(i) ); } else { m_nelem += ( m_nelx(i) ); } // - store the starting node of the next layer m_startNode(i+1) = m_nnode; } // - add the top row of nodes m_nnode += m_nelx(nely-1)+1; } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nelem() const -{ - return m_nelem; -} - -// 
------------------------------------------------------------------------------------------------- +{ return m_nelem; } inline size_t FineLayer::nnode() const -{ - return m_nnode; -} - -// ------------------------------------------------------------------------------------------------- +{ return m_nnode; } inline size_t FineLayer::nne() const -{ - return m_nne; -} - -// ------------------------------------------------------------------------------------------------- +{ return m_nne; } inline size_t FineLayer::ndim() const -{ - return m_ndim; -} +{ return m_ndim; } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::shape(size_t i) const { assert( i >= 0 and i <= 1 ); if ( i == 0 ) return xt::amax(m_nelx)[0]; else return xt::sum (m_nhy )[0]; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::coor() const { // allocate output xt::xtensor out = xt::empty({m_nnode, m_ndim}); // current node, number of element layers size_t inode = 0; size_t nely = static_cast(m_nhy.size()); // y-position of each main node layer (i.e. excluding node layers for refinement/coarsening) // - allocate xt::xtensor y = xt::empty({nely+1}); // - initialize y(0) = 0.0; // - compute for ( size_t iy = 1 ; iy < nely+1 ; ++iy ) y(iy) = y(iy-1) + m_nhy(iy-1) * m_h; // loop over element layers (bottom -> middle) : add bottom layer (+ refinement layer) of nodes // ------------------------------------------------------------------------------------------------- for ( size_t iy = 0 ; ; ++iy ) { // get positions along the x- and z-axis xt::xtensor x = xt::linspace(0.0, m_Lx, m_nelx(iy)+1); // add nodes of the bottom layer of this element for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(inode,0) = x(ix); out(inode,1) = y(iy); ++inode; } // stop at middle layer if ( iy == (nely-1)/2 ) break; // add extra nodes of the intermediate layer, for refinement in x-direction if ( m_refine(iy) == 0 ) { // - get position offset in x- and y-direction double dx = m_h * static_cast(m_nhx(iy)/3); double dy = m_h * static_cast(m_nhy(iy)/2); // - add nodes of the intermediate layer for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { for ( size_t j = 0 ; j < 2 ; ++j ) { out(inode,0) = x(ix) + dx * static_cast(j+1); out(inode,1) = y(iy) + dy; ++inode; } } } } // loop over element layers (middle -> top) : add (refinement layer +) top layer of nodes // ------------------------------------------------------------------------------------------------- for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) { // get positions along the x- and z-axis xt::xtensor x = xt::linspace(0.0, m_Lx, m_nelx(iy)+1); // add extra nodes of the intermediate layer, for refinement in x-direction if ( m_refine(iy) == 0 ) { // - get position offset in x- and y-direction double dx = m_h * static_cast(m_nhx(iy)/3); double dy = m_h * static_cast(m_nhy(iy)/2); // - add nodes of the intermediate layer for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { for ( size_t j = 0 ; j < 2 ; ++j ) { out(inode,0) = x(ix) + dx * static_cast(j+1); out(inode,1) = y(iy) + dy; ++inode; } } } // add nodes of the top layer of this element for ( size_t ix = 0 ; ix < m_nelx(iy)+1 ; ++ix ) { out(inode,0) = x(ix ); out(inode,1) = y(iy+1); ++inode; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::conn() const { // allocate output xt::xtensor out = 
xt::empty({m_nelem, m_nne}); // current element, number of element layers, starting nodes of each node layer size_t ielem = 0; size_t nely = static_cast(m_nhy.size()); size_t bot,mid,top; // loop over all element layers for ( size_t iy = 0 ; iy < nely ; ++iy ) { // - get: starting nodes of bottom(, middle) and top layer bot = m_startNode(iy ); mid = m_startNode(iy ) + m_nnd(iy); top = m_startNode(iy+1); // - define connectivity: no coarsening/refinement if ( m_refine(iy) == -1 ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { out(ielem,0) = bot + (ix ); out(ielem,1) = bot + (ix+1); out(ielem,2) = top + (ix+1); out(ielem,3) = top + (ix ); ielem++; } } // - define connectivity: refinement along the x-direction (below the middle layer) else if ( m_refine(iy) == 0 and iy <= (nely-1)/2 ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { // -- bottom element out(ielem,0) = bot + ( ix ); out(ielem,1) = bot + ( ix+1); out(ielem,2) = mid + (2*ix+1); out(ielem,3) = mid + (2*ix ); ielem++; // -- top-right element out(ielem,0) = bot + ( ix+1); out(ielem,1) = top + (3*ix+3); out(ielem,2) = top + (3*ix+2); out(ielem,3) = mid + (2*ix+1); ielem++; // -- top-center element out(ielem,0) = mid + (2*ix ); out(ielem,1) = mid + (2*ix+1); out(ielem,2) = top + (3*ix+2); out(ielem,3) = top + (3*ix+1); ielem++; // -- top-left element out(ielem,0) = bot + ( ix ); out(ielem,1) = mid + (2*ix ); out(ielem,2) = top + (3*ix+1); out(ielem,3) = top + (3*ix ); ielem++; } } // - define connectivity: coarsening along the x-direction (above the middle layer) else if ( m_refine(iy) == 0 and iy > (nely-1)/2 ) { for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) { // -- lower-left element out(ielem,0) = bot + (3*ix ); out(ielem,1) = bot + (3*ix+1); out(ielem,2) = mid + (2*ix ); out(ielem,3) = top + ( ix ); ielem++; // -- lower-center element out(ielem,0) = bot + (3*ix+1); out(ielem,1) = bot + (3*ix+2); out(ielem,2) = mid + (2*ix+1); out(ielem,3) = mid + (2*ix ); ielem++; // -- lower-right element out(ielem,0) = bot + (3*ix+2); out(ielem,1) = bot + (3*ix+3); out(ielem,2) = top + ( ix+1); out(ielem,3) = mid + (2*ix+1); ielem++; // -- upper element out(ielem,0) = mid + (2*ix ); out(ielem,1) = mid + (2*ix+1); out(ielem,2) = top + ( ix+1); out(ielem,3) = top + ( ix ); ielem++; } } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::elementsMiddleLayer() const { // number of element layers in y-direction, the index of the middle layer size_t nely = static_cast(m_nhy.size()); size_t iy = (nely-1)/2; xt::xtensor out = xt::empty({m_nelx(iy)}); for ( size_t ix = 0 ; ix < m_nelx(iy) ; ++ix ) out(ix) = m_startElem(iy) + ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesBottomEdge() const { xt::xtensor out = xt::empty({m_nelx(0)+1}); for ( size_t ix = 0 ; ix < m_nelx(0)+1 ; ++ix ) out(ix) = m_startNode(0) + ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesTopEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelx(nely-1)+1}); for ( size_t ix = 0 ; ix < m_nelx(nely-1)+1 ; ++ix ) out(ix) = m_startNode(nely) + ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesLeftEdge() const { size_t nely = 
static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely+1}); for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) out(iy) = m_startNode(iy); for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) out(iy+1) = m_startNode(iy+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesRightEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely+1}); for ( size_t iy = 0 ; iy < (nely+1)/2 ; ++iy ) out(iy) = m_startNode(iy) + m_nelx(iy); for ( size_t iy = (nely-1)/2 ; iy < nely ; ++iy ) out(iy+1) = m_startNode(iy+1) + m_nelx(iy); return out; } // ---------------------- node-numbers along the bottom edge, without corners ---------------------- inline xt::xtensor FineLayer::nodesBottomOpenEdge() const { xt::xtensor out = xt::empty({m_nelx(0)-1}); for ( size_t ix = 1 ; ix < m_nelx(0) ; ++ix ) out(ix-1) = m_startNode(0) + ix; return out; } // ----------------------- node-numbers along the top edge, without corners ------------------------ inline xt::xtensor FineLayer::nodesTopOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({m_nelx(nely-1)-1}); for ( size_t ix = 1 ; ix < m_nelx(nely-1) ; ++ix ) out(ix-1) = m_startNode(nely) + ix; return out; } // ----------------------- node-numbers along the left edge, without corners ----------------------- inline xt::xtensor FineLayer::nodesLeftOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely-1}); for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) out(iy-1) = m_startNode(iy); for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) out(iy) = m_startNode(iy+1); return out; } // ---------------------- node-numbers along the right edge, without corners ----------------------- inline xt::xtensor FineLayer::nodesRightOpenEdge() const { size_t nely = static_cast(m_nhy.size()); xt::xtensor out = xt::empty({nely-1}); for ( size_t iy = 1 ; iy < (nely+1)/2 ; ++iy ) out(iy-1) = m_startNode(iy) + m_nelx(iy); for ( size_t iy = (nely-1)/2 ; iy < nely-1 ; ++iy ) out(iy) = m_startNode(iy+1) + m_nelx(iy); return out; } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nodesBottomLeftCorner() const { return m_startNode(0); } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nodesBottomRightCorner() const { return m_startNode(0) + m_nelx(0); } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nodesTopLeftCorner() const { size_t nely = static_cast(m_nhy.size()); return m_startNode(nely); } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nodesTopRightCorner() const { size_t nely = static_cast(m_nhy.size()); return m_startNode(nely) + m_nelx(nely-1); } // ------------------------------------------------------------------------------------------------- inline size_t FineLayer::nodesLeftBottomCorner() const { return nodesBottomLeftCorner(); } inline size_t FineLayer::nodesRightBottomCorner() const { return nodesBottomRightCorner(); } inline size_t FineLayer::nodesLeftTopCorner() const { return nodesTopLeftCorner(); } inline size_t FineLayer::nodesRightTopCorner() const { return nodesTopRightCorner(); } // 
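// note (added comment): nodesPeriodic() below returns an [ntying, 2] array of node pairs: column 0
// holds the independent node, column 1 the dependent node tied to it (all corners are tied to the
// bottom-left corner, and the top/right open edges to the bottom/left ones); dofsPeriodic() uses
// these pairs to give each dependent node the DOF numbers of its independent partner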
------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::nodesPeriodic() const { // edges (without corners) xt::xtensor bot = nodesBottomOpenEdge(); xt::xtensor top = nodesTopOpenEdge(); xt::xtensor lft = nodesLeftOpenEdge(); xt::xtensor rgt = nodesRightOpenEdge(); // allocate nodal ties // - number of tying per category size_t tedge = bot.size() + lft.size(); size_t tnode = 3; // - allocate xt::xtensor out = xt::empty({tedge+tnode, std::size_t(2)}); // counter size_t i = 0; // tie all corners to one corner out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesBottomRightCorner(); ++i; out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesTopRightCorner(); ++i; out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesTopLeftCorner(); ++i; // tie all corresponding edges to each other for ( size_t j = 0 ; j FineLayer::dofs() const { return GooseFEM::Mesh::dofs(m_nnode,m_ndim); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor FineLayer::dofsPeriodic() const { // DOF-numbers for each component of each node (sequential) xt::xtensor out = GooseFEM::Mesh::dofs(m_nnode,m_ndim); // periodic node-pairs xt::xtensor nodePer = nodesPeriodic(); // eliminate 'dependent' DOFs; renumber "out" to be sequential for the remaining DOFs for ( size_t i = 0 ; i < nodePer.shape()[0] ; ++i ) for ( size_t j = 0 ; j < m_ndim ; ++j ) out(nodePer(i,1),j) = out(nodePer(i,0),j); // renumber "out" to be sequential return GooseFEM::Mesh::renumber(out); } // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/MeshTri3.hpp b/include/GooseFEM/MeshTri3.hpp index 5eab8d4..a7c5d4a 100644 --- a/include/GooseFEM/MeshTri3.hpp +++ b/include/GooseFEM/MeshTri3.hpp @@ -1,579 +1,593 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_MESHTRI3_HPP #define GOOSEFEM_MESHTRI3_HPP // ------------------------------------------------------------------------------------------------- #include "MeshTri3.h" // ================================================================================================= namespace GooseFEM { namespace Mesh { namespace Tri3 { // ------------------------------------------------------------------------------------------------- inline Regular::Regular(size_t nelx, size_t nely, double h): m_h(h), m_nelx(nelx), m_nely(nely) { assert( m_nelx >= 1 ); assert( m_nely >= 1 ); m_nnode = (m_nelx+1) * (m_nely+1); m_nelem = m_nelx * m_nely * 2; } // ------------------------------------------------------------------------------------------------- -inline size_t Regular::nelem() const { return m_nelem; } -inline size_t Regular::nnode() const { return m_nnode; } -inline size_t Regular::nne() const { return m_nne; } -inline size_t Regular::ndim() const { return m_ndim; } +inline size_t Regular::nelem() const +{ return m_nelem; } + +inline size_t Regular::nnode() const +{ return m_nnode; } + +inline size_t Regular::nne() const +{ return m_nne; } + +inline size_t Regular::ndim() const +{ return m_ndim; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::coor() const { xt::xtensor out = xt::empty({m_nnode, m_ndim}); xt::xtensor x = xt::linspace(0.0, m_h*static_cast(m_nelx), m_nelx+1); xt::xtensor y = xt::linspace(0.0, m_h*static_cast(m_nely), m_nely+1); size_t inode = 0; for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) { for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) { out(inode,0) = x(ix); out(inode,1) = y(iy); ++inode; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::conn() const { xt::xtensor out = xt::empty({m_nelem,m_nne}); size_t ielem = 0; for ( size_t iy = 0 ; iy < m_nely ; ++iy ) { for ( size_t ix = 0 ; ix < m_nelx ; ++ix ) { out(ielem,0) = (iy )*(m_nelx+1) + (ix ); out(ielem,1) = (iy )*(m_nelx+1) + (ix+1); out(ielem,2) = (iy+1)*(m_nelx+1) + (ix ); ++ielem; out(ielem,0) = (iy )*(m_nelx+1) + (ix+1); out(ielem,1) = (iy+1)*(m_nelx+1) + (ix+1); out(ielem,2) = (iy+1)*(m_nelx+1) + (ix ); ++ielem; } } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTopEdge() const { xt::xtensor out = xt::empty({m_nelx+1}); for ( size_t ix = 0 ; ix < m_nelx+1 ; ++ix ) out(ix) = ix + m_nely*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesLeftEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; ++iy ) out(iy) = iy*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesRightEdge() const { xt::xtensor out = xt::empty({m_nely+1}); for ( size_t iy = 0 ; iy < m_nely+1 ; 
++iy ) out(iy) = iy*(m_nelx+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesBottomOpenEdge() const { xt::xtensor out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = ix; return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesTopOpenEdge() const { xt::xtensor out = xt::empty({m_nelx-1}); for ( size_t ix = 1 ; ix < m_nelx ; ++ix ) out(ix-1) = ix + m_nely*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesLeftOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesRightOpenEdge() const { xt::xtensor out = xt::empty({m_nely-1}); for ( size_t iy = 1 ; iy < m_nely ; ++iy ) out(iy-1) = iy*(m_nelx+1) + m_nelx; return out; } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nodesBottomLeftCorner() const { return 0; } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nodesBottomRightCorner() const { return m_nelx; } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nodesTopLeftCorner() const { return m_nely*(m_nelx+1); } // ------------------------------------------------------------------------------------------------- inline size_t Regular::nodesTopRightCorner() const { return m_nely*(m_nelx+1) + m_nelx; } // ------------------------------------------------------------------------------------------------- -inline size_t Regular::nodesLeftBottomCorner() const { return nodesBottomLeftCorner(); } -inline size_t Regular::nodesLeftTopCorner() const { return nodesTopLeftCorner(); } -inline size_t Regular::nodesRightBottomCorner() const { return nodesBottomRightCorner(); } -inline size_t Regular::nodesRightTopCorner() const { return nodesTopRightCorner(); } +inline size_t Regular::nodesLeftBottomCorner() const +{ return nodesBottomLeftCorner(); } + +inline size_t Regular::nodesLeftTopCorner() const +{ return nodesTopLeftCorner(); } + +inline size_t Regular::nodesRightBottomCorner() const +{ return nodesBottomRightCorner(); } + +inline size_t Regular::nodesRightTopCorner() const +{ return nodesTopRightCorner(); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::nodesPeriodic() const { // edges (without corners) xt::xtensor bot = nodesBottomOpenEdge(); xt::xtensor top = nodesTopOpenEdge(); xt::xtensor lft = nodesLeftOpenEdge(); xt::xtensor rgt = nodesRightOpenEdge(); // allocate nodal ties // - number of tying per category size_t tedge = bot.size() + lft.size(); size_t tnode = 3; // - allocate xt::xtensor out = xt::empty({tedge+tnode, std::size_t(2)}); // counter size_t i = 0; // tie all corners to one corner out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesBottomRightCorner(); ++i; out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesTopRightCorner(); ++i; out(i,0) = nodesBottomLeftCorner(); out(i,1) = nodesTopLeftCorner(); ++i; // tie all 
corresponding edges to each other for ( size_t j = 0 ; j Regular::dofs() const { return GooseFEM::Mesh::dofs(m_nnode,m_ndim); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor Regular::dofsPeriodic() const { // DOF-numbers for each component of each node (sequential) xt::xtensor out = GooseFEM::Mesh::dofs(m_nnode,m_ndim); // periodic node-pairs xt::xtensor nodePer = nodesPeriodic(); // eliminate 'dependent' DOFs; renumber "out" to be sequential for the remaining DOFs for ( size_t i = 0 ; i < nodePer.shape()[0] ; ++i ) for ( size_t j = 0 ; j < m_ndim ; ++j ) out(nodePer(i,1),j) = out(nodePer(i,0),j); // renumber "out" to be sequential return GooseFEM::Mesh::renumber(out); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor getOrientation(const xt::xtensor &coor, const xt::xtensor &conn) { assert( conn.shape()[1] == 3 ); assert( coor.shape()[1] == 2 ); double k; size_t nelem = conn.shape()[0]; xt::xtensor out = xt::empty({nelem}); for ( size_t ielem = 0 ; ielem < nelem ; ++ielem ) { auto v1 = xt::view(coor, conn(ielem,0), xt::all()) - xt::view(coor, conn(ielem,1), xt::all()); auto v2 = xt::view(coor, conn(ielem,2), xt::all()) - xt::view(coor, conn(ielem,1), xt::all()); k = v1(0) * v2(1) - v2(0) * v1(1); if ( k < 0 ) out(ielem) = -1; else out(ielem) = +1; } return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor setOrientation(const xt::xtensor &coor, const xt::xtensor &conn, int orientation) { assert( conn.shape()[1] == 3 ); assert( coor.shape()[1] == 2 ); assert( orientation == -1 || orientation == +1 ); xt::xtensor val = getOrientation(coor, conn); return setOrientation(coor, conn, val, orientation); } // ------------------------------------------------------------------------------------------------- inline xt::xtensor setOrientation(const xt::xtensor &coor, const xt::xtensor &conn, const xt::xtensor &val, int orientation) { assert( conn.shape()[1] == 3 ); assert( coor.shape()[1] == 2 ); assert( conn.shape()[0] == val.size() ); assert( orientation == -1 || orientation == +1 ); // avoid compiler warning UNUSED(coor); size_t nelem = conn.shape()[0]; xt::xtensor out = conn; for ( size_t ielem = 0 ; ielem < nelem ; ++ielem ) if ( ( orientation == -1 and val(ielem) > 0 ) or ( orientation == +1 and val(ielem) < 0 ) ) std::swap( out(ielem,2) , out(ielem,1) ); return out; } // ------------------------------------------------------------------------------------------------- inline xt::xtensor retriangulate(const xt::xtensor &coor, const xt::xtensor &conn, int orientation) { // get the orientation of all elements xt::xtensor dir = getOrientation(coor, conn); // check the orientation bool eq = static_cast(std::abs(xt::sum(dir)[0])) == conn.shape()[0]; // new connectivity xt::xtensor out; // perform re-triangulation // - use "TriUpdate" if ( eq ) { Private::TriUpdate obj(coor,conn); obj.eval(); out = obj.conn(); } // - using TriCompute else { throw std::runtime_error("Work-in-progress, has to be re-triangulated using 'TriCompute'"); } return setOrientation(coor,out,orientation); } // ================================================================================================= namespace Private { // ------------------------------------------------------------------------------------------------- inline TriUpdate::TriUpdate(const xt::xtensor &coor, const xt::xtensor &conn): 
m_conn(conn), m_coor(coor) { assert( conn.shape()[1] == 3 ); assert( coor.shape()[1] == 2 ); // store shapes m_nnode = coor.shape()[0]; m_ndim = coor.shape()[1]; m_nelem = conn.shape()[0]; m_nne = conn.shape()[1]; // set default to out-of-bounds, to make clear that nothing happened yet m_elem = m_nelem * xt::ones({2}); m_node = m_nnode * xt::ones({4}); edge(); } // ------------------------------------------------------------------------------------------------- inline void TriUpdate::edge() { // signal that nothing has been set m_edge = m_nelem * xt::ones({m_nelem , m_nne}); std::vector idx = {0,1,2}; // lists to convert connectivity -> edges std::vector jdx = {1,2,0}; // lists to convert connectivity -> edges std::vector edge; edge.reserve(m_nelem*idx.size()); for ( size_t e = 0 ; e < m_nelem ; ++e ) for ( size_t i = 0 ; i < m_nne ; ++i ) edge.push_back( Edge( m_conn(e,idx[i]), m_conn(e,jdx[i]) , e , i , true ) ); std::sort( edge.begin() , edge.end() , Edge_sort ); for ( size_t i = 0 ; i < edge.size()-1 ; ++i ) { if ( edge[i].n1 == edge[i+1].n1 and edge[i].n2 == edge[i+1].n2 ) { m_edge( edge[i ].elem , edge[i ].edge ) = edge[i+1].elem; m_edge( edge[i+1].elem , edge[i+1].edge ) = edge[i ].elem; } } } // ------------------------------------------------------------------------------------------------- inline void TriUpdate::chedge(size_t edge, size_t old_elem, size_t new_elem) { size_t m; size_t neigh = m_edge(old_elem , edge); if ( neigh >= m_nelem ) return; for ( m = 0 ; m < m_nne ; ++m ) if ( m_edge( neigh , m ) == old_elem ) break; m_edge( neigh , m ) = new_elem; } // ------------------------------------------------------------------------------------------------- inline bool TriUpdate::eval() { bool change = false; while ( increment() ) { change = true; } return change; } // ------------------------------------------------------------------------------------------------- inline bool TriUpdate::increment() { size_t ielem,jelem,iedge,jedge; double phi1,phi2; xt::xtensor_fixed> c = xt::empty({4}); xt::xtensor_fixed> n = xt::empty({4}); // loop over all elements for ( ielem = 0 ; ielem < m_nelem ; ++ielem ) { // loop over all edges for ( iedge = 0 ; iedge < m_nne ; ++iedge ) { // only proceed if the edge is shared with another element if ( m_edge(ielem,iedge) >= m_nelem ) continue; // read "jelem" jelem = m_edge(ielem,iedge); // find the edge involved for "jelem" for ( jedge=0; jedge M_PI ) { // update connectivity m_conn(ielem,0) = c(0); m_conn(ielem,1) = c(3); m_conn(ielem,2) = c(2); m_conn(jelem,0) = c(0); m_conn(jelem,1) = c(1); m_conn(jelem,2) = c(3); // change list with neighbors for the elements around (only two neighbors change) if ( iedge==0 ) { chedge(2,ielem,jelem); } else if ( iedge==1 ) { chedge(0,ielem,jelem); } else if ( iedge==2 ) { chedge(1,ielem,jelem); } if ( jedge==0 ) { chedge(2,jelem,ielem); } else if ( jedge==1 ) { chedge(0,jelem,ielem); } else if ( jedge==2 ) { chedge(1,jelem,ielem); } // convert to four static nodes if ( iedge==0 ) { n(0)=m_edge(ielem,2); n(3)=m_edge(ielem,1); } else if ( iedge==1 ) { n(0)=m_edge(ielem,0); n(3)=m_edge(ielem,2); } else if ( iedge==2 ) { n(0)=m_edge(ielem,1); n(3)=m_edge(ielem,0); } if ( jedge==0 ) { n(1)=m_edge(jelem,1); n(2)=m_edge(jelem,2); } else if ( jedge==1 ) { n(1)=m_edge(jelem,2); n(2)=m_edge(jelem,0); } else if ( jedge==2 ) { n(1)=m_edge(jelem,0); n(2)=m_edge(jelem,1); } // store the neighbors for the changed elements m_edge(ielem,0) = jelem; m_edge(jelem,0) = n(0) ; m_edge(ielem,1) = n(2) ; m_edge(jelem,1) = n(1) ; 
m_edge(ielem,2) = n(3) ; m_edge(jelem,2) = ielem; // store information for transfer algorithm m_node = c; m_elem(0) = ielem; m_elem(1) = jelem; return true; } } } return false; } // ------------------------------------------------------------------------------------------------- inline Edge::Edge(size_t i, size_t j, size_t el, size_t ed, bool sort): n1(i), n2(j), elem(el), edge(ed) { if ( sort && n1>n2 ) std::swap(n1,n2); } // ------------------------------------------------------------------------------------------------- inline bool Edge_cmp(Edge a, Edge b) { if ( a.n1 == b.n1 and a.n2 == b.n2 ) return true; return false; } // ------------------------------------------------------------------------------------------------- inline bool Edge_sort(Edge a, Edge b) { if ( a.n1 < b.n1 or a.n2 < b.n2 ) return true; return false; } // ------------------------------------------------------------------------------------------------- } // namespace Private // ------------------------------------------------------------------------------------------------- }}} // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/TyingsPeriodic.h b/include/GooseFEM/TyingsPeriodic.h new file mode 100644 index 0000000..91c4a50 --- /dev/null +++ b/include/GooseFEM/TyingsPeriodic.h @@ -0,0 +1,94 @@ +/* ================================================================================================= + +(c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM + +================================================================================================= */ + +#ifndef GOOSEFEM_TYINGSPERIODIC_H +#define GOOSEFEM_TYINGSPERIODIC_H + +// ------------------------------------------------------------------------------------------------- + +#include "config.h" + +#include +#include + +// ================================================================================================= + +namespace GooseFEM { +namespace Tyings { + +// ------------------------------------------------------------------------------------------------- + +class Periodic +{ +public: + + // Constructors + + Periodic() = default; + + Periodic( + const xt::xtensor& coor, + const xt::xtensor& dofs, + const xt::xtensor& control_dofs, + const xt::xtensor& nodal_tyings); + + Periodic( + const xt::xtensor& coor, + const xt::xtensor& dofs, + const xt::xtensor& control_dofs, + const xt::xtensor& nodal_tyings, + const xt::xtensor& iip); + + // Dimensions + + size_t nnd() const; // dependent DOFs + size_t nni() const; // independent DOFs + size_t nnu() const; // independent, unknown DOFs + size_t nnp() const; // independent, prescribed DOFs + + // DOF lists + + xt::xtensor dofs() const; // DOFs + xt::xtensor control() const; // control DOFs + xt::xtensor iid() const; // dependent DOFs + xt::xtensor iii() const; // independent DOFs + xt::xtensor iiu() const; // independent, unknown DOFs + xt::xtensor iip() const; // independent, prescribed DOFs + + // Return the tying matrix + // u_d = C_di * u_i + // u_d = [C_du, C_dp]^T * [u_u, u_p] = C_du * u_u + C_dp * u_p + + Eigen::SparseMatrix Cdi() const; + Eigen::SparseMatrix Cdu() const; + Eigen::SparseMatrix Cdp() const; + +private: + + size_t m_nnu; + size_t m_nnp; + size_t m_nni; + size_t m_nnd; + size_t m_ndim; + size_t m_nties; + xt::xtensor m_dofs; + xt::xtensor m_control; + xt::xtensor m_tyings; + xt::xtensor m_coor; + +}; + +// 
------------------------------------------------------------------------------------------------- + +}} // namespace ... + +// ------------------------------------------------------------------------------------------------- + +#include "TyingsPeriodic.hpp" + +// ================================================================================================= + +#endif diff --git a/include/GooseFEM/TyingsPeriodic.hpp b/include/GooseFEM/TyingsPeriodic.hpp new file mode 100644 index 0000000..cdac5af --- /dev/null +++ b/include/GooseFEM/TyingsPeriodic.hpp @@ -0,0 +1,200 @@ +/* ================================================================================================= + +(c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM + +================================================================================================= */ + +#ifndef GOOSEFEM_TYINGSPERIODIC_HPP +#define GOOSEFEM_TYINGSPERIODIC_HPP + +// ------------------------------------------------------------------------------------------------- + +#include "TyingsPeriodic.h" +#include "Mesh.h" + +#include +#include + +// ================================================================================================= + +namespace GooseFEM { +namespace Tyings { + +// ------------------------------------------------------------------------------------------------- + +inline Periodic::Periodic( + const xt::xtensor& coor, + const xt::xtensor& dofs, + const xt::xtensor& control, + const xt::xtensor& nodal_tyings) : + Periodic(coor, dofs, control, nodal_tyings, xt::empty({0})) +{ +} + +// ------------------------------------------------------------------------------------------------- + +inline Periodic::Periodic( + const xt::xtensor& coor, + const xt::xtensor& dofs, + const xt::xtensor& control, + const xt::xtensor& nodal_tyings, + const xt::xtensor& iip) : + m_tyings(nodal_tyings), + m_coor(coor) +{ + m_ndim = m_coor .shape()[1]; + m_nties = m_tyings.shape()[0]; + + xt::xtensor dependent = xt::view(m_tyings, xt::all(), 1); + + xt::xtensor iid = xt::flatten(xt::view(dofs, xt::keep(dependent), xt::all())); + xt::xtensor iii = xt::setdiff1d(dofs, iid); + xt::xtensor iiu = xt::setdiff1d(iii , iip); + + m_nnu = iiu.size(); + m_nnp = iip.size(); + m_nni = iii.size(); + m_nnd = iid.size(); + + GooseFEM::Mesh::Reorder reorder({iiu, iip, iid}); + + m_dofs = reorder.apply(dofs); + m_control = reorder.apply(control); +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor Periodic::dofs() const +{ return m_dofs; } + +inline xt::xtensor Periodic::control() const +{ return m_control; } + +inline size_t Periodic::nnu() const +{ return m_nnu; } + +inline size_t Periodic::nnp() const +{ return m_nnp; } + +inline size_t Periodic::nni() const +{ return m_nni; } + +inline size_t Periodic::nnd() const +{ return m_nnd; } + +inline xt::xtensor Periodic::iiu() const +{ return xt::arange(m_nnu); } + +inline xt::xtensor Periodic::iip() const +{ return xt::arange(m_nnp) + m_nnu; } + +inline xt::xtensor Periodic::iii() const +{ return xt::arange(m_nni); } + +inline xt::xtensor Periodic::iid() const +{ return xt::arange(m_nnd) + m_nni; } + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::SparseMatrix Periodic::Cdi() const +{ + std::vector> data; + + data.reserve(m_nties*m_ndim*(m_ndim+1)); + + for (size_t i = 0; i < m_nties; ++i) + { + for (size_t j = 0; j < m_ndim; ++j) + { + size_t 
ni = m_tyings(i,0); + size_t nd = m_tyings(i,1); + + data.push_back(Eigen::Triplet(i*m_ndim+j, m_dofs(ni,j), +1.)); + + for (size_t k = 0; k < m_ndim; ++k) + data.push_back(Eigen::Triplet(i*m_ndim+j, m_control(j,k), m_coor(nd,k)-m_coor(ni,k))); + } + } + + Eigen::SparseMatrix Cdi; + + Cdi.resize(m_nnd, m_nni); + + Cdi.setFromTriplets(data.begin(), data.end()); + + return Cdi; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::SparseMatrix Periodic::Cdu() const +{ + std::vector> data; + + data.reserve(m_nties*m_ndim*(m_ndim+1)); + + for (size_t i = 0; i < m_nties; ++i) + { + for (size_t j = 0; j < m_ndim; ++j) + { + size_t ni = m_tyings(i,0); + size_t nd = m_tyings(i,1); + + if ( m_dofs(ni,j) < m_nnu ) + data.push_back(Eigen::Triplet(i*m_ndim+j, m_dofs(ni,j), +1.)); + + for (size_t k = 0; k < m_ndim; ++k) + if ( m_control(j,k) < m_nnu ) + data.push_back(Eigen::Triplet(i*m_ndim+j, m_control(j,k), m_coor(nd,k)-m_coor(ni,k))); + } + } + + Eigen::SparseMatrix Cdu; + + Cdu.resize(m_nnd, m_nnu); + + Cdu.setFromTriplets(data.begin(), data.end()); + + return Cdu; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::SparseMatrix Periodic::Cdp() const +{ + std::vector> data; + + data.reserve(m_nties*m_ndim*(m_ndim+1)); + + for (size_t i = 0; i < m_nties; ++i) + { + for (size_t j = 0; j < m_ndim; ++j) + { + size_t ni = m_tyings(i,0); + size_t nd = m_tyings(i,1); + + if ( m_dofs(ni,j) >= m_nnu ) + data.push_back(Eigen::Triplet(i*m_ndim+j, m_dofs(ni,j)-m_nnu, +1.)); + + for (size_t k = 0; k < m_ndim; ++k) + if ( m_control(j,k) >= m_nnu ) + data.push_back(Eigen::Triplet(i*m_ndim+j, m_control(j,k)-m_nnu, m_coor(nd,k)-m_coor(ni,k))); + } + } + + Eigen::SparseMatrix Cdp; + + Cdp.resize(m_nnd, m_nnp); + + Cdp.setFromTriplets(data.begin(), data.end()); + + return Cdp; +} + +// ------------------------------------------------------------------------------------------------- + +}} // namespace ... + +// ================================================================================================= + +#endif diff --git a/include/GooseFEM/Vector.h b/include/GooseFEM/Vector.h index 1480f50..68865ae 100644 --- a/include/GooseFEM/Vector.h +++ b/include/GooseFEM/Vector.h @@ -1,135 +1,156 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_VECTOR_H #define GOOSEFEM_VECTOR_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- /* "nodevec" - nodal vectors - [nnode, ndim] "elemvec" - nodal vectors stored per element - [nelem, nne, ndim] "dofval" - DOF values - [ndof] */ class Vector { public: - // constructor + // Constructor Vector() = default; - Vector(const xt::xtensor &conn, const xt::xtensor &dofs); + Vector( + const xt::xtensor& conn, + const xt::xtensor& dofs); - // dimensions + // Dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t nnode() const; // number of nodes size_t ndim() const; // number of dimensions size_t ndof() const; // number of DOFs // DOF lists xt::xtensor dofs() const; // DOFs - // copy nodevec to another nodevec + // Copy nodevec to another nodevec - void copy(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const; + void copy( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const; - // convert to "dofval" (overwrite entries that occur more than once) -- (auto allocation below) + // Convert to "dofval" (overwrite entries that occur more than once) -- (auto allocation below) - void asDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const; + void asDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const; - void asDofs(const xt::xtensor &elemvec, - xt::xtensor &dofval) const; + void asDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const; - // convert to "nodevec" (overwrite entries that occur more than once) -- (auto allocation below) + // Convert to "nodevec" (overwrite entries that occur more than once) -- (auto allocation below) - void asNode(const xt::xtensor &dofval, - xt::xtensor &nodevec) const; + void asNode( + const xt::xtensor& dofval, + xt::xtensor& nodevec) const; - void asNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const; + void asNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const; - // convert to "elemvec" (overwrite entries that occur more than once) -- (auto allocation below) + // Convert to "elemvec" (overwrite entries that occur more than once) -- (auto allocation below) - void asElement(const xt::xtensor &dofval, - xt::xtensor &elemvec) const; + void asElement( + const xt::xtensor& dofval, + xt::xtensor& elemvec) const; - void asElement(const xt::xtensor &nodevec, - xt::xtensor &elemvec) const; + void asElement( + const xt::xtensor& nodevec, + xt::xtensor& elemvec) const; - // assemble "dofval" (adds entries that occur more that once) -- (auto allocation below) + // Assemble "dofval" (adds entries that occur more that once) -- (auto allocation below) - void assembleDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const; + void assembleDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const; - void assembleDofs(const xt::xtensor &elemvec, - xt::xtensor &dofval) const; + void assembleDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const; - // assemble "nodevec" (adds entries that occur more that once) -- (auto allocation below) + // Assemble 
"nodevec" (adds entries that occur more that once) -- (auto allocation below) - void assembleNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const; + void assembleNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const; - // auto allocation of the functions above + // Auto-allocation of the functions above - xt::xtensor asDofs(const xt::xtensor &nodevec) const; + xt::xtensor AsDofs( + const xt::xtensor& nodevec) const; - xt::xtensor asDofs(const xt::xtensor &elemvec) const; + xt::xtensor AsDofs( + const xt::xtensor& elemvec) const; - xt::xtensor asNode(const xt::xtensor &dofval) const; + xt::xtensor AsNode( + const xt::xtensor& dofval) const; - xt::xtensor asNode(const xt::xtensor &elemvec) const; + xt::xtensor AsNode( + const xt::xtensor& elemvec) const; - xt::xtensor asElement(const xt::xtensor &dofval) const; + xt::xtensor AsElement( + const xt::xtensor& dofval) const; - xt::xtensor asElement(const xt::xtensor &nodevec) const; + xt::xtensor AsElement( + const xt::xtensor& nodevec) const; - xt::xtensor assembleDofs(const xt::xtensor &nodevec) const; + xt::xtensor AssembleDofs( + const xt::xtensor& nodevec) const; - xt::xtensor assembleDofs(const xt::xtensor &elemvec) const; + xt::xtensor AssembleDofs( + const xt::xtensor& elemvec) const; - xt::xtensor assembleNode(const xt::xtensor &elemvec) const; + xt::xtensor AssembleNode( + const xt::xtensor& elemvec) const; private: - // bookkeeping + // Bookkeeping xt::xtensor m_conn; // connectivity [nelem, nne ] xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] - // dimensions + // Dimensions size_t m_nelem; // number of elements size_t m_nne; // number of nodes per element size_t m_nnode; // number of nodes size_t m_ndim; // number of dimensions size_t m_ndof; // number of DOFs }; // ------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #include "Vector.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/Vector.hpp b/include/GooseFEM/Vector.hpp index 48dc27f..2d96b0f 100644 --- a/include/GooseFEM/Vector.hpp +++ b/include/GooseFEM/Vector.hpp @@ -1,324 +1,348 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_VECTOR_HPP #define GOOSEFEM_VECTOR_HPP // ------------------------------------------------------------------------------------------------- #include "Vector.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- -inline Vector::Vector(const xt::xtensor &conn, const xt::xtensor &dofs) : +inline Vector::Vector( + const xt::xtensor& conn, + const xt::xtensor& dofs) : m_conn(conn), m_dofs(dofs) { // mesh dimensions m_nelem = m_conn.shape()[0]; m_nne = m_conn.shape()[1]; m_nnode = m_dofs.shape()[0]; m_ndim = m_dofs.shape()[1]; // dimensions of the system m_ndof = xt::amax(m_dofs)[0] + 1; // check consistency - assert( xt::amax(m_conn)[0] + 1 == m_nnode ); - assert( m_ndof <= m_nnode * m_ndim ); + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(m_ndof <= m_nnode * m_ndim); } // ------------------------------------------------------------------------------------------------- -inline size_t Vector::nelem() const { return m_nelem; } +inline size_t Vector::nelem() const +{ return m_nelem; } -inline size_t Vector::nne() const { return m_nne; } +inline size_t Vector::nne() const +{ return m_nne; } -inline size_t Vector::nnode() const { return m_nnode; } +inline size_t Vector::nnode() const +{ return m_nnode; } -inline size_t Vector::ndim() const { return m_ndim; } +inline size_t Vector::ndim() const +{ return m_ndim; } -inline size_t Vector::ndof() const { return m_ndof; } +inline size_t Vector::ndof() const +{ return m_ndof; } -inline xt::xtensor Vector::dofs() const { return m_dofs; } +inline xt::xtensor Vector::dofs() const +{ return m_dofs; } // ------------------------------------------------------------------------------------------------- -inline void Vector::copy(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const +inline void Vector::copy( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const { - assert( nodevec_src .shape()[0] == m_nnode ); - assert( nodevec_src .shape()[1] == m_ndim ); - assert( nodevec_dest.shape()[0] == m_nnode ); - assert( nodevec_dest.shape()[1] == m_ndim ); + assert(nodevec_src .shape()[0] == m_nnode); + assert(nodevec_src .shape()[1] == m_ndim ); + assert(nodevec_dest.shape()[0] == m_nnode); + assert(nodevec_dest.shape()[1] == m_ndim ); xt::noalias(nodevec_dest) = nodevec_src; } // ------------------------------------------------------------------------------------------------- -inline void Vector::asDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const +inline void Vector::asDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval.size() == m_ndof ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m,i)) = nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void Vector::asDofs(const xt::xtensor &elemvec, - xt::xtensor 
&dofval) const +inline void Vector::asDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval.size() == m_ndof ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m_conn(e,m),i)) = elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void Vector::asNode(const xt::xtensor &dofval, - xt::xtensor &nodevec) const +inline void Vector::asNode( + const xt::xtensor& dofval, + xt::xtensor& nodevec) const { - assert( dofval.size() == m_ndof ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(dofval.size() == m_ndof ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) nodevec(m,i) = dofval(m_dofs(m,i)); } // ------------------------------------------------------------------------------------------------- -inline void Vector::asNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const +inline void Vector::asNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) nodevec(m_conn(e,m),i) = elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void Vector::asElement(const xt::xtensor &dofval, - xt::xtensor &elemvec) const +inline void Vector::asElement( + const xt::xtensor& dofval, + xt::xtensor& elemvec) const { - assert( dofval.size() == m_ndof ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(dofval.size() == m_ndof ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) elemvec(e,m,i) = dofval(m_dofs(m_conn(e,m),i)); } // ------------------------------------------------------------------------------------------------- -inline void 
Vector::asElement(const xt::xtensor &nodevec, - xt::xtensor &elemvec) const +inline void Vector::asElement( + const xt::xtensor& nodevec, + xt::xtensor& elemvec) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) elemvec(e,m,i) = nodevec(m_conn(e,m),i); } // ------------------------------------------------------------------------------------------------- -inline void Vector::assembleDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const +inline void Vector::assembleDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval.size() == m_ndof ); dofval.fill(0.0); - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m,i)) += nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void Vector::assembleDofs(const xt::xtensor &elemvec, - xt::xtensor &dofval) const +inline void Vector::assembleDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval.size() == m_ndof ); dofval.fill(0.0); - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m_conn(e,m),i)) += elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void Vector::assembleNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const +inline void Vector::assembleNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); // assemble to DOFs - xt::xtensor dofval = this->assembleDofs(elemvec); + xt::xtensor dofval = this->AssembleDofs(elemvec); // read from DOFs this->asNode(dofval, nodevec); } // ------------------------------------------------------------------------------------------------- 
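With this change the auto-allocating overloads get a leading capital (asDofs becomes AsDofs, asNode becomes AsNode, and so on), while the overloads that write into a user-supplied array keep their lower-case names. A minimal sketch of the two call styles, not part of the patch; the tiny two-node "mesh" is made up for illustration.

#include <GooseFEM/Vector.h>
#include <xtensor/xtensor.hpp>
#include <iostream>

int main()
{
    // one two-node "element", two DOFs per node
    xt::xtensor<size_t,2> conn = {{0, 1}};
    xt::xtensor<size_t,2> dofs = {{0, 1}, {2, 3}};

    GooseFEM::Vector vector(conn, dofs);

    xt::xtensor<double,2> disp = {{0.1, 0.0}, {0.2, 0.0}};

    // allocating interface: capitalised name, returns a new "dofval"
    xt::xtensor<double,1> dofval = vector.AsDofs(disp);

    // in-place interface: lower-case name, writes into an existing "dofval"
    xt::xtensor<double,1> same = xt::empty<double>({vector.ndof()});
    vector.asDofs(disp, same);

    std::cout << dofval(2) << "\n"; // 0.2 (node 1, first component)

    return 0;
}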
-inline xt::xtensor Vector::asDofs(const xt::xtensor &nodevec) const +inline xt::xtensor Vector::AsDofs( + const xt::xtensor& nodevec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->asDofs(nodevec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::asDofs(const xt::xtensor &elemvec) const +inline xt::xtensor Vector::AsDofs( + const xt::xtensor& elemvec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->asDofs(elemvec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::asNode(const xt::xtensor &dofval) const +inline xt::xtensor Vector::AsNode( + const xt::xtensor& dofval) const { xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); this->asNode(dofval, nodevec); return nodevec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::asNode(const xt::xtensor &elemvec) const +inline xt::xtensor Vector::AsNode( + const xt::xtensor& elemvec) const { xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); this->asNode(elemvec, nodevec); return nodevec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::asElement(const xt::xtensor &dofval) const +inline xt::xtensor Vector::AsElement( + const xt::xtensor& dofval) const { xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); this->asElement(dofval, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::asElement(const xt::xtensor &nodevec) const +inline xt::xtensor Vector::AsElement( + const xt::xtensor& nodevec) const { xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); this->asElement(nodevec, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::assembleDofs( - const xt::xtensor &nodevec) const +inline xt::xtensor Vector::AssembleDofs( + const xt::xtensor& nodevec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->assembleDofs(nodevec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::assembleDofs( - const xt::xtensor &elemvec) const +inline xt::xtensor Vector::AssembleDofs( + const xt::xtensor& elemvec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->assembleDofs(elemvec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor Vector::assembleNode( - const xt::xtensor &elemvec) const +inline xt::xtensor Vector::AssembleNode( + const xt::xtensor& elemvec) const { xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); this->assembleNode(elemvec, nodevec); return nodevec; } // ------------------------------------------------------------------------------------------------- } // namespace ... 
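The header comments distinguish the "as*" conversions, which overwrite entries that occur more than once, from the "assemble*" conversions, which sum them. A small sketch that makes the difference concrete, again not part of the patch; the two-element "mesh" sharing node 1 is made up for illustration.

#include <GooseFEM/Vector.h>
#include <xtensor/xtensor.hpp>
#include <iostream>

int main()
{
    // two two-node "elements" sharing node 1, two DOFs per node
    xt::xtensor<size_t,2> conn = {{0, 1}, {1, 2}};
    xt::xtensor<size_t,2> dofs = {{0, 1}, {2, 3}, {4, 5}};

    GooseFEM::Vector vector(conn, dofs);

    // per-element nodal values, all equal to one
    xt::xtensor<double,3> elemvec = xt::ones<double>({2, 2, 2});

    // AsDofs: the shared node is overwritten -> DOFs 2 and 3 end up as 1.0
    xt::xtensor<double,1> converted = vector.AsDofs(elemvec);

    // AssembleDofs: contributions are summed -> DOFs 2 and 3 end up as 2.0
    xt::xtensor<double,1> assembled = vector.AssembleDofs(elemvec);

    std::cout << converted(2) << ", " << assembled(2) << "\n"; // 1, 2

    return 0;
}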
// ================================================================================================= #endif diff --git a/include/GooseFEM/VectorPartitioned.h b/include/GooseFEM/VectorPartitioned.h index 4cafea0..f49fa87 100644 --- a/include/GooseFEM/VectorPartitioned.h +++ b/include/GooseFEM/VectorPartitioned.h @@ -1,221 +1,275 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_VECTORPARTITIONED_H #define GOOSEFEM_VECTORPARTITIONED_H // ------------------------------------------------------------------------------------------------- #include "config.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- /* "nodevec" - nodal vectors - [nnode, ndim] "elemvec" - nodal vectors stored per element - [nelem, nne, ndim] "dofval" - DOF values - [ndof] "dofval_u" - DOF values (Unknown) "== dofval[iiu]" - [nnu] "dofval_p" - DOF values (Prescribed) "== dofval[iiu]" - [nnp] */ class VectorPartitioned { public: - // constructor + // Constructor VectorPartitioned() = default; - VectorPartitioned(const xt::xtensor &conn, const xt::xtensor &dofs, - const xt::xtensor &iip); + VectorPartitioned( + const xt::xtensor& conn, + const xt::xtensor& dofs, + const xt::xtensor& iip); - // dimensions + // Dimensions size_t nelem() const; // number of elements size_t nne() const; // number of nodes per element size_t nnode() const; // number of nodes size_t ndim() const; // number of dimensions size_t ndof() const; // number of DOFs size_t nnu() const; // number of unknown DOFs size_t nnp() const; // number of prescribed DOFs // DOF lists xt::xtensor dofs() const; // DOFs xt::xtensor iiu() const; // unknown DOFs xt::xtensor iip() const; // prescribed DOFs - // copy (part of) nodevec to another nodevec - // for "u" only unknown DOFs are copied, for "p" only prescribed DOFs are copied + // Copy (part of) nodevec/dofval to another nodevec/dofval - void copy(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const; + void copy( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const; // overwritted - void copy_u(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const; + void copy_u( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const; // "iiu" updated - void copy_p(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const; + void copy_p( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const; // "iip" updated - // convert to "dofval" (overwrite entries that occur more than once) -- (auto allocation below) + // Convert to "dofval" (overwrite entries that occur more than once) - void asDofs(const xt::xtensor &dofval_u, const xt::xtensor &dofval_p, - xt::xtensor &dofval) const; + void asDofs( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p, + xt::xtensor& dofval) const; - void asDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const; + void asDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const; - void asDofs(const xt::xtensor &elemvec, - xt::xtensor &dofval) const; + void asDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const; - void asDofs_u(const xt::xtensor &dofval, - xt::xtensor 
&dofval_u) const; + void asDofs_u( + const xt::xtensor& dofval, + xt::xtensor& dofval_u) const; - void asDofs_u(const xt::xtensor &nodevec, - xt::xtensor &dofval_u) const; + void asDofs_u( + const xt::xtensor& nodevec, + xt::xtensor& dofval_u) const; - void asDofs_u(const xt::xtensor &elemvec, - xt::xtensor &dofval_u) const; + void asDofs_u( + const xt::xtensor& elemvec, + xt::xtensor& dofval_u) const; - void asDofs_p(const xt::xtensor &dofval, - xt::xtensor &dofval_p) const; + void asDofs_p( + const xt::xtensor& dofval, + xt::xtensor& dofval_p) const; - void asDofs_p(const xt::xtensor &nodevec, - xt::xtensor &dofval_p) const; + void asDofs_p( + const xt::xtensor& nodevec, + xt::xtensor& dofval_p) const; - void asDofs_p(const xt::xtensor &elemvec, - xt::xtensor &dofval_p) const; + void asDofs_p( + const xt::xtensor& elemvec, + xt::xtensor& dofval_p) const; - // convert to "nodevec" (overwrite entries that occur more than once) -- (auto allocation below) + // Convert to "nodevec" (overwrite entries that occur more than once) -- (auto allocation below) - void asNode(const xt::xtensor &dofval_u, const xt::xtensor &dofval_p, - xt::xtensor &nodevec) const; + void asNode( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p, + xt::xtensor& nodevec) const; - void asNode(const xt::xtensor &dofval, - xt::xtensor &nodevec) const; + void asNode( + const xt::xtensor& dofval, + xt::xtensor& nodevec) const; - void asNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const; + void asNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const; - // convert to "elemvec" (overwrite entries that occur more than once) -- (auto allocation below) + // Convert to "elemvec" (overwrite entries that occur more than once) -- (auto allocation below) - void asElement(const xt::xtensor &dofval_u, const xt::xtensor &dofval_p, - xt::xtensor &elemvec) const; + void asElement( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p, + xt::xtensor& elemvec) const; - void asElement(const xt::xtensor &dofval, - xt::xtensor &elemvec) const; + void asElement( + const xt::xtensor& dofval, + xt::xtensor& elemvec) const; - void asElement(const xt::xtensor &nodevec, - xt::xtensor &elemvec) const; + void asElement( + const xt::xtensor& nodevec, + xt::xtensor& elemvec) const; - // assemble "dofval" (adds entries that occur more that once) -- (auto allocation below) + // Assemble "dofval" (adds entries that occur more that once) -- (auto allocation below) - void assembleDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const; + void assembleDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const; - void assembleDofs(const xt::xtensor &elemvec, - xt::xtensor &dofval) const; + void assembleDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const; - void assembleDofs_u(const xt::xtensor &nodevec, - xt::xtensor &dofval_u) const; + void assembleDofs_u( + const xt::xtensor& nodevec, + xt::xtensor& dofval_u) const; - void assembleDofs_u(const xt::xtensor &elemvec, - xt::xtensor &dofval_u) const; + void assembleDofs_u( + const xt::xtensor& elemvec, + xt::xtensor& dofval_u) const; - void assembleDofs_p(const xt::xtensor &nodevec, - xt::xtensor &dofval_p) const; + void assembleDofs_p( + const xt::xtensor& nodevec, + xt::xtensor& dofval_p) const; - void assembleDofs_p(const xt::xtensor &elemvec, - xt::xtensor &dofval_p) const; + void assembleDofs_p( + const xt::xtensor& elemvec, + xt::xtensor& dofval_p) const; - // assemble "nodevec" (adds entries that occur more that once) -- (auto allocation 
below) + // Assemble "nodevec" (adds entries that occur more that once) -- (auto allocation below) - void assembleNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const; + void assembleNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const; - // auto allocation of the functions above + // Auto-allocation of the functions above - xt::xtensor asDofs(const xt::xtensor &dofval_u, const xt::xtensor &dofval_p) const; + xt::xtensor AsDofs( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p) const; - xt::xtensor asDofs(const xt::xtensor &nodevec) const; + xt::xtensor AsDofs( + const xt::xtensor& nodevec) const; - xt::xtensor asDofs(const xt::xtensor &elemvec) const; + xt::xtensor AsDofs( + const xt::xtensor& elemvec) const; - xt::xtensor asDofs_u(const xt::xtensor &dofval) const; + xt::xtensor AsDofs_u( + const xt::xtensor& dofval) const; - xt::xtensor asDofs_u(const xt::xtensor &nodevec) const; + xt::xtensor AsDofs_u( + const xt::xtensor& nodevec) const; - xt::xtensor asDofs_u(const xt::xtensor &elemvec) const; + xt::xtensor AsDofs_u( + const xt::xtensor& elemvec) const; - xt::xtensor asDofs_p(const xt::xtensor &dofval) const; + xt::xtensor AsDofs_p( + const xt::xtensor& dofval) const; - xt::xtensor asDofs_p(const xt::xtensor &nodevec) const; + xt::xtensor AsDofs_p( + const xt::xtensor& nodevec) const; - xt::xtensor asDofs_p(const xt::xtensor &elemvec) const; + xt::xtensor AsDofs_p( + const xt::xtensor& elemvec) const; - xt::xtensor asNode(const xt::xtensor &dofval_u, const xt::xtensor &dofval_p) const; + xt::xtensor AsNode( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p) const; - xt::xtensor asNode(const xt::xtensor &dofval) const; + xt::xtensor AsNode( + const xt::xtensor& dofval) const; - xt::xtensor asNode(const xt::xtensor &elemvec) const; + xt::xtensor AsNode( + const xt::xtensor& elemvec) const; - xt::xtensor asElement(const xt::xtensor &dofval_u, const xt::xtensor &dofval_p) const; + xt::xtensor AsElement( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p) const; - xt::xtensor asElement(const xt::xtensor &dofval) const; + xt::xtensor AsElement( + const xt::xtensor& dofval) const; - xt::xtensor asElement(const xt::xtensor &nodevec) const; + xt::xtensor AsElement( + const xt::xtensor& nodevec) const; - xt::xtensor assembleDofs(const xt::xtensor &nodevec) const; + xt::xtensor AssembleDofs( + const xt::xtensor& nodevec) const; - xt::xtensor assembleDofs(const xt::xtensor &elemvec) const; + xt::xtensor AssembleDofs( + const xt::xtensor& elemvec) const; - xt::xtensor assembleDofs_u(const xt::xtensor &nodevec) const; + xt::xtensor AssembleDofs_u( + const xt::xtensor& nodevec) const; - xt::xtensor assembleDofs_u(const xt::xtensor &elemvec) const; + xt::xtensor AssembleDofs_u( + const xt::xtensor& elemvec) const; - xt::xtensor assembleDofs_p(const xt::xtensor &nodevec) const; + xt::xtensor AssembleDofs_p( + const xt::xtensor& nodevec) const; - xt::xtensor assembleDofs_p(const xt::xtensor &elemvec) const; + xt::xtensor AssembleDofs_p( + const xt::xtensor& elemvec) const; - xt::xtensor assembleNode(const xt::xtensor &elemvec) const; + xt::xtensor AssembleNode( + const xt::xtensor& elemvec) const; private: - // bookkeeping + // Bookkeeping xt::xtensor m_conn; // connectivity [nelem, nne ] xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] xt::xtensor m_iiu; // DOF-numbers that are unknown [nnu] xt::xtensor m_iip; // DOF-numbers that are prescribed [nnp] // DOFs per node, such that iiu = arange(nnu), iip = nnu + arange(nnp) xt::xtensor 
m_part; - // dimensions + // Dimensions size_t m_nelem; // number of elements size_t m_nne; // number of nodes per element size_t m_nnode; // number of nodes size_t m_ndim; // number of dimensions size_t m_ndof; // number of DOFs size_t m_nnu; // number of unknown DOFs size_t m_nnp; // number of prescribed DOFs }; // ------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #include "VectorPartitioned.hpp" // ================================================================================================= #endif diff --git a/include/GooseFEM/VectorPartitioned.hpp b/include/GooseFEM/VectorPartitioned.hpp index dac2556..8bc45c8 100644 --- a/include/GooseFEM/VectorPartitioned.hpp +++ b/include/GooseFEM/VectorPartitioned.hpp @@ -1,753 +1,806 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_VECTORPARTITIONED_HPP #define GOOSEFEM_VECTORPARTITIONED_HPP // ------------------------------------------------------------------------------------------------- #include "VectorPartitioned.h" +#include "Mesh.h" // ================================================================================================= namespace GooseFEM { // ------------------------------------------------------------------------------------------------- inline VectorPartitioned::VectorPartitioned( - const xt::xtensor &conn, - const xt::xtensor &dofs, - const xt::xtensor &iip) : + const xt::xtensor& conn, + const xt::xtensor& dofs, + const xt::xtensor& iip) : m_conn(conn), m_dofs(dofs), m_iip(iip) { - // mesh dimensions m_nelem = m_conn.shape()[0]; m_nne = m_conn.shape()[1]; m_nnode = m_dofs.shape()[0]; m_ndim = m_dofs.shape()[1]; - // list with unknown DOFs m_iiu = xt::setdiff1d(dofs, iip); - // dimensions of the system m_ndof = xt::amax(m_dofs)[0] + 1; m_nnp = m_iip.size(); m_nnu = m_iiu.size(); - // DOFs per node, such that iiu = arange(nnu), iip = nnu + arange(nnp) m_part = Mesh::Reorder({m_iiu, m_iip}).get(m_dofs); - // check consistency - assert( xt::amax(m_conn)[0] + 1 == m_nnode ); - assert( xt::amax(m_iip)[0] <= xt::amax(m_dofs)[0] ); - assert( m_ndof <= m_nnode * m_ndim ); + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(xt::amax(m_iip)[0] <= xt::amax(m_dofs)[0]); + assert(m_ndof <= m_nnode * m_ndim); } // ------------------------------------------------------------------------------------------------- -inline size_t VectorPartitioned::nelem() const { return m_nelem; } +inline size_t VectorPartitioned::nelem() const +{ return m_nelem; } -inline size_t VectorPartitioned::nne() const { return m_nne; } +inline size_t VectorPartitioned::nne() const +{ return m_nne; } -inline size_t VectorPartitioned::nnode() const { return m_nnode; } +inline size_t VectorPartitioned::nnode() const +{ return m_nnode; } -inline size_t VectorPartitioned::ndim() const { return m_ndim; } +inline size_t VectorPartitioned::ndim() const +{ return m_ndim; } -inline size_t VectorPartitioned::ndof() const { return m_ndof; } +inline size_t VectorPartitioned::ndof() const +{ return m_ndof; } -inline size_t VectorPartitioned::nnu() const { return m_nnu; } +inline size_t VectorPartitioned::nnu() const +{ return m_nnu; } -inline size_t 
VectorPartitioned::nnp() const { return m_nnp; } +inline size_t VectorPartitioned::nnp() const +{ return m_nnp; } -inline xt::xtensor VectorPartitioned::dofs() const { return m_dofs; } +inline xt::xtensor VectorPartitioned::dofs() const +{ return m_dofs; } -inline xt::xtensor VectorPartitioned::iiu() const { return m_iiu; } +inline xt::xtensor VectorPartitioned::iiu() const +{ return m_iiu; } -inline xt::xtensor VectorPartitioned::iip() const { return m_iip; } +inline xt::xtensor VectorPartitioned::iip() const +{ return m_iip; } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::copy(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const +inline void VectorPartitioned::copy( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const { - assert( nodevec_src .shape()[0] == m_nnode ); - assert( nodevec_src .shape()[1] == m_ndim ); - assert( nodevec_dest.shape()[0] == m_nnode ); - assert( nodevec_dest.shape()[1] == m_ndim ); + assert(nodevec_src .shape()[0] == m_nnode); + assert(nodevec_src .shape()[1] == m_ndim ); + assert(nodevec_dest.shape()[0] == m_nnode); + assert(nodevec_dest.shape()[1] == m_ndim ); xt::noalias(nodevec_dest) = nodevec_src; } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::copy_u(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const +inline void VectorPartitioned::copy_u( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const { - assert( nodevec_src .shape()[0] == m_nnode ); - assert( nodevec_src .shape()[1] == m_ndim ); - assert( nodevec_dest.shape()[0] == m_nnode ); - assert( nodevec_dest.shape()[1] == m_ndim ); + assert(nodevec_src .shape()[0] == m_nnode); + assert(nodevec_src .shape()[1] == m_ndim ); + assert(nodevec_dest.shape()[0] == m_nnode); + assert(nodevec_dest.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m,i) < m_nnu ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) < m_nnu) nodevec_dest(m,i) = nodevec_src(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::copy_p(const xt::xtensor &nodevec_src, - xt::xtensor &nodevec_dest) const +inline void VectorPartitioned::copy_p( + const xt::xtensor& nodevec_src, + xt::xtensor& nodevec_dest) const { - assert( nodevec_src .shape()[0] == m_nnode ); - assert( nodevec_src .shape()[1] == m_ndim ); - assert( nodevec_dest.shape()[0] == m_nnode ); - assert( nodevec_dest.shape()[1] == m_ndim ); + assert(nodevec_src .shape()[0] == m_nnode); + assert(nodevec_src .shape()[1] == m_ndim ); + assert(nodevec_dest.shape()[0] == m_nnode); + assert(nodevec_dest.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m,i) >= m_nnu ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) >= m_nnu) nodevec_dest(m,i) = nodevec_src(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs(const xt::xtensor &dofval_u, - const xt::xtensor &dofval_p, xt::xtensor &dofval) const +inline void VectorPartitioned::asDofs( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p, 
+ xt::xtensor& dofval) const { - assert( dofval_u.size() == m_nnu ); - assert( dofval_p.size() == m_nnp ); - assert( dofval.size() == m_ndof ); + assert(dofval_u.size() == m_nnu ); + assert(dofval_p.size() == m_nnp ); + assert(dofval.size() == m_ndof); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) dofval(m_iiu(d)) = dofval_u(d); + for (size_t d = 0 ; d < m_nnu ; ++d) + dofval(m_iiu(d)) = dofval_u(d); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) dofval(m_iip(d)) = dofval_p(d); + for (size_t d = 0 ; d < m_nnp ; ++d) + dofval(m_iip(d)) = dofval_p(d); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const +inline void VectorPartitioned::asDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval.size() == m_ndof ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m,i)) = nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs_u(const xt::xtensor &dofval, - xt::xtensor &dofval_u) const +inline void VectorPartitioned::asDofs_u( + const xt::xtensor& dofval, + xt::xtensor& dofval_u) const { - assert( dofval.size() == m_ndof ); - assert( dofval_u.size() == m_nnu ); + assert(dofval.size() == m_ndof); + assert(dofval_u.size() == m_nnu ); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnu ; ++d ) + for (size_t d = 0 ; d < m_nnu ; ++d) dofval_u(d) = dofval(m_iiu(d)); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs_u(const xt::xtensor &nodevec, - xt::xtensor &dofval_u) const +inline void VectorPartitioned::asDofs_u( + const xt::xtensor& nodevec, + xt::xtensor& dofval_u) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval_u.size() == m_nnu ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval_u.size() == m_nnu ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m,i) < m_nnu ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) < m_nnu) dofval_u(m_part(m,i)) = nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs_p(const xt::xtensor &dofval, - xt::xtensor &dofval_p) const +inline void VectorPartitioned::asDofs_p( + const xt::xtensor& dofval, + xt::xtensor& dofval_p) const { - assert( dofval.size() == m_ndof ); - assert( dofval_p.size() == m_nnp ); + assert(dofval.size() == m_ndof); + assert(dofval_p.size() == m_nnp ); #pragma omp parallel for - for ( size_t d = 0 ; d < m_nnp ; ++d ) + for (size_t d = 0 ; d < m_nnp ; ++d) dofval_p(d) = dofval(m_iip(d)); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs_p(const xt::xtensor &nodevec, - 
xt::xtensor &dofval_p) const +inline void VectorPartitioned::asDofs_p( + const xt::xtensor& nodevec, + xt::xtensor& dofval_p) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval_p.size() == m_nnp ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval_p.size() == m_nnp ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m,i) >= m_nnu ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) >= m_nnu) dofval_p(m_part(m,i)-m_nnu) = nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs(const xt::xtensor &elemvec, - xt::xtensor &dofval) const +inline void VectorPartitioned::asDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval.size() == m_ndof ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m_conn(e,m),i)) = elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs_u(const xt::xtensor &elemvec, - xt::xtensor &dofval_u) const +inline void VectorPartitioned::asDofs_u( + const xt::xtensor& elemvec, + xt::xtensor& dofval_u) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval_u.size() == m_nnu ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval_u.size() == m_nnu ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m_conn(e,m),i) < m_nnu ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m_conn(e,m),i) < m_nnu) dofval_u(m_part(m_conn(e,m),i)) = elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asDofs_p(const xt::xtensor &elemvec, - xt::xtensor &dofval_p) const +inline void VectorPartitioned::asDofs_p( + const xt::xtensor& elemvec, + xt::xtensor& dofval_p) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval_p.size() == m_nnp ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval_p.size() == m_nnp ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m_conn(e,m),i) >= m_nnu ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i 
= 0 ; i < m_ndim ; ++i) + if (m_part(m_conn(e,m),i) >= m_nnu) dofval_p(m_part(m_conn(e,m),i)-m_nnu) = elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asNode(const xt::xtensor &dofval, - xt::xtensor &nodevec) const +inline void VectorPartitioned::asNode( + const xt::xtensor& dofval, + xt::xtensor& nodevec) const { - assert( dofval.size() == m_ndof ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(dofval.size() == m_ndof ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) nodevec(m,i) = dofval(m_dofs(m,i)); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asNode(const xt::xtensor &dofval_u, - const xt::xtensor &dofval_p, xt::xtensor &nodevec) const +inline void VectorPartitioned::asNode( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p, + xt::xtensor& nodevec) const { - assert( dofval_u.size() == m_nnu ); - assert( dofval_p.size() == m_nnp ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(dofval_u.size() == m_nnu ); + assert(dofval_p.size() == m_nnp ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t m = 0 ; m < m_nnode ; ++m ) { - for ( size_t i = 0 ; i < m_ndim ; ++i ) { - if ( m_part(m,i) < m_nnu ) nodevec(m,i) = dofval_u(m_part(m,i) ); - else nodevec(m,i) = dofval_p(m_part(m,i)-m_nnu); + for (size_t m = 0 ; m < m_nnode ; ++m) { + for (size_t i = 0 ; i < m_ndim ; ++i) { + if (m_part(m,i) < m_nnu) + nodevec(m,i) = dofval_u(m_part(m,i)); + else + nodevec(m,i) = dofval_p(m_part(m,i)-m_nnu); } } } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const +inline void VectorPartitioned::asNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) nodevec(m_conn(e,m),i) = elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asElement(const xt::xtensor &dofval, - xt::xtensor &elemvec) const +inline void VectorPartitioned::asElement( + const xt::xtensor& dofval, + xt::xtensor& elemvec) const { - assert( dofval.size() == m_ndof ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(dofval.size() == m_ndof ); + 
assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) elemvec(e,m,i) = dofval(m_dofs(m_conn(e,m),i)); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::asElement(const xt::xtensor &dofval_u, - const xt::xtensor &dofval_p, xt::xtensor &elemvec) const +inline void VectorPartitioned::asElement( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p, + xt::xtensor& elemvec) const { - assert( dofval_u.size() == m_nnu ); - assert( dofval_p.size() == m_nnp ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(dofval_u.size() == m_nnu ); + assert(dofval_p.size() == m_nnp ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) { - for ( size_t m = 0 ; m < m_nne ; ++m ) { - for ( size_t i = 0 ; i < m_ndim ; ++i ) { - if ( m_part(m_conn(e,m),i) &nodevec, - xt::xtensor &elemvec) const +inline void VectorPartitioned::asElement( + const xt::xtensor& nodevec, + xt::xtensor& elemvec) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); #pragma omp parallel for - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) elemvec(e,m,i) = nodevec(m_conn(e,m),i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::assembleDofs(const xt::xtensor &nodevec, - xt::xtensor &dofval) const +inline void VectorPartitioned::assembleDofs( + const xt::xtensor& nodevec, + xt::xtensor& dofval) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval.size() == m_ndof ); dofval.fill(0.0); - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m,i)) += nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::assembleDofs_u(const xt::xtensor &nodevec, - xt::xtensor &dofval_u) const +inline void VectorPartitioned::assembleDofs_u( + const xt::xtensor& nodevec, + xt::xtensor& dofval_u) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval_u.size() == m_nnu ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == 
m_ndim ); + assert(dofval_u.size() == m_nnu ); dofval_u.fill(0.0); - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m,i) < m_nnu ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) < m_nnu) dofval_u(m_part(m,i)) += nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::assembleDofs_p(const xt::xtensor &nodevec, - xt::xtensor &dofval_p) const +inline void VectorPartitioned::assembleDofs_p( + const xt::xtensor& nodevec, + xt::xtensor& dofval_p) const { - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); - assert( dofval_p.size() == m_nnp ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval_p.size() == m_nnp ); dofval_p.fill(0.0); - for ( size_t m = 0 ; m < m_nnode ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m,i) >= m_nnu ) + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m,i) >= m_nnu) dofval_p(m_part(m,i)-m_nnu) += nodevec(m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::assembleDofs(const xt::xtensor &elemvec, - xt::xtensor &dofval) const +inline void VectorPartitioned::assembleDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval.size() == m_ndof ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval.size() == m_ndof ); dofval.fill(0.0); - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) dofval(m_dofs(m_conn(e,m),i)) += elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::assembleDofs_u(const xt::xtensor &elemvec, - xt::xtensor &dofval_u) const +inline void VectorPartitioned::assembleDofs_u( + const xt::xtensor& elemvec, + xt::xtensor& dofval_u) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval_u.size() == m_nnu ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval_u.size() == m_nnu ); dofval_u.fill(0.0); - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m_conn(e,m),i) < m_nnu ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m_conn(e,m),i) < m_nnu) dofval_u(m_part(m_conn(e,m),i)) += elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::assembleDofs_p(const xt::xtensor &elemvec, - xt::xtensor &dofval_p) const +inline void VectorPartitioned::assembleDofs_p( + const xt::xtensor& elemvec, + xt::xtensor& dofval_p) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] 
== m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( dofval_p.size() == m_nnp ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval_p.size() == m_nnp ); dofval_p.fill(0.0); - for ( size_t e = 0 ; e < m_nelem ; ++e ) - for ( size_t m = 0 ; m < m_nne ; ++m ) - for ( size_t i = 0 ; i < m_ndim ; ++i ) - if ( m_part(m_conn(e,m),i) >= m_nnu ) + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_part(m_conn(e,m),i) >= m_nnu) dofval_p(m_part(m_conn(e,m),i)-m_nnu) += elemvec(e,m,i); } // ------------------------------------------------------------------------------------------------- -inline void VectorPartitioned::assembleNode(const xt::xtensor &elemvec, - xt::xtensor &nodevec) const +inline void VectorPartitioned::assembleNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const { - assert( elemvec.shape()[0] == m_nelem ); - assert( elemvec.shape()[1] == m_nne ); - assert( elemvec.shape()[2] == m_ndim ); - assert( nodevec.shape()[0] == m_nnode ); - assert( nodevec.shape()[1] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); - // assemble to DOFs - xt::xtensor dofval = this->assembleDofs(elemvec); + xt::xtensor dofval = this->AssembleDofs(elemvec); - // read from DOFs this->asNode(dofval, nodevec); } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs(const xt::xtensor &dofval_u, - const xt::xtensor &dofval_p) const +inline xt::xtensor VectorPartitioned::AsDofs( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p) const { xt::xtensor dofval = xt::empty({m_ndof}); this->asDofs(dofval_u, dofval_p, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs(const xt::xtensor &nodevec) const +inline xt::xtensor VectorPartitioned::AsDofs( + const xt::xtensor& nodevec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->asDofs(nodevec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs_u(const xt::xtensor &dofval) const +inline xt::xtensor VectorPartitioned::AsDofs_u( + const xt::xtensor& dofval) const { xt::xtensor dofval_u = xt::empty({m_nnu}); this->asDofs_u(dofval, dofval_u); return dofval_u; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs_u(const xt::xtensor &nodevec) const +inline xt::xtensor VectorPartitioned::AsDofs_u( + const xt::xtensor& nodevec) const { xt::xtensor dofval_u = xt::empty({m_nnu}); this->asDofs_u(nodevec, dofval_u); return dofval_u; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs_p(const xt::xtensor &dofval) const +inline xt::xtensor VectorPartitioned::AsDofs_p( + const xt::xtensor& dofval) const { xt::xtensor dofval_p = xt::empty({m_nnp}); this->asDofs_p(dofval, dofval_p); return dofval_p; } // ------------------------------------------------------------------------------------------------- -inline 
xt::xtensor VectorPartitioned::asDofs_p(const xt::xtensor &nodevec) const +inline xt::xtensor VectorPartitioned::AsDofs_p( + const xt::xtensor& nodevec) const { xt::xtensor dofval_p = xt::empty({m_nnp}); this->asDofs_p(nodevec, dofval_p); return dofval_p; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs(const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AsDofs( + const xt::xtensor& elemvec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->asDofs(elemvec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs_u(const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AsDofs_u( + const xt::xtensor& elemvec) const { xt::xtensor dofval_u = xt::empty({m_nnu}); this->asDofs_u(elemvec, dofval_u); return dofval_u; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asDofs_p(const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AsDofs_p( + const xt::xtensor& elemvec) const { xt::xtensor dofval_p = xt::empty({m_nnp}); this->asDofs_p(elemvec, dofval_p); return dofval_p; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asNode(const xt::xtensor &dofval) const +inline xt::xtensor VectorPartitioned::AsNode( + const xt::xtensor& dofval) const { xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); this->asNode(dofval, nodevec); return nodevec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asNode(const xt::xtensor &dofval_u, - const xt::xtensor &dofval_p) const +inline xt::xtensor VectorPartitioned::AsNode( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p) const { xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); this->asNode(dofval_u, dofval_p, nodevec); return nodevec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asNode(const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AsNode( + const xt::xtensor& elemvec) const { xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); this->asNode(elemvec, nodevec); return nodevec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asElement(const xt::xtensor &dofval) const +inline xt::xtensor VectorPartitioned::AsElement( + const xt::xtensor& dofval) const { xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); this->asElement(dofval, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asElement(const xt::xtensor &dofval_u, - const xt::xtensor &dofval_p) const +inline xt::xtensor VectorPartitioned::AsElement( + const xt::xtensor& dofval_u, + const xt::xtensor& dofval_p) const { xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); this->asElement(dofval_u, dofval_p, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::asElement(const xt::xtensor &nodevec) const 
+inline xt::xtensor VectorPartitioned::AsElement( + const xt::xtensor& nodevec) const { xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); this->asElement(nodevec, elemvec); return elemvec; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::assembleDofs( - const xt::xtensor &nodevec) const +inline xt::xtensor VectorPartitioned::AssembleDofs( + const xt::xtensor& nodevec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->assembleDofs(nodevec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::assembleDofs_u( - const xt::xtensor &nodevec) const +inline xt::xtensor VectorPartitioned::AssembleDofs_u( + const xt::xtensor& nodevec) const { xt::xtensor dofval_u = xt::empty({m_nnu}); this->assembleDofs_u(nodevec, dofval_u); return dofval_u; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::assembleDofs_p( - const xt::xtensor &nodevec) const +inline xt::xtensor VectorPartitioned::AssembleDofs_p( + const xt::xtensor& nodevec) const { xt::xtensor dofval_p = xt::empty({m_nnp}); this->assembleDofs_p(nodevec, dofval_p); return dofval_p; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::assembleDofs( - const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AssembleDofs( + const xt::xtensor& elemvec) const { xt::xtensor dofval = xt::empty({m_ndof}); this->assembleDofs(elemvec, dofval); return dofval; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::assembleDofs_u( - const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AssembleDofs_u( + const xt::xtensor& elemvec) const { xt::xtensor dofval_u = xt::empty({m_nnu}); this->assembleDofs_u(elemvec, dofval_u); return dofval_u; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::assembleDofs_p( - const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AssembleDofs_p( + const xt::xtensor& elemvec) const { xt::xtensor dofval_p = xt::empty({m_nnp}); this->assembleDofs_p(elemvec, dofval_p); return dofval_p; } // ------------------------------------------------------------------------------------------------- -inline xt::xtensor VectorPartitioned::assembleNode( - const xt::xtensor &elemvec) const +inline xt::xtensor VectorPartitioned::AssembleNode( + const xt::xtensor& elemvec) const { xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); this->assembleNode(elemvec, nodevec); return nodevec; } // ------------------------------------------------------------------------------------------------- } // namespace ... // ================================================================================================= #endif diff --git a/include/GooseFEM/VectorPartitionedTyings.h b/include/GooseFEM/VectorPartitionedTyings.h new file mode 100644 index 0000000..1ede0f6 --- /dev/null +++ b/include/GooseFEM/VectorPartitionedTyings.h @@ -0,0 +1,159 @@ +/* ================================================================================================= + +(c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM + +================================================================================================= */ + +#ifndef GOOSEFEM_VECTORPARTITIONEDTYINGS_H +#define GOOSEFEM_VECTORPARTITIONEDTYINGS_H + +// ------------------------------------------------------------------------------------------------- + +#include "config.h" + +#include +#include + +// ================================================================================================= + +namespace GooseFEM { + +// ------------------------------------------------------------------------------------------------- + +/* + "nodevec" - nodal vectors - [nnode, ndim] + "elemvec" - nodal vectors stored per element - [nelem, nne, ndim] + "dofval" - DOF values - [ndof] + "dofval_u" - DOF values (Unknown) "== dofval[iiu]" - [nnu] + "dofval_p" - DOF values (Prescribed) "== dofval[iip]" - [nnp] +*/ + +class VectorPartitionedTyings +{ +public: + + // Constructor + + VectorPartitionedTyings() = default; + + VectorPartitionedTyings( + const xt::xtensor& conn, + const xt::xtensor& dofs, + const Eigen::SparseMatrix& Cdu, + const Eigen::SparseMatrix& Cdp, + const Eigen::SparseMatrix& Cdi); + + // Dimensions + + size_t nelem() const; // number of elements + size_t nne() const; // number of nodes per element + size_t nnode() const; // number of nodes + size_t ndim() const; // number of dimensions + size_t ndof() const; // number of DOFs + size_t nnu() const; // number of independent, unknown DOFs + size_t nnp() const; // number of independent, prescribed DOFs + size_t nni() const; // number of independent DOFs + size_t nnd() const; // number of dependent DOFs + + // DOF lists + + xt::xtensor dofs() const; // DOFs + xt::xtensor iiu() const; // independent, unknown DOFs + xt::xtensor iip() const; // independent, prescribed DOFs + xt::xtensor iii() const; // independent DOFs + xt::xtensor iid() const; // dependent DOFs + + // Copy (part of) nodevec/dofval to another nodevec/dofval + + void copy_p( + const xt::xtensor& dofval_src, + xt::xtensor& dofval_dest) const; // "iip" updated + + // Convert to "dofval" (overwrite entries that occur more than once) + + void asDofs_i( + const xt::xtensor& nodevec, + xt::xtensor& dofval_i, + bool apply_tyings=true) const; // overwritten + + // Convert to "nodevec" (overwrite entries that occur more than once) -- (auto allocation below) + + void asNode( + const xt::xtensor& dofval, + xt::xtensor& nodevec) const; + + // Convert to "elemvec" (overwrite entries that occur more than once) -- (auto allocation below) + + void asElement( + const xt::xtensor& nodevec, + xt::xtensor& elemvec) const; + + // Assemble "dofval" (adds entries that occur more than once) -- (auto allocation below) + + void assembleDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const; + + // Assemble "nodevec" (adds entries that occur more than once) -- (auto allocation below) + + void assembleNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const; + + // Auto-allocation of the functions above + + xt::xtensor AsElement( + const xt::xtensor& nodevec) const; + + xt::xtensor AssembleDofs( + const xt::xtensor& elemvec) const; + + xt::xtensor AssembleNode( + const xt::xtensor& elemvec) const; + +private: + + // Bookkeeping + xt::xtensor m_conn; // connectivity [nelem, nne ] + xt::xtensor m_dofs; // DOF-numbers per node [nnode, ndim] + xt::xtensor m_iiu; // unknown DOFs [nnu] + xt::xtensor m_iip; // prescribed DOFs [nnp] + xt::xtensor m_iid; // dependent DOFs [nnd] +
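+ // DOF ordering implied by the constructor: "iiu" = [0, nnu), "iip" = [nnu, nni), "iid" = [nni, ndof) +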
+ // Dimensions + size_t m_nelem; // number of elements + size_t m_nne; // number of nodes per element + size_t m_nnode; // number of nodes + size_t m_ndim; // number of dimensions + size_t m_ndof; // number of DOFs + size_t m_nnu; // number of independent, unknown DOFs + size_t m_nnp; // number of independent, prescribed DOFs + size_t m_nni; // number of independent DOFs + size_t m_nnd; // number of dependent DOFs + + // Tyings + Eigen::SparseMatrix m_Cdu; + Eigen::SparseMatrix m_Cdp; + Eigen::SparseMatrix m_Cdi; + Eigen::SparseMatrix m_Cud; + Eigen::SparseMatrix m_Cpd; + Eigen::SparseMatrix m_Cid; + + // equivalent Eigen functions + + Eigen::VectorXd Eigen_asDofs_d( + const xt::xtensor& nodevec) const; + +}; + +// ------------------------------------------------------------------------------------------------- + +} // namespace ... + +// ================================================================================================= + +#include "VectorPartitionedTyings.hpp" + +// ================================================================================================= + +#endif diff --git a/include/GooseFEM/VectorPartitionedTyings.hpp b/include/GooseFEM/VectorPartitionedTyings.hpp new file mode 100644 index 0000000..25a6965 --- /dev/null +++ b/include/GooseFEM/VectorPartitionedTyings.hpp @@ -0,0 +1,278 @@ +/* ================================================================================================= + +(c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM + +================================================================================================= */ + +#ifndef GOOSEFEM_VECTORPARTITIONEDTYINGS_HPP +#define GOOSEFEM_VECTORPARTITIONEDTYINGS_HPP + +// ------------------------------------------------------------------------------------------------- + +#include "VectorPartitionedTyings.h" + +// ================================================================================================= + +namespace GooseFEM { + +// ------------------------------------------------------------------------------------------------- + +inline VectorPartitionedTyings::VectorPartitionedTyings( + const xt::xtensor &conn, + const xt::xtensor &dofs, + const Eigen::SparseMatrix& Cdu, + const Eigen::SparseMatrix& Cdp, + const Eigen::SparseMatrix& Cdi) : + m_conn(conn), + m_dofs(dofs), + m_Cdu(Cdu), + m_Cdp(Cdp), + m_Cdi(Cdi) +{ + assert(Cdu.rows() == Cdp.rows()); + assert(Cdi.rows() == Cdp.rows()); + + m_nnu = static_cast(m_Cdu.cols()); + m_nnp = static_cast(m_Cdp.cols()); + m_nnd = static_cast(m_Cdp.rows()); + m_nni = m_nnu + m_nnp; + m_ndof = m_nni + m_nnd; + + m_iiu = xt::arange(m_nnu); + m_iip = xt::arange(m_nnp) + m_nnu; + m_iid = xt::arange(m_nnd) + m_nni; + + m_nelem = m_conn.shape()[0]; + m_nne = m_conn.shape()[1]; + m_nnode = m_dofs.shape()[0]; + m_ndim = m_dofs.shape()[1]; + + m_Cud = m_Cdu.transpose(); + m_Cpd = m_Cdp.transpose(); + m_Cid = m_Cdi.transpose(); + + assert(static_cast(m_Cdi.cols()) == m_nni); + assert(xt::amax(m_conn)[0] + 1 == m_nnode); + assert(m_ndof <= m_nnode * m_ndim); + assert(m_ndof == xt::amax(m_dofs)[0] + 1); +} + +// ------------------------------------------------------------------------------------------------- + +inline size_t VectorPartitionedTyings::nelem() const +{ return m_nelem; } + +inline size_t VectorPartitionedTyings::nne() const +{ return m_nne; } + +inline size_t VectorPartitionedTyings::nnode() const +{ return m_nnode; } + +inline size_t VectorPartitionedTyings::ndim() const +{ return m_ndim; } + +inline 
size_t VectorPartitionedTyings::ndof() const +{ return m_ndof; } + +inline size_t VectorPartitionedTyings::nnu() const +{ return m_nnu; } + +inline size_t VectorPartitionedTyings::nnp() const +{ return m_nnp; } + +inline size_t VectorPartitionedTyings::nni() const +{ return m_nni; } + +inline size_t VectorPartitionedTyings::nnd() const +{ return m_nnd; } + +inline xt::xtensor VectorPartitionedTyings::dofs() const +{ return m_dofs; } + +inline xt::xtensor VectorPartitionedTyings::iiu() const +{ return m_iiu; } + +inline xt::xtensor VectorPartitionedTyings::iip() const +{ return m_iip; } + +inline xt::xtensor VectorPartitionedTyings::iii() const +{ return xt::arange(m_nni); } + +inline xt::xtensor VectorPartitionedTyings::iid() const +{ return m_iid; } + +// ------------------------------------------------------------------------------------------------- + +inline void VectorPartitionedTyings::copy_p( + const xt::xtensor& dofval_src, + xt::xtensor& dofval_dest) const +{ + assert(dofval_src.size() == m_ndof || dofval_src.size() == m_nni); + assert(dofval_dest.size() == m_ndof || dofval_dest.size() == m_nni); + + #pragma omp parallel for + for (size_t i = m_nnu; i < m_nni; ++i) + dofval_dest(i) = dofval_src(i); +} + +// ------------------------------------------------------------------------------------------------- + +inline void VectorPartitionedTyings::asDofs_i( + const xt::xtensor& nodevec, + xt::xtensor& dofval_i, + bool apply_tyings) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(dofval_i.size() == m_nni ); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_dofs(m,i) < m_nni) + dofval_i(m_dofs(m,i)) = nodevec(m,i); + + if (!apply_tyings) + return; + + Eigen::VectorXd Dofval_d = this->Eigen_asDofs_d(nodevec); + Eigen::VectorXd Dofval_i = m_Cid * Dofval_d; + + #pragma omp parallel for + for (size_t i = 0 ; i < m_nni ; ++i) + dofval_i(i) += Dofval_i(i); +} + +// ------------------------------------------------------------------------------------------------- + +inline void VectorPartitionedTyings::asNode( + const xt::xtensor& dofval, + xt::xtensor& nodevec) const +{ + assert(dofval.size() == m_ndof ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + nodevec(m,i) = dofval(m_dofs(m,i)); +} + +// ------------------------------------------------------------------------------------------------- + +inline void VectorPartitionedTyings::asElement( + const xt::xtensor& nodevec, + xt::xtensor& elemvec) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + + #pragma omp parallel for + for (size_t e = 0 ; e < m_nelem ; ++e) + for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + elemvec(e,m,i) = nodevec(m_conn(e,m),i); +} + +// ------------------------------------------------------------------------------------------------- + +inline void VectorPartitionedTyings::assembleDofs( + const xt::xtensor& elemvec, + xt::xtensor& dofval) const +{ + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(dofval.size() == m_ndof ); + + dofval.fill(0.0); + + for (size_t e = 0 ; e < m_nelem ; ++e) + 
for (size_t m = 0 ; m < m_nne ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + dofval(m_dofs(m_conn(e,m),i)) += elemvec(e,m,i); +} + +// ------------------------------------------------------------------------------------------------- + +inline void VectorPartitionedTyings::assembleNode( + const xt::xtensor& elemvec, + xt::xtensor& nodevec) const +{ + assert(elemvec.shape()[0] == m_nelem); + assert(elemvec.shape()[1] == m_nne ); + assert(elemvec.shape()[2] == m_ndim ); + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + xt::xtensor dofval = this->AssembleDofs(elemvec); + + this->asNode(dofval, nodevec); +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor VectorPartitionedTyings::AsElement( + const xt::xtensor& nodevec) const +{ + xt::xtensor elemvec = xt::empty({m_nelem, m_nne, m_ndim}); + + this->asElement(nodevec, elemvec); + + return elemvec; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor VectorPartitionedTyings::AssembleDofs( + const xt::xtensor& elemvec) const +{ + xt::xtensor dofval = xt::empty({m_ndof}); + + this->assembleDofs(elemvec, dofval); + + return dofval; +} + +// ------------------------------------------------------------------------------------------------- + +inline xt::xtensor VectorPartitionedTyings::AssembleNode( + const xt::xtensor& elemvec) const +{ + xt::xtensor nodevec = xt::empty({m_nnode, m_ndim}); + + this->assembleNode(elemvec, nodevec); + + return nodevec; +} + +// ------------------------------------------------------------------------------------------------- + +inline Eigen::VectorXd VectorPartitionedTyings::Eigen_asDofs_d( + const xt::xtensor& nodevec) const +{ + assert(nodevec.shape()[0] == m_nnode); + assert(nodevec.shape()[1] == m_ndim ); + + Eigen::VectorXd dofval_d(m_nnd,1); + + #pragma omp parallel for + for (size_t m = 0 ; m < m_nnode ; ++m) + for (size_t i = 0 ; i < m_ndim ; ++i) + if (m_dofs(m,i) >= m_nni) + dofval_d(m_dofs(m,i)-m_nni) = nodevec(m,i); + + return dofval_d; +} + +// ------------------------------------------------------------------------------------------------- + +} // namespace ... + +// ================================================================================================= + +#endif diff --git a/include/GooseFEM/config.h b/include/GooseFEM/config.h index d4eeb37..2c33e6c 100644 --- a/include/GooseFEM/config.h +++ b/include/GooseFEM/config.h @@ -1,66 +1,65 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #ifndef GOOSEFEM_CONFIG_H #define GOOSEFEM_CONFIG_H // ================================================================================================= #define _USE_MATH_DEFINES // to use "M_PI" from "math.h" #include #include #include #include #include #include #include #include #include #include #include #include -#include #include +#include #include -#include -#include +#include #include -#include -#include -#include -#include #include -#include +#include #include +#include +#include +#include +#include using namespace xt::placeholders; // ================================================================================================= #define GOOSEFEM_WORLD_VERSION 0 -#define GOOSEFEM_MAJOR_VERSION 1 -#define GOOSEFEM_MINOR_VERSION 1 +#define GOOSEFEM_MAJOR_VERSION 2 +#define GOOSEFEM_MINOR_VERSION 0 #define GOOSEFEM_VERSION_AT_LEAST(x,y,z) \ (GOOSEFEM_WORLD_VERSION>x || (GOOSEFEM_WORLD_VERSION>=x && \ (GOOSEFEM_MAJOR_VERSION>y || (GOOSEFEM_MAJOR_VERSION>=y && \ GOOSEFEM_MINOR_VERSION>=z)))) #define GOOSEFEM_VERSION(x,y,z) \ (GOOSEFEM_WORLD_VERSION==x && \ GOOSEFEM_MAJOR_VERSION==y && \ GOOSEFEM_MINOR_VERSION==z) // ================================================================================================= // dummy operation that can be use to suppress the "unused parameter" warnings #define UNUSED(p) ( (void)(p) ) // ================================================================================================= #endif diff --git a/python/ElementHex8.hpp b/python/ElementHex8.hpp index 175ae37..ae9289a 100644 --- a/python/ElementHex8.hpp +++ b/python/ElementHex8.hpp @@ -1,73 +1,73 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #include #include #include #include #include "../include/GooseFEM/GooseFEM.h" // ================================================================================================= namespace py = pybind11; // ================================================================================================= void init_ElementHex8(py::module &m) { py::class_(m, "Quadrature") .def(py::init &>(), "Quadrature", py::arg("x")) .def(py::init &, const xt::xtensor &, const xt::xtensor &>(), "Quadrature", py::arg("x"), py::arg("xi"), py::arg("w")) .def("nelem", &GooseFEM::Element::Hex8::Quadrature::nelem, "Return number of elements" ) .def("nne" , &GooseFEM::Element::Hex8::Quadrature::nne , "Return number of nodes per element" ) .def("ndim" , &GooseFEM::Element::Hex8::Quadrature::ndim , "Return number of dimensions" ) .def("nip" , &GooseFEM::Element::Hex8::Quadrature::nip , "Return number of integration points") - .def("dV" , py::overload_cast<>(&GooseFEM::Element::Hex8::Quadrature::dV , py::const_), "Integration point volume (qscalar)") - .def("dVtensor", py::overload_cast<>(&GooseFEM::Element::Hex8::Quadrature::dVtensor, py::const_), "Integration point volume (qtensor)") + .def("DV", py::overload_cast(&GooseFEM::Element::Hex8::Quadrature::DV, py::const_), "Integration point volume (qtensor)") + .def("DV", py::overload_cast<> (&GooseFEM::Element::Hex8::Quadrature::DV, py::const_), "Integration point volume (qscalar)") - .def("gradN_vector" , py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::gradN_vector , py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) - .def("gradN_vector_T" , py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::gradN_vector_T , py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) - .def("symGradN_vector", py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::symGradN_vector, py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) + .def("GradN_vector" , py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::GradN_vector , py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) + .def("GradN_vector_T" , py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::GradN_vector_T , py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) + .def("SymGradN_vector", py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::SymGradN_vector, py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) - .def("int_N_scalar_NT_dV" , py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::int_N_scalar_NT_dV , py::const_), "Integration, returns 'elemmat'", py::arg("qscalar")) - .def("int_gradN_dot_tensor2_dV", py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::int_gradN_dot_tensor2_dV, py::const_), "Integration, returns 'elemvec'", py::arg("qtensor")) + .def("Int_N_scalar_NT_dV" , py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::Int_N_scalar_NT_dV , py::const_), "Integration, returns 'elemmat'", py::arg("qscalar")) + .def("Int_gradN_dot_tensor2_dV", py::overload_cast &>(&GooseFEM::Element::Hex8::Quadrature::Int_gradN_dot_tensor2_dV, py::const_), "Integration, returns 'elemvec'", py::arg("qtensor")) .def("__repr__", [](const GooseFEM::Element::Hex8::Quadrature &){ return ""; }); } // ------------------------------------------------------------------------------------------------- void 
init_ElementHex8Gauss(py::module &m) { m.def("nip", &GooseFEM::Element::Hex8::Gauss::nip, "Return number of integration point" ); m.def("xi" , &GooseFEM::Element::Hex8::Gauss::xi , "Return integration point coordinates"); m.def("w" , &GooseFEM::Element::Hex8::Gauss::w , "Return integration point weights" ); } // ------------------------------------------------------------------------------------------------- void init_ElementHex8Nodal(py::module &m) { m.def("nip", &GooseFEM::Element::Hex8::Nodal::nip, "Return number of integration point" ); m.def("xi" , &GooseFEM::Element::Hex8::Nodal::xi , "Return integration point coordinates"); m.def("w" , &GooseFEM::Element::Hex8::Nodal::w , "Return integration point weights" ); } // ================================================================================================= diff --git a/python/ElementQuad4.hpp b/python/ElementQuad4.hpp index 8bc0943..b8c77be 100644 --- a/python/ElementQuad4.hpp +++ b/python/ElementQuad4.hpp @@ -1,73 +1,73 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #include #include #include #include #include "../include/GooseFEM/GooseFEM.h" // ================================================================================================= namespace py = pybind11; // ================================================================================================= void init_ElementQuad4(py::module &m) { py::class_(m, "Quadrature") .def(py::init &>(), "Quadrature", py::arg("x")) .def(py::init &, const xt::xtensor &, const xt::xtensor &>(), "Quadrature", py::arg("x"), py::arg("xi"), py::arg("w")) .def("nelem", &GooseFEM::Element::Quad4::Quadrature::nelem, "Return number of elements" ) .def("nne" , &GooseFEM::Element::Quad4::Quadrature::nne , "Return number of nodes per element" ) .def("ndim" , &GooseFEM::Element::Quad4::Quadrature::ndim , "Return number of dimensions" ) .def("nip" , &GooseFEM::Element::Quad4::Quadrature::nip , "Return number of integration points") - .def("dV" , py::overload_cast<>(&GooseFEM::Element::Quad4::Quadrature::dV , py::const_), "Integration point volume (qscalar)") - .def("dVtensor", py::overload_cast<>(&GooseFEM::Element::Quad4::Quadrature::dVtensor, py::const_), "Integration point volume (qtensor)") + .def("DV", py::overload_cast(&GooseFEM::Element::Quad4::Quadrature::DV, py::const_), "Integration point volume (qtensor)") + .def("DV", py::overload_cast<> (&GooseFEM::Element::Quad4::Quadrature::DV, py::const_), "Integration point volume (qscalar)") - .def("gradN_vector" , py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::gradN_vector , py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) - .def("gradN_vector_T" , py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::gradN_vector_T , py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) - .def("symGradN_vector", py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::symGradN_vector, py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) + .def("GradN_vector" , py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::GradN_vector , py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) + .def("GradN_vector_T" , py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::GradN_vector_T , py::const_), "Dyadic product, returns 
'qtensor'", py::arg("elemvec")) + .def("SymGradN_vector", py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::SymGradN_vector, py::const_), "Dyadic product, returns 'qtensor'", py::arg("elemvec")) - .def("int_N_scalar_NT_dV" , py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::int_N_scalar_NT_dV , py::const_), "Integration, returns 'elemmat'", py::arg("qscalar")) - .def("int_gradN_dot_tensor2_dV", py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::int_gradN_dot_tensor2_dV, py::const_), "Integration, returns 'elemvec'", py::arg("qtensor")) + .def("Int_N_scalar_NT_dV" , py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::Int_N_scalar_NT_dV , py::const_), "Integration, returns 'elemmat'", py::arg("qscalar")) + .def("Int_gradN_dot_tensor2_dV", py::overload_cast &>(&GooseFEM::Element::Quad4::Quadrature::Int_gradN_dot_tensor2_dV, py::const_), "Integration, returns 'elemvec'", py::arg("qtensor")) .def("__repr__", [](const GooseFEM::Element::Quad4::Quadrature &){ return ""; }); } // ------------------------------------------------------------------------------------------------- void init_ElementQuad4Gauss(py::module &m) { m.def("nip", &GooseFEM::Element::Quad4::Gauss::nip, "Return number of integration point" ); m.def("xi" , &GooseFEM::Element::Quad4::Gauss::xi , "Return integration point coordinates"); m.def("w" , &GooseFEM::Element::Quad4::Gauss::w , "Return integration point weights" ); } // ------------------------------------------------------------------------------------------------- void init_ElementQuad4Nodal(py::module &m) { m.def("nip", &GooseFEM::Element::Quad4::Nodal::nip, "Return number of integration point" ); m.def("xi" , &GooseFEM::Element::Quad4::Nodal::xi , "Return integration point coordinates"); m.def("w" , &GooseFEM::Element::Quad4::Nodal::w , "Return integration point weights" ); } // ================================================================================================= diff --git a/python/MatrixDiagonalPartitioned.hpp b/python/MatrixDiagonalPartitioned.hpp index 2ce687d..d477beb 100644 --- a/python/MatrixDiagonalPartitioned.hpp +++ b/python/MatrixDiagonalPartitioned.hpp @@ -1,54 +1,54 @@ /* ================================================================================================= (c - GPLv3) T.W.J. 
de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #include #include #include #include #include "../include/GooseFEM/GooseFEM.h" // ================================================================================================= namespace py = pybind11; // ================================================================================================= void init_MatrixDiagonalPartitioned(py::module &m) { py::class_(m, "MatrixDiagonalPartitioned") .def(py::init &, const xt::xtensor &, const xt::xtensor &>(), "Diagonal matrix", py::arg("conn"), py::arg("dofs"), py::arg("iip")) .def("nelem", &GooseFEM::MatrixDiagonalPartitioned::nelem, "Return number of element") .def("nne" , &GooseFEM::MatrixDiagonalPartitioned::nne , "Return number of nodes per element") .def("nnode", &GooseFEM::MatrixDiagonalPartitioned::nnode, "Return number of nodes") .def("ndim" , &GooseFEM::MatrixDiagonalPartitioned::ndim , "Return number of dimensions") .def("ndof" , &GooseFEM::MatrixDiagonalPartitioned::ndof , "Return number of degrees-of-freedom") .def("nnu" , &GooseFEM::MatrixDiagonalPartitioned::nnu , "Return number of unknown degrees-of-freedom") .def("nnp" , &GooseFEM::MatrixDiagonalPartitioned::nnp , "Return number of prescribed degrees-of-freedom") .def("assemble", &GooseFEM::MatrixDiagonalPartitioned::assemble, "Assemble matrix from 'elemmat", py::arg("elemmat")) .def("dofs" , &GooseFEM::MatrixDiagonalPartitioned::dofs , "Return degrees-of-freedom") .def("iiu" , &GooseFEM::MatrixDiagonalPartitioned::iiu , "Return unknown degrees-of-freedom") .def("iip" , &GooseFEM::MatrixDiagonalPartitioned::iip , "Return prescribed degrees-of-freedom") - .def("dot" , py::overload_cast& >(&GooseFEM::MatrixDiagonalPartitioned::dot , py::const_), "Dot product 'b_i = A_ij * x_j" , py::arg("x")) - .def("dot_u", py::overload_cast&, const xt::xtensor&>(&GooseFEM::MatrixDiagonalPartitioned::dot_u, py::const_), "Dot product 'b_i = A_ij * x_j (b_u = A_uu * x_u + A_up * x_p == A_uu * x_u)", py::arg("x_u"), py::arg("x_p")) - .def("dot_p", py::overload_cast&, const xt::xtensor&>(&GooseFEM::MatrixDiagonalPartitioned::dot_p, py::const_), "Dot product 'b_i = A_ij * x_j (b_p = A_pu * x_u + A_pp * x_p == A_pp * x_p)", py::arg("x_u"), py::arg("x_p")) + .def("Dot" , py::overload_cast& >(&GooseFEM::MatrixDiagonalPartitioned::Dot , py::const_), "Dot product 'b_i = A_ij * x_j" , py::arg("x")) + .def("Dot_u", py::overload_cast&, const xt::xtensor&>(&GooseFEM::MatrixDiagonalPartitioned::Dot_u, py::const_), "Dot product 'b_i = A_ij * x_j (b_u = A_uu * x_u + A_up * x_p == A_uu * x_u)", py::arg("x_u"), py::arg("x_p")) + .def("Dot_p", py::overload_cast&, const xt::xtensor&>(&GooseFEM::MatrixDiagonalPartitioned::Dot_p, py::const_), "Dot product 'b_i = A_ij * x_j (b_p = A_pu * x_u + A_pp * x_p == A_pp * x_p)", py::arg("x_u"), py::arg("x_p")) - .def("asDiagonal", &GooseFEM::MatrixDiagonalPartitioned::asDiagonal, "Return as diagonal matrix (column)") + .def("AsDiagonal", &GooseFEM::MatrixDiagonalPartitioned::AsDiagonal, "Return as diagonal matrix (column)") .def("__repr__", [](const GooseFEM::MatrixDiagonalPartitioned &){ return ""; }); } // ================================================================================================= diff --git a/python/Vector.hpp b/python/Vector.hpp index 36949d5..c4028e8 100644 --- a/python/Vector.hpp +++ b/python/Vector.hpp @@ -1,55 +1,55 @@ /* 
================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #include #include #include #include #include "../include/GooseFEM/GooseFEM.h" // ================================================================================================= namespace py = pybind11; // ================================================================================================= void init_Vector(py::module &m) { py::class_(m, "Vector") .def(py::init &, const xt::xtensor &>(), "Switch between dofval/nodevec/elemvec", py::arg("conn"), py::arg("dofs")) .def("nelem", &GooseFEM::Vector::nelem, "Return number of element") .def("nne" , &GooseFEM::Vector::nne , "Return number of nodes per element") .def("nnode", &GooseFEM::Vector::nnode, "Return number of nodes") .def("ndim" , &GooseFEM::Vector::ndim , "Return number of dimensions") .def("ndof" , &GooseFEM::Vector::ndof , "Return number of degrees-of-freedom") .def("dofs" , &GooseFEM::Vector::dofs , "Return degrees-of-freedom") - .def("asDofs", py::overload_cast&>(&GooseFEM::Vector::asDofs, py::const_), "Set 'dofval", py::arg("nodevec")) - .def("asDofs", py::overload_cast&>(&GooseFEM::Vector::asDofs, py::const_), "Set 'dofval", py::arg("elemvec")) + .def("AsDofs", py::overload_cast&>(&GooseFEM::Vector::AsDofs, py::const_), "Set 'dofval", py::arg("nodevec")) + .def("AsDofs", py::overload_cast&>(&GooseFEM::Vector::AsDofs, py::const_), "Set 'dofval", py::arg("elemvec")) - .def("asNode", py::overload_cast&>(&GooseFEM::Vector::asNode, py::const_), "Set 'nodevec", py::arg("dofval")) - .def("asNode", py::overload_cast&>(&GooseFEM::Vector::asNode, py::const_), "Set 'nodevec", py::arg("elemvec")) + .def("AsNode", py::overload_cast&>(&GooseFEM::Vector::AsNode, py::const_), "Set 'nodevec", py::arg("dofval")) + .def("AsNode", py::overload_cast&>(&GooseFEM::Vector::AsNode, py::const_), "Set 'nodevec", py::arg("elemvec")) - .def("asElement", py::overload_cast&>(&GooseFEM::Vector::asElement, py::const_), "Set 'elemvec", py::arg("dofval")) - .def("asElement", py::overload_cast&>(&GooseFEM::Vector::asElement, py::const_), "Set 'elemvec", py::arg("nodevec")) + .def("AsElement", py::overload_cast&>(&GooseFEM::Vector::AsElement, py::const_), "Set 'elemvec", py::arg("dofval")) + .def("AsElement", py::overload_cast&>(&GooseFEM::Vector::AsElement, py::const_), "Set 'elemvec", py::arg("nodevec")) - .def("assembleDofs", py::overload_cast&>(&GooseFEM::Vector::assembleDofs, py::const_), "Assemble 'dofval'", py::arg("nodevec")) - .def("assembleDofs", py::overload_cast&>(&GooseFEM::Vector::assembleDofs, py::const_), "Assemble 'dofval'", py::arg("elemvec")) + .def("AssembleDofs", py::overload_cast&>(&GooseFEM::Vector::AssembleDofs, py::const_), "Assemble 'dofval'", py::arg("nodevec")) + .def("AssembleDofs", py::overload_cast&>(&GooseFEM::Vector::AssembleDofs, py::const_), "Assemble 'dofval'", py::arg("elemvec")) - .def("assembleNode", py::overload_cast&>(&GooseFEM::Vector::assembleNode, py::const_), "Assemble 'nodevec'", py::arg("elemvec")) + .def("AssembleNode", py::overload_cast&>(&GooseFEM::Vector::AssembleNode, py::const_), "Assemble 'nodevec'", py::arg("elemvec")) .def("__repr__", [](const GooseFEM::Vector &){ return ""; }); } // ================================================================================================= diff --git 
a/python/VectorPartitioned.hpp b/python/VectorPartitioned.hpp index c31a630..337dcfd 100644 --- a/python/VectorPartitioned.hpp +++ b/python/VectorPartitioned.hpp @@ -1,72 +1,72 @@ /* ================================================================================================= (c - GPLv3) T.W.J. de Geus (Tom) | tom@geus.me | www.geus.me | github.com/tdegeus/GooseFEM ================================================================================================= */ #include #include #include #include #include "../include/GooseFEM/GooseFEM.h" // ================================================================================================= namespace py = pybind11; // ================================================================================================= void init_VectorPartitioned(py::module &m) { py::class_(m, "VectorPartitioned") .def(py::init &, const xt::xtensor &, const xt::xtensor &>(), "Switch between dofval/nodevec/elemvec", py::arg("conn"), py::arg("dofs"), py::arg("iip")) .def("nelem", &GooseFEM::VectorPartitioned::nelem, "Return number of element") .def("nne" , &GooseFEM::VectorPartitioned::nne , "Return number of nodes per element") .def("nnode", &GooseFEM::VectorPartitioned::nnode, "Return number of nodes") .def("ndim" , &GooseFEM::VectorPartitioned::ndim , "Return number of dimensions") .def("ndof" , &GooseFEM::VectorPartitioned::ndof , "Return number of degrees-of-freedom") .def("nnu" , &GooseFEM::VectorPartitioned::nnu , "Return number of unknown degrees-of-freedom") .def("nnp" , &GooseFEM::VectorPartitioned::nnp , "Return number of prescribed degrees-of-freedom") .def("dofs" , &GooseFEM::VectorPartitioned::dofs , "Return degrees-of-freedom") .def("iiu" , &GooseFEM::VectorPartitioned::iiu , "Return unknown degrees-of-freedom") .def("iip" , &GooseFEM::VectorPartitioned::iip , "Return prescribed degrees-of-freedom") - .def("asDofs", py::overload_cast&,const xt::xtensor&>(&GooseFEM::VectorPartitioned::asDofs, py::const_), "Set 'dofval" , py::arg("dofval_u"), py::arg("dofval_p")) - .def("asDofs", py::overload_cast& >(&GooseFEM::VectorPartitioned::asDofs, py::const_), "Set 'dofval" , py::arg("nodevec")) - .def("asDofs", py::overload_cast& >(&GooseFEM::VectorPartitioned::asDofs, py::const_), "Set 'dofval" , py::arg("elemvec")) - - .def("asDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::asDofs_u , py::const_), "Set 'dofval" , py::arg("nodevec")) - .def("asDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::asDofs_u , py::const_), "Set 'dofval" , py::arg("elemvec")) - .def("asDofs_p", py::overload_cast&>(&GooseFEM::VectorPartitioned::asDofs_p , py::const_), "Set 'dofval" , py::arg("nodevec")) - .def("asDofs_p", py::overload_cast&>(&GooseFEM::VectorPartitioned::asDofs_p , py::const_), "Set 'dofval" , py::arg("elemvec")) - - .def("asNode", py::overload_cast&,const xt::xtensor&>(&GooseFEM::VectorPartitioned::asNode, py::const_), "Set 'nodevec", py::arg("dofval_u"), py::arg("dofval_p")) - .def("asNode", py::overload_cast& >(&GooseFEM::VectorPartitioned::asNode, py::const_), "Set 'nodevec", py::arg("dofval")) - .def("asNode", py::overload_cast& >(&GooseFEM::VectorPartitioned::asNode, py::const_), "Set 'nodevec", py::arg("elemvec")) - - .def("asElement", py::overload_cast&,const xt::xtensor&>(&GooseFEM::VectorPartitioned::asElement, py::const_), "Set 'elemvec", py::arg("dofval_u"), py::arg("dofval_p")) - .def("asElement", py::overload_cast& >(&GooseFEM::VectorPartitioned::asElement, py::const_), "Set 'elemvec", py::arg("dofval")) - .def("asElement", 
py::overload_cast& >(&GooseFEM::VectorPartitioned::asElement, py::const_), "Set 'elemvec", py::arg("nodevec")) - - .def("assembleDofs" , py::overload_cast&>(&GooseFEM::VectorPartitioned::assembleDofs , py::const_), "Assemble 'dofval'" , py::arg("nodevec")) - .def("assembleDofs" , py::overload_cast&>(&GooseFEM::VectorPartitioned::assembleDofs , py::const_), "Assemble 'dofval'" , py::arg("elemvec")) - .def("assembleDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::assembleDofs_u, py::const_), "Assemble 'dofval'" , py::arg("nodevec")) - .def("assembleDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::assembleDofs_u, py::const_), "Assemble 'dofval'" , py::arg("elemvec")) - .def("assembleDofs_p", py::overload_cast&>(&GooseFEM::VectorPartitioned::assembleDofs_p, py::const_), "Assemble 'dofval'" , py::arg("nodevec")) - .def("assembleDofs_p", py::overload_cast&>(&GooseFEM::VectorPartitioned::assembleDofs_p, py::const_), "Assemble 'dofval'" , py::arg("elemvec")) - - .def("assembleNode" , py::overload_cast&>(&GooseFEM::VectorPartitioned::assembleNode , py::const_), "Assemble 'nodevec'", py::arg("elemvec")) + .def("AsDofs", py::overload_cast&,const xt::xtensor&>(&GooseFEM::VectorPartitioned::AsDofs, py::const_), "Set 'dofval" , py::arg("dofval_u"), py::arg("dofval_p")) + .def("AsDofs", py::overload_cast& >(&GooseFEM::VectorPartitioned::AsDofs, py::const_), "Set 'dofval" , py::arg("nodevec")) + .def("AsDofs", py::overload_cast& >(&GooseFEM::VectorPartitioned::AsDofs, py::const_), "Set 'dofval" , py::arg("elemvec")) + + .def("AsDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::AsDofs_u , py::const_), "Set 'dofval" , py::arg("nodevec")) + .def("AsDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::AsDofs_u , py::const_), "Set 'dofval" , py::arg("elemvec")) + .def("AsDofs_p", py::overload_cast&>(&GooseFEM::VectorPartitioned::AsDofs_p , py::const_), "Set 'dofval" , py::arg("nodevec")) + .def("AsDofs_p", py::overload_cast&>(&GooseFEM::VectorPartitioned::AsDofs_p , py::const_), "Set 'dofval" , py::arg("elemvec")) + + .def("AsNode", py::overload_cast&,const xt::xtensor&>(&GooseFEM::VectorPartitioned::AsNode, py::const_), "Set 'nodevec", py::arg("dofval_u"), py::arg("dofval_p")) + .def("AsNode", py::overload_cast& >(&GooseFEM::VectorPartitioned::AsNode, py::const_), "Set 'nodevec", py::arg("dofval")) + .def("AsNode", py::overload_cast& >(&GooseFEM::VectorPartitioned::AsNode, py::const_), "Set 'nodevec", py::arg("elemvec")) + + .def("AsElement", py::overload_cast&,const xt::xtensor&>(&GooseFEM::VectorPartitioned::AsElement, py::const_), "Set 'elemvec", py::arg("dofval_u"), py::arg("dofval_p")) + .def("AsElement", py::overload_cast& >(&GooseFEM::VectorPartitioned::AsElement, py::const_), "Set 'elemvec", py::arg("dofval")) + .def("AsElement", py::overload_cast& >(&GooseFEM::VectorPartitioned::AsElement, py::const_), "Set 'elemvec", py::arg("nodevec")) + + .def("AssembleDofs" , py::overload_cast&>(&GooseFEM::VectorPartitioned::AssembleDofs , py::const_), "Assemble 'dofval'" , py::arg("nodevec")) + .def("AssembleDofs" , py::overload_cast&>(&GooseFEM::VectorPartitioned::AssembleDofs , py::const_), "Assemble 'dofval'" , py::arg("elemvec")) + .def("AssembleDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::AssembleDofs_u, py::const_), "Assemble 'dofval'" , py::arg("nodevec")) + .def("AssembleDofs_u", py::overload_cast&>(&GooseFEM::VectorPartitioned::AssembleDofs_u, py::const_), "Assemble 'dofval'" , py::arg("elemvec")) + .def("AssembleDofs_p", 
py::overload_cast&>(&GooseFEM::VectorPartitioned::AssembleDofs_p, py::const_), "Assemble 'dofval'" , py::arg("nodevec")) + .def("AssembleDofs_p", py::overload_cast&>(&GooseFEM::VectorPartitioned::AssembleDofs_p, py::const_), "Assemble 'dofval'" , py::arg("elemvec")) + + .def("AssembleNode" , py::overload_cast&>(&GooseFEM::VectorPartitioned::AssembleNode , py::const_), "Assemble 'nodevec'", py::arg("elemvec")) .def("__repr__", [](const GooseFEM::VectorPartitioned &){ return ""; }); } // ================================================================================================= diff --git a/test/ElementHex8.cpp b/test/ElementHex8.cpp index 6d4acf5..9a26131 100644 --- a/test/ElementHex8.cpp +++ b/test/ElementHex8.cpp @@ -1,148 +1,148 @@ #include "support.h" // ================================================================================================= TEST_CASE("GooseFEM::ElementHex8", "ElementHex8.h") { using T2 = xt::xtensor_fixed>; // ================================================================================================= SECTION( "int_N_scalar_NT_dV" ) { // mesh GooseFEM::Mesh::Hex8::Regular mesh(3,3,3); // vector-definition, and a diagonal matrix GooseFEM::Vector vec(mesh.conn(), mesh.dofsPeriodic()); GooseFEM::MatrixDiagonal mat(mesh.conn(), mesh.dofsPeriodic()); // element definition, with nodal quadrature GooseFEM::Element::Hex8::Quadrature quad( - vec.asElement(mesh.coor()), + vec.AsElement(mesh.coor()), GooseFEM::Element::Hex8::Nodal::xi(), GooseFEM::Element::Hex8::Nodal::w() ); // scalar per quadrature point (e.g. mass-density "rho") xt::xtensor rho = xt::ones({mesh.nelem(), quad.nip()}); // evaluate integral and assemble diagonal matrix (e.g. mass matrix) - mat.assemble(quad.int_N_scalar_NT_dV(rho)); + mat.assemble(quad.Int_N_scalar_NT_dV(rho)); // check matrix // - get the matrix - xt::xtensor M = mat.asDiagonal(); + xt::xtensor M = mat.AsDiagonal(); // - check the size REQUIRE( M.size() == vec.ndof() ); // - check each component REQUIRE( xt::allclose(M, 1.) 
); } // ================================================================================================= SECTION( "symGradN_vector" ) { // mesh GooseFEM::Mesh::Hex8::FineLayer mesh(27,27,27); // vector-definition GooseFEM::Vector vec(mesh.conn(), mesh.dofs()); // element definition, with Gauss quadrature - GooseFEM::Element::Hex8::Quadrature quad( vec.asElement(mesh.coor()) ); + GooseFEM::Element::Hex8::Quadrature quad( vec.AsElement(mesh.coor()) ); // macroscopic deformation gradient and strain // - zero-initialize T2 F = xt::zeros({3,3}); T2 EPS = xt::zeros({3,3}); // - set non-zero components F (0,1) = 0.1; EPS(0,1) = 0.05; EPS(1,0) = 0.05; // nodal coordinates and displacement xt::xtensor coor = mesh.coor();; xt::xtensor disp = xt::zeros(coor.shape()); // apply macroscopic deformation gradient for ( size_t n = 0 ; n < mesh.nnode() ; ++n ) for ( size_t i = 0 ; i < F.shape()[0] ; ++i ) for ( size_t j = 0 ; j < F.shape()[1] ; ++j ) disp(n,i) += F(i,j) * coor(n,j); // compute quadrature point tensors - xt::xtensor eps = quad.symGradN_vector(vec.asElement(disp)); + xt::xtensor eps = quad.SymGradN_vector(vec.AsElement(disp)); // integration point volume xt::xtensor dV = eps; quad.dV(dV); // volume averaged strain tensor auto epsbar = xt::average(eps, dV, {0,1}); // check local strain tensors // - check sizes REQUIRE( eps.shape()[0] == mesh.nelem() ); REQUIRE( eps.shape()[1] == quad.nip() ); REQUIRE( eps.shape()[2] == mesh.ndim() ); REQUIRE( eps.shape()[3] == mesh.ndim() ); // - check all components for ( size_t e = 0 ; e < mesh.nelem() ; ++e ) { for ( size_t k = 0 ; k < quad.nip() ; ++k ) { auto Eps = xt::view(eps, e, k); REQUIRE( xt::allclose(Eps, EPS)); } } // check macroscopic tensor REQUIRE( xt::allclose(epsbar, EPS)); } // ================================================================================================= SECTION( "symGradN_vector, int_gradN_dot_tensor2s_dV" ) { // mesh GooseFEM::Mesh::Hex8::FineLayer mesh(27,27,27); // vector-definition GooseFEM::Vector vec(mesh.conn(), mesh.dofsPeriodic()); // element definition, with Gauss quadrature - GooseFEM::Element::Hex8::Quadrature quad( vec.asElement(mesh.coor()) ); + GooseFEM::Element::Hex8::Quadrature quad( vec.AsElement(mesh.coor()) ); // macroscopic deformation gradient and strain // - zero-initialize T2 F = xt::zeros({3,3}); // - set non-zero components F(0,1) = 0.1; // nodal coordinates and displacement xt::xtensor coor = mesh.coor();; xt::xtensor disp = xt::zeros(coor.shape()); // apply macroscopic deformation gradient for ( size_t n = 0 ; n < mesh.nnode() ; ++n ) for ( size_t i = 0 ; i < F.shape()[0] ; ++i ) for ( size_t j = 0 ; j < F.shape()[1] ; ++j ) disp(n,i) += F(i,j) * coor(n,j); // compute quadrature point tensors - xt::xtensor eps = quad.symGradN_vector(vec.asElement(disp)); + xt::xtensor eps = quad.SymGradN_vector(vec.AsElement(disp)); // nodal force vector (should be zero, as it is only sensitive to periodic fluctuations) - xt::xtensor Fi = vec.assembleDofs(quad.int_gradN_dot_tensor2_dV(eps)); + xt::xtensor Fi = vec.AssembleDofs(quad.Int_gradN_dot_tensor2_dV(eps)); // check // - size REQUIRE( Fi.size() == vec.ndof() ); // - check each component REQUIRE( xt::allclose(Fi, 0.) 
 }
 
 // =================================================================================================
 
 }
diff --git a/test/ElementQuad4.cpp b/test/ElementQuad4.cpp
index fbaea1e..3f69986 100644
--- a/test/ElementQuad4.cpp
+++ b/test/ElementQuad4.cpp
@@ -1,148 +1,148 @@
 #include "support.h"
 
 // =================================================================================================
 
 TEST_CASE("GooseFEM::ElementQuad4", "ElementQuad4.h")
 {
 
 using T2 = xt::xtensor_fixed<double, xt::xshape<2,2>>;
 
 // =================================================================================================
 
 SECTION( "int_N_scalar_NT_dV" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(3,3);
 
   // vector-definition, and a diagonal matrix
   GooseFEM::Vector vec(mesh.conn(), mesh.dofsPeriodic());
   GooseFEM::MatrixDiagonal mat(mesh.conn(), mesh.dofsPeriodic());
 
   // element definition, with nodal quadrature
   GooseFEM::Element::Quad4::Quadrature quad(
-    vec.asElement(mesh.coor()),
+    vec.AsElement(mesh.coor()),
     GooseFEM::Element::Quad4::Nodal::xi(),
     GooseFEM::Element::Quad4::Nodal::w()
   );
 
   // scalar per quadrature point (e.g. mass-density "rho")
   xt::xtensor<double,2> rho = xt::ones<double>({mesh.nelem(), quad.nip()});
 
   // evaluate integral and assemble diagonal matrix (e.g. mass matrix)
-  mat.assemble(quad.int_N_scalar_NT_dV(rho));
+  mat.assemble(quad.Int_N_scalar_NT_dV(rho));
 
   // check matrix
   // - get the matrix
-  xt::xtensor<double,1> M = mat.asDiagonal();
+  xt::xtensor<double,1> M = mat.AsDiagonal();
   // - check the size
   REQUIRE( M.size() == vec.ndof() );
   // - check each component
   REQUIRE( xt::allclose(M, 1.) );
 }
 
 // =================================================================================================
 
 SECTION( "symGradN_vector" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::FineLayer mesh(27,27);
 
   // vector-definition
   GooseFEM::Vector vec(mesh.conn(), mesh.dofs());
 
   // element definition, with Gauss quadrature
-  GooseFEM::Element::Quad4::Quadrature quad( vec.asElement(mesh.coor()) );
+  GooseFEM::Element::Quad4::Quadrature quad( vec.AsElement(mesh.coor()) );
 
   // macroscopic deformation gradient and strain
   // - zero-initialize
   T2 F = xt::zeros<double>({2,2});
   T2 EPS = xt::zeros<double>({2,2});
   // - set non-zero components
   F (0,1) = 0.1;
   EPS(0,1) = 0.05;
   EPS(1,0) = 0.05;
 
   // nodal coordinates and displacement
   xt::xtensor<double,2> coor = mesh.coor();;
   xt::xtensor<double,2> disp = xt::zeros<double>(coor.shape());
 
   // apply macroscopic deformation gradient
   for ( size_t n = 0 ; n < mesh.nnode() ; ++n )
     for ( size_t i = 0 ; i < F.shape()[0] ; ++i )
       for ( size_t j = 0 ; j < F.shape()[1] ; ++j )
         disp(n,i) += F(i,j) * coor(n,j);
 
   // compute quadrature point tensors
-  xt::xtensor<double,4> eps = quad.symGradN_vector(vec.asElement(disp));
+  xt::xtensor<double,4> eps = quad.SymGradN_vector(vec.AsElement(disp));
 
   // integration point volume
   xt::xtensor<double,4> dV = eps;
   quad.dV(dV);
 
   // volume averaged strain tensor
   auto epsbar = xt::average(eps, dV, {0,1});
 
   // check local strain tensors
   // - check sizes
   REQUIRE( eps.shape()[0] == mesh.nelem() );
   REQUIRE( eps.shape()[1] == quad.nip() );
   REQUIRE( eps.shape()[2] == mesh.ndim() );
   REQUIRE( eps.shape()[3] == mesh.ndim() );
   // - check all components
   for ( size_t e = 0 ; e < mesh.nelem() ; ++e ) {
     for ( size_t k = 0 ; k < quad.nip() ; ++k ) {
       auto Eps = xt::view(eps, e, k);
       REQUIRE( xt::allclose(Eps, EPS));
     }
   }
 
   // check macroscopic tensor
   REQUIRE( xt::allclose(epsbar, EPS));
 }
 
 // =================================================================================================
 
 SECTION( "symGradN_vector, int_gradN_dot_tensor2s_dV" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::FineLayer mesh(27,27);
 
   // vector-definition
   GooseFEM::Vector vec(mesh.conn(), mesh.dofsPeriodic());
 
   // element definition, with Gauss quadrature
-  GooseFEM::Element::Quad4::Quadrature quad( vec.asElement(mesh.coor()) );
+  GooseFEM::Element::Quad4::Quadrature quad( vec.AsElement(mesh.coor()) );
 
   // macroscopic deformation gradient and strain
   // - zero-initialize
   T2 F = xt::zeros<double>({2,2});
   // - set non-zero components
   F(0,1) = 0.1;
 
   // nodal coordinates and displacement
   xt::xtensor<double,2> coor = mesh.coor();;
   xt::xtensor<double,2> disp = xt::zeros<double>(coor.shape());
 
   // apply macroscopic deformation gradient
   for ( size_t n = 0 ; n < mesh.nnode() ; ++n )
     for ( size_t i = 0 ; i < F.shape()[0] ; ++i )
       for ( size_t j = 0 ; j < F.shape()[1] ; ++j )
         disp(n,i) += F(i,j) * coor(n,j);
 
   // compute quadrature point tensors
-  xt::xtensor<double,4> eps = quad.symGradN_vector(vec.asElement(disp));
+  xt::xtensor<double,4> eps = quad.SymGradN_vector(vec.AsElement(disp));
 
   // nodal force vector (should be zero, as it is only sensitive to periodic fluctuations)
-  xt::xtensor<double,1> Fi = vec.assembleDofs(quad.int_gradN_dot_tensor2_dV(eps));
+  xt::xtensor<double,1> Fi = vec.AssembleDofs(quad.Int_gradN_dot_tensor2_dV(eps));
 
   // check
   // - size
   REQUIRE( Fi.size() == vec.ndof() );
   // - check each component
   REQUIRE( xt::allclose(Fi, 0.) );
 }
 
 // =================================================================================================
 
 }
diff --git a/test/MatrixDiagonal.cpp b/test/MatrixDiagonal.cpp
index 34c4f1f..bdc1801 100644
--- a/test/MatrixDiagonal.cpp
+++ b/test/MatrixDiagonal.cpp
@@ -1,78 +1,78 @@
 #include "support.h"
 
 // =================================================================================================
 
 TEST_CASE("GooseFEM::MatrixDiagonal", "MatrixDiagonal.h")
 {
 
 // =================================================================================================
 
 SECTION( "dot" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(2,2);
 
   // random matrix and column
   xt::xtensor<double,1> a = xt::random::rand<double>({mesh.nnode()*mesh.ndim()});
   xt::xtensor<double,1> b = xt::random::rand<double>({mesh.nnode()*mesh.ndim()});
   xt::xtensor<double,1> c;
 
   // compute product
   c = a * b;
 
   // convert to GooseFEM
   // - allocate
   GooseFEM::MatrixDiagonal A(mesh.conn(), mesh.dofs());
   xt::xtensor<double,1> C;
   // - set
   A.set(a);
 
   // compute product
-  C = A.dot(b);
+  C = A.Dot(b);
 
   // check
   // - size
   REQUIRE( C.size() == c.size() );
   // - components
   REQUIRE( xt::allclose(C, c) );
 }
 
 // -------------------------------------------------------------------------------------------------
 
 SECTION( "solve" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(2,2);
 
   // random matrix and column
   xt::xtensor<double,1> a = xt::random::rand<double>({mesh.nnode()*mesh.ndim()});
   xt::xtensor<double,1> b = xt::random::rand<double>({mesh.nnode()*mesh.ndim()});
   xt::xtensor<double,1> c;
 
   // compute product
   c = a * b;
 
   // convert to GooseFEM
   // - allocate
   GooseFEM::MatrixDiagonal A(mesh.conn(), mesh.dofs());
   xt::xtensor<double,1> B, C;
   // - set
   A.set(a);
 
   // compute product
-  C = A.dot(b);
+  C = A.Dot(b);
 
   // solve
-  B = A.solve(C);
+  B = A.Solve(C);
 
   // check
   // - size
   REQUIRE( B.size() == b.size() );
   // - components
   REQUIRE( xt::allclose(B, b) );
 }
 
 //
 
 // =================================================================================================
 
 }
diff --git a/test/Vector.cpp b/test/Vector.cpp
index 82a1a1e..c384b16 100644
--- a/test/Vector.cpp
+++ b/test/Vector.cpp
@@ -1,177 +1,177 @@
 #include "support.h"
 
 // =================================================================================================
 
 TEST_CASE("GooseFEM::Vector", "Vector.h")
 {
 
 // =================================================================================================
 
 SECTION( "asDofs - nodevec" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(2,2);
 
   // vector-definition
   GooseFEM::Vector vector(mesh.conn(), mesh.dofsPeriodic());
 
   // velocity field
   // - allocate
   xt::xtensor<double,2> v = xt::empty<double>({mesh.nnode(), std::size_t(2)});
   // - set periodic
   v(0,0) = 1.0;  v(0,1) = 0.0;
   v(1,0) = 1.0;  v(1,1) = 0.0;
   v(2,0) = 1.0;  v(2,1) = 0.0;
   v(3,0) = 1.5;  v(3,1) = 0.0;
   v(4,0) = 1.5;  v(4,1) = 0.0;
   v(5,0) = 1.5;  v(5,1) = 0.0;
   v(6,0) = 1.0;  v(6,1) = 0.0;
   v(7,0) = 1.0;  v(7,1) = 0.0;
   v(8,0) = 1.0;  v(8,1) = 0.0;
 
   // convert to DOFs
-  xt::xtensor<double,1> V = vector.asDofs(v);
+  xt::xtensor<double,1> V = vector.AsDofs(v);
 
   // check
   // - size
   REQUIRE( V.size() == mesh.nnodePeriodic() * mesh.ndim() );
   // - individual entries
   EQ( V(0), v(0,0) );
   EQ( V(1), v(0,1) );
   EQ( V(2), v(1,0) );
   EQ( V(3), v(1,1) );
   EQ( V(4), v(3,0) );
   EQ( V(5), v(3,1) );
   EQ( V(6), v(4,0) );
   EQ( V(7), v(4,1) );
 }
 
 // -------------------------------------------------------------------------------------------------
 
 SECTION( "asDofs - elemvec" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(2,2);
 
   // vector-definition
   GooseFEM::Vector vector(mesh.conn(), mesh.dofsPeriodic());
 
   // velocity field
   // - allocate
   xt::xtensor<double,2> v = xt::empty<double>({mesh.nnode(), std::size_t(2)});
   // - set periodic
   v(0,0) = 1.0;  v(0,1) = 0.0;
   v(1,0) = 1.0;  v(1,1) = 0.0;
   v(2,0) = 1.0;  v(2,1) = 0.0;
   v(3,0) = 1.5;  v(3,1) = 0.0;
   v(4,0) = 1.5;  v(4,1) = 0.0;
   v(5,0) = 1.5;  v(5,1) = 0.0;
   v(6,0) = 1.0;  v(6,1) = 0.0;
   v(7,0) = 1.0;  v(7,1) = 0.0;
   v(8,0) = 1.0;  v(8,1) = 0.0;
 
   // convert to DOFs - element - DOFs
-  xt::xtensor<double,1> V = vector.asDofs(vector.asElement(vector.asDofs(v)));
+  xt::xtensor<double,1> V = vector.AsDofs(vector.AsElement(vector.AsDofs(v)));
 
   // check
   // - size
   REQUIRE( V.size() == mesh.nnodePeriodic() * mesh.ndim() );
   // - individual entries
   EQ( V(0), v(0,0) );
   EQ( V(1), v(0,1) );
   EQ( V(2), v(1,0) );
   EQ( V(3), v(1,1) );
   EQ( V(4), v(3,0) );
   EQ( V(5), v(3,1) );
   EQ( V(6), v(4,0) );
   EQ( V(7), v(4,1) );
 }
 
 // -------------------------------------------------------------------------------------------------
 
 SECTION( "asDofs - assembleDofs" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(2,2);
 
   // vector-definition
   GooseFEM::Vector vector(mesh.conn(), mesh.dofsPeriodic());
 
   // force field
   // - allocate
   xt::xtensor<double,2> f = xt::empty<double>({mesh.nnode(), std::size_t(2)});
   // - set periodic
   f(0,0) = -1.0;  f(0,1) = -1.0;
   f(1,0) = 0.0;  f(1,1) = -1.0;
   f(2,0) = 1.0;  f(2,1) = -1.0;
   f(3,0) = -1.0;  f(3,1) = 0.0;
   f(4,0) = 0.0;  f(4,1) = 0.0;
   f(5,0) = 1.0;  f(5,1) = 0.0;
   f(6,0) = -1.0;  f(6,1) = 1.0;
   f(7,0) = 0.0;  f(7,1) = 1.0;
   f(8,0) = 1.0;  f(8,1) = 1.0;
 
   // assemble as DOFs
-  xt::xtensor<double,1> F = vector.assembleDofs(f);
+  xt::xtensor<double,1> F = vector.AssembleDofs(f);
 
   // check
   // - size
   REQUIRE( F.size() == mesh.nnodePeriodic() * mesh.ndim() );
   // - 'analytical' result
   EQ( F(0), 0 );
   EQ( F(1), 0 );
   EQ( F(2), 0 );
   EQ( F(3), 0 );
   EQ( F(4), 0 );
   EQ( F(5), 0 );
   EQ( F(6), 0 );
   EQ( F(7), 0 );
 }
 
 // -------------------------------------------------------------------------------------------------
 
 SECTION( "asDofs - assembleNode" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(2,2);
 
   // vector-definition
   GooseFEM::Vector vector(mesh.conn(), mesh.dofsPeriodic());
 
   // force field
   // - allocate
   xt::xtensor<double,2> f = xt::empty<double>({mesh.nnode(), std::size_t(2)});
   // - set periodic
   f(0,0) = -1.0;  f(0,1) = -1.0;
   f(1,0) = 0.0;  f(1,1) = -1.0;
   f(2,0) = 1.0;  f(2,1) = -1.0;
   f(3,0) = -1.0;  f(3,1) = 0.0;
   f(4,0) = 0.0;  f(4,1) = 0.0;
   f(5,0) = 1.0;  f(5,1) = 0.0;
   f(6,0) = -1.0;  f(6,1) = 1.0;
   f(7,0) = 0.0;  f(7,1) = 1.0;
   f(8,0) = 1.0;  f(8,1) = 1.0;
 
   // convert to element, assemble as DOFs
-  xt::xtensor<double,1> F = vector.assembleDofs( vector.asElement(f) );
+  xt::xtensor<double,1> F = vector.AssembleDofs( vector.AsElement(f) );
 
   // check
   // - size
   REQUIRE( F.size() == mesh.nnodePeriodic() * mesh.ndim() );
   // - 'analytical' result
   EQ( F(0), 0 );
   EQ( F(1), 0 );
   EQ( F(2), 0 );
   EQ( F(3), 0 );
   EQ( F(4), 0 );
   EQ( F(5), 0 );
   EQ( F(6), 0 );
   EQ( F(7), 0 );
 }
 
 // -------------------------------------------------------------------------------------------------
 
 // =================================================================================================
 
 }
diff --git a/test/VectorPartitioned.cpp b/test/VectorPartitioned.cpp
index fe45764..baec9ea 100644
--- a/test/VectorPartitioned.cpp
+++ b/test/VectorPartitioned.cpp
@@ -1,113 +1,113 @@
 #include "support.h"
 
 // =================================================================================================
 
 TEST_CASE("GooseFEM::VectorPartitioned", "VectorPartitioned.h")
 {
 
 // =================================================================================================
 
 SECTION( "asDofs" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(9,9);
 
   // prescribed DOFs
   xt::xtensor<size_t,2> dofs = mesh.dofs();
   xt::xtensor<size_t,1> nodesLeft = mesh.nodesLeftOpenEdge();
   xt::xtensor<size_t,1> nodesRight = mesh.nodesRightOpenEdge();
   xt::xtensor<size_t,1> nodesTop = mesh.nodesTopEdge();
   xt::xtensor<size_t,1> nodesBottom = mesh.nodesBottomEdge();
 
   for ( size_t i = 0 ; i < nodesLeft.size() ; ++i )
     for ( size_t j = 0 ; j < mesh.ndim() ; ++j )
       dofs(nodesRight(i),j) = dofs(nodesLeft(i),j);
 
   xt::xtensor<size_t,1> iip = xt::empty<size_t>({4*nodesBottom.size()});
 
   size_t i = 0;
 
   for ( auto &n : nodesTop ) { iip(i) = dofs(n,0); ++i; }
   for ( auto &n : nodesTop ) { iip(i) = dofs(n,1); ++i; }
   for ( auto &n : nodesBottom ) { iip(i) = dofs(n,0); ++i; }
   for ( auto &n : nodesBottom ) { iip(i) = dofs(n,1); ++i; }
 
   // random displacement
   xt::xtensor<double,2> u = xt::random::randn<double>({mesh.nnode(), mesh.ndim()});
 
   for ( size_t i = 0 ; i < nodesLeft.size() ; ++i )
     for ( size_t j = 0 ; j < mesh.ndim() ; ++j )
       u(nodesRight(i),j) = u(nodesLeft(i),j);
 
   // vector-definition
   GooseFEM::VectorPartitioned vector(mesh.conn(), dofs, iip);
 
   // convert
-  xt::xtensor<double,1> u_u = vector.asDofs_u(u);
-  xt::xtensor<double,1> u_p = vector.asDofs_p(u);
+  xt::xtensor<double,1> u_u = vector.AsDofs_u(u);
+  xt::xtensor<double,1> u_p = vector.AsDofs_p(u);
 
-  REQUIRE( xt::allclose(u, vector.asNode(u_u, u_p)) );
+  REQUIRE( xt::allclose(u, vector.AsNode(u_u, u_p)) );
 }
 
 // =================================================================================================
 
 SECTION( "copy_u, copy_p" )
 {
   // mesh
   GooseFEM::Mesh::Quad4::Regular mesh(9,9);
 
   // prescribed DOFs
   xt::xtensor<size_t,2> dofs = mesh.dofs();
   xt::xtensor<size_t,1> nodesLeft = mesh.nodesLeftOpenEdge();
   xt::xtensor<size_t,1> nodesRight = mesh.nodesRightOpenEdge();
   xt::xtensor<size_t,1> nodesTop = mesh.nodesTopEdge();
   xt::xtensor<size_t,1> nodesBottom = mesh.nodesBottomEdge();
 
   for ( size_t i = 0 ; i < nodesLeft.size() ; ++i )
     for ( size_t j = 0 ; j < mesh.ndim() ; ++j )
       dofs(nodesRight(i),j) = dofs(nodesLeft(i),j);
 
   xt::xtensor<size_t,1> iip = xt::empty<size_t>({4*nodesBottom.size()});
 
   size_t i = 0;
 
   for ( auto &n : nodesTop ) { iip(i) = dofs(n,0); ++i; }
   for ( auto &n : nodesTop ) { iip(i) = dofs(n,1); ++i; }
   for ( auto &n : nodesBottom ) { iip(i) = dofs(n,0); ++i; }
   for ( auto &n : nodesBottom ) { iip(i) = dofs(n,1); ++i; }
 
   // random displacement
   xt::xtensor<double,2> u = xt::random::randn<double>({mesh.nnode(), mesh.ndim()});
 
   for ( size_t i = 0 ; i < nodesLeft.size() ; ++i )
     for ( size_t j = 0 ; j < mesh.ndim() ; ++j )
       u(nodesRight(i),j) = u(nodesLeft(i),j);
 
   // vector-definition
   GooseFEM::VectorPartitioned vector(mesh.conn(), dofs, iip);
 
   // convert
   xt::xtensor<double,2> v = xt::empty<double>({mesh.nnode(), mesh.ndim()});
 
   vector.copy_u(u, v);
   vector.copy_p(u, v);
 
   REQUIRE( xt::allclose(u, v) );
 }
 
 // =================================================================================================
 
 }