diff --git a/SConstruct b/SConstruct index a37f170..1b6e3b1 100644 --- a/SConstruct +++ b/SConstruct @@ -1,455 +1,450 @@ # -*- mode:python; coding: utf-8 -*- # vim: set ft=python: # @file # @section LICENSE # # Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ------------------------------------------------------------------------------ # Imports # ------------------------------------------------------------------------------ from __future__ import print_function import os import SCons from os.path import join, abspath # Import below not strictly necessary, but good for pep8 from SCons.Script import ( EnsurePythonVersion, EnsureSConsVersion, Help, Environment, Variables, EnumVariable, PathVariable, BoolVariable, Split, SConscript, Export, Dir ) from version import get_git_subst from detect import ( FindFFTW, FindBoost, FindThrust, FindCuda, FindExpolit, FindPybind11 ) # ------------------------------------------------------------------------------ EnsurePythonVersion(2, 7) EnsureSConsVersion(2, 4) # ------------------------------------------------------------------------------ tamaas_info = dict( version="2.1.1", authors=[ u'Lucas Frérot', 'Guillaume Anciaux', 'Valentine Rey', 'Son Pham-Ba', u'Jean-François Molinari' ], maintainer=u'Lucas Frérot', email='lucas.frerot@epfl.ch', copyright=u"Copyright (©) 2016-2020 EPFL " + u"(École Polytechnique Fédérale de Lausanne), " + u"Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)" ) # ------------------------------------------------------------------------------ def detect_dependencies(env): """Detect all dependencies""" fftw_components = ['omp'] if main_env['use_mpi']: fftw_components.append('mpi') FindFFTW(env, fftw_components, precision=env['real_type']) FindBoost(env, ['boost/preprocessor/seq.hpp']) FindExpolit(env) thrust_var = 'THRUST_ROOT' # Take cuda version of thrust if available if 'CUDA_ROOT' in env['ENV']: thrust_var = 'CUDA_ROOT' FindThrust(env, env['backend'], thrust_var) # Activate cuda if needed if env['backend'] == 'cuda': FindCuda(env) if env['build_python']: FindPybind11(env) # ------------------------------------------------------------------------------ # Main compilation # ------------------------------------------------------------------------------ # Compilation colors colors = { 'cyan': '\033[96m', 'purple': '\033[95m', 'blue': '\033[94m', 'green': '\033[92m', 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' } # Inherit all environment variables (for CXX detection, etc.) 
main_env = Environment( ENV=os.environ, ) # Set tamaas information for k, v in tamaas_info.items(): main_env[k] = v main_env['COLOR_DICT'] = colors # Build variables vars = Variables('build-setup.conf') vars.AddVariables( EnumVariable('build_type', 'Build type', 'release', allowed_values=('release', 'profiling', 'debug'), ignorecase=2), EnumVariable('backend', 'Thrust backend', 'omp', allowed_values=('omp', 'tbb'), # allowed_values=('omp', 'cuda'), ignorecase=2), EnumVariable('sanitizer', 'Sanitizer type', 'none', allowed_values=('none', 'memory', 'leaks', 'address'), ignorecase=2), PathVariable('prefix', 'Prefix where to install', '/usr/local'), # Dependencies paths PathVariable('FFTW_ROOT', 'FFTW custom path', os.getenv('FFTW_ROOT', ''), PathVariable.PathAccept), PathVariable('THRUST_ROOT', 'Thrust custom path', os.getenv('THRUST_ROOT', ''), PathVariable.PathAccept), PathVariable('BOOST_ROOT', 'Boost custom path', os.getenv('BOOST_ROOT', ''), PathVariable.PathAccept), PathVariable('CUDA_ROOT', 'Cuda custom path', os.getenv('CUDA_ROOT', ''), PathVariable.PathAccept), # Dependencies provided as submodule get different default PathVariable('GTEST_ROOT', 'Googletest custom path', os.getenv('GTEST_ROOT', '#third-party/googletest/googletest'), PathVariable.PathAccept), PathVariable('PYBIND11_ROOT', 'Pybind11 custom path', os.getenv('PYBIND11_ROOT', '#third-party/pybind11/include'), PathVariable.PathAccept), PathVariable('EXPOLIT_ROOT', 'Expolit custom path', os.getenv('EXPOLIT_ROOT', '#third-party/expolit/include'), PathVariable.PathAccept), # Executables ('CXX', 'Compiler', os.getenv('CXX', 'g++')), ('MPICXX', 'MPI Compiler wrapper', os.getenv('MPICXX', 'mpicxx')), ('py_exec', 'Python executable', 'python3'), # Compiler flags ('CXXFLAGS', 'C++ compiler flags', os.getenv('CXXFLAGS', "")), # Compile legacy code BoolVariable('legacy_bem', 'Compile legacy BEM code', False), # Cosmetic BoolVariable('verbose', 'Activate verbosity', False), BoolVariable('color', 'Color the non-verbose compilation output', False), # Tamaas components BoolVariable('build_doc', 'Build documentation', False), BoolVariable('build_tests', 'Build test suite', False), BoolVariable('build_python', 'Build python wrapper', True), # Dependencies BoolVariable('use_googletest', 'Build tests using GTest', False), BoolVariable('use_mpi', 'Builds multi-process parallelism', False), # Strip binary of extra info BoolVariable('strip_info', 'Strip binary of added information', False), # Type variables EnumVariable('real_type', 'Type for real precision variables', 'double', allowed_values=('double', 'long double')), EnumVariable('integer_type', 'Type for integer variables', 'int', allowed_values=('int', 'long')), ) # Set variables of environment vars.Update(main_env) help_text = vars.GenerateHelpText(main_env) help_text += """ Commands: scons [build] [options]... Compile Tamaas (and additional modules/tests) scons install [prefix=/your/prefix] [options]... 
Install Tamaas to prefix scons dev Install symlink to Tamaas python module (useful for development purposes) scons test Run tests with pytest scons doc Compile documentation with Doxygen and Sphinx+Breathe scons archive Create a gzipped archive from source """ # noqa Help(help_text) # Save all options, not just those that differ from default with open('build-setup.conf', 'w') as setup: for option in vars.options: setup.write("# " + option.help + "\n") setup.write("{} = '{}'\n".format(option.key, main_env[option.key])) main_env['should_configure'] = \ not main_env.GetOption('clean') and not main_env.GetOption('help') build_type = main_env['build_type'] build_dir = 'build-' + main_env['build_type'] main_env['build_dir'] = build_dir if main_env['should_configure']: print("Building in " + build_dir) verbose = main_env['verbose'] # Remove colors if not set if not main_env['color']: for key in colors: colors[key] = '' if not verbose: main_env['CXXCOMSTR'] = main_env['SHCXXCOMSTR'] = \ u'{0}[Compiling ($SHCXX)] {1}$SOURCE'.format(colors['green'], colors['end']) main_env['LINKCOMSTR'] = main_env['SHLINKCOMSTR'] = \ u'{0}[Linking] {1}$TARGET'.format(colors['purple'], colors['end']) main_env['PRINT_CMD_LINE_FUNC'] = pretty_cmd_print # Include paths main_env.AppendUnique(CPPPATH=['#/src', '#/src/core', '#/src/mpi', '#/src/bem', '#/src/surface', '#/src/python', '#/src/percolation', '#/src/model', '#/src/model/elasto_plastic', '#/src/solvers', '#/src/gpu', '#/python']) # Changing the shared object extension main_env['SHOBJSUFFIX'] = '.o' # Back to gcc if cuda is activated if main_env['backend'] == "cuda" and "g++" not in main_env['CXX']: raise SCons.Errors.StopError('GCC should be used when compiling with CUDA') # OpenMP flags - compiler dependent omp_flags = { "g++": ["-fopenmp"], "clang++": ["-fopenmp"], "icpc": ["-qopenmp"] } def cxx_alias(cxx): for k in omp_flags.keys(): if k in cxx: return k raise SCons.Errors.StopError('Unsupported compiler: ' + cxx) cxx = cxx_alias(main_env['CXX']) main_env['CXXFLAGS'] = Split(main_env['CXXFLAGS']) main_env.AppendUnique(CXXFLAGS='-std=c++14 -Wall -Wextra -pedantic'.split()) # Append openmp flags regardless of backend for fftw_omp main_env.AppendUnique(CXXFLAGS=omp_flags[cxx]) main_env.AppendUnique(LINKFLAGS=omp_flags[cxx]) # Correct bug in clang?
if main_env['backend'] == 'omp' and cxx == "clang++": main_env.AppendUnique(LIBS=["atomic"]) elif main_env['backend'] == 'tbb': main_env.AppendUnique(LIBS=['tbb']) # Force deactivate legacy when using intel # Intel does not like old openmp loops if cxx == 'icpc' and main_env['legacy_bem']: print("Using intel compiler => deactivating legacy code") main_env['legacy_bem'] = False # Manage MPI compiler if main_env['use_mpi']: main_env['CXX'] = '$MPICXX' main_env.AppendUnique(CPPDEFINES=['USE_MPI']) main_env.AppendUnique(CXXFLAGS=['-Wno-cast-function-type']) # Flags and options if main_env['build_type'] == 'debug': main_env.AppendUnique(CPPDEFINES=['TAMAAS_DEBUG']) # Define the scalar types main_env.AppendUnique(CPPDEFINES={'REAL_TYPE': '${real_type}', 'INT_TYPE': '${integer_type}'}) # Compilation flags cxxflags_dict = { "debug": Split("-g -O0"), "profiling": Split("-g -O3 -fno-omit-frame-pointer"), "release": Split("-O3") } # Link flags for shared libs shlinkflags_dict = { "debug": [], "profiling": Split("-g -O3 -fno-omit-frame-pointer"), "release": [] } if main_env['sanitizer'] != 'none': if main_env['backend'] == 'cuda': raise SCons.Errors.StopError( "Sanitizers with cuda are not yet supported!") cxxflags_dict[build_type].append('-fsanitize=${sanitizer}') shlinkflags_dict[build_type].append('-fsanitize=${sanitizer}') main_env.AppendUnique(CXXFLAGS=cxxflags_dict[build_type]) main_env.AppendUnique(SHLINKFLAGS=shlinkflags_dict[build_type]) main_env.AppendUnique(LINKFLAGS=shlinkflags_dict[build_type]) -# Adding -main_env['LIBPATH'] = [abspath(join(build_dir, 'src')), - abspath(join(main_env['prefix'], 'lib'))] -main_env['RPATH'] = "$LIBPATH" - if main_env['should_configure']: detect_dependencies(main_env) # Writing information file main_env.Tool('textfile') main_env['SUBST_DICT'] = get_git_subst() # Empty values if requested if main_env['strip_info']: for k in main_env['SUBST_DICT']: main_env['SUBST_DICT'][k] = "" # Substitution of environment file main_env['SUBST_DICT'].update({ '@build_type@': '$build_type', '@build_dir@': abspath(build_dir), }) # Environment file content env_content = """export PYTHONPATH=@build_dir@/python:$$PYTHONPATH export LD_LIBRARY_PATH=@build_dir@/src:$$LD_LIBRARY_PATH """ # Writing environment file env_file = main_env.Textfile(join(build_dir, 'tamaas_environment.sh'), env_content) # Building sub-directories def subdir(dir): return SConscript(join(dir, 'SConscript'), variant_dir=join(build_dir, dir), duplicate=True) # Building Tamaas library Export('main_env') subdir('src') build_targets = ['build-cpp', env_file] install_targets = ['install-lib'] # Building Tamaas extra components for dir in ['python', 'tests']: if main_env['build_{}'.format(dir)] and not main_env.GetOption('help'): subdir(dir) build_targets.append('build-{}'.format(dir)) # Building API + Sphinx documentation if requested if main_env['build_doc']: doc_env = main_env.Clone() doc = doc_env.Command('#.phony_doc', '', 'make -C {doc} clean && make -C {doc}' .format(doc=Dir('#/doc'))) if doc_env['build_python']: doc_env.PrependENVPath('PYTHONPATH', doc_env.subst('../${build_dir}/python')) doc_env.Depends(doc, 'build-python') main_env.Alias('doc', doc) else: dummy_command(main_env, 'doc', 'Command "doc" does not do anything' ' without documentation activated ("build_doc=True")') # Define dummy dev command when python is deactivated if not main_env['build_python']: dummy_command(main_env, 'dev', 'Command "dev" does not do anything' + ' without python activated ("build_python=True")') else: 
install_targets.append('install-python') # Define dummy test command when tests are deactivated if not main_env['build_tests']: dummy_command(main_env, 'test', 'Command "test" does not do anything' + ' without tests activated ("build_tests=True")') # Definition of target aliases, a.k.a. sub-commands main_env.Alias('build', build_targets) # Define proper install targets main_env.Alias('install', install_targets) # Default target is to build stuff main_env.Default('build') # Building a tar archive archive = main_env.Command( '../tamaas-{}.tar.gz'.format(main_env['version']), '', ('tar --exclude-vcs --exclude-vcs-ignores ' '--exclude=third-party/googletest ' '--exclude=third-party/pybind11 ' '--exclude=joss ' '-czf $TARGET .'), ) main_env.Alias('archive', archive) diff --git a/doc/sphinx/source/contact.rst b/doc/sphinx/source/contact.rst index 48de2e0..657e94d 100644 --- a/doc/sphinx/source/contact.rst +++ b/doc/sphinx/source/contact.rst @@ -1,133 +1,133 @@ Solving contact =============== The resolution of contact problems is done with classes that inherit from :cpp:class:`tamaas::ContactSolver`. These usually take as argument a :cpp:class:`tamaas::Model` object, a surface described by a :cpp:class:`tamaas::Grid` or a 2D numpy array, and a tolerance. We will see the specificities of the different solver objects below. Elastic contact --------------- The most common case is normal elastic contact, which is most efficiently solved with :cpp:class:`tamaas::PolonskyKeerRey`. The advantage of this class is that it combines two algorithms into one. By default, it considers that the contact pressure field is the unknown, and tries to minimize the complementary energy of the system under the constraint that the mean pressure should be equal to the value supplied by the user, for example:: # ... solver = tm.PolonskyKeerRey(model, surface, 1e-12) solver.solve(1e-2) Here the average pressure is ``1e-2``. The solver can also be instantiated by specifying that the constraint should be on the mean gap instead of the mean pressure:: solver = tm.PolonskyKeerRey(model, surface, 1e-12, constraint_type=tm.PolonskyKeerRey.gap) solver.solve(1e-2) The contact problem is now solved for a mean gap of ``1e-2``. Note that the choice of constraint affects the performance of the algorithm. Contact with adhesion --------------------- The second algorithm hidden in :cpp:class:`tamaas::PolonskyKeerRey` considers the **gap** to be the unknown. The constraint on the mean value can be chosen as above:: solver = tm.PolonskyKeerRey(model, surface, 1e-12, primal_type=tm.PolonskyKeerRey.gap, constraint_type=tm.PolonskyKeerRey.gap) solver.solve(1e-2) The advantage of this formulation is that it can solve adhesion problems (`Rey et al. <https://doi.org/10.1007/s00466-017-1392-5>`_). This is done by adding a term to the potential energy functional that the solver tries to minimize:: adhesion_params = { "rho": 2e-3, "surface_energy": 2e-5 } adhesion = tm.ExponentialAdhesionFunctional(surface) adhesion.setParameters(adhesion_params) solver.addFunctionalTerm(adhesion) solver.solve(1e-2) Custom classes can be used in place of the example term here.
One has to inherit from :cpp:class:`tamaas::Functional`:: class AdhesionPython(tm.Functional): """ Functional class that extends a C++ class and implements the virtual methods """ def __init__(self, rho, gamma): super().__init__(self) self.rho = rho self.gamma = gamma def computeF(self, gap, pressure): return -self.gamma * np.sum(np.exp(-gap / self.rho)) def computeGradF(self, gap, gradient): gradient += self.gamma * np.exp(-gap / self.rho) / self.rho This example is actually equivalent to :cpp:class:`tamaas::functional::ExponentialAdhesionFunctional`. Tangential contact ------------------ For frictional contact, several solver classes are available. Among them, :cpp:class:`tamaas::Condat` is able to solve a coupled normal/tangential contact problem regardless of the material properties. It however solves an associated version of the Coulomb friction law. In general, the Coulomb friction used in contact makes the problem ill-posed. Solving a tangential contact problem is not much different from normal contact:: mu = 0.3 # Friction coefficient solver = tm.Condat(model, surface, 1e-12, mu) solver.setMaxIterations(5000) # The default of 1000 may be too few solver.solve([1e-2, 0, 1e-2]) # 3D components of applied mean pressure Elasto-plastic contact ---------------------- For elastic-plastic contact, one needs three different solvers: an elastic contact solver like the ones described above, a non-linear solver and a coupling solver. The non-linear solvers available in Tamaas are implemented in Python and inherit from the C++ class :cpp:class:`tamaas::EPSolver`. They make use of the non-linear solvers available in SciPy: :py:class:`DFSANESolver <tamaas.nonlinear_solvers.DFSANESolver>` Implements an interface to :py:func:`scipy.optimize.root` with the DFSANE method. :py:class:`NewtonKrylovSolver <tamaas.nonlinear_solvers.NewtonKrylovSolver>` Implements an interface to :py:func:`scipy.optimize.newton_krylov`. These solvers require a residual vector to cancel, the creation of which is handled with :cpp:class:`tamaas::ModelFactory`. Then, an instance of :cpp:class:`tamaas::EPICSolver` is needed to couple the contact and non-linear solvers for the elastic-plastic contact problem:: import tamaas as tm from tamaas.nonlinear_solvers import DFSANESolver # Definition of modeled domain model_type = tm.model_type.volume_2d discretization = [32, 51, 51] # Order: [z, x, y] flat_domain = [1, 1] system_size = [0.5] + flat_domain # Setup for plasticity residual = tm.ModelFactory.createResidual(model, sigma_y=0.1 * model.E, hardening=0.01 * model.E) epsolver = DFSANESolver(residual, model) # ... csolver = tm.PolonskyKeerRey(model, surface, 1e-12) - epic = tm.EPICSolver(csolver, epsolver, 1e7, relaxation=0.3) + epic = tm.EPICSolver(csolver, epsolver, 1e-7, relaxation=0.3) epic.solve(1e-3) # or use an accelerated scheme (which can sometimes not converge) epic.acceleratedSolve(1e-3) By default, :cpp:func:`tamaas::EPICSolver::solve` uses a relaxed fixed point. It can be tricky to make it converge: you need to decrease the relaxation parameter passed as an argument to the constructor, but this also hinders the convergence rate. The function :cpp:func:`tamaas::EPICSolver::acceleratedSolve` does not require the tweaking of a relaxation parameter, so it can be faster if the latter does not have an optimal value. However, it is not guaranteed to converge. Finally, during the first iterations, the fixed point error will be large compared to the error of the non-linear solver.
It can improve performance to have the tolerance of the non-linear solver (which is the most expensive step of the fixed point solve) decrease over the iterations. This can be achieved with the decorator class :py:class:`ToleranceManager <tamaas.nonlinear_solvers.ToleranceManager>`:: from tamaas.nonlinear_solvers import ToleranceManager, DFSANESolver @ToleranceManager(start=1e-2, end=1e-9, rate=1 / 3) class Solver(DFSANESolver): pass # ... epsolver = Solver(residual, model) diff --git a/doc/sphinx/source/rough_surfaces.rst b/doc/sphinx/source/rough_surfaces.rst index c69154a..6a1413a 100644 --- a/doc/sphinx/source/rough_surfaces.rst +++ b/doc/sphinx/source/rough_surfaces.rst @@ -1,98 +1,98 @@ Random rough surfaces ===================== The generation of random rough surfaces is controlled in Tamaas by two abstract classes: :cpp:class:`tamaas::SurfaceGenerator` and :cpp:class:`tamaas::Filter`. The former lets the user set the surface sizes and random seed, while the latter encodes the spectral information of the surface. Two surface generation methods are provided: - :cpp:class:`tamaas::SurfaceGeneratorFilter` implements a Fourier filtering algorithm (see `Hu & Tonder <https://doi.org/10.1016/0890-6955(92)90064-N>`_), - :cpp:class:`tamaas::SurfaceGeneratorRandomPhase` implements a random phase filter. Both of these rely on a :cpp:class:`tamaas::Filter` object to provide the filtering information (usually power spectral density coefficients). Tamaas provides two such classes and allows for Python subclassing: - :cpp:class:`tamaas::Isopowerlaw` provides a roll-off powerlaw, - :cpp:class:`tamaas::RegularizedPowerlaw` provides a powerlaw with a regularized roll-off. Tamaas also provides routines for surface statistics. Generating a surface -------------------- Let us now see how to generate a surface. First create a filter object and set the spectrum parameters:: import tamaas as tm # Create spectrum object spectrum = tm.Isopowerlaw2D() # Set spectrum parameters spectrum.q0 = 4 spectrum.q1 = 4 spectrum.q2 = 32 spectrum.hurst = 0.8 The ``spectrum`` object can be queried for information, such as the root-mean-square of heights, the various statistical moments, the spectrum bandwidth, etc. Then we create a generator object and build the random surface:: generator = tm.SurfaceGeneratorFilter2D() generator.setSpectrum(spectrum) generator.random_seed = 0 surface = generator.buildSurface() The ``surface`` object is a :py:class:`numpy.ndarray` wrapped around internal memory in the ``generator`` object, so a subsequent call to :py:func:`buildSurface ` may change its content. Note that if ``generator`` goes out of scope its memory will not be freed if there is still a live reference to the surface data. Custom filter ------------- Tamaas provides several classes that can be derived directly with Python classes, and :cpp:class:`tamaas::Filter` is one of them. Since it provides a single pure virtual method :cpp:func:`computeFilter `, it is easy to write a sub-class. Here we implement a class that takes a user-defined auto-correlation function and implements the :cpp:func:`computeFilter ` virtual function:: class AutocorrelationFilter(tm.Filter2D): def __init__(self, autocorrelation): - super().__init__(self) + tm.Filter2D.__init__(self) self.autocorrelation = autocorrelation.copy() def computeFilter(self, filter_coefficients): shifted_ac = np.fft.ifftshift(self.autocorrelation) filter_coefficients[...] = np.sqrt(np.fft.rfft2(shifted_ac)) Here ``filter_coefficients`` is also a :py:class:`numpy.ndarray` and is therefore easily manipulated.
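For illustration, here is one way to build the ``autocorrelation`` array expected by this class. This is only a sketch: the grid shape, domain size, rms of heights and correlation length below are arbitrary placeholder values, and the isotropic exponential form is just one example of a valid autocorrelation. The array is centered on the grid so that the ``np.fft.ifftshift`` call in ``computeFilter`` above moves its peak to the origin::

    import numpy as np

    n = 128          # points per direction (placeholder)
    size = 1.        # physical size of the periodic domain (placeholder)
    h_rms = 0.1      # root-mean-square of heights (placeholder)
    corr_len = 0.05  # correlation length (placeholder)

    # Coordinates centered on the grid, so the autocorrelation peaks in the middle
    x = np.linspace(-size / 2, size / 2, n, endpoint=False)
    x, y = np.meshgrid(x, x, indexing='ij')
    r = np.sqrt(x**2 + y**2)

    # Isotropic exponential autocorrelation of the heights
    autocorrelation = h_rms**2 * np.exp(-r / corr_len)

Any other autocorrelation (e.g. one measured on a real surface) can be substituted, provided it is a valid autocorrelation, i.e. its Fourier transform is non-negative, so that the square root taken in ``computeFilter`` is well defined.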
The creation of the surface then follows the same pattern as previously:: # Create spectrum object autocorrelation = ... # set your desired autocorrelation spectrum = AutocorrelationFilter(autocorrelation) generator = tm.SurfaceGeneratorFilter2D() generator.setSpectrum(spectrum) surface = generator.buildSurface() The lifetime of the ``spectrum`` object is associated with the ``generator`` when :py:func:`setSpectrum ` is called. Surface Statistics ------------------ Tamaas provides the C++ class :cpp:class:`tamaas::Statistics` and its wrapper :py:class:`Statistics2D ` to compute statistics on surfaces, including: - power spectral density - autocorrelation - spectrum moments -- root-mean-square of heigts :math:`\sqrt{\langle h^2 \rangle}` +- root-mean-square of heights :math:`\sqrt{\langle h^2 \rangle}` - root-mean-square of slopes (computed in Fourier domain) :math:`\sqrt{\langle |\nabla h|^2\rangle}` All these quantities are computed in a discretization-independent manner: increasing the number of points in the surface should not drastically change the computed values (for a given spectrum). This allows the discretization to be refined as much as possible to approximate a continuum. Note that the autocorrelation and PSD need to be fft-shifted for display. Here is a sample code plotting the PSD and autocorrelation:: psd = tm.Statistics2D.computePowerSpectrum(surface) psd = np.fft.fftshift(psd, axes=0) # Shifting only one axis because of R2C transform import matplotlib.pyplot as plt from matplotlib.colors import LogNorm plt.imshow(psd.real, norm=LogNorm()) acf = tm.Statistics2D.computeAutocorrelation(surface) acf = np.fft.fftshift(acf) plt.figure() plt.imshow(acf) plt.show() See ``examples/statistics.py`` for more usage examples of statistics. diff --git a/joss/paper.bibtex b/joss/paper.bibtex index b374579..677dab1 100644 --- a/joss/paper.bibtex +++ b/joss/paper.bibtex @@ -1,330 +1,337 @@ @book{bonnet_boundary_1995, title = {Boundary Integral Equation Methods for Solids and Fluids}, author = {Bonnet, Marc}, year = {1995}, publisher = {{J. Wiley}}, address = {{Chichester ; New York}}, isbn = {978-0-471-97184-9}, keywords = {Boundary element methods,Fluid mechanics,Mathematics,Mechanics; Applied}, lccn = {TA347.B69 B6848 1995}, note = {OCLC: ocm41368652} } @article{frerot_crack_2019, title = {Crack {{Nucleation}} in the {{Adhesive Wear}} of an {{Elastic}}-{{Plastic Half}}-{{Space}}}, author = {Fr{\'e}rot, Lucas and Anciaux, Guillaume and Molinari, Jean-Fran{\c c}ois}, year = {2019}, month = oct, abstract = {The detachment of material in an adhesive wear process is driven by a fracture mechanism which is controlled by a critical length-scale. Previous efforts in multi-asperity wear modeling have applied this microscopic process to rough elastic contact. However, experimental data shows that the assumption of purely elastic deformation at rough contact interfaces is unrealistic, and that asperities in contact must deform plastically to accommodate the large contact stresses. We therefore investigate the consequences of plastic deformation on the macro-scale wear response. The crack nucleation process in a rough elastic-plastic contact is investigated in a comparative study with a classical \$J\_2\$ plasticity approach and a saturation plasticity model. We show that plastic residual deformations in the \$J\_2\$ model heighten the surface tensile stresses, leading to a higher crack nucleation likelihood for contacts. This effect is shown to be stronger when the material is more ductile.
We also show that elastic interactions between contacts can increase the likelihood of individual contacts nucleating cracks, irrespective of the contact constitutive model. This is confirmed by a statistical approach we develop based on a Greenwood--Williamson model modified to take into account the elastic interactions between contacts and the shear strength of the contact junction.}, archivePrefix = {arXiv}, eprint = {1910.05163}, eprinttype = {arxiv}, journal = {arXiv:1910.05163 [cond-mat]}, keywords = {Condensed Matter - Soft Condensed Matter}, primaryClass = {cond-mat} } @article{frerot_fourieraccelerated_2019, title = {A {{Fourier}}-Accelerated Volume Integral Method for Elastoplastic Contact}, author = {Fr{\'e}rot, Lucas and Bonnet, Marc and Molinari, Jean-Fran{\c c}ois and Anciaux, Guillaume}, year = {2019}, month = jul, volume = {351}, pages = {951--976}, issn = {0045-7825}, doi = {10.1016/j.cma.2019.04.006}, abstract = {The contact of solids with rough surfaces plays a fundamental role in physical phenomena such as friction, wear, sealing, and thermal transfer. However, its simulation is a challenging problem due to surface asperities covering a wide range of length-scales. In addition, non-linear local processes, such as plasticity, are expected to occur even at the lightest loads. In this context, robust and efficient computational approaches are required. We therefore present a novel numerical method, based on integral equations, capable of handling the large discretization requirements of real rough surfaces as well as the non-linear plastic flow occurring below and at the contacting asperities. This method is based on a new derivation of the Mindlin fundamental solution in Fourier space, which leverages the computational efficiency of the fast Fourier transform. The use of this Mindlin solution allows a dramatic reduction of the memory imprint (as the Fourier coefficients are computed on-the-fly), a reduction of the discretization error, and the exploitation of the structure of the functions to speed up computation of the integral operators. We validate our method against an elastic\textendash{}plastic FEM Hertz normal contact simulation and showcase its ability to simulate contact of rough surfaces with plastic flow.}, copyright = {All rights reserved}, journal = {Computer Methods in Applied Mechanics and Engineering}, keywords = {Condensed Matter - Soft Condensed Matter,Contact,Fourier,Mindlin,Physics - Computational Physics,Plasticity,Rough surface,Volume integral equation} } @article{frerot_mechanistic_2018, title = {A Mechanistic Understanding of the Wear Coefficient: {{From}} Single to Multiple Asperities Contact}, shorttitle = {A Mechanistic Understanding of the Wear Coefficient}, author = {Fr{\'e}rot, Lucas and Aghababaei, Ramin and Molinari, Jean-Fran{\c c}ois}, year = {2018}, month = may, volume = {114}, pages = {172--184}, issn = {0022-5096}, doi = {10.1016/j.jmps.2018.02.015}, abstract = {Sliding contact between solids leads to material detaching from their surfaces in the form of debris particles, a process known as wear. According to the well-known Archard wear model, the wear volume (i.e. the volume of detached particles) is proportional to the load and the sliding distance, while being inversely proportional to the hardness. The influence of other parameters are empirically merged into a factor, referred to as wear coefficient, which does not stem from any theoretical development, thus limiting the predictive capacity of the model. 
Based on a recent understanding of a critical length-scale controlling wear particle formation, we present two novel derivations of the wear coefficient: one based on Archard's interpretation of the wear coefficient as the probability of wear particle detachment and one that follows naturally from the up-scaling of asperity-level physics into a generic multi-asperity wear model. As a result, the variation of wear rate and wear coefficient are discussed in terms of the properties of the interface, surface roughness parameters and applied load for various rough contact situations. Both new wear interpretations are evaluated analytically and numerically, and recover some key features of wear observed in experiments. This work shines new light on the understanding of wear, potentially opening a pathway for calculating the wear coefficient from first principles.}, copyright = {All rights reserved}, journal = {Journal of the Mechanics and Physics of Solids}, keywords = {Cluster statistics,Contact,Self-affine surface,Wear coefficient} } @article{hu_simulation_1992, title = {Simulation of 3-{{D}} Random Rough Surface by 2-{{D}} Digital Filter and Fourier Analysis}, author = {Hu, Y.Z. and Tonder, K.}, year = {1992}, month = feb, volume = {32}, pages = {83--90}, issn = {08906955}, doi = {10.1016/0890-6955(92)90064-N}, journal = {International Journal of Machine Tools and Manufacture}, language = {en}, number = {1-2} } @article{mindlin_thermoelastic_1950, title = {Thermoelastic {{Stress}} in the {{Semi}}-{{Infinite Solid}}}, author = {Mindlin, Raymond D. and Cheng, David H.}, year = {1950}, month = sep, volume = {21}, pages = {931--933}, issn = {0021-8979}, doi = {10.1063/1.1699786}, journal = {Journal of Applied Physics}, number = {9} } @article{persson_nature_2005, title = {On the Nature of Surface Roughness with Application to Contact Mechanics, Sealing, Rubber Friction and Adhesion}, author = {Persson, B. N. J. and Albohr, O. and Tartaglino, U. and Volokitin, A. I. and Tosatti, E.}, year = {2005}, volume = {17}, pages = {R1}, issn = {0953-8984}, doi = {10.1088/0953-8984/17/1/R01}, abstract = {Surface roughness has a huge impact on many important phenomena. The most important property of rough surfaces is the surface roughness power spectrum C ( q ). We present surface roughness power spectra of many surfaces of practical importance, obtained from the surface height profile measured using optical methods and the atomic force microscope. We show how the power spectrum determines the contact area between two solids. We also present applications to sealing, rubber friction and adhesion for rough surfaces, where the power spectrum enters as an important input.}, journal = {Journal of Physics: Condensed Matter}, language = {en}, number = {1} } @article{polonsky_numerical_1999, title = {A Numerical Method for Solving Rough Contact Problems Based on the Multi-Level Multi-Summation and Conjugate Gradient Techniques}, author = {Polonsky, I. A. and Keer, L. M.}, year = {1999}, month = jul, volume = {231}, pages = {206--219}, issn = {0043-1648}, doi = {10.1016/S0043-1648(99)00113-1}, abstract = {An alternative numerical method for solving contact problems for real rough surfaces is proposed. The real area of contact and the contact pressure distribution are determined using a single-loop iteration scheme based on the conjugate gradient method, which converges for arbitrary rough surfaces. 
The surface deflections and subsurface stresses are computed using an alternative two-dimensional multi-level multi-summation algorithm, which allows the summation error to be kept under the discretization error for any number of contact points. The proposed method is fast: rough contact problems for surface samples with 105\textendash{}106 data points are solved on a personal computer in a few hours. The numerical algorithms are described in full detail so that an interested reader can implement the new contact solver in a computer code. Numerical examples demonstrating the method advantages are presented. The method is compared with other fast contact solvers that have emerged in the last few years.}, journal = {Wear}, keywords = {Conjugate gradient techniques,Multi-level multi-summation,Rough contact problems}, number = {2} } @article{renard_constant_2013, title = {Constant Dimensionality of Fault Roughness from the Scale of Micro-Fractures to the Scale of Continents}, author = {Renard, Fran{\c c}ois and Candela, Thibault and Bouchaud, Elisabeth}, year = {2013}, volume = {40}, pages = {83--87}, issn = {1944-8007}, doi = {10.1029/2012GL054143}, abstract = {Many faults and fractures in various natural and man-made materials share a remarkable common fractal property in their morphology. We report on the roughness of faults in rocks by analyzing the out-of-plane fluctuations of slip surfaces. They display a statistical power-law relationship with a nearly constant fractal exponent from millimeter scale micro-fractures in fault zones to coastlines measuring thousands of kilometers that have recorded continental breakup. A possible origin of this striking fractal relationship over 11 orders of magnitude of length scales is that all faulting processes in rocks share common characteristics that play a crucial role in the shaping of fault surfaces, such as the effects of elastic long-range stress interactions and stress screening by mechanical heterogeneities during quasi-static fracture growth.}, copyright = {\textcopyright{}2012. American Geophysical Union. All Rights Reserved.}, journal = {Geophysical Research Letters}, language = {en}, number = {1} } @article{rey_normal_2017, title = {Normal Adhesive Contact on Rough Surfaces: Efficient Algorithm for {{FFT}}-Based {{BEM}} Resolution}, shorttitle = {Normal Adhesive Contact on Rough Surfaces}, author = {Rey, Valentine and Anciaux, Guillaume and Molinari, Jean-Fran{\c c}ois}, year = {2017}, month = mar, pages = {1--13}, issn = {0178-7675, 1432-0924}, doi = {10.1007/s00466-017-1392-5}, abstract = {We introduce a numerical methodology to compute the solution of an adhesive normal contact problem on rough surfaces with the Boundary Element Method. Based on the Fast Fourier Transform and the Westergaard's fundamental solution, the proposed algorithm enables to solve efficiently the constrained minimization problem: the numerical solution strictly verifies contact orthogonality and the algorithm takes advantage of the constraints to speed up the minimization. Comparisons with the analytical solution of the Hertz case prove the quality of the numerical computation. The method is also used to compute normal adhesive contact between rough surfaces made of multiple asperities.}, journal = {Computational Mechanics}, language = {en} } @article{rey_quantifying_2019, title = {Quantifying Uncertainties in Contact Mechanics of Rough Surfaces Using the Multilevel {{Monte Carlo}} Method}, author = {Rey, V. and Krumscheid, S. 
and Nobile, F.}, year = {2019}, month = may, volume = {138}, pages = {50--64}, issn = {0020-7225}, doi = {10.1016/j.ijengsci.2019.02.003}, abstract = {We quantify the effect of uncertainties on quantities of interest related to contact mechanics of rough surfaces. Specifically, we consider the problem of frictionless non adhesive normal contact between two semi infinite linear elastic solids subject to uncertainties. These uncertainties may for example originate from an incomplete surface description. To account for surface uncertainties, we model a rough surface as a suitable Gaussian random field whose covariance function encodes the surface's roughness, which is experimentally measurable. Then, we introduce the multilevel Monte Carlo method which is a computationally efficient sampling method for the computation of the expectation and higher statistical moments of uncertain system output's, such as those derived from contact simulations. In particular, we consider two different quantities of interest, namely the contact area and the number of contact clusters, and show via numerical experiments that the multilevel Monte Carlo method offers significant computational gains compared to an approximation via a classic Monte Carlo sampling.}, journal = {International Journal of Engineering Science}, keywords = {Contact clusters,Frictionless contact,Multilevel Monte Carlo,Random surface generator,Rough surfaces} } @article{rey_stability_2018, title = {Stability Analysis of Rough Surfaces in Adhesive Normal Contact}, author = {Rey, Valentine and Bleyer, Jeremy}, year = {2018}, month = mar, pages = {1--13}, issn = {0178-7675, 1432-0924}, doi = {10.1007/s00466-018-1556-y}, abstract = {This paper deals with adhesive frictionless normal contact between one elastic flat solid and one stiff solid with rough surface. After computation of the equilibrium solution of the energy minimization principle and respecting the contact constraints, we aim at studying the stability of this equilibrium solution. This study of stability implies solving an eigenvalue problem with inequality constraints. To achieve this goal, we propose a proximal algorithm which enables qualifying the solution as stable or unstable and that gives the instability modes. This method has a low computational cost since no linear system inversion is required and is also suitable for parallel implementation. Illustrations are given for the Hertzian contact and for rough contact.}, journal = {Computational Mechanics}, language = {en} } @article{richart_implementation_2015, title = {Implementation of a Parallel Finite-Element Library: {{Test}} Case on a Non-Local Continuum Damage Model}, shorttitle = {Implementation of a Parallel Finite-Element Library}, author = {Richart, N. and Molinari, J. F.}, year = {2015}, month = aug, volume = {100}, pages = {41--46}, issn = {0168-874X}, doi = {10.1016/j.finel.2015.02.003}, abstract = {This paper presents an efficient method to implement a damage law within an explicit time-integration scheme, in an open-source object-oriented finite-element framework. The hybrid object/vector design of the framework and implementation choices are detailed in the special case of non-local continuum damage constitutive laws. The computationally demanding aspect of such constitutive laws requires efficient algorithms, capable of using High Performance Computing (HPC) clusters. The performance of our approach is demonstrated on a numerically and physically challenging 3D dynamic brittle-fragmentation test case. 
An almost perfect scalability is achieved on parallel computations. The global dynamics and energy terms are in good agreement with classical cohesive models' predictions.}, journal = {Finite Elements in Analysis and Design}, keywords = {Continuum damage,Finite element method,Non-local approach,Parallel computing} } @article{stanley_fftbased_1997, title = {An {{FFT}}-{{Based Method}} for {{Rough Surface Contact}}}, author = {Stanley, H. M. and Kato, T.}, year = {1997}, month = jul, volume = {119}, pages = {481--485}, issn = {0742-4787}, doi = {10.1115/1.2833523}, abstract = {Elastic contact between a rigid plane and a halfspace whose surface height is described by a bandwidth-limited Fourier series is considered. The surface normal displacements and contact pressures are found by a numerical technique that exploits the structure of the Fast Fourier Transform (FFT) and an exact result in linear elasticity. The multiscale nature of rough surface contact is implicit to the method, and features such as contact agglomeration and asperity interaction\textemdash{}a source of difficulty for asperity-based models\textemdash{}evolve naturally. Both two-dimensional (2-D) and three-dimensional (3-D) contact are handled with equal ease. Finally, the implementation is simple, compact, and fast.}, journal = {Journal of Tribology}, number = {3} } @article{telles_application_1979, title = {On the Application of the Boundary Element Method to Plasticity}, author = {Telles, J. C. F. and Brebbia, C. A.}, year = {1979}, month = dec, volume = {3}, pages = {466--470}, issn = {0307-904X}, doi = {10.1016/S0307-904X(79)80030-X}, journal = {Applied Mathematical Modelling}, number = {6} } @article{yastrebov_accurate_2017, title = {On the Accurate Computation of the True Contact-Area in Mechanical Contact of Random Rough Surfaces}, author = {Yastrebov, Vladislav A. and Anciaux, Guillaume and Molinari, Jean-Fran{\c c}ois}, year = {2017}, issn = {0301-679X}, doi = {10.1016/j.triboint.2017.04.023}, abstract = {We introduce a corrective function to compensate errors in contact area computations coming from mesh discretization. The correction is based on geometrical arguments, and apart from the contact area itself requires only one additional quantity to be computed: the length of contact/non-contact interfaces. The new technique enables to evaluate accurately the true contact area using a very coarse mesh, for which the shortest wavelength in the surface spectrum reaches the grid size. The validity of the approach is demonstrated for surfaces with different fractal dimensions and different spectral content using a properly designed mesh convergence test. In addition, we use a topology preserving smoothing technique to adjust the morphology of contact clusters obtained with a coarse grid.}, journal = {Tribology International}, keywords = {Boundary element Method,Contact area,Contact area correction,Simulations,surface roughness} } @article{yastrebov_contact_2012, title = {Contact between Representative Rough Surfaces}, author = {Yastrebov, Vladislav A. and Anciaux, Guillaume and Molinari, Jean-Fran{\c c}ois}, year = {2012}, month = sep, volume = {86}, issn = {1539-3755, 1550-2376}, doi = {10.1103/PhysRevE.86.035601}, journal = {Physical Review E}, language = {en}, number = {3} } @article{yastrebov_contact_2014, title = {The {{Contact}} of {{Elastic Regular Wavy Surfaces Revisited}}}, author = {Yastrebov, Vladislav A. 
and Anciaux, Guillaume and Molinari, Jean-Fran{\c c}ois}, year = {2014}, month = oct, volume = {56}, pages = {171--183}, issn = {1023-8883, 1573-2711}, doi = {10.1007/s11249-014-0395-z}, abstract = {We revisit the classic problem of an elastic solid with a two-dimensional wavy surface squeezed against an elastic flat half-space from infinitesimal to full contact. Through extensive numerical calculations and analytic derivations, we discover previously overlooked transition regimes. These are seen in particular in the evolution with applied load of the contact area and perimeter, the mean pressure and the probability density of contact pressure. These transitions are correlated with the contact area shape, which is affected by long range elastic interactions. Our analysis has implications for general random rough surfaces, as similar local transitions occur continuously at detached areas or coalescing contact zones. We show that the probability density of null contact pressures is nonzero at full contact. This might suggest revisiting the conditions necessary for applying Persson's model at partial contacts and guide the comparisons with numerical simulations. We also address the evaluation of the contact perimeter for discrete geometries and the applicability of Westergaard's solution for three-dimensional geometries.}, journal = {Tribology Letters}, language = {en}, number = {1} } @article{yastrebov_infinitesimal_2015, title = {From Infinitesimal to Full Contact between Rough Surfaces: {{Evolution}} of the Contact Area}, shorttitle = {From Infinitesimal to Full Contact between Rough Surfaces}, author = {Yastrebov, Vladislav A. and Anciaux, Guillaume and Molinari, Jean-Fran{\c c}ois}, year = {2015}, month = jan, volume = {52}, pages = {83--102}, issn = {00207683}, doi = {10.1016/j.ijsolstr.2014.09.019}, journal = {International Journal of Solids and Structures}, language = {en} } @article{yastrebov_role_2017, title = {The Role of the Roughness Spectral Breadth in Elastic Contact of Rough Surfaces}, author = {Yastrebov, Vladislav A. and Anciaux, Guillaume and Molinari, Jean-Fran{\c c}ois}, year = {2017}, month = oct, volume = {107}, pages = {469--493}, issn = {0022-5096}, doi = {10.1016/j.jmps.2017.07.016}, abstract = {We study frictionless and non-adhesive contact between elastic half-spaces with self-affine surfaces. Using a recently suggested corrective technique, we ensure an unprecedented accuracy in computation of the true contact area evolution under increasing pressure. This accuracy enables us to draw conclusions on the role of the surface's spectrum breadth (Nayak parameter) in the contact area evolution. We show that for a given normalized pressure, the contact area decreases logarithmically with the Nayak parameter. By linking the Nayak parameter with the Hurst exponent (or fractal dimension), we show the effect of the latter on the true contact area. This effect, undetectable for surfaces with poor spectral content, is quite strong for surfaces with rich spectra. Numerical results are compared with analytical models and other available numerical results. A phenomenological equation for the contact area growth is suggested with coefficients depending on the Nayak parameter. Using this equation, the pressure-dependent friction coefficient is deduced based on the adhesive theory of friction. Some observations on Persson's model of rough contact, whose prediction does not depend on Nayak parameter, are reported. 
Overall, the paper provides a unifying picture of rough elastic contact and clarifies discrepancies between preceding results.}, journal = {Journal of the Mechanics and Physics of Solids}, keywords = {Contact area,Hurst exponent,Nayak parameter,Pressure-dependent friction,Roughness,Spectrum breadth} } @article{brink_parameter_2020, title = {A Parameter-Free Mechanistic Model of the Adhesive Wear Process of Rough Surfaces in Sliding Contact}, author = {Brink, Tobias and Fr{\'e}rot, Lucas and Molinari, Jean-Fran{\c c}ois}, year = {2020}, month = apr, archivePrefix = {arXiv}, eprint = {2004.00559}, eprinttype = {arxiv}, journal = {arXiv:2004.00559 [physics]}, keywords = {Physics - Applied Physics}, primaryClass = {physics} } @article{archard_elastic_1957, title={Elastic deformation and the laws of friction}, author={Archard, J.F.}, journal={Proceedings of the Royal Society of London. Series A. Mathematical and Physical Sciences}, volume={243}, number={1233}, pages={190--205}, year={1957}, publisher={The Royal Society London} } @article{condat_primal_2012, title = {A {{Primal}}\textendash{{Dual Splitting Method}} for {{Convex Optimization Involving Lipschitzian}}, {{Proximable}} and {{Linear Composite Terms}}}, author = {Condat, Laurent}, year = {2012}, month = dec, volume = {158}, pages = {460--479}, issn = {0022-3239, 1573-2878}, doi = {10.1007/s10957-012-0245-9}, journal = {Journal of Optimization Theory and Applications}, language = {en}, number = {2} } @article{almqvist_dry_2007, title = {On the Dry Elasto-Plastic Contact of Nominally Flat Surfaces}, author = {Almqvist, A. and Sahlin, F. and Larsson, R. and Glavatskih, S.}, year = {2007}, month = apr, volume = {40}, pages = {574--579}, issn = {0301679X}, doi = {10.1016/j.triboint.2005.11.008}, journal = {Tribology International}, language = {en}, number = {4} } + +@misc{pybind11, + author = {Wenzel Jakob and Jason Rhinelander and Dean Moldovan}, + year = {2017}, + note = {https://github.com/pybind/pybind11}, + title = {pybind11 -- Seamless operability between C++11 and Python} +} \ No newline at end of file diff --git a/joss/paper.md b/joss/paper.md index d47a550..f8ada02 100644 --- a/joss/paper.md +++ b/joss/paper.md @@ -1,159 +1,158 @@ --- title: 'Tamaas: a library for elastic-plastic contact of periodic rough surfaces' tags: - C++ - Python - contact - rough surface - plasticity authors: - name: Lucas Frérot orcid: 0000-0002-4138-1052 affiliation: 1 - name: Guillaume Anciaux orcid: 0000-0002-9624-5621 affiliation: 1 - name: Valentine Rey orcid: 0000-0003-1019-1819 affiliation: 2 - name: Son Pham-Ba orcid: 0000-0003-3451-7297 affiliation: 1 - name: Jean-François Molinari orcid: 0000-0002-1728-1844 affiliation: 1 affiliations: - name: Civil Engineering Institute, École Polytechnique Fédérale de Lausanne, Switzerland index: 1 - name: Université de Nantes Sciences et Techniques, Nantes, France index: 2 date: 13 December 2019 bibliography: paper.bibtex --- # Summary Physical phenomena that happen at solid contact interfaces, such as friction and wear, are largely entwined with the roughness of the surfaces in contact. For example, the fact that the friction force between two solids in contact is independent of their apparent contact area is due to roughness, as the solids are only in contact over a smaller "true contact area" which only depends on the normal force [@archard_elastic_1957]. 
Roughness occurs on most man-made and natural surfaces [@persson_nature_2005] and can span many orders of magnitude, from the nanometer scale to the kilometer scale [@renard_constant_2013]. This poses a serious challenge to conventional numerical approaches in solid mechanics such as the finite-element method (FEM). Boundary integral methods [@bonnet_boundary_1995] are commonly employed in place of the FEM for rough elastic contact because of an inherent dimensionality reduction: the computational effort is focused on the contact interface whereas the FEM requires discretization of the volume of the solids in contact. In addition, the use of a half-space geometry provides a translational invariance: the computation of periodic equilibrium solutions can then be accelerated with the fast-Fourier Transform [@stanley_fftbased_1997]. However, because of the roughness, the total contact load is distributed over a small area and local contact pressures are expected to cause non-linear material behavior, such as plasticity. In this case, volume integral methods can be employed to account for plastic deformation [@telles_application_1979]. These enjoy properties analogous to boundary integral methods and can also be accelerated with a Fourier approach [@frerot_fourieraccelerated_2019]. Taking plasticity into account is necessary in the accurate description of contact interfaces for the understanding of friction and wear. Moreover, high performance implementations are needed to model realistic rough surfaces with roughness spanning many orders of magnitude in scale. -``Tamaas`` is a C++ library with a Python interface, developed in the -[Computational Solid Mechanics -Laboratory](https://www.epfl.ch/labs/lsms) at EPFL, that implements a -unique Fourier-accelerated volume integral formulation of equilibrium -[@frerot_fourieraccelerated_2019] for the solution of elastic-plastic -rough contact problems. The use of C++ allows for a particular focus -on performance: most loops are paralelized using ``Thrust/OpenMP`` and -the fast-Fourier transforms are computed with ``FFTW3/OpenMP``. Thanks -to this, it can handle simulations with upwards of 100 million degrees -of freedom on a single compute node [@frerot_fourieraccelerated_2019]. -``Tamaas`` is aimed at researchers and practitioners wishing to compute -realistic contact solutions for the study of interface phenomena. +``Tamaas`` is a C++ library with a Python interface [@pybind11], developed in +the [Computational Solid Mechanics Laboratory](https://www.epfl.ch/labs/lsms) at +EPFL, that implements a unique Fourier-accelerated volume integral formulation +of equilibrium [@frerot_fourieraccelerated_2019] for the solution of +elastic-plastic rough contact problems. The use of C++ allows for a particular +focus on performance: most loops are parallelized using ``Thrust/OpenMP`` and the +fast-Fourier transforms are computed with ``FFTW3/OpenMP``. Thanks to this, it +can handle simulations with upwards of 100 million degrees of freedom on a +single compute node [@frerot_fourieraccelerated_2019]. ``Tamaas`` is aimed at +researchers and practitioners wishing to compute realistic contact solutions for +the study of interface phenomena. # Features and Performance ``Tamaas`` provides access in its Python API to random rough surface generation procedures (e.g. @hu_simulation_1992), statistical tools (e.g.
autocorrelation and power spectrum computations) and a variety of contact algorithms: - Normal and adhesive contact schemes based on the conjugate gradient [@polonsky_numerical_1999; @rey_normal_2017] and using the boundary integral method; - Associated frictional contact using proximal algorithms [@condat_primal_2012]; - Elastic-plastic contact using the Fourier-accelerated volume integral method [@frerot_fourieraccelerated_2019] and saturated surface pressure [@almqvist_dry_2007]. We are not aware of any public software package providing implementations of all of the above methods, although the web-based package [contact.engineering](https://contact.engineering/) allows elastic normal contact solutions using a boundary integral method as well. ``Tamaas`` also exposes in its Python API the accelerated linear operators it uses to compute equilibrium solutions, making prototyping new algorithms convenient. We compare in figure 1 the scaling properties of ``Tamaas`` to a reference high-performance C++ FEM implementation named [``Akantu``](https://gitlab.com/akantu/akantu) [@richart_implementation_2015] which uses the direct solver [MUMPS](http://mumps.enseeiht.fr/). The reference problem is the elastic equilibrium of a half-space with an isotropic spherical inclusion [@mindlin_thermoelastic_1950], which is computed in serial for both implementations. $N$ represents the number of points in the computational domain. For large $N$, ``Tamaas`` is two orders of magnitude faster than ``Akantu``. ![Scaling comparison between the accelerated volume integral method implemented in ``Tamaas`` and a finite-element method with a direct solver for the solution of the equilibrium of a half-space with a spherical isotropic inclusion. $N$ is the number of points in the computational domain. When $N=2^{18}$ ``Tamaas`` is 200 times faster than the FEM implementation ``Akantu``.](complexity.pdf) Figure 2 shows the sub-surface plastic zones in a rough contact simulation, with color indicating their depth. The Fourier-accelerated approach allows an unprecedented level of detail on the topography of the zones which can have an influence on friction and wear [@frerot_crack_2019]. ![Sub-surface plastic zones in an elastic-plastic rough contact simulation. Lighter shades are zones deeper below the contact interface. The simulation used to produce this picture had more than 100 million degrees of freedom and ran on a single compute node (2 $\times$ 14 Intel Broadwell cores + 128GB RAM).](plastic_zones.png) The following publications have been made possible with ``Tamaas``: - @yastrebov_contact_2012 - @yastrebov_contact_2014 - @yastrebov_infinitesimal_2015 - @yastrebov_accurate_2017 - @yastrebov_role_2017 - @rey_normal_2017 - @rey_stability_2018 - @rey_quantifying_2019 - @frerot_mechanistic_2018 - @frerot_fourieraccelerated_2019 - @frerot_crack_2019 - @brink_parameter_2020 # Acknowledgements We acknowledge the financial support of the Swiss National Science Foundation (grant #162569 "Contact mechanics of rough surfaces").
# References diff --git a/python/SConscript b/python/SConscript index 216b4f5..ed088af 100644 --- a/python/SConscript +++ b/python/SConscript @@ -1,145 +1,153 @@ # -*- mode:python; coding: utf-8 -*- # vim: set ft=python: # @file # @section LICENSE # # Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . from __future__ import print_function from os.path import abspath, join from SCons.Script import Import, Split, Copy, Dir Import('main_env') # Pybind11 wrapper env_pybind = main_env.Clone(SHLIBPREFIX='') # Remove pedantic warnings cxx_flags = env_pybind['CXXFLAGS'] try: del cxx_flags[cxx_flags.index('-pedantic')] except ValueError: pass env_pybind.Tool(pybind11) pybind_sources = Split(""" tamaas_module.cpp wrap/core.cpp wrap/percolation.cpp wrap/surface.cpp wrap/model.cpp wrap/solvers.cpp wrap/compute.cpp wrap/test_features.cpp """) # Adding legacy wrap code if env_pybind['legacy_bem']: env_pybind.AppendUnique(CPPDEFINES=['LEGACY_BEM']) pybind_sources += ["wrap/bem.cpp"] +# Setting paths to find libTamaas +env_pybind.AppendUnique( + LIBPATH=[Dir(join('#${build_dir}', 'src'))], + RPATH=[ + "'$$$$ORIGIN/../../src'", #< path to lib in build_dir + "'$$$$ORIGIN/../../..'", #< path to lib in install prefix +]) + # Building the pybind library tamaas_wrap = env_pybind.Pybind11Module( target='tamaas/_tamaas', source=pybind_sources, LIBS=['Tamaas'], ) # For some reason link happens too early Import('libTamaas') env_pybind.Depends(tamaas_wrap, libTamaas) # Copying the __init__.py file with extra python classes copy_env = env_pybind.Clone() # Copying additional python files python_files = """ compute.py dumpers/__init__.py dumpers/_helper.py nonlinear_solvers/__init__.py """.split() targets = [tamaas_wrap] targets += [ copy_env.Command(join('tamaas', f), join(abspath(str(Dir('#python/tamaas'))), f), Copy("$TARGET", "$SOURCE")) for f in python_files ] targets.append(copy_env.Command('MANIFEST.in', '#python/MANIFEST.in', Copy("$TARGET", "$SOURCE"))) subst_env = env_pybind.Clone( SUBST_DICT={ '@version@': '$version', '@authors@': str(copy_env['authors']), '@email@': '$email', # TODO change when issue with unicode fixed # '@copyright@': '$copyright', # '@maintainer@': '$maintainer', } ) subst_env.Tool('textfile') targets.append(subst_env.Substfile('setup.py.in')) targets.append(subst_env.Substfile('tamaas/__init__.py.in')) # Defining alias for python builds main_env.Alias('build-python', targets) # Checking if we can use pip to install (more convenient for end-user) conf = Configure(main_env, custom_tests={'CheckPythonModule': CheckPythonModule}) has_pip = conf.CheckPythonModule('pip') install_env = conf.Finish() # Current build directory install_env['PYDIR'] = Dir('.') # Setting command line for installation if has_pip: - install_env['PYINSTALLCOM'] = '${py_exec} -m pip install $PYOPTIONS .' 
+ install_env['PYINSTALLCOM'] = '${py_exec} -m pip install -U $PYOPTIONS .' install_env['PYDEVELOPCOM'] = \ '${py_exec} -m pip install $PYOPTIONS -e .[solvers,dumpers]' else: install_env['PYINSTALLCOM'] = '${py_exec} setup.py install $PYOPTIONS' install_env['PYDEVELOPCOM'] = '${py_exec} setup.py develop $PYOPTIONS' install_env['py_version'] = get_python_version(install_env) install_env.PrependENVPath( 'PYTHONPATH', install_env.subst('${prefix}/lib/python${py_version}/site-packages')) # Specify install target python_install = install_env.Command( join('$prefix', 'dummy_target'), targets, install_env['PYINSTALLCOM'], PYOPTIONS='--prefix ${prefix}', chdir=install_env['PYDIR']) python_install_dev = install_env.Command( join('$prefix', 'dummy_target_local'), targets, install_env['PYDEVELOPCOM'], PYOPTIONS='--user', chdir=install_env['PYDIR']) main_env.Alias('install-python', python_install) main_env.Alias('dev', python_install_dev) diff --git a/python/tamaas/nonlinear_solvers/__init__.py b/python/tamaas/nonlinear_solvers/__init__.py index ae63365..42c4c67 100644 --- a/python/tamaas/nonlinear_solvers/__init__.py +++ b/python/tamaas/nonlinear_solvers/__init__.py @@ -1,220 +1,222 @@ # -*- mode:python; coding: utf-8 -*- # @file # @section LICENSE # # Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . """ Pulling solvers to nonlinear_solvers module """ from sys import stderr from functools import wraps import numpy as np from scipy.sparse.linalg import LinearOperator, lgmres from scipy.linalg import norm from scipy.optimize import newton_krylov, root -from .. import EPSolver +from .. import EPSolver, Logger, LogLevel __all__ = ['NLNoConvergence', 'DFSANESolver', 'NewtonKrylovSolver', 'ToleranceManager'] class NLNoConvergence(Exception): """Convergence not reached exception""" class NewtonRaphsonSolver(EPSolver): """ Newton-Raphson based on CTO """ def __init__(self, residual, model): super(NewtonRaphsonSolver, self).__init__(residual) self.model = model self._residual = self.getResidual() self._x = self.getStrainIncrement() self.last_solution = np.zeros_like(self._x) def applyTangent(self, _input, strain): _input = np.reshape(_input, strain.shape) out = np.zeros_like(_input) self._residual.applyTangent(out, _input, strain) return out def solve(self): self._x[...] = 0 # For initial guess, compute the strain due to boundary tractions self._residual.computeResidual(self._x) self._x[...] 
= self._residual.getVector() res_vec = np.ravel(self._residual.getVector()) initial_norm = norm(res_vec) self._residual.computeResidual(self._x) n_max = 100 i = 0 while norm(res_vec) / initial_norm > 1e-10 and i < n_max: # Making linear tangent tangent = LinearOperator( (self._x.size, self._x.size), matvec=lambda x: self.applyTangent(x, self._x)) res_vec *= -1 correction, ok = lgmres(tangent, res_vec, tol=1e-12, maxiter=100) if ok != 0: raise Exception("LGMRES not converged") self._x += np.reshape(correction, self._x.shape) self._residual.computeResidual(self._x) i += 1 - print("Solved Newton-Raphson in {} iterations".format(i), file=stderr) + Logger().get(LogLevel.info) << \ + "Solved Newton-Raphson in {} iterations".format(i) # Computing the strain correction self.last_solution *= -1 self.last_solution += self._x # Computing displacements self._residual.computeResidualDisplacement(self.last_solution) self.last_solution[...] = self._x[...] class ScipySolver(EPSolver): """ Base class for solvers wrapping SciPy routines """ def __init__(self, residual, model, callback=None): super(ScipySolver, self).__init__(residual) self.model = model self.callback = callback self._x = self.getStrainIncrement() self._residual = self.getResidual() self.options = {'ftol': 0, 'fatol': 1e-9} @property def tolerance(self): "Return solver absolute tolerance" return self.options['fatol'] @tolerance.setter def tolerance(self, v): "Set solver absolute tolerance" self.options['fatol'] = v def solve(self): """ Solve the nonlinear plasticity equation using the scipy_solve routine """ # For initial guess, compute the strain due to boundary tractions # self._residual.computeResidual(self._x) # self._x[...] = self._residual.getVector() # Scipy root callback def compute_residual(x): self._residual.computeResidual(x) return self._residual.getVector().copy() # Solve self._x[...] = self.scipy_solve(compute_residual) # Computing displacements self._residual.computeResidualDisplacement(self._x) def reset(self): self._x[...] 
= 0 class NewtonKrylovSolver(ScipySolver): """ Solve using a finite-difference Newton-Krylov method """ def __init__(self, residual, model, callback=None): ScipySolver.__init__(self, residual, model, callback) def scipy_solve(self, compute_residual): "Solve R(delta epsilon) = 0 using a newton-krylov method" try: return newton_krylov(compute_residual, self._x, f_tol=self.options['fatol'], verbose=True, callback=self.callback) except Exception as e: raise NLNoConvergence(e.what()) class DFSANESolver(ScipySolver): """ Solve using a spectral residual jacobianless method """ def __init__(self, residual, model, callback=None): ScipySolver.__init__(self, residual, model, callback) def scipy_solve(self, compute_residual): "Solve R(delta epsilon) = 0 using a df-sane method" solution = root(compute_residual, self._x, method='df-sane', options=self.options, callback=self.callback) - print("DF-SANE: {} ({} iterations, {})".format( - solution.message, - solution.nit, - self.options), file=stderr) + Logger().get(LogLevel.info) << \ + "DF-SANE: {} ({} iterations, {})".format( + solution.message, + solution.nit, + self.options) if not solution.success: raise NLNoConvergence("DF-SANE did not converge") return solution.x.copy() def ToleranceManager(start, end, rate, key='fatol'): "Decorator to manage tolerance of non-linear solver" start /= rate # just anticipating first multiplication def actual_decorator(cls): orig_init = cls.__init__ orig_solve = cls.scipy_solve orig_update_state = cls.updateState @wraps(cls.__init__) def __init__(obj, *args, **kwargs): orig_init(obj, *args, **kwargs) obj.options[key] = start @wraps(cls.scipy_solve) def scipy_solve(obj, *args, **kwargs): ftol = obj.options[key] ftol *= rate obj.options[key] = max(ftol, end) return orig_solve(obj, *args, **kwargs) @wraps(cls.updateState) def updateState(obj, *args, **kwargs): obj.options[key] = start return orig_update_state(obj, *args, **kwargs) cls.__init__ = __init__ cls.scipy_solve = scipy_solve cls.updateState = updateState return cls return actual_decorator diff --git a/python/wrap/core.cpp b/python/wrap/core.cpp index 8c58609..bc72938 100644 --- a/python/wrap/core.cpp +++ b/python/wrap/core.cpp @@ -1,114 +1,130 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "logger.hh" #include "statistics.hh" #include "surface_statistics.hh" #include "wrap.hh" #include /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ namespace wrap { +using namespace py::literals; + /* -------------------------------------------------------------------------- */ template void wrapStatistics(py::module& mod) { auto name = makeDimensionName("Statistics", dim); py::class_>(mod, name.c_str()) .def_static("computePowerSpectrum", &Statistics::computePowerSpectrum, py::return_value_policy::move) .def_static("computeAutocorrelation", &Statistics::computeAutocorrelation, py::return_value_policy::move) .def_static("computeMoments", &Statistics::computeMoments) .def_static("computeSpectralRMSSlope", &Statistics::computeSpectralRMSSlope) .def_static("computeRMSHeights", &Statistics::computeRMSHeights); } /* -------------------------------------------------------------------------- */ void wrapCore(py::module& mod) { // Exposing logging facility py::enum_(mod, "LogLevel") .value("debug", LogLevel::debug) .value("info", LogLevel::info) .value("warning", LogLevel::warning) .value("error", LogLevel::error); mod.def("set_log_level", [](LogLevel level) { Logger::setLevel(level); }); + py::class_(mod, "Logger") + .def(py::init<>()) + .def("get", + [](Logger& logger, LogLevel level) -> Logger& { + logger.get(level); + return logger; + }) + .def("__lshift__", [](Logger& logger, std::string msg) -> Logger& { + if (logger.getWishLevel() >= Logger::getCurrentLevel()) { + py::print(msg, "file"_a = py::module::import("sys").attr("stderr")); + } + return logger; + }); + // Exposing SurfaceStatistics (legacy) #if defined(LEGACY_BEM) py::class_(mod, "SurfaceStatistics") - .def_static("computeMaximum", &SurfaceStatistics::computeMaximum) - .def_static("computeMinimum", &SurfaceStatistics::computeMinimum) - .def_static("computeSpectralRMSSlope", - &SurfaceStatistics::computeSpectralRMSSlope) - .def_static("computeRMSSlope", &SurfaceStatistics::computeRMSSlope) - .def_static("computeMoments", &SurfaceStatistics::computeMoments) - .def_static("computeSkewness", &SurfaceStatistics::computeSkewness) - .def_static("computeKurtosis", &SurfaceStatistics::computeKurtosis) - .def_static("computeSpectralMeanCurvature", - &SurfaceStatistics::computeSpectralMeanCurvature) - .def_static("computeSpectralStdev", - &SurfaceStatistics::computeSpectralStdev) - .def_static("computeAnalyticFractalMoment", - &SurfaceStatistics::computeAnalyticFractalMoment) - .def_static("computePerimeter", &SurfaceStatistics::computePerimeter) - .def_static("computeContactArea", &SurfaceStatistics::computeContactArea) - .def_static("computeContactAreaRatio", - &SurfaceStatistics::computeContactAreaRatio) - .def_static("computeSpectralDistribution", - &SurfaceStatistics::computeSpectralDistribution) - .def_static("computeSum", &SurfaceStatistics::computeSum) - .def_static("computeAutocorrelation", - &SurfaceStatistics::computeAutocorrelation, - py::return_value_policy::copy) - .def_static("computePowerSpectrum", - &SurfaceStatistics::computePowerSpectrum, - py::return_value_policy::copy); + .def_static("computeMaximum", &SurfaceStatistics::computeMaximum) + .def_static("computeMinimum", &SurfaceStatistics::computeMinimum) + .def_static("computeSpectralRMSSlope", + &SurfaceStatistics::computeSpectralRMSSlope) + 
.def_static("computeRMSSlope", &SurfaceStatistics::computeRMSSlope) + .def_static("computeMoments", &SurfaceStatistics::computeMoments) + .def_static("computeSkewness", &SurfaceStatistics::computeSkewness) + .def_static("computeKurtosis", &SurfaceStatistics::computeKurtosis) + .def_static("computeSpectralMeanCurvature", + &SurfaceStatistics::computeSpectralMeanCurvature) + .def_static("computeSpectralStdev", + &SurfaceStatistics::computeSpectralStdev) + .def_static("computeAnalyticFractalMoment", + &SurfaceStatistics::computeAnalyticFractalMoment) + .def_static("computePerimeter", &SurfaceStatistics::computePerimeter) + .def_static("computeContactArea", &SurfaceStatistics::computeContactArea) + .def_static("computeContactAreaRatio", + &SurfaceStatistics::computeContactAreaRatio) + .def_static("computeSpectralDistribution", + &SurfaceStatistics::computeSpectralDistribution) + .def_static("computeSum", &SurfaceStatistics::computeSum) + .def_static("computeAutocorrelation", + &SurfaceStatistics::computeAutocorrelation, + py::return_value_policy::copy) + .def_static("computePowerSpectrum", + &SurfaceStatistics::computePowerSpectrum, + py::return_value_policy::copy); #endif wrapStatistics<1>(mod); wrapStatistics<2>(mod); mod.def("to_voigt", [](const Grid& field) { if (field.getNbComponents() == 9) { Grid voigt(field.sizes(), 6); Loop::loop([](auto in, auto out) { out.symmetrize(in); }, range>(field), range>(voigt)); return voigt; } else TAMAAS_EXCEPTION("Wrong number of components to symmetrize"); }, "Convert a 3D tensor field to voigt notation", py::return_value_policy::copy); } } // namespace wrap /* -------------------------------------------------------------------------- */ } // namespace tamaas diff --git a/python/wrap/model.cpp b/python/wrap/model.cpp index 41ba139..8805685 100644 --- a/python/wrap/model.cpp +++ b/python/wrap/model.cpp @@ -1,284 +1,285 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "model.hh" #include "adhesion_functional.hh" #include "functional.hh" #include "integral_operator.hh" #include "model_dumper.hh" #include "model_extensions.hh" #include "model_factory.hh" #include "numpy.hh" #include "residual.hh" #include "wrap.hh" #include /* -------------------------------------------------------------------------- */ namespace tamaas { namespace wrap { using namespace py::literals; /// Wrap functional classes void wrapFunctionals(py::module& mod) { - py::class_ func( - mod, "Functional"); + py::class_, + functional::wrap::PyFunctional> + func(mod, "Functional"); func.def(py::init<>()) .def("computeF", &functional::Functional::computeF) .def("computeGradF", &functional::Functional::computeGradF); py::class_ adh(mod, "AdhesionFunctional", func); adh.def_property("parameters", &functional::AdhesionFunctional::getParameters, &functional::AdhesionFunctional::setParameters) // legacy wrapper code .def("setParameters", &functional::AdhesionFunctional::setParameters); py::class_( mod, "ExponentialAdhesionFunctional", adh) .def(py::init&>(), "surface"_a); py::class_( mod, "MaugisAdhesionFunctional", adh) .def(py::init&>(), "surface"_a); py::class_( mod, "SquaredExponentialAdhesionFunctional", adh) .def(py::init&>(), "surface"_a); } template std::unique_ptr> instanciateFromNumpy(numpy& num) { std::unique_ptr> result = nullptr; switch (num.ndim()) { case 2: result = std::make_unique>>(num); return result; case 3: result = std::make_unique>>(num); return result; case 4: result = std::make_unique>>(num); return result; default: TAMAAS_EXCEPTION("instanciateFromNumpy expects the last dimension of numpy " "array to be the number of components"); } } /// Wrap IntegralOperator void wrapIntegralOperator(py::module& mod) { py::class_(mod, "IntegralOperator") .def("apply", [](IntegralOperator& op, numpy input, numpy output) { auto in = instanciateFromNumpy(input); auto out = instanciateFromNumpy(output); op.apply(*in, *out); }) .def("updateFromModel", &IntegralOperator::updateFromModel) .def("getModel", &IntegralOperator::getModel, py::return_value_policy::reference) .def("getKind", &IntegralOperator::getKind) .def("getType", &IntegralOperator::getType); py::enum_(mod, "integration_method") .value("linear", integration_method::linear) .value("cutoff", integration_method::cutoff); } /// Wrap BEEngine classes void wrapBEEngine(py::module& mod) { py::class_(mod, "BEEngine") .def("solveNeumann", &BEEngine::solveNeumann) .def("solveDirichlet", &BEEngine::solveDirichlet) .def("getModel", &BEEngine::getModel, py::return_value_policy::reference) .def("registerNeumann", &BEEngine::registerNeumann) .def("registerDirichlet", &BEEngine::registerDirichlet); } /// Wrap Models void wrapModelClass(py::module& mod) { py::enum_(mod, "model_type") .value("basic_1d", model_type::basic_1d) .value("basic_2d", model_type::basic_2d) .value("surface_1d", model_type::surface_1d) .value("surface_2d", model_type::surface_2d) .value("volume_1d", model_type::volume_1d) .value("volume_2d", model_type::volume_2d); py::class_(mod, "Model") .def_property_readonly("type", &Model::getType) .def("setElasticity", &Model::setElasticity, "E"_a, "nu"_a) .def_property("E", &Model::getYoungModulus, &Model::setYoungModulus) .def_property("nu", &Model::getPoissonRatio, &Model::setPoissonRatio) .def("getHertzModulus", &Model::getHertzModulus) .def("getYoungModulus", &Model::getYoungModulus) .def("getShearModulus", &Model::getShearModulus) 
.def("getPoissonRatio", &Model::getPoissonRatio) .def("getTraction", (GridBase & (Model::*)()) & Model::getTraction, py::return_value_policy::reference_internal) .def("getDisplacement", (GridBase & (Model::*)()) & Model::getDisplacement, py::return_value_policy::reference_internal) .def("getSystemSize", &Model::getSystemSize) .def("getDiscretization", &Model::getDiscretization) .def("getBoundarySystemSize", &Model::getBoundarySystemSize) .def("getBoundaryDiscretization", &Model::getBoundaryDiscretization) .def("solveNeumann", &Model::solveNeumann) .def("solveDirichlet", &Model::solveDirichlet) .def("dump", &Model::dump) .def("addDumper", &Model::addDumper, "dumper"_a, py::keep_alive<1, 2>()) .def("setDumper", [](Model&, const ModelDumper&) { TAMAAS_EXCEPTION("setDumper() is not a member of Model; use " "addDumper() instead"); }) .def("getBEEngine", &Model::getBEEngine, py::return_value_policy::reference_internal) .def("getIntegralOperator", &Model::getIntegralOperator, "operator_name"_a, py::return_value_policy::reference_internal) .def("registerField", [](Model& m, std::string name, numpy field) { auto f = instanciateFromNumpy(field); m.registerField(name, std::move(f)); }, "field_name"_a, "field"_a, py::keep_alive<1, 3>()) .def("getField", (GridBase & (Model::*)(const std::string&)) & Model::getField, "field_name"_a, py::return_value_policy::reference_internal) .def("getFields", &Model::getFields) .def("applyElasticity", [](Model& model, numpy stress, numpy strain) { auto out = instanciateFromNumpy(stress); auto in = instanciateFromNumpy(strain); model.applyElasticity(*out, *in); }) .def("__repr__", [](const Model& m) { std::stringstream ss; ss << m; return ss.str(); }) .def("__getitem__", [](const Model& m, std::string key) -> const GridBase& { try { return m.getField(key); } catch (std::out_of_range&) { throw py::key_error(); } }, py::return_value_policy::reference_internal) .def("__contains__", [](const Model& m, std::string key) { const auto fields = m.getFields(); return std::find(fields.begin(), fields.end(), key) != fields.end(); }, py::keep_alive<0, 1>()) .def("__iter__", [](const Model& m) { const auto& fields = m.getFieldsMap(); return py::make_key_iterator(fields.cbegin(), fields.cend()); }, py::keep_alive<0, 1>()); py::class_>( mod, "ModelDumper") .def(py::init<>()) .def("dump", &ModelDumper::dump, "model"_a) .def("__lshift__", [](ModelDumper& dumper, Model& model) { dumper << model; }); } /// Wrap factory for models void wrapModelFactory(py::module& mod) { py::class_(mod, "ModelFactory") .def_static("createModel", &ModelFactory::createModel, "model_type"_a, "system_size"_a, "discretization"_a) .def_static("createResidual", &ModelFactory::createResidual, "model_type"_a, "sigma_y"_a, "hardening"_a = 0.) 
.def_static("registerVolumeOperators", &ModelFactory::registerVolumeOperators, "model"_a); } /// Wrap residual class void wrapResidual(py::module& mod) { // TODO adapt to n-dim py::class_(mod, "Residual") .def(py::init()) .def("computeResidual", [](Residual& res, numpy& x) { auto in = instanciateFromNumpy(x); res.computeResidual(*in); }) .def("computeStress", [](Residual& res, numpy& x) { auto in = instanciateFromNumpy(x); res.computeStress(*in); }) .def("updateState", [](Residual& res, numpy& x) { auto in = instanciateFromNumpy(x); res.updateState(*in); }) .def("computeResidualDisplacement", [](Residual& res, numpy& x) { auto in = instanciateFromNumpy(x); res.computeResidualDisplacement(*in); }) .def("applyTangent", [](Residual& res, numpy& output, numpy& input, numpy& current_strain_inc) { auto out = instanciateFromNumpy(output); auto in = instanciateFromNumpy(input); auto inc = instanciateFromNumpy(current_strain_inc); res.applyTangent(*out, *in, *inc); }, "output"_a, "input"_a, "current_strain_increment"_a) .def("getVector", &Residual::getVector, py::return_value_policy::reference_internal) .def("getPlasticStrain", &Residual::getPlasticStrain, py::return_value_policy::reference_internal) .def("getStress", &Residual::getStress, py::return_value_policy::reference_internal) .def("setIntegrationMethod", &Residual::setIntegrationMethod, "method"_a, "cutoff"_a = 1e-12) .def_property("yield_stress", &Residual::getYieldStress, &Residual::setYieldStress) .def_property("hardening_modulus", &Residual::getHardeningModulus, &Residual::setHardeningModulus); } void wrapModel(py::module& mod) { wrapBEEngine(mod); wrapModelClass(mod); wrapModelFactory(mod); wrapFunctionals(mod); wrapResidual(mod); wrapIntegralOperator(mod); } } // namespace wrap } // namespace tamaas diff --git a/python/wrap/solvers.cpp b/python/wrap/solvers.cpp index c75e485..2f59bef 100644 --- a/python/wrap/solvers.cpp +++ b/python/wrap/solvers.cpp @@ -1,146 +1,155 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "beck_teboulle.hh" #include "condat.hh" #include "contact_solver.hh" #include "ep_solver.hh" #include "epic.hh" #include "kato.hh" #include "kato_saturated.hh" #include "polonsky_keer_rey.hh" #include "polonsky_keer_tan.hh" #include "wrap.hh" + +#include /* -------------------------------------------------------------------------- */ namespace tamaas { namespace wrap { /* -------------------------------------------------------------------------- */ using namespace py::literals; class PyEPSolver : public EPSolver { public: using EPSolver::EPSolver; void solve() override { PYBIND11_OVERLOAD_PURE(void, EPSolver, solve); } void updateState() override { PYBIND11_OVERLOAD(void, EPSolver, updateState); } }; /* -------------------------------------------------------------------------- */ void wrapSolvers(py::module& mod) { py::class_(mod, "ContactSolver") // .def(py::init&, Real>(), "model"_a, // "surface"_a, "tolerance"_a) .def("setMaxIterations", &ContactSolver::setMaxIterations, "max_iter"_a) .def("setDumpFrequency", &ContactSolver::setDumpFrequency, "dump_freq"_a) .def("addFunctionalTerm", &ContactSolver::addFunctionalTerm) .def("solve", py::overload_cast>(&ContactSolver::solve), - "target_force"_a) + "target_force"_a, + py::call_guard()) .def("solve", py::overload_cast(&ContactSolver::solve), - "target_normal_pressure"_a); + "target_normal_pressure"_a, + py::call_guard()); py::class_ pkr(mod, "PolonskyKeerRey"); // Need to export enum values before defining PKR constructor py::enum_(pkr, "type") .value("gap", PolonskyKeerRey::gap) .value("pressure", PolonskyKeerRey::pressure) .export_values(); pkr.def(py::init&, Real, PolonskyKeerRey::type, PolonskyKeerRey::type>(), "model"_a, "surface"_a, "tolerance"_a, "primal_type"_a = PolonskyKeerRey::type::pressure, "constraint_type"_a = PolonskyKeerRey::type::pressure, py::keep_alive<1, 2>(), py::keep_alive<1, 3>()) .def("computeError", &PolonskyKeerRey::computeError); py::class_(mod, "KatoSaturated") .def(py::init&, Real, Real>(), "model"_a, "surface"_a, "tolerance"_a, "pmax"_a, py::keep_alive<1, 2>(), py::keep_alive<1, 3>()) .def_property("pmax", &KatoSaturated::getPMax, &KatoSaturated::setPMax); py::class_ kato(mod, "Kato"); kato.def(py::init&, Real, Real>(), "model"_a, "surface"_a, "tolerance"_a, "mu"_a, py::keep_alive<1, 2>(), py::keep_alive<1, 3>()) .def("solve", &Kato::solve, "p0"_a, "proj_iter"_a = 50) .def("solveRelaxed", &Kato::solveRelaxed, "g0"_a) .def("solveRegularized", &Kato::solveRegularized, "p0"_a, "r"_a = 0.01) .def("computeCost", &Kato::computeCost, "use_tresca"_a = false); py::class_ bt(mod, "BeckTeboulle"); bt.def(py::init&, Real, Real>(), "model"_a, "surface"_a, "tolerance"_a, "mu"_a, py::keep_alive<1, 2>(), py::keep_alive<1, 3>()) .def("solve", &BeckTeboulle::solve, "p0"_a) .def("computeCost", &BeckTeboulle::computeCost); py::class_ cd(mod, "Condat"); cd.def(py::init&, Real, Real>(), "model"_a, "surface"_a, "tolerance"_a, "mu"_a, py::keep_alive<1, 2>(), py::keep_alive<1, 3>()) .def("solve", &Condat::solve, "p0"_a, "grad_step"_a = 0.9) .def("computeCost", &Condat::computeCost); py::class_ pkt(mod, "PolonskyKeerTan"); pkt.def(py::init&, Real, Real>(), "model"_a, "surface"_a, "tolerance"_a, "mu"_a, py::keep_alive<1, 2>(), py::keep_alive<1, 3>()) .def("solve", &PolonskyKeerTan::solve, "p0"_a) .def("solveTresca", &PolonskyKeerTan::solveTresca, "p0"_a) .def("computeCost", &PolonskyKeerTan::computeCost, "use_tresca"_a = false); py::class_(mod, 
"EPSolver") .def(py::init(), "residual"_a, py::keep_alive<1, 2>()) .def("solve", &EPSolver::solve) .def("getStrainIncrement", &EPSolver::getStrainIncrement, py::return_value_policy::reference_internal) .def("getResidual", &EPSolver::getResidual, py::return_value_policy::reference_internal) .def("updateState", &EPSolver::updateState); py::class_(mod, "EPICSolver") .def(py::init(), "contact_solver"_a, "elasto_plastic_solver"_a, "tolerance"_a = 1e-10, "relaxation"_a = 0.3, py::keep_alive<1, 2>(), py::keep_alive<1, 3>()) - .def("solve", &EPICSolver::solve, "force"_a) .def("solve", [](EPICSolver& solver, Real pressure) { return solver.solve(std::vector{pressure}); }, - "normal_pressure"_a) + "normal_pressure"_a, + py::call_guard()) .def("acceleratedSolve", [](EPICSolver& solver, Real pressure) { return solver.acceleratedSolve(std::vector{pressure}); }, - "normal_pressure"_a); + "normal_pressure"_a, + py::call_guard()); } } // namespace wrap } // namespace tamaas diff --git a/site_scons/detect.py b/site_scons/detect.py index d1874c0..c7da2d4 100644 --- a/site_scons/detect.py +++ b/site_scons/detect.py @@ -1,247 +1,254 @@ # -*- coding: utf-8 -*- # @file # @section LICENSE # # Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
from __future__ import print_function import SCons from os.path import join, abspath, exists, isdir from SCons.Script import Configure # ------------------------------------------------------------------------------ def _get_path(env, ext, module_var): path = "" if module_var in env and env[module_var] != "": root = abspath(env[module_var]) if not exists(root) or not isdir(root): raise RuntimeError("{} is set to a non-existing path '{}'".format( module_var, root)) path = join(root, ext) if not exists(path) or not isdir(path): raise RuntimeError("{} does not contain '{}' directory".format( module_var, ext)) return [path] # ------------------------------------------------------------------------------ def FindFFTW(env, components=None, precision='double', module_var='FFTW_ROOT'): """Find FFTW3 and set relevant environment variables""" if not env.get('should_configure', True): return if components is None: components = [] fftw_vars = {} fftw_vars['CPPPATH'] = _get_path(env, 'include', module_var) fftw_vars['LIBPATH'] = _get_path(env, 'lib', module_var) + try: + fftw_vars['LIBPATH'] += _get_path(env, 'lib64', module_var) + except RuntimeError: + pass + + fftw_vars['RPATH'] = fftw_vars['LIBPATH'] + # Setting up FFTW wishes = ['main'] + components fftw_name = "fftw3{}" # Components lib_names = {'main': fftw_name, 'thread': fftw_name + '_threads', 'omp': fftw_name + '_omp', 'mpi': fftw_name + '_mpi'} inc_names = ['fftw3.h'] # Checking list of wishes try: libs = [lib_names[i].format("") for i in wishes] except KeyError: raise SCons.Errors.StopError( 'Incompatible FFTW wishlist {0}'.format(wishes)) fftw_vars['LIBS'] = libs # Add long precision libraries if precision == 'long double': fftw_vars['LIBS'] += [lib_names[i].format("l") for i in wishes] conf_env = env.Clone(**fftw_vars) conf = Configure(conf_env) if not conf.CheckLibWithHeader(libs, inc_names, 'c++'): raise SCons.Errors.StopError( 'Failed to find libraries {0} or ' 'headers {1}.'.format( str([lib_names[i].format("") for i in wishes]), str(inc_names))) conf_env = conf.Finish() # Update modified variables env.AppendUnique(**fftw_vars) # ------------------------------------------------------------------------------ def FindBoost(env, headers=['boost/version.hpp'], module_var='BOOST_ROOT'): """Find Boost and set relevant environment variables""" if not env.get('should_configure', True): return boost_vars = {} boost_vars['CPPPATH'] = _get_path(env, 'include', module_var) conf_env = env.Clone(**boost_vars) conf = Configure(conf_env) if not conf.CheckCXXHeader(headers): raise SCons.Errors.StopError( 'Failed to find Boost headers {}'.format(headers)) conf_env = conf.Finish() # Update modified variables env.AppendUnique(**boost_vars) # ------------------------------------------------------------------------------ def FindThrust(env, backend='omp', module_var='THRUST_ROOT'): """Find Thrust and set relevant environment variables""" if not env.get('should_configure', True): return if backend not in ('omp', 'cuda', 'tbb'): raise SCons.Errors.StopError( 'Unknown thrust backend "{}"'.format(backend)) thrust_vars = {} thrust_vars['CPPPATH'] = _get_path(env, 'include', module_var) if "clang++" in env['CXX']: thrust_vars['CXXFLAGS'] = ["-Wno-unused-local-typedef"] thrust_vars['CPPDEFINES'] = [ "THRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_{}".format(backend.upper()) ] if backend == 'cuda': thrust_vars['CPPDEFINES'].append("USE_CUDA") conf_env = env.Clone(**thrust_vars) conf = Configure(conf_env) if not conf.CheckCXXHeader('thrust/version.h'): raise 
SCons.Errors.StopError( 'Failed to find Thrust') conf_env = conf.Finish() # Update modified variables env.AppendUnique(**thrust_vars) # ------------------------------------------------------------------------------ def FindPybind11(env, module_var='PYBIND11_ROOT'): """Detech Pybind11 and set appropriate build variables""" if not env.get('should_configure', True) \ or env.get("PYBIND11_FOUND", False): return pybind11_vars = {} clone = env.Clone(CPPPATH=[]) clone.ParseConfig('${py_exec}-config --includes') clone.AppendUnique(CPPPATH=['$PYBIND11_ROOT']) pybind11_vars['CPPPATH'] = clone['CPPPATH'] conf = Configure(clone) if not conf.CheckCXXHeader('pybind11/pybind11.h'): raise SCons.Errors.StopError( 'Failed to find pybind11 header\n' + "Run 'git submodule update --init --recursive " + "third-party/pybind11'") conf.Finish() pybind11_vars['PYBIND11_FOUND'] = True # Update variables env.AppendUnique(**pybind11_vars) # ------------------------------------------------------------------------------ def FindCuda(env, module_var="CUDA_ROOT"): """Detect cuda on clusters""" if not env.get('should_configure', True): return if 'CUDA_ROOT' in env: env['CUDA_TOOLKIT_PATH'] = _get_path(env, '', module_var) else: env['CUDA_TOOLKIT_PATH'] = '/opt/cuda' env['CUDA_COMPONENTS'] = ['cufft'] env['CUDA_ARCH_FLAG'] = '-arch=sm_60' colors = env['COLOR_DICT'] if not env['verbose']: env['NVCCCOMSTR'] = env['SHCXXCOMSTR'] env['SHLINKCOMSTR'] = \ u'{0}[Linking (cuda)] {1}$TARGET{2}'.format(colors['purple'], colors['blue'], colors['end']) env.AppendUnique(CXXFLAGS=[ "-expt-extended-lambda", # experimental lambda support "-expt-relaxed-constexpr", # experimental constexpr ]) if env['build_type'] == 'debug': env.AppendUnique(CXXFLAGS="-G") env.Tool('nvcc') # ------------------------------------------------------------------------------ def FindGTest(env): """A tool to configure GoogleTest""" if not env.get('should_configure', True): return conf = Configure(env) if not conf.CheckCXXHeader('gtest/gtest.h'): raise SCons.Errors.StopError( 'Failed to find GoogleTest header\n' + "Run 'git submodule update --init --recursive " + "third-party/googletest'") env = conf.Finish() # ------------------------------------------------------------------------------ def FindExpolit(env): """A tool to configure Expolit""" if not env.get('should_configure', True): return expolit_vars = {"CPPPATH": "#/third-party/expolit/include"} conf_env = env.Clone(**expolit_vars) conf = Configure(conf_env) if not conf.CheckCXXHeader('expolit/expolit'): raise SCons.Errors.StopError( 'Failed to find Expolit header\n' + "Run 'git submodule update --init " + "third-party/expolit'") conf_env = conf.Finish() env.AppendUnique(**expolit_vars) diff --git a/src/core/logger.cpp b/src/core/logger.cpp index 9efd401..3f66976 100644 --- a/src/core/logger.cpp +++ b/src/core/logger.cpp @@ -1,65 +1,66 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "logger.hh" #include "mpi_interface.hh" #include +#include #include /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ LogLevel Logger::current_level = LogLevel::info; namespace { const std::map repr{{LogLevel::debug, "debug"}, {LogLevel::info, "info"}, {LogLevel::warning, "warning"}, {LogLevel::error, "error"}}; } std::ostream& operator<<(std::ostream& o, const LogLevel& val) { o << repr.at(val); return o; } Logger::~Logger() noexcept { if (wish_level >= current_level and not(mpi::rank() != 0 and wish_level == LogLevel::info)) { - fprintf(stderr, "%s", stream.str().c_str()); - fflush(stderr); + std::cerr << stream.str(); + std::cerr.flush(); } } std::ostream& Logger::get(LogLevel level) { wish_level = level; // Print rank if not info level if (wish_level != LogLevel::info) stream << '[' << mpi::rank() << "] "; return stream; } void Logger::setLevel(LogLevel level) { current_level = level; } /* -------------------------------------------------------------------------- */ } // namespace tamaas diff --git a/src/core/logger.hh b/src/core/logger.hh index b277975..a0f2b1f 100644 --- a/src/core/logger.hh +++ b/src/core/logger.hh @@ -1,60 +1,66 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef LOGGER_HH #define LOGGER_HH /* -------------------------------------------------------------------------- */ #include "tamaas.hh" #include /* -------------------------------------------------------------------------- */ namespace tamaas { /* -------------------------------------------------------------------------- */ /// Log levels enumeration enum class LogLevel { debug = 0, info = 1, warning = 2, error = 3 }; /** @brief Logging class Inspired from https://www.drdobbs.com/cpp/logging-in-c/201804215 by Petru Marginean. 
*/ class Logger { public: /// Writing to stderr ~Logger() noexcept; /// Get stream std::ostream& get(LogLevel level = LogLevel::debug); + /// Get wish log level + decltype(auto) getWishLevel() const { return wish_level; } + + /// Get current log level + static decltype(auto) getCurrentLevel() { return current_level; } + /// Set acceptable logging level static void setLevel(LogLevel level); private: static LogLevel current_level; std::ostringstream stream; LogLevel wish_level = LogLevel::debug; }; /* -------------------------------------------------------------------------- */ } // namespace tamaas #endif // LOGGER_HH diff --git a/src/model/meta_functional.cpp b/src/model/meta_functional.cpp index e1973e6..e09d341 100644 --- a/src/model/meta_functional.cpp +++ b/src/model/meta_functional.cpp @@ -1,70 +1,55 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #include "meta_functional.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { namespace functional { -MetaFunctional::~MetaFunctional() { - for (auto& f : functionals) { - if (f.second) - delete f.first; - } -} - Real MetaFunctional::computeF(GridBase& variable, GridBase& dual) const { Real F = 0; - for (auto& f : functionals) { - F += f.first->computeF(variable, dual); - } + for (auto& f : functionals) + F += f->computeF(variable, dual); return F; } /* -------------------------------------------------------------------------- */ void MetaFunctional::computeGradF(GridBase& variable, GridBase& gradient) const { gradient = 0; - for (auto& f : functionals) { - f.first->computeGradF(variable, gradient); - } -} - -/* -------------------------------------------------------------------------- */ - -void MetaFunctional::addFunctionalTerm(Functional* term, bool own) { - this->addFunctionalTerm(std::make_pair(term, own)); + for (auto& f : functionals) + f->computeGradF(variable, gradient); } /* -------------------------------------------------------------------------- */ -void MetaFunctional::addFunctionalTerm(std::pair element) { - functionals.push_back(element); +void MetaFunctional::addFunctionalTerm(std::shared_ptr functional) { + functionals.push_back(std::move(functional)); } } // namespace functional } // namespace tamaas /* -------------------------------------------------------------------------- */ diff --git a/src/model/meta_functional.hh b/src/model/meta_functional.hh index 379bcc8..8f1f186 100644 --- a/src/model/meta_functional.hh +++ b/src/model/meta_functional.hh @@ -1,64 +1,59 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des 
Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * */ /* -------------------------------------------------------------------------- */ #ifndef __META_FUNCTIONAL_HH__ #define __META_FUNCTIONAL_HH__ #include "functional.hh" #include "tamaas.hh" #include #include namespace tamaas { namespace functional { /// Meta functional that contains list of functionals class MetaFunctional : public Functional { - using FunctionalList = std::list>; - -public: - ~MetaFunctional() override; + using FunctionalList = std::list>; public: /// Compute functional Real computeF(GridBase& variable, GridBase& dual) const override; /// Compute functional gradient void computeGradF(GridBase& variable, GridBase& gradient) const override; /// Add functional to the list - void addFunctionalTerm(Functional* term, bool own); - /// Add functional to the list - void addFunctionalTerm(std::pair element); + void addFunctionalTerm(std::shared_ptr term); protected: FunctionalList functionals; }; } // namespace functional } // namespace tamaas #endif // __META_FUNCTIONAL_HH__ diff --git a/src/solvers/contact_solver.cpp b/src/solvers/contact_solver.cpp index 12ef5e9..019a78d 100644 --- a/src/solvers/contact_solver.cpp +++ b/src/solvers/contact_solver.cpp @@ -1,64 +1,65 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "contact_solver.hh" #include "logger.hh" #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { ContactSolver::ContactSolver(Model& model, const GridBase& surface, Real tolerance) : model(model), surface(), functional(), tolerance(tolerance), surface_stddev(std::sqrt(surface.var())) { auto discretization = model.getBoundaryDiscretization(); auto n_points = model.getTraction().getNbPoints(); if (n_points != surface.dataSize()) TAMAAS_EXCEPTION("Model size and surface size do not match!"); this->surface.wrap(surface); _gap = allocateGrid(model.getType(), discretization); model.registerField("gap", _gap); } /* -------------------------------------------------------------------------- */ void ContactSolver::printState(UInt iter, Real cost_f, Real error) { // UInt precision = std::cout.precision(); if (iter % dump_frequency) return; Logger().get(LogLevel::info) << std::setw(5) << iter << " " << std::setw(15) << std::scientific << cost_f << " " << std::setw(15) << error << std::endl << std::fixed; } /* -------------------------------------------------------------------------- */ -void ContactSolver::addFunctionalTerm(functional::Functional* func) { - functional.addFunctionalTerm(func, false); +void ContactSolver::addFunctionalTerm( + std::shared_ptr func) { + functional.addFunctionalTerm(std::move(func)); } } // namespace tamaas /* -------------------------------------------------------------------------- */ diff --git a/src/solvers/contact_solver.hh b/src/solvers/contact_solver.hh index 51b28f8..1105694 100644 --- a/src/solvers/contact_solver.hh +++ b/src/solvers/contact_solver.hh @@ -1,77 +1,77 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #ifndef __CONTACT_SOLVER_HH__ #define __CONTACT_SOLVER_HH__ /* -------------------------------------------------------------------------- */ #include "model.hh" #include "tamaas.hh" #include "meta_functional.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { class ContactSolver { public: /// Constructor ContactSolver(Model& model, const GridBase& surface, Real tolerance); /// Destructor virtual ~ContactSolver() = default; public: /// Print state of solve virtual void printState(UInt iter, Real cost_f, Real error); /// Set maximum number of iterations void setMaxIterations(UInt n) { max_iterations = n; } /// Set dump_frequency void setDumpFrequency(UInt n) { dump_frequency = n; } /// Add term to functional - void addFunctionalTerm(functional::Functional* func); + void addFunctionalTerm(std::shared_ptr func); // Virtual functions public: virtual Real solve(std::vector /*load*/) { TAMAAS_EXCEPTION("solve() not implemented"); } virtual Real solve(Real load) { return solve(std::vector{load}); } public: GridBase& getSurface() { return surface; } Model& getModel() { return model; } protected: Model& model; GridBase surface; std::shared_ptr> _gap = nullptr; functional::MetaFunctional functional; Real tolerance; UInt max_iterations = 1000; Real surface_stddev; UInt dump_frequency = 100; }; } // namespace tamaas /* -------------------------------------------------------------------------- */ #endif // __CONTACT_SOLVER_HH__ diff --git a/src/solvers/polonsky_keer_rey.cpp b/src/solvers/polonsky_keer_rey.cpp index 6a9346c..66aa18a 100644 --- a/src/solvers/polonsky_keer_rey.cpp +++ b/src/solvers/polonsky_keer_rey.cpp @@ -1,307 +1,302 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #include "polonsky_keer_rey.hh" #include "elastic_functional.hh" #include "logger.hh" #include "loop.hh" #include "model_type.hh" #include #include /* -------------------------------------------------------------------------- */ namespace tamaas { PolonskyKeerRey::PolonskyKeerRey(Model& model, const GridBase& surface, Real tolerance, type variable_type, type constraint_type) : ContactSolver(model, surface, tolerance), variable_type(variable_type), constraint_type(constraint_type) { #define SET_VIEWS(_, __, type) \ case type: { \ setViews(); \ break; \ } switch (model.getType()) { BOOST_PP_SEQ_FOR_EACH(SET_VIEWS, ~, TAMAAS_MODEL_TYPES); } #undef SET_VIEWS search_direction = allocateGrid( operation_type, model.getBoundaryDiscretization()); projected_search_direction = allocateGrid( operation_type, model.getBoundaryDiscretization()); switch (variable_type) { case gap: { model.getBEEngine().registerDirichlet(); primal = gap_view.get(); dual = pressure_view.get(); this->functional.addFunctionalTerm( - new functional::ElasticFunctionalGap(*integral_op, this->surface), - true); + std::make_shared(*integral_op, + this->surface)); break; } case pressure: { model.getBEEngine().registerNeumann(); primal = pressure_view.get(); dual = gap_view.get(); this->functional.addFunctionalTerm( - new functional::ElasticFunctionalPressure(*integral_op, this->surface), - true); + std::make_shared(*integral_op, + this->surface)); break; } } } /* -------------------------------------------------------------------------- */ Real PolonskyKeerRey::solve(std::vector target_v) { // Update in case of a surface change between solves this->surface_stddev = std::sqrt(this->surface.var()); // Printing column headers Logger().get(LogLevel::info) << std::setw(5) << "Iter" << " " << std::setw(15) << "Cost_f" << " " << std::setw(15) << "Error" << '\n' << std::fixed; const Real target = target_v.back(); // Setting uniform value if constraint if (constraint_type == variable_type && std::abs(primal->sum()) <= 0) *primal = target; else if (constraint_type == variable_type) *primal *= target / primal->mean(); else if (constraint_type != variable_type) *primal = this->surface_stddev; // Algorithm variables Real Gold = 1, error = 0; UInt n = 0; bool delta = false; *search_direction = 0; do { // Computing functional gradient functional.computeGradF(*primal, *dual); const Real dbar = meanOnUnsaturated(*dual); // Enforcing dual constraint via gradient if (constraint_type != variable_type) { *dual += 2 * target + dbar; } else { // Centering dual on primal > 0 *dual -= dbar; } // Computing gradient norm const Real G = computeSquaredNorm(*dual); // Updating search direction (conjugate gradient) updateSearchDirection(delta * G / Gold); Gold = G; // Computing critical step const Real tau = computeCriticalStep(target); // Update primal variable delta = updatePrimal(tau); // We should scale to constraint if (constraint_type == variable_type) enforceMeanValue(target); error = computeError(); const Real cost_f = functional.computeF(*primal, *dual); printState(n, cost_f, error); } while (error > this->tolerance and n++ < this->max_iterations); // Final update of dual variable functional.computeGradF(*primal, *dual); enforceAdmissibleState(); return error; } /* -------------------------------------------------------------------------- */ void PolonskyKeerRey::enforceAdmissibleState() { /// Make dual admissible const Real dual_min = dual->min(); *dual -= dual_min; 
// Primal is pressure: need to make sure gap is positive if (variable_type == pressure) { *displacement_view = *dual; *displacement_view += this->surface; } // Primal is gap: need to make sure dual is positive (pressure + adhesion) else { *displacement_view = *primal; *displacement_view += this->surface; integral_op->apply(*displacement_view, *pressure_view); *pressure_view -= dual_min; } } /* -------------------------------------------------------------------------- */ /** * Computes \f$ \frac{1}{\mathrm{card}(\{p > 0\})} \sum_{\{p > 0\}}{f_i} \f$ */ Real PolonskyKeerRey::meanOnUnsaturated(const GridBase& field) const { // Sum on unsaturated const Real sum = Loop::reduce( [] CUDA_LAMBDA(Real & p, const Real& f) { return (p > 0) ? f : 0; }, *primal, field); // Number of unsaturated points const UInt n_unsat = Loop::reduce( [] CUDA_LAMBDA(Real & p) -> UInt { return (p > 0); }, *primal); return sum / n_unsat; } /* -------------------------------------------------------------------------- */ /** * Computes \f$ \sum_{\{p > 0\}}{f_i^2} \f$ */ Real PolonskyKeerRey::computeSquaredNorm(const GridBase& field) const { return Loop::reduce( [] CUDA_LAMBDA(Real & p, const Real& f) { return (p > 0) ? f * f : 0; }, *primal, field); } /* -------------------------------------------------------------------------- */ /** * Computes \f$ \tau = \frac{ \sum_{\{p > 0\}}{q_i't_i} }{ \sum_{\{p > 0\}}{r_i' * t_i} } \f$ */ Real PolonskyKeerRey::computeCriticalStep(Real target) { integral_op->apply(*search_direction, *projected_search_direction); const Real rbar = meanOnUnsaturated(*projected_search_direction); if (constraint_type == variable_type) *projected_search_direction -= rbar; else { *projected_search_direction += 2 * target + rbar; } const Real num = Loop::reduce( [] CUDA_LAMBDA(const Real& p, const Real& q, const Real& t) { return (p > 0) ? q * t : 0; }, *primal, *dual, *search_direction); const Real denum = Loop::reduce( [] CUDA_LAMBDA(const Real& p, const Real& r, const Real& t) { return (p > 0) ? r * t : 0; }, *primal, *projected_search_direction, *search_direction); return num / denum; } /* -------------------------------------------------------------------------- */ /** * Update steps: * 1. \f$\mathbf{p} = \mathbf{p} - \tau \mathbf{t} \f$ * 2. Truncate all \f$p\f$ negative * 3. 
For all points in \f$I_\mathrm{na} = \{p = 0 \land q < 0 \}\f$ do \f$p_i = * p_i - \tau q_i\f$ */ bool PolonskyKeerRey::updatePrimal(Real step) { const UInt na_num = Loop::reduce( [step] CUDA_LAMBDA(Real & p, const Real& q, const Real& t) -> UInt { p -= step * t; // Updating primal if (p < 0) p = 0; // Truncating neg values if (p == 0 && q < 0) { // If non-admissible state p -= step * q; return 1; } else return 0; }, *primal, *dual, *search_direction); return na_num == 0; } /* -------------------------------------------------------------------------- */ /** * Error is based on \f$ \sum{p_i q_i} \f$ */ Real PolonskyKeerRey::computeError() { /// Making sure dual respects constraint *dual -= dual->min(); /// Complementarity error const Real error = primal->dot(*dual); if (std::isnan(error)) TAMAAS_EXCEPTION("Encountered NaN in complementarity error: this may be " "caused by a contact area of a single node."); const Real norm = [this]() { if (variable_type == pressure) return std::abs(primal->sum() * this->surface_stddev); else return std::abs(dual->sum() * this->surface_stddev); }(); return std::abs(error) / (norm * primal->getNbPoints()); } /* -------------------------------------------------------------------------- */ /** * Do \f$\mathbf{t} = \mathbf{q}' + \delta \frac{R}{R_\mathrm{old}}\mathbf{t} * \f$ */ void PolonskyKeerRey::updateSearchDirection(Real factor) { Loop::loop( [factor] CUDA_LAMBDA(Real & p, Real & q, Real & t) { t = (p > 0) ? q + factor * t : 0; }, *primal, *dual, *search_direction); } /* -------------------------------------------------------------------------- */ void PolonskyKeerRey::enforceMeanValue(Real mean) { *primal *= mean / primal->mean(); } -/* -------------------------------------------------------------------------- */ -void PolonskyKeerRey::addFunctionalTerm(functional::Functional* func) { - functional.addFunctionalTerm(func, false); -} - } // namespace tamaas /* -------------------------------------------------------------------------- */ diff --git a/src/solvers/polonsky_keer_rey.hh b/src/solvers/polonsky_keer_rey.hh index 51af97f..75b944f 100644 --- a/src/solvers/polonsky_keer_rey.hh +++ b/src/solvers/polonsky_keer_rey.hh @@ -1,130 +1,128 @@ /** * @file * @section LICENSE * * Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published * by the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
* */ /* -------------------------------------------------------------------------- */ #ifndef __POLONSKY_KEER_REY_HH__ #define __POLONSKY_KEER_REY_HH__ /* -------------------------------------------------------------------------- */ #include "contact_solver.hh" #include "grid_view.hh" #include "meta_functional.hh" #include "westergaard.hh" /* -------------------------------------------------------------------------- */ namespace tamaas { class PolonskyKeerRey : public ContactSolver { public: /// Types of algorithm (primal/dual) or constraint enum type { gap, pressure }; public: /// Constructor PolonskyKeerRey(Model& model, const GridBase& surface, Real tolerance, type variable_type, type constraint_type); ~PolonskyKeerRey() override = default; public: using ContactSolver::solve; /// Solve Real solve(std::vector target) override; /// Mean on unsaturated constraint zone virtual Real meanOnUnsaturated(const GridBase& field) const; /// Compute squared norm virtual Real computeSquaredNorm(const GridBase& field) const; /// Update search direction virtual void updateSearchDirection(Real factor); /// Compute critical step virtual Real computeCriticalStep(Real target = 0); /// Update primal and check non-admissible state virtual bool updatePrimal(Real step); /// Compute error/stopping criterion virtual Real computeError(); /// Enforce contact constraints on final state virtual void enforceAdmissibleState(); - /// Add term to functional - void addFunctionalTerm(functional::Functional* func); /// Enforce mean value constraint virtual void enforceMeanValue(Real mean); private: /// Set correct views on normal traction and gap template void setViews(); protected: type variable_type, constraint_type; model_type operation_type; GridBase* primal = nullptr; ///< non-owning pointer for primal variable GridBase* dual = nullptr; ///< non-owning pointer for dual variable /// CG search direction std::unique_ptr> search_direction = nullptr; /// Projected CG search direction std::unique_ptr> projected_search_direction = nullptr; /// View on normal pressure, gap, displacement std::unique_ptr> pressure_view, gap_view, displacement_view = nullptr; /// Integral operator for gradient computation IntegralOperator* integral_op = nullptr; }; /* -------------------------------------------------------------------------- */ template void PolonskyKeerRey::setViews() { constexpr UInt dim = model_type_traits::dimension; constexpr UInt bdim = model_type_traits::boundary_dimension; constexpr UInt comp = model_type_traits::components; pressure_view = std::unique_ptr>{ new GridView(model.getTraction(), {}, comp - 1)}; gap_view = std::unique_ptr>{ new GridView(*this->_gap, {}, comp - 1)}; displacement_view = std::unique_ptr>{new GridView( model.getDisplacement(), model_type_traits::indices, comp - 1)}; #define WESTERGAARD(type, kind, desc) \ this->model.registerIntegralOperator< \ Westergaard>(desc) // I know this is ugly - TODO: replace with a factory?
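The Westergaard operators registered by this macro are spectral (FFT-based) convolutions. A rough standalone NumPy sketch of the Neumann direction (pressure to surface displacement) on a 1D periodic line follows; it uses the kernel u(q) = 2 p(q) / (E* |q|), takes `length` and `e_star` as hypothetical parameters, and is not the Tamaas implementation (which also covers 2D grids and the Dirichlet, gap to pressure, direction).

# Rough 1D periodic Westergaard sketch (Neumann: pressure -> displacement), illustration only.
import numpy as np

def westergaard_neumann_1d(pressure, length=1.0, e_star=1.0):
    n = pressure.size
    q = 2 * np.pi * np.fft.rfftfreq(n, d=length / n)       # angular wavenumbers
    p_hat = np.fft.rfft(pressure)
    u_hat = np.zeros_like(p_hat)
    u_hat[1:] = 2 * p_hat[1:] / (e_star * np.abs(q[1:]))   # zero mode dropped (rigid-body motion)
    return np.fft.irfft(u_hat, n)

Such a function could serve as the `apply` argument of the iteration sketch above.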
if (bdim == 1) { operation_type = model_type::basic_1d; if (variable_type == gap) integral_op = WESTERGAARD(basic_1d, dirichlet, "westergaard_dirichlet"); else integral_op = WESTERGAARD(basic_1d, neumann, "westergaard_neumann"); } else if (bdim == 2) { operation_type = model_type::basic_2d; if (variable_type == gap) integral_op = WESTERGAARD(basic_2d, dirichlet, "westergaard_dirichlet"); else integral_op = WESTERGAARD(basic_2d, neumann, "westergaard_neumann"); } #undef WESTERGAARD } } // namespace tamaas #endif // __POLONSKY_KEER_REY_HH__ diff --git a/tests/SConscript b/tests/SConscript index c6b9cc7..303be65 100644 --- a/tests/SConscript +++ b/tests/SConscript @@ -1,217 +1,221 @@ # -*- mode:python; coding: utf-8 -*- # vim: set ft=python: # @file # @section LICENSE # # Copyright (©) 2016-2020 EPFL (École Polytechnique Fédérale de Lausanne), # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . from __future__ import print_function from os.path import join from SCons.Script import Split, Copy, Dir, Import from detect import FindGTest, FindPybind11 # ------------------------------------------------------------------------------ def copyComStr(env, main): if 'SHCXXCOMSTR' in main: env['CXXCOMSTR'] = main['SHCXXCOMSTR'] if 'SHLINKCOMSTR' in main: env['LINKCOMSTR'] = main['SHLINKCOMSTR'] # ------------------------------------------------------------------------------ def make_python_tests(env): """Copy python tests to build directory""" test_env = env.Clone() test_files = Split(""" test_hertz.py test_westergaard.py test_patch_westergaard.py test_patch_plasticity.py test_surface.py test_hertz_disp.py test_hertz_kato.py test_saturated_pressure.py test_flood_fill.py test_integral_operators.py test_dumper.py test_tangential.py test_boussinesq_surface.py test_voigt.py test_memory.py test_epic.py fftfreq.py conftest.py pytest.ini """) if env['legacy_bem']: test_files += ['test_bem_grid.py', 'test_autocorrelation.py'] src_dir = "#/tests" targets = [ test_env.Command(file, join(src_dir, file), Copy("$TARGET", "$SOURCE")) for file in test_files ] test_env = env.Clone(tools=[pybind11]) # Helper module for integral operators test_env['SHLIBPREFIX'] = '' register = test_env.Pybind11Module( target="register_integral_operators", source=["register_integral_operators.cpp"], LIBS=['Tamaas']) Import('libTamaas') test_env.Depends(register, libTamaas) targets.append(register) return targets # ------------------------------------------------------------------------------ def compile_google_test(env, gtest_path): gtest_obj = env.Object('gtest.o', [join(gtest_path, "src/gtest-all.cc")]) return env.StaticLibrary('gtest', gtest_obj) # ------------------------------------------------------------------------------ def make_google_tests(env): gtest_dir = Dir(env['GTEST_ROOT']) gtest_env = env.Clone(CPPPATH=[gtest_dir], CXXFLAGS=['-pthread', '-isystem', join(str(gtest_dir), "include")]) 
colors = env['COLOR_DICT'] if not env['verbose']: gtest_env['ARCOMSTR'] = u'{}[Ar]{} $TARGET'.format(colors['purple'], colors['end']) gtest_env['RANLIBCOMSTR'] = \ u'{}[Ranlib]{} $TARGET'.format(colors['purple'], colors['end']) FindGTest(gtest_env) libgtest = None # Ugly hack to detect if we need to compile gtest submodule if env['GTEST_ROOT'] == '#third-party/googletest/googletest': gtest_path = str(gtest_dir) libgtest = compile_google_test(gtest_env, gtest_path) env.AppendUnique(LIBS=['Tamaas'], CXXFLAGS=gtest_env['CXXFLAGS']) google_test_files = Split(""" test_fft.cpp test_grid.cpp test_loop.cpp test_model.cpp test_static_types.cpp test_integration.cpp """) # Necessary for the tests that use pybind11 calls to python uses = [] if env['build_python']: google_test_files.append('test_fftfreq.cpp') uses.append('USE_PYTHON') if env['use_mpi']: google_test_files.append('test_mpi.cpp') defines = env['CPPDEFINES'] if type(defines) is not list: defines = [defines] gtest_main = env.Object("tamaas_gtest_main.o", 'tamaas_gtest_main.cc', CPPDEFINES=defines + uses) gtest_all = env.Program('test_gtest_all', google_test_files + [gtest_main], LIBS=(env['LIBS'] + ['gtest'])) Import('libTamaas') env.Depends(gtest_all, libTamaas) env.Depends(gtest_all, libgtest) return [gtest_all] # ------------------------------------------------------------------------------ def make_bare_tests(env): rough = env.Program("test_rough_surface.cpp") Import('libTamaas') env.Depends(rough, libTamaas) return [rough] # ------------------------------------------------------------------------------ Import('main_env') # Setup of test environment test_env = main_env.Clone() -test_env.AppendUnique(LIBS=['Tamaas'], LIBPATH=['.']) +test_env.AppendUnique( + LIBS=['Tamaas'], + LIBPATH=['.', Dir(join('#${build_dir}', 'src'))], + RPATH=["'$$$$ORIGIN/../src'"] +) # Build tests that do not require any third-party library targets = make_bare_tests(test_env) # Build tests that require python bindings if test_env['build_python']: FindPybind11(test_env) test_env.Tool(pybind11) test_env.ParseConfig("${py_exec}-config --ldflags") test_env['CCFLAGS'] = [] targets += make_python_tests(test_env) # Building google tests if test_env['use_googletest']: targets += make_google_tests(test_env) targets.append(test_env.Command('test_gtest.py', '#tests/test_gtest.py', Copy('$TARGET', '$SOURCE'))) # Target alias to build tests main_env.Alias('build-tests', targets) # Check if pytest is installed conf = Configure(main_env, custom_tests={'CheckPythonModule': CheckPythonModule}) has_pytest = conf.CheckPythonModule('pytest') conf.Finish() # Define a command to execute tests if has_pytest: pytest_env = main_env.Clone() pytest_env.PrependENVPath('PYTHONPATH', pytest_env.subst('${build_dir}/python')) pytest_env.PrependENVPath('LD_LIBRARY_PATH', pytest_env.subst('${build_dir}/src')) test_target = pytest_env.Command('#/.phony_test', targets, '${py_exec} -m pytest ${build_dir}/tests') main_env.Alias('test', test_target) else: # We still define a target here so that `scons test` still works dummy_command(main_env, 'test', 'Cannot run tests: pytest is not installed')
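The CheckPythonModule custom test and the dummy_command helper used above are not defined in this SConscript; they come from elsewhere in the build system. As a hedged sketch of what such a custom Configure test might look like (the actual helper may differ), assuming the module only needs to be importable by the interpreter that will run the tests:

# Hedged sketch of a CheckPythonModule-style SCons Configure test (actual helper may differ).
import subprocess

def CheckPythonModule(context, module, python='python3'):
    context.Message('Checking for python module %s... ' % module)
    ok = subprocess.call([python, '-c', 'import %s' % module]) == 0
    context.Result(ok)
    return ok

When the check succeeds, `scons test` runs pytest on the tests copied into the build directory; otherwise the fallback 'test' target reports that pytest is not installed.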