diff --git a/SConstruct b/SConstruct
index ed2f775..097d785 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1,464 +1,465 @@
# -*- mode:python; coding: utf-8 -*-
# vim: set ft=python:
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
from __future__ import print_function
import sys
import os
from subprocess import check_output
# Import below not strictly necessary, but good for pep8
from SCons.Script import (
EnsurePythonVersion,
EnsureSConsVersion,
Help,
Environment,
Variables,
EnumVariable,
PathVariable,
BoolVariable,
ListVariable,
Split,
Export,
)
from SCons.Errors import StopError
from SCons import __version__ as scons_version
from version import get_git_subst
from detect import (FindFFTW, FindBoost, FindThrust, FindCuda, FindExpolit,
FindPybind11)
from INFOS import TAMAAS_INFOS
# ------------------------------------------------------------------------------
EnsurePythonVersion(3, 6)
EnsureSConsVersion(3, 0)
# ------------------------------------------------------------------------------
def detect_dependencies(env):
"Detect all dependencies"
fftw_comp = {
'omp': ['omp'],
'threads': ['threads'],
'none': [],
}
fftw_components = fftw_comp[env['fftw_threads']]
if main_env['use_mpi']:
fftw_components.append('mpi')
if main_env['use_fftw']:
FindFFTW(env, fftw_components, precision=env['real_type'])
if main_env['use_cuda']:
FindCuda(env)
FindBoost(env, ['boost/preprocessor/seq.hpp'])
# Use thrust shipped with cuda if cuda is requested
thrust_var = 'CUDA_ROOT' if env['use_cuda'] else 'THRUST_ROOT'
FindThrust(env, env['backend'], thrust_var)
if env['build_python']:
FindPybind11(env)
FindExpolit(env)
def subdir(env, dir):
"Building a sub-directory"
return env.SConscript(env.File('SConscript', dir),
variant_dir=env.Dir(dir, env['build_dir']),
duplicate=True)
def print_build_info(env):
info = ("-- Tamaas ${version}\n" + "-- SCons {} (Python {}.{})\n".format(
scons_version, sys.version_info.major, sys.version_info.minor) +
"-- Build type: ${build_type}\n" +
"-- Thrust backend: ${backend}\n" +
("-- FFTW threads: ${fftw_threads}\n" if env['use_fftw'] else '') +
"-- MPI: ${use_mpi}\n" + "-- Build directory: ${build_dir}\n" +
("-- Python version (bindings): $py_version"
if env['build_python'] else ''))
print(env.subst(info))
# ------------------------------------------------------------------------------
# Main compilation
# ------------------------------------------------------------------------------
# Compilation colors
colors = {
'cyan': '\033[96m',
'purple': '\033[95m',
'blue': '\033[94m',
'green': '\033[92m',
'yellow': '\033[93m',
'gray': '\033[38;5;8m',
'orange': '\033[38;5;208m',
'red': '\033[91m',
'end': '\033[0m'
}
# Inherit all environment variables (for CXX detection, etc.)
main_env = Environment(ENV=os.environ, )
# Set tamaas information
for k, v in TAMAAS_INFOS._asdict().items():
main_env[k] = v
main_env['COLOR_DICT'] = colors
main_env.AddMethod(subdir, 'SubDirectory')
# Build variables
vars = Variables('build-setup.conf')
vars.AddVariables(
EnumVariable('build_type',
'Build type',
'release',
allowed_values=('release', 'profiling', 'debug'),
ignorecase=2),
EnumVariable('backend',
'Thrust backend',
'omp',
allowed_values=('cpp', 'omp', 'tbb', 'cuda'),
ignorecase=2),
EnumVariable('fftw_threads',
'Threads FFTW library preference',
'omp',
allowed_values=('omp', 'threads', 'none'),
ignorecase=2),
EnumVariable('sanitizer',
'Sanitizer type',
'none',
allowed_values=('none', 'memory', 'leaks', 'address'),
ignorecase=2),
PathVariable('prefix', 'Prefix where to install', '/usr/local'),
# Dependencies paths
PathVariable('FFTW_ROOT', 'FFTW custom path', os.getenv('FFTW_ROOT', ''),
PathVariable.PathAccept),
PathVariable('THRUST_ROOT', 'Thrust custom path',
os.getenv('THRUST_ROOT', ''), PathVariable.PathAccept),
PathVariable('BOOST_ROOT', 'Boost custom path',
os.getenv('BOOST_ROOT', ''), PathVariable.PathAccept),
PathVariable('CUDA_ROOT', 'Cuda custom path', os.getenv('CUDA_ROOT', ''),
PathVariable.PathAccept),
PathVariable('GTEST_ROOT', 'Googletest custom path',
os.getenv('GTEST_ROOT', ''), PathVariable.PathAccept),
PathVariable('PYBIND11_ROOT', 'Pybind11 custom path',
os.getenv('PYBIND11_ROOT', ''), PathVariable.PathAccept),
# Executables
('CXX', 'Compiler', os.getenv('CXX', 'g++')),
('MPICXX', 'MPI Compiler wrapper', os.getenv('MPICXX', 'mpicxx')),
('py_exec', 'Python executable', 'python3'),
# Compiler flags
('CXXFLAGS', 'C++ compiler flags', os.getenv('CXXFLAGS', "")),
# Cosmetic
BoolVariable('verbose', 'Activate verbosity',
os.getenv('VERBOSE') in {'1', 'True', 'true'}),
BoolVariable('color', 'Color the non-verbose compilation output', False),
# Tamaas components
BoolVariable('build_tests', 'Build test suite', False),
BoolVariable('build_python', 'Build python wrapper', True),
# Documentation
ListVariable('doc_builders',
'Generated documentation formats',
default='html',
names=Split("html man")), # TODO include latex
# Dependencies
BoolVariable('use_mpi', 'Builds multi-process parallelism', False),
# Distribution options
BoolVariable('strip_info', 'Strip binary of added information', True),
BoolVariable('build_static_lib', "Build a static libTamaas", False),
# Type variables
EnumVariable('real_type',
'Type for real precision variables',
'double',
allowed_values=('double', 'long double')),
EnumVariable('integer_type',
'Type for integer variables',
'int',
allowed_values=('int', 'long')),
)
# Set variables of environment
vars.Update(main_env)
help_text = vars.GenerateHelpText(main_env)
help_text += """
Commands:
scons [build] [options]... Compile Tamaas (and additional modules/tests)
scons install [prefix=/your/prefix] [options]... Install Tamaas to prefix
scons dev Install symlink to Tamaas python module (useful to development purposes)
scons test Run tests with pytest
scons doc Compile documentation with Doxygen and Sphinx+Breathe
scons archive Create a gzipped archive from source
""" # noqa
Help(help_text)
# Save all options, not just those that differ from default
with open('build-setup.conf', 'w') as setup:
for option in vars.options:
setup.write("# " + option.help.replace('\n', '\n# ') + "\n")
setup.write("{} = '{}'\n".format(option.key, main_env[option.key]))
main_env['should_configure'] = \
not main_env.GetOption('clean') and not main_env.GetOption('help')
build_type = main_env['build_type']
build_dir = 'build-${build_type}'
main_env['build_dir'] = main_env.Dir(build_dir)
# Setting up the python name with version
if main_env['build_python']:
args = (main_env.subst("${py_exec} -c").split() + [
"from sysconfig import get_python_version;"
"print(get_python_version())"
])
main_env['py_version'] = bytes(check_output(args)).decode()
verbose = main_env['verbose']
# Remove colors if not set
if not main_env['color']:
for key in colors:
colors[key] = ''
if not verbose:
main_env['CXXCOMSTR'] = main_env['SHCXXCOMSTR'] = \
u'{0}[Compiling ($SHCXX)] {1}$SOURCE'.format(colors['green'],
colors['end'])
main_env['LINKCOMSTR'] = main_env['SHLINKCOMSTR'] = \
u'{0}[Linking] {1}$TARGET'.format(colors['purple'],
colors['end'])
main_env['ARCOMSTR'] = u'{}[Ar]{} $TARGET'.format(colors['purple'],
colors['end'])
main_env['RANLIBCOMSTR'] = \
u'{}[Randlib]{} $TARGET'.format(colors['purple'],
colors['end'])
main_env['PRINT_CMD_LINE_FUNC'] = pretty_cmd_print
main_env['INSTALLSTR'] = \
u'{}[Installing] {}$SOURCE to $TARGET'.format(colors['blue'],
colors['end'])
# Include paths
main_env.AppendUnique(CPPPATH=[
'#/src', '#/src/core', '#/src/surface', '#/src/percolation', '#/src/model',
'#/src/model/elasto_plastic', '#/src/solvers', '#/src/gpu', '#/python',
'#/third-party/expolit/include'
])
# Changing the shared object extension
main_env['SHOBJSUFFIX'] = '.o'
# Variables for clarity
main_env['use_cuda'] = main_env['backend'] == "cuda"
main_env['use_fftw'] = not main_env['use_cuda']
main_env['use_mpi'] = main_env['use_mpi'] and not main_env['use_cuda']
if not main_env['use_fftw']:
main_env['fftw_threads'] = 'none'
# Back to gcc if cuda is activated
if main_env['use_cuda'] and "g++" not in main_env['CXX']:
raise StopError('GCC should be used when compiling with CUDA')
# Printing some build infos
if main_env['should_configure']:
print_build_info(main_env)
# OpenMP flags - compiler dependent
omp_flags = {
"g++": ["-fopenmp"],
"clang++": ["-fopenmp"],
"icpc": ["-qopenmp"]
}
def cxx_alias(cxx):
for k in omp_flags.keys():
if k in cxx:
return k
raise StopError('Unsupported compiler: ' + cxx)
cxx = cxx_alias(main_env['CXX'])
# Setting main compilation flags
main_env['CXXFLAGS'] = Split(main_env['CXXFLAGS'])
main_env['LINKFLAGS'] = main_env['CXXFLAGS']
main_env.AppendUnique(
CXXFLAGS=Split('-std=c++14 -Wall -Wextra'),
CPPDEFINES={
'TAMAAS_LOOP_BACKEND': 'TAMAAS_LOOP_BACKEND_${backend.upper()}',
'TAMAAS_FFTW_BACKEND': 'TAMAAS_FFTW_BACKEND_${fftw_threads.upper()}'
},
)
if main_env['backend'] != 'cuda':
main_env.AppendUnique(CXXFLAGS=['-pedantic'])
# Adding OpenMP flags
if main_env['backend'] == 'omp':
main_env.AppendUnique(CXXFLAGS=omp_flags[cxx])
main_env.AppendUnique(LINKFLAGS=omp_flags[cxx])
else:
main_env.AppendUnique(CXXFLAGS=['-Wno-unknown-pragmas'])
# Correct bug in clang?
if main_env['backend'] == 'omp' and cxx == "clang++":
main_env.AppendUnique(LIBS=["atomic"])
elif main_env['backend'] == 'tbb':
main_env.AppendUnique(LIBS=['tbb'])
# Manage MPI compiler
if main_env['use_mpi']:
main_env['CXX'] = '$MPICXX'
main_env.AppendUnique(CPPDEFINES=['TAMAAS_USE_MPI'])
main_env.AppendUnique(CXXFLAGS=['-Wno-cast-function-type'])
# Flags and options
if main_env['build_type'] == 'debug':
main_env.AppendUnique(CPPDEFINES=['TAMAAS_DEBUG'])
# Define the scalar types
main_env.AppendUnique(CPPDEFINES={
'TAMAAS_REAL_TYPE': '${real_type}',
'TAMAAS_INT_TYPE': '${integer_type}'
})
# Compilation flags
cxxflags_dict = {
"debug": Split("-g -O0"),
"profiling": Split("-g -O3 -fno-omit-frame-pointer"),
"release": Split("-O3")
}
if main_env['sanitizer'] != 'none':
if main_env['backend'] == 'cuda':
raise StopError("Sanitizers with cuda are not yet supported!")
cxxflags_dict[build_type].append('-fsanitize=${sanitizer}')
main_env.AppendUnique(CXXFLAGS=cxxflags_dict[build_type])
main_env.AppendUnique(SHLINKFLAGS=cxxflags_dict[build_type])
main_env.AppendUnique(LINKFLAGS=cxxflags_dict[build_type])
if main_env['should_configure']:
basic_checks(main_env)
detect_dependencies(main_env)
# Writing information file
main_env.Tool('textfile')
main_env['SUBST_DICT'] = get_git_subst()
# Empty values if requested
if main_env['strip_info']:
for k in main_env['SUBST_DICT']:
main_env['SUBST_DICT'][k] = ""
# Substitution of environment file
main_env['SUBST_DICT'].update({
'@build_type@': '$build_type',
'@build_dir@': '${build_dir.abspath}',
'@build_version@': '$version',
'@backend@': '$backend',
})
# Environment file content
env_content = """export PYTHONPATH=@build_dir@/python:$$PYTHONPATH
export LD_LIBRARY_PATH=@build_dir@/src:$$LD_LIBRARY_PATH
"""
# Writing environment file
env_file = main_env.Textfile(
main_env.File('tamaas_environment.sh', main_env['build_dir']), env_content)
# Default targets
build_targets = ['build-cpp', env_file]
install_targets = ['install-lib']
if main_env._get_major_minor_revision(scons_version)[0] >= 4:
main_env.Tool('compilation_db')
build_targets.append(
main_env.CompilationDatabase(PRINT_CMD_LINE_FUNC=pretty_cmd_print
if not main_env['verbose'] else None))
# Building Tamaas library
Export('main_env')
main_env.SubDirectory('src')
# Building Tamaas extra components
for dir in ['python', 'tests']:
if main_env['build_{}'.format(dir)] and not main_env.GetOption('help'):
main_env.SubDirectory(dir)
build_targets.append('build-{}'.format(dir))
# Building API + Sphinx documentation if requested
main_env.SubDirectory('doc')
main_env.Alias('doc', 'build-doc')
install_targets.append('install-doc')
# Define dummy dev command when python is deactivated
if not main_env['build_python']:
dummy_command(
main_env, 'dev', 'Command "dev" does not do anything' +
' without python activated ("build_python=True")')
else:
install_targets.append('install-python')
# Define dummy test command when tests are deactivated
if not main_env['build_tests']:
dummy_command(
main_env, 'test', 'Command "test" does not do anything' +
' without tests activated ("build_tests=True")')
# Definition of target aliases, a.k.a. sub-commands
main_env.Alias('build', build_targets)
# Define proper install targets
main_env.Alias('install', install_targets)
# Default target is to build stuff
main_env.Default('build')
# Building a tar archive
archive = main_env.Command(
'tamaas-${version}.tar.gz',
'',
('git archive '
'--format=tar.gz '
'--prefix=tamaas/ '
'-o $TARGET HEAD'),
)
main_env.Alias('archive', archive)
diff --git a/doc/SConscript b/doc/SConscript
index 4ec2a0e..b180071 100644
--- a/doc/SConscript
+++ b/doc/SConscript
@@ -1,106 +1,107 @@
# -*- mode:python; coding: utf-8 -*-
# vim: set ft=python:
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import print_function
from SCons.Script import Import, Glob, Dir, File
def add_sources(source_list, dirname, fnames):
source_list += Dir(dirname).glob('*.hh')
source_list += Dir(dirname).glob('*.cpp')
Import('main_env')
Import('libTamaas')
doc_env = main_env.Clone()
doc_env['man_section'] = 7
doc_targets = []
# Generate Doxygen API documentation
doc_env.Tool('doxygen')
if doc_env['has_doxygen']:
# Generating Doxyfile
doxygen_verbose = {True: "NO", False: "YES"}
doxyfile_target = doc_env.Substfile('doxygen/Doxyfile',
'doxygen/Doxyfile.in',
SUBST_DICT={
'@version@': '$version',
'@build_dir@': Dir('doxygen').path,
'@logo@': File('icon.svg').path,
'@src_dir@': Dir('#src').abspath,
'@verbose@':
doxygen_verbose[doc_env["verbose"]],
})
doxygen_target = doc_env.Doxygen('doxygen/xml/index.xml',
[doxyfile_target, 'icon.svg', 'icon.ico'])
# Adding all source files as dependencies
sources = []
Dir('#src').walk(add_sources, sources)
doc_env.Depends(doxygen_target, sources)
doc_targets.append(doxygen_target)
# Generate Sphinx User documentation
sphinx_targets_map = {
"html": "sphinx/html/index.html",
"man": "sphinx/tamaas.${man_section}",
"latex": "sphinx/latex/Tamaas.tex"
}
doc_env.Tool('sphinx')
sphinx_targets = None
if doc_env['has_sphinx'] and len(doc_targets) != 0:
doc_env['IMPLICIT_COMMAND_DEPENDENCIES'] = 0
sphinx_sources = [Glob('sphinx/source/*'), Glob('sphinx/source/figures/*')]
sphinx_targets = {
builder: doc_env.Sphinx(sphinx_targets_map[builder], sphinx_sources)
for builder in doc_env['doc_builders']
}
for target in sphinx_targets.values():
doc_env.Depends(target, doxygen_target)
if main_env['build_python']:
doc_env.Depends(target, 'build-python')
doc_targets += list(sphinx_targets.values())
# Alias for both docs
main_env.Alias('build-doc', doc_targets)
# Install target for documentation
share = Dir('share', doc_env['prefix'])
doc_install = []
sphinx_prefix_map = {}
if sphinx_targets is not None:
if "html" in doc_env['doc_builders']:
doc_install.append(doc_env.Install(
target=share.Dir('doc').Dir('tamaas'),
source=sphinx_targets['html'][0].dir))
if "man" in doc_env['doc_builders']:
doc_install.append(doc_env.Install(
target=share.Dir('man') .Dir(doc_env.subst('man${man_section}')),
source=sphinx_targets['man']))
main_env.Alias('install-doc', doc_install)
diff --git a/python/SConscript b/python/SConscript
index 8ac818a..f6467b9 100644
--- a/python/SConscript
+++ b/python/SConscript
@@ -1,176 +1,177 @@
# -*- mode:python; coding: utf-8 -*-
# vim: set ft=python:
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import print_function
from SCons.Script import Import, Split, Copy, Dir
Import('main_env')
# Pybind11 wrapper
env_pybind = main_env.Clone(SHLIBPREFIX='')
# Remove pedantic warnings
cxx_flags = env_pybind['CXXFLAGS']
try:
del cxx_flags[cxx_flags.index('-pedantic')]
except ValueError:
pass
env_pybind.Tool(pybind11)
pybind_sources = Split("""
tamaas_module.cpp
wrap/core.cpp
wrap/percolation.cpp
wrap/surface.cpp
wrap/model.cpp
wrap/solvers.cpp
wrap/compute.cpp
wrap/mpi.cpp
wrap/test_features.cpp
""")
# Setting paths to find libTamaas
env_pybind.AppendUnique(LIBPATH=['../src'])
# Link against a static libTamaas
if env_pybind['build_static_lib']:
env_pybind.PrependUnique(LIBS=['Tamaas']) # keep other libs for link
env_pybind['RPATH'] = "" # no need for rpath w/ static lib
# Link against a dynamic libTamaas
else:
env_pybind.AppendUnique(RPATH=[
"'$$$$ORIGIN/../../src'", # path to lib in build_dir
"'$$$$ORIGIN/../../..'", # path to lib in install prefix
])
env_pybind['LIBS'] = ['Tamaas'] # discard other libs for link
# Building the pybind library
tamaas_wrap = env_pybind.Pybind11Module(
target='tamaas/_tamaas',
source=pybind_sources,
)
# For some reason link happens too early
Import('libTamaas')
env_pybind.Depends(tamaas_wrap, libTamaas)
# Copying the __init__.py file with extra python classes
copy_env = env_pybind.Clone()
# Copying additional python files
python_files = """
__main__.py
compute.py
utils.py
dumpers/__init__.py
dumpers/_helper.py
nonlinear_solvers/__init__.py
""".split()
targets = [tamaas_wrap]
targets += [
copy_env.Command(copy_env.File(f, 'tamaas'),
copy_env.File(f, '#python/tamaas'),
Copy("$TARGET", "$SOURCE"))
for f in python_files
]
dist_files = """
MANIFEST.in
pypi.md
setup.py
""".split()
# pyproject.toml causes issues with develop mode
dist_files.append("pyproject.toml")
targets += [
copy_env.Command(copy_env.File(f, ''),
copy_env.File(f, '#python'),
Copy("$TARGET", "$SOURCE"))
for f in dist_files
]
subst_env = env_pybind.Clone(
SUBST_DICT={
'@version@': '$version',
'@authors@': str(copy_env['authors']),
'@author_list@': ', '.join(copy_env['authors']),
'@email@': '$email',
'@description@': '$description',
'@copyright@': '$copyright',
'@maintainer@': '$maintainer',
'@license@': '$license',
}
)
subst_env.Tool('textfile')
targets.append(subst_env.Substfile('tamaas/__init__.py.in'))
targets.append(subst_env.Substfile('setup.cfg.in'))
# Defining alias for python builds
main_env.Alias('build-python', targets)
# Checking if we can use pip to install (more convenient for end-user)
install_env = main_env.Clone()
conf = Configure(install_env,
custom_tests={'CheckPythonModule': CheckPythonModule})
has_pip = conf.CheckPythonModule('pip')
install_env = conf.Finish()
# Current build directory
install_env['PYDIR'] = Dir('.')
# Setting command line for installation
if has_pip:
install_env['PYINSTALLCOM'] = '${py_exec} -m pip install -U $PYOPTIONS .'
install_env['PYDEVELOPCOM'] = \
'${py_exec} -m pip install $PYOPTIONS -e .[all]'
else:
install_env['PYINSTALLCOM'] = '${py_exec} setup.py install $PYOPTIONS'
install_env['PYDEVELOPCOM'] = '${py_exec} setup.py develop $PYOPTIONS'
install_env['py_version'] = get_python_version(install_env)
install_env.PrependENVPath(
'PYTHONPATH',
install_env.subst('${prefix}/lib/python${py_version}/site-packages'))
# Specify install target
PYOPTIONS = ['${"" if verbose else "-q"}']
python_install = install_env.Command(
'.python_install_phony',
targets, install_env['PYINSTALLCOM'],
PYOPTIONS=['--prefix', '${prefix}'] + PYOPTIONS,
chdir=install_env['PYDIR'])
python_install_dev = install_env.Command(
'.python_install_local_phony',
targets, install_env['PYDEVELOPCOM'],
# Temporary fix for https://github.com/pypa/pip/issues/7953
PYOPTIONS=['--prefix=$$HOME/.local/'] + PYOPTIONS,
chdir=install_env['PYDIR'])
# Defining aliases
main_env.Alias('install-python', python_install)
main_env.Alias('dev', python_install_dev)
diff --git a/python/tamaas/__init__.py.in b/python/tamaas/__init__.py.in
index f218d49..9180365 100644
--- a/python/tamaas/__init__.py.in
+++ b/python/tamaas/__init__.py.in
@@ -1,58 +1,59 @@
# -*- mode:python; coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
@description@
See __author__, __license__, __copyright__ for extra information about Tamaas.
- User documentation: https://tamaas.readthedocs.io
- Bug Tracker: https://gitlab.com/tamaas/tamaas/-/issues
- Source Code: https://gitlab.com/tamaas/tamaas
"""
__author__ = @authors@
__copyright__ = "@copyright@"
__license__ = "@license@"
__maintainer__ = "@maintainer@"
__email__ = "@email@"
try:
from ._tamaas import model_type, TamaasInfo
from ._tamaas import _type_traits as __tt
type_traits = {
model_type.basic_1d: __tt.basic_1d,
model_type.basic_2d: __tt.basic_2d,
model_type.surface_1d: __tt.surface_1d,
model_type.surface_2d: __tt.surface_2d,
model_type.volume_1d: __tt.volume_1d,
model_type.volume_2d: __tt.volume_2d,
}
del __tt
from ._tamaas import * # noqa
__version__ = TamaasInfo.version
except ImportError as e:
print("Error trying to import _tamaas:\n{}".format(e))
raise e
diff --git a/python/tamaas/__main__.py b/python/tamaas/__main__.py
index d4a77b7..ca81bab 100755
--- a/python/tamaas/__main__.py
+++ b/python/tamaas/__main__.py
@@ -1,255 +1,257 @@
#!/usr/bin/env python3
# -*- mode: python; coding: utf-8 -*-
# vim: set ft=python:
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Module entry point."""
import sys
import io
import time
import argparse
import tamaas as tm
import numpy as np
__author__ = "Lucas Frérot"
__copyright__ = (
"Copyright (©) 2019-2022, EPFL (École Polytechnique Fédérale de Lausanne),"
"\nLaboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)"
+ "\nCopyright (©) 2020-2022 Lucas Frérot"
)
__license__ = "SPDX-License-Identifier: AGPL-3.0-or-later"
def load_stream(stream):
"""
Load numpy from binary stream (allows piping).
Code from
https://gist.github.com/CMCDragonkai/3c99fd4aabc8278b9e17f50494fcc30a
"""
np_magic = stream.read(6)
# use the sys.stdin.buffer to read binary data
np_data = stream.read()
# read it all into an io.BytesIO object
return io.BytesIO(np_magic + np_data)
def print_version():
"""Print version information."""
print(
f"""\
Tamaas {tm.__version__}
{tm.__copyright__}
Authors: {', '.join(tm.__author__)}
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
Tamaas is the fruit of a research effort. Consider citing 10.21105/joss.02121
and the relevant references therein. Use the function tamaas.utils.publications
at the end of your python scripts for an exhaustive publication list."""
)
def surface(args):
"""Generate a surface."""
if args.generator == "random_phase":
generator = tm.SurfaceGeneratorRandomPhase2D(args.sizes)
elif args.generator == "filter":
generator = tm.SurfaceGeneratorFilter2D(args.sizes)
else:
raise ValueError("Unknown generator method {}".format(args.generator))
generator.spectrum = tm.Isopowerlaw2D()
generator.spectrum.q0 = args.cutoffs[0]
generator.spectrum.q1 = args.cutoffs[1]
generator.spectrum.q2 = args.cutoffs[2]
generator.spectrum.hurst = args.hurst
generator.random_seed = args.seed
surface = (
generator.buildSurface() / generator.spectrum.rmsSlopes() * args.rms
)
output = args.output if args.output is not None else sys.stdout
params = {
"q0": generator.spectrum.q0,
"q1": generator.spectrum.q1,
"q2": generator.spectrum.q2,
"hurst": generator.spectrum.hurst,
"random_seed": generator.random_seed,
"rms_heights": args.rms,
"generator": args.generator,
}
try:
np.savetxt(output, surface, header=str(params))
except BrokenPipeError:
pass
def contact(args):
"""Solve a contact problem."""
from tamaas.dumpers import NumpyDumper
tm.set_log_level(tm.LogLevel.error)
if not args.input:
input = sys.stdin
else:
input = args.input
surface = np.loadtxt(input)
discretization = surface.shape
system_size = [1.0, 1.0]
model = tm.ModelFactory.createModel(
tm.model_type.basic_2d, system_size, discretization
)
solver = tm.PolonskyKeerRey(model, surface, args.tol)
solver.solve(args.load)
dumper = NumpyDumper("numpy", "traction", "displacement")
dumper.dump_to_file(sys.stdout.buffer, model)
def plot(args):
"""Plot displacement and pressure maps."""
try:
import matplotlib.pyplot as plt
except ImportError:
print(
"Please install matplotlib to use the 'plot' command",
file=sys.stderr,
)
sys.exit(1)
fig, (ax_traction, ax_displacement) = plt.subplots(1, 2)
ax_traction.set_title("Traction")
ax_displacement.set_title("Displacement")
with load_stream(sys.stdin.buffer) as f_np:
data = np.load(f_np)
ax_traction.imshow(data["traction"])
ax_displacement.imshow(data["displacement"])
fig.set_size_inches(10, 6)
fig.tight_layout()
plt.show()
def main():
"""Main function."""
parser = argparse.ArgumentParser(
prog="tamaas",
description=(
"The tamaas command is a simple utility for surface"
" generation, contact computation and"
" plotting of contact solutions"
),
)
parser.add_argument(
"--version", action="store_true", help="print version info"
)
subs = parser.add_subparsers(
title="commands", description="utility commands"
)
# Arguments for surface command
parser_surface = subs.add_parser(
"surface", description="Generate a self-affine rough surface"
)
parser_surface.add_argument(
"--cutoffs",
"-K",
nargs=3,
type=int,
help="Long, rolloff, short wavelength cutoffs",
metavar=("k_l", "k_r", "k_s"),
required=True,
)
parser_surface.add_argument(
"--sizes",
nargs=2,
type=int,
help="Number of points",
metavar=("nx", "ny"),
required=True,
)
parser_surface.add_argument(
"--hurst", "-H", type=float, help="Hurst exponent", required=True
)
parser_surface.add_argument(
"--rms", type=float, help="Root-mean-square of slopes", default=1.0
)
parser_surface.add_argument(
"--seed", type=int, help="Random seed", default=int(time.time())
)
parser_surface.add_argument(
"--generator",
help="Generation method",
choices=("random_phase", "filter"),
default="random_phase",
)
parser_surface.add_argument(
"--output", "-o", help="Output file name (compressed if .gz)"
)
parser_surface.set_defaults(func=surface)
# Arguments for contact command
parser_contact = subs.add_parser(
"contact",
description="Compute the elastic contact solution with a given surface",
)
parser_contact.add_argument(
"--input", "-i", help="Rough surface file (default stdin)"
)
parser_contact.add_argument(
"--tol", type=float, default=1e-12, help="Solver tolerance"
)
parser_contact.add_argument(
"load", type=float, help="Applied average pressure"
)
parser_contact.set_defaults(func=contact)
# Arguments for plot command
parser_plot = subs.add_parser("plot", description="Plot contact solution")
parser_plot.set_defaults(func=plot)
args = parser.parse_args()
if args.version:
print_version()
return
try:
args.func(args)
except AttributeError:
parser.print_usage()
if __name__ == "__main__":
main()
diff --git a/python/tamaas/compute.py b/python/tamaas/compute.py
index c4dc253..b6630b5 100644
--- a/python/tamaas/compute.py
+++ b/python/tamaas/compute.py
@@ -1,20 +1,21 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from ._tamaas.compute import * # noqa
diff --git a/python/tamaas/dumpers/__init__.py b/python/tamaas/dumpers/__init__.py
index da43834..e9a0413 100644
--- a/python/tamaas/dumpers/__init__.py
+++ b/python/tamaas/dumpers/__init__.py
@@ -1,569 +1,570 @@
# -*- mode:python; coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Dumpers for the class :py:class:`Model `."""
from pathlib import Path
from os import PathLike
import glob
import json
import io
import typing as ts
from collections.abc import Collection
import numpy as np
from .. import (
ModelDumper,
model_type,
mpi,
type_traits,
ModelFactory,
Model,
__version__,
)
from ._helper import (
step_dump,
directory_dump,
local_slice,
_is_surface_field,
_basic_types,
file_handler,
)
__all__ = [
"JSONDumper",
"FieldDumper",
"NumpyDumper",
]
# Accepted wherever a dump target is expected: a filesystem path or an
# already-open text handle.
FileType = ts.Union[str, PathLike, io.TextIOBase]
# Accepted for dump basenames (no open handles).
NameType = ts.Union[str, PathLike]
# Maps the string form stored in dump metadata (e.g. "model_type.basic_2d")
# back to the corresponding model_type enum value, for reconstruction at
# read time.
_reverse_trait_map = {
    'model_type.' + t.__name__: mtype
    for mtype, t in type_traits.items()
}
def _get_attributes(model: Model):
    """Collect the metadata dictionary describing *model* for dump headers."""
    attrs = dict(
        model_type=str(model.type),
        system_size=model.system_size,
        discretization=model.global_shape,
        program=f"Tamaas {__version__}, DOI:10.21105/joss.02121",
    )
    return attrs
def _create_model(attrs: ts.MutableMapping):
    """Instantiate a model from a dump attribute dictionary."""
    model_kind = _reverse_trait_map[attrs['model_type']]
    # netCDF4 collapses length-1 list attributes to plain numbers; undo
    # that so the factory always receives sequences.
    for key in ('system_size', 'discretization'):
        value = attrs[key]
        if not isinstance(value, Collection):
            attrs[key] = [value]
    return ModelFactory.createModel(
        model_kind, attrs['system_size'], attrs['discretization'])
class MPIIncompatibilityError(RuntimeError):
    """Raised when a dumper cannot operate in an MPI (parallel) run."""
class ModelError(ValueError):
    """Raised when an unexpected model is passed to a dumper with a state."""
class ComponentsError(ValueError):
    """Raised when an unexpected number of components is encountered."""
class _ModelJSONEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize a Model."""
    def default(self, obj):
        """Serialize a Model to a plain dict; defer to the base otherwise."""
        if not isinstance(obj, Model):
            # Anything that is not a model goes through the stock encoder
            # (which raises TypeError for unknown types).
            return json.JSONEncoder.default(self, obj)
        return {
            'attrs': _get_attributes(obj),
            'fields': {name: obj[name].tolist() for name in obj},
            'operators': list(obj.operators),
        }
class JSONDumper(ModelDumper):
    """Dump a model to a JSON file."""
    def __init__(self, file_descriptor: FileType):
        """Store the dump target (path-like or open text handle)."""
        super(JSONDumper, self).__init__()
        self.fd = file_descriptor
    @file_handler('w')
    def _dump_to_file(self, fd: FileType, model: Model):
        # Serialization of the model is delegated to the dedicated encoder.
        json.dump(model, fd, cls=_ModelJSONEncoder)
    def dump(self, model: Model):
        """Write the model to the stored file."""
        self._dump_to_file(self.fd, model)
    @classmethod
    @file_handler('r')
    def read(cls, fd: FileType):
        """Reconstruct a model from a JSON file."""
        data = json.load(fd)
        model = _create_model(data['attrs'])
        basic = model.type in _basic_types
        for name, values in data['fields'].items():
            array = np.asarray(values)
            if basic:
                # Scalar model types store fields with an explicit trailing
                # single-component axis.
                array = array.reshape(list(array.shape) + [1])
            model[name] = array
        return model
class FieldDumper(ModelDumper):
    """Abstract base for dumpers that write selected model fields."""
    postfix = ""
    extension = ""
    name_format = "{basename}{postfix}.{extension}"
    def __init__(self, basename: NameType, *fields, **kwargs):
        """Record the output basename and the fields to dump."""
        super(FieldDumper, self).__init__()
        self.basename = basename
        self.fields: ts.List[str] = list(fields)
        # When True, every field of the model is dumped regardless of the
        # explicit list above.
        self.all_fields: bool = kwargs.get('all_fields', False)
    def add_field(self, field: str):
        """Register an additional field for dumping (ignores duplicates)."""
        if field not in self.fields:
            self.fields.append(field)
    def _dump_to_file(self, file_descriptor: FileType, model: Model):
        """Dump to a file (path-like or file handle)."""
        raise NotImplementedError()
    def get_fields(self, model: Model):
        """Return a name → data mapping of the fields to dump."""
        names = list(model) if self.all_fields else self.fields
        return {name: model[name] for name in names}
    def dump(self, model: Model):
        """Dump the model to the default file path."""
        self._dump_to_file(self.file_path, model)
    @classmethod
    def read(cls, file_descriptor: FileType):
        """Read model from file (implemented by concrete dumpers)."""
        raise NotImplementedError(
            f'read() method not implemented in {cls.__name__}')
    @classmethod
    def read_sequence(cls, glob_pattern):
        """Lazily read models from every file matching *glob_pattern*."""
        return map(cls.read, glob.iglob(glob_pattern))
    @property
    def file_path(self):
        """Default filename built from basename, postfix and extension."""
        return self.name_format.format(basename=self.basename,
                                       postfix=self.postfix,
                                       extension=self.extension)
@directory_dump('numpys')
@step_dump
class NumpyDumper(FieldDumper):
    """Dumper to compressed numpy files."""
    extension = 'npz'
    def _dump_to_file(self, file_descriptor: FileType, model: Model):
        """Save to compressed multi-field Numpy format."""
        if mpi.size() > 1:
            raise MPIIncompatibilityError(
                "NumpyDumper does not function at all in parallel")
        # Model metadata goes in as a JSON string under the 'attrs' key.
        np.savez_compressed(file_descriptor,
                            attrs=json.dumps(_get_attributes(model)),
                            **self.get_fields(model))
    @classmethod
    def read(cls, file_descriptor: FileType):
        """Create model from Numpy file."""
        archive = np.load(file_descriptor, mmap_mode='r')
        model = _create_model(json.loads(str(archive['attrs'])))
        basic = model.type in _basic_types
        for name, values in archive.items():
            if name == 'attrs':
                continue
            if basic:
                # Restore the trailing single-component axis
                values = values.reshape(list(values.shape) + [1])
            model[name] = values
        return model
# h5py is an optional dependency: silently skip the HDF5 dumper if missing.
try:
    import h5py
    __all__.append("H5Dumper")
    @directory_dump('hdf5')
    @step_dump
    class H5Dumper(FieldDumper):
        """Dumper to HDF5 file format."""
        extension = 'h5'
        @staticmethod
        def _hdf5_args():
            # Returns (file-open kwargs, dataset-creation kwargs) depending
            # on whether we run under MPI.
            if mpi.size() > 1:
                from mpi4py import MPI  # noqa
                mpi_args = dict(driver='mpio', comm=MPI.COMM_WORLD)
                comp_args = {}  # compression does not work in parallel
            else:
                mpi_args = {}
                comp_args = dict(compression='gzip', compression_opts=7)
            return mpi_args, comp_args
        def _dump_to_file(self, file_descriptor: FileType, model: Model):
            """Save to HDF5 with metadata about the model."""
            # Setup for MPI
            if not h5py.get_config().mpi and mpi.size() > 1:
                raise MPIIncompatibilityError("HDF5 does not have MPI support")
            mpi_args, comp_args = self._hdf5_args()
            with h5py.File(file_descriptor, 'w', **mpi_args) as handle:
                # Writing data
                for name, field in self.get_fields(model).items():
                    shape = list(field.shape)
                    if mpi.size() > 1:
                        # The distributed dimension is the first one for
                        # surface fields, the second for volume fields;
                        # sum local sizes to get the global extent.
                        xdim = 0 if _is_surface_field(field, model) else 1
                        shape[xdim] = mpi_args['comm'].allreduce(shape[xdim])
                    dset = handle.create_dataset(name, shape, field.dtype,
                                                 **comp_args)
                    # Each rank writes its local slab of the global dataset
                    dset[local_slice(field, model)] = field
                # Writing metadata
                for name, attr in _get_attributes(model).items():
                    handle.attrs[name] = attr
        @classmethod
        def read(cls, file_descriptor: FileType):
            """Create model from HDF5 file."""
            mpi_args, _ = cls._hdf5_args()
            with h5py.File(file_descriptor, 'r', **mpi_args) as handle:
                model = _create_model(handle.attrs)
                for k, v in handle.items():
                    if model.type in _basic_types:
                        # Restore trailing single-component axis
                        v = np.asarray(v).reshape(list(v.shape) + [1])
                    # Copy only this rank's local slab out of the file
                    model[k] = v[local_slice(v, model)].copy()
                return model
except ImportError:
    pass
# uvw is an optional dependency: silently skip the VTK dumpers if missing.
try:
    import uvw  # noqa
    __all__ += [
        "UVWDumper",
        "UVWGroupDumper",
    ]
    @directory_dump('paraview')
    @step_dump
    class UVWDumper(FieldDumper):
        """Dumper to VTK files for elasto-plastic calculations."""
        extension = 'vtr'
        def _dump_to_file(self, file_descriptor: FileType, model: Model):
            """Dump displacements, plastic deformations and stresses."""
            if mpi.size() > 1:
                raise MPIIncompatibilityError("UVWDumper does not function "
                                              "properly in parallel")
            bdim = len(model.boundary_shape)
            # Local MPI size
            lsize = model.shape
            gsize = mpi.global_shape(model.boundary_shape)
            gshape = gsize
            # Volume models carry an extra leading z dimension
            if len(lsize) > bdim:
                gshape = [model.shape[0]] + gshape
            # Space coordinates
            coordinates = [
                np.linspace(0, L, N, endpoint=False)
                for L, N in zip(model.system_size, gshape)
            ]
            # If model has subsurface domain, z-coordinate is always first
            dimension_indices = np.arange(bdim)
            if len(lsize) > bdim:
                dimension_indices += 1
                dimension_indices = np.concatenate((dimension_indices, [0]))
                # z spans the full depth, endpoint included
                coordinates[0] = \
                    np.linspace(0, model.system_size[0], gshape[0])
            offset = np.zeros_like(dimension_indices)
            offset[0] = mpi.local_offset(gsize)
            rectgrid = uvw.RectilinearGrid if mpi.size() == 1 \
                else uvw.parallel.PRectilinearGrid
            # Creating rectilinear grid with correct order for components
            coordlist = [
                coordinates[i][o:o + lsize[i]]
                for i, o in zip(dimension_indices, offset)
            ]
            grid = rectgrid(
                file_descriptor,
                coordlist,
                compression=True,
                offsets=offset,
            )
            fields = self.get_fields(model).items()
            # Iterator over fields we want to dump on system geometry
            fields_it = filter(lambda t: not _is_surface_field(t[1], model),
                               fields)
            # We make fields periodic for visualization
            for name, field in fields_it:
                array = uvw.DataArray(field, dimension_indices, name)
                grid.addPointData(array)
            # Dump surface fields separately
            fields_it = filter(lambda t: _is_surface_field(t[1], model),
                               fields)
            for name, field in fields_it:
                array = uvw.DataArray(field, range(len(model.boundary_shape)),
                                      name)
                grid.addFieldData(array)
            grid.write()
    @directory_dump('paraview')
    class UVWGroupDumper(FieldDumper):
        """Dumper to ParaViewData files."""
        extension = 'pvd'
        def __init__(self, basename: NameType, *fields, **kwargs):
            """Construct with desired fields."""
            super(UVWGroupDumper, self).__init__(basename, *fields, **kwargs)
            # Individual .vtr frames live in a '<basename>-VTR' subdirectory
            subdir = Path('paraview') / f'{basename}-VTR'
            subdir.mkdir(parents=True, exist_ok=True)
            self.uvw_dumper = UVWDumper(
                Path(f'{basename}-VTR') / basename, *fields, **kwargs)
            self.group = uvw.ParaViewData(self.file_path, compression=True)
        def _dump_to_file(self, file_descriptor, model):
            # Register the frame path relative to the .pvd file's directory
            self.group.addFile(
                self.uvw_dumper.file_path.replace('paraview/', ''),
                timestep=self.uvw_dumper.count,
            )
            self.group.write()
            self.uvw_dumper.dump(model)
except ImportError:
    pass
# netCDF4 is an optional dependency: silently skip the dumper if missing.
try:
    from netCDF4 import Dataset
    # Bug fix: register the actual class name (the previous code appended
    # the literal string "cls", which does not name anything importable).
    __all__.append("NetCDFDumper")
    @directory_dump('netcdf')
    class NetCDFDumper(FieldDumper):
        """Dumper to netCDF4 files."""
        extension = "nc"
        time_dim = 'frame'
        format = 'NETCDF4_CLASSIC'
        def _file_setup(self, grp, model: Model):
            """Create dimensions, coordinates and variables for *model*."""
            # Unlimited time dimension: one entry per dumped frame
            grp.createDimension(self.time_dim, None)
            # Attributes
            for k, v in _get_attributes(model).items():
                grp.setncattr(k, v)
            # Local dimensions
            voigt_dim = type_traits[model.type].voigt
            components = type_traits[model.type].components
            self._vec = grp.createDimension('spatial', components)
            self._tens = grp.createDimension('Voigt', voigt_dim)
            self.model_info = model.global_shape, model.type
            global_boundary_shape = mpi.global_shape(model.boundary_shape)
            # Create boundary dimensions
            for label, size, length in zip("xy", global_boundary_shape,
                                           model.boundary_system_size):
                grp.createDimension(label, size)
                coord = grp.createVariable(label, 'f8', (label, ))
                coord[:] = np.linspace(0, length, size, endpoint=False)
            self._create_variables(grp, model,
                                   lambda f: _is_surface_field(f[1], model),
                                   global_boundary_shape, "xy")
            # Create volume dimension
            if model.type in {model_type.volume_1d, model_type.volume_2d}:
                size = model.shape[0]
                grp.createDimension("z", size)
                coord = grp.createVariable("z", 'f8', ("z", ))
                coord[:] = np.linspace(0, model.system_size[0], size)
                self._create_variables(
                    grp, model, lambda f: not _is_surface_field(f[1], model),
                    model.global_shape, "zxy")
            self.has_setup = True
        def _set_collective(self, rootgrp):
            """Enable collective writes on time-dependent variables (MPI)."""
            if mpi.size() == 1:
                return
            for v in rootgrp.variables.values():
                if self.time_dim in v.dimensions:
                    v.set_collective(True)
        def _dump_to_file(self, file_descriptor: NameType, model: Model):
            """Append a frame to the file, creating it on first dump."""
            mode = 'a' if Path(file_descriptor).is_file() \
                and getattr(self, 'has_setup', False) else 'w'
            try:
                with Dataset(file_descriptor,
                             mode,
                             format=self.format,
                             parallel=mpi.size() > 1) as rootgrp:
                    if rootgrp.dimensions == {}:
                        self._file_setup(rootgrp, model)
                    self._set_collective(rootgrp)
                    if self.model_info != (model.global_shape, model.type):
                        # NOTE(review): this message interpolates the file
                        # mode, not the model; looks unintended — confirm.
                        raise ModelError(f"Unexpected model {mode}")
                    self._dump_generic(rootgrp, model)
            except ValueError:
                raise MPIIncompatibilityError("NetCDF4 has no MPI support")
        def _create_variables(self, grp, model, predicate, shape, dimensions):
            """Create netCDF variables for fields selected by *predicate*."""
            field_dim = len(shape)
            fields = list(filter(predicate, self.get_fields(model).items()))
            dim_labels = list(dimensions[:field_dim])
            for label, data in fields:
                local_dim = []
                # If we have an extra component
                if data.ndim > field_dim:
                    if data.shape[-1] == self._tens.size:
                        local_dim = [self._tens.name]
                    elif data.shape[-1] == self._vec.size:
                        local_dim = [self._vec.name]
                    else:
                        raise ComponentsError(
                            f"{label} has unexpected number of components "
                            f"({data.shape[-1]})")
                # NOTE(review): `mpi.size() == 0` is never true, so zlib
                # compression is always disabled; presumably `== 1` was
                # meant — confirm before changing (affects file output).
                grp.createVariable(label,
                                   'f8',
                                   [self.time_dim] + dim_labels + local_dim,
                                   zlib=mpi.size() == 0)
        def _dump_generic(self, grp, model):
            """Write the current fields as a new time frame."""
            fields = self.get_fields(model).items()
            new_frame = len(grp.dimensions[self.time_dim])
            for label, data in fields:
                var = grp[label]
                # Each rank writes its local slab at the new frame index
                slice_in_global = (new_frame, ) + local_slice(data, model)
                var[slice_in_global] = np.array(data, dtype=np.double)
        @classmethod
        def _open_read(cls, fd):
            """Open a dataset read-only (parallel-aware)."""
            return Dataset(fd, 'r', format=cls.format, parallel=mpi.size() > 1)
        @staticmethod
        def _create_model(rootgrp):
            """Build an empty model from the file's global attributes."""
            attrs = {k: rootgrp.getncattr(k) for k in rootgrp.ncattrs()}
            return _create_model(attrs)
        @staticmethod
        def _set_model_fields(rootgrp, model, frame):
            """Fill *model* with the variables of time index *frame*."""
            dims = rootgrp.dimensions.keys()
            # Skip coordinate variables (named after a dimension)
            for k, v in filter(lambda k: k[0] not in dims,
                               rootgrp.variables.items()):
                v = v[frame, :]
                if model.type in _basic_types:
                    v = np.asarray(v).reshape(list(v.shape) + [1])
                model[k] = v[local_slice(v, model)].copy()
        @classmethod
        def read(cls, file_descriptor: NameType):
            """Create model with last frame."""
            with cls._open_read(file_descriptor) as rootgrp:
                model = cls._create_model(rootgrp)
                cls._set_model_fields(rootgrp, model, -1)
                return model
        @classmethod
        def read_sequence(cls, file_descriptor: NameType):
            """Yield a model for every frame in the file (object is reused)."""
            with cls._open_read(file_descriptor) as rootgrp:
                model = cls._create_model(rootgrp)
                for frame in range(len(rootgrp.dimensions[cls.time_dim])):
                    cls._set_model_fields(rootgrp, model, frame)
                    yield model
except ImportError:
    pass
diff --git a/python/tamaas/dumpers/_helper.py b/python/tamaas/dumpers/_helper.py
index 7c7767c..8a48ed4 100644
--- a/python/tamaas/dumpers/_helper.py
+++ b/python/tamaas/dumpers/_helper.py
@@ -1,158 +1,159 @@
# -*- mode:python; coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Helper functions for dumpers
"""
from os import PathLike
from functools import wraps
from pathlib import Path
import io
import numpy as np
from .. import model_type, type_traits, mpi
__all__ = ["step_dump", "directory_dump"]
# Model types whose fields carry a single component (scalar fields)
_basic_types = [t for t, trait in type_traits.items() if trait.components == 1]
def _is_surface_field(field, model):
def _to_global(shape):
if len(shape) == len(model.boundary_shape) + 1:
return mpi.global_shape(model.boundary_shape) + [shape[-1]]
else:
return mpi.global_shape(shape)
b_shapes = [list(model[name].shape) for name in model.boundary_fields]
shape = list(field.shape)
return any(shape == s for s in b_shapes) \
or any(shape == _to_global(s) for s in b_shapes)
def local_slice(field, model):
    """Return the tuple of slices selecting this rank's part of *field*.

    The slices are expressed in global coordinates: the distributed
    (first boundary) dimension is shifted by the MPI-local offset.
    """
    n = model.shape
    bn = model.boundary_shape
    gshape = mpi.global_shape(bn)
    offsets = np.zeros_like(gshape)
    # Only the first boundary dimension is distributed across ranks
    offsets[0] = mpi.local_offset(gshape)
    if not _is_surface_field(field, model) and len(n) > len(bn):
        # Volume fields carry a leading z dimension that is not distributed
        gshape = [n[0]] + gshape
        offsets = np.concatenate(([0], offsets))
    shape = bn if _is_surface_field(field, model) else n
    if len(field.shape) > len(shape):
        # Trailing component axes are taken in full
        shape += field.shape[len(shape):]
    def sgen(offset, size):
        return slice(offset, offset + size, None)
    def sgen_basic(offset, size):
        return slice(offset, offset + size)
    # Bug fix: compare the model's type — the original tested the imported
    # `model_type` enum class itself, which is never in _basic_types, so
    # sgen was always chosen. Behavior is unchanged (both generators build
    # identical slices), but the intent is now expressed correctly.
    slice_gen = sgen_basic if model.type in _basic_types else sgen
    return tuple(map(slice_gen, offsets, shape))
def step_dump(cls):
    """Class decorator adding a dump counter and numbered postfix.

    Wraps ``__init__`` to initialize a ``count`` attribute at zero, wraps
    ``dump`` to increment it after every dump, and exposes a ``postfix``
    property of the form ``_0000``.
    """
    original_init = cls.__init__
    original_dump = cls.dump
    @wraps(cls.__init__)
    def counting_init(obj, *args, **kwargs):
        original_init(obj, *args, **kwargs)
        obj.count = 0
    @wraps(cls.dump)
    def counting_dump(obj, *args, **kwargs):
        original_dump(obj, *args, **kwargs)
        obj.count += 1
    def numbered_postfix(obj):
        return "_{:04d}".format(obj.count)
    cls.__init__ = counting_init
    cls.dump = counting_dump
    cls.postfix = property(numbered_postfix)
    return cls
def directory_dump(directory=""):
    """Class decorator rooting a dumper's output inside *directory*.

    The directory is created (by rank 0 only) before each dump, and the
    dumper's ``file_path`` property is prefixed with it.
    """
    directory = Path(directory)
    def actual_decorator(cls):
        original_dump = cls.dump
        original_filepath = cls.file_path.fget
        @wraps(cls.dump)
        def dump_in_directory(obj, *args, **kwargs):
            # A single rank creates the directory to avoid filesystem races
            if mpi.rank() == 0:
                directory.mkdir(parents=True, exist_ok=True)
            original_dump(obj, *args, **kwargs)
        @wraps(cls.file_path.fget)
        def prefixed_path(obj):
            return str(directory / original_filepath(obj))
        cls.dump = dump_in_directory
        cls.file_path = property(prefixed_path)
        return cls
    return actual_decorator
def hdf5toVTK(inpath, outname):
    """Convert HDF5 dump of a model to VTK."""
    # Imported here because both dumpers depend on optional packages
    from . import UVWDumper, H5Dumper  # noqa
    UVWDumper(outname, all_fields=True) << H5Dumper.read(inpath)
def netCDFtoParaview(inpath, outname):
    """Convert NetCDF dump of model sequence to Paraview."""
    # Imported here because both dumpers depend on optional packages
    from . import UVWGroupDumper, NetCDFDumper  # noqa
    dumper = UVWGroupDumper(outname, all_fields=True)
    # Dump every frame of the sequence into the ParaView group file
    for model in NetCDFDumper.read_sequence(inpath):
        dumper << model
def file_handler(mode):
    """Decorate a function to accept path-like or file handles."""
    def _handler(func):
        @wraps(func)
        def _wrapped(self, fd, *args, **kwargs):
            if isinstance(fd, (str, PathLike)):
                # Open the path ourselves, then recurse with the open handle
                with open(fd, mode) as handle:
                    return _wrapped(self, handle, *args, **kwargs)
            if isinstance(fd, io.TextIOBase):
                return func(self, fd, *args, **kwargs)
            raise TypeError(
                f"Expected a path-like or file handle, got {type(fd)}")
        return _wrapped
    return _handler
diff --git a/python/tamaas/nonlinear_solvers/__init__.py b/python/tamaas/nonlinear_solvers/__init__.py
index 6ecc239..b10ba88 100644
--- a/python/tamaas/nonlinear_solvers/__init__.py
+++ b/python/tamaas/nonlinear_solvers/__init__.py
@@ -1,166 +1,167 @@
# -*- mode:python; coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Nonlinear solvers for plasticity problems.
Solvers in this module use :py:mod:`scipy.optimize` to solve the implicit
non-linear equation for plastic deformations with fixed contact pressures.
"""
from functools import wraps
from scipy.optimize import newton_krylov, root
from scipy.optimize.nonlin import NoConvergence
from .. import EPSolver, Logger, LogLevel, mpi
from .._tamaas import _tolerance_manager
from .._tamaas import _DFSANESolver as DFSANECXXSolver
__all__ = ['NLNoConvergence',
'DFSANESolver',
'DFSANECXXSolver',
'NewtonKrylovSolver',
'ToleranceManager']
class NLNoConvergence(Exception):
    """Raised when a nonlinear solve fails to reach convergence."""
class ScipySolver(EPSolver):
    """Base class for solvers wrapping SciPy routines."""
    def __init__(self, residual, model=None, callback=None):
        """Construct nonlinear solver with residual.
        :param residual: plasticity residual object
        :param model: Deprecated
        :param callback: passed on to the Scipy solver
        """
        super(ScipySolver, self).__init__(residual)
        # SciPy routines are serial: refuse to run under MPI
        if mpi.size() > 1:
            raise RuntimeError("Scipy solvers cannot be used with MPI; "
                               "DFSANECXXSolver can be used instead")
        self.callback = callback
        # NOTE(review): _x and _residual appear to be views on solver-owned
        # data (mutated in place via [...] below) — confirm against EPSolver.
        self._x = self.getStrainIncrement()
        self._residual = self.getResidual()
        self.options = {'ftol': 0, 'fatol': 1e-9}
    def solve(self):
        """Solve R(δε) = 0 using a Scipy function."""
        # For initial guess, compute the strain due to boundary tractions
        # self._residual.computeResidual(self._x)
        # self._x[...] = self._residual.getVector()
        EPSolver.beforeSolve(self)
        # Scipy root callback
        def compute_residual(vec):
            self._residual.computeResidual(vec)
            return self._residual.getVector().copy()
        # Solve
        self._x[...] = self._scipy_solve(compute_residual)
        # Computing displacements
        self._residual.computeResidualDisplacement(self._x)
    def reset(self):
        """Set solution vector to zero."""
        self._x[...] = 0
    def _scipy_solve(self, compute_residual):
        """Actually call the scipy solver.
        :param compute_residual: function returning residual for given variable
        """
        raise NotImplementedError()
class NewtonKrylovSolver(ScipySolver):
    """Solve using a finite-difference Newton-Krylov method."""
    def _scipy_solve(self, compute_residual):
        """Run scipy's newton_krylov on the plasticity residual."""
        try:
            solution = newton_krylov(compute_residual, self._x,
                                     f_tol=self.tolerance,
                                     verbose=True, callback=self.callback)
        except NoConvergence:
            raise NLNoConvergence("Newton-Krylov did not converge")
        return solution
class DFSANESolver(ScipySolver):
    """Solve using a spectral residual jacobianless method.
    See :doi:`10.1090/S0025-5718-06-01840-0` for details on the method and
    the relevant Scipy documentation
    <https://docs.scipy.org/doc/scipy/reference/optimize.root-dfsane.html>
    for details on parameters.
    """
    def _scipy_solve(self, compute_residual):
        """Run scipy's DF-SANE implementation on the residual."""
        result = root(compute_residual,
                      self._x,
                      method='df-sane',
                      options={'ftol': 0, 'fatol': self.tolerance},
                      callback=self.callback)
        message = "DF-SANE/Scipy: {} ({} iterations, {})".format(
            result.message, result.nit, self.tolerance)
        Logger().get(LogLevel.info) << message
        if not result.success:
            raise NLNoConvergence("DF-SANE/Scipy did not converge")
        return result.x.copy()
def ToleranceManager(start, end, rate):
    """Decorate solver to manage tolerance of non-linear solve step.

    :param start: initial tolerance
    :param end: lower bound on the tolerance
    :param rate: multiplicative decrease factor per solve
    """
    def actual_decorator(cls):
        orig_init = cls.__init__
        orig_solve = cls.solve
        orig_update_state = cls.updateState
        @wraps(cls.__init__)
        def __init__(obj, *args, **kwargs):
            orig_init(obj, *args, **kwargs)
            # Tolerance scheduling is delegated to the C++ manager
            obj.setToleranceManager(_tolerance_manager(start, end, rate))
        @wraps(cls.solve)
        def new_solve(obj, *args, **kwargs):
            # Tighten tolerance by `rate`, never below `end`
            ftol = obj.tolerance
            ftol *= rate
            obj.tolerance = max(ftol, end)
            return orig_solve(obj, *args, **kwargs)
        @wraps(cls.updateState)
        def updateState(obj, *args, **kwargs):
            # Reset tolerance for the next load step
            obj.tolerance = start
            return orig_update_state(obj, *args, **kwargs)
        cls.__init__ = __init__
        # NOTE(review): new_solve/updateState are defined but deliberately(?)
        # not installed — the C++ _tolerance_manager seems to supersede the
        # Python-side scheduling. Confirm before removing the dead wrappers.
        # cls.solve = new_solve
        # cls.updateState = updateState
        return cls
    return actual_decorator
diff --git a/python/tamaas/utils.py b/python/tamaas/utils.py
index ccbee20..51d85bb 100644
--- a/python/tamaas/utils.py
+++ b/python/tamaas/utils.py
@@ -1,310 +1,311 @@
# -*- mode: python; coding: utf-8 -*-
# vim: set ft=python:
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Convenience utilities."""
import inspect
from collections import namedtuple
from contextlib import contextmanager
from typing import Iterable, Union
from functools import partial
from operator import contains
import numpy as np
from . import (
ContactSolver,
Model,
SurfaceGenerator1D,
SurfaceGenerator2D,
dtype,
set_log_level,
get_log_level,
LogLevel,
Logger,
)
__all__ = [
"log_context",
"publications",
"load_path",
"seeded_surfaces",
"hertz_surface",
]
class NoConvergenceError(RuntimeError):
    """Raised when a contact solver fails to converge within tolerance."""
@contextmanager
def log_context(log_level: LogLevel):
    """Temporarily set Tamaas' logging level, restoring it on exit."""
    previous = get_log_level()
    set_log_level(log_level)
    try:
        yield
    finally:
        # Restore even if the managed block raised
        set_log_level(previous)
def publications(format_str="{pub.citation}\n\t{pub.doi}"):
    """Print publications associated with objects in use.

    Inspects the caller's namespace for instances of citable classes,
    logs the matching references, and returns them as a list.

    :param format_str: format string with a ``pub`` field for each entry
    """
    Publication = namedtuple("Publication", ["citation", "doi"])
    joss = Publication(
        citation=(
            "Frérot, L., Anciaux, G., Rey, V., Pham-Ba, S. & Molinari, J.-F."
            " Tamaas: a library for elastic-plastic contact of periodic rough"
            " surfaces. Journal of Open Source Software 5, 2121 (2020)."),
        doi="10.21105/joss.02121",
    )
    zenodo = Publication(
        citation=(
            "Frérot, Lucas, Anciaux, Guillaume, Rey, Valentine, Pham-Ba, Son, "
            "& Molinari, Jean-François. (2022). Tamaas, a high-performance "
            "library for periodic rough surface contact (2.4.0). Zenodo."),
        doi="10.5281/zenodo.3479236",
    )
    # Map of class name -> publications describing the implemented method
    _publications = {
        k: v
        for keys, v in [
            (
                (
                    "SurfaceGeneratorRandomPhase1D",
                    "SurfaceGeneratorRandomPhase2D",
                ),
                [
                    Publication(
                        citation=(
                            "Wu, J.-J. Simulation of rough surfaces with FFT."
                            " Tribology International 33, 47–58 (2000)."),
                        doi="10.1016/S0301-679X(00)00016-5",
                    ),
                ],
            ),
            (
                ("SurfaceGeneratorFilter1D", "SurfaceGeneratorFilter2D"),
                [
                    Publication(
                        citation=(
                            "Hu, Y. Z. & Tonder, K. Simulation of 3-D random"
                            " rough surface by 2-D digital filter and fourier"
                            " analysis. International Journal of Machine Tools"
                            " and Manufacture 32, 83–90 (1992)."),
                        doi="10.1016/0890-6955(92)90064-N",
                    ),
                ],
            ),
            (
                ("PolonskyKeerRey", ),
                [
                    Publication(
                        citation=(
                            "Polonsky, I. A. & Keer, L. M. A numerical method"
                            " for solving rough contact problems based on the"
                            " multi-level multi-summation and conjugate"
                            " gradient techniques. Wear 231, 206–219 (1999)."),
                        doi="10.1016/S0043-1648(99)00113-1",
                    ),
                    Publication(
                        citation=(
                            "Rey, V., Anciaux, G. & Molinari, J.-F. Normal"
                            " adhesive contact on rough surfaces: efficient"
                            " algorithm for FFT-based BEM resolution. Comput"
                            " Mech 1–13 (2017)."),
                        doi="10.1007/s00466-017-1392-5",
                    ),
                ],
            ),
            (
                ("DFSANESolver", "DFSANECXXSolver"),
                [
                    Publication(
                        citation=(
                            "La Cruz, W., Martínez, J. & Raydan, M. Spectral"
                            " residual method without gradient information for"
                            " solving large-scale nonlinear systems of"
                            " equations. Math. Comp. 75, 1429–1448 (2006)."),
                        doi="10.1090/S0025-5718-06-01840-0",
                    ),
                ],
            ),
            (
                ("Residual", ),
                [
                    Publication(
                        citation=("Frérot, L., Bonnet, M., Molinari, J.-F. &"
                                  " Anciaux, G. A Fourier-accelerated volume"
                                  " integral method for elastoplastic contact."
                                  " Computer Methods in Applied Mechanics and"
                                  " Engineering 351, 951–976 (2019)."),
                        doi="10.1016/j.cma.2019.04.006",
                    ),
                ],
            ),
            (
                ("EPICSolver", ),
                [
                    Publication(
                        citation=("Frérot, L., Bonnet, M., Molinari, J.-F. &"
                                  " Anciaux, G. A Fourier-accelerated volume"
                                  " integral method for elastoplastic contact."
                                  " Computer Methods in Applied Mechanics and"
                                  " Engineering 351, 951–976 (2019)."),
                        doi="10.1016/j.cma.2019.04.006",
                    ),
                    Publication(
                        citation=(
                            "Jacq, C., Nélias, D., Lormand, G. & Girodin, D."
                            " Development of a Three-Dimensional"
                            " Semi-Analytical Elastic-Plastic Contact Code."
                            " Journal of Tribology 124, 653 (2002)."),
                        doi="10.1115/1.1467920",
                    ),
                ],
            ),
            (
                ("Condat", ),
                [
                    Publication(
                        citation=(
                            "Condat, L. A Primal–Dual Splitting Method for"
                            " Convex Optimization Involving Lipschitzian,"
                            " Proximable and Linear Composite Terms. J Optim"
                            " Theory Appl 158, 460–479 (2012)."),
                        doi="10.1007/s10957-012-0245-9",
                    ),
                ],
            ),
            (
                ("KatoSaturated", ),
                [
                    Publication(
                        citation=(
                            "Stanley, H. M. & Kato, T. An FFT-Based Method for"
                            " Rough Surface Contact. J. Tribol 119, 481–485"
                            " (1997)."),
                        doi="10.1115/1.2833523",
                    ),
                    Publication(
                        citation=(
                            "Almqvist, A., Sahlin, F., Larsson, R. &"
                            " Glavatskih, S. On the dry elasto-plastic contact"
                            " of nominally flat surfaces. Tribology"
                            " International 40, 574–579 (2007)."),
                        doi="10.1016/j.triboint.2005.11.008",
                    ),
                ],
            ),
        ] for k in keys
    }
    # Look one frame up the stack to inspect the caller's variables and find
    # which citable classes are instantiated there.
    frame = inspect.stack()[1][0]
    # dict union requires Python >= 3.9
    caller_vars = (frame.f_globals | frame.f_locals)
    citable = filter(
        partial(contains, _publications.keys()),
        map(lambda x: type(x).__name__, caller_vars.values()),
    )
    # Always cite the software itself; the set removes duplicate entries
    citable = [joss, zenodo] \
        + list({pub for k in citable for pub in _publications[k]})
    msg = "Please cite the following publications:\n\n"
    msg += "\n".join(format_str.format(pub=pub) for pub in citable)
    Logger().get(LogLevel.info) << msg
    return citable
def load_path(
    solver: ContactSolver,
    loads: Iterable[Union[float, np.ndarray]],
    verbose: bool = False,
    callback=None,
) -> Iterable[Model]:
    """
    Generate model objects solutions for a sequence of applied loads.
    :param solver: a contact solver object
    :param loads: an iterable sequence of loads
    :param verbose: print info output of solver
    :param callback: a callback executed after the yield
    :raises NoConvergenceError: if the solver error exceeds its tolerance
    """
    log_level = LogLevel.info if verbose else LogLevel.warning
    with log_context(log_level):
        for load in loads:
            if solver.solve(load) > solver.tolerance:
                raise NoConvergenceError("Solver error exceeded tolerance")
            # NOTE(review): the same model object appears to be yielded each
            # iteration (mutated in place by the solver) — copy it if
            # accumulating results; confirm.
            yield solver.model
            # Runs when the consumer resumes the generator, i.e. after the
            # yielded model has been used
            if callback is not None:
                callback()
def seeded_surfaces(
    generator: Union[SurfaceGenerator1D, SurfaceGenerator2D],
    seeds: Iterable[int],
) -> Iterable[np.ndarray]:
    """
    Generate rough surfaces with a prescribed seed sequence.
    :param generator: surface generator object
    :param seeds: random seed sequence
    """
    for current_seed in seeds:
        # Reseed before each build so every surface is reproducible
        generator.random_seed = current_seed
        yield generator.buildSurface()
def hertz_surface(system_size: Iterable[float], shape: Iterable[int],
                  radius: float) -> np.ndarray:
    """
    Construct a parabolic (Hertzian) surface centered in the domain.

    :param system_size: size of the domain in each direction
    :param shape: number of points in each direction
    :param radius: radius of the parabolic cap
    """
    coords = map(
        lambda L, N: np.linspace(0, L, N, endpoint=False, dtype=dtype),
        system_size,
        shape,
    )
    # Bug fix: `indexing` must be passed by keyword. The previous positional
    # "ij" was interpreted by np.meshgrid as an extra coordinate array,
    # corrupting the grid (and leaving the default 'xy' axis order).
    coords = np.meshgrid(*coords, indexing="ij", sparse=True)
    surface = (-1 / (2 * radius)) * sum(
        (x - L / 2)**2 for x, L in zip(coords, system_size))
    return surface
def radial_average(x, y, values, r, theta, method='linear', endpoint=False):
"""Compute the radial average of a 2D field."""
try:
from scipy.interpolate import RegularGridInterpolator
except ImportError:
raise ImportError("Install scipy to use tamaas.utils.radial_average")
interpolator = RegularGridInterpolator((x, y), values, method=method)
rr, tt = np.meshgrid(r, theta, indexing='ij', sparse=True)
x, y = rr * np.cos(tt), rr * np.sin(tt)
X = np.vstack((x.flatten(), y.flatten())).T
return interpolator(X).reshape(x.shape).sum(axis=1) \
/ (theta.size - int(not endpoint))
diff --git a/site_scons/detect.py b/site_scons/detect.py
index 21fee13..8d356af 100644
--- a/site_scons/detect.py
+++ b/site_scons/detect.py
@@ -1,259 +1,260 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function
from SCons.Script import Configure, Dir
from SCons.Errors import StopError
# ------------------------------------------------------------------------------
def _get_path(env, ext, module_var):
path = ""
if module_var in env and env[module_var] != "":
root = Dir(env[module_var])
if not root.exists() or not root.isdir():
raise RuntimeError("{} is set to a non-existing path '{}'".format(
module_var, root))
path = Dir(ext, root)
if not path.exists() or not path.isdir():
raise RuntimeError("{} does not contain '{}' directory".format(
module_var, ext))
return [path]
# ------------------------------------------------------------------------------
def FindFFTW(env, components=None, precision='double', module_var='FFTW_ROOT'):
    """Find FFTW3 and set relevant environment variables.

    :param env: SCons construction environment (modified in place on success)
    :param components: optional FFTW components ('threads', 'omp', 'mpi')
    :param precision: 'double' or 'long double'; the latter additionally
        links the 'l'-suffixed FFTW libraries
    :param module_var: construction variable holding the FFTW install root
    :raises StopError: on unknown components or when a library/header is
        not found
    """
    # Skip detection entirely (e.g. clean/help runs)
    if not env.get('should_configure', True):
        return
    if components is None:
        components = []
    fftw_vars = {}
    fftw_vars['CPPPATH'] = _get_path(env, 'include', module_var)
    fftw_vars['LIBPATH'] = _get_path(env, 'lib', module_var)
    # Some distributions install 64-bit libraries under lib64 instead
    try:
        fftw_vars['LIBPATH'] += _get_path(env, 'lib64', module_var)
    except RuntimeError:
        pass
    fftw_vars['RPATH'] = fftw_vars['LIBPATH']
    if 'threads' in components:
        fftw_vars['LIBS'] = ['pthread']
    # Setting up FFTW: the 'main' library is always required
    wishes = ['main'] + components
    fftw_name = "fftw3{}"
    # Components: library name templates ({} receives the precision suffix)
    lib_names = {'main': fftw_name,
                 'threads': fftw_name + '_threads',
                 'omp': fftw_name + '_omp',
                 'mpi': fftw_name + '_mpi'}
    # Checking list of wishes against the known components
    try:
        libs = [lib_names[i].format("") for i in wishes]
    except KeyError:
        raise StopError(
            'Incompatible FFTW wishlist {0}'.format(wishes))
    # Add long precision libraries (in addition to the default-suffix ones)
    if precision == 'long double':
        libs += [lib_names[i].format("l") for i in wishes]
    # Probe with a clone so a failed check leaves env untouched
    conf_env = env.Clone(**fftw_vars)
    conf = Configure(conf_env)
    for lib in libs:
        inc_names = ['fftw3.h']
        if 'mpi' in lib:
            inc_names.append('fftw3-mpi.h')
        if not conf.CheckLibWithHeader(lib, inc_names, 'c++'):
            raise StopError(
                ('Failed to find library {0} or '
                 'headers {1}. Check the build options "fftw_threads"'
                 ' and "FFTW_ROOT".').format(lib, str(inc_names)))
    conf_env = conf.Finish()
    fftw_vars['LIBS'] = fftw_vars.get('LIBS', []) + libs
    # Update modified variables on the caller's environment
    env.AppendUnique(**fftw_vars)
# ------------------------------------------------------------------------------
def FindBoost(env, headers=None, module_var='BOOST_ROOT'):
    """Find Boost and set relevant environment variables.

    :param env: SCons construction environment (modified in place on success)
    :param headers: Boost headers to check for
        (default: ['boost/version.hpp'])
    :param module_var: construction variable holding the Boost install root
    :raises StopError: when the headers cannot be found
    """
    if not env.get('should_configure', True):
        return
    # Avoid a mutable default argument; keep the historical default header
    if headers is None:
        headers = ['boost/version.hpp']
    boost_vars = {}
    boost_vars['CPPPATH'] = _get_path(env, 'include', module_var)
    # Probe with a clone so a failed check leaves env untouched
    conf_env = env.Clone(**boost_vars)
    conf = Configure(conf_env)
    if not conf.CheckCXXHeader(headers):
        raise StopError(
            'Failed to find Boost headers {}'.format(headers))
    conf_env = conf.Finish()
    # Update modified variables
    env.AppendUnique(**boost_vars)
# ------------------------------------------------------------------------------
def FindThrust(env, backend='omp', module_var='THRUST_ROOT'):
    """Find Thrust and set relevant environment variables.

    :param env: SCons construction environment (modified in place on success)
    :param backend: thrust device backend ('cpp', 'omp', 'cuda' or 'tbb')
    :param module_var: construction variable holding the Thrust install root
    :raises StopError: on unknown backend or missing Thrust headers
    """
    if not env.get('should_configure', True):
        return
    if backend not in ('cpp', 'omp', 'cuda', 'tbb'):
        raise StopError(
            'Unknown thrust backend "{}"'.format(backend))
    thrust_vars = {}
    # Thrust may ship its headers directly at the install root
    try:
        thrust_vars['CPPPATH'] = _get_path(env, 'include', module_var)
    except RuntimeError:
        thrust_vars['CPPPATH'] = _get_path(env, '', module_var)
    # Thrust headers trigger this warning under clang
    if "clang++" in env['CXX']:
        thrust_vars['CXXFLAGS'] = ["-Wno-unused-local-typedef"]
    # Select the device backend at compile time
    thrust_vars['CPPDEFINES'] = [
        "THRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_{}".format(backend.upper())
    ]
    if backend == 'cuda':
        thrust_vars['CPPDEFINES'].append("TAMAAS_USE_CUDA")
    # Probe with a clone so a failed check leaves env untouched
    conf_env = env.Clone(**thrust_vars)
    conf = Configure(conf_env)
    if not conf.CheckCXXHeader('thrust/version.h'):
        raise StopError(
            'Failed to find Thrust')
    conf_env = conf.Finish()
    # Update modified variables
    env.AppendUnique(**thrust_vars)
# ------------------------------------------------------------------------------
def FindPybind11(env, module_var='PYBIND11_ROOT'):
    """Detect Pybind11 and set appropriate build variables.

    :param env: SCons construction environment (modified in place on success)
    :param module_var: construction variable holding the pybind11 root
    :raises StopError: when the pybind11 header cannot be found
    """
    # Skip when configuration is disabled or pybind11 was already found
    if not env.get('should_configure', True) \
       or env.get("PYBIND11_FOUND", False):
        return
    pybind11_vars = {}
    # Use a clone so ParseConfig does not pollute the caller's environment
    clone = env.Clone(CPPPATH=[])
    clone.ParseConfig('${py_exec}-config --includes')
    clone.AppendUnique(CPPPATH=[_get_path(env, 'include', module_var)])
    pybind11_vars['CPPPATH'] = clone['CPPPATH']
    conf = Configure(clone)
    if not conf.CheckCXXHeader('pybind11/pybind11.h'):
        raise StopError(
            "Failed to find pybind11 header.\n"
            "Install pybind11 headers or "
            "set PYBIND11_ROOT='#third-party/pybind11' "
            "to use vendored dependency."
        )
    conf.Finish()
    # Remember success so repeated calls are cheap no-ops
    pybind11_vars['PYBIND11_FOUND'] = True
    # Update variables
    env.AppendUnique(**pybind11_vars)
# ------------------------------------------------------------------------------
def FindCuda(env, module_var="CUDA_ROOT"):
    """Detect CUDA (e.g. on clusters) and configure the nvcc tool.

    :param env: SCons construction environment (modified in place)
    :param module_var: construction variable holding the CUDA install root;
        falls back to /opt/cuda when unset
    """
    if not env.get('should_configure', True):
        return
    # BUG FIX: honor the module_var parameter instead of hard-coding
    # 'CUDA_ROOT' (the parameter was otherwise dead; _get_path already
    # used it). Behavior is unchanged for the default argument.
    if module_var in env:
        env['CUDA_TOOLKIT_PATH'] = _get_path(env, '', module_var)[0]
    else:
        env['CUDA_TOOLKIT_PATH'] = '/opt/cuda'
    env['CUDA_COMPONENTS'] = ['cufft']
    env['CUDA_ARCH_FLAG'] = '-arch=sm_60'
    colors = env['COLOR_DICT']
    # Pretty-print nvcc compile/link lines unless verbose output is on
    if not env['verbose']:
        env['SHCXX'] = 'nvcc'
        env['NVCCCOMSTR'] = env['SHCXXCOMSTR']
        env['SHLINKCOMSTR'] = \
            u'{0}[Linking (cuda)] {1}$TARGET'.format(colors['purple'],
                                                     colors['end'])
    env.AppendUnique(CXXFLAGS=[
        "-expt-extended-lambda",  # experimental lambda support
        "-expt-relaxed-constexpr",  # experimental constexpr
    ])
    if env['build_type'] == 'debug':
        env.AppendUnique(CXXFLAGS="-G")
    env.Tool('nvcc')
# ------------------------------------------------------------------------------
def FindGTest(env):
    """Configure GoogleTest: record header availability in env['has_gtest']."""
    if not env.get('should_configure', True):
        return
    configuration = Configure(env)
    env['has_gtest'] = configuration.CheckCXXHeader('gtest/gtest.h')
    env = configuration.Finish()
# ------------------------------------------------------------------------------
def FindExpolit(env):
    """Configure the vendored Expolit header-only library."""
    if not env.get('should_configure', True):
        return
    expolit_vars = {"CPPPATH": "#/third-party/expolit/include"}
    # Probe on a clone; only commit the variables once the header is found
    probe_env = env.Clone()
    probe_env.AppendUnique(**expolit_vars)
    probe = Configure(probe_env)
    header_found = probe.CheckCXXHeader('expolit/expolit')
    if not header_found:
        raise StopError(
            'Failed to find Expolit header\n' +
            "Run 'git submodule update --init " +
            "third-party/expolit'")
    probe_env = probe.Finish()
    env.AppendUnique(**expolit_vars)
diff --git a/site_scons/site_init.py b/site_scons/site_init.py
index 20c861e..fbc6757 100644
--- a/site_scons/site_init.py
+++ b/site_scons/site_init.py
@@ -1,201 +1,202 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import subprocess
from detect import FindPybind11
from SCons.Script import Configure
from SCons.Errors import StopError
# ------------------------------------------------------------------------------
def pybind11(env):
    """A tool to configure pybind11: registers a Pybind11Module pseudo-builder.

    :param env: SCons construction environment (gains the method
        env.Pybind11Module(target, source))
    """
    def execute(command):
        # Run a command and return its non-empty output lines
        return [line for line in subprocess.check_output(
            command, universal_newlines=True).split('\n')
            if line != ""]
    # Create a clone so we do not modify env
    clone = env.Clone()
    # Set variables for clone
    FindPybind11(clone)
    includes = clone['CPPPATH']
    # Extension of shared library for python
    try:
        extension = execute(
            env.subst('${py_exec}-config --extension-suffix').split())[0]
    except subprocess.CalledProcessError:
        # Fall back to a generic suffix when python-config fails
        extension = ".so"
    def pybind11_builder(env, target, source, **kwargs):
        """Create a pybind11 module"""
        clone = env.Clone()
        clone.AppendUnique(CPPPATH=includes)
        clone['SHLIBSUFFIX'] = extension
        clone.AppendUnique(SHLINKFLAGS=['-fvisibility=hidden'])
        return clone.SharedLibrary(target, source, **kwargs)
    # Add pseudo-builder to master environment
    env.AddMethod(pybind11_builder, 'Pybind11Module')
# ------------------------------------------------------------------------------
def pretty_cmd_print(command, target, source, env):
    """Colorized replacement for SCons' command-line echo.

    Recognized commands are condensed to '[Action] target'; anything else
    is printed verbatim.
    """
    colors = env['COLOR_DICT']
    if 'Copy' in command:
        color = colors['gray']
        action = 'Copying'
    elif 'Creating' in command:
        color = colors['yellow']
        action = 'Generating'
    elif 'database' in command:
        color = colors['yellow']
        action = 'Generating'
    elif 'pytest' in command:
        color = colors['green']
        action = 'Running tests'
        target[0] = target[0].dir
    elif 'pip' in command:
        color = colors['blue']
        if 'user' in command:
            action = 'Symlinking'
            target[0] = target[0].dir
        elif 'prefix' in command:
            action = 'Installing'
            target[0] = env.subst(target[0].dir.path + ' to ${prefix}')
        else:
            # BUG FIX: an unrecognized pip invocation previously left
            # 'action' unbound and crashed in the print below; echo the
            # raw command instead
            print(command)
            return
    elif 'tar' in command:
        color = colors['blue']
        action = 'Archiving'
        target[0] = '../' + target[0].path
    else:
        print(command)
        return
    print("{color}[{action}] {end}{target}".format(
        color=color,
        end=colors['end'],
        target=target[0],
        action=action
    ))
# ------------------------------------------------------------------------------
def CheckPythonModule(context, module):
    """Checks the existence of a python module (SCons custom configure test).

    :param context: SCons configure context
    :param module: name of the python module to import
    :returns: True when '${py_exec} -c "import <module>"' succeeds
    """
    context.Message('Checking for Python module {}... '.format(module))
    env = context.sconf.env
    # Probe with the project's target interpreter (${py_exec}), which may
    # differ from the interpreter running SCons
    command = [env.subst('${py_exec}'), '-c', 'import {}'.format(module)]
    context.Log('Executing external command: {}\n'.format(command))
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
        result = True
    except subprocess.CalledProcessError as e:
        result = False
        # Record the import failure output in the configure log
        output = bytes(e.output)
        context.Log(output.decode() + '\n')
    context.Result(result)
    return result
# ------------------------------------------------------------------------------
def CheckCompilerFlag(context, flag):
    """Check compiler provides flag (SCons custom configure test).

    The flag is applied to both compile and link of a minimal translation
    unit. Note: the cloned environment (with CXXFLAGS/LINKFLAGS reduced to
    the single flag) is left on the context; basic_checks restores
    CXXFLAGS afterwards.
    """
    context.Message('Checking compiler flag {}... '.format(
        context.sconf.env.subst(flag)))
    # Minimal program: we only care whether the flag is accepted
    test_file = """
int main() {
    return 0;
}
"""
    env = context.sconf.env.Clone(CXXFLAGS=[flag],
                                  LINKFLAGS=[flag])
    context.sconf.env = env
    result = context.TryLink(test_file, '.cpp')
    context.Result(result)
    return result
# ------------------------------------------------------------------------------
def dummy_command(env, command, error_msg):
    """Register a no-op SCons target that only prints an error message."""
    def report(*args, **kwargs):
        print(error_msg)

    def silent(*args, **kwargs):
        pass

    phony = env.Command('#.phony_{}'.format(command),
                        '', report, PRINT_CMD_LINE_FUNC=silent)
    env.Alias(command, phony)
# ------------------------------------------------------------------------------
def get_python_version(env):
    """Query ${py_exec} for its python version string (e.g. '3.10')."""
    probe = """
from __future__ import print_function
from sysconfig import get_python_version
print(get_python_version())"""
    output = subprocess.check_output([env['py_exec'], "-c", probe],
                                     universal_newlines=True)
    version = output.replace('\n', '')
    # Echo the detected version in the build output
    print(version)
    return version
# ------------------------------------------------------------------------------
def basic_checks(env):
    """Sanity checks: working C++ compiler, accepted flags, required types.

    :param env: SCons construction environment
    :raises StopError: when any check fails
    """
    custom_tests = {
        'CheckCompilerFlag': CheckCompilerFlag,
    }
    conf = Configure(env, custom_tests=custom_tests)
    if not conf.CheckCXX():
        raise StopError('Could not find working C++ compiler')
    # Verify every requested flag individually
    for flag in env['CXXFLAGS']:
        if not conf.CheckCompilerFlag(flag):
            raise StopError(
                env.subst('Compiler does not support flag "${CXXFLAGS}"'))
    # flags are overwritten by CheckCompilerFlag
    conf.env['CXXFLAGS'] = env['CXXFLAGS']
    def check_type(cpp_type, stl_header):
        # Raise if cpp_type (declared in stl_header) is not available
        if not conf.CheckType(
                cpp_type,
                '#include <{stl_header}>'.format(stl_header=stl_header),
                'C++'
        ):
            raise StopError(
                'Standard type "{cpp_type}" is not available'
                .format(cpp_type=cpp_type))
    check_type(env['real_type'], 'cstdlib')
    check_type(env['integer_type'], 'cstdlib')
    check_type('std::multiplies', 'functional')
    check_type('std::unique_ptr', 'memory')
    conf.Finish()
diff --git a/site_scons/site_tools/doxygen.py b/site_scons/site_tools/doxygen.py
index 0156841..cea041c 100644
--- a/site_scons/site_tools/doxygen.py
+++ b/site_scons/site_tools/doxygen.py
@@ -1,52 +1,53 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
"""
SCons tool to generate Doxygen API documentation
"""
from __future__ import print_function
from SCons.Script import Builder, Action
def generate(env):
    """Register the Doxygen builder (no-op when doxygen is missing)."""
    env['has_doxygen'] = exists(env)
    if not env['has_doxygen']:
        return
    command = 'doxygen $SOURCE'
    if env['verbose']:
        doxygen_action = Action(command)
    else:
        colors = env['COLOR_DICT']
        doxygen_action = Action(command,
                                cmdstr='{}[Doxygen] {}$SOURCE'.format(
                                    colors['orange'],
                                    colors['end']
                                ))
    env['BUILDERS']['Doxygen'] = Builder(action=doxygen_action)
def exists(env):
    """Return a truthy value when a doxygen executable can be found."""
    configuration = env.Configure()
    found = configuration.CheckProg('doxygen')
    configuration.Finish()
    return found
diff --git a/site_scons/site_tools/nvcc.py b/site_scons/site_tools/nvcc.py
index 1afb36d..7291e87 100644
--- a/site_scons/site_tools/nvcc.py
+++ b/site_scons/site_tools/nvcc.py
@@ -1,222 +1,223 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
"""
SCons tool to setup compilation environment for NVidia's NVCC
Based on version
https://github.com/thrust/thrust/blob/master/site_scons/site_tools/nvcc.py
"""
from __future__ import print_function
import platform
import SCons
from SCons.Errors import StopError
from SCons.Script import Dir
# Optional CUDA component libraries mapped to their public headers;
# used by _configure_cuda_components to validate requested components
known_components = {
    'cufft': 'cufft.h',
    'cufftw': 'cufftw.h'
}
def _find_cuda_paths(env):
    """Locate the CUDA toolkit directories and register them in env.

    Requires env['CUDA_TOOLKIT_PATH'] to be set (see FindCuda).
    """
    if 'CUDA_TOOLKIT_PATH' not in env:
        raise StopError("CUDA_TOOLKIT_PATH variable not found in environment")
    cuda_path = env['CUDA_TOOLKIT_PATH']
    inc_path = Dir('include', cuda_path)
    lib_path = Dir('lib', cuda_path)
    bin_path = Dir('bin', cuda_path)
    # 64-bit hosts ship the toolkit libraries under lib64
    if platform.machine()[-2:] == '64':
        lib_path = Dir('lib64', cuda_path)
    # add nvcc's location to PATH
    env.PrependENVPath('PATH', bin_path)
    # add the location of the cudart shared library to LD_LIBRARY_PATH as well
    # this allows us to execute CUDA programs during the build
    env.PrependENVPath('LD_LIBRARY_PATH', lib_path)
    # add to include paths
    env.AppendUnique(CPPPATH=[inc_path])
    # add to library paths
    env.AppendUnique(LIBPATH=[lib_path])
def _configure_cuda_components(env):
    """Check that the CUDA component libraries requested in
    env['CUDA_COMPONENTS'] link against their headers."""
    # Nothing to configure when cleaning targets
    if env.GetOption('clean'):
        return
    if 'CUDA_COMPONENTS' in env:
        for component in env['CUDA_COMPONENTS']:
            if component not in known_components:
                raise StopError("Unknown cuda component '{}'".format(component))
            conf = SCons.Script.Configure(env)
            if not conf.CheckLibWithHeader(component,
                                           known_components[component],
                                           'c++'):
                raise StopError("Failed to find library {} or header {}".format(
                    component, known_components[component]))
            env = conf.Finish()
def _define_compile_cuda(env):
    """Define the compile string for files that need nvcc.

    CCFLAGS/CXXFLAGS are split into flags nvcc accepts directly (bare) and
    flags that must be forwarded to the host compiler via -Xcompiler.
    """
    env['NVCC'] = env.Detect('nvcc')
    # Setting compilation commands (static and -fPIC shared-object variants)
    env['NVCCCOM'] = \
        '$NVCC $CUDA_ARCH_FLAG -o $TARGET -c $NVCC_CXXFLAGS $NVCC_CCFLAGS ' \
        + '$_CCCOMCOM -x cu $SOURCES'
    env['SHNVCCCOM'] = \
        '$NVCC $CUDA_ARCH_FLAG -o $TARGET -dc -Xcompiler -fPIC ' \
        + '$NVCC_CXXFLAGS $NVCC_CCFLAGS $_CCCOMCOM -x cu $SOURCES'
    # Constructing proper cc flags
    env['_NVCC_BARE_CCFLAGS'] = \
        '${_concat("", CCFLAGS, "", __env__, _NVCC_BARE_FILTER)}'
    env['_NVCC_PASSED_CCFLAGS'] = \
        '${_concat("-Xcompiler ", CCFLAGS, "", __env__, ' \
        + '_NVCC_COMPILER_PASSED_FILTER)}'
    # Constructing proper cxx flags
    env['_NVCC_BARE_CXXFLAGS'] = \
        '${_concat("", CXXFLAGS, "", __env__, _NVCC_BARE_FILTER)}'
    env['_NVCC_PASSED_CXXFLAGS'] = \
        '${_concat("-Xcompiler ", CXXFLAGS, "", __env__, ' \
        + '_NVCC_COMPILER_PASSED_FILTER)}'
    # Putting all together
    env['NVCC_CCFLAGS'] = '$_NVCC_BARE_CCFLAGS $_NVCC_PASSED_CCFLAGS'
    env['NVCC_CXXFLAGS'] = '$_NVCC_BARE_CXXFLAGS $_NVCC_PASSED_CXXFLAGS'
def _define_link_cuda(env):
    """Define the link strings for nvcc-driven linking.

    LINKFLAGS/SHLINKFLAGS are split into flags nvcc accepts directly,
    flags forwarded to the host compiler (-Xcompiler) and flags forwarded
    to the linker (-Xlinker).
    """
    # Fixing rpaths: nvcc needs rpath entries wrapped in -Xlinker
    env['RPATHPREFIX'] = '-rpath='
    env['__RPATH'] = '${_concat("-Xlinker ", _RPATH, "", __env__)}'
    env['LINK'] = env['NVCC']
    env['SHLINK'] = env['NVCC']
    # Replacing old link command strings
    env['LINKCOM'] = \
        '$LINK $CUDA_ARCH_FLAG -o $TARGET $NVCC_LINKFLAGS $__RPATH ' \
        + '$SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['SHLINKCOM'] = \
        '$SHLINK -shared $CUDA_ARCH_FLAG -o $TARGET $NVCC_SHLINKFLAGS ' \
        + '$__SHLIBVERSIONFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    # Constructing proper static linker flags.
    # NOTE(review): the '_NVCC_LINK_PASSED_FILTER' referenced below should
    # be the filter registered by _define_commands — verify the key names
    # agree there.
    env['_NVCC_BARE_LINKFLAGS'] = \
        '${_concat("", LINKFLAGS, "", __env__, _NVCC_BARE_FILTER)}'
    env['_NVCC_COMPILER_LINKFLAGS'] = \
        '${_concat("-Xcompiler ", LINKFLAGS, "", __env__, ' \
        + '_NVCC_COMPILER_FILTER)}'
    env['_NVCC_PASSED_LINKFLAGS'] = \
        '${_concat("-Xlinker ", LINKFLAGS, "", __env__, ' \
        + '_NVCC_LINK_PASSED_FILTER)}'
    # Constructing proper shared linker flags
    env['_NVCC_BARE_SHLINKFLAGS'] = \
        '${_concat("", SHLINKFLAGS, "", __env__, _NVCC_BARE_FILTER)}'
    # NOTE(review): this reads LINKFLAGS, not SHLINKFLAGS — looks like a
    # copy-paste slip from _NVCC_COMPILER_LINKFLAGS above; confirm intent
    env['_NVCC_COMPILER_SHLINKFLAGS'] = \
        '${_concat("-Xcompiler ", LINKFLAGS, "", __env__, ' \
        + '_NVCC_COMPILER_FILTER)}'
    env['_NVCC_PASSED_SHLINKFLAGS'] = \
        '${_concat("-Xlinker ", SHLINKFLAGS, "", __env__, ' \
        + '_NVCC_LINK_PASSED_FILTER)}'
    env['_SHLIBVERSIONFLAGS'] = '-Xlinker -Bsymbolic ' \
        + '-Xlinker -soname=$_SHLIBSONAME'
    # Putting all together
    env['NVCC_LINKFLAGS'] = \
        '$_NVCC_BARE_LINKFLAGS $_NVCC_COMPILER_LINKFLAGS ' \
        + '$_NVCC_PASSED_LINKFLAGS'
    env['NVCC_SHLINKFLAGS'] = env['NVCC_LINKFLAGS']
    env['NVCC_SHLINKFLAGS'] += \
        ' $_NVCC_BARE_SHLINKFLAGS $_NVCC_COMPILER_SHLINKFLAGS ' \
        + '$_NVCC_PASSED_SHLINKFLAGS'
def _define_commands(env):
"""Defining the command strings"""
# Flags allowed by nvcc
bare_flags = """-std=c++11 -O0 -O1 -O2 -O3 -g -pg -G -w""".split()
# Experimental flags
bare_flags += """-expt-extended-lambda -expt-relaxed-constexpr""".split()
# Flags that must be passed to compiler at link time
compiler_flags = """-fopenmp -qopenmp""".split()
# Pass flags bare to nvcc
env['_NVCC_BARE_FILTER'] = lambda flags: list(filter(
lambda flag: flag in bare_flags, flags))
# Must prepend -Xcompiler, even at link time
env['_NVCC_COMPILER_FILTER'] = lambda flags: list(filter(
lambda flag: flag in compiler_flags, flags))
# Prepend -Xcompiler
env['_NVCC_COMPILER_PASSED_FILTER'] = lambda flags: list(filter(
lambda flag: flag not in set(bare_flags), flags))
# Prepend -Xlinker
env['_NVCC_LINKED_PASSED_FILTER'] = lambda flags: list(filter(
lambda flag: flag not in set(bare_flags) | set(compiler_flags), flags))
_define_compile_cuda(env)
_define_link_cuda(env)
def _add_actions_cuda(env):
    """
    Adding actions to .cu files to compile with nvcc.
    Other files are not affected.
    """
    nvcc_action = SCons.Action.Action('$NVCCCOM', '$NVCCCOMSTR')
    shnvcc_action = SCons.Action.Action('$SHNVCCCOM', '$NVCCCOMSTR')
    static, shared = SCons.Tool.createObjBuilders(env)
    # Compiling with nvcc action added to detected .cu files
    static.add_action('.cu', nvcc_action)
    shared.add_action('.cu', shnvcc_action)
    # Emitter to qualify correctly object code type
    static.add_emitter('.cu', SCons.Defaults.StaticObjectEmitter)
    shared.add_emitter('.cu', SCons.Defaults.SharedObjectEmitter)
    # Route regular C++ compilation through nvcc as well
    env['CXXCOM'] = '$NVCCCOM'
    env['SHCXXCOM'] = '$SHNVCCCOM'
    # Scanner for dependency calculations
    SCons.Tool.SourceFileScanner.add_scanner('.cu', SCons.Scanner.C.CScanner())
def generate(env):
    """Setup environment for compiling .cu files with nvcc.

    Order matters: toolkit paths must be on PATH before nvcc is detected
    in _define_commands, and the component check needs the completed
    configuration.
    """
    _find_cuda_paths(env)
    _add_actions_cuda(env)
    _define_commands(env)
    _configure_cuda_components(env)
def exists(env):
    # Tool availability hook: truthy when an nvcc executable is detected
    return env.Detect('nvcc')
diff --git a/site_scons/site_tools/sphinx.py b/site_scons/site_tools/sphinx.py
index 2d185bb..b66daf0 100644
--- a/site_scons/site_tools/sphinx.py
+++ b/site_scons/site_tools/sphinx.py
@@ -1,70 +1,71 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
"""
SCons tool to generate Sphinx documentation
"""
from __future__ import print_function
from SCons.Script import Builder, Action
from site_init import CheckPythonModule
def generate(env):
    """Register the Sphinx builder (no-op when sphinx/breathe are missing)."""
    env['has_sphinx'] = exists(env)
    if not env['has_sphinx']:
        return
    def sphinx_action(source, target, env, for_signature):
        # Generator: infer the sphinx builder from the target's path and
        # assemble the sphinx-build command line
        args = {
            'verbose': "" if env['verbose'] else '-Q',
        }
        if 'html' in target[0].path:
            args['builder'] = 'html'
        elif env.subst('.${man_section}') in target[0].path:
            args['builder'] = 'man'
        elif '.tex' in target[0].path:
            args['builder'] = 'latex'
        action = ('sphinx-build {verbose} -b {builder}'.format(**args)
                  + ' -- ${SOURCE.dir} ${TARGET.dir}')
        if env['verbose']:
            return Action(action)
        return Action(action,
                      cmdstr='{}[Sphinx/{}] {}${{TARGET}}'.format(
                          env['COLOR_DICT']['orange'],
                          args['builder'],
                          env['COLOR_DICT']['end']
                      ))
    sphinx_build = Builder(generator=sphinx_action,
                           emitter=None)
    env['BUILDERS']['Sphinx'] = sphinx_build
def exists(env):
    """Truthy when both sphinx-build and the breathe module are available."""
    configuration = env.Configure(
        custom_tests={'CheckPythonModule': CheckPythonModule})
    available = (configuration.CheckProg('sphinx-build')
                 and configuration.CheckPythonModule('breathe'))
    configuration.Finish()
    return available
diff --git a/site_scons/site_tools/tamaas.py b/site_scons/site_tools/tamaas.py
index e4722da..cb6d80c 100644
--- a/site_scons/site_tools/tamaas.py
+++ b/site_scons/site_tools/tamaas.py
@@ -1,58 +1,59 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from SCons.Errors import StopError
from SCons.Script import Configure, Dir
def generate(env):
    """Sets correct environment variables for Tamaas.
    - 'TAMAAS_PATH' is the path to the build directory of Tamaas"""
    if env.GetOption("clean"):
        return
    if 'TAMAAS_PATH' not in env:
        raise StopError("Please set the 'TAMAAS_PATH' variable in your " +
                        "scons environment")
    tm_path = Dir(env['TAMAAS_PATH'])
    include_dir = Dir('src', tm_path)
    # Headers live in per-topic subdirectories of src/
    subdirs = "core model solvers bem surface percolation".split(" ")
    include_dirs = [Dir(x, include_dir).abspath for x in subdirs]
    # The shared library is built alongside the sources
    lib_dir = include_dir
    env.AppendUnique(CPPPATH=include_dirs)
    env.AppendUnique(LIBPATH=[lib_dir.abspath])
    env.AppendUnique(RPATH=[lib_dir.abspath])
    env.AppendUnique(CXXFLAGS=["-std=c++14"])
    env.AppendUnique(
        CPPDEFINES=["THRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_OMP"])
    env.AppendUnique(LIBS=['fftw3', 'fftw3_omp'])
    # Sanity check: make sure the library actually links
    conf = Configure(env)
    if not conf.CheckLibWithHeader("Tamaas", "tamaas.hh", language="CXX"):
        raise StopError(
            "Tamaas not found in {}".format(tm_path.abspath))
    env['TAMAAS_PATH'] = tm_path.abspath
    env = conf.Finish()
def exists(env):
    """Tool availability hook: the tool needs 'TAMAAS_PATH' to be set."""
    return 'TAMAAS_PATH' in env
diff --git a/site_scons/version.py b/site_scons/version.py
index 94bbd88..d36b3a1 100644
--- a/site_scons/version.py
+++ b/site_scons/version.py
@@ -1,52 +1,53 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import subprocess
import base64
from zlib import compress
def git(*args):
    """Run a git command and return its raw output as bytes."""
    command = ["git", *args]
    return bytes(subprocess.check_output(command, stderr=subprocess.STDOUT))
def get_git_subst():
    """Get info about state of git repository.

    :returns: dict of substitution keys (@commit@, @branch@, @diff@,
        @remotes@); all values are empty/compressed-empty outside a git
        repository
    """
    try:
        branch = git("rev-parse", "--abbrev-ref", "HEAD")[:-1]
        commit = git("rev-parse", branch)[:-1]
        diff = git("diff", "HEAD")
        remotes = git('remote', '-v')
    except subprocess.CalledProcessError as err:
        # BUG FIX: unexpected git errors previously fell through with all
        # four variables unbound, producing a confusing NameError below;
        # re-raise them instead
        if b'not a git repository' not in err.output:
            raise
        print("Not in a git repository: skipping info gathering")
        branch, commit, diff, remotes = [bytes()] * 4
    if remotes != b"":
        # Escape newlines so the value survives substitution into source
        remotes_string = remotes[:-1].decode().replace('\n', '\\\\n')
    else:
        remotes_string = '""'
    return {
        '@commit@': commit.decode(),
        '@branch@': branch.decode(),
        # Diff is compressed and base64-encoded to embed in the binary
        '@diff@': base64.b64encode(compress(diff, 9)).decode(),
        '@remotes@': remotes_string,
    }
diff --git a/tests/SConscript b/tests/SConscript
index ad75d07..72e34c1 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -1,224 +1,225 @@
# -*- mode:python; coding: utf-8 -*-
# vim: set ft=python:
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function
from SCons.Script import Split, Copy, Dir, Import
from detect import FindGTest, FindPybind11
# ------------------------------------------------------------------------------
def copyComStr(env, main):
    """Mirror the shared-object command strings onto the static ones."""
    for shared_key, static_key in (('SHCXXCOMSTR', 'CXXCOMSTR'),
                                   ('SHLINKCOMSTR', 'LINKCOMSTR')):
        if shared_key in main:
            env[static_key] = main[shared_key]
# ------------------------------------------------------------------------------
def make_python_tests(env):
    """Copy python tests to build directory and build helper modules.

    :param env: test construction environment
    :returns: list of targets (copied test files plus the compiled
        register_integral_operators pybind11 module)
    """
    test_env = env.Clone()
    test_files = Split("""
test_hertz.py
test_matvec.py
test_westergaard.py
test_patch_westergaard.py
test_patch_plasticity.py
test_surface.py
test_hertz_disp.py
test_hertz_kato.py
test_saturated_pressure.py
test_flood_fill.py
test_integral_operators.py
test_dumper.py
test_tangential.py
test_boussinesq_surface.py
test_voigt.py
test_memory.py
test_copy.py
test_epic.py
test_publications.py
fftfreq.py
conftest.py
pytest.ini
""")
    if env['use_mpi']:
        test_files += ['test_mpi_routines.py', 'mpi_routines.py']
    src_dir = "#/tests"
    # Copy each test file into the build directory
    targets = [
        test_env.Command(file, test_env.File(file, src_dir),
                         Copy("$TARGET", "$SOURCE"))
        for file in test_files
    ]
    # 'pybind11' here is the site_init tool injected by SCons
    test_env = env.Clone(tools=[pybind11])
    # Helper module for integral operators
    test_env['SHLIBPREFIX'] = ''
    test_env.PrependUnique(LIBS=['Tamaas'])
    register = test_env.Pybind11Module(
        target="register_integral_operators",
        source=["register_integral_operators.cpp"])
    Import('libTamaas')
    test_env.Depends(register, libTamaas)
    targets.append(register)
    return targets
# ------------------------------------------------------------------------------
def compile_google_test(env, gtest_path):
    """Build a static gtest library from the bundled gtest-all source."""
    amalgamated_source = env.File("src/gtest-all.cc", gtest_path)
    objects = env.Object('gtest.o', [amalgamated_source])
    return env.StaticLibrary('gtest', objects)
# ------------------------------------------------------------------------------
def make_google_tests(env):
    """Build the GoogleTest C++ test suite.

    :param env: test construction environment
    :returns: [] when gtest headers are missing, otherwise the gtest
        program target and the copied python driver script
    """
    gtest_dir = Dir(env['GTEST_ROOT'])
    gtest_env = env.Clone(CPPPATH=[gtest_dir],
                          CXXFLAGS=['-pthread', '-isystem',
                                    env.Dir('include', gtest_dir).path])
    FindGTest(gtest_env)
    if not gtest_env.get('has_gtest'):
        return []
    libgtest = None
    # Ugly hack to detect if we need to compile gtest submodule
    if env['GTEST_ROOT'] == '#third-party/googletest/googletest':
        gtest_path = str(gtest_dir)
        libgtest = compile_google_test(gtest_env, gtest_path)
    env.AppendUnique(CXXFLAGS=gtest_env['CXXFLAGS'])
    env.PrependUnique(LIBS=['Tamaas', env.subst('python${py_version}')])
    google_test_files = Split("""
test_grid.cpp
test_loop.cpp
test_model.cpp
test_static_types.cpp
test_integration.cpp
""")
    if env['use_fftw']:
        google_test_files += ['test_fftw.cpp']
    if env['use_cuda']:
        google_test_files += ['test_cufft.cpp']
    # Necessary for the tests that use pybind11 calls to python
    uses = []
    if env['build_python']:
        google_test_files.append('test_fftfreq.cpp')
        uses = ['TAMAAS_USE_PYTHON']
    if env['use_mpi']:
        google_test_files.append('test_mpi.cpp')
    # CPPDEFINES may be a single item; normalize to a list before extending
    defines = env['CPPDEFINES']
    if type(defines) is not list:
        defines = [defines]
    gtest_main = env.Object("tamaas_gtest_main.o", 'tamaas_gtest_main.cc',
                            CPPDEFINES=defines + uses)
    gtest_all = env.Program('test_gtest_all', google_test_files + [gtest_main],
                            LIBS=(env['LIBS'] + ['gtest']))
    Import('libTamaas')
    env.Depends(gtest_all, libTamaas)
    env.Depends(gtest_all, libgtest)
    # Python driver that runs the gtest binary under pytest
    copy_gtest = gtest_env.Command('test_gtest.py', '#tests/test_gtest.py',
                                   Copy('$TARGET', '$SOURCE'))
    return [gtest_all, copy_gtest]
# ------------------------------------------------------------------------------
def make_bare_tests(env):
    """Build tests that depend only on the core library."""
    program = env.Program("test_rough_surface.cpp")
    Import('libTamaas')
    env.Depends(program, libTamaas)
    return [program]
# ------------------------------------------------------------------------------
# Retrieve the main build environment exported by SConstruct
Import('main_env')

# Setup of test environment
test_env = main_env.Clone()
test_env.AppendUnique(
    LIBPATH=['.', '../src'],
    # $$$$ escapes down to a literal $ORIGIN in the final link command
    RPATH=["'$$$$ORIGIN/../src'"],
    LINKFLAGS=['-pthread'],
)

test_env.PrependUnique(LIBS=['Tamaas'])

# Building tests that do not require any third party
targets = make_bare_tests(test_env)

# Build tests that required python bindings
if test_env['build_python']:
    FindPybind11(test_env)
    # NOTE(review): the bare name `pybind11` is not defined in this file —
    # presumably exported by FindPybind11 or an SConscript Export; verify
    test_env.Tool(pybind11)
    test_env.ParseConfig("${py_exec}-config --ldflags")
    # Drop inherited compile flags for the python test builds
    test_env['CCFLAGS'] = []
    targets += make_python_tests(test_env)

# Building google tests
targets += make_google_tests(test_env)

# Target alias to build tests
main_env.Alias('build-tests', targets)

# Check if pytest is installed
conf = Configure(test_env,
                 custom_tests={'CheckPythonModule': CheckPythonModule})
has_pytest = conf.CheckPythonModule('pytest')
conf.Finish()

# Define a command to execute tests
if has_pytest:
    pytest_env = test_env.Clone()

    pytest_env['pythonpath'] = pytest_env.subst('${build_dir}/python')
    pytest_env['ld_library_path'] = pytest_env.subst('${build_dir}/src')

    # Make the freshly built module and library visible to pytest
    pytest_env.PrependENVPath('PYTHONPATH', pytest_env['pythonpath'])
    pytest_env.PrependENVPath('LD_LIBRARY_PATH', pytest_env['ld_library_path'])

    # Setting a moderate thread number
    pytest_env['ENV']['OMP_NUM_THREADS'] = "1"

    pytest_env['PYTESTOPTS'] = ['${"-v" if verbose else "-q"}']

    test_target = pytest_env.Command(
        '.phony_test', targets,
        'echo $$PYTHONPATH; echo $$LD_LIBRARY_PATH; '
        '${py_exec} -m pytest $PYTESTOPTS ${TARGET.dir}')

    main_env.Alias('test', test_target)

else:
    # We still define a target here so that `scons test` still works
    dummy_command(main_env, 'test',
                  'Cannot run tests: pytest is not installed')
diff --git a/tests/conftest.py b/tests/conftest.py
index 1e3be9a..e4a5bdd 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,300 +1,301 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import division, print_function
import subprocess
import sys
from functools import reduce
from operator import mul as multiply
import numpy as np
import pytest
from numpy.linalg import norm
try:
import tamaas as tm
except ModuleNotFoundError as e:
print(e, file=sys.stderr)
print("Use 'scons test' to run tests", file=sys.stderr)
sys.exit(1)
# All model types wrapped by Tamaas; used to parametrize fixtures and tests
type_list = [tm.model_type.basic_1d,
             tm.model_type.basic_2d,
             tm.model_type.surface_1d,
             tm.model_type.surface_2d,
             tm.model_type.volume_1d,
             tm.model_type.volume_2d]
def get_libtamaas_file():
    """Return the path of the shared libTamaas linked to the Python module.

    Returns "static link" when ldd reports no Tamaas dependency, and
    "N/A" when ldd is unavailable (e.g. non-Linux platforms) or fails.
    """
    try:
        # check_output already returns bytes; no bytes() wrapper needed
        ldd_out = subprocess.check_output(['ldd', tm._tamaas.__file__])
        for line in filter(lambda x: 'Tamaas' in x,
                           ldd_out.decode().split('\n')):
            path = line.split(' => ')[1]
            return path.split(' ')[0]
        return "static link"
    # OSError covers a missing ldd binary (FileNotFoundError), which the
    # original code let propagate; IndexError covers a matching line
    # without the ' => ' separator
    except (OSError, subprocess.CalledProcessError, IndexError):
        return "N/A"
def pytest_report_header(config):
    """Print Tamaas build information at the top of the pytest report."""
    print(f"tamaas build type: {tm.TamaasInfo.build_type}\n"
          f"module file: {tm.__file__}\n"
          f"wrapper: {tm._tamaas.__file__}\n"
          f"libTamaas: {get_libtamaas_file()}")
def pytest_addoption(parser):
    """Placeholder hook; the --with-mpi option below is currently disabled."""
    pass
    # try:
    #     parser.addoption(
    #         '--with-mpi', action="store_true", default=False,
    #         help="Run MPI tests, this should be paired with mpirun."
    #     )
    # except ValueError:
    #     pass
def mtidfn(val):
    """Shorten a model_type repr to its last dotted component for test ids."""
    *_, last = str(val).rpartition('.')
    return last
def pkridfn(val):
    """Human-readable test id for PolonskyKeerRey constraint types."""
    names = {
        tm.PolonskyKeerRey.pressure: "pkr_pressure",
        tm.PolonskyKeerRey.gap: "pkr_gap",
    }
    return names[val]
def profile(func, domain, N, modes, amplitude):
    """Product of per-axis sinusoids with given modes on a regular grid."""
    axes = [np.linspace(0, length, points, endpoint=False, dtype=tm.dtype)
            for points, length in zip(N, domain)]
    grids = np.meshgrid(*axes, indexing='ij')
    waves = [func(2 * np.pi * x * m) for x, m in zip(grids, modes)]
    result = amplitude
    for wave in waves:
        result = result * wave
    return result
class HertzFixture:
    """Analytical Hertz sphere-on-flat contact solution on a square grid.

    Provides surface, pressure and displacement reference fields for
    contact solver tests.
    """

    def __init__(self, n, load):
        # n: grid points per direction; load: total applied normal load
        self.domain_size = 1
        self.n = n
        self.load = load
        self.curvature = 0.1
        self.radius = 1. / self.curvature
        self.e_star = 1.  # contact modulus E*
        # Hertz contact radius: a^3 = 3 W / (4 kappa E*)
        self.a = (3 * load / (4 * self.curvature * self.e_star))**(1. / 3.)

        self.x = np.linspace(-self.domain_size / 2.,
                             self.domain_size / 2.,
                             self.n, dtype=tm.dtype)
        self.y = self.x.copy()
        self.x, self.y = np.meshgrid(self.x, self.y)

        self._computeSurface()
        self._computePressure()
        self._computeDisplacement()

    def _computeDisplacement(self):
        """Analytical surface displacement, inside and outside contact."""
        r = np.sqrt(self.x**2 + self.y**2)
        self.displacement = np.zeros_like(r)
        contact = r < self.a
        # Inside the contact area the surfaces conform
        self.displacement[contact] = self.surface[contact]
        # Outside: classical Hertz far-field expression
        self.displacement[~contact] = \
            (self.surface[~contact]
             + self.a / (np.pi * self.radius) * np.sqrt(r[~contact]**2
                                                        - self.a**2)
             + (r[~contact]**2 - 2 * self.a**2) / (np.pi * self.radius)
             * np.arccos(self.a / r[~contact]))

    def _computePressure(self):
        """Hertz semi-elliptical pressure distribution (zero outside r < a)."""
        r = np.sqrt(self.x**2 + self.y**2)
        self.pressure = np.zeros_like(r)
        contact = np.where(r < self.a)
        self.pressure[contact] = \
            2 * self.e_star / (np.pi * self.radius) \
            * np.sqrt(self.a**2 - r[contact]**2)

    def _computeSurface(self):
        """Parabolic approximation of the spherical indenter profile."""
        self.surface = -1. / (2 * self.radius) * (self.x**2 + self.y**2)
@pytest.fixture(scope="package")
def hertz():
    """Fine-resolution, low-load Hertz reference solution."""
    return HertzFixture(1024, 0.00001)
@pytest.fixture(scope="package")
def hertz_coarse():
    """Coarser, higher-load Hertz reference (cheaper to solve against)."""
    return HertzFixture(512, 0.0001)
@pytest.fixture(scope="package",
                params=[tm.PolonskyKeerRey.pressure],
                ids=pkridfn)
def pkr(hertz, request):
    """Solve the Hertz problem with Polonsky-Keer-Rey.

    Returns the solved (model, hertz_fixture) pair.
    """
    model = tm.ModelFactory.createModel(tm.model_type.basic_2d,
                                        [hertz.domain_size, hertz.domain_size],
                                        [hertz.n, hertz.n])
    model.E, model.nu = hertz.e_star, 0

    # Same constraint type used for both primal and dual
    solver = tm.PolonskyKeerRey(model, hertz.surface, 1e-12,
                                request.param,
                                request.param)
    solver.solve(hertz.load)
    return model, hertz
class WestergaardFixture:
    """Analytical Westergaard solution for 1D sinusoidal contact."""

    def __init__(self, n, load):
        self.domain_size = 1.
        self.lamda = 1.   # wavelength of the sinusoidal surface
        self.delta = 0.1  # amplitude of the sinusoidal surface
        self.e_star = 1.  # contact modulus E*
        self.n = n

        # Full-contact pressure p* = pi E* delta / lambda
        self.p_star = np.pi * self.e_star * self.delta / self.lamda
        # Load is given as a fraction of p*
        self.load = load * self.p_star

        # Contact half-width from Westergaard's relation
        self.a = self.lamda / np.pi \
            * np.arcsin(np.sqrt(self.load / self.p_star))

        self.x = np.linspace(-self.domain_size / 2.,
                             self.domain_size / 2.,
                             self.n, endpoint=False, dtype=tm.dtype)

        self._computeSurface()
        self._computePressure()
        self._computeDisplacement()

    def _computeSurface(self):
        """Cosine surface profile of amplitude delta."""
        self.surface = self.delta * np.cos(2 * np.pi * self.x / self.lamda)

    def _computePressure(self):
        """Pressure distribution, non-zero only inside |x| < a."""
        self.pressure = np.zeros_like(self.surface)
        contact = np.where(np.abs(self.x) < self.a)
        self.pressure[contact] = 2 * self.load \
            * (np.cos(np.pi * self.x[contact] / self.lamda)
               / np.sin(np.pi * self.a / self.lamda)**2) \
            * np.sqrt(np.sin(np.pi * self.a / self.lamda)**2 -
                      np.sin(np.pi * self.x[contact] / self.lamda)**2)

    def _computeDisplacement(self):
        """Surface displacement; the sqrt/log terms are valid outside contact."""
        psi = np.pi * np.abs(self.x) / self.lamda
        psi_a = np.pi * self.a / self.lamda

        with np.errstate(invalid='ignore'):  # get some warnings out of the way
            self.displacement = (np.cos(2*psi) + 2 * np.sin(psi)
                                 * np.sqrt(np.sin(psi)**2 - np.sin(psi_a)**2)
                                 - 2 * np.sin(psi_a)**2 *
                                 np.log((np.sin(psi) +
                                         np.sqrt(np.sin(psi)**2
                                                 - np.sin(psi_a)**2))
                                        / np.sin(psi_a)))
        # Inside contact the invalid (NaN) values above are overwritten
        contact = np.where(np.abs(self.x) < self.a)
        self.displacement[contact] = np.cos(2*psi[contact])
        self.displacement *= self.load * self.lamda / (np.pi * self.e_star
                                                       * np.sin(psi_a)**2)
@pytest.fixture(scope="package")
def westergaard():
    """Westergaard reference on a fine (19683-point) grid."""
    return WestergaardFixture(19683, 0.1)
class PatchWestergaard:
    """Patch test data comparing operator output to Westergaard's solution.

    Builds a random-mode cosine pressure and the surface displacement it
    should produce for a model of the given type.
    """

    def __init__(self, model_type):
        dim = tm.type_traits[model_type].dimension
        bdim = tm.type_traits[model_type].boundary_dimension

        domain = [1.] * dim
        size = [6] * dim

        # Small random wavenumbers, one per boundary direction
        self.modes = np.random.randint(1, 3, (bdim,))

        self.model = tm.ModelFactory.createModel(model_type, domain, size)
        self.model.E = 3.
        self.model.nu = 0.

        self.pressure = profile(np.cos, self.model.boundary_system_size,
                                self.model.boundary_shape, self.modes, 1)
        # Expected displacement amplitude: 1 / (pi E* |q|)
        self.solution = profile(
            np.cos, self.model.boundary_system_size,
            self.model.boundary_shape, self.modes,
            1 / (np.pi * self.model.E_star * norm(self.modes)))
@pytest.fixture(scope="package",
                params=set(type_list) - {tm.model_type.volume_1d},
                ids=mtidfn)
def patch_westergaard(request):
    """Westergaard patch test for every model type except volume_1d."""
    return PatchWestergaard(request.param)
class UniformPlasticity:
    """Uniform uniaxial elastic-plastic patch with an analytical solution."""

    def __init__(self, model_type, domain, sizes):
        self.n = sizes
        self.domain = domain
        self.model = tm.ModelFactory.createModel(model_type, domain,
                                                 sizes)
        self.E_h = 0.1       # hardening modulus
        self.sigma_y = 0.01  # yield stress
        self.residual = tm.ModelFactory.createResidual(self.model,
                                                       sigma_y=self.sigma_y,
                                                       hardening=self.E_h)
        self.model.E = 1.
        self.model.nu = 0.

    def solution(self, p):
        """Analytical fields for a uniform applied pressure p.

        Returns a (solution, normalization) pair of dicts keyed by field
        name; the second holds factors used to normalize errors.
        """
        E, nu = self.model.E, self.model.nu
        E_h, sigma_y = self.E_h, self.sigma_y
        mu = E / (2 * (1 + nu))  # shear modulus

        strain = -1 / (mu + E_h) * (p * (3 * mu + E_h) / (2 * mu)
                                    - np.sign(p) * sigma_y)

        # Equivalent (cumulated) plastic strain
        dep = (2 * mu * np.abs(strain) - sigma_y) / (3 * mu + E_h)

        # Deviatoric plastic strain in Voigt-like component order
        plastic_strain = np.sign(strain) / 2 * dep * np.array([
            -1, -1, 2, 0, 0, 0,
        ], dtype=tm.dtype)

        stress = 2 * mu * (np.array([
            0, 0, strain, 0, 0, 0
        ], dtype=tm.dtype) - plastic_strain)

        return {
            "stress": stress,
            "plastic_strain": plastic_strain,
            "cumulated_plastic_strain": dep
        }, {
            "stress": mu,
            "plastic_strain": 1,
            "cumulated_plastic_strain": 1
        }
@pytest.fixture(params=[tm.model_type.volume_2d],
                ids=mtidfn)
def patch_isotropic_plasticity(request):
    """Small (4x4x4) uniform plasticity patch on a volume model."""
    return UniformPlasticity(request.param, [1., 1., 1.], [4, 4, 4])
diff --git a/tests/fftfreq.py b/tests/fftfreq.py
index df4eec5..b516dea 100644
--- a/tests/fftfreq.py
+++ b/tests/fftfreq.py
@@ -1,51 +1,52 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import division
__author__ = "Lucas Frérot"
import numpy as np
from numpy.fft import fftfreq
def frequencies1D(frequencies):
    """Fill *frequencies* in place with integer FFT wavenumbers."""
    size = len(frequencies)
    frequencies[:] = size * fftfreq(size)
def assign2DFrequencies(qx, qy, frequencies):
    """Write the (qx, qy) grid into the last axis of *frequencies*."""
    grid_x, grid_y = np.meshgrid(qx, qy, indexing='ij')
    frequencies[:, :, 0] = grid_x
    frequencies[:, :, 1] = grid_y
def frequencies2D(frequencies):
    """Fill an (nx, ny, 2) array in place with 2D integer FFT wavenumbers."""
    nx, ny = frequencies.shape[0], frequencies.shape[1]
    grid_x, grid_y = np.meshgrid(fftfreq(nx) * nx, fftfreq(ny) * ny,
                                 indexing='ij')
    frequencies[:, :, 0] = grid_x
    frequencies[:, :, 1] = grid_y
def hfrequencies2D(frequencies):
    """Fill a hermitian (half-spectrum along y) frequency array in place."""
    nx, ny = frequencies.shape[0], frequencies.shape[1]
    grid_x, grid_y = np.meshgrid(fftfreq(nx) * nx, np.arange(ny),
                                 indexing='ij')
    frequencies[:, :, 0] = grid_x
    frequencies[:, :, 1] = grid_y
diff --git a/tests/mpi_routines.py b/tests/mpi_routines.py
index 2c85698..c08cc15 100644
--- a/tests/mpi_routines.py
+++ b/tests/mpi_routines.py
@@ -1,306 +1,307 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import os
import tempfile
import tamaas as tm
import numpy as np
from mpi4py import MPI
from numpy.linalg import norm
from conftest import HertzFixture
from tamaas.dumpers import MPIIncompatibilityError
def make_surface(N):
    """Generate a seeded random rough surface on an N grid."""
    isopowerlaw = tm.Isopowerlaw2D()
    isopowerlaw.q0, isopowerlaw.q1, isopowerlaw.q2 = 2, 2, 16
    isopowerlaw.hurst = 0.8

    generator = tm.SurfaceGeneratorRandomPhase2D(N)
    generator.spectrum = isopowerlaw
    generator.random_seed = 1
    return generator.buildSurface()
def mpi_surface_generator():
    """Check that a surface generated in parallel matches the sequential one."""
    N = [128, 128]
    tm.set_log_level(tm.LogLevel.debug)
    seq_surface = None
    comm = MPI.COMM_WORLD
    print('[{}] {}'.format(comm.rank, comm.size))

    # Reference surface generated on rank 0 only
    with tm.mpi.sequential():
        if comm.rank == 0:
            seq_surface = make_surface(N)

    # Distributed generation, then gather all local pieces on rank 0
    surface = make_surface(N)
    print('[{}] {}'.format(comm.rank, surface.shape))
    recv = comm.gather(surface, root=0)

    if comm.rank == 0:
        gsurface = np.concatenate(recv)
        if False:  # debug visualization, flip to True when investigating
            import matplotlib.pyplot as plt
            plt.imshow(seq_surface)
            plt.colorbar()
            plt.figure()
            plt.imshow(gsurface)
            plt.colorbar()
            plt.show()
        # I think the only reason this assert works is because the
        # spectrum is compact in the Fourier domain -> surface is regular
        assert np.all(seq_surface == gsurface)
def mpi_model_creation():
    """Check that partitioned model discretizations sum up across ranks."""
    N = [20, 50, 50]
    S = [1., 1., 1.]
    comm = MPI.COMM_WORLD

    def get_discretizations(model):
        return model.shape, model.boundary_shape

    # Basic 2D model: the first dimension is partitioned across ranks
    model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S[1:], N[1:])
    n, bn = get_discretizations(model)
    n[0] = comm.allreduce(n[0])
    bn[0] = comm.allreduce(bn[0])
    assert n == N[1:] and bn == N[1:]

    # Volume 2D model: the shape is partitioned along its second dimension,
    # the boundary shape along its first
    model = tm.ModelFactory.createModel(tm.model_type.volume_2d, S, N)
    n, bn = get_discretizations(model)
    n[1] = comm.allreduce(n[1])
    bn[0] = comm.allreduce(bn[0])
    assert n == N and bn == N[1:]
def mpi_polonsky_keer():
    """Solve Hertz contact in parallel; compare to the analytical fields."""
    N = [1024, 1024]
    S = [1., 1.]
    load = 0.00001
    comm = MPI.COMM_WORLD
    hertz = HertzFixture(N[0], load)
    surface = hertz.surface
    tm.set_log_level(tm.LogLevel.debug)

    def solve():
        model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S, N)
        model.E, model.nu = 1, 0
        # Each rank receives its local slab of the global surface
        local_surface = tm.mpi.scatter(surface)
        solver = tm.PolonskyKeerRey(model, local_surface, 1e-15)
        print('Created solver')
        solver.solve(load)
        return np.copy(model['traction']), np.copy(model['displacement'])

    # Gather the distributed fields back onto rank 0
    tractions, displacements = map(tm.mpi.gather, solve())

    if comm.rank == 0:
        perror = norm(tractions - hertz.pressure) / norm(hertz.pressure)
        derror = norm(displacements - hertz.displacement)\
            / norm(hertz.displacement)
        if False:  # debug visualization
            print(perror, derror)
            import matplotlib.pyplot as plt
            plt.imshow(tractions - hertz.pressure)
            plt.colorbar()
            plt.show()
        assert perror < 5e-3
        assert derror < 8e-3
def mpi_polonsky_keer_compare():
    """Compare a parallel rough-contact solve to the sequential result."""
    N = [273, 273]
    S = [1., 1.]
    E, nu = 0.2, 0.3
    load = 0.1
    comm = MPI.COMM_WORLD
    seq_tractions = None
    rms = 0

    # Sequential reference solve on rank 0
    with tm.mpi.sequential():
        if comm.rank == 0:
            model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S, N)
            model.E, model.nu = E, nu
            surface = make_surface(N)
            rms = tm.Statistics2D.computeSpectralRMSSlope(surface)
            surface /= rms
            solver = tm.PolonskyKeerRey(model, surface, 1e-15)
            solver.solve(load)
            seq_tractions = np.copy(model.traction)
            seq_area = tm.Statistics2D.contact(model.traction)

    # All ranks normalize with the sequential RMS slope
    rms = comm.bcast(rms, root=0)

    # Parallel solve with the same (seeded) surface
    model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S, N)
    model.E, model.nu = E, nu
    surface = make_surface(N) / rms
    solver = tm.PolonskyKeerRey(model, surface, 1e-15)
    solver.solve(load)
    tractions = model['traction']
    area = tm.Statistics2D.contact(tractions)

    recv = comm.gather(tractions, root=0)

    if comm.rank == 0:
        tractions = np.concatenate(recv)
        error = np.linalg.norm(seq_tractions - tractions) / seq_tractions.size
        if False:  # debug visualization
            print(error)
            import matplotlib.pyplot as plt
            plt.imshow(seq_tractions - tractions)
            plt.colorbar()
            plt.show()
        assert error < 1e-13
        assert np.abs(seq_area - area) / seq_area < 1e-13
def model_for_dump():
    """Create a small volume model whose fields encode the MPI rank."""
    rank = MPI.COMM_WORLD.rank
    discretization = [10, 2, 10]
    # Depth 0.5 over a unit flat domain
    system_size = [0.5, 1, 1]

    model = tm.ModelFactory.createModel(tm.model_type.volume_2d, system_size,
                                        discretization)
    model['displacement'][:] = rank
    model['traction'][:] = rank
    return model
def mpi_h5dump():
    """Dump a model with H5Dumper under MPI and verify per-rank content."""
    try:
        from tamaas.dumpers import H5Dumper as Dumper
        import h5py
    except ImportError as e:
        # h5py is optional: skip silently
        print(e)
        return

    # Work in a fresh directory shared by all ranks
    os.chdir(MPI.COMM_WORLD.bcast(tempfile.mkdtemp(), root=0))

    model = model_for_dump()
    dumper_helper = Dumper('test_mpi_dump', 'displacement', 'traction')

    try:
        dumper_helper << model
    except MPIIncompatibilityError as e:
        # h5py built without parallel support: skip
        print(e)
        return

    # Check what each rank wrote (indices 0/1 assume a 2-rank run —
    # TODO confirm against the mpirun invocation)
    with h5py.File('hdf5/test_mpi_dump_0000.h5',
                   'r',
                   driver='mpio',
                   comm=MPI.COMM_WORLD) as handle:
        assert np.all(handle['displacement'][:, 0, :] == 0)
        assert np.all(handle['displacement'][:, 1, :] == 1)
        assert np.all(handle['traction'][0, :] == 0)
        assert np.all(handle['traction'][1, :] == 1)

    # Read back: each rank should recover its own rank-valued fields
    rmodel = dumper_helper.read('hdf5/test_mpi_dump_0000.h5')
    assert np.all(rmodel.displacement == MPI.COMM_WORLD.rank)
    assert np.all(rmodel.traction == MPI.COMM_WORLD.rank)
def mpi_netcdfdump():
    """Dump a model with NetCDFDumper under MPI and read it back."""
    try:
        from tamaas.dumpers import NetCDFDumper as Dumper
        import netCDF4 as nc  # noqa: F401 -- only checking availability
    except ImportError as e:
        # netCDF4 is optional: skip silently
        print(e)
        return

    # Work in a fresh directory shared by all ranks
    os.chdir(MPI.COMM_WORLD.bcast(tempfile.mkdtemp(), root=0))

    model = model_for_dump()
    dumper_helper = Dumper('test_mpi_dump', 'displacement', 'traction')

    try:
        dumper_helper << model
    except MPIIncompatibilityError as e:
        # netCDF4 built without parallel support: skip
        print(e)
        return

    # Read back: each rank should recover its own rank-valued fields
    rmodel = dumper_helper.read('netcdf/test_mpi_dump.nc')
    assert np.all(rmodel.displacement.astype(int) == MPI.COMM_WORLD.rank)
    assert np.all(rmodel.traction.astype(int) == MPI.COMM_WORLD.rank)
def mpi_plastic_solve():
    """Uniform plastic patch solve; compare with the analytical solution."""
    from conftest import UniformPlasticity
    from tamaas.nonlinear_solvers import DFSANECXXSolver as Solver

    patch_isotropic_plasticity = UniformPlasticity(tm.model_type.volume_2d,
                                                   [1.] * 3, [4] * 3)
    model = patch_isotropic_plasticity.model
    residual = patch_isotropic_plasticity.residual
    applied_pressure = 0.1

    solver = Solver(residual)
    solver.tolerance = 1e-15

    # Apply a uniform normal pressure on the boundary
    pressure = model['traction'][..., 2]
    pressure[:] = applied_pressure

    solver.solve()
    solver.updateState()

    solution, normal = patch_isotropic_plasticity.solution(applied_pressure)

    # Normalized error against the analytical fields
    for key in solution:
        error = norm(model[key] - solution[key]) / normal[key]
        assert error < 2e-15
def mpi_flood_fill():
    """Check cluster detection on a contact map split across MPI ranks."""
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the equivalent dtype
    contact = np.zeros([10, 1], dtype=bool)
    contact[:1, :] = True
    contact[-1:, :] = True

    clusters = tm.FloodFill.getClusters(contact, False)

    # Only rank 0 holds the gathered cluster list
    if tm.mpi.rank() == 0:
        assert len(clusters) == 3, "Wrong number of clusters"
        assert [c.getArea() for c in clusters] == [1, 2, 1], "Wrong areas"
# Allow running the sequential-compare check directly (e.g. under mpirun)
if __name__ == '__main__':
    mpi_polonsky_keer_compare()
diff --git a/tests/test_boussinesq_surface.py b/tests/test_boussinesq_surface.py
index a1fe426..271cc0c 100644
--- a/tests/test_boussinesq_surface.py
+++ b/tests/test_boussinesq_surface.py
@@ -1,78 +1,79 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import numpy as np
import tamaas as tm
from numpy.linalg import norm
def test_boussinesq_surface():
    """Compare the volume Boussinesq operator with the surface solver.

    The same cosine tractions are applied to a volume model and a surface
    model; the surface-layer displacements must match to machine precision.
    """
    # Definition of modeled domain
    model_type = tm.model_type.volume_2d
    discretization = [2, 128, 128]
    system_size = [1., 1., 1.]

    # Random cosine tractions per component (note: RNG is not seeded)
    x, y = [np.linspace(0, size, n, endpoint=False, dtype=tm.dtype)
            for size, n in zip(system_size[1:], discretization[1:])]
    xx, yy = np.meshgrid(x, y, sparse=True)

    tractions = np.zeros(discretization[1:] + [3], dtype=tm.dtype)
    for i in range(3):
        omega = np.random.randint(1, 20, (2,)) * 2 * np.pi
        tractions[:, :, i] = np.cos(omega[0]*xx)*np.cos(omega[1]*yy)

    # Material constants
    E = 1.  # Young's modulus
    nu = 0.3  # Poisson's ratio

    # Creation of model
    model_vol = tm.ModelFactory.createModel(model_type,
                                            system_size,
                                            discretization)
    model_surf = tm.ModelFactory.createModel(tm.model_type.surface_2d,
                                             [1, 1],
                                             tractions.shape[:-1])
    model_vol.E = model_surf.E = E
    model_vol.nu = model_surf.nu = nu

    # Setup for integral operators
    tm.ModelFactory.createResidual(model_vol, 0, 0)

    # Pressure definition
    model_vol['traction'][...] = tractions
    model_surf['traction'][...] = tractions

    # Reference: surface (Boussinesq) displacement solution
    model_surf.solveNeumann()

    # Applying operator
    boussinesq = model_vol.operators["boussinesq"]
    boussinesq(model_vol['traction'], model_vol['displacement'])

    # # Dumper
    # dumper_helper = UVWDumper(residual, args.name)
    # model.addDumper(dumper_helper)
    # model.dump()
    # print("Done")

    # Compare the top layer of the volume solution per component
    for i in range(3):
        error = norm(model_vol.displacement[0, :, :, i]
                     - model_surf.displacement[:, :, i]) \
            / norm(tractions[:, :, i])
        assert error < 1e-16
diff --git a/tests/test_copy.py b/tests/test_copy.py
index 28bb0dd..f78b844 100644
--- a/tests/test_copy.py
+++ b/tests/test_copy.py
@@ -1,37 +1,38 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import copy
import pytest
from numpy.testing import assert_allclose
from test_dumper import model_fixture # noqa
def test_model_deepcopy(model_fixture):
    """Deep copy must create a distinct model with identical field values."""
    model = copy.deepcopy(model_fixture)
    # Fixed typo in the assertion message ("referecence")
    assert model is not model_fixture, "Copy returns a reference."

    for field in model_fixture:
        assert_allclose(model[field], model_fixture[field], 1e-15)
def test_model_copy(model_fixture):
    """Shallow copy of a model is unsupported and must raise."""
    with pytest.raises(RuntimeError):
        copy.copy(model_fixture)
diff --git a/tests/test_dumper.py b/tests/test_dumper.py
index 5a5a6a0..8a35857 100644
--- a/tests/test_dumper.py
+++ b/tests/test_dumper.py
@@ -1,192 +1,193 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import division
import os
import pytest
import numpy as np
import tamaas as tm
from pathlib import Path
from numpy.testing import assert_allclose
from conftest import mtidfn, type_list
from tamaas.dumpers import NumpyDumper, JSONDumper, FieldDumper
from tamaas.dumpers._helper import directory_dump, step_dump
@pytest.fixture(scope="module",
                params=type_list,
                ids=mtidfn)
def model_fixture(request):
    """Model of each type with constant fields plus an 'additional' field."""
    dim = tm.type_traits[request.param].dimension
    model = tm.ModelFactory.createModel(request.param, dim * [1.], dim * [4])
    model.displacement[:] = 3
    model.traction[:] = 1

    # Extra registered field with the type's number of components
    model['additional'] = 2 * np.ones(model.shape
                                      + [tm.type_traits[model.type].components])
    return model
class Dumper(tm.ModelDumper):
    """Simple numpy dumper"""

    def __init__(self):
        tm.ModelDumper.__init__(self)

    def dump(self, model):
        """Write flattened traction and displacement to text files."""
        for filename, field in (('tractions.txt', model.traction),
                                ('displacement.txt', model.displacement)):
            np.savetxt(filename, np.ravel(field))
# Dumpers that are always available
dumper_types = [NumpyDumper, JSONDumper]

# Optional dumpers, registered only when their backing library imports
try:
    from tamaas.dumpers import H5Dumper
    dumper_types.append(H5Dumper)
except ImportError:
    pass

try:
    from tamaas.dumpers import UVWDumper
    dumper_types.append(UVWDumper)
except ImportError:
    pass

try:
    from tamaas.dumpers import NetCDFDumper
    dumper_types.append(NetCDFDumper)
except ImportError:
    pass
@pytest.mark.parametrize('dumper_class', dumper_types)
def test_dumper(model_fixture, tmp_path, dumper_class):
    """Write a model with each dumper, read it back and compare fields."""
    os.chdir(tmp_path)
    filename = 'test_{}'.format(dumper_class.__name__)

    if not issubclass(dumper_class, FieldDumper):
        dumper = dumper_class(filename)
    else:
        dumper = dumper_class(filename, all_fields=True)
        # Field dumpers decorate the path (directory / step number)
        filename = dumper.file_path

    dumper << model_fixture

    # UVW does not read VTK files
    try:
        if dumper_class is UVWDumper:
            with pytest.raises(NotImplementedError):
                dumper.read(filename)
            assert Path(filename).is_file()
            return
    except NameError:
        # UVWDumper was not imported (optional dependency missing)
        pass

    rmodel = dumper.read(filename)

    def compare(model, reference):
        assert model is not reference
        assert model.type == reference.type
        assert model.shape == reference.shape

        for field in reference:
            # assert field in model
            assert_allclose(model[field], reference[field], rtol=1e-15)

    compare(rmodel, model_fixture)

    # Skipping sequence read for JSON
    if dumper_class.__name__ == "JSONDumper":
        return

    # Dump a second step, then read the whole sequence back
    dumper << model_fixture

    count = 0
    for rmodel in dumper.read_sequence(filename.replace("0000", "*")):
        count += 1
        compare(rmodel, model_fixture)

    assert count == 2
def test_custom_dumper(tmp_path):
    """A Python-defined ModelDumper subclass receives the model via <<."""
    os.chdir(tmp_path)
    model = tm.ModelFactory.createModel(tm.model_type.volume_2d, [1., 1., 1.],
                                        [16, 4, 8])
    dumper = Dumper()
    dumper << model

    # Files written by the custom Dumper.dump above
    tractions = np.loadtxt('tractions.txt')
    displacements = np.loadtxt('displacement.txt')

    assert_allclose(tractions.reshape(model.traction.shape), model.traction,
                    1e-15)
    assert_allclose(displacements.reshape(model.displacement.shape),
                    model.displacement, 1e-15)
# The scope was mistakenly written as a function parameter default
# (`def simple_model(scope="module")`), which pytest ignores, silently
# leaving the fixture function-scoped; it belongs to the decorator.
@pytest.fixture(scope="module")
def simple_model():
    """Minimal 1x1 basic_2d model shared by the dumper helper tests."""
    return tm.ModelFactory.createModel(tm.model_type.basic_2d, [1, 1], [1, 1])
def test_directory_dump(simple_model, tmp_path):
    """@directory_dump must create the target directory for the dumper."""
    os.chdir(tmp_path)

    @directory_dump("dummy")
    class Dummy(tm.ModelDumper):
        def __init__(self):
            super(Dummy, self).__init__()

        def dump(self, model):
            # No-op: only the directory side effect is tested
            pass

        @property
        def file_path(self):
            return 'dummy'

    dumper = Dummy()
    dumper << simple_model

    assert Path('dummy').is_dir()
def test_step_dump(simple_model, tmp_path):
    """@step_dump must produce a new numbered file for each dump."""
    os.chdir(tmp_path)

    @step_dump
    class Dummy(FieldDumper):
        extension = 'dummy'

        def _dump_to_file(self, file_descriptor, model):
            # Just touch the file
            with open(file_descriptor, 'w'):
                pass

    files = []
    dumper = Dummy('dummy')

    def dump():
        # Record the path before dumping: step_dump advances the counter
        files.append(dumper.file_path)
        dumper << simple_model

    dump()
    dump()

    for file in files:
        assert Path(file).is_file()
diff --git a/tests/test_epic.py b/tests/test_epic.py
index d5bb922..7401bb3 100644
--- a/tests/test_epic.py
+++ b/tests/test_epic.py
@@ -1,79 +1,80 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import division, print_function
import pytest
from conftest import pkridfn
import tamaas as tm
from numpy.linalg import norm
from tamaas.nonlinear_solvers import DFSANESolver
@pytest.fixture(scope="module",
                params=[tm.PolonskyKeerRey.pressure],
                ids=pkridfn)
def model_setup(hertz_coarse, request):
    """Solve the same Hertz problem elastically and elasto-plastically.

    The plastic solver uses a yield stress of 100 E so plasticity never
    triggers; both solutions should therefore coincide.
    """
    models, solvers = [], []

    for mtype in [tm.model_type.basic_2d, tm.model_type.volume_2d]:
        dim = tm.type_traits[mtype].dimension
        n = [hertz_coarse.n] * dim
        d = [hertz_coarse.domain_size] * dim

        if dim == 3:
            # Thin volume: 2 points over a very small depth
            n[0] = 2
            d[0] = 0.001

        model = tm.ModelFactory.createModel(mtype, d, n)
        model.E, model.nu = hertz_coarse.e_star, 0

        solver = tm.PolonskyKeerRey(model, hertz_coarse.surface, 1e-12,
                                    request.param,
                                    request.param)
        models.append(model)
        solvers.append(solver)

    # Plastic residual on the (last created) volume model
    epsolver = DFSANESolver(tm.ModelFactory.createResidual(
        model,
        sigma_y=1e2 * model.E,
        hardening=0
    ))

    # Wrap the volume contact solver into the elastic-plastic coupling
    solvers[1] = tm.EPICSolver(solvers[1], epsolver, 1e-12)

    for solver in solvers:
        solver.solve(hertz_coarse.load)

    return models, hertz_coarse
@pytest.mark.xfail(tm.TamaasInfo.backend == 'tbb',
                   reason='TBB reductions are unstable?')
def test_energy(model_setup):
    """Test the full elastic-plastic solve step in elasticity"""
    # We use computed values from basic PKR to test
    (model_elastic, model_plastic), hertz = model_setup

    # Energy-like norm of the difference between elastic and "plastic"
    # solutions, normalized by the analytical Hertz energy
    error = norm(
        (model_plastic.traction[..., 2] - model_elastic.traction)
        * (model_plastic.displacement[0, ..., 2] - model_elastic.displacement))
    error /= norm(hertz.pressure * hertz.displacement)
    assert error < 1e-16
diff --git a/tests/test_flood_fill.py b/tests/test_flood_fill.py
index cc7afa5..37b2b60 100644
--- a/tests/test_flood_fill.py
+++ b/tests/test_flood_fill.py
@@ -1,69 +1,70 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function, division
import tamaas as tm
import numpy as np
def test_flood_fill2d():
    """Cluster detection (area and perimeter) on a hand-built 2D map."""
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the equivalent dtype
    field = np.zeros((18, 18), bool)

    # Cluster of perimeter 18, area 13
    field[2:4, 3:6] = True
    field[4, 4:6] = True
    field[5, 5] = True
    field[3:5, 6:8] = True

    # Cluster of perimeter 8, area 4
    field[14:16, 3:5] = True

    # Cluster of perimeter 20, area 11
    field[9:11, 8:11] = True
    field[7:9, 9] = True
    field[10, 11:14] = True

    # Cluster of perimeter 18, area 9
    field[3:5, 14:16] = True
    field[5:10, 15] = True

    clusters = tm.FloodFill.getClusters(field, False)

    # Dummy class holding the expected cluster properties
    class dummy:
        def __init__(self, area, perimeter):
            self.area = area
            self.perimeter = perimeter

    # Solution
    ref = [dummy(13, 18), dummy(9, 18), dummy(11, 20), dummy(4, 8)]

    for x, y in zip(clusters, ref):
        assert x.area == y.area
        assert x.perimeter == y.perimeter
def test_flood_fill3d():
    """Check volume cluster detection on a single 2x1x2 3D blob."""
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the equivalent dtype.
    field = np.zeros((5, 5, 5), bool)
    field[2:4, 3, 1:3] = True  # 4 voxels in one connected blob

    clusters = tm.FloodFill.getVolumes(field, False)
    assert len(clusters) == 1
    assert clusters[0].area == 4
diff --git a/tests/test_gtest.py b/tests/test_gtest.py
index 190eb88..b511484 100644
--- a/tests/test_gtest.py
+++ b/tests/test_gtest.py
@@ -1,54 +1,55 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import subprocess
import os
from tamaas import TamaasInfo
def test_gtest(pytestconfig):
    """Run the compiled GoogleTest binary, then re-run it under MPI.

    The MPI runs are silently skipped when mpirun is unavailable or when
    Tamaas was compiled without MPI support.
    """
    me_path = os.path.realpath(__file__)
    current_dir = os.path.dirname(me_path)

    # Let the gtest binary import python modules living next to this file
    my_env = os.environ.copy()
    my_env['PYTHONPATH'] = current_dir + ":" + os.getenv('PYTHONPATH', "")

    args = [os.path.join(current_dir, 'test_gtest_all'),
            '--gtest_output=xml:' +
            os.path.join(current_dir, 'gtest_results.xml')]

    has_mpirun = True
    try:
        subprocess.check_call(['mpirun', '-V'])
    # OSError (incl. FileNotFoundError) is raised when the mpirun binary
    # does not exist at all; the original code only caught
    # CalledProcessError and would crash on systems without MPI.
    except (subprocess.CalledProcessError, OSError):
        has_mpirun = False

    # Serial run always executes
    subprocess.check_call(args, env=my_env)

    if not has_mpirun or not TamaasInfo.has_mpi:
        return

    for procs in range(2, 4):
        # Do not write output xml in parallel
        call_args = ['mpirun',
                     '--allow-run-as-root',
                     '--oversubscribe',
                     '-np', str(procs)] \
            + args[:1]
        subprocess.check_call(call_args, env=my_env)
diff --git a/tests/test_hertz.py b/tests/test_hertz.py
index 9d753a5..21ce689 100644
--- a/tests/test_hertz.py
+++ b/tests/test_hertz.py
@@ -1,71 +1,72 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import division, print_function
import numpy as np
from numpy.linalg import norm
import tamaas as tm
def compute_perimeter(tractions):
    """Return the total perimeter of all contact clusters.

    A point is in contact when its traction is strictly positive.
    """
    # Boolean comparison replaces the removed np.bool dtype and the
    # manual zeros + mask assignment of the original.
    contact = tractions > 0

    # Computing clusters (non-periodic)
    clusters = tm.FloodFill.getClusters(contact, False)
    return np.sum([c.perimeter for c in clusters])
def test_contact_area(pkr):
    """Contact area must match the Hertz solution for solids of revolution."""
    model, hertz = pkr

    def rel_error(value, reference):
        # Relative deviation from a reference value
        return np.abs(value - reference) / reference

    tractions = model['traction']
    true_area = hertz.a**2 * np.pi

    # Raw contact fraction
    contact_area = tm.Statistics2D.contact(tractions)
    hertz_area = np.mean(hertz.pressure > 0)
    assert rel_error(contact_area, hertz_area) < 1e-2
    assert rel_error(contact_area, true_area) < 5e-3

    # Perimeter-corrected contact fraction
    contact_area = tm.Statistics2D.contact(tractions,
                                           compute_perimeter(tractions))
    hertz_perimeter = compute_perimeter(hertz.pressure)
    hertz_area -= (np.pi - 1 + np.log(2)) / 24 \
        * hertz_perimeter / tractions.size
    assert rel_error(contact_area, hertz_area) < 1e-2
    assert rel_error(contact_area, true_area) < 3e-3
def test_pressure(pkr):
    """Relative L2 error of the computed pressure vs. Hertz."""
    model, hertz = pkr
    reference = norm(hertz.pressure)
    residual = norm(model['traction'] - hertz.pressure)
    assert residual / reference < 5e-3
def test_displacement(pkr):
    """Relative L2 error of the computed displacement vs. Hertz."""
    model, hertz = pkr
    reference = norm(hertz.displacement)
    residual = norm(model['displacement'] - hertz.displacement)
    assert residual / reference < 8e-3
diff --git a/tests/test_hertz_disp.py b/tests/test_hertz_disp.py
index fc259f8..0eb4f12 100644
--- a/tests/test_hertz_disp.py
+++ b/tests/test_hertz_disp.py
@@ -1,129 +1,130 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function, division
import tamaas as tm
import numpy as np
def constructHertzProfile(size, curvature):
    """Build a spherical cap of given curvature on a [-0.5, 0.5]^2 grid,
    shifted so its minimum is zero."""
    radius = 1. / curvature
    coords = np.linspace(-0.5, 0.5, size, dtype=tm.dtype)
    x, y = np.meshgrid(coords, coords)
    surface = np.sqrt(radius**2 - x**2 - y**2)
    # Shift so the lowest point sits at zero height
    return (surface - surface.min()).copy()
def computeHertzDisplacement(e_star, contact_size, max_pressure, size):
    """Analytical Hertz surface displacement on a [-0.5, 0.5]^2 grid.

    Parabolic profile inside the contact zone (Hertz theory).
    """
    coords = np.linspace(-0.5, 0.5, size)
    x, y = np.meshgrid(coords, coords)
    prefactor = np.pi * max_pressure / (4 * contact_size * e_star)
    disp = prefactor * (2 * contact_size**2 - x**2 - y**2)
    return disp.copy()
def test_hertz_disp():
    """Full Hertz benchmark on a spherical profile.

    Solves the contact problem with the gap-based Polonsky-Keer-Rey
    functional and checks contact area, maximum pressure and in-contact
    displacements against the analytical Hertz solution.
    """
    grid_size = 512
    curvature = 0.5
    effective_modulus = 1.
    load = 0.12

    surface = constructHertzProfile(grid_size, curvature)

    model = tm.ModelFactory.createModel(tm.model_type.basic_2d,
                                        [1., 1.],
                                        [grid_size, grid_size])
    model.E, model.nu = 1, 0

    # Primal and constraint both on the gap
    solver = tm.PolonskyKeerRey(model, surface, 1e-12,
                                tm.PolonskyKeerRey.gap,
                                tm.PolonskyKeerRey.gap)
    solver.solve(load - surface.mean())

    tractions = model['traction']
    true_displacements = model['displacement']
    true_gap = true_displacements - surface
    pressure = np.mean(tractions)

    # A point is in contact when its gap is numerically zero
    contact_points = np.where(1.0e-10 >= true_gap)
    contact_area = (np.size(contact_points)/2)/float(grid_size*grid_size)
    print('The contact area computed with the gap map is', contact_area)

    hertz_contact_size = (3 * pressure
                          / (4 * curvature * effective_modulus))**(1. / 3.)
    print('Exact Hertz contact radius is', hertz_contact_size)
    hertz_area = np.pi * hertz_contact_size**2
    print('Exact Hertz contact area is', hertz_area)
    area_error = np.abs(hertz_area - contact_area) / hertz_area
    print("Area error:", area_error)

    # Testing maximum pressure
    max_pressure = tractions.max()
    hertz_max_pressure = (6 * pressure * effective_modulus**2
                          * curvature ** 2)**(1. / 3.) / np.pi
    pressure_error = np.abs(hertz_max_pressure - max_pressure) \
        / hertz_max_pressure
    print('Exact Hertz max pressure is', hertz_max_pressure)
    print('max pressure is', np.max(tractions))
    print("Max pressure error:", pressure_error)

    # Testing displacements
    hertz_displacements = computeHertzDisplacement(effective_modulus,
                                                   hertz_contact_size,
                                                   hertz_max_pressure,
                                                   grid_size)

    # Selecting only the points that are in contact
    contact_indexes = list(zip(*np.where(true_gap < 1e-12)))

    # Displacements of bem are centered around the mean of the whole surface
    # and Hertz displacements are not centered, so we need to compute mean
    # on the contact zone for both arrays
    bem_mean = 0.
    hertz_mean = 0.
    for index in contact_indexes:
        bem_mean += true_displacements[index]
        hertz_mean += hertz_displacements[index]
    bem_mean /= len(contact_indexes)
    hertz_mean /= len(contact_indexes)

    # Correction applied when computing error
    correction = hertz_mean - bem_mean

    # Computation of error of displacement in contact zone
    error = 0.
    hertz_norm = 0.
    for index in contact_indexes:
        error += (hertz_displacements[index] -
                  true_displacements[index] - correction)**2
        hertz_norm += (hertz_displacements[index] - hertz_mean)**2
    displacement_error = np.sqrt(error / hertz_norm)
    print("Displacement error (in contact zone): {}".format(displacement_error))

    assert area_error < 1e-2
    assert pressure_error < 1e-2
    assert displacement_error < 2e-3


if __name__ == "__main__":
    test_hertz_disp()
diff --git a/tests/test_hertz_kato.py b/tests/test_hertz_kato.py
index 5071e90..131bc2b 100644
--- a/tests/test_hertz_kato.py
+++ b/tests/test_hertz_kato.py
@@ -1,60 +1,61 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import division, print_function
import numpy as np
import tamaas as tm
import pytest
from numpy.linalg import norm
@pytest.fixture(scope="module")
def kato(hertz_coarse):
    """Solve the coarse Hertz problem with the Kato saturated solver.

    The saturation pressure is set absurdly high (1e30) so the solver
    behaves like an unsaturated contact solver.
    """
    hertz = hertz_coarse
    contact_model = tm.ModelFactory.createModel(
        tm.model_type.basic_2d,
        [hertz.domain_size, hertz.domain_size],
        [hertz.n, hertz.n])
    contact_model.E, contact_model.nu = hertz.e_star, 0

    kato_solver = tm.KatoSaturated(contact_model, hertz.surface, 1e-12, 1e30)
    kato_solver.max_iter = 10000
    kato_solver.solve(hertz.load)
    return contact_model, hertz
def test_contact_area(kato):
    """Contact area of the saturated solver must match Hertz."""
    model, hertz = kato
    computed = tm.Statistics2D.contact(model.traction)
    reference = tm.Statistics2D.contact(hertz.pressure)
    assert np.abs(reference - computed) / reference < 1e-2
def test_pressure(kato):
    """Relative L2 error of the saturated-solver pressure vs. Hertz."""
    model, hertz = kato
    reference = norm(hertz.pressure)
    residual = norm(model['traction'] - hertz.pressure)
    assert residual / reference < 5e-3
def test_displacement(kato):
    """Relative L2 error of the saturated-solver displacement vs. Hertz."""
    model, hertz = kato
    reference = norm(hertz.displacement)
    residual = norm(model['displacement'] - hertz.displacement)
    assert residual / reference < 2e-2
diff --git a/tests/test_integral_operators.py b/tests/test_integral_operators.py
index d09e2fa..ae06952 100644
--- a/tests/test_integral_operators.py
+++ b/tests/test_integral_operators.py
@@ -1,229 +1,230 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import tamaas as tm
import numpy as np
import pytest
from numpy.linalg import norm
from register_integral_operators import register_kelvin_force, \
set_integration_method
@pytest.fixture(params=[tm.integration_method.linear,
                        tm.integration_method.cutoff],
                ids=['linear', 'cutoff'])
def integral_fixture(request):
    """Parametrize tests over the available volume integration methods."""
    return request.param
def test_kelvin_volume_force(integral_fixture):
    """Compare the Kelvin volume-force operator against the analytical
    displacement field of a sinusoidal force sheet at mid-depth."""
    N = 65
    E = 1.
    nu = 0.3
    mu = E / (2*(1+nu))  # shear modulus

    domain = np.array([1.] * 3)
    # One full spatial period in each horizontal direction
    omega = 2 * np.pi * np.array([1, 1]) / domain[:2]
    omega_ = norm(omega)
    discretization = [N] * 3

    model = tm.ModelFactory.createModel(tm.model_type.volume_2d,
                                        domain,
                                        discretization)
    model.E = E
    model.nu = nu

    register_kelvin_force(model)
    kelvin = model.operators['kelvin_force']
    set_integration_method(kelvin, integral_fixture, 1e-12)

    # Periodic (endpoint excluded) in x/y, endpoint included in depth
    coords = [np.linspace(0, domain[i],
                          discretization[i], endpoint=False, dtype=tm.dtype)
              for i in range(2)]\
        + [np.linspace(0, domain[2], discretization[2])]
    x, y = np.meshgrid(*coords[:2], indexing='ij')

    displacement = model['displacement']
    source = np.zeros_like(displacement)

    # The integral of forces should stay constant
    source[N//2, :, :, 2] = np.sin(omega[0]*x) * np.sin(omega[1]*y) * (N-1)

    kelvin(source, displacement)

    # Analytical Kelvin solution; depth z measured from the source plane
    z = coords[2] - 0.5
    z, x, y = np.meshgrid(z, *coords[:2], indexing='ij')

    solution = np.zeros_like(source)
    solution[:, :, :, 0] = -np.exp(-omega_*np.abs(z)) / (8*mu*(1-nu)*omega_) \
        * omega[0]*z*np.cos(omega[0]*x)*np.sin(omega[1]*y)
    solution[:, :, :, 1] = -np.exp(-omega_*np.abs(z)) / (8*mu*(1-nu)*omega_) \
        * omega[1]*z*np.sin(omega[0]*x)*np.cos(omega[1]*y)
    solution[:, :, :, 2] = np.exp(-omega_*np.abs(z)) / (8*mu*(1-nu)*omega_) \
        * (3-4*nu + omega_*np.abs(z))*np.sin(omega[0]*x)*np.sin(omega[1]*y)

    error = norm(displacement - solution) / norm(solution)
    assert error < 5e-2
def test_mindlin(integral_fixture):
    """Check the Mindlin gradient operator against the analytical stress
    field of a spherical inclusion with isotropic eigenstrain in a
    half-space (normalization as in Mura, 1976)."""
    # Definition of modeled domain
    # tm.set_log_level(tm.LogLevel.debug)
    model_type = tm.model_type.volume_2d
    discretization = [126, 128, 128]
    system_size = [1., 3., 3.]

    integration_method = integral_fixture
    print(integration_method)

    # Material constants
    E = 1.  # Young's modulus
    nu = 0.3  # Poisson's ratio
    h = 0.01 * E  # Hardening modulus
    sigma_y = 0.3 * E  # Initial yield stress

    # Creation of model
    model = tm.ModelFactory.createModel(model_type,
                                        system_size,
                                        discretization)
    model.E = E
    model.nu = nu

    # Lamé parameters
    mu = E / (2*(1+nu))
    lamda = E * nu / ((1+nu) * (1-2*nu))

    # Setup for integral operators
    residual = tm.ModelFactory.createResidual(model, sigma_y, h)
    residual.setIntegrationMethod(integration_method, 1e-12)

    # Coordinates
    x = np.linspace(0, system_size[1], discretization[1],
                    endpoint=False, dtype=tm.dtype)
    y = np.linspace(0, system_size[2], discretization[2],
                    endpoint=False, dtype=tm.dtype)
    z = np.linspace(0, system_size[0], discretization[0],
                    endpoint=True, dtype=tm.dtype)
    z, x, y = np.meshgrid(z, x, y, indexing='ij')

    # Inclusion definition: sphere of radius a, center at depth c
    a, c = 0.1, 0.2
    center = [system_size[1] / 2, system_size[2] / 2, c]
    r = np.sqrt((x-center[0])**2 + (y-center[1])**2 + (z-center[2])**2)
    ball = r < a

    # Eigenstrain definition
    alpha = 1  # constant isotropic strain
    beta = (3 * lamda + 2 * mu) * alpha * np.eye(3)
    eigenstress = np.zeros(discretization + [3, 3], dtype=tm.dtype)
    eigenstress[ball, ...] = beta
    eigenstress = tm.compute.to_voigt(eigenstress.reshape(discretization + [9]))

    # Array references
    gradient = residual.getVector()
    stress = residual.getStress()

    # Applying operator
    # mindlin = model.operators["mindlin"]
    mindlin_gradient = model.operators["mindlin_gradient"]

    # Not testing displacements yet
    # mindlin(eigenstress, model['displacement'])

    # Applying gradient
    mindlin_gradient(eigenstress, gradient)
    model.operators['hooke'](gradient, stress)
    stress -= eigenstress

    # Normalizing stress as in Mura (1976)
    T, alpha = 1., 1.
    beta = alpha * T * (1+nu) / (1-nu)
    stress *= 1. / (2 * mu * beta)

    # Stress profile along the vertical line through the inclusion center
    n = discretization[1] // 2
    vertical_stress = stress[:, n, n, :]
    z_all = np.linspace(0, 1, discretization[0], dtype=tm.dtype)

    sigma_z = np.zeros_like(z_all)
    sigma_t = np.zeros_like(z_all)
    inclusion = np.abs(z_all - c) < a

    # Computing stresses for exterior points
    # R_1: distance to inclusion center, R_2: distance to image center
    z = z_all[~inclusion]
    R_1 = np.abs(z - c)
    R_2 = np.abs(z + c)
    sigma_z[~inclusion] = 2*mu*beta*a**3/3 * (
        1 / R_1**3
        - 1 / R_2**3
        - 18*z*(z+c) / R_2**5
        + 3*(z+c)**2 / R_2**5
        - 3*(z-c)**2 / R_1**5
        + 30*z*(z+c)**3 / R_2**7
    )
    sigma_t[~inclusion] = 2*mu*beta*a**3/3 * (
        1 / R_1**3
        + (3-8*nu) / R_2**3
        - 6*z*(z+c) / R_2**5
        + 12*nu*(z+c)**2 / R_2**5
    )

    # Computing stresses for interior points
    z = z_all[inclusion]
    R_1 = np.abs(z - c)
    R_2 = np.abs(z + c)
    sigma_z[inclusion] = 2*mu*beta*a**3/3 * (
        - 2 / a**3
        - 1 / R_2**3
        - 18*z*(z+c) / R_2**5
        + 3*(z+c)**2 / R_2**5
        + 30*z*(z+c)**3 / R_2**7
    )
    sigma_t[inclusion] = 2*mu*beta*a**3/3 * (
        - 2 / a**3
        + (3-8*nu) / R_2**3
        - 6*z*(z+c) / R_2**5
        + 12*nu*(z+c)**2 / R_2**5
    )

    # This test can be used to debug
    # NOTE(review): deliberately dead branch — flip to True when debugging
    if False:
        import matplotlib.pyplot as plt
        plt.plot(z_all, vertical_stress[:, 2])
        plt.plot(z_all, sigma_z / (2 * mu * beta))
        plt.figure()
        plt.plot(z_all, vertical_stress[:, 0])
        plt.plot(z_all, sigma_t / (2 * mu * beta))
        plt.show()

    z_error = norm(vertical_stress[:, 2] - sigma_z / (2 * mu * beta)) \
        / discretization[0]
    t_error = norm(vertical_stress[:, 0] - sigma_t / (2 * mu * beta)) \
        / discretization[0]

    assert z_error < 1e-3 and t_error < 1e-3
diff --git a/tests/test_matvec.py b/tests/test_matvec.py
index fc79569..859cc73 100644
--- a/tests/test_matvec.py
+++ b/tests/test_matvec.py
@@ -1,63 +1,64 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import pytest
import numpy as np
import tamaas as tm
from numpy.testing import assert_allclose
from test_dumper import model_fixture # noqa
linalg = pytest.importorskip('scipy.sparse.linalg')
def test_matvec_westergaard(model_fixture):
    """Check that the scipy LinearOperator wrapping of a Westergaard
    operator matches a direct call of the native operator."""
    m = model_fixture
    m.be_engine.registerNeumann()
    op = m.operators['Westergaard::neumann']
    linop = linalg.aslinearoperator(op)

    x = [np.linspace(0, L, N, endpoint=False)
         for (L, N) in zip(m.boundary_system_size, m.boundary_shape)]
    coords = np.meshgrid(*x, indexing='ij')

    t = m.traction
    res = np.zeros_like(m.displacement)

    # Volume models carry more dimensions than their boundary
    volume = len(m.shape) != len(m.boundary_shape)
    basic = m.type in (tm.model_type.basic_1d, tm.model_type.basic_2d)

    # Adapting to special cases: basic models lack a component axis
    if basic:
        t = t[..., np.newaxis]
        res = res[..., np.newaxis]

    # Sinusoidal normal traction as test input
    t[:] = 0
    t[..., -1] = np.sin(2 * np.pi * coords[0])

    # Computing using native API
    op(t, res)

    # Only look at surface displacement
    if volume:
        res = res[0]

    # Computing using LinearOperator API
    y = np.reshape(linop * np.ravel(t), res.shape)
    assert_allclose(res, y)
diff --git a/tests/test_memory.py b/tests/test_memory.py
index 78c28ed..241d966 100644
--- a/tests/test_memory.py
+++ b/tests/test_memory.py
@@ -1,56 +1,57 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function, division
import tamaas as tm
import numpy as np
def createRandomSurface():
    """Return a surface from an object which loses a ref"""
    generator = tm.SurfaceGeneratorFilter2D([12, 12])
    powerlaw = tm.Isopowerlaw2D()
    # Configure the self-affine spectrum before attaching it
    powerlaw.q0, powerlaw.q1, powerlaw.q2 = 1, 2, 4
    powerlaw.hurst = 0.6
    generator.spectrum = powerlaw
    return generator.buildSurface()
def registerField(model):
    """Register a field which loses a ref"""
    # The only Python reference to the array dies when this returns
    model['f'] = np.zeros([12, 12], dtype=tm.dtype)
def test_surface_survival():
    """The surface array must outlive its generator."""
    surf = createRandomSurface()
    surf += 1  # touch the memory: fails/crashes if storage was freed
    assert surf.shape == (12, 12)
def test_register_field_survival():
    """A registered field must survive losing its Python reference."""
    model = tm.ModelFactory.createModel(tm.model_type.basic_2d,
                                        [1., 1.],
                                        [12, 12])
    registerField(model)
    field = model['f']
    field += 1  # touch the memory: fails/crashes if storage was freed
    assert model['f'].shape == (12, 12)
diff --git a/tests/test_mpi_routines.py b/tests/test_mpi_routines.py
index 4d1b089..109bfcb 100644
--- a/tests/test_mpi_routines.py
+++ b/tests/test_mpi_routines.py
@@ -1,88 +1,89 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function
import inspect
import sys
import re
import os
import numpy as np
from mpi4py import MPI
from pytest import mark
import tamaas
PROG = '''
import sys
import numpy as np
sys.path = ['{}'] + sys.path
import mpi_routines
from mpi4py import MPI
res = 0
try:
mpi_routines.{}()
except Exception as e:
print(e)
res = 1
comm = MPI.Comm.Get_parent()
comm.Reduce(np.array(res, 'i'), None, op=MPI.SUM, root=0)
comm.Free()
sys.exit(0)
'''
HOSTFILE_CONTENT = 'localhost slots=24\n'
@mark.skipif(not tamaas.TamaasInfo.has_mpi, reason='tamaas compiled w/o MPI')
def test_mpi_routines():
    """Spawn each mpi_* routine from mpi_routines on 2 MPI processes and
    check that all ranks report success."""
    import mpi_routines

    me_path = os.path.realpath(__file__)
    current_dir = os.path.dirname(me_path)
    hostfile = os.path.join(current_dir, 'hostfile-mpi')

    with open(hostfile, 'w') as fh:
        fh.write(HOSTFILE_CONTENT)

    # Only functions named mpi_* are MPI test routines
    mpi_matcher = re.compile('^mpi_')

    for name, _ in inspect.getmembers(mpi_routines, inspect.isfunction):
        if mpi_matcher.match(name):
            try:
                info = MPI.Info.Create()
                info['add-hostfile'] = hostfile
                print('Spawning "{}"'.format(name))
                comm = MPI.COMM_SELF.Spawn(
                    sys.executable, args=[
                        '-c', PROG.format(os.path.abspath(current_dir),
                                          name),
                    ], maxprocs=2, info=info)

                # Children sum their status codes into res; 0 = all passed
                res = np.array(0, 'i')
                comm.Reduce(None, res, op=MPI.SUM, root=MPI.ROOT)
            except MPI.Exception as exc:
                print(exc)
                raise RuntimeError('Could not spawn MPI processes for "{}"'
                                   .format(name))

            assert res == 0, name
diff --git a/tests/test_patch_plasticity.py b/tests/test_patch_plasticity.py
index 7696230..1d93c54 100644
--- a/tests/test_patch_plasticity.py
+++ b/tests/test_patch_plasticity.py
@@ -1,51 +1,52 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-20 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import pytest
from numpy.linalg import norm
import tamaas
from tamaas.nonlinear_solvers import \
DFSANESolver as PySolver, \
DFSANECXXSolver as CXXSolver, \
NewtonKrylovSolver
@pytest.mark.parametrize('solvers', [CXXSolver, NewtonKrylovSolver, PySolver])
def test_patch_plasticity(patch_isotropic_plasticity, solvers):
    "Test analytical solution of 1d plasticity"
    tamaas.set_log_level(tamaas.LogLevel.info)
    Solver = solvers
    model = patch_isotropic_plasticity.model
    residual = patch_isotropic_plasticity.residual

    applied_pressure = 0.1

    solver = Solver(residual)
    solver.tolerance = 1e-15

    # Apply a uniform normal pressure on the surface
    pressure = model['traction'][..., 2]
    pressure[:] = applied_pressure

    solver.solve()
    solver.updateState()

    # Compare each solution field to its analytical counterpart,
    # normalized by the field's reference magnitude
    solution, normal = patch_isotropic_plasticity.solution(applied_pressure)

    for key in solution:
        error = norm(model[key] - solution[key]) / normal[key]
        assert error < 3e-15
diff --git a/tests/test_patch_westergaard.py b/tests/test_patch_westergaard.py
index eccd47c..fa358aa 100644
--- a/tests/test_patch_westergaard.py
+++ b/tests/test_patch_westergaard.py
@@ -1,64 +1,65 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import division, print_function
from numpy.linalg import norm
import tamaas as tm
def test_patch_westergaard(patch_westergaard):
    """Verify Neumann (and, where defined, Dirichlet) Westergaard
    solutions against the analytical patch solution."""
    model = patch_westergaard.model
    solution = patch_westergaard.solution
    pressure = patch_westergaard.pressure

    shape = (model.boundary_shape + [tm.type_traits[model.type].components])

    # Setting the correct pressure component (writes through the view)
    traction = model.traction.reshape(shape)[..., -1]
    traction[:] = pressure

    # solveNeumann() doesn't do anything on volume models
    if model.type == tm.model_type.volume_2d:
        tm.ModelFactory.registerVolumeOperators(model)
        model.operators['boussinesq'](model.traction, model.displacement)
    else:
        model.solveNeumann()

    # Volume models: compare surface (layer 0) displacement only
    if model.type in [tm.model_type.volume_1d, tm.model_type.volume_2d]:
        output_displ = model.displacement[0, ..., -1]
    else:
        output_displ = model.displacement.reshape(shape)[..., -1]

    error = norm(solution - output_displ) / norm(solution)
    # The norm(solution) > 1e-15 guard catches a trivially-zero solution
    assert error < 1e-15 and norm(solution) > 1e-15, \
        "Neumann error = {}".format(error)

    # Removing model types where no dirichlet is defined yet
    if model.type in {tm.model_type.surface_1d,
                      tm.model_type.surface_2d,
                      tm.model_type.volume_2d}:
        return

    # Inverse problem: recover pressure from imposed displacement
    traction[:] = 0
    output_displ[:] = solution
    model.solveDirichlet()
    error = norm(pressure - traction) / norm(pressure)
    assert error < 1e-15, "Dirichlet error = {}".format(error)
diff --git a/tests/test_publications.py b/tests/test_publications.py
index a200f8e..ad199f1 100644
--- a/tests/test_publications.py
+++ b/tests/test_publications.py
@@ -1,35 +1,36 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
import tamaas as tm
from tamaas.utils import publications
def test_publications():
    """Each created object type must contribute its citations once."""
    # Keep references alive so the citation registry sees the objects
    generator = tm.SurfaceGeneratorRandomPhase2D()  # noqa
    model = tm.Model(tm.model_type.basic_2d, [2, 2], [2, 2])
    contact_solver = tm.PolonskyKeerRey(model, model.traction, 1e-12)  # noqa
    assert len(publications()) == 5, "Wrong number of publications"

    # A new object type adds exactly one more citation
    filter_generator = tm.SurfaceGeneratorFilter2D()  # noqa
    assert len(publications()) == 6, "Wrong number of publications"


if __name__ == "__main__":
    test_publications()
diff --git a/tests/test_saturated_pressure.py b/tests/test_saturated_pressure.py
index 60ce788..f99d752 100644
--- a/tests/test_saturated_pressure.py
+++ b/tests/test_saturated_pressure.py
@@ -1,71 +1,72 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function, division
import numpy as np
import tamaas as tm
import pytest
@pytest.fixture(scope="module")
def saturated():
    """Solve a saturated rough-contact problem once per module.

    Returns (model, saturation_pressure, applied_load).
    """
    n = 256
    load = 0.06
    p_sat = 0.4

    # Self-affine spectrum
    iso = tm.Isopowerlaw2D()
    iso.q0, iso.q1, iso.q2 = 4, 4, 16
    iso.hurst = 0.8

    generator = tm.SurfaceGeneratorFilter2D([n] * 2)
    generator.random_seed = 2
    generator.spectrum = iso

    surface = generator.buildSurface()
    surface *= 0.01 / iso.rmsHeights()  # normalize RMS of heights
    surface -= np.max(surface)          # highest point at zero

    model = tm.ModelFactory.createModel(tm.model_type.basic_2d,
                                        [1., 1.], [n, n])
    model.E, model.nu = 1., 0

    solver = tm.KatoSaturated(model, surface, 1e-12, p_sat)
    solver.max_iter = 6000
    solver.solve(load)
    return model, p_sat, load
def test_mean_pressure(saturated):
    """Mean traction must equal the applied load."""
    model, _, load = saturated
    mean_traction = model.traction.mean()
    assert abs(mean_traction - load) / load < 1e-12
def test_max_pressure(saturated):
    """Peak traction must equal the saturation pressure."""
    model, p_sat, _ = saturated
    peak = model.traction.max()
    assert abs(peak - p_sat) / p_sat < 1e-12
def test_orthogonality(saturated):
    """Pressure and gap must be complementary (no overlap)."""
    model, _, _ = saturated
    overlap = np.max(model.traction * model['gap'])
    scale = model.traction.max() * model['gap'].max()
    assert overlap / scale < 4e-8
diff --git a/tests/test_surface.py b/tests/test_surface.py
index 10feaf3..58fde32 100644
--- a/tests/test_surface.py
+++ b/tests/test_surface.py
@@ -1,140 +1,141 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
from __future__ import print_function, division
import pytest
import numpy as np
import tamaas as tm
from numpy import pi
integrate = pytest.importorskip('scipy.integrate')
special = pytest.importorskip('scipy.special')
jv = special.jv
E = special.ellipe
def spectrum(r, isopowerlaw):
    """Reference piecewise PSD for an isotropic power-law spectrum.

    Unit plateau between q0 and q1, power-law decay with Hurst exponent
    between q1 and q2, zero outside [q0, q2].
    """
    low, rolloff, cutoff = (isopowerlaw.q0, isopowerlaw.q1,
                            isopowerlaw.q2)
    hurst = isopowerlaw.hurst
    amplitude = 1.

    # Guard-clause chain mirrors the decreasing wavenumber ranges
    if r > cutoff:
        return 0
    if r > rolloff:
        return amplitude * (r / float(rolloff))**(-2. * (hurst + 1))
    if r > low:
        return amplitude
    return 0
def integrate_bessel(x, spectrum, n):
    """Order-0 Hankel transform of the radial spectrum, scaled by 2*pi.

    Computes 2*pi * int_0^inf y * J0(x*y) * spectrum(n*y) dy with
    adaptive quadrature on the half-line.
    """
    def integrand(y):
        return y * jv(0, y * x) * spectrum(n * y)

    value, _ = integrate.quad(integrand, 0, np.inf, limit=200)
    return 2 * pi * value
# Vectorize over x; outputs are cast to the Tamaas floating-point dtype
integrate_bessel = np.vectorize(integrate_bessel, otypes=[tm.dtype])
@pytest.fixture(
    scope="module",
    params=[tm.SurfaceGeneratorFilter2D, tm.SurfaceGeneratorRandomPhase2D])
def stats(request):
    """Accumulate surface statistics over many random realizations.

    Parametrized over both surface generators. Returns a tuple
    (analytical_stats, sampled_stats, ensemble-averaged ACF,
    analytical ACF from the Hankel transform of the spectrum).
    """
    iso = tm.Isopowerlaw2D()
    iso.q0 = 16
    iso.q1 = 32
    iso.q2 = 64
    iso.hurst = 0.8
    N = 243
    sg = request.param()
    sg.spectrum = iso
    sg.shape = [N, N]
    # Closed-form reference values from the power-law spectrum
    analytical_stats = {
        "rms_heights": iso.rmsHeights(),
        "rms_slopes_spectral": iso.rmsSlopes(),
        "moments": iso.moments(),
        "alpha": iso.alpha()
    }
    # Per-realization samples ('alpha' collects no samples in this loop)
    stats = {k: [] for k in analytical_stats}
    acf = np.zeros(sg.shape, dtype=tm.dtype)
    realizations = 1000
    for i in range(realizations):
        # One deterministic seed per realization for reproducibility
        sg.random_seed = i
        s = sg.buildSurface()
        stats['rms_heights'].append(tm.Statistics2D.computeRMSHeights(s))
        stats['rms_slopes_spectral'].append(
            tm.Statistics2D.computeSpectralRMSSlope(s))
        stats['moments'].append(tm.Statistics2D.computeMoments(s))
        # Running ensemble average of the autocorrelation
        acf += tm.Statistics2D.computeAutocorrelation(s) / realizations
    # Analytical ACF sampled on half the domain; length scale taken as
    # q0 in reduced units — presumably the physical domain size, TODO confirm
    L = 1 * sg.spectrum.q0
    n = sg.shape[0] // 2
    x = np.linspace(0, L / 2, n)
    true_acf = (L / (2 * pi))**2 \
        * integrate_bessel(x, lambda x: spectrum(x, sg.spectrum), L / (2 * pi))
    return analytical_stats, stats, acf, true_acf
@pytest.mark.parametrize('key', ['rms_heights', 'rms_slopes_spectral'])
def test_statistics(stats, key):
    """Ensemble mean of each statistic converges to its analytical value."""
    analytical, sampled = stats[0], stats[1]
    sample_mean = np.mean(sampled[key])
    relative = np.abs(sample_mean - analytical[key]) / analytical[key]
    assert relative < 3e-3
def test_autocorrelation(stats):
    """Radially-averaged ACF matches the Hankel transform of the PSD."""
    from tamaas.utils import radial_average
    acf, true_acf = stats[2], stats[3]
    grid_x = np.linspace(-1, 1, acf.shape[0])
    grid_y = np.linspace(-1, 1, acf.shape[1])
    radii = np.linspace(0, 1, true_acf.shape[0])
    # NOTE(review): no `num` given, so 50 angular samples (numpy default)
    # are used for the average — confirm that count is intentional
    angles = np.linspace(0, 2 * np.pi, endpoint=False)
    radial_acf = radial_average(grid_x, grid_y, np.fft.fftshift(acf),
                                radii, angles, endpoint=False)
    residual = np.sum((radial_acf - true_acf)**2) / np.sum(true_acf**2)
    assert residual < 2e-3
def test_graph_area():
    """Fourier graph area of a sine matches numerical and closed forms."""
    samples = 100
    t = np.linspace(0, 1, samples, endpoint=False)
    height = np.sin(2 * np.pi * t)
    slope = 2 * np.pi * np.cos(2 * np.pi * t)

    # Direct numerical estimate from the sampled slope field
    numerical = np.sqrt(1 + slope**2).mean()
    # Closed form via the complete elliptic integral of the second kind
    m = 4 * np.pi**2 / (1 + 4 * np.pi**2)
    exact = 2 * np.sqrt(1 + 4 * np.pi**2) / np.pi * E(m)

    fourier = tm.Statistics1D.graphArea(height)
    assert np.abs(numerical - fourier) / numerical < 1e-10
    assert np.abs(exact - fourier) / exact < 2e-10
diff --git a/tests/test_tangential.py b/tests/test_tangential.py
index 7ecc2c3..2a2a6e6 100644
--- a/tests/test_tangential.py
+++ b/tests/test_tangential.py
@@ -1,100 +1,101 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import division, print_function
import numpy as np
from numpy.linalg import norm
import tamaas as tm
def test_pressure():
    """Validate tangential contact solvers against the analytical
    partial-slip sphere solution (superposition of two Hertzian-type
    traction distributions over radii a and c).
    """
    E = 1.0      # Young's modulus
    nu = 0.5     # Poisson's ratio
    mu = 0.5     # friction coefficient
    n = 81       # grid points per direction
    L = 1.0      # domain size
    # Mean target traction: x (tangential) and z (normal) components
    p_target = np.array([0.5e-4, 0, 2e-4], dtype=tm.dtype)
    # g_target = np.array([-0.000223, 0, 9.99911], dtype=tm.dtype)
    x = np.linspace(-L/2, L/2, n, dtype=tm.dtype)
    y = np.copy(x)
    x, y = np.meshgrid(x, y, indexing="ij")
    r_sqrd = (x**2 + y**2)
    # Spherical contact
    R = 10.0
    surface = R * np.sqrt((r_sqrd < R**2) * (1 - r_sqrd / R**2))
    # Creating model
    model = tm.ModelFactory.createModel(tm.model_type.surface_2d,
                                        [L, L], [n, n])
    model.E, model.nu = E, nu
    # Live view of the traction field, updated by each solver below
    p = model['traction']
    # Theoretical solution
    Estar = E / (1.0 - nu**2)   # effective contact modulus
    Fn = p_target[2] * L**2     # total normal force
    Fx = p_target[0] * L**2     # total tangential force
    d_theory = np.power(3 / 4 * Fn / Estar / np.sqrt(R), 2/3)
    a_theory = np.sqrt(R * d_theory)  # contact radius
    # Stick-zone radius — shrinks as the tangential force approaches mu*Fn
    c_theory = a_theory * (1 - Fx / (mu * Fn)) ** (1/3)
    p0_theory = np.power(6 * Fn * Estar**2 / np.pi**3 / R**2, 1/3)
    t1_theory = mu * p0_theory
    t2_theory = t1_theory * c_theory / a_theory
    # p_theory = p0_theory * np.sqrt((r_sqrd < a_theory**2)
    #                                * (1 - r_sqrd / a_theory**2))
    # Tangential traction: full-slip term minus the stick-zone correction
    t_theory = t1_theory * np.sqrt((r_sqrd < a_theory**2)
                                   * (1 - r_sqrd / a_theory**2))
    t_theory = t_theory - t2_theory * np.sqrt((r_sqrd < c_theory**2)
                                              * (1 - r_sqrd / c_theory**2))
    def assert_error(err):
        # Relative L2 error of the solved x-traction vs the analytical field
        error = norm(p[..., 0] - t_theory) / norm(t_theory)
        print(error)
        assert error < err
    # Test Kato solver
    solver = tm.Kato(model, surface, 1e-12, mu)
    solver.max_iter = 1000
    solver.solve(p_target, 100)
    assert_error(4e-2)
    # solver.solveRelaxed(g_target)
    # assert_error(1e-2)
    # # Test BeckTeboulle solver
    # solver = tm.BeckTeboulle(model, surface, 1e-12, mu)
    # solver.solve(g_target)
    # assert_error(1e-2)
    # Test Condat solver
    solver = tm.Condat(model, surface, 1e-12, mu)
    solver.max_iter = 5000 # or 10000
    solver.solve(p_target)
    assert_error(7e-2) # 4e-2 for 10000 iterations
    # Test tangential Polonsky Keer solver
    solver = tm.PolonskyKeerTan(model, surface, 1e-12, mu)
    solver.max_iter = 1000
    solver.solve(p_target)
    assert_error(4e-2)
diff --git a/tests/test_voigt.py b/tests/test_voigt.py
index d949313..e213c18 100644
--- a/tests/test_voigt.py
+++ b/tests/test_voigt.py
@@ -1,32 +1,33 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import tamaas as tm
import numpy as np
def test_voigt():
    """Voigt conversion: shear components pick up a sqrt(2) factor."""
    grid = [3, 3, 3]
    full_tensor = np.ones(grid + [9], dtype=tm.dtype)
    converted = tm.compute.to_voigt(full_tensor)

    expected = np.ones(grid + [6], dtype=tm.dtype)
    expected[..., 3:] = np.sqrt(2)  # off-diagonal (shear) entries

    print(converted)
    print(expected)
    assert np.all(np.abs(expected - converted) < 1e-15)
diff --git a/tests/test_westergaard.py b/tests/test_westergaard.py
index fc5b2a2..874450a 100644
--- a/tests/test_westergaard.py
+++ b/tests/test_westergaard.py
@@ -1,81 +1,82 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
+# Copyright (©) 2020-2022 Lucas Frérot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import division, print_function
import tamaas as tm
import pytest
from conftest import pkridfn
from numpy.linalg import norm
@pytest.fixture(scope="module",
                params=[tm.PolonskyKeerRey.pressure,
                        tm.PolonskyKeerRey.gap],
                ids=pkridfn)
def pkr(westergaard, request):
    """Solve the Westergaard problem with PKR, pressure- or gap-driven."""
    constraint = request.param
    # Target value matching the constrained quantity
    targets = {
        tm.PolonskyKeerRey.pressure: westergaard.load,
        tm.PolonskyKeerRey.gap: 0.06697415181446396
    }
    model = tm.ModelFactory.createModel(tm.model_type.basic_1d, [1.],
                                        [westergaard.n])
    model.E, model.nu = westergaard.e_star, 0
    solver = tm.PolonskyKeerRey(model, westergaard.surface, 1e-12,
                                constraint, constraint)
    solver.max_iter = 5000
    solver.solve(targets[constraint])
    return model, westergaard
def test_energy(pkr):
    """Complementarity check: (p - p*)·(u - u*) vanishes in norm."""
    model, reference = pkr
    traction_diff = model['traction'] - reference.pressure
    displacement_diff = model['displacement'] - reference.displacement
    scale = norm(reference.pressure * reference.displacement)
    assert norm(traction_diff * displacement_diff) / scale < 4e-6
@pytest.mark.xfail(tm.TamaasInfo.backend == 'tbb',
                   reason='TBB reductions are unstable?')
def test_pkr_multi(westergaard):
    """Boundary and volume models produce identical PKR tractions."""
    basic = tm.ModelFactory.createModel(tm.model_type.basic_1d, [1.],
                                        [westergaard.n])
    volume = tm.ModelFactory.createModel(tm.model_type.volume_1d,
                                         [1., 1.],
                                         [20, westergaard.n])
    solved = {}
    for m in (basic, volume):
        print(m)
        pkr_solver = tm.PolonskyKeerRey(m, westergaard.surface, 1e-12,
                                        tm.PolonskyKeerRey.pressure,
                                        tm.PolonskyKeerRey.pressure)
        pkr_solver.solve(westergaard.load)
        solved[m] = m['traction']
    # NOTE(review): volume traction is sliced at index 1 along its second
    # axis — presumably the surface layer; confirm against the model layout
    discrepancy = norm(solved[basic] - solved[volume][:, 1])\
        / westergaard.load
    assert discrepancy < 1e-16
# Direct execution is not supported: these tests rely on pytest fixtures
if __name__ == "__main__":
    print("Test is meant to run with pytest")