diff --git a/tests/SConscript b/tests/SConscript
index 8387e1b..eeba298 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -1,214 +1,213 @@
 # -*- mode:python; coding: utf-8 -*-
 # vim: set ft=python:
 # @file
 # LICENSE
 #
 # Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne),
 # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published
 # by the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Affero General Public License for more details.
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 from __future__ import print_function
 
 from SCons.Script import Split, Copy, Dir, Import
 
 from detect import FindGTest, FindPybind11
 
 
 # ------------------------------------------------------------------------------
 def copyComStr(env, main):
     if 'SHCXXCOMSTR' in main:
         env['CXXCOMSTR'] = main['SHCXXCOMSTR']
     if 'SHLINKCOMSTR' in main:
         env['LINKCOMSTR'] = main['SHLINKCOMSTR']
 
 
 # ------------------------------------------------------------------------------
 def make_python_tests(env):
     """Copy python tests to build directory"""
     test_env = env.Clone()
 
     test_files = Split("""
-    test_autocorrelation.py
     test_hertz.py
     test_westergaard.py
     test_patch_westergaard.py
     test_patch_plasticity.py
     test_surface.py
     test_hertz_disp.py
     test_hertz_kato.py
     test_saturated_pressure.py
     test_flood_fill.py
     test_integral_operators.py
     test_dumper.py
     test_tangential.py
     test_boussinesq_surface.py
     test_voigt.py
     test_memory.py
     test_epic.py
     fftfreq.py
     conftest.py
     pytest.ini
     """)
 
     if env['use_mpi']:
         test_files += ['test_mpi_routines.py', 'mpi_routines.py']
 
     src_dir = "#/tests"
 
     targets = [
         test_env.Command(file,
                          test_env.File(file, src_dir),
                          Copy("$TARGET", "$SOURCE"))
         for file in test_files
     ]
 
     test_env = env.Clone(tools=[pybind11])
 
     # Helper module for integral operators
     test_env['SHLIBPREFIX'] = ''
     test_env.PrependUnique(LIBS=['Tamaas'])
     register = test_env.Pybind11Module(
         target="register_integral_operators",
         source=["register_integral_operators.cpp"])
     Import('libTamaas')
     test_env.Depends(register, libTamaas)
     targets.append(register)
 
     return targets
 
 
 # ------------------------------------------------------------------------------
 def compile_google_test(env, gtest_path):
     gtest_obj = env.Object('gtest.o',
                            [env.File("src/gtest-all.cc", gtest_path)])
 
     return env.StaticLibrary('gtest', gtest_obj)
 
 
 # ------------------------------------------------------------------------------
 def make_google_tests(env):
     gtest_dir = Dir(env['GTEST_ROOT'])
 
     gtest_env = env.Clone(CPPPATH=[gtest_dir],
                           CXXFLAGS=['-pthread', '-isystem',
                                     env.Dir('include', gtest_dir).path])
 
     FindGTest(gtest_env)
 
     libgtest = None
 
     # Hugly hack to detect if we need to compile gtest submodule
     if env['GTEST_ROOT'] == '#third-party/googletest/googletest':
         gtest_path = str(gtest_dir)
         libgtest = compile_google_test(gtest_env, gtest_path)
 
     env.AppendUnique(CXXFLAGS=gtest_env['CXXFLAGS'])
     env.PrependUnique(LIBS=['Tamaas', env.subst('python${py_version}')])
 
     google_test_files = Split("""
     test_fft.cpp
     test_grid.cpp
     test_loop.cpp
     test_model.cpp
     test_static_types.cpp
     test_integration.cpp
     """)
 
     # Necessary for the tests that use pybind11 calls to python
     uses = []
     if env['build_python']:
         google_test_files.append('test_fftfreq.cpp')
         uses = ['TAMAAS_USE_PYTHON']
 
     if env['use_mpi']:
         google_test_files.append('test_mpi.cpp')
 
     defines = env['CPPDEFINES']
     if type(defines) is not list:
         defines = [defines]
 
     gtest_main = env.Object("tamaas_gtest_main.o", 'tamaas_gtest_main.cc',
                             CPPDEFINES=defines + uses)
 
     gtest_all = env.Program('test_gtest_all',
                             google_test_files + [gtest_main],
                             LIBS=(env['LIBS'] + ['gtest']))
     Import('libTamaas')
     env.Depends(gtest_all, libTamaas)
     env.Depends(gtest_all, libgtest)
 
     return [gtest_all]
 
 
 # ------------------------------------------------------------------------------
 def make_bare_tests(env):
     rough = env.Program("test_rough_surface.cpp")
     Import('libTamaas')
     env.Depends(rough, libTamaas)
     return [rough]
 
 
 # ------------------------------------------------------------------------------
 Import('main_env')
 
 # Setup of test environment
 test_env = main_env.Clone()
 test_env.AppendUnique(
     LIBPATH=['.', '../src'],
     RPATH=["'$$$$ORIGIN/../src'"]
 )
 test_env.PrependUnique(LIBS=['Tamaas'])
 
 # Building tests that do not require any third party
 targets = make_bare_tests(test_env)
 
 # Build tests that required python bindings
 if test_env['build_python']:
     FindPybind11(test_env)
     test_env.Tool(pybind11)
     test_env.ParseConfig("${py_exec}-config --ldflags")
     test_env['CCFLAGS'] = []
     targets += make_python_tests(test_env)
 
 # Building google tests
 if test_env['use_googletest']:
     targets += make_google_tests(test_env)
     targets.append(test_env.Command('test_gtest.py', '#tests/test_gtest.py',
                                     Copy('$TARGET', '$SOURCE')))
 
 # Target alias to build tests
 main_env.Alias('build-tests', targets)
 
 # Check if pytest is installed
 conf = Configure(test_env,
                  custom_tests={'CheckPythonModule': CheckPythonModule})
 has_pytest = conf.CheckPythonModule('pytest')
 conf.Finish()
 
 # Define a command to execute tests
 if has_pytest:
     pytest_env = test_env.Clone()
 
     test_env['pythonpath'] = '${build_dir}/python'
     test_env['ld_library_path'] = '${build_dir}/src'
 
     pytest_env.PrependENVPath('PYTHONPATH', '${pythonpath.abspath}')
     pytest_env.PrependENVPath('LD_LIBRARY_PATH', '${ld_library_path.abspath}')
 
     # Setting a moderate thread number
     pytest_env['ENV']['OMP_NUM_THREADS'] = "1"
 
     test_target = pytest_env.Command('.phony_test', targets,
                                      '${py_exec} -m pytest ${build_dir}/tests')
     main_env.Alias('test', test_target)
 else:
     # We still define a target here so that `scons test` still works
     dummy_command(main_env, 'test',
                   'Cannot run tests: pytest is not installed')
diff --git a/tests/test_autocorrelation.py b/tests/test_autocorrelation.py
deleted file mode 100644
index ee6aaa9..0000000
--- a/tests/test_autocorrelation.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# -*- coding: utf-8 -*-
-# @file
-# LICENSE
-#
-# Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne),
-# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published
-# by the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-import tamaas as tm
-import numpy as np
-from scipy.special import jv
-import scipy.integrate as integrate
-
-
-def spectrum(r, kl, kr, ks, C, H):
-    if r > ks:
-        return 0
-    elif r > kr:
-        return C * (r/float(kr))**(-2.*(H+1))
-    elif r > kl:
-        return C
-    else:
-        return 0
-
-
-# n is normalization parameter
-def integrate_bessel(x, spectrum, n):
-    result = 2 * np.pi * \
-        integrate.quad(lambda y: y*jv(0, y*x) * spectrum(n*y), 0, np.inf)[0]
-    return result
-
-
-def test_autocorrelation():
-    tm.initialize()
-    grid_size = 256
-
-    lambda_l = 8.
-    lambda_r = 8.
-    lambda_s = 1.
-
-    L = 4 * lambda_l
-
-    kl = int(L / lambda_l)
-    kr = int(L / lambda_r)
-    ks = int(L / lambda_s)
-
-    # Computing ACF from integral with Bessel function
-    n = grid_size // 2  # points on curve
-    x = np.linspace(0, L/2, n)
-    bessel_acf = np.zeros(n)
-    integrate_bessel_v = np.vectorize(integrate_bessel, otypes=[tm.dtype])
-    bessel_acf = (L / (2*np.pi))**2 *\
-        integrate_bessel_v(x, lambda x: spectrum(x,
-                                                 kl, kr, ks,
-                                                 1., 0.8), L / (2*np.pi))
-    nsamples = 1000
-    etages = np.zeros((grid_size, grid_size, nsamples))
-
-    # Computing ACF with Hu & Tonder algo reimplemented
-    spec = tm.Isopowerlaw2D()
-    spec.q0, spec.q1, spec.q2 = kl, kr, ks
-    spec.hurst = 0.8
-
-    SG = tm.SurfaceGeneratorFilter2D([grid_size] * 2)
-    SG.spectrum = spec
-
-    for i in range(nsamples):
-        SG.random_seed = i
-        a = SG.buildSurface()
-        etages[:, :, i] = a[:, :]
-
-    cov = np.zeros(n)
-
-    for i in np.arange(n, grid_size, 1):
-        cov[i-n] = np.sum((etages[n, i, :]-np.mean(etages[n, i, :])) *
-                          (etages[n, n, :]-np.mean(etages[n, n, :])))/nsamples
-
-    error = np.sum((cov-bessel_acf)*(cov-bessel_acf)) \
-        / np.sum(bessel_acf*bessel_acf)
-    print(error)
-
-    assert error < 1e-1
-
-
-if __name__ == "__main__":
-    test_autocorrelation()
diff --git a/tests/test_surface.py b/tests/test_surface.py
index f396596..f85af92 100644
--- a/tests/test_surface.py
+++ b/tests/test_surface.py
@@ -1,79 +1,137 @@
 # -*- coding: utf-8 -*-
 # @file
 # LICENSE
 #
 # Copyright (©) 2016-2021 EPFL (École Polytechnique Fédérale de Lausanne),
 # Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published
 # by the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Affero General Public License for more details.
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 from __future__ import print_function, division
 
 import numpy as np
 import tamaas as tm
 import pytest
+import scipy.integrate as integrate
+from scipy.special import jv
+from numpy import pi
+from scipy.interpolate import interp2d
 
 
 def compute_mse(values, mean):
     return np.mean((values - mean)**2)
 
 
+def spectrum(r, isopowerlaw):
+    kl, kr, ks = isopowerlaw.q0, isopowerlaw.q1, isopowerlaw.q2
+    H = isopowerlaw.hurst
+    C = 1.
+
+    if r > ks:
+        return 0
+    elif r > kr:
+        return C * (r/float(kr))**(-2.*(H+1))
+    elif r > kl:
+        return C
+    else:
+        return 0
+
+
+def integrate_bessel(x, spectrum, n):
+    return 2 * pi * integrate.quad(lambda y: y*jv(0, y*x) * spectrum(n*y),
+                                   0, np.inf, limit=200)[0]
+
+
+def radial_average(field):
+    x = np.linspace(-1, 1, field.shape[0])
+    y = np.linspace(-1, 1, field.shape[1])
+    interpolation = interp2d(x, y, field)
+
+    n = field.shape[0] // 2
+    radii = np.linspace(0, 1, n)
+    thetas = np.linspace(0, 2 * pi, endpoint=False)
+
+    sum = np.zeros_like(radii)
+
+    for Θ in thetas:
+        for i, r in enumerate(radii):
+            sum[i] += interpolation(r * np.cos(Θ), r * np.sin(Θ))
+    return sum / thetas.size
+
+
+integrate_bessel = np.vectorize(integrate_bessel, otypes=[tm.dtype])
+
+
 @pytest.fixture(scope="module",
                 params=[tm.SurfaceGeneratorFilter2D,
                         tm.SurfaceGeneratorRandomPhase2D])
 def isopowerlaw(request):
     iso = tm.Isopowerlaw2D()
     iso.q0 = 16
     iso.q1 = 32
     iso.q2 = 64
     iso.hurst = 0.8
-    N = 128
+    N = 243
 
     sg = request.param()
     sg.spectrum = iso
     sg.shape = [N, N]
 
     analytical_stats = {
         "rms_heights": iso.rmsHeights(),
         "rms_slopes_spectral": iso.rmsSlopes(),
         "moments": iso.moments(),
         "alpha": iso.alpha()
     }
 
     return sg, analytical_stats
 
 
 def test_surface_statistics(isopowerlaw):
     sg, analytical_stats = isopowerlaw
     realizations = 1000
 
+    L = 1 * sg.spectrum.q0
+    n = sg.shape[0] // 2
+    x = np.linspace(0, L/2, n)
+
+    true_acf = (L / (2 * pi))**2 \
+        * integrate_bessel(x, lambda x: spectrum(x, sg.spectrum), L / (2 * pi))
+
     stats = {k: [] for k in analytical_stats}
+    acf = np.zeros(sg.shape, dtype=tm.dtype)
 
     for i in range(realizations):
         sg.random_seed = i
         s = sg.buildSurface()
         stats['rms_heights'].append(
-            np.sqrt(np.mean(s**2)))
+            tm.Statistics2D.computeRMSHeights(s))
         stats['rms_slopes_spectral'].append(
             tm.Statistics2D.computeSpectralRMSSlope(s))
         stats['moments'].append(tm.Statistics2D.computeMoments(s))
+        acf += tm.Statistics2D.computeAutocorrelation(s) / realizations
 
     # Testing only the more reliable measures
     for key in ['rms_heights', 'rms_slopes_spectral']:
         mu = np.mean(stats[key])
         error = np.abs(mu - analytical_stats[key]) / analytical_stats[key]
-        assert error < 3e-3
+        assert error < 3e-3, "Error in measure of {}".format(key)
+
+    acf = radial_average(np.fft.fftshift(acf))
+    acf_error = np.sum((acf-true_acf)**2) / np.sum(true_acf**2)
+
+    assert acf_error < 2e-3, "Error with analytical ACF"
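
The new ACF check in test_surface.py compares an ensemble- and radially-averaged autocorrelation of generated surfaces against the Bessel (Hankel) transform of the isotropic power-law spectrum. The standalone sketch below is not part of the patch: it reuses the spectrum, integrate_bessel and radial_average helpers added above (assuming it is run from the tests/ directory so test_surface is importable) and only the tamaas calls that appear in the diff; the reduced realization count and printed error are illustrative choices, not values from the test.

# Minimal sketch of the ACF comparison, on a reduced ensemble.
import numpy as np
from numpy import pi
import tamaas as tm
from test_surface import spectrum, integrate_bessel, radial_average

# Same isotropic power-law spectrum as the pytest fixture
iso = tm.Isopowerlaw2D()
iso.q0, iso.q1, iso.q2, iso.hurst = 16, 32, 64, 0.8

sg = tm.SurfaceGeneratorFilter2D()
sg.spectrum = iso
sg.shape = [243, 243]

# Fewer realizations than the test's 1000, so expect a looser match
realizations = 50
acf = np.zeros(sg.shape, dtype=tm.dtype)
for seed in range(realizations):
    sg.random_seed = seed
    acf += tm.Statistics2D.computeAutocorrelation(sg.buildSurface()) \
        / realizations

# Analytical ACF: Bessel-J0 (Hankel) transform of the radial spectrum
L = 1 * iso.q0
n = sg.shape[0] // 2
x = np.linspace(0, L / 2, n)
true_acf = (L / (2 * pi))**2 \
    * integrate_bessel(x, lambda r: spectrum(r, iso), L / (2 * pi))

# Radially average the measured ACF and compare to the analytical curve
measured = radial_average(np.fft.fftshift(acf))
rel_error = np.sum((measured - true_acf)**2) / np.sum(true_acf**2)
print("relative ACF error:", rel_error)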