diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 0000000..2897013
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,3 @@
+[style]
+based_on_style = pep8
+blank_line_before_nested_class_or_def = true
\ No newline at end of file
diff --git a/python/tamaas/dumpers/__init__.py b/python/tamaas/dumpers/__init__.py
index f8c28fe..d9c4956 100644
--- a/python/tamaas/dumpers/__init__.py
+++ b/python/tamaas/dumpers/__init__.py
@@ -1,551 +1,549 @@
# -*- mode:python; coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
"""Dumpers for the class :py:class:`Model `."""
from pathlib import Path
from os import PathLike
import glob
import json
import io
import typing as ts
from collections.abc import Collection
import numpy as np
from .. import (
ModelDumper,
model_type,
mpi,
type_traits,
ModelFactory,
Model,
__version__,
)
from ._helper import (
step_dump,
directory_dump,
local_slice,
_is_surface_field,
_basic_types,
file_handler,
)
__all__ = [
"JSONDumper",
"FieldDumper",
"NumpyDumper",
]
FileType = ts.Union[str, PathLike, io.TextIOBase]
NameType = ts.Union[str, PathLike]
_reverse_trait_map = {
- 'model_type.' + t.__name__: mtype for mtype, t in type_traits.items()
+ 'model_type.' + t.__name__: mtype
+ for mtype, t in type_traits.items()
}
def _get_attributes(model: Model):
"""Get model attributes."""
return {
'model_type': str(model.type),
'system_size': model.system_size,
'discretization': model.global_shape,
'program': f"Tamaas {__version__}, DOI:10.21105/joss.02121",
}
def _create_model(attrs: ts.MutableMapping):
"""Create a model from attribute dictionary."""
mtype = _reverse_trait_map[attrs['model_type']]
# netcdf4 converts length-1 list attributes to scalars
for attr in ['system_size', 'discretization']:
if not isinstance(attrs[attr], Collection):
attrs[attr] = [attrs[attr]]
- return ModelFactory.createModel(mtype,
- attrs['system_size'],
+ return ModelFactory.createModel(mtype, attrs['system_size'],
attrs['discretization'])
class MPIIncompatibilityError(RuntimeError):
"""Raised when code is not meant to be executed in MPI environment."""
class ModelError(ValueError):
"""Raised when unexpected model is passed to a dumper with a state."""
class ComponentsError(ValueError):
"""Raised when an unexpected number of components is encountred."""
class _ModelJSONEncoder(json.JSONEncoder):
"""Encode a model to JSON."""
def default(self, obj):
"""Encode model."""
if isinstance(obj, Model):
model = obj
attrs = _get_attributes(model)
model_dict = {
'attrs': attrs,
'fields': {},
'operators': [],
}
for field in model:
model_dict['fields'][field] = model[field].tolist()
for op in model.operators:
model_dict['operators'].append(op)
return model_dict
return json.JSONEncoder.default(self, obj)
class JSONDumper(ModelDumper):
"""Dumper to JSON."""
def __init__(self, file_descriptor: FileType):
"""Construct with file handle."""
super(JSONDumper, self).__init__()
self.fd = file_descriptor
@file_handler('w')
def _dump_to_file(self, fd: FileType, model: Model):
json.dump(model, fd, cls=_ModelJSONEncoder)
def dump(self, model: Model):
"""Dump model."""
self._dump_to_file(self.fd, model)
@file_handler('r')
def read(self, fd: FileType):
"""Read model from file."""
properties = json.load(fd)
model = _create_model(properties['attrs'])
for name, field in properties['fields'].items():
v = np.asarray(field)
if model.type in _basic_types:
v = v.reshape(list(v.shape) + [1])
model[name] = v
return model
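# Hedged usage sketch: the function below is illustrative, not part of the
# API; it assumes a fully constructed Model and write access to the current
# directory. JSONDumper accepts both path-likes and open text handles.
def _example_json_roundtrip(model):
    """Illustrative only: dump to JSON, then rebuild a model from it."""
    JSONDumper('model.json').dump(model)  # same as JSONDumper(...) << model
    return JSONDumper('model.json').read('model.json')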
class FieldDumper(ModelDumper):
"""Abstract dumper for python classes using fields."""
postfix = ""
extension = ""
name_format = "{basename}{postfix}.{extension}"
def __init__(self, basename: NameType, *fields, **kwargs):
"""Construct with desired fields."""
super(FieldDumper, self).__init__()
self.basename = basename
self.fields: ts.List[str] = list(fields)
self.all_fields: bool = kwargs.get('all_fields', False)
def add_field(self, field: str):
"""Add another field to the dump."""
if field not in self.fields:
self.fields.append(field)
def _dump_to_file(self, file_descriptor: FileType, model: Model):
"""Dump to a file (path-like or file handle)."""
raise NotImplementedError()
def get_fields(self, model: Model):
"""Get the desired fields."""
if not self.all_fields:
requested_fields = self.fields
else:
requested_fields = list(model)
return {field: model[field] for field in requested_fields}
def dump(self, model: Model):
"""Dump model."""
self._dump_to_file(self.file_path, model)
def read(self, file_descriptor: FileType):
"""Read model from file."""
raise NotImplementedError(
f'read() method not implemented in {type(self).__name__}')
def read_sequence(self, glob_pattern):
"""Read models from a file sequence."""
return map(self.read, glob.iglob(glob_pattern))
@property
def file_path(self):
"""Get the default filename."""
return self.name_format.format(basename=self.basename,
postfix=self.postfix,
extension=self.extension)
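# Hedged extension sketch: the class below is hypothetical and only
# illustrates the contract; a concrete dumper supplies `extension` and
# `_dump_to_file`, while field selection and file naming are inherited
# from FieldDumper.
class _FlatTextDumper(FieldDumper):
    """Illustrative only: dump fields of equal size as text columns."""

    extension = 'txt'

    def _dump_to_file(self, file_descriptor, model):
        fields = self.get_fields(model)
        # Flatten each field to one column; assumes all fields have the
        # same number of entries
        data = np.column_stack([np.ravel(f) for f in fields.values()])
        np.savetxt(file_descriptor, data, header=' '.join(fields))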
@directory_dump('numpys')
@step_dump
class NumpyDumper(FieldDumper):
"""Dumper to compressed numpy files."""
extension = 'npz'
def _dump_to_file(self, file_descriptor: FileType, model: Model):
"""Save to compressed multi-field Numpy format."""
if mpi.size() > 1:
raise MPIIncompatibilityError("NumpyDumper does not function "
"at all in parallel")
np.savez_compressed(file_descriptor,
attrs=json.dumps(_get_attributes(model)),
**self.get_fields(model))
def read(self, file_descriptor: FileType):
"""Create model from Numpy file."""
data = np.load(file_descriptor, mmap_mode='r')
model = _create_model(json.loads(str(data['attrs'])))
for k, v in filter(lambda item: item[0] != 'attrs', data.items()):
if model.type in _basic_types:
v = v.reshape(list(v.shape) + [1])
model[k] = v
return model
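# Hedged usage sketch (serial only, per the MPI guard above): the decorators
# place numbered files under numpys/, e.g. numpys/sim_0000.npz. The function
# name and basename are illustrative.
def _example_numpy_roundtrip(model):
    """Illustrative only: compressed dump and read-back."""
    dumper = NumpyDumper('sim', 'traction', 'displacement')
    dumper << model  # writes numpys/sim_0000.npz
    return dumper.read('numpys/sim_0000.npz')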
try:
import h5py
__all__.append("H5Dumper")
@directory_dump('hdf5')
@step_dump
class H5Dumper(FieldDumper):
"""Dumper to HDF5 file format."""
extension = 'h5'
def _hdf5_args(self):
if mpi.size() > 1:
from mpi4py import MPI # noqa
mpi_args = dict(driver='mpio', comm=MPI.COMM_WORLD)
comp_args = {} # compression does not work in parallel
else:
mpi_args = {}
comp_args = dict(compression='gzip', compression_opts=7)
return mpi_args, comp_args
def _dump_to_file(self, file_descriptor: FileType, model: Model):
"""Save to HDF5 with metadata about the model."""
# Setup for MPI
if not h5py.get_config().mpi and mpi.size() > 1:
raise MPIIncompatibilityError("HDF5 does not have MPI support")
mpi_args, comp_args = self._hdf5_args()
with h5py.File(file_descriptor, 'w', **mpi_args) as handle:
# Writing data
for name, field in self.get_fields(model).items():
shape = list(field.shape)
if mpi.size() > 1:
xdim = 0 if _is_surface_field(field, model) else 1
shape[xdim] = mpi_args['comm'].allreduce(shape[xdim])
dset = handle.create_dataset(name, shape, field.dtype,
**comp_args)
dset[local_slice(field, model)] = field
# Writing metadata
for name, attr in _get_attributes(model).items():
handle.attrs[name] = attr
def read(self, file_descriptor: FileType):
"""Create model from HDF5 file."""
mpi_args, _ = self._hdf5_args()
with h5py.File(file_descriptor, 'r', **mpi_args) as handle:
model = _create_model(handle.attrs)
for k, v in handle.items():
if model.type in _basic_types:
v = np.asarray(v).reshape(list(v.shape) + [1])
- model[k] = v[local_slice(v, model)]
+ model[k] = v[local_slice(v, model)].copy()
return model
except ImportError:
pass
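# Hedged usage sketch: only meaningful when h5py imported successfully above.
# In serial runs the file is gzip-compressed; under MPI every rank executes
# the same calls and writes its local slice collectively. Names are
# illustrative.
def _example_h5_roundtrip(model):
    """Illustrative only: HDF5 dump and read-back."""
    dumper = H5Dumper('sim', 'traction', 'displacement')
    dumper << model  # writes hdf5/sim_0000.h5
    return dumper.read('hdf5/sim_0000.h5')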
try:
import uvw # noqa
__all__ += [
"UVWDumper",
"UVWGroupDumper",
]
@directory_dump('paraview')
@step_dump
class UVWDumper(FieldDumper):
"""Dumper to VTK files for elasto-plastic calculations."""
extension = 'vtr'
forbidden_fields = ['traction', 'gap']
def _dump_to_file(self, file_descriptor: FileType, model: Model):
"""Dump displacements, plastic deformations and stresses."""
if mpi.size() > 1:
raise MPIIncompatibilityError("UVWDumper does not function "
"properly in parallel")
bdim = len(model.boundary_shape)
# Local MPI size
lsize = model.shape
gsize = mpi.global_shape(model.boundary_shape)
gshape = gsize
if len(lsize) > bdim:
gshape = [model.shape[0]] + gshape
# Space coordinates
- coordinates = [np.linspace(0, L, N, endpoint=False)
- for L, N in zip(model.system_size, gshape)]
+ coordinates = [
+ np.linspace(0, L, N, endpoint=False)
+ for L, N in zip(model.system_size, gshape)
+ ]
# If the model has a subsurface domain, the z-coordinate is always first
dimension_indices = np.arange(bdim)
if len(lsize) > bdim:
dimension_indices += 1
dimension_indices = np.concatenate((dimension_indices, [0]))
coordinates[0] = \
np.linspace(0, model.system_size[0], gshape[0])
offset = np.zeros_like(dimension_indices)
offset[0] = mpi.local_offset(gsize)
rectgrid = uvw.RectilinearGrid if mpi.size() == 1 \
else uvw.parallel.PRectilinearGrid
# Creating rectilinear grid with correct order for components
- coordlist = [coordinates[i][o:o+lsize[i]]
- for i, o in zip(dimension_indices, offset)]
+ coordlist = [
+ coordinates[i][o:o + lsize[i]]
+ for i, o in zip(dimension_indices, offset)
+ ]
grid = rectgrid(
- file_descriptor, coordlist,
+ file_descriptor,
+ coordlist,
compression=True,
offsets=offset,
)
# Iterator over fields we want to dump
fields_it = filter(lambda t: t[0] not in self.forbidden_fields,
self.get_fields(model).items())
# We make fields periodic for visualization
for name, field in fields_it:
array = uvw.DataArray(field, dimension_indices, name)
grid.addPointData(array)
grid.write()
@directory_dump('paraview')
class UVWGroupDumper(FieldDumper):
"""Dumper to ParaViewData files."""
extension = 'pvd'
def __init__(self, basename: NameType, *fields, **kwargs):
"""Construct with desired fields."""
super(UVWGroupDumper, self).__init__(basename, *fields, **kwargs)
subdir = Path('paraview') / f'{basename}-VTR'
subdir.mkdir(parents=True, exist_ok=True)
self.uvw_dumper = UVWDumper(
- Path(f'{basename}-VTR') / basename, *fields, **kwargs
- )
+ Path(f'{basename}-VTR') / basename, *fields, **kwargs)
self.group = uvw.ParaViewData(self.file_path, compression=True)
def _dump_to_file(self, file_descriptor, model):
- self.group.addFile(
- self.uvw_dumper.file_path.replace('paraview/', ''),
- timestep=self.uvw_dumper.count
- )
+ self.group.addFile(self.uvw_dumper.file_path.replace(
+ 'paraview/', ''),
+ timestep=self.uvw_dumper.count)
self.group.write()
self.uvw_dumper.dump(model)
except ImportError:
pass
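# Hedged usage sketch: only meaningful when uvw imported successfully above,
# and serial only (see the MPI guard in UVWDumper). The group dumper
# maintains a .pvd index so ParaView reads the dumps as a time series.
def _example_uvw_time_series(model, steps=3):
    """Illustrative only: write several steps into one ParaView group."""
    dumper = UVWGroupDumper('sim', 'displacement')
    for _ in range(steps):
        dumper << model  # paraview/sim.pvd indexes paraview/sim-VTR/*.vtr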
-
try:
from netCDF4 import Dataset
__all__.append("NetCDFDumper")
@directory_dump('netcdf')
class NetCDFDumper(FieldDumper):
"""Dumper to netCDF4 files."""
extension = "nc"
time_dim = 'frame'
def _file_setup(self, grp, model: Model):
grp.createDimension(self.time_dim, None)
# Attributes
for k, v in _get_attributes(model).items():
grp.setncattr(k, v)
# Local dimensions
voigt_dim = type_traits[model.type].voigt
components = type_traits[model.type].components
self._vec = grp.createDimension('spatial', components)
self._tens = grp.createDimension('Voigt', voigt_dim)
self.model_info = model.global_shape, model.type
global_boundary_shape = mpi.global_shape(model.boundary_shape)
# Create boundary dimensions
- for label, size, length in zip(
- "xy",
- global_boundary_shape,
- model.boundary_system_size
- ):
+ for label, size, length in zip("xy", global_boundary_shape,
+ model.boundary_system_size):
grp.createDimension(label, size)
- coord = grp.createVariable(label, 'f8', (label,))
+ coord = grp.createVariable(label, 'f8', (label, ))
coord[:] = np.linspace(0, length, size, endpoint=False)
- self._create_variables(
- grp, model,
- lambda f: _is_surface_field(f[1], model),
- global_boundary_shape, "xy"
- )
+ self._create_variables(grp, model,
+ lambda f: _is_surface_field(f[1], model),
+ global_boundary_shape, "xy")
# Create volume dimension
if model.type in {model_type.volume_1d, model_type.volume_2d}:
size = model.shape[0]
grp.createDimension("z", size)
- coord = grp.createVariable("z", 'f8', ("z",))
+ coord = grp.createVariable("z", 'f8', ("z", ))
coord[:] = np.linspace(0, model.system_size[0], size)
self._create_variables(
- grp, model,
- lambda f: not _is_surface_field(f[1], model),
- model.global_shape, "zxy"
- )
+ grp, model, lambda f: not _is_surface_field(f[1], model),
+ model.global_shape, "zxy")
self.has_setup = True
def _set_collective(self, rootgrp):
if mpi.size() == 1:
return
for v in rootgrp.variables.values():
if self.time_dim in v.dimensions:
v.set_collective(True)
def _dump_to_file(self, file_descriptor: NameType, model: Model):
format = 'NETCDF4_CLASSIC'
mode = 'a' if Path(file_descriptor).is_file() \
and getattr(self, 'has_setup', False) else 'w'
- with Dataset(file_descriptor, mode, format=format,
+ with Dataset(file_descriptor,
+ mode,
+ format=format,
parallel=mpi.size() > 1) as rootgrp:
if rootgrp.dimensions == {}:
self._file_setup(rootgrp, model)
self._set_collective(rootgrp)
if self.model_info != (model.global_shape, model.type):
raise ModelError(f"Unexpected model {model}")
self._dump_generic(rootgrp, model)
- def _create_variables(self, grp, model, predicate,
- shape, dimensions):
+ def _create_variables(self, grp, model, predicate, shape, dimensions):
field_dim = len(shape)
fields = list(filter(predicate, self.get_fields(model).items()))
dim_labels = list(dimensions[:field_dim])
for label, data in fields:
local_dim = []
# If we have an extra component
if data.ndim > field_dim:
if data.shape[-1] == self._tens.size:
local_dim = [self._tens.name]
elif data.shape[-1] == self._vec.size:
local_dim = [self._vec.name]
else:
raise ComponentsError(
f"{label} has unexpected number of components "
f"({data.shape[-1]})")
- grp.createVariable(label, 'f8',
+ grp.createVariable(label,
+ 'f8',
[self.time_dim] + dim_labels + local_dim,
zlib=mpi.size() == 1)  # compress in serial runs only
def _dump_generic(self, grp, model):
fields = self.get_fields(model).items()
new_frame = len(grp.dimensions[self.time_dim])
for label, data in fields:
var = grp[label]
- slice_in_global = (new_frame,) + local_slice(data, model)
+ slice_in_global = (new_frame, ) + local_slice(data, model)
var[slice_in_global] = np.array(data, dtype=np.double)
@staticmethod
def _open_read(fd):
- return Dataset(fd, 'r', format='NETCDF4_CLASSIC')
+ return Dataset(fd,
+ 'r',
+ format='NETCDF4_CLASSIC',
+ parallel=mpi.size() > 1)
@staticmethod
def _create_model(rootgrp):
attrs = {k: rootgrp.getncattr(k) for k in rootgrp.ncattrs()}
return _create_model(attrs)
@staticmethod
def _set_model_fields(rootgrp, model, frame):
dims = rootgrp.dimensions.keys()
for k, v in filter(lambda item: item[0] not in dims,
rootgrp.variables.items()):
- v = v[-1, :]
+ v = v[frame, :]
if model.type in _basic_types:
v = np.asarray(v).reshape(list(v.shape) + [1])
- model[k] = v
+ model[k] = v[local_slice(v, model)].copy()
def read(self, file_descriptor: NameType):
"""Create model with last frame."""
with self._open_read(file_descriptor) as rootgrp:
model = self._create_model(rootgrp)
self._set_model_fields(rootgrp, model, -1)
return model
def read_sequence(self, file_descriptor: NameType):
with self._open_read(file_descriptor) as rootgrp:
model = self._create_model(rootgrp)
for frame in range(len(rootgrp.dimensions[self.time_dim])):
self._set_model_fields(rootgrp, model, frame)
yield model
-
except ImportError:
pass
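# Hedged usage sketch: only meaningful when netCDF4 imported successfully
# above. Unlike the step dumpers, NetCDFDumper appends frames along the
# 'frame' dimension of a single file. Note that read_sequence() reuses one
# model object, refilling its fields at every yield.
def _example_netcdf_frames(model):
    """Illustrative only: append two frames, then walk them."""
    dumper = NetCDFDumper('sim', 'traction')
    dumper << model  # frame 0 in netcdf/sim.nc
    dumper << model  # frame 1
    for frame_model in dumper.read_sequence('netcdf/sim.nc'):
        pass  # process each frame before the next one overwrites the fields
    return dumper.read('netcdf/sim.nc')  # last frame only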
diff --git a/python/tamaas/dumpers/_helper.py b/python/tamaas/dumpers/_helper.py
index fb55893..2b588ac 100644
--- a/python/tamaas/dumpers/_helper.py
+++ b/python/tamaas/dumpers/_helper.py
@@ -1,153 +1,151 @@
# -*- mode:python; coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Helper functions for dumpers
"""
from os import PathLike
from functools import wraps
from pathlib import Path
import io
import numpy as np
from .. import model_type, type_traits, mpi
__all__ = ["step_dump", "directory_dump"]
_basic_types = [t for t, trait in type_traits.items() if trait.components == 1]
def _is_surface_field(field, model):
def _to_global(shape):
if len(shape) == len(model.boundary_shape) + 1:
return mpi.global_shape(model.boundary_shape) + [shape[-1]]
else:
return mpi.global_shape(shape)
b_shapes = [list(model[name].shape) for name in model.boundary_fields]
shape = list(field.shape)
return any(shape == s for s in b_shapes) \
or any(shape == _to_global(s) for s in b_shapes)
def local_slice(field, model):
n = model.shape
bn = model.boundary_shape
gshape = mpi.global_shape(bn)
offsets = np.zeros_like(gshape)
offsets[0] = mpi.local_offset(gshape)
if not _is_surface_field(field, model) and len(n) > len(bn):
gshape = [n[0]] + gshape
offsets = np.concatenate(([0], offsets))
shape = bn if _is_surface_field(field, model) else n
if len(field.shape) > len(shape):
shape += field.shape[len(shape):]
- def sgen(pair):
- offset, size = pair
+ def sgen(offset, size):
return slice(offset, offset + size, None)
- def sgen_basic(pair):
- offset, size = pair
+ def sgen_basic(offset, size):
return slice(offset, offset + size)
slice_gen = sgen_basic if model.type in _basic_types else sgen
- return tuple(map(slice_gen, zip(offsets, shape)))
+ return tuple(map(slice_gen, offsets, shape))
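# Hedged illustration (hypothetical helper, not part of the module): in a
# serial run the offsets are zero and local_slice degenerates to full-range
# slices; under MPI the first spatial slice is shifted by the local offset.
def _example_local_slice():
    """Illustrative only: slices for a small serial basic_2d model."""
    import tamaas as tm
    model = tm.ModelFactory.createModel(tm.model_type.basic_2d,
                                        [1., 1.], [4, 4])
    # Serial run: returns (slice(0, 4), slice(0, 4))
    return local_slice(model['traction'], model)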
def step_dump(cls):
"""
Decorator that adds a step counter to a dumper
"""
orig_init = cls.__init__
orig_dump = cls.dump
@wraps(cls.__init__)
def __init__(obj, *args, **kwargs):
orig_init(obj, *args, **kwargs)
obj.count = 0
def postfix(obj):
return "_{:04d}".format(obj.count)
@wraps(cls.dump)
def dump(obj, *args, **kwargs):
orig_dump(obj, *args, **kwargs)
obj.count += 1
cls.__init__ = __init__
cls.dump = dump
cls.postfix = property(postfix)
return cls
def directory_dump(directory=""):
"Decorator for dumper in a directory"
directory = Path(directory)
def actual_decorator(cls):
orig_dump = cls.dump
orig_filepath = cls.file_path.fget
@wraps(cls.dump)
def dump(obj, *args, **kwargs):
if mpi.rank() == 0:
directory.mkdir(parents=True, exist_ok=True)
orig_dump(obj, *args, **kwargs)
@wraps(cls.file_path.fget)
def file_path(obj):
return str(directory / orig_filepath(obj))
cls.dump = dump
cls.file_path = property(file_path)
return cls
return actual_decorator
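# Hedged composition sketch: the toy class below is hypothetical and shows
# how the two decorators stack on anything exposing dump() and a file_path
# property, yielding numbered files inside a directory.
def _example_decorated_dumper():
    """Illustrative only: build and exercise a decorated toy dumper."""

    @directory_dump('dumps')
    @step_dump
    class ToyDumper:
        def __init__(self, basename):
            self.basename = basename

        @property
        def file_path(self):
            return f'{self.basename}{self.postfix}.txt'

        def dump(self, model):
            print('would write', self.file_path)

    toy = ToyDumper('example')
    toy.dump(None)  # would write dumps/example_0000.txt
    toy.dump(None)  # would write dumps/example_0001.txt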
def hdf5toVTK(inpath, outname):
"""Convert HDF5 dump of a model to VTK."""
from . import UVWDumper # noqa
from . import H5Dumper # noqa
UVWDumper(outname, all_fields=True) << H5Dumper("").read(inpath)
def file_handler(mode):
"""Decorate a function to accept path-like or file handles."""
def _handler(func):
@wraps(func)
def _wrapped(self, fd, *args, **kwargs):
if isinstance(fd, (str, PathLike)):
with open(fd, mode) as fd:
return _wrapped(self, fd, *args, **kwargs)
elif isinstance(fd, io.TextIOBase):
return func(self, fd, *args, **kwargs)
raise TypeError(
f"Expected a path-like or file handle, got {type(fd)}")
return _wrapped
return _handler
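# Hedged usage sketch: the class below is hypothetical; file_handler makes a
# method accept either a path-like (opened internally with the given mode)
# or an already-open text handle.
class _EchoWriter:
    """Illustrative only."""

    @file_handler('w')
    def write(self, fd, text):
        fd.write(text)

# Both forms are equivalent:
#   _EchoWriter().write('out.txt', 'hello')   # path: opened internally
#   with open('out.txt', 'w') as fd:
#       _EchoWriter().write(fd, 'hello')      # handle: used directly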
diff --git a/tests/mpi_routines.py b/tests/mpi_routines.py
index 785d676..5a7fbda 100644
--- a/tests/mpi_routines.py
+++ b/tests/mpi_routines.py
@@ -1,274 +1,308 @@
# -*- coding: utf-8 -*-
#
# Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
# Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import print_function
-import shutil
+import os
+import tempfile
import tamaas as tm
import numpy as np
from mpi4py import MPI
from numpy.linalg import norm
from conftest import HertzFixture
def make_surface(N):
spectrum = tm.Isopowerlaw2D()
spectrum.q0 = 2
spectrum.q1 = 2
spectrum.q2 = 16
spectrum.hurst = 0.8
generator = tm.SurfaceGeneratorRandomPhase2D(N)
generator.spectrum = spectrum
generator.random_seed = 1
return generator.buildSurface()
def mpi_surface_generator():
N = [128, 128]
tm.set_log_level(tm.LogLevel.debug)
seq_surface = None
comm = MPI.COMM_WORLD
print('[{}] {}'.format(comm.rank, comm.size))
with tm.mpi.sequential():
if comm.rank == 0:
seq_surface = make_surface(N)
surface = make_surface(N)
print('[{}] {}'.format(comm.rank, surface.shape))
recv = comm.gather(surface, root=0)
if comm.rank == 0:
gsurface = np.concatenate(recv)
if False:
import matplotlib.pyplot as plt
plt.imshow(seq_surface)
plt.colorbar()
plt.figure()
plt.imshow(gsurface)
plt.colorbar()
plt.show()
# I think the only reason this assert works is because the
# spectrum is compact in the Fourier domain -> surface is regular
assert np.all(seq_surface == gsurface)
def mpi_model_creation():
N = [20, 50, 50]
S = [1., 1., 1.]
comm = MPI.COMM_WORLD
def get_discretizations(model):
return model.shape, model.boundary_shape
model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S[1:], N[1:])
n, bn = get_discretizations(model)
n[0] = comm.allreduce(n[0])
bn[0] = comm.allreduce(bn[0])
assert n == N[1:] and bn == N[1:]
model = tm.ModelFactory.createModel(tm.model_type.volume_2d, S, N)
n, bn = get_discretizations(model)
n[1] = comm.allreduce(n[1])
bn[0] = comm.allreduce(bn[0])
assert n == N and bn == N[1:]
def mpi_polonsky_keer():
N = [1024, 1024]
S = [1., 1.]
load = 0.00001
comm = MPI.COMM_WORLD
hertz = HertzFixture(N[0], load)
surface = hertz.surface
tm.set_log_level(tm.LogLevel.debug)
def solve():
model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S, N)
model.E, model.nu = 1, 0
local_surface = tm.mpi.scatter(surface)
solver = tm.PolonskyKeerRey(model, local_surface, 1e-15)
print('Created solver')
solver.solve(load)
return np.copy(model['traction']), np.copy(model['displacement'])
tractions, displacements = map(tm.mpi.gather, solve())
if comm.rank == 0:
perror = norm(tractions - hertz.pressure) / norm(hertz.pressure)
derror = norm(displacements - hertz.displacement)\
/ norm(hertz.displacement)
if False:
print(perror, derror)
import matplotlib.pyplot as plt
plt.imshow(tractions - hertz.pressure)
plt.colorbar()
plt.show()
assert perror < 5e-3
assert derror < 8e-3
def mpi_polonsky_keer_compare():
N = [273, 273]
S = [1., 1.]
E, nu = 0.2, 0.3
load = 0.1
comm = MPI.COMM_WORLD
seq_tractions = None
rms = 0
with tm.mpi.sequential():
if comm.rank == 0:
model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S, N)
model.E, model.nu = E, nu
surface = make_surface(N)
rms = tm.Statistics2D.computeSpectralRMSSlope(surface)
surface /= rms
solver = tm.PolonskyKeerRey(model, surface, 1e-15)
solver.solve(load)
seq_tractions = np.copy(model.traction)
seq_area = tm.Statistics2D.contact(model.traction)
rms = comm.bcast(rms, root=0)
model = tm.ModelFactory.createModel(tm.model_type.basic_2d, S, N)
model.E, model.nu = E, nu
surface = make_surface(N) / rms
solver = tm.PolonskyKeerRey(model, surface, 1e-15)
solver.solve(load)
tractions = model['traction']
area = tm.Statistics2D.contact(tractions)
recv = comm.gather(tractions, root=0)
if comm.rank == 0:
tractions = np.concatenate(recv)
error = np.linalg.norm(seq_tractions - tractions) / seq_tractions.size
if False:
print(error)
import matplotlib.pyplot as plt
plt.imshow(seq_tractions - tractions)
plt.colorbar()
plt.show()
assert error < 1e-13
assert np.abs(seq_area - area) / seq_area < 1e-13
+def model_for_dump():
+ model_type = tm.model_type.volume_2d
+ discretization = [10, 2, 10]
+ flat_domain = [1, 1]
+ system_size = [0.5] + flat_domain
+
+ # Creation of model
+ model = tm.ModelFactory.createModel(model_type, system_size,
+ discretization)
+
+ model['displacement'][:] = MPI.COMM_WORLD.rank
+ model['traction'][:] = MPI.COMM_WORLD.rank
+
+ return model
+
+
def mpi_h5dump():
try:
from tamaas.dumpers import H5Dumper as Dumper
import h5py
except ImportError:
return
if not h5py.get_config().mpi:
print('Did not test H5Dumper with MPI')
return
- model_type = tm.model_type.volume_2d
- discretization = [10, 2, 10]
- flat_domain = [1, 1]
- system_size = [0.5] + flat_domain
-
- # Creation of model
- model = tm.ModelFactory.createModel(model_type,
- system_size,
- discretization)
-
- model['displacement'][:] = MPI.COMM_WORLD.rank
- model['traction'][:] = MPI.COMM_WORLD.rank
+ tmpdir = tempfile.mkdtemp() if MPI.COMM_WORLD.rank == 0 else None
+ os.chdir(MPI.COMM_WORLD.bcast(tmpdir, root=0))
+ print(os.getcwd())
+ model = model_for_dump()
dumper_helper = Dumper('test_mpi_dump', 'displacement', 'traction')
dumper_helper << model
- with h5py.File('hdf5/test_mpi_dump_0000.h5', 'r', driver='mpio',
+ with h5py.File('hdf5/test_mpi_dump_0000.h5',
+ 'r',
+ driver='mpio',
comm=MPI.COMM_WORLD) as handle:
assert np.all(handle['displacement'][:, 0, :] == 0)
assert np.all(handle['displacement'][:, 1, :] == 1)
assert np.all(handle['traction'][0, :] == 0)
assert np.all(handle['traction'][1, :] == 1)
rmodel = dumper_helper.read('hdf5/test_mpi_dump_0000.h5')
assert np.all(rmodel.displacement == MPI.COMM_WORLD.rank)
assert np.all(rmodel.traction == MPI.COMM_WORLD.rank)
- if tm.mpi.rank() == 0:
- shutil.rmtree('hdf5')
+
+def mpi_netcdfdump():
+ try:
+ from tamaas.dumpers import NetCDFDumper as Dumper
+ import netCDF4
+ except ImportError:
+ return
+
+ import subprocess
+ capture = subprocess.run(["nc-config", "--has-parallel4"],
+ capture_output=True)
+
+ if 'yes' not in capture.stdout.decode('ascii'):
+ print('Did not test NetCDF with MPI')
+ return
+
+ tmpdir = tempfile.mkdtemp() if MPI.COMM_WORLD.rank == 0 else None
+ os.chdir(MPI.COMM_WORLD.bcast(tmpdir, root=0))
+ print(os.getcwd())
+
+ model = model_for_dump()
+ dumper_helper = Dumper('test_mpi_dump', 'displacement', 'traction')
+ dumper_helper << model
+
+ rmodel = dumper_helper.read('netcdf/test_mpi_dump.nc')
+
+ assert np.all(rmodel.displacement.astype(int) == MPI.COMM_WORLD.rank)
+ assert np.all(rmodel.traction.astype(int) == MPI.COMM_WORLD.rank)
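# Hedged note: these dump routines assume an MPI launcher; a typical
# invocation (assuming an mpi4py-enabled build) would be something like
#   mpirun -n 2 python tests/mpi_routines.py
# with the desired routine called from the __main__ block below.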
def mpi_plastic_solve():
from conftest import UniformPlasticity
from tamaas.nonlinear_solvers import DFSANECXXSolver as Solver
patch_isotropic_plasticity = UniformPlasticity(tm.model_type.volume_2d,
- [1.] * 3,
- [4] * 3)
+ [1.] * 3, [4] * 3)
model = patch_isotropic_plasticity.model
residual = patch_isotropic_plasticity.residual
applied_pressure = 0.1
solver = Solver(residual)
solver.tolerance = 1e-15
pressure = model['traction'][..., 2]
pressure[:] = applied_pressure
solver.solve()
solver.updateState()
solution, normal = patch_isotropic_plasticity.solution(applied_pressure)
for key in solution:
error = norm(model[key] - solution[key]) / normal[key]
assert error < 2e-15
def mpi_flood_fill():
contact = np.zeros([10, 1], dtype=bool)
contact[:1, :] = True
contact[-1:, :] = True
clusters = tm.FloodFill.getClusters(contact, False)
if tm.mpi.rank() == 0:
assert len(clusters) == 3, "Wrong number of clusters"
assert [c.getArea() for c in clusters] == [1, 2, 1], "Wrong areas"
if __name__ == '__main__':
mpi_polonsky_keer_compare()