diff --git a/rrompy/hfengines/base/__init__.py b/rrompy/hfengines/base/__init__.py index 00f3c2e..437dac4 100644 --- a/rrompy/hfengines/base/__init__.py +++ b/rrompy/hfengines/base/__init__.py @@ -1,30 +1,30 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from .problem_engine_base import ProblemEngineBase -from .mixed_problem_engine_base import MixedProblemEngineBase +from .vector_problem_engine_base import VectorProblemEngineBase from .boundary_conditions import BoundaryConditions __all__ = [ 'ProblemEngineBase', - 'MixedProblemEngineBase', + 'VectorProblemEngineBase', 'BoundaryConditions' ] diff --git a/rrompy/hfengines/base/boundary_conditions.py b/rrompy/hfengines/base/boundary_conditions.py index 476510f..601422e 100644 --- a/rrompy/hfengines/base/boundary_conditions.py +++ b/rrompy/hfengines/base/boundary_conditions.py @@ -1,120 +1,125 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # +from fenics import SubDomain, AutoSubDomain from rrompy.utilities.base.types import GenExpr -from rrompy.utilities.base.fenics import bdrTrue, bdrFalse +from rrompy.utilities.base.fenics import bdrFalse __all__ = ['BoundaryConditions'] class BoundaryConditions: """ Boundary conditions manager. Attributes: DirichletBoundary: Callable returning True when on Dirichlet boundary. NeumannBoundary: Callable returning True when on Neumann boundary. RobinBoundary: Callable returning True when on Robin boundary. 
""" allowedKinds = ["Dirichlet", "Neumann", "Robin"] def __init__(self, kind : str = None): if kind is None: return kind = kind[0].upper() + kind[1:].lower() if kind in self.allowedKinds: getattr(self.__class__, kind + "Boundary", None).fset(self, "ALL") else: raise Exception("BC kind not recognized.") def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return self.name() def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) def _generalManagement(self, kind:str, value:GenExpr): if isinstance(value, (str,)): value = value.upper() if value.upper() == "ALL": self._complementaryManagementAll(kind) elif value.upper() == "REST": self._complementaryManagementRest(kind) else: raise Exception("Wildcard not recognized.") elif callable(value): + self._standardManagementCallable(kind, value) + elif isinstance(value, (SubDomain,)): self._standardManagement(kind, value) else: raise Exception(kind + "Boundary type not recognized.") def _complementaryManagementAll(self, kind:str): if kind not in self.allowedKinds: raise Exception("BC kind not recognized.") for k in self.allowedKinds: if k != kind: - getattr(self.__class__, k + "Boundary").fset(self, bdrFalse) - super().__setattr__("_" + kind + "Boundary", bdrTrue) - if hasattr(self, "_" + kind + "Rest"): - super().__delattr__("_" + kind + "Rest") + self._standardManagementCallable(k, bdrFalse) + self._complementaryManagementRest(kind) def _complementaryManagementRest(self, kind:str): if kind not in self.allowedKinds: raise Exception("BC kind not recognized.") otherBCs = [] for k in self.allowedKinds: if k != kind: if hasattr(self, "_" + k + "Rest"): - raise Exception("Only 1 'REST' wildcard can be specified.") + self._standardManagement(k, bdrFalse) otherBCs += [getattr(self, k + "Boundary")] def restCall(x, on_boundary): return (on_boundary - and not any([bc(x, on_boundary) for bc in otherBCs])) - super().__setattr__("_" + kind + "Boundary", restCall) + and not any([bc.inside(x, on_boundary) for bc in otherBCs])) + self._standardManagementCallable(kind, restCall) super().__setattr__("_" + kind + "Rest", 1) - def _standardManagement(self, kind:str, bc:callable): + def _standardManagementCallable(self, kind:str, bc:callable): + bcSD = AutoSubDomain(bc) + self._standardManagement(kind, bcSD) + + def _standardManagement(self, kind:str, bc:SubDomain): super().__setattr__("_" + kind + "Boundary", bc) if hasattr(self, "_" + kind + "Rest"): super().__delattr__("_" + kind + "Rest") @property def DirichletBoundary(self): """Function handle to DirichletBoundary.""" return self._DirichletBoundary @DirichletBoundary.setter def DirichletBoundary(self, DirichletBoundary): self._generalManagement("Dirichlet", DirichletBoundary) @property def NeumannBoundary(self): """Function handle to NeumannBoundary.""" return self._NeumannBoundary @NeumannBoundary.setter def NeumannBoundary(self, NeumannBoundary): self._generalManagement("Neumann", NeumannBoundary) @property def RobinBoundary(self): """Function handle to RobinBoundary.""" return self._RobinBoundary @RobinBoundary.setter def RobinBoundary(self, RobinBoundary): self._generalManagement("Robin", RobinBoundary) diff --git a/rrompy/hfengines/base/mixed_problem_engine_base.py b/rrompy/hfengines/base/mixed_problem_engine_base.py deleted file mode 100644 index e09384c..0000000 --- a/rrompy/hfengines/base/mixed_problem_engine_base.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (C) 2018 by the RROMPy authors -# -# This file is part of RROMPy. 
-# -# RROMPy is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# RROMPy is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with RROMPy. If not, see . -# - -import numpy as np -import fenics as fen -from scipy.sparse import csr_matrix -from matplotlib import pyplot as plt -from rrompy.utilities.base.types import Np1D, strLst -from .problem_engine_base import ProblemEngineBase -from rrompy.utilities.base import purgeList, getNewFilename, verbosityDepth - -__all__ = ['MixedProblemEngineBase'] - -class MixedProblemEngineBase(ProblemEngineBase): - """ - Generic solver for parametric mixed problems. - - Attributes: - verbosity: Verbosity level. - BCManager: Boundary condition manager. - V: Real mixed FE space. - u: Generic mixed trial functions for variational form evaluation. - v: Generic mixed test functions for variational form evaluation. - As: Scipy sparse array representation (in CSC format) of As. - bs: Numpy array representation of bs. - energyNormMatrix: Scipy sparse matrix representing inner product over - V. - bsmu: Mu value of last bs evaluation. - liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. - liftedDirichletDatum: Dofs of Dirichlet datum lifting. - mu0BC: Mu value of last Dirichlet datum lifting. - degree_threshold: Threshold for ufl expression interpolation degree. - """ - - nAs, nbs = 1, 1 - functional = lambda self, u: 0. - - def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10): - super().__init__(degree_threshold = degree_threshold, - verbosity = verbosity) - V1 = fen.FiniteElement("P", fen.triangle, 1) - V2 = fen.FiniteElement("R", fen.triangle, 0) - element = fen.MixedElement([V1, V2]) - self.V = fen.FunctionSpace(fen.UnitSquareMesh(10, 10), element) - self.primalIndices = [0] - - def buildEnergyNormForm(self): # L2 primal - """ - Build sparse matrix (in CSR format) representative of scalar product. - """ - if self.verbosity >= 20: - verbosityDepth("INIT", "Assembling energy matrix.", end = "") - a = 0. - for j in self.primalIndices: - a = a + fen.dot(self.u[j], self.v[j]) - normMatFen = fen.assemble(a * fen.dx) - normMat = fen.as_backend_type(normMatFen).mat() - self.energyNormMatrix = csr_matrix(normMat.getValuesCSR()[::-1], - shape = normMat.size) - if self.verbosity >= 20: - verbosityDepth("DEL", " Done.", inline = True) - - def plot(self, u:Np1D, name : strLst = "u", save : str = None, - what : strLst = 'all', saveFormat : str = "eps", - saveDPI : int = 100, **figspecs): - """ - Do some nice plots of the complex-valued function with given dofs. - - Args: - u: numpy complex array with function dofs. - name(optional): Name to be shown as title of the plots. Defaults to - 'u'. - save(optional): Where to save plot(s). Defaults to None, i.e. no - saving. - what(optional): Which plots to do. If list, can contain 'ABS', - 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. - Defaults to 'ALL'. - saveFormat(optional): Format for saved plot(s). Defaults to "eps". - saveDPI(optional): DPI for saved plot(s). Defaults to 100. 
- figspecs(optional key args): Optional arguments for matplotlib - figure creation. - """ - if isinstance(what, (str,)): - if what.upper() == 'ALL': - what = ['ABS', 'PHASE', 'REAL', 'IMAG'] - else: - what = [what] - what = purgeList(what, ['ABS', 'PHASE', 'REAL', 'IMAG'], - listname = self.name() + ".what", baselevel = 1) - if len(what) == 0: return - if 'figsize' not in figspecs.keys(): - figspecs['figsize'] = (13. * len(what) / 4, 3) - if isinstance(name, (str,)): - name = ["{}_sub{}".format(name, j + 1)\ - for j in range(self.V.num_sub_spaces())] - - for j in range(self.V.num_sub_spaces()): - subplotcode = 100 + len(what) * 10 - Vj = self.V.sub(j) - dofs = Vj.dofmap().dofs() - uj = u[dofs] - plt.figure(**figspecs) - Vj = Vj.collapse() - plt.jet() - if 'ABS' in what: - uAb = fen.Function(Vj) - uAb.vector()[:] = np.array(np.abs(uj), dtype = float) - subplotcode = subplotcode + 1 - plt.subplot(subplotcode) - p = fen.plot(uAb, title = "|{}|".format(name[j])) - plt.colorbar(p) - if 'PHASE' in what: - uPh = fen.Function(Vj) - uPh.vector()[:] = np.array(np.angle(uj), dtype = float) - subplotcode = subplotcode + 1 - plt.subplot(subplotcode) - p = fen.plot(uPh, title = "phase({})".format(name[j])) - plt.colorbar(p) - if 'REAL' in what: - uRe = fen.Function(Vj) - uRe.vector()[:] = np.array(np.real(uj), dtype = float) - subplotcode = subplotcode + 1 - plt.subplot(subplotcode) - p = fen.plot(uRe, title = "Re({})".format(name[j])) - plt.colorbar(p) - if 'IMAG' in what: - uIm = fen.Function(Vj) - uIm.vector()[:] = np.array(np.imag(uj), dtype = float) - subplotcode = subplotcode + 1 - plt.subplot(subplotcode) - p = fen.plot(uIm, title = "Im({})".format(name[j])) - plt.colorbar(p) - if save is not None: - save = save.strip() - plt.savefig(getNewFilename("{}_sub{}_fig_".format(save, j + 1), - saveFormat), - format = saveFormat, dpi = saveDPI) - plt.show() - plt.close() diff --git a/rrompy/hfengines/base/problem_engine_base.py b/rrompy/hfengines/base/problem_engine_base.py index 452d221..8fbc709 100644 --- a/rrompy/hfengines/base/problem_engine_base.py +++ b/rrompy/hfengines/base/problem_engine_base.py @@ -1,368 +1,451 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod import fenics as fen import numpy as np from scipy.sparse import csr_matrix import scipy.sparse as scsp import scipy.sparse.linalg as scspla from matplotlib import pyplot as plt from rrompy.utilities.base.types import (Np1D, Np2D, ScOp, strLst, FenFunc, Tuple, List) from rrompy.utilities.base import purgeList, getNewFilename, verbosityDepth from .boundary_conditions import BoundaryConditions __all__ = ['ProblemEngineBase'] class ProblemEngineBase: """ Generic solver for parametric problems. Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real FE space. u: Generic trial functions for variational form evaluation. 
v: Generic test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. energyNormMatrix: Scipy sparse matrix representing inner product over V. bsmu: Mu value of last bs evaluation. liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. liftedDirichletDatum: Dofs of Dirichlet datum lifting. mu0BC: Mu value of last Dirichlet datum lifting. degree_threshold: Threshold for ufl expression interpolation degree. """ nAs, nbs = 1, 1 functional = lambda self, u: 0. def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10): self.BCManager = BoundaryConditions("Dirichlet") self.V = fen.FunctionSpace(fen.UnitSquareMesh(10, 10), "P", 1) self.verbosity = verbosity self.resetAs() self.resetbs() self.bsmu = np.nan self.liftDirichletDatamu = np.nan self.mu0BC = np.nan self.degree_threshold = degree_threshold def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return self.name() def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) def __dir_base__(self): return [x for x in self.__dir__() if x[:2] != "__"] @property def V(self): """Value of V.""" return self._V @V.setter def V(self, V): self.resetAs() self.resetbs() if not type(V).__name__ == 'FunctionSpace': raise Exception("V type not recognized.") self._V = V self.u = fen.TrialFunction(V) self.v = fen.TestFunction(V) def innerProduct(self, u:Np2D, v:Np2D, onlyDiag : bool = False) -> Np2D: """Hilbert space scalar product.""" if not hasattr(self, "energyNormMatrix"): self.buildEnergyNormForm() if onlyDiag: return np.sum(self.energyNormMatrix.dot(u) * v.conj(), axis = 0) return v.conj().T.dot(self.energyNormMatrix.dot(u)) def buildEnergyNormForm(self): # L2 """ Build sparse matrix (in CSR format) representative of scalar product. 
""" if self.verbosity >= 20: verbosityDepth("INIT", "Assembling energy matrix.", end = "") normMatFen = fen.assemble(fen.dot(self.u, self.v) * fen.dx) normMat = fen.as_backend_type(normMatFen).mat() self.energyNormMatrix = csr_matrix(normMat.getValuesCSR()[::-1], shape = normMat.size) if self.verbosity >= 20: verbosityDepth("DEL", " Done.", inline = True) def norm(self, u:Np2D) -> Np1D: return np.abs(self.innerProduct(u, u, onlyDiag = True)) ** .5 def rescaling(self, x:Np1D) -> Np1D: """Rescaling in parameter dependence.""" return x def rescalingInv(self, x:Np1D) -> Np1D: """Inverse rescaling in parameter dependence.""" return x def checkAInBounds(self, der : int = 0): """Check if derivative index is oob for operator of linear system.""" if der < 0 or der >= self.nAs: d = self.V.dim() return scsp.csr_matrix((np.zeros(0), np.zeros(0), np.zeros(d + 1)), shape = (d, d), dtype = np.complex) def checkbInBounds(self, der : int = 0, homogeneized : bool = False): """Check if derivative index is oob for RHS of linear system.""" if der < 0 or der >= max(self.nbs, self.nAs * homogeneized): return np.zeros(self.V.dim(), dtype = np.complex) def setDirichletDatum(self, mu:complex): """Set Dirichlet datum if parametric.""" if hasattr(self, "liftedDirichletDatum"): self.liftDirichletDatamu = mu def liftDirichletData(self, mu:complex) -> Np1D: """Lift Dirichlet datum.""" self.setDirichletDatum(mu) if not np.isclose(self.liftDirichletDatamu, mu): try: liftRe = fen.interpolate(self.DirichletDatum[0], self.V) except: liftRe = fen.project(self.DirichletDatum[0], self.V) try: liftIm = fen.interpolate(self.DirichletDatum[1], self.V) except: liftIm = fen.project(self.DirichletDatum[1], self.V) self.liftedDirichletDatum = (np.array(liftRe.vector()) + 1.j * np.array(liftIm.vector())) return self.liftedDirichletDatum def resetAs(self): """Reset (derivatives of) operator of linear system.""" self.As = [None] * self.nAs def resetbs(self): """Reset (derivatives of) RHS of linear system.""" self.bs = {True: [None] * max(self.nbs, self.nAs), False: [None] * self.nbs} def reduceQuadratureDegree(self, fun:FenFunc, name:str): """Check whether to reduce compiler parameters to degree threshold.""" if not np.isinf(self.degree_threshold): from ufl.algorithms.estimate_degrees import ( estimate_total_polynomial_degree as ETPD) try: deg = ETPD(fun) except: return False if deg > self.degree_threshold: if self.verbosity >= 15: verbosityDepth("MAIN", ("Reducing quadrature degree from " "{} to {} for {}.").format( deg, self.degree_threshold, name)) return True return False def iterReduceQuadratureDegree(self, funsNames:List[Tuple[FenFunc, str]]): """ Iterate reduceQuadratureDegree over list and define reduce compiler parameters. """ if funsNames is not None: for fun, name in funsNames: if self.reduceQuadratureDegree(fun, name): return {"quadrature_degree" : self.degree_threshold} return {} @abstractmethod def A(self, mu:complex, der : int = 0) -> ScOp: """Assemble (derivative of) operator of linear system.""" Anull = self.checkAInBounds(der) if Anull is not None: return Anull if self.As[der] is None: self.As[der] = 0. return self.As[der] @abstractmethod def b(self, mu:complex, der : int = 0, homogeneized : bool = False) -> Np1D: """Assemble (derivative of) RHS of linear system.""" bnull = self.checkbInBounds(der, homogeneized) if bnull is not None: return bnull if self.bs[homogeneized][der] is None: self.bs[homogeneized][der] = 0. return self.bs[homogeneized][der] def affineBlocksA(self, mu : complex = 0.) 
-> Tuple[List[Np1D], callable]: """Assemble affine blocks of operator of linear system.""" def lambdas(x, j): if j == 0: return np.ones(np.size(x)) if j in range(1, self.nAs): return np.power(self.rescaling(x) - self.rescaling(mu), j) raise Exception("Wrong j value.") As = [None] * self.nAs for j in range(self.nAs): As[j] = self.A(mu, j) return As, lambdas def affineBlocksb(self, mu : complex = 0., homogeneized : bool = False)\ -> Tuple[List[Np1D], callable]: """Assemble affine blocks of RHS of linear system.""" def lambdas(x, j): if j == 0: return np.ones(np.size(x)) if j in range(1, self.nbsEff): return np.power(self.rescaling(x) - self.rescaling(mu), j) raise Exception("Wrong j value.") if homogeneized: self.nbsEff = max(self.nAs, self.nbs) else: self.nbsEff = self.nbs bs = [None] * self.nbsEff for j in range(self.nbsEff): bs[j] = self.b(mu, j, homogeneized) return bs, lambdas def solve(self, mu:complex, RHS : Np1D = None, homogeneized : bool = False) -> Np1D: """ Find solution of linear system. Args: mu: parameter value. RHS: RHS of linear system. If None, defaults to that of parametric system. Defaults to None. """ A = self.A(mu) if RHS is None: RHS = self.b(mu, 0, homogeneized) return scspla.spsolve(A, RHS) def residual(self, u:Np1D, mu:complex, homogeneized : bool = False) -> Np1D: """ Find residual of linear system for given approximate solution. Args: u: numpy complex array with function dofs. If None, set to 0. mu: parameter value. """ A = self.A(mu) RHS = self.b(mu, 0, homogeneized) if u is None: return RHS return RHS - A.dot(u) def plot(self, u:Np1D, name : str = "u", save : str = None, what : strLst = 'all', saveFormat : str = "eps", saveDPI : int = 100, **figspecs): """ Do some nice plots of the complex-valued function with given dofs. Args: u: numpy complex array with function dofs. name(optional): Name to be shown as title of the plots. Defaults to 'u'. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. what(optional): Which plots to do. If list, can contain 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ if isinstance(what, (str,)): if what.upper() == 'ALL': what = ['ABS', 'PHASE', 'REAL', 'IMAG'] else: what = [what] what = purgeList(what, ['ABS', 'PHASE', 'REAL', 'IMAG'], listname = self.name() + ".what", baselevel = 1) if len(what) == 0: return if 'figsize' not in figspecs.keys(): figspecs['figsize'] = (13. 
* len(what) / 4, 3) subplotcode = 100 + len(what) * 10 plt.figure(**figspecs) plt.jet() if 'ABS' in what: uAb = fen.Function(self.V) - uAb.vector()[:] = np.array(np.abs(u), dtype = float) + uAb.vector().set_local(np.abs(u)) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uAb, title = "|{0}|".format(name)) plt.colorbar(p) if 'PHASE' in what: uPh = fen.Function(self.V) - uPh.vector()[:] = np.array(np.angle(u), dtype = float) + uPh.vector().set_local(np.angle(u)) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uPh, title = "phase({0})".format(name)) plt.colorbar(p) if 'REAL' in what: uRe = fen.Function(self.V) - uRe.vector()[:] = np.array(np.real(u), dtype = float) + uRe.vector().set_local(np.real(u)) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uRe, title = "Re({0})".format(name)) plt.colorbar(p) if 'IMAG' in what: uIm = fen.Function(self.V) - uIm.vector()[:] = np.array(np.imag(u), dtype = float) + uIm.vector().set_local(np.imag(u)) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uIm, title = "Im({0})".format(name)) plt.colorbar(p) if save is not None: save = save.strip() plt.savefig(getNewFilename("{}_fig_".format(save), saveFormat), format = saveFormat, dpi = saveDPI) plt.show() plt.close() def plotmesh(self, name : str = "Mesh", save : str = None, saveFormat : str = "eps", saveDPI : int = 100, **figspecs): """ Do a nice plot of the mesh. Args: u: numpy complex array with function dofs. name(optional): Name to be shown as title of the plots. Defaults to 'u'. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ plt.figure(**figspecs) fen.plot(self.V.mesh()) if save is not None: save = save.strip() plt.savefig(getNewFilename("{}_msh_".format(save), saveFormat), format = saveFormat, dpi = saveDPI) plt.show() plt.close() + def outParaview(self, u:Np1D, name : str = "u", filename : str = "out", + time : float = 0., what : strLst = 'all', + forceNewFile : bool = True, filePW = None): + """ + Output complex-valued function with given dofs to ParaView file. + + Args: + u: numpy complex array with function dofs. + name(optional): Base name to be used for data output. + filename(optional): Name of output file. + time(optional): Timestamp. + what(optional): Which plots to do. If list, can contain 'MESH', + 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard + 'ALL'. Defaults to 'ALL'. + forceNewFile(optional): Whether to create new output file. + filePW(optional): Fenics File entity (for time series). 
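The two ParaView exporters added after the plotting routines can be driven directly from a complex dof vector; a small sketch, with a base engine and random dofs standing in for an actual solution:

import numpy as np
from rrompy.hfengines.base import ProblemEngineBase

engine = ProblemEngineBase()
ndofs = engine.V.dim()
u = np.random.randn(ndofs) + 1.j * np.random.randn(ndofs)
# Frequency-domain output of the requested quantities to a single .pvd file.
engine.outParaview(u, name = "u", filename = "u_snapshot", what = ['REAL', 'IMAG'])
# Time-domain animation of Re(u * exp(-i omega t)), sampled over one period.
engine.outParaviewTimeDomain(u, omega = 2. * np.pi, filename = "u_movie")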
+ """ + if isinstance(what, (str,)): + if what.upper() == 'ALL': + what = ['MESH', 'ABS', 'PHASE', 'REAL', 'IMAG'] + else: + what = [what] + what = purgeList(what, ['MESH', 'ABS', 'PHASE', 'REAL', 'IMAG'], + listname = self.name() + ".what", baselevel = 1) + if len(what) == 0: return + + if not filePW: + if forceNewFile: + filePW = fen.File(getNewFilename(filename, "pvd")) + else: + filePW = fen.File("{}.pvd".format(filename)) + if what == ['MESH']: + filePW << (self.V.mesh(), time) + if 'ABS' in what: + uAb = fen.Function(self.V, name = "{}_ABS".format(name)) + uAb.vector().set_local(np.abs(u)) + filePW << (uAb, time) + if 'PHASE' in what: + uPh = fen.Function(self.V, name = "{}_PHASE".format(name)) + uPh.vector().set_local(np.angle(u)) + filePW << (uPh, time) + if 'REAL' in what: + uRe = fen.Function(self.V, name = "{}_REAL".format(name)) + uRe.vector().set_local(np.real(u)) + filePW << (uRe, time) + if 'IMAG' in what: + uIm = fen.Function(self.V, name = "{}_IMAG".format(name)) + uIm.vector().set_local(np.imag(u)) + filePW << (uIm, time) + return filePW + + def outParaviewTimeDomain(self, u:Np1D, omega:float, + timeFinal : float = None, + periodResolution : int = 20, name : str = "u", + filename : str = "out", + forceNewFile : bool = True): + """ + Output complex-valued function with given dofs to ParaView file, + converted to time domain. + + Args: + u: numpy complex array with function dofs. + omega: frequency. + timeFinal(optional): final time of simulation. + periodResolution(optional): number of time steps per period. + name(optional): Base name to be used for data output. + filename(optional): Name of output file. + forceNewFile(optional): Whether to create new output file. + """ + if forceNewFile: + filePW = fen.File(getNewFilename(filename, "pvd")) + else: + filePW = fen.File("{}.pvd".format(filename)) + t = 0. + dt = 2. * np.pi / omega / periodResolution + if timeFinal is None: timeFinal = 2. * np.pi / omega - dt + for j in range(int(timeFinal / dt) + 1): + ut = fen.Function(self.V, name = name) + ut.vector().set_local(np.real(u) * np.cos(omega * t) + + np.imag(u) * np.sin(omega * t)) + filePW << (ut, t) + t += dt + return filePW diff --git a/rrompy/hfengines/base/vector_problem_engine_base.py b/rrompy/hfengines/base/vector_problem_engine_base.py new file mode 100644 index 0000000..ceecc28 --- /dev/null +++ b/rrompy/hfengines/base/vector_problem_engine_base.py @@ -0,0 +1,194 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import fenics as fen +import numpy as np +from matplotlib import pyplot as plt +from rrompy.utilities.base.types import Np1D, strLst +from rrompy.utilities.base import purgeList, getNewFilename +from .problem_engine_base import ProblemEngineBase + +__all__ = ['VectorProblemEngineBase'] + +class VectorProblemEngineBase(ProblemEngineBase): + """ + Generic solver for parametric vector problems. 
+ + Attributes: + verbosity: Verbosity level. + BCManager: Boundary condition manager. + V: Real FE space. + u: Generic trial functions for variational form evaluation. + v: Generic test functions for variational form evaluation. + As: Scipy sparse array representation (in CSC format) of As. + bs: Numpy array representation of bs. + energyNormMatrix: Scipy sparse matrix representing inner product over + V. + bsmu: Mu value of last bs evaluation. + liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. + liftedDirichletDatum: Dofs of Dirichlet datum lifting. + mu0BC: Mu value of last Dirichlet datum lifting. + degree_threshold: Threshold for ufl expression interpolation degree. + """ + + nAs, nbs = 1, 1 + functional = lambda self, u: 0. + + def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10): + super().__init__(degree_threshold = degree_threshold, + verbosity = verbosity) + self.V = fen.VectorFunctionSpace(fen.UnitSquareMesh(10, 10), "P", 1) + + def plot(self, u:Np1D, name : str = "u", save : str = None, + what : strLst = 'all', saveFormat : str = "eps", + saveDPI : int = 100, **figspecs): + """ + Do some nice plots of the complex-valued function with given dofs. + + Args: + u: numpy complex array with function dofs. + name(optional): Name to be shown as title of the plots. Defaults to + 'u'. + save(optional): Where to save plot(s). Defaults to None, i.e. no + saving. + what(optional): Which plots to do. If list, can contain 'ABS', + 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. + Defaults to 'ALL'. + saveFormat(optional): Format for saved plot(s). Defaults to "eps". + saveDPI(optional): DPI for saved plot(s). Defaults to. + figspecs(optional key args): Optional arguments for matplotlib + figure creation. + """ + if isinstance(what, (str,)): + if what.upper() == 'ALL': + what = ['ABS', 'PHASE', 'REAL', 'IMAG'] + else: + what = [what] + what = purgeList(what, ['ABS', 'PHASE', 'REAL', 'IMAG'], + listname = self.name() + ".what", baselevel = 1) + if 'figsize' not in figspecs.keys(): + figspecs['figsize'] = (13. * max(len(what), 1) / 4, 3) + + if len(what) > 0: + for j in range(self.V.num_sub_spaces()): + subplotcode = 100 + len(what) * 10 + II = self.V.sub(j).dofmap().dofs() + Vj = self.V.sub(j).collapse() + plt.figure(**figspecs) + plt.jet() + if 'ABS' in what: + uAb = fen.Function(Vj) + uAb.vector().set_local(np.abs(u[II])) + subplotcode = subplotcode + 1 + plt.subplot(subplotcode) + p = fen.plot(uAb, title = "|{}_comp{}|".format(name, j)) + plt.colorbar(p) + if 'PHASE' in what: + uPh = fen.Function(Vj) + uPh.vector().set_local(np.angle(u[II])) + subplotcode = subplotcode + 1 + plt.subplot(subplotcode) + p = fen.plot(uPh, title = "phase({}_comp{})".format(name, + j)) + plt.colorbar(p) + if 'REAL' in what: + uRe = fen.Function(Vj) + uRe.vector().set_local(np.real(u[II])) + subplotcode = subplotcode + 1 + plt.subplot(subplotcode) + p = fen.plot(uRe, title = "Re({}_comp{})".format(name, j)) + plt.colorbar(p) + if 'IMAG' in what: + uIm = fen.Function(Vj) + uIm.vector().set_local(np.imag(u[II])) + subplotcode = subplotcode + 1 + plt.subplot(subplotcode) + p = fen.plot(uIm, title = "Im({}_comp{})".format(name, j)) + plt.colorbar(p) + if save is not None: + save = save.strip() + plt.savefig(getNewFilename("{}_comp{}_fig_".format(save, j), + saveFormat), + format = saveFormat, dpi = saveDPI) + plt.show() + plt.close() + try: + if len(what) > 1: + figspecs['figsize'] = (2. 
/ len(what) * figspecs['figsize'][0], + figspecs['figsize'][1]) + elif len(what) == 0: + figspecs['figsize'] = (2. * figspecs['figsize'][0], + figspecs['figsize'][1]) + if len(what) == 0 or 'ABS' in what or 'REAL' in what: + uVRe = fen.Function(self.V) + uVRe.vector().set_local(np.real(u)) + plt.figure(**figspecs) + plt.jet() + p = fen.plot(uVRe, title = "{}_Re".format(name), + mode = "displacement") + plt.colorbar(p) + if save is not None: + save = save.strip() + plt.savefig(getNewFilename("{}_disp_Re_fig_".format(save), + saveFormat), + format = saveFormat, dpi = saveDPI) + plt.show() + plt.close() + if 'ABS' in what or 'IMAG' in what: + uVIm = fen.Function(self.V) + uVIm.vector().set_local(np.imag(u)) + plt.figure(**figspecs) + plt.jet() + p = fen.plot(uVIm, title = "{}_Im".format(name), + mode = "displacement") + plt.colorbar(p) + if save is not None: + save = save.strip() + plt.savefig(getNewFilename("{}_disp_Im_fig_".format(save, j), + saveFormat), + format = saveFormat, dpi = saveDPI) + plt.show() + plt.close() + except: + pass + + def plotmesh(self, name : str = "Mesh", save : str = None, + saveFormat : str = "eps", saveDPI : int = 100, **figspecs): + """ + Do a nice plot of the mesh. + + Args: + u: numpy complex array with function dofs. + name(optional): Name to be shown as title of the plots. Defaults to + 'u'. + save(optional): Where to save plot(s). Defaults to None, i.e. no + saving. + saveFormat(optional): Format for saved plot(s). Defaults to "eps". + saveDPI(optional): DPI for saved plot(s). Defaults to 100. + figspecs(optional key args): Optional arguments for matplotlib + figure creation. + """ + plt.figure(**figspecs) + fen.plot(self.V.mesh()) + if save is not None: + save = save.strip() + plt.savefig(getNewFilename("{}_msh_".format(save), saveFormat), + format = saveFormat, dpi = saveDPI) + plt.show() + plt.close() + diff --git a/rrompy/hfengines/linear_problem/laplace_base_problem_engine.py b/rrompy/hfengines/linear_problem/laplace_base_problem_engine.py index 95f39fa..34751b5 100644 --- a/rrompy/hfengines/linear_problem/laplace_base_problem_engine.py +++ b/rrompy/hfengines/linear_problem/laplace_base_problem_engine.py @@ -1,320 +1,314 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np import scipy.sparse as scsp import fenics as fen from rrompy.hfengines.base.problem_engine_base import ProblemEngineBase from rrompy.utilities.base.types import Np1D, ScOp from rrompy.utilities.base.fenics import fenZERO, fenONE from rrompy.utilities.base import verbosityDepth __all__ = ['LaplaceBaseProblemEngine'] class LaplaceBaseProblemEngine(ProblemEngineBase): """ Solver for generic Laplace problems. - \nabla \cdot (a \nabla u) = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real FE space. 
u: Generic trial functions for variational form evaluation. v: Generic test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. energyNormMatrix: Scipy sparse matrix representing inner product over V. bsmu: Mu value of last bs evaluation. liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. liftedDirichletDatum: Dofs of Dirichlet datum lifting. mu0BC: Mu value of last Dirichlet datum lifting. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. diffusivity: Value of a. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). A0: Scipy sparse array representation (in CSC format) of A0. b0: Numpy array representation of b0. dsToBeSet: Whether ds needs to be set. """ def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10): super().__init__(degree_threshold = degree_threshold, verbosity = verbosity) self.omega = 0. self.diffusivity = fenONE self.forcingTerm = fenZERO self.DirichletDatum = fenZERO self.NeumannDatum = fenZERO self.RobinDatumG = fenZERO self.RobinDatumH = fenZERO @property def V(self): """Value of V.""" return self._V @V.setter def V(self, V): ProblemEngineBase.V.fset(self, V) self.dsToBeSet = True @property def diffusivity(self): """Value of a.""" return self._diffusivity @diffusivity.setter def diffusivity(self, diffusivity): self.resetAs() if not isinstance(diffusivity, (list, tuple,)): diffusivity = [diffusivity, fenZERO] self._diffusivity = diffusivity @property def forcingTerm(self): """Value of f.""" return self._forcingTerm @forcingTerm.setter def forcingTerm(self, forcingTerm): self.resetbs() if not isinstance(forcingTerm, (list, tuple,)): forcingTerm = [forcingTerm, fenZERO] self._forcingTerm = forcingTerm @property def DirichletDatum(self): """Value of u0.""" return self._DirichletDatum @DirichletDatum.setter def DirichletDatum(self, DirichletDatum): self.resetbs() if not isinstance(DirichletDatum, (list, tuple,)): DirichletDatum = [DirichletDatum, fenZERO] self._DirichletDatum = DirichletDatum @property def NeumannDatum(self): """Value of g1.""" return self._NeumannDatum @NeumannDatum.setter def NeumannDatum(self, NeumannDatum): self.resetbs() if not isinstance(NeumannDatum, (list, tuple,)): NeumannDatum = [NeumannDatum, fenZERO] self._NeumannDatum = NeumannDatum @property def RobinDatumG(self): """Value of g2.""" return self._RobinDatumG @RobinDatumG.setter def RobinDatumG(self, RobinDatumG): self.resetbs() if not isinstance(RobinDatumG, (list, tuple,)): RobinDatumG = [RobinDatumG, fenZERO] self._RobinDatumG = RobinDatumG @property def RobinDatumH(self): """Value of h.""" return self._RobinDatumH @RobinDatumH.setter def RobinDatumH(self, RobinDatumH): self.resetAs() if not isinstance(RobinDatumH, (list, tuple,)): RobinDatumH = [RobinDatumH, fenZERO] self._RobinDatumH = RobinDatumH @property def DirichletBoundary(self): """Function handle to DirichletBoundary.""" return self.BCManager.DirichletBoundary @DirichletBoundary.setter def DirichletBoundary(self, DirichletBoundary): self.resetAs() self.resetbs() self.BCManager.DirichletBoundary = DirichletBoundary @property def NeumannBoundary(self): """Function handle to 
NeumannBoundary.""" return self.BCManager.NeumannBoundary @NeumannBoundary.setter def NeumannBoundary(self, NeumannBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.NeumannBoundary = NeumannBoundary @property def RobinBoundary(self): """Function handle to RobinBoundary.""" return self.BCManager.RobinBoundary @RobinBoundary.setter def RobinBoundary(self, RobinBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.RobinBoundary = RobinBoundary def autoSetDS(self): """Set FEniCS boundary measure based on boundary function handles.""" if self.dsToBeSet: if self.verbosity >= 20: verbosityDepth("INIT", "Initializing boundary measures.", end = "") NB = self.NeumannBoundary RB = self.RobinBoundary - class NBoundary(fen.SubDomain): - def inside(self, x, on_boundary): - return NB(x, on_boundary) - class RBoundary(fen.SubDomain): - def inside(self, x, on_boundary): - return RB(x, on_boundary) boundary_markers = fen.MeshFunction("size_t", self.V.mesh(), - self.V.mesh().topology().dim() - 1) - NBoundary().mark(boundary_markers, 0) - RBoundary().mark(boundary_markers, 1) + self.V.num_sub_spaces() - 1) + NB.mark(boundary_markers, 0) + RB.mark(boundary_markers, 1) self.ds = fen.Measure("ds", domain = self.V.mesh(), subdomain_data = boundary_markers) self.dsToBeSet = False if self.verbosity >= 20: verbosityDepth("DEL", " Done.", inline = True) def buildEnergyNormForm(self): # H1_omega """ Build sparse matrix (in CSR format) representative of scalar product. """ if self.verbosity >= 20: verbosityDepth("INIT", "Assembling energy matrix.", end = "") normMatFen = fen.assemble((fen.dot(fen.grad(self.u), fen.grad(self.v)) + np.abs(self.omega)**2 * fen.dot(self.u, self.v)) *fen.dx) normMat = fen.as_backend_type(normMatFen).mat() self.energyNormMatrix = scsp.csr_matrix(normMat.getValuesCSR()[::-1], shape = normMat.size) if self.verbosity >= 20: verbosityDepth("DEL", " Done.", inline = True) def A(self, mu:complex, der : int = 0) -> ScOp: """Assemble (derivative of) operator of linear system.""" Anull = self.checkAInBounds(der) if Anull is not None: return Anull self.autoSetDS() if self.As[0] is None: if self.verbosity >= 20: verbosityDepth("INIT", "Assembling operator term A0.") DirichletBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) aRe, aIm = self.diffusivity hRe, hIm = self.RobinDatumH termNames = ["diffusivity", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( [aRe, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [aIm, hIm], [x + "Imag" for x in termNames])) a0Re = (aRe * fen.dot(fen.grad(self.u), fen.grad(self.v)) * fen.dx + hRe * fen.dot(self.u, self.v) * self.ds(1)) a0Im = (aIm * fen.dot(fen.grad(self.u), fen.grad(self.v)) * fen.dx + hIm * fen.dot(self.u, self.v) * self.ds(1)) A0Re = fen.assemble(a0Re, form_compiler_parameters = parsRe) A0Im = fen.assemble(a0Im, form_compiler_parameters = parsIm) DirichletBC0.apply(A0Re) DirichletBC0.zero(A0Im) A0ReMat = fen.as_backend_type(A0Re).mat() A0ImMat = fen.as_backend_type(A0Im).mat() A0Rer, A0Rec, A0Rev = A0ReMat.getValuesCSR() A0Imr, A0Imc, A0Imv = A0ImMat.getValuesCSR() self.As[0] = (scsp.csr_matrix((A0Rev, A0Rec, A0Rer), shape = A0ReMat.size) + 1.j * scsp.csr_matrix((A0Imv, A0Imc, A0Imr), shape = A0ImMat.size)) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling operator term.") return self.As[0] def b(self, mu:complex, der : int = 0, homogeneized : bool = False) -> Np1D: """Assemble (derivative of) RHS of linear system.""" bnull = 
self.checkbInBounds(der, homogeneized) if bnull is not None: return bnull if homogeneized and not np.isclose(self.mu0BC, mu): self.u0BC = self.liftDirichletData(mu) if (max(self.nbs, self.nAs * homogeneized) > 1 and not np.isclose(self.bsmu, mu)): self.bsmu = mu self.resetbs() if self.bs[homogeneized][der] is None: self.autoSetDS() if self.verbosity >= 20: verbosityDepth("INIT", "Assembling forcing term b{}.".format( der)) if der < self.nbs: fRe, fIm = self.forcingTerm g1Re, g1Im = self.NeumannDatum g2Re, g2Im = self.RobinDatumG else: fRe, fIm = fenZERO, fenZERO g1Re, g1Im = fenZERO, fenZERO g2Re, g2Im = fenZERO, fenZERO termNames = ["forcingTerm", "NeumannDatum", "RobinDatumG"] parsRe = self.iterReduceQuadratureDegree(zip( [fRe, g1Re, g2Re], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [fIm, g1Im, g2Im], [x + "Imag" for x in termNames])) L0Re = (fen.dot(fRe, self.v) * fen.dx + fen.dot(g1Re, self.v) * self.ds(0) + fen.dot(g2Re, self.v) * self.ds(1)) L0Im = (fen.dot(fIm, self.v) * fen.dx + fen.dot(g1Im, self.v) * self.ds(0) + fen.dot(g2Im, self.v) * self.ds(1)) b0Re = fen.assemble(L0Re, form_compiler_parameters = parsRe) b0Im = fen.assemble(L0Im, form_compiler_parameters = parsIm) if homogeneized: Ader = self.A(mu, der) b0Re[:] -= np.real(Ader.dot(self.u0BC)) b0Im[:] -= np.imag(Ader.dot(self.u0BC)) DBCR = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) DBCI = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) else: DBCR = fen.DirichletBC(self.V, self.DirichletDatum[0], self.DirichletBoundary) DBCI = fen.DirichletBC(self.V, self.DirichletDatum[1], self.DirichletBoundary) DBCR.apply(b0Re) DBCI.apply(b0Im) self.bs[homogeneized][der] = np.array(b0Re[:] + 1.j * b0Im[:], dtype = np.complex) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling forcing term.") return self.bs[homogeneized][der] diff --git a/rrompy/hfengines/linear_mixed_problem/__init__.py b/rrompy/hfengines/vector_linear_problem/__init__.py similarity index 53% rename from rrompy/hfengines/linear_mixed_problem/__init__.py rename to rrompy/hfengines/vector_linear_problem/__init__.py index da9a255..d09cdf3 100644 --- a/rrompy/hfengines/linear_mixed_problem/__init__.py +++ b/rrompy/hfengines/vector_linear_problem/__init__.py @@ -1,28 +1,33 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
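The homogeneized branch of b() pairs with liftDirichletData(): one can solve with homogeneous Dirichlet data and add the lifting back afterwards. A sketch of this bookkeeping, as a hypothetical helper taking any engine from this module:

from rrompy.utilities.base.types import Np1D

def solveWithLifting(engine, mu:complex) -> Np1D:
    # Solve with the Dirichlet datum lifted into the RHS (zero Dirichlet rows),
    # then add the lifting back to recover the original boundary values.
    uHom = engine.solve(mu, homogeneized = True)
    return uHom + engine.liftDirichletData(mu)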
# -from .mixed_laplace_base_problem_engine import MixedLaplaceBaseProblemEngine -from .mixed_helmholtz_problem_engine import MixedHelmholtzProblemEngine +from .linear_elasticity_problem_engine import LinearElasticityProblemEngine +from .linear_elasticity_helmholtz_problem_engine import LinearElasticityHelmholtzProblemEngine +from .linear_elasticity_helmholtz_problem_engine_damped import LinearElasticityHelmholtzProblemEngineDamped +from .linear_elasticity_beam_poisson_ratio import LinearElasticityBeamPoissonRatio +from .linear_elasticity_helmholtz_archway_frequency import LinearElasticityHelmholtzArchwayFrequency __all__ = [ - 'MixedLaplaceBaseProblemEngine', - 'MixedHelmholtzProblemEngine' + 'LinearElasticityProblemEngine', + 'LinearElasticityHelmholtzProblemEngine', + 'LinearElasticityBeamPoissonRatio', + 'LinearElasticityHelmholtzArchwayFrequency' ] diff --git a/rrompy/hfengines/vector_linear_problem/linear_elasticity_beam_poisson_ratio.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_beam_poisson_ratio.py new file mode 100644 index 0000000..556c0fe --- /dev/null +++ b/rrompy/hfengines/vector_linear_problem/linear_elasticity_beam_poisson_ratio.py @@ -0,0 +1,140 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import numpy as np +import scipy.sparse as scsp +import fenics as fen +from .linear_elasticity_problem_engine import LinearElasticityProblemEngine +from rrompy.utilities.base.fenics import fenZEROS +from rrompy.utilities.base.types import Np1D, ScOp +from rrompy.utilities.base import verbosityDepth + +__all__ = ['LinearElasticityBeamPoissonRatio'] + +class LinearElasticityBeamPoissonRatio(LinearElasticityProblemEngine): + """ + Solver for linear elasticity problem of a beam subject to its own weight, + with parametric Poisson's ratio. + - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) = rho_ * g in \Omega + u = 0 on \Gamma_D + \partial_nu = 0 on \Gamma_N + """ + + nAs = 2 + nbs = 3 + + def __init__(self, n:int, rho_:float, g:float, E:float, nu0:float, + length:float, degree_threshold : int = np.inf, + verbosity : int = 10): + super().__init__(degree_threshold = degree_threshold, + verbosity = verbosity) + self.lambda_ = E * nu0 / (1. + nu0) / (1. - 2 * nu0) + self.mu_ = E / (1. + nu0) + + mesh = fen.RectangleMesh(fen.Point(0., 0.), fen.Point(length, 1), + n, max(int(n / length), 1)) + self.V = fen.VectorFunctionSpace(mesh, "P", 1) + + self.forcingTerm = [fen.Constant((0., - rho_ * g / E)), fenZEROS(2)] + self.DirichletBoundary = lambda x, on_b: on_b and fen.near(x[0], 0.) 
+ self.NeumannBoundary = "REST" + + def A(self, mu:complex, der : int = 0) -> ScOp: + """Assemble (derivative of) operator of linear system.""" + Anull = self.checkAInBounds(der) + if Anull is not None: return Anull + self.autoSetDS() + if der <= 1 and self.As[0] is None: + if self.verbosity >= 20: + verbosityDepth("INIT", "Assembling operator term A0.") + DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(2), + self.DirichletBoundary) + epsilon = lambda u: .5 * (fen.grad(u) + fen.nabla_grad(u)) + a0Re = 2 * fen.inner(epsilon(self.u), epsilon(self.v)) * fen.dx + A0Re = fen.assemble(a0Re) + DirichletBC0.apply(A0Re) + A0ReMat = fen.as_backend_type(A0Re).mat() + A0Rer, A0Rec, A0Rev = A0ReMat.getValuesCSR() + self.As[0] = scsp.csr_matrix((A0Rev, A0Rec, A0Rer), + shape = A0ReMat.size, + dtype = np.complex) + if self.verbosity >= 20: + verbosityDepth("DEL", "Done assembling operator term.") + if der <= 1 and self.As[1] is None: + if self.verbosity >= 20: + verbosityDepth("INIT", "Assembling operator term A1.") + DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(2), + self.DirichletBoundary) + a1Re = fen.div(self.u) * fen.div(self.v) * fen.dx + A1Re = fen.assemble(a1Re) + DirichletBC0.apply(A1Re) + A1ReMat = fen.as_backend_type(A1Re).mat() + A1Rer, A1Rec, A1Rev = A1ReMat.getValuesCSR() + self.As[1] = (scsp.csr_matrix((A1Rev, A1Rec, A1Rer), + shape = A1ReMat.size, + dtype = np.complex) + - 2. * self.As[0]) + if self.verbosity >= 20: + verbosityDepth("DEL", "Done assembling operator term.") + if der == 0: + return self.As[0] + mu * self.As[1] + return self.As[1] + + def b(self, mu:complex, der : int = 0, + homogeneized : bool = False) -> Np1D: + """Assemble (derivative of) RHS of linear system.""" + homogeneized = False + bnull = self.checkbInBounds(der) + if bnull is not None: return bnull + if (self.nbs > 1 and not np.isclose(self.bsmu, mu)): + self.bsmu = mu + self.resetbs() + if self.bs[homogeneized][der] is None: + self.autoSetDS() + if self.bs[homogeneized][0] is None and der > 0: self.b(mu, 0) + if self.verbosity >= 20: + verbosityDepth("INIT", "Assembling forcing term b{}.".format( + der)) + if der == 0: + fRe, fIm = self.forcingTerm + parsRe = self.iterReduceQuadratureDegree(zip( + [fRe], + ["forcingTermReal"])) + parsIm = self.iterReduceQuadratureDegree(zip( + [fIm], + ["forcingTermImag"])) + L0Re = fen.inner(fRe, self.v) * fen.dx + L0Im = fen.inner(fIm, self.v) * fen.dx + b0Re = fen.assemble(L0Re, form_compiler_parameters = parsRe) + b0Im = fen.assemble(L0Im, form_compiler_parameters = parsIm) + DBCR = fen.DirichletBC(self.V, self.DirichletDatum[0], + self.DirichletBoundary) + DBCI = fen.DirichletBC(self.V, self.DirichletDatum[1], + self.DirichletBoundary) + DBCR.apply(b0Re) + DBCI.apply(b0Im) + self.bsBase = np.array(b0Re[:] + 1.j * b0Im[:], + dtype = np.complex) + self.bs[homogeneized][0] = (1 - mu - 2 * mu ** 2) * self.bsBase + elif der == 1: + self.bs[homogeneized][der] = (- 1 - 4 * mu) * self.bsBase + elif der == 2: + self.bs[homogeneized][der] = - 2. 
* self.bsBase + if self.verbosity >= 20: + verbosityDepth("DEL", "Done assembling forcing term.") + return self.bs[homogeneized][der] diff --git a/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency.py new file mode 100644 index 0000000..2eb31ba --- /dev/null +++ b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency.py @@ -0,0 +1,66 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import numpy as np +import fenics as fen +from .linear_elasticity_helmholtz_problem_engine import \ + LinearElasticityHelmholtzProblemEngine +from rrompy.utilities.base.fenics import fenZEROS + +__all__ = ['LinearElasticityHelmholtzArchwayFrequency'] + +class LinearElasticityHelmholtzArchwayFrequency( + LinearElasticityHelmholtzProblemEngine): + """ + Solver for archway linear elasticity Helmholtz problem with parametric + wavenumber. + - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) + - rho_ * omega^2 * u = rho_ * g / omega in \Omega + u = 0 on \Gamma_D + \partial_nu = 0 on \Gamma_N + """ + + def __init__(self, kappa:float, n:int, rho_:float, T:float, lambda_:float, + mu_:float, R:float, r:float, degree_threshold : int = np.inf, + verbosity : int = 10): + super().__init__(degree_threshold = degree_threshold, + verbosity = verbosity) + self.omega = kappa + self.lambda_ = lambda_ + self.mu_ = mu_ + self.rho_ = rho_ + + import mshr + domain = (mshr.Circle(fen.Point(0, 0), R) + - mshr.Circle(fen.Point(0, 0), r) + - mshr.Rectangle(fen.Point(-1.05*R, -1.05*R), + fen.Point(1.05*R, 0))) + mesh = mshr.generate_mesh(domain, n) + self.V = fen.VectorFunctionSpace(mesh, "P", 1) + + import ufl + x, y = fen.SpatialCoordinate(mesh)[:] + NeumannNonZero = ufl.And(ufl.gt(y, r), + ufl.And(ufl.ge(x, -.25 * R), ufl.le(x, .25 * R))) + self.NeumannDatum = [ufl.as_vector((0., + ufl.conditional(NeumannNonZero, + fen.Constant(T), + 0.))), + fenZEROS(2)] + self.DirichletBoundary = lambda x, on_b: on_b and fen.near(x[1], 0.) + self.NeumannBoundary = "REST" diff --git a/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency_3d.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency_3d.py new file mode 100644 index 0000000..99e2b77 --- /dev/null +++ b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency_3d.py @@ -0,0 +1,71 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
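A usage sketch for the beam engine above, whose parameter plays the role of the Poisson ratio; the mesh resolution and material constants below are placeholders, while the constructor signature and the solve/norm calls are the ones defined in this diff:

from rrompy.hfengines.vector_linear_problem import LinearElasticityBeamPoissonRatio

engine = LinearElasticityBeamPoissonRatio(n = 30, rho_ = 1.5e3, g = 9.8,
                                          E = 1e6, nu0 = .3, length = 5.)
for nu in [.25, .3, .35]:
    u = engine.solve(nu)       # A(nu) = A0 + nu * A1, b(nu) = (1 - nu - 2 nu^2) * bsBase
    print(nu, engine.norm(u))  # norm induced by energyNormMatrix (L2 in the base class)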
+# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import numpy as np +import fenics as fen +from .linear_elasticity_helmholtz_problem_engine import \ + LinearElasticityHelmholtzProblemEngine +from rrompy.utilities.base.fenics import fenZEROS + +__all__ = ['LinearElasticityHelmholtzArchwayFrequency'] + +class LinearElasticityHelmholtzArchwayFrequency( + LinearElasticityHelmholtzProblemEngine): + """ + Solver for archway linear elasticity Helmholtz problem with parametric + wavenumber. + - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) + - rho_ * omega^2 * u = rho_ * g / omega in \Omega + u = 0 on \Gamma_D + \partial_nu = 0 on \Gamma_N + """ + + def __init__(self, kappa:float, n:int, rho_:float, T:float, lambda_:float, + mu_:float, R:float, r:float, degree_threshold : int = np.inf, + verbosity : int = 10): + super().__init__(degree_threshold = degree_threshold, + verbosity = verbosity) + self.omega = kappa + self.lambda_ = lambda_ + self.mu_ = mu_ + self.rho_ = rho_ + + import mshr + domain = (mshr.Sphere(fen.Point(0, 0, 0), R) + - mshr.Sphere(fen.Point(0, 0, 0), r) + - mshr.Box(fen.Point(-1.05*R, -1.05*R, -1.05*R), + fen.Point(1.05*R, 1.05*R, 0)) + - mshr.Box(fen.Point(-1.05*R, -1.05*R, -1.05*R), + fen.Point(1.05*R, -.05*R, 1.05*R)) + - mshr.Box(fen.Point(1.05*R, 1.05*R, 1.05*R), + fen.Point(-1.05*R, .05*R, -1.05*R))) + mesh = mshr.generate_mesh(domain, n) + self.V = fen.VectorFunctionSpace(mesh, "P", 1) + + import ufl + x, y, z = fen.SpatialCoordinate(mesh)[:] + NeumannNonZero = ufl.And(ufl.gt(z, r), + ufl.And(ufl.ge(x, -.25 * R), ufl.le(x, .25 * R))) + self.NeumannDatum = [ufl.as_vector((0., 0.,fen.Constant(T))), +# ufl.conditional(NeumannNonZero, +# fen.Constant(T), +# 0.))), + fenZEROS(3)] + self.DirichletBoundary = lambda x, on_b: on_b and fen.near(x[2], 0.) + self.NeumannBoundary = "REST" + diff --git a/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency_broken.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency_broken.py new file mode 100644 index 0000000..b125f0b --- /dev/null +++ b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_archway_frequency_broken.py @@ -0,0 +1,110 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . 
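A sketch of driving the two-dimensional archway engine defined earlier across its frequency parameter and exporting a time-domain animation; all geometry and material values are placeholders, mshr must be available since the constructor meshes the arch with it, and the solve assumes the parent LinearElasticityHelmholtzProblemEngine assembles A(mu) and b(mu) analogously to the scalar Helmholtz case:

from rrompy.hfengines.vector_linear_problem import \
    LinearElasticityHelmholtzArchwayFrequency

engine = LinearElasticityHelmholtzArchwayFrequency(kappa = 10., n = 30,
                                                   rho_ = 1.5e3, T = 1e4,
                                                   lambda_ = 1e5, mu_ = 1e5,
                                                   R = 1., r = .8)
u = engine.solve(12.)                               # displacement dofs at wavenumber 12
engine.outParaviewTimeDomain(u, omega = 12., filename = "archway")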
+# + +import numpy as np +import fenics as fen +from .linear_elasticity_helmholtz_problem_engine import \ + LinearElasticityHelmholtzProblemEngine +from rrompy.utilities.base.fenics import fenZEROS +from rrompy.utilities.base.types import Np1D +from rrompy.utilities.base import verbosityDepth + +__all__ = ['LinearElasticityHelmholtzArchwayFrequency'] + +class LinearElasticityHelmholtzArchwayFrequency( + LinearElasticityHelmholtzProblemEngine): + """ + Solver for archway linear elasticity Helmholtz problem with parametric + wavenumber. + - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) + - rho_ * omega^2 * u = rho_ * g / omega in \Omega + u = 0 on \Gamma_D + \partial_nu = 0 on \Gamma_N + """ + nAs = 2 + nbs = 20 + + def __init__(self, kappa:float, n:int, rho_:float, g:float, lambda_:float, + mu_:float, R:float, r:float, degree_threshold : int = np.inf, + verbosity : int = 10): + super().__init__(degree_threshold = degree_threshold, + verbosity = verbosity) + self.omega = kappa + self.lambda_ = lambda_ + self.mu_ = mu_ + self.rho_ = rho_ + + import mshr + domain = (mshr.Circle(fen.Point(0, 0), R) + - mshr.Circle(fen.Point(0, 0), r) + - mshr.Rectangle(fen.Point(-1.05*R, -1.05*R), + fen.Point(1.05*R, 0))) + mesh = mshr.generate_mesh(domain, n) + self.V = fen.VectorFunctionSpace(mesh, "P", 1) + + self.forcingTerm = [fen.Constant((0., - rho_ * g)), fenZEROS(2)] + self.DirichletBoundary = lambda x, on_b: on_b and fen.near(x[1], 0.) + self.NeumannBoundary = "REST" + + def b(self, mu:complex, der : int = 0, + homogeneized : bool = False) -> Np1D: + """Assemble (derivative of) RHS of linear system.""" + homogeneized = False + bnull = self.checkbInBounds(der) + if bnull is not None: return bnull + if (self.nbs > 1 and not np.isclose(self.bsmu, mu)): + self.bsmu = mu + self.resetbs() + if self.bs[homogeneized][der] is None: + self.autoSetDS() + if self.bs[homogeneized][0] is None and der > 0: self.b(mu, 0) + if self.verbosity >= 20: + verbosityDepth("INIT", "Assembling forcing term b{}.".format( + der)) + if der == 0: + fRe, fIm = self.forcingTerm + parsRe = self.iterReduceQuadratureDegree(zip( + [fRe], + ["forcingTermReal"])) + parsIm = self.iterReduceQuadratureDegree(zip( + [fIm], + ["forcingTermImag"])) + L0Re = fen.inner(fRe, self.v) * fen.dx + L0Im = fen.inner(fIm, self.v) * fen.dx + b0Re = fen.assemble(L0Re, form_compiler_parameters = parsRe) + b0Im = fen.assemble(L0Im, form_compiler_parameters = parsIm) + DBCR = fen.DirichletBC(self.V, self.DirichletDatum[0], + self.DirichletBoundary) + DBCI = fen.DirichletBC(self.V, self.DirichletDatum[1], + self.DirichletBoundary) + DBCR.apply(b0Re) + DBCI.apply(b0Im) + self.bsBase = np.array(b0Re[:] + 1.j * b0Im[:], + dtype = np.complex) + if np.isclose(mu, 0.): + if der == 0: + self.bs[homogeneized][der] = self.bsBase + else: + self.bs[homogeneized][der] = np.zeros_like(self.bsBase) + else: + self.bs[homogeneized][der] = (mu * np.power(- mu, - der) + * self.bsBase) + if self.verbosity >= 20: + verbosityDepth("DEL", "Done assembling forcing term.") + return self.bs[homogeneized][der] + diff --git a/rrompy/hfengines/linear_mixed_problem/mixed_helmholtz_problem_engine.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_problem_engine.py similarity index 55% copy from rrompy/hfengines/linear_mixed_problem/mixed_helmholtz_problem_engine.py copy to rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_problem_engine.py index 52ef35c..07c1ff5 100644 --- a/rrompy/hfengines/linear_mixed_problem/mixed_helmholtz_problem_engine.py +++ 
b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_problem_engine.py @@ -1,167 +1,201 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from scipy.sparse import csr_matrix import fenics as fen -from .mixed_laplace_base_problem_engine import MixedLaplaceBaseProblemEngine +from .linear_elasticity_problem_engine import LinearElasticityProblemEngine from rrompy.utilities.base.types import Np1D, ScOp from rrompy.utilities.base.fenics import fenZERO, fenZEROS, fenONE from rrompy.utilities.base import verbosityDepth -__all__ = ['MixedHelmholtzProblemEngine'] +__all__ = ['LinearElasticityHelmholtzProblemEngine'] -class MixedHelmholtzProblemEngine(MixedLaplaceBaseProblemEngine): +class LinearElasticityHelmholtzProblemEngine(LinearElasticityProblemEngine): """ - Solver for generic mixed Helmholtz problems with parametric wavenumber. - - \nabla \cdot (a \nabla u) - omega^2 * n**2 * u = f in \Omega - \int_Omega u = 0 - u = u0 on \Gamma_D - \partial_nu = g1 on \Gamma_N - \partial_nu + h u = g2 on \Gamma_R + Solver for generic linear elasticity Helmholtz problems with parametric + wavenumber. + - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) + - rho_ * mu^2 * u = f in \Omega + u = u0 on \Gamma_D + \partial_nu = g1 on \Gamma_N + \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. - V: Real mixed FE space. - u: Generic mixed trial functions for variational form evaluation. - v: Generic mixed test functions for variational form evaluation. + V: Real vector FE space. + u: Generic vector trial functions for variational form evaluation. + v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. energyNormMatrix: Scipy sparse matrix representing inner product over V. bsmu: Mu value of last bs evaluation. liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. liftedDirichletDatum: Dofs of Dirichlet datum lifting. mu0BC: Mu value of last Dirichlet datum lifting. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. - diffusivity: Value of a. - refractionIndex: Value of n. + lambda_: Value of lambda_. + mu_: Value of mu_. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). A0: Scipy sparse array representation (in CSC format) of A0. A1: Scipy sparse array representation (in CSC format) of A1. b0: Numpy array representation of b0. dsToBeSet: Whether ds needs to be set. 
""" nAs = 2 def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10): super().__init__(degree_threshold = degree_threshold, verbosity = verbosity) self.omega = 1. - self.refractionIndex = fenONE + self.rho_ = fenONE @property - def refractionIndex(self): - """Value of n.""" - return self._refractionIndex - @refractionIndex.setter - def refractionIndex(self, refractionIndex): + def rho_(self): + """Value of rho_.""" + return self._rho_ + @rho_.setter + def rho_(self, rho_): self.resetAs() - if not isinstance(refractionIndex, (list, tuple,)): - refractionIndex = [refractionIndex, fenZERO] - self._refractionIndex = refractionIndex - + if not isinstance(rho_, (list, tuple,)): + rho_ = [rho_, fenZERO] + self._rho_ = rho_ + + def buildEnergyNormForm(self): # energy + omega norm + """ + Build sparse matrix (in CSR format) representative of scalar product. + """ + if self.verbosity >= 20: + verbosityDepth("INIT", "Assembling energy matrix.", end = "") + l_Re, _ = self.lambda_ + m_Re, _ = self.mu_ + r_Re, _ = self.rho_ + termNames = ["lambda_", "mu_", "rho_"] + pars = self.iterReduceQuadratureDegree(zip( + [l_Re, m_Re, r_Re], + [x + "Real" for x in termNames])) + epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) + sigma = lambda u, l_, m_: ( + l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + + 2. * m_ * epsilon(u)) + normMatFen = (fen.inner(sigma(self.u, l_Re, m_Re), epsilon(self.v)) + + np.abs(self.omega)**2 * r_Re * fen.inner(self.u, self.v) + ) * fen.dx + normMatFen = fen.assemble(normMatFen, form_compiler_parameters = pars) + normMat = fen.as_backend_type(normMatFen).mat() + self.energyNormMatrix = csr_matrix(normMat.getValuesCSR()[::-1], + shape = normMat.size) + if self.verbosity >= 20: + verbosityDepth("DEL", " Done.", inline = True) + def rescaling(self, x:Np1D) -> Np1D: """Rescaling in parameter dependence.""" return np.power(x, 2.) def rescalingInv(self, x:Np1D) -> Np1D: """Inverse rescaling in parameter dependence.""" return np.power(x, .5) def A(self, mu:complex, der : int = 0) -> ScOp: """Assemble (derivative of) operator of linear system.""" Anull = self.checkAInBounds(der) if Anull is not None: return Anull self.autoSetDS() if der <= 0 and self.As[0] is None: if self.verbosity >= 20: verbosityDepth("INIT", "Assembling operator term A0.") - DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(2), - self.DirichletBoundary) - aRe, aIm = self.diffusivity + DirichletBC0 = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) + lambda_Re, lambda_Im = self.lambda_ + mu_Re, mu_Im = self.mu_ hRe, hIm = self.RobinDatumH - termNames = ["diffusivity", "RobinDatumH"] + termNames = ["lambda_", "mu_", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( - [aRe, hRe], + [lambda_Re, mu_Re, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( - [aIm, hIm], + [lambda_Im, mu_Re, hIm], [x + "Imag" for x in termNames])) - a0Re = ((aRe * fen.dot(fen.grad(self.u[0]), fen.grad(self.v[0])) - + self.u[0] * self.v[1] + self.u[1] * self.v[0]) * fen.dx - + hRe * fen.dot(self.u[0], self.v[0]) * self.ds(1)) - a0Im = (aIm * fen.dot(fen.grad(self.u[0]), fen.grad(self.v[0])) \ - * fen.dx - + hIm * fen.dot(self.u[0], self.v[0]) * self.ds(1)) + epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) + sigma = lambda u, l_, m_: ( + l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + + 2. 
* m_ * epsilon(u)) + a0Re = (fen.inner(sigma(self.u, lambda_Re, mu_Re), + epsilon(self.v)) * fen.dx + + hRe * fen.inner(self.u, self.v) * self.ds(1)) + a0Im = (fen.inner(sigma(self.u, lambda_Im, mu_Im), + epsilon(self.v)) * fen.dx + + hIm * fen.inner(self.u, self.v) * self.ds(1)) A0Re = fen.assemble(a0Re, form_compiler_parameters = parsRe) A0Im = fen.assemble(a0Im, form_compiler_parameters = parsIm) DirichletBC0.apply(A0Re) DirichletBC0.zero(A0Im) A0ReMat = fen.as_backend_type(A0Re).mat() A0ImMat = fen.as_backend_type(A0Im).mat() A0Rer, A0Rec, A0Rev = A0ReMat.getValuesCSR() A0Imr, A0Imc, A0Imv = A0ImMat.getValuesCSR() self.As[0] = (csr_matrix((A0Rev, A0Rec, A0Rer), shape = A0ReMat.size) + 1.j * csr_matrix((A0Imv, A0Imc, A0Imr), shape = A0ImMat.size)) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling operator term.") if der <= 1 and self.As[1] is None: if self.verbosity >= 20: verbosityDepth("INIT", "Assembling operator term A1.") - DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(2), - self.DirichletBoundary) - nRe, nIm = self.refractionIndex - n2Re, n2Im = nRe * nRe - nIm * nIm, 2 * nRe * nIm - parsRe = self.iterReduceQuadratureDegree(zip([n2Re], - ["refractionIndexSquaredReal"])) - parsIm = self.iterReduceQuadratureDegree(zip([n2Im], - ["refractionIndexSquaredImag"])) - a1Re = - n2Re * fen.dot(self.u[0], self.v[0]) * fen.dx - a1Im = - n2Im * fen.dot(self.u[0], self.v[0]) * fen.dx + DirichletBC0 = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) + rho_Re, rho_Im = self.rho_ + parsRe = self.iterReduceQuadratureDegree(zip([rho_Re], + ["rho_Real"])) + parsIm = self.iterReduceQuadratureDegree(zip([rho_Im], + ["rho_Imag"])) + a1Re = - rho_Re * fen.inner(self.u, self.v) * fen.dx + a1Im = - rho_Im * fen.inner(self.u, self.v) * fen.dx A1Re = fen.assemble(a1Re, form_compiler_parameters = parsRe) A1Im = fen.assemble(a1Im, form_compiler_parameters = parsIm) DirichletBC0.zero(A1Re) DirichletBC0.zero(A1Im) A1ReMat = fen.as_backend_type(A1Re).mat() A1ImMat = fen.as_backend_type(A1Im).mat() A1Rer, A1Rec, A1Rev = A1ReMat.getValuesCSR() A1Imr, A1Imc, A1Imv = A1ImMat.getValuesCSR() self.As[1] = (csr_matrix((A1Rev, A1Rec, A1Rer), shape = A1ReMat.size) + 1.j * csr_matrix((A1Imv, A1Imc, A1Imr), shape = A1ImMat.size)) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling operator term.") if der == 0: return self.As[0] + mu**2 * self.As[1] return self.As[1] diff --git a/rrompy/hfengines/linear_mixed_problem/mixed_helmholtz_problem_engine.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_problem_engine_damped.py similarity index 50% rename from rrompy/hfengines/linear_mixed_problem/mixed_helmholtz_problem_engine.py rename to rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_problem_engine_damped.py index 52ef35c..a62049a 100644 --- a/rrompy/hfengines/linear_mixed_problem/mixed_helmholtz_problem_engine.py +++ b/rrompy/hfengines/vector_linear_problem/linear_elasticity_helmholtz_problem_engine_damped.py @@ -1,167 +1,209 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from scipy.sparse import csr_matrix import fenics as fen -from .mixed_laplace_base_problem_engine import MixedLaplaceBaseProblemEngine +from .linear_elasticity_helmholtz_problem_engine import \ + LinearElasticityHelmholtzProblemEngine from rrompy.utilities.base.types import Np1D, ScOp from rrompy.utilities.base.fenics import fenZERO, fenZEROS, fenONE from rrompy.utilities.base import verbosityDepth -__all__ = ['MixedHelmholtzProblemEngine'] +__all__ = ['LinearElasticityHelmholtzProblemEngineDamped'] -class MixedHelmholtzProblemEngine(MixedLaplaceBaseProblemEngine): +class LinearElasticityHelmholtzProblemEngineDamped( + LinearElasticityHelmholtzProblemEngine): """ - Solver for generic mixed Helmholtz problems with parametric wavenumber. - - \nabla \cdot (a \nabla u) - omega^2 * n**2 * u = f in \Omega - \int_Omega u = 0 - u = u0 on \Gamma_D - \partial_nu = g1 on \Gamma_N - \partial_nu + h u = g2 on \Gamma_R + Solver for generic linear elasticity Helmholtz problems with parametric + wavenumber. + - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) + - rho_ * (mu^2 - i * eta * mu) * u = f in \Omega + u = u0 on \Gamma_D + \partial_nu = g1 on \Gamma_N + \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. - V: Real mixed FE space. - u: Generic mixed trial functions for variational form evaluation. - v: Generic mixed test functions for variational form evaluation. + V: Real vector FE space. + u: Generic vector trial functions for variational form evaluation. + v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. energyNormMatrix: Scipy sparse matrix representing inner product over V. bsmu: Mu value of last bs evaluation. liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. liftedDirichletDatum: Dofs of Dirichlet datum lifting. mu0BC: Mu value of last Dirichlet datum lifting. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. - diffusivity: Value of a. - refractionIndex: Value of n. + lambda_: Value of lambda_. + mu_: Value of mu_. + eta: Value of eta. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). A0: Scipy sparse array representation (in CSC format) of A0. A1: Scipy sparse array representation (in CSC format) of A1. b0: Numpy array representation of b0. dsToBeSet: Whether ds needs to be set. """ - nAs = 2 + nAs = 3 def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10): super().__init__(degree_threshold = degree_threshold, verbosity = verbosity) - self.omega = 1. 
- self.refractionIndex = fenONE + self.eta = fenZERO @property - def refractionIndex(self): - """Value of n.""" - return self._refractionIndex - @refractionIndex.setter - def refractionIndex(self, refractionIndex): + def eta(self): + """Value of eta.""" + return self._eta + @eta.setter + def eta(self, eta): self.resetAs() - if not isinstance(refractionIndex, (list, tuple,)): - refractionIndex = [refractionIndex, fenZERO] - self._refractionIndex = refractionIndex - + if not isinstance(eta, (list, tuple,)): + eta = [eta, fenZERO] + self._eta = eta + def rescaling(self, x:Np1D) -> Np1D: """Rescaling in parameter dependence.""" - return np.power(x, 2.) + return x def rescalingInv(self, x:Np1D) -> Np1D: """Inverse rescaling in parameter dependence.""" - return np.power(x, .5) + return x def A(self, mu:complex, der : int = 0) -> ScOp: """Assemble (derivative of) operator of linear system.""" Anull = self.checkAInBounds(der) if Anull is not None: return Anull self.autoSetDS() if der <= 0 and self.As[0] is None: if self.verbosity >= 20: verbosityDepth("INIT", "Assembling operator term A0.") - DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(2), - self.DirichletBoundary) - aRe, aIm = self.diffusivity + DirichletBC0 = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) + lambda_Re, lambda_Im = self.lambda_ + mu_Re, mu_Im = self.mu_ hRe, hIm = self.RobinDatumH - termNames = ["diffusivity", "RobinDatumH"] + termNames = ["lambda_", "mu_", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( - [aRe, hRe], + [lambda_Re, mu_Re, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( - [aIm, hIm], + [lambda_Im, mu_Re, hIm], [x + "Imag" for x in termNames])) - a0Re = ((aRe * fen.dot(fen.grad(self.u[0]), fen.grad(self.v[0])) - + self.u[0] * self.v[1] + self.u[1] * self.v[0]) * fen.dx - + hRe * fen.dot(self.u[0], self.v[0]) * self.ds(1)) - a0Im = (aIm * fen.dot(fen.grad(self.u[0]), fen.grad(self.v[0])) \ - * fen.dx - + hIm * fen.dot(self.u[0], self.v[0]) * self.ds(1)) + epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) + sigma = lambda u, l_, m_: ( + l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + + 2. 
* m_ * epsilon(u)) + a0Re = (fen.inner(sigma(self.u, lambda_Re, mu_Re), + epsilon(self.v)) * fen.dx + + hRe * fen.inner(self.u, self.v) * self.ds(1)) + a0Im = (fen.inner(sigma(self.u, lambda_Im, mu_Im), + epsilon(self.v)) * fen.dx + + hIm * fen.inner(self.u, self.v) * self.ds(1)) A0Re = fen.assemble(a0Re, form_compiler_parameters = parsRe) A0Im = fen.assemble(a0Im, form_compiler_parameters = parsIm) DirichletBC0.apply(A0Re) DirichletBC0.zero(A0Im) A0ReMat = fen.as_backend_type(A0Re).mat() A0ImMat = fen.as_backend_type(A0Im).mat() A0Rer, A0Rec, A0Rev = A0ReMat.getValuesCSR() A0Imr, A0Imc, A0Imv = A0ImMat.getValuesCSR() self.As[0] = (csr_matrix((A0Rev, A0Rec, A0Rer), shape = A0ReMat.size) + 1.j * csr_matrix((A0Imv, A0Imc, A0Imr), shape = A0ImMat.size)) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling operator term.") if der <= 1 and self.As[1] is None: if self.verbosity >= 20: verbosityDepth("INIT", "Assembling operator term A1.") - DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(2), - self.DirichletBoundary) - nRe, nIm = self.refractionIndex - n2Re, n2Im = nRe * nRe - nIm * nIm, 2 * nRe * nIm - parsRe = self.iterReduceQuadratureDegree(zip([n2Re], - ["refractionIndexSquaredReal"])) - parsIm = self.iterReduceQuadratureDegree(zip([n2Im], - ["refractionIndexSquaredImag"])) - a1Re = - n2Re * fen.dot(self.u[0], self.v[0]) * fen.dx - a1Im = - n2Im * fen.dot(self.u[0], self.v[0]) * fen.dx + DirichletBC0 = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) + rho_Re, rho_Im = self.rho_ + eta_Re, eta_Im = self.eta + termNames = ["rho_", "eta"] + parsRe = self.iterReduceQuadratureDegree(zip([rho_Re, eta_Re], + [x + "Real" for x in termNames])) + parsIm = self.iterReduceQuadratureDegree(zip([rho_Im, eta_Im], + [x + "Imag" for x in termNames])) + a1Re = - ((eta_Re * rho_Im + eta_Im * rho_Re) + * fen.inner(self.u, self.v)) * fen.dx + a1Im = ((eta_Re * rho_Re - eta_Im * rho_Im) + * fen.inner(self.u, self.v)) * fen.dx A1Re = fen.assemble(a1Re, form_compiler_parameters = parsRe) A1Im = fen.assemble(a1Im, form_compiler_parameters = parsIm) DirichletBC0.zero(A1Re) DirichletBC0.zero(A1Im) A1ReMat = fen.as_backend_type(A1Re).mat() A1ImMat = fen.as_backend_type(A1Im).mat() A1Rer, A1Rec, A1Rev = A1ReMat.getValuesCSR() A1Imr, A1Imc, A1Imv = A1ImMat.getValuesCSR() self.As[1] = (csr_matrix((A1Rev, A1Rec, A1Rer), shape = A1ReMat.size) + 1.j * csr_matrix((A1Imv, A1Imc, A1Imr), shape = A1ImMat.size)) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling operator term.") + if der <= 2 and self.As[2] is None: + if self.verbosity >= 20: + verbosityDepth("INIT", "Assembling operator term A2.") + DirichletBC0 = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) + rho_Re, rho_Im = self.rho_ + parsRe = self.iterReduceQuadratureDegree(zip([rho_Re], + ["rho_Real"])) + parsIm = self.iterReduceQuadratureDegree(zip([rho_Im], + ["rho_Imag"])) + a2Re = - rho_Re * fen.inner(self.u, self.v) * fen.dx + a2Im = - rho_Im * fen.inner(self.u, self.v) * fen.dx + A2Re = fen.assemble(a2Re, form_compiler_parameters = parsRe) + A2Im = fen.assemble(a2Im, form_compiler_parameters = parsIm) + DirichletBC0.zero(A2Re) + DirichletBC0.zero(A2Im) + A2ReMat = fen.as_backend_type(A2Re).mat() + A2ImMat = fen.as_backend_type(A2Im).mat() + A2Rer, A2Rec, A2Rev = A2ReMat.getValuesCSR() + A2Imr, A2Imc, A2Imv = A2ImMat.getValuesCSR() + self.As[2] = (csr_matrix((A2Rev, A2Rec, A2Rer), + shape = A2ReMat.size) + + 1.j * csr_matrix((A2Imv, A2Imc, A2Imr), + 
shape = A2ImMat.size)) + if self.verbosity >= 20: + verbosityDepth("DEL", "Done assembling operator term.") if der == 0: - return self.As[0] + mu**2 * self.As[1] - return self.As[1] + return self.As[0] + mu * self.As[1] + mu**2 * self.As[2] + if der == 1: + return self.As[1] + 2 * mu * self.As[2] + return self.As[2] diff --git a/rrompy/hfengines/linear_mixed_problem/mixed_laplace_base_problem_engine.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_problem_engine.py similarity index 65% rename from rrompy/hfengines/linear_mixed_problem/mixed_laplace_base_problem_engine.py rename to rrompy/hfengines/vector_linear_problem/linear_elasticity_problem_engine.py index f9b5b33..46131c4 100644 --- a/rrompy/hfengines/linear_mixed_problem/mixed_laplace_base_problem_engine.py +++ b/rrompy/hfengines/vector_linear_problem/linear_elasticity_problem_engine.py @@ -1,327 +1,361 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from scipy.sparse import csr_matrix import fenics as fen -from rrompy.hfengines.base.mixed_problem_engine_base import \ - MixedProblemEngineBase +from rrompy.hfengines.base.vector_problem_engine_base import \ + VectorProblemEngineBase from rrompy.utilities.base.types import Np1D, ScOp from rrompy.utilities.base.fenics import fenZERO, fenZEROS, fenONE from rrompy.utilities.base import verbosityDepth -__all__ = ['MixedLaplaceBaseProblemEngine'] +__all__ = ['LinearElasticityProblemEngine'] -class MixedLaplaceBaseProblemEngine(MixedProblemEngineBase): +class LinearElasticityProblemEngine(VectorProblemEngineBase): """ - Solver for generic mixed Laplace problems. - - \nabla \cdot (a \nabla u) = f in \Omega - \int_Omega u = 0 - u = u0 on \Gamma_D - \partial_nu = g1 on \Gamma_N - \partial_nu + h u = g2 on \Gamma_R + Solver for generic linear elasticity problems. + - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) = f in \Omega + u = u0 on \Gamma_D + \partial_nu = g1 on \Gamma_N + \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. - V: Real mixed FE space. - u: Generic mixed trial functions for variational form evaluation. - v: Generic mixed test functions for variational form evaluation. + V: Real vector FE space. + u: Generic vector trial functions for variational form evaluation. + v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. energyNormMatrix: Scipy sparse matrix representing inner product over V. bsmu: Mu value of last bs evaluation. liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. liftedDirichletDatum: Dofs of Dirichlet datum lifting. mu0BC: Mu value of last Dirichlet datum lifting. degree_threshold: Threshold for ufl expression interpolation degree. - omega: Value of omega. - diffusivity: Value of a. + lambda_: Value of lambda_. 
+ mu_: Value of mu_. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). A0: Scipy sparse array representation (in CSC format) of A0. b0: Numpy array representation of b0. dsToBeSet: Whether ds needs to be set. """ def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10): super().__init__(degree_threshold = degree_threshold, verbosity = verbosity) - self.omega = 0. - self.diffusivity = fenONE - self.forcingTerm = fenZERO - self.DirichletDatum = fenZEROS(2) - self.NeumannDatum = fenZERO - self.RobinDatumG = fenZERO + self.lambda_ = fenONE + self.mu_ = fenONE + self.forcingTerm = fenZEROS(self.V.mesh().topology().dim()) + self.DirichletDatum = fenZEROS(self.V.mesh().topology().dim()) + self.NeumannDatum = fenZEROS(self.V.mesh().topology().dim()) + self.RobinDatumG = fenZEROS(self.V.mesh().topology().dim()) self.RobinDatumH = fenZERO @property def V(self): """Value of V.""" return self._V @V.setter def V(self, V): - MixedProblemEngineBase.V.fset(self, V) + VectorProblemEngineBase.V.fset(self, V) + self.forcingTerm = fenZEROS(self.V.mesh().topology().dim()) + self.DirichletDatum = fenZEROS(self.V.mesh().topology().dim()) + self.NeumannDatum = fenZEROS(self.V.mesh().topology().dim()) + self.RobinDatumG = fenZEROS(self.V.mesh().topology().dim()) self.dsToBeSet = True @property - def diffusivity(self): - """Value of a.""" - return self._diffusivity - @diffusivity.setter - def diffusivity(self, diffusivity): + def lambda_(self): + """Value of lambda_.""" + return self._lambda_ + @lambda_.setter + def lambda_(self, lambda_): self.resetAs() - if not isinstance(diffusivity, (list, tuple,)): - diffusivity = [diffusivity, fenZERO] - self._diffusivity = diffusivity + if not isinstance(lambda_, (list, tuple,)): + lambda_ = [lambda_, fenZERO] + self._lambda_ = lambda_ + + @property + def mu_(self): + """Value of mu_.""" + return self._mu_ + @mu_.setter + def mu_(self, mu_): + self.resetAs() + if not isinstance(mu_, (list, tuple,)): + mu_ = [mu_, fenZERO] + self._mu_ = mu_ @property def forcingTerm(self): """Value of f.""" return self._forcingTerm @forcingTerm.setter def forcingTerm(self, forcingTerm): self.resetbs() if not isinstance(forcingTerm, (list, tuple,)): - forcingTerm = [forcingTerm, fenZERO] + forcingTerm = [forcingTerm, + fenZEROS(self.V.mesh().topology().dim())] self._forcingTerm = forcingTerm @property def DirichletDatum(self): """Value of u0.""" return self._DirichletDatum @DirichletDatum.setter def DirichletDatum(self, DirichletDatum): self.resetbs() if not isinstance(DirichletDatum, (list, tuple,)): - DirichletDatum = [DirichletDatum, fenZEROS(2)] + DirichletDatum = [DirichletDatum, + fenZEROS(self.V.mesh().topology().dim())] self._DirichletDatum = DirichletDatum @property def NeumannDatum(self): """Value of g1.""" return self._NeumannDatum @NeumannDatum.setter def NeumannDatum(self, NeumannDatum): self.resetbs() if not isinstance(NeumannDatum, (list, tuple,)): - NeumannDatum = [NeumannDatum, fenZERO] + NeumannDatum = [NeumannDatum, + fenZEROS(self.V.mesh().topology().dim())] self._NeumannDatum = NeumannDatum @property def RobinDatumG(self): """Value of g2.""" return self._RobinDatumG @RobinDatumG.setter def RobinDatumG(self, RobinDatumG): self.resetbs() if not isinstance(RobinDatumG, 
(list, tuple,)): - RobinDatumG = [RobinDatumG, fenZERO] + RobinDatumG = [RobinDatumG, + fenZEROS(self.V.mesh().topology().dim())] self._RobinDatumG = RobinDatumG @property def RobinDatumH(self): """Value of h.""" return self._RobinDatumH @RobinDatumH.setter def RobinDatumH(self, RobinDatumH): self.resetAs() if not isinstance(RobinDatumH, (list, tuple,)): RobinDatumH = [RobinDatumH, fenZERO] self._RobinDatumH = RobinDatumH @property def DirichletBoundary(self): """Function handle to DirichletBoundary.""" return self.BCManager.DirichletBoundary @DirichletBoundary.setter def DirichletBoundary(self, DirichletBoundary): self.resetAs() self.resetbs() self.BCManager.DirichletBoundary = DirichletBoundary @property def NeumannBoundary(self): """Function handle to NeumannBoundary.""" return self.BCManager.NeumannBoundary @NeumannBoundary.setter def NeumannBoundary(self, NeumannBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.NeumannBoundary = NeumannBoundary @property def RobinBoundary(self): """Function handle to RobinBoundary.""" return self.BCManager.RobinBoundary @RobinBoundary.setter def RobinBoundary(self, RobinBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.RobinBoundary = RobinBoundary def autoSetDS(self): """Set FEniCS boundary measure based on boundary function handles.""" if self.dsToBeSet: if self.verbosity >= 20: verbosityDepth("INIT", "Initializing boundary measures.", end = "") NB = self.NeumannBoundary RB = self.RobinBoundary - class NBoundary(fen.SubDomain): - def inside(self, x, on_boundary): - return NB(x, on_boundary) - class RBoundary(fen.SubDomain): - def inside(self, x, on_boundary): - return RB(x, on_boundary) boundary_markers = fen.MeshFunction("size_t", self.V.mesh(), self.V.mesh().topology().dim() - 1) - NBoundary().mark(boundary_markers, 0) - RBoundary().mark(boundary_markers, 1) + NB.mark(boundary_markers, 0) + RB.mark(boundary_markers, 1) self.ds = fen.Measure("ds", domain = self.V.mesh(), subdomain_data = boundary_markers) self.dsToBeSet = False if self.verbosity >= 20: verbosityDepth("DEL", " Done.", inline = True) - def buildEnergyNormForm(self): # H1_omega + def buildEnergyNormForm(self): # energy norm """ Build sparse matrix (in CSR format) representative of scalar product. """ if self.verbosity >= 20: verbosityDepth("INIT", "Assembling energy matrix.", end = "") - a = (fen.dot(fen.grad(self.u[0]), fen.grad(self.v[0])) - + np.abs(self.omega)**2 * fen.dot(self.u[0], self.v[0])) - normMatFen = fen.assemble(a * fen.dx) + lambda_Re, _ = self.lambda_ + mu_Re, _ = self.mu_ + termNames = ["lambda_", "mu_"] + pars = self.iterReduceQuadratureDegree(zip( + [lambda_Re, mu_Re], + [x + "Real" for x in termNames])) + epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) + sigma = lambda u, l_, m_: ( + l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + + 2. 
* m_ * epsilon(u)) + normMatFen = fen.inner(sigma(self.u, lambda_Re, mu_Re), + epsilon(self.v)) * fen.dx + normMatFen = fen.assemble(normMatFen, + form_compiler_parameters = pars) normMat = fen.as_backend_type(normMatFen).mat() self.energyNormMatrix = csr_matrix(normMat.getValuesCSR()[::-1], shape = normMat.size) if self.verbosity >= 20: verbosityDepth("DEL", " Done.", inline = True) def A(self, mu:complex, der : int = 0) -> ScOp: """Assemble (derivative of) operator of linear system.""" Anull = self.checkAInBounds(der) if Anull is not None: return Anull self.autoSetDS() if self.As[0] is None: if self.verbosity >= 20: verbosityDepth("INIT", "Assembling operator term A0.") - DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(2), - self.DirichletBoundary) - aRe, aIm = self.diffusivity + DirichletBC0 = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) + lambda_Re, lambda_Im = self.lambda_ + mu_Re, mu_Im = self.mu_ hRe, hIm = self.RobinDatumH - termNames = ["diffusivity", "RobinDatumH"] + termNames = ["lambda_", "mu_", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( - [aRe, hRe], + [lambda_Re, mu_Re, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( - [aIm, hIm], + [lambda_Im, mu_Re, hIm], [x + "Imag" for x in termNames])) - a0Re = ((aRe * fen.dot(fen.grad(self.u[0]), fen.grad(self.v[0])) - + self.u[0] * self.v[1] + self.u[1] * self.v[0]) * fen.dx - + hRe * fen.dot(self.u[0], self.v[0]) * self.ds(1)) - a0Im = (aIm * fen.dot(fen.grad(self.u[0]), fen.grad(self.v[0])) \ - * fen.dx - + hIm * fen.dot(self.u[0], self.v[0]) * self.ds(1)) + epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) + sigma = lambda u, l_, m_: ( + l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + + 2.
* m_ * epsilon(u)) + a0Re = (fen.inner(sigma(self.u, lambda_Re, mu_Re), + epsilon(self.v)) * fen.dx + + hRe * fen.inner(self.u, self.v) * self.ds(1)) + a0Im = (fen.inner(sigma(self.u, lambda_Im, mu_Im), + epsilon(self.v)) * fen.dx + + hIm * fen.inner(self.u, self.v) * self.ds(1)) A0Re = fen.assemble(a0Re, form_compiler_parameters = parsRe) A0Im = fen.assemble(a0Im, form_compiler_parameters = parsIm) DirichletBC0.apply(A0Re) DirichletBC0.zero(A0Im) A0ReMat = fen.as_backend_type(A0Re).mat() A0ImMat = fen.as_backend_type(A0Im).mat() A0Rer, A0Rec, A0Rev = A0ReMat.getValuesCSR() A0Imr, A0Imc, A0Imv = A0ImMat.getValuesCSR() self.As[0] = (csr_matrix((A0Rev, A0Rec, A0Rer), shape = A0ReMat.size) + 1.j * csr_matrix((A0Imv, A0Imc, A0Imr), shape = A0ImMat.size)) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling operator term.") return self.As[0] def b(self, mu:complex, der : int = 0, homogeneized : bool = False) -> Np1D: """Assemble (derivative of) RHS of linear system.""" bnull = self.checkbInBounds(der, homogeneized) if bnull is not None: return bnull if homogeneized and not np.isclose(self.mu0BC, mu): self.u0BC = self.liftDirichletData(mu) if (max(self.nbs, self.nAs * homogeneized) > 1 and not np.isclose(self.bsmu, mu)): self.bsmu = mu self.resetbs() if self.bs[homogeneized][der] is None: self.autoSetDS() if self.verbosity >= 20: verbosityDepth("INIT", "Assembling forcing term b{}.".format( der)) if der < self.nbs: fRe, fIm = self.forcingTerm g1Re, g1Im = self.NeumannDatum g2Re, g2Im = self.RobinDatumG else: - fRe, fIm = fenZERO, fenZERO - g1Re, g1Im = fenZERO, fenZERO - g2Re, g2Im = fenZERO, fenZERO + fRe = fenZEROS(self.V.mesh().topology().dim()) + fIm = fenZEROS(self.V.mesh().topology().dim()) + g1Re = fenZEROS(self.V.mesh().topology().dim()) + g1Im = fenZEROS(self.V.mesh().topology().dim()) + g2Re = fenZEROS(self.V.mesh().topology().dim()) + g2Im = fenZEROS(self.V.mesh().topology().dim()) termNames = ["forcingTerm", "NeumannDatum", "RobinDatumG"] parsRe = self.iterReduceQuadratureDegree(zip( [fRe, g1Re, g2Re], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [fIm, g1Im, g2Im], [x + "Imag" for x in termNames])) - L0Re = (fen.dot(fRe, self.v[0]) * fen.dx - + fen.dot(g1Re, self.v[0]) * self.ds(0) - + fen.dot(g2Re, self.v[0]) * self.ds(1)) - L0Im = (fen.dot(fIm, self.v[0]) * fen.dx - + fen.dot(g1Im, self.v[0]) * self.ds(0) - + fen.dot(g2Im, self.v[0]) * self.ds(1)) + L0Re = (fen.inner(fRe, self.v) * fen.dx + + fen.inner(g1Re, self.v) * self.ds(0) + + fen.inner(g2Re, self.v) * self.ds(1)) + L0Im = (fen.inner(fIm, self.v) * fen.dx + + fen.inner(g1Im, self.v) * self.ds(0) + + fen.inner(g2Im, self.v) * self.ds(1)) b0Re = fen.assemble(L0Re, form_compiler_parameters = parsRe) b0Im = fen.assemble(L0Im, form_compiler_parameters = parsIm) if homogeneized: Ader = self.A(mu, der) b0Re[:] -= np.real(Ader.dot(self.u0BC)) b0Im[:] -= np.imag(Ader.dot(self.u0BC)) - DBCR = fen.DirichletBC(self.V, fenZEROS(2), - self.DirichletBoundary) - DBCI = fen.DirichletBC(self.V, fenZEROS(2), - self.DirichletBoundary) + DBCR = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) + DBCI = fen.DirichletBC(self.V, + fenZEROS(self.V.mesh().topology().dim()), + self.DirichletBoundary) else: DBCR = fen.DirichletBC(self.V, self.DirichletDatum[0], self.DirichletBoundary) DBCI = fen.DirichletBC(self.V, self.DirichletDatum[1], self.DirichletBoundary) DBCR.apply(b0Re) DBCI.apply(b0Im) self.bs[homogeneized][der] = np.array(b0Re[:] + 1.j * b0Im[:], dtype = 
np.complex) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling forcing term.") return self.bs[homogeneized][der] diff --git a/rrompy/reduction_methods/base/pade_utils.py b/rrompy/reduction_methods/base/pade_utils.py index 958d928..127ca65 100644 --- a/rrompy/reduction_methods/base/pade_utils.py +++ b/rrompy/reduction_methods/base/pade_utils.py @@ -1,44 +1,44 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.utilities.base.types import Np1D from rrompy.utilities.warning_manager import warn __all__ = ['checkRobustTolerance'] def checkRobustTolerance(ev:Np1D, E:int, tol:float) -> dict: """ Perform robustness check on eigen-/singular values and return reduced parameters with warning. """ N = len(ev) - 1 ts = tol * np.linalg.norm(ev) diff = N - np.sum(np.abs(ev) >= ts) if diff <= 0: return {} Enew = E - diff - Nnew = min(N, Enew) + Nnew = min(np.sum(np.abs(ev) >= ts), Enew) if Nnew == N: strN = "" else: strN = "N from {} to {} and ".format(N, Nnew) - warn(("Smallest {} eigenvalues below tolerance.\nReducing {}E from {} to " - "{}.").format(diff + 1, strN, E, Enew)) + warn(("Smallest {} eigenvalues below tolerance. Reducing {}E from {} " + "to {}.").format(diff + 1, strN, E, Enew)) newParameters = {"N" : Nnew, "E" : Enew} return newParameters diff --git a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py index a155677..3767238 100644 --- a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py +++ b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py @@ -1,593 +1,606 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import copy import numpy as np from rrompy.reduction_methods.base import (checkRobustTolerance, setupFitCallables) from .generic_approximant_lagrange_greedy import ( GenericApproximantLagrangeGreedy) from rrompy.reduction_methods.lagrange import ApproximantLagrangePade from rrompy.utilities.base.types import DictAny, List, HFEng from rrompy.utilities.base import purgeDict, verbosityDepth, customFit from rrompy.utilities.warning_manager import warn __all__ = ['ApproximantLagrangePadeGreedy'] class ApproximantLagrangePadeGreedy(GenericApproximantLagrangeGreedy, ApproximantLagrangePade): """ ROM greedy Pade' interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'basis': type of basis for interpolation; allowed values include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to 'MONOMIAL'; - 'Delta': difference between M and N in rational approximant; defaults to 0; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'errorEstimatorKind': kind of error estimator; available values include 'EXACT' and 'SIMPLIFIED'; defaults to 'SIMPLIFIED'; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - 'nTrainingPoints': number of training points; defaults to maxIter / refinementRatio; - 'nTestPoints': number of starting test points; defaults to 1; - 'trainingSetGenerator': training sample points generator; defaults to uniform sampler within muBounds; - 'interpRcond': tolerance for interpolation via numpy.polyfit; defaults to None; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'basis': type of basis for interpolation; - 'Delta': difference between M and N in rational approximant; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'errorEstimatorKind': kind of error estimator; - 'maxIter': maximum number of greedy steps; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; - 'nTrainingPoints': number of training points; - 'nTestPoints': number of starting test points; - 'trainingSetGenerator': training sample points generator; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. errorEstimatorKind: kind of error estimator. maxIter: maximum number of greedy steps. 
refinementRatio: ratio of training points to be exhausted before training set refinement. nTrainingPoints: number of training points. nTestPoints: number of starting test points. trainingSetGenerator: training sample points generator. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust Pade' denominator management. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. """ def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10): self._preInit() self._addParametersToList(["basis", "Delta", "errorEstimatorKind", "interpRcond", "robustTol"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity) self._postInit() @property def approxParameters(self): """ Value of approximant parameters. Its assignment may change robustTol. """ return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["basis", "Delta", "errorEstimatorKind", "interpRcond", "robustTol"], True, True, baselevel = 1) if "Delta" in list(approxParameters.keys()): self._Delta = approxParameters["Delta"] elif hasattr(self, "Delta"): self._Delta = self.Delta else: self._Delta = 0 GenericApproximantLagrangeGreedy.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) self.Delta = self.Delta if "basis" in keyList or not hasattr(self, "_val"): if "basis" in keyList: kind = approxParameters["basis"] else: kind = "MONOMIAL" setupFit = setupFitCallables(kind) for x in setupFit: super().__setattr__("_" + x, setupFit[x]) if "errorEstimatorKind" in keyList: self.errorEstimatorKind = approxParameters["errorEstimatorKind"] elif hasattr(self, "errorEstimatorKind"): self.errorEstimatorKind = self.errorEstimatorKind else: self.errorEstimatorKind = "SIMPLIFIED" if "interpRcond" in keyList: self.interpRcond = approxParameters["interpRcond"] elif hasattr(self, "interpRcond"): self.interpRcond = self.interpRcond else: self.interpRcond = None if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif hasattr(self, "robustTol"): self.robustTol = self.robustTol else: self.robustTol = 0 @property def Delta(self): """Value of Delta.""" return self._Delta @Delta.setter def Delta(self, Delta): if not np.isclose(Delta, np.floor(Delta)): raise ArithmeticError("Delta must be an integer.") if Delta < 0: warn(("Error estimator unreliable for Delta < 0. Overloading of " "errorEstimator is suggested.")) else: Deltamin = (max(self.HFEngine.nbs, self.HFEngine.nAs * self.homogeneized) - 1 - 1 * (self.HFEngine.nAs > 1)) if Delta < Deltamin: warn(("Method may be unreliable for selected Delta. 
Suggested " "minimal value of Delta: {}.").format(Deltamin)) self._Delta = Delta self._approxParameters["Delta"] = self.Delta @property def errorEstimatorKind(self): """Value of errorEstimatorKind.""" return self._errorEstimatorKind @errorEstimatorKind.setter def errorEstimatorKind(self, errorEstimatorKind): errorEstimatorKind = errorEstimatorKind.upper() if errorEstimatorKind not in ["EXACT", "SIMPLIFIED"]: warn(("Error estimator kind not recognized. Overriding to " "'SIMPLIFIED'.")) errorEstimatorKind = "SIMPLIFIED" self._errorEstimatorKind = errorEstimatorKind self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= np.abs(self.Delta): warn(("nTestPoints must be at least abs(Delta) + 1. Increasing " "value to abs(Delta) + 1.")) nTestPoints = np.abs(self.Delta) + 1 if not np.isclose(nTestPoints, np.int(nTestPoints)): raise ArithmeticError("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "nTestPoints"): nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() self.resbb = None self.resAb = None self.resAA = None self.As = None self.bs = None def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """Standard residual-based error estimator.""" self.setupApprox() self.initEstNormer() PM = self.P[:, -1] if np.any(np.isnan(PM)) or np.any(np.isinf(PM)): err = np.empty(len(mus)) err[:] = np.inf return err nAs = self.HFEngine.nAs - 1 nbs = max(self.HFEngine.nbs - 1, nAs * self.homogeneized) radiusmus = self.radiusPade(mus) radiussmus = self.radiusPade(self.mus) musTile = np.tile(radiusmus.reshape(-1, 1), [1, self.S]) smusCol = radiussmus.reshape(1, -1) num = np.prod(musTile[:, : self.S] - smusCol, axis = 1) den = self.getQVal(mus) self.assembleReducedResidualBlocks() vanderBase = np.polynomial.polynomial.polyvander(radiusmus, max(nAs, nbs)).T radiusb0 = vanderBase[: nbs + 1, :] # 'ij,jk,ik->k', resbb, radiusb0, radiusb0.conj() b0resb0 = np.sum(self.resbb.dot(radiusb0) * radiusb0.conj(), axis = 0) RHSnorms = np.power(np.abs(b0resb0), .5) vanderBase = vanderBase[: -1, :] delta = self.S - self.N - 1 nbsEff = max(0, nbs - delta) if self.errorEstimatorKind == "SIMPLIFIED": radiusA = np.tensordot(PM, vanderBase[: nAs, :], 0) if delta == 0: radiusb = np.abs(self.Q[-1]) * radiusb0[: -1, :] else: #if self.errorEstimatorKind == "EXACT": momentQ = np.zeros(nbsEff, dtype = np.complex) momentQu = np.zeros((self.S, nAs), dtype = np.complex) radiusbTen = np.zeros((nbsEff, nbsEff, len(mus)), dtype = np.complex) radiusATen = np.zeros((nAs, nAs, len(mus)), dtype = np.complex) if nbsEff > 0: momentQ[0] = self.Q[-1] radiusbTen[0, :, :] = vanderBase[: nbsEff, :] momentQu[:, 0] = self.P[:, -1] radiusATen[0, :, :] = vanderBase[: nAs, :] Qvals = self.getQVal(self.mus) for k in range(1, max(nAs, nbs * (nbsEff > 0))): Qvals = Qvals * radiussmus if k > delta and k < nbs: momentQ[k - delta] = self._fitinv.dot(Qvals) radiusbTen[k - delta, k :, :] = ( radiusbTen[0, : delta - k, :]) if k < nAs: momentQu[:, k] = Qvals * self._fitinv radiusATen[k, k :, :] = radiusATen[0, : - k, :] if self.POD and nAs > 1: momentQu[:, 1 :] = self.samplingEngine.RPOD.dot( momentQu[:, 1 :]) radiusA = 
np.tensordot(momentQu, radiusATen, 1) if nbsEff > 0: radiusb = np.tensordot(momentQ, radiusbTen, 1) if ((self.errorEstimatorKind == "SIMPLIFIED" and delta == 0) or (self.errorEstimatorKind == "EXACT" and nbsEff > 0)): # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj() ff = np.sum(self.resbb[delta + 1 :, delta + 1 :].dot(radiusb) * radiusb.conj(), axis = 0) # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj() Lf = np.sum(np.tensordot(self.resAb[delta :, :, :], radiusA, 2) * radiusb.conj(), axis = 0) else: ff, Lf = 0., 0. # 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj() LL = np.sum(np.tensordot(self.resAA, radiusA, 2) * radiusA.conj(), axis = (0, 1)) jOpt = np.power(np.abs(ff - 2. * np.real(Lf) + LL), .5) return self._domcoeff(self.S - 1) * jOpt * np.abs(num / den) / RHSnorms def setupApprox(self): """ Compute Pade' interpolant. SVD-based robust eigenvalue management. """ if not self.checkComputedApprox(): if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". format(self.name())) self.computeScaleFactor() self.S = len(self.mus) self._M = self.S - 1 self._N = self.S - 1 if self.Delta < 0: self._M += self.Delta else: self._N -= self.Delta if min(self.M, self.N) < 0: if self.verbosity >= 5: verbosityDepth("MAIN", "Minimal sample size not achieved.") self.Q = np.empty(max(self.N, 0) + 1, dtype = np.complex) self.P = np.empty((len(self.mus), max(self.M, 0) + 1), dtype = np.complex) self.Q[:] = np.nan self.P[:] = np.nan self.lastApproxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", ("Aborting computation of " "approximant.\n")) return self.greedy() if self.N > 0: if self.verbosity >= 7: verbosityDepth("INIT", ("Starting computation of " "denominator.")) TS = self._vander(self.radiusPade(self.mus), self.S - 1) - while self.N > 0: - RHS = np.zeros(self.S) - RHS[-1] = 1. - fitOut = customFit(TS.T, RHS, full = True, - rcond = self.interpRcond) - if self.verbosity >= 2: - verbosityDepth("MAIN", ("Fitting {} samples with " - "degree {} through {}... " - "Conditioning of system: " - "{:.4e}.").format(self.S, + RHS = np.zeros(self.S) + RHS[-1] = 1. + fitOut = customFit(TS.T, RHS, full = True, + rcond = self.interpRcond) + if self.verbosity >= 2: + verbosityDepth("MAIN", ("Fitting {} samples with " + "degree {} through {}... " + "Conditioning of system: " + "{:.4e}.").format(self.S, self.S - 1, self._fitname, fitOut[1][2][0] / fitOut[1][2][-1])) - if fitOut[1][1] < self.S: - warn(("Polyfit is poorly conditioned. Starting " - "preemptive termination of computation of " - "approximant.")) - self.Q = np.empty(max(self.N, 0) + 1, - dtype = np.complex) - self.P = np.empty((len(self.mus), max(self.M, 0) + 1), - dtype = np.complex) - self.Q[:] = np.nan - self.P[:] = np.nan - self.lastApproxParameters = copy(self.approxParameters) - if hasattr(self, "lastSolvedApp"): - del self.lastSolvedApp - if self.verbosity >= 7: - verbosityDepth("DEL", ("Aborting computation of " - "denominator.")) - if self.verbosity >= 5: - verbosityDepth("DEL", ("Aborting computation of " - "approximant.\n")) - return - self._fitinv = fitOut[0] - G = (TS[:, : self.N + 1].T * fitOut[0]).T + if fitOut[1][1] < self.S: + warn(("Polyfit is poorly conditioned. 
Starting " + "preemptive termination of computation of " + "approximant.")) + self.Q = np.empty(max(self.N, 0) + 1, + dtype = np.complex) + self.P = np.empty((len(self.mus), max(self.M, 0) + 1), + dtype = np.complex) + self.Q[:] = np.nan + self.P[:] = np.nan + self.lastApproxParameters = copy(self.approxParameters) + if hasattr(self, "lastSolvedApp"): + del self.lastSolvedApp + if self.verbosity >= 7: + verbosityDepth("DEL", ("Aborting computation of " + "denominator.")) + if self.verbosity >= 5: + verbosityDepth("DEL", ("Aborting computation of " + "approximant.\n")) + return + self._fitinv = fitOut[0] + while self.N > 0: + G = (TS[:, : self.N + 1].T * self._fitinv).T if self.POD: if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square " "root of gramian matrix.")) G = self.samplingEngine.RPOD.dot(G) _, s, eV = np.linalg.svd(G, full_matrices = False) ev = s[::-1] eV = eV[::-1, :].conj().T if self.verbosity >= 2: try: condev = s[0] / s[-1] except: condev = np.inf verbosityDepth("MAIN", ("Solved svd problem of " "size {} x {} with " "condition number " "{:.4e}.").format( self.S, self.N + 1, condev)) else: if self.verbosity >= 10: verbosityDepth("INIT", "Building gramian matrix.", end = "") G = self.samplingEngine.samples.dot(G) G2 = self.HFEngine.innerProduct(G, G) if self.verbosity >= 10: verbosityDepth("DEL", "Done building gramian.", inline = True) if self.verbosity >= 7: verbosityDepth("INIT", ("Solving eigenvalue " "problem for gramian " "matrix.")) ev, eV = np.linalg.eigh(G2) if self.verbosity >= 2: try: condev = ev[-1] / ev[0] except: condev = np.inf verbosityDepth("MAIN", ("Solved eigenvalue " "problem of size {} with " "condition number " "{:.4e}.").format( self.N + 1, condev)) if self.verbosity >= 7: verbosityDepth("DEL", ("Done solving eigenvalue " "problem.")) - newParameters = checkRobustTolerance(ev, self.M, - self.robustTol) - if not newParameters: - break - self._N = newParameters["N"] - self._M = newParameters["E"] + Nstable = np.sum(np.abs(ev) >= self.robustTol + * np.linalg.norm(ev)) + if self.N <= Nstable: break + if self.verbosity >= 2: + verbosityDepth("MAIN", ("Smallest {} eigenvalues " + "below tolerance. Reducing N " + "to {}.").format( + self.N - Nstable + 1, + Nstable)) + self._N = Nstable if self.N <= 0: self._N = 0 eV = np.ones((1, 1)) self.Q = eV[:, 0] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.") else: self.Q = np.ones(1, dtype = np.complex) if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.") self.lastApproxParameters = copy(self.approxParameters) Qevaldiag = np.diag(self.getQVal(self.mus)) while self.M >= 0: fitVander = self._vander(self.radiusPade(self.mus), self.M) - fitOut = customFit(fitVander, Qevaldiag, full = True, + w = None + if self.M == self.S - 1: w = "AUTO" + fitOut = customFit(fitVander, Qevaldiag, full = True, w = w, rcond = self.interpRcond) + if self.verbosity >= 2: + verbosityDepth("MAIN", ("Fitting {} samples with " + "degree {} through {}... " + "Conditioning of system: " + "{:.4e}.").format(self.S, + self.M, self._fitname, + fitOut[1][2][0] / fitOut[1][2][-1])) if fitOut[1][1] == self.M + 1: P = fitOut[0].T break warn(("Polyfit is poorly conditioned. Reducing M from {} to " "{}. Exact snapshot interpolation not guaranteed.")\ .format(self.M, fitOut[1][1] - 1)) self._M = fitOut[1][1] - 1 if self.M <= 0: raise Exception(("Instability in computation of numerator. 
" "Aborting.")) self.P = np.atleast_2d(P) if self.POD: self.P = self.samplingEngine.RPOD.dot(self.P) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.") self.lastApproxParameters = copy(self.approxParameters) if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.\n") def assembleReducedResidualBlocks(self): """Build affine blocks of reduced linear system through projections.""" self.initEstNormer() if self.As is None or self.bs is None: if self.verbosity >= 7: verbosityDepth("INIT", "Computing Taylor blocks of system.") nAs = self.HFEngine.nAs - 1 nbs = max(self.HFEngine.nbs, (nAs + 1) * self.homogeneized) self.As = [self.HFEngine.A(self.mu0, j + 1) for j in range(nAs)] self.bs = [self.HFEngine.b(self.mu0, j, self.homogeneized) for j in range(nbs)] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing Taylor blocks.") computeResbb = self.resbb is None computeResAb = self.resAb is None or self.resAb.shape[1] != self.S computeResAA = self.resAA is None or self.resAA.shape[0] != self.S samples = self.samplingEngine.samples if computeResbb or computeResAb or computeResAA: if self.verbosity >= 7: verbosityDepth("INIT", "Projecting Taylor terms of residual.") nAs = len(self.As) nbs = len(self.bs) - 1 if computeResbb: self.resbb = np.empty((nbs + 1, nbs + 1), dtype = np.complex) for i in range(nbs + 1): Mbi = self.scaleFactor ** i * self.bs[i] for j in range(i): Mbj = self.scaleFactor ** j * self.bs[j] self.resbb[i, j] = self.estNormer.innerProduct(Mbj, Mbi) self.resbb[i, i] = self.estNormer.innerProduct(Mbi, Mbi) for i in range(nbs + 1): for j in range(i + 1, nbs + 1): self.resbb[i, j] = self.resbb[j][i].conj() if computeResAb: if self.resAb is None: self.resAb = np.empty((nbs, self.S, nAs), dtype = np.complex) for i in range(nbs): Mbi = self.scaleFactor ** (i + 1) * self.bs[i + 1] for j in range(nAs): MAj = (self.scaleFactor ** (j + 1) * self.As[j].dot(samples)) self.resAb[i, :, j] = self.estNormer.innerProduct( MAj, Mbi) else: Sold = self.resAb.shape[1] if Sold > self.S: self.resAb = self.resAb[:, : self.S, :] else: resAbNew = np.empty((nbs, self.S, nAs), dtype = np.complex) resAbNew[:, : Sold, :] = self.resAb self.resAb = resAbNew for i in range(nbs): Mbi = self.scaleFactor ** (i + 1) * self.bs[i + 1] for j in range(nAs): MAj = (self.scaleFactor ** (j + 1) * self.As[j].dot(samples[:, Sold :])) self.resAb[i, Sold :, j] = ( self.estNormer.innerProduct(MAj, Mbi)) if computeResAA: if self.resAA is None: self.resAA = np.empty((self.S, nAs, self.S, nAs), dtype = np.complex) for i in range(nAs): MAi = (self.scaleFactor ** (i + 1) * self.As[i].dot(samples)) for j in range(i): MAj = (self.scaleFactor ** (j + 1) * self.As[j].dot(samples)) self.resAA[:, i, :, j] = ( self.estNormer.innerProduct(MAj, MAi)) self.resAA[:, i, :, i] = self.estNormer.innerProduct( MAi, MAi) for i in range(nAs): for j in range(i + 1, nAs): self.resAA[:, i, :, j] = ( self.resAA[:, j, :, i].conj()) else: Sold = self.resAA.shape[0] if Sold > self.S: self.resAA = self.resAA[: self.S, :, : self.S, :] else: resAANew = np.empty((self.S, nAs, self.S, nAs), dtype = np.complex) resAANew[: Sold, :, : Sold, :] = self.resAA self.resAA = resAANew for i in range(nAs): MAi = (self.scaleFactor ** (i + 1) * self.As[i].dot(samples)) for j in range(i): MAj = (self.scaleFactor ** (j + 1) * self.As[j].dot(samples)) self.resAA[: Sold, i, Sold :, j] = ( self.estNormer.innerProduct(MAj[:, Sold :], MAi[:, : Sold])) self.resAA[Sold :, i, : Sold, 
j] = ( self.estNormer.innerProduct(MAj[:, : Sold], MAi[:, Sold :])) self.resAA[Sold :, i, Sold :, j] = ( self.estNormer.innerProduct(MAj[:, Sold :], MAi[:, Sold :])) self.resAA[: Sold, i, Sold :, i] = ( self.estNormer.innerProduct(MAi[:, Sold :], MAi[:, : Sold])) self.resAA[Sold :, i, : Sold, i] = ( self.resAA[: Sold, i, Sold :, i].conj().T) self.resAA[Sold :, i, Sold :, i] = ( self.estNormer.innerProduct(MAi[:, Sold :], MAi[:, Sold :])) for i in range(nAs): for j in range(i + 1, nAs): self.resAA[:, i, :, j] = ( self.resAA[:, j, :, i].conj()) if self.verbosity >= 7: verbosityDepth("DEL", ("Done setting up Taylor " "decomposition of residual.")) diff --git a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py index 81a58b3..bd7e3b5 100644 --- a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py +++ b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py @@ -1,476 +1,504 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.reduction_methods.lagrange.generic_approximant_lagrange import ( GenericApproximantLagrange) from rrompy.utilities.base.types import DictAny, HFEng, Tuple, List from rrompy.utilities.base import purgeDict, verbosityDepth from rrompy.utilities.warning_manager import warn __all__ = ['GenericApproximantLagrangeGreedy'] class GenericApproximantLagrangeGreedy(GenericApproximantLagrange): """ ROM greedy Lagrange interpolant computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; + - 'interactive': whether to interactively terminate greedy + algorithm; defaults to False; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - 'nTrainingPoints': number of training points; defaults to maxIter / refinementRatio; - 'nTestPoints': number of starting test points; defaults to 1; - 'trainingSetGenerator': training sample points generator; defaults to uniform sampler within muBounds; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. 
mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; - 'nTrainingPoints': number of training points; - 'nTestPoints': number of starting test points; - 'trainingSetGenerator': training sample points generator. - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. maxIter: maximum number of greedy steps. refinementRatio: ratio of training points to be exhausted before training set refinement. nTrainingPoints: number of training points. nTestPoints: number of starting test points. trainingSetGenerator: training sample points generator. robustTol: tolerance for robust Pade' denominator management. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. """ TOL_INSTABILITY = 1e-6 def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10): self._preInit() - self._addParametersToList(["muBounds","greedyTol", "maxIter", - "refinementRatio", "nTrainingPoints", - "nTestPoints", "trainingSetGenerator"]) + self._addParametersToList(["muBounds","greedyTol", "interactive", + "maxIter", "refinementRatio", + "nTrainingPoints", "nTestPoints", + "trainingSetGenerator"]) super(GenericApproximantLagrange, self).__init__( HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity) self._postInit() @property def approxParameters(self): """Value of approximant parameters. 
Its assignment may change S.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, - ["muBounds","greedyTol", "maxIter", + ["muBounds","greedyTol", + "interactive", "maxIter", "refinementRatio", "nTrainingPoints", "nTestPoints", "trainingSetGenerator"], True, True, baselevel = 1) GenericApproximant.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "muBounds" in keyList: self.muBounds = approxParameters["muBounds"] elif hasattr(self, "muBounds"): self.muBounds = self.muBounds else: self.muBounds = [[0.], [1.]] if "greedyTol" in keyList: self.greedyTol = approxParameters["greedyTol"] elif hasattr(self, "greedyTol"): self.greedyTol = self.greedyTol else: self.greedyTol = 1e-2 + if "interactive" in keyList: + self.interactive = approxParameters["interactive"] + elif hasattr(self, "interactive"): + self.interactive = self.interactive + else: + self.interactive = False if "maxIter" in keyList: self.maxIter = approxParameters["maxIter"] elif hasattr(self, "maxIter"): self.maxIter = self.maxIter else: self.maxIter = 1e2 if "refinementRatio" in keyList: self.refinementRatio = approxParameters["refinementRatio"] elif hasattr(self, "refinementRatio"): self.refinementRatio = self.refinementRatio else: self.refinementRatio = 0.2 if "nTrainingPoints" in keyList: self.nTrainingPoints = approxParameters["nTrainingPoints"] elif hasattr(self, "nTrainingPoints"): self.nTrainingPoints = self.nTrainingPoints else: self.nTrainingPoints = np.int(np.ceil(self.maxIter / self.refinementRatio)) if "nTestPoints" in keyList: self.nTestPoints = approxParameters["nTestPoints"] elif hasattr(self, "nTestPoints"): self.nTestPoints = self.nTestPoints else: self.nTestPoints = 1 if "trainingSetGenerator" in keyList: self.trainingSetGenerator = ( approxParameters["trainingSetGenerator"]) elif hasattr(self, "trainingSetGenerator"): self.trainingSetGenerator = self.trainingSetGenerator else: from rrompy.utilities.parameter_sampling import QuadratureSampler self.trainingSetGenerator = QuadratureSampler(self.muBounds, "UNIFORM") del QuadratureSampler @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): self._S = S @property def mus(self): """Value of mus.""" return self._mus @mus.setter def mus(self, mus): self._mus = np.array(mus, dtype = np.complex) @property def muBounds(self): """Value of muBounds.""" return self._muBounds @muBounds.setter def muBounds(self, muBounds): if len(muBounds) != 2: raise Exception("2 limits must be specified.") try: muBounds = muBounds.tolist() except: muBounds = list(muBounds) for j in range(2): try: len(muBounds[j]) except: muBounds[j] = np.array([muBounds[j]]) if len(muBounds[0]) != len(muBounds[1]): raise Exception("The bounds must have the same length.") self._muBounds = muBounds @property def greedyTol(self): """Value of greedyTol.""" return self._greedyTol @greedyTol.setter def greedyTol(self, greedyTol): if greedyTol < 0: raise ArithmeticError("greedyTol must be non-negative.") if hasattr(self, "greedyTol"): greedyTolold = self.greedyTol else: greedyTolold = -1 self._greedyTol = greedyTol self._approxParameters["greedyTol"] = self.greedyTol if greedyTolold != self.greedyTol: self.resetSamples() @property def maxIter(self): """Value of maxIter.""" return self._maxIter @maxIter.setter def maxIter(self, 
maxIter): if maxIter <= 0: raise ArithmeticError("maxIter must be positive.") if hasattr(self, "maxIter"): maxIterold = self.maxIter else: maxIterold = -1 self._maxIter = maxIter self._approxParameters["maxIter"] = self.maxIter if maxIterold != self.maxIter: self.resetSamples() @property def refinementRatio(self): """Value of refinementRatio.""" return self._refinementRatio @refinementRatio.setter def refinementRatio(self, refinementRatio): if refinementRatio <= 0. or refinementRatio > 1.: raise ArithmeticError(("refinementRatio must be between 0 " "(excluded) and 1.")) if hasattr(self, "refinementRatio"): refinementRatioold = self.refinementRatio else: refinementRatioold = -1 self._refinementRatio = refinementRatio self._approxParameters["refinementRatio"] = self.refinementRatio if refinementRatioold != self.refinementRatio: self.resetSamples() @property def nTrainingPoints(self): """Value of nTrainingPoints.""" return self._nTrainingPoints @nTrainingPoints.setter def nTrainingPoints(self, nTrainingPoints): if nTrainingPoints <= 1: raise ArithmeticError("nTrainingPoints must be greater than 1.") if not np.isclose(nTrainingPoints, np.int(nTrainingPoints)): raise ArithmeticError("nTrainingPoints must be an integer.") nTrainingPoints = np.int(nTrainingPoints) if hasattr(self, "nTrainingPoints"): nTrainingPointsold = self.nTrainingPoints else: nTrainingPointsold = -1 self._nTrainingPoints = nTrainingPoints self._approxParameters["nTrainingPoints"] = self.nTrainingPoints if nTrainingPointsold != self.nTrainingPoints: self.resetSamples() @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= 0: raise ArithmeticError("nTestPoints must be positive.") if not np.isclose(nTestPoints, np.int(nTestPoints)): raise ArithmeticError("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "nTestPoints"): nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() @property def trainingSetGenerator(self): """Value of trainingSetGenerator.""" return self._trainingSetGenerator @trainingSetGenerator.setter def trainingSetGenerator(self, trainingSetGenerator): if 'generatePoints' not in dir(trainingSetGenerator): raise Exception("trainingSetGenerator type not recognized.") if hasattr(self, '_trainingSetGenerator'): trainingSetGeneratorOld = self.trainingSetGenerator self._trainingSetGenerator = trainingSetGenerator self._approxParameters["trainingSetGenerator"] = ( self.trainingSetGenerator) if (not 'trainingSetGeneratorOld' in locals() or trainingSetGeneratorOld != self.trainingSetGenerator): self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() self._mus = [] def initEstNormer(self): """Initialize estimator norm engine.""" if not hasattr(self, "estNormer"): from rrompy.hfengines.base import ProblemEngineBase as PEB self.estNormer = PEB() # L2 norm self.estNormer.V = self.HFEngine.V self.estNormer.buildEnergyNormForm() def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """ Standard residual-based error estimator with explicit residual computation. 
""" self.setupApprox() nmus = len(mus) err = np.empty(nmus) if self.HFEngine.nbs == 1: RHS = self.getRHS(mus[0], homogeneized = self.homogeneized) RHSNorm = self.estNormer.norm(RHS) for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / RHSNorm else: for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) RHS = self.getRHS(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / self.estNormer.norm(RHS) return np.abs(err) def getMaxErrorEstimator(self) -> Tuple[float, int]: """ Compute maximum of (and index of maximum of) error estimator over training set. """ errorEstTrain = self.errorEstimator(self.muTrain) idxMaxEst = np.argmax(errorEstTrain) maxEst = errorEstTrain[idxMaxEst] return maxEst, idxMaxEst def greedyNextSample(self, muidx:int, plotEst : bool = False): """Compute next greedy snapshot of solution map.""" mu = self.muTrain[muidx] if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} to test " "set.").format( self.samplingEngine.nsamples + 1, mu)) self.mus = np.append(self.mus, mu) idxs = np.arange(len(self.muTrain)) mask = np.ones_like(idxs, dtype = bool) mask[muidx] = False idxs = idxs[mask] self.muTrain = self.muTrain[idxs] self.samplingEngine.nextSample(mu, homogeneized = self.homogeneized) errorEstTrain = self.errorEstimator(self.muTrain) muidx = np.argmax(errorEstTrain) maxErrorEst = errorEstTrain[muidx] mu = self.muTrain[muidx] if plotEst and not np.all(np.isinf(errorEstTrain)): from matplotlib import pyplot as plt plt.figure() plt.semilogy(np.real(self.muTrain), errorEstTrain, 'k') plt.semilogy(np.real(self.muTrain), self.greedyTol * np.ones(len(self.muTrain)), 'r--') plt.semilogy(np.real(self.mus), 2. * self.greedyTol * np.ones(len(self.mus)), '*m') plt.semilogy(np.real(mu), maxErrorEst, 'xr') plt.grid() plt.show() plt.close() return errorEstTrain, muidx, maxErrorEst, mu def greedy(self, plotEst : bool = False): """Compute greedy snapshots of solution map.""" if self.samplingEngine.samples is None: if self.verbosity >= 2: verbosityDepth("INIT", "Starting computation of snapshots.") self.resetSamples() self.mus, _ = self.trainingSetGenerator.generatePoints( self.nTestPoints) self.mus = np.array([x[0] for x in self.mus], dtype = np.complex) nTrain = self.nTrainingPoints muTrainBase, _ = self.trainingSetGenerator.generatePoints(nTrain) self.muTrain = np.empty(len(muTrainBase) + 1, dtype = np.complex) j = 0 for mu in muTrainBase: if not np.any(np.isclose(self.mus, mu)): self.muTrain[j] = mu[0] j += 1 self.muTrain[j] = self.mus[-1] self.muTrain = self.muTrain[: j + 1] self.mus = self.mus[:-1] for j in range(len(self.mus)): if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} " "to test set.").format( self.samplingEngine.nsamples + 1, self.mus[j])) self.samplingEngine.nextSample(self.mus[j], homogeneized = self.homogeneized) errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample(-1, plotEst) if self.verbosity >= 2: verbosityDepth("MAIN", ("Uniform error estimate {:.4e}.")\ .format(maxErrorEst)) while (self.samplingEngine.nsamples < self.maxIter - and (maxErrorEst > self.greedyTol - or self.samplingEngine.nsamples < self.nTestPoints)): + and maxErrorEst > self.greedyTol): if (1. 
- self.refinementRatio) * nTrain > len(self.muTrain): muTrainExtra, _ = self.trainingSetGenerator.refine(nTrain) self.muTrain = np.sort(np.append(self.muTrain, muTrainExtra)) nTrain += len(muTrainExtra) if self.verbosity >= 5: verbosityDepth("MAIN", ("Enriching training set by {} " "elements.").format( len(muTrainExtra))) muTrainOld, maxErrorEstOld = self.muTrain, maxErrorEst errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample( muidx, plotEst) if self.verbosity >= 2: verbosityDepth("MAIN", ("Uniform error estimate {:.4e}.")\ .format(maxErrorEst)) if (np.isnan(maxErrorEst) or np.isinf(maxErrorEst) or maxErrorEstOld < maxErrorEst * self.TOL_INSTABILITY): warn(("Instability in a posteriori estimator. Starting " "preemptive greedy loop termination.")) maxErrorEst = maxErrorEstOld self.muTrain = muTrainOld self.mus = self.mus[:-1] self.samplingEngine.popSample() self.setupApprox() break + if (self.interactive + and self.samplingEngine.nsamples >= self.maxIter): + verbosityDepth("MAIN", ("Maximum number of iterations {} " + "reached. Want to increase " + "maxIter and continue? Y/N")\ + .format(self.maxIter)) + increasemaxIter = input() + if increasemaxIter.upper() == "Y": + verbosityDepth("MAIN", "Doubling value of maxIter...") + self.maxIter *= 2 + if (self.interactive and maxErrorEst <= self.greedyTol): + verbosityDepth("MAIN", ("Required tolerance {} achieved. " + "Want to decrease greedyTol and " + "continue? Y/N").format( + self.greedyTol)) + increasemaxIter = input() + if increasemaxIter.upper() == "Y": + verbosityDepth("MAIN", "Halving value of greedyTol...") + self.greedyTol *= .5 if self.verbosity >= 2: verbosityDepth("DEL", ("Done computing snapshots (final " "snapshot count: {}).").format( self.samplingEngine.nsamples)) def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return (hasattr(self, "_S") and self.S == len(self.mus) and super().checkComputedApprox()) def computeScaleFactor(self): """Compute parameter rescaling factor.""" self.scaleFactor = .5 * np.abs(self.HFEngine.rescaling( self.trainingSetGenerator.lims[0][0]) - self.HFEngine.rescaling( self.trainingSetGenerator.lims[1][0])) diff --git a/rrompy/utilities/base/custom_fit.py b/rrompy/utilities/base/custom_fit.py index 1d13d0f..e764d09 100644 --- a/rrompy/utilities/base/custom_fit.py +++ b/rrompy/utilities/base/custom_fit.py @@ -1,137 +1,146 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np import numpy.linalg as la from warnings import warn __all__ = ["customFit"] def customFit(van, y, rcond=None, full=False, w=None): """ Least-squares fit of a polynomial to data. Copied from numpy.polynomial.polynomial. Parameters ---------- va : array_like, shape (`M`,`deg` + 1) Vandermonde-like matrix. y : array_like, shape (`M`,) or (`M`, `K`) y-coordinates of the sample points. 
        Several sets of sample points sharing the same x-coordinates can be
        (independently) fit with one call to `polyfit` by passing in for
        `y` a 2-D array that contains one data set per column.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        `rcond`, relative to the largest singular value, will be ignored.
        The default value is ``len(van)*eps``, where `eps` is the relative
        precision of the platform's float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining the nature of the return value. When ``False``
        (the default) just the coefficients are returned; when ``True``,
        diagnostic information from the singular value decomposition (used
        to solve the fit's matrix equation) is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point ``(x[i],y[i])``
        to the fit is weighted by `w[i]`. Ideally the weights are chosen so
        that the errors of the products ``w[i]*y[i]`` all have the same
        variance. The default value is None.

    Returns
    -------
    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
        Polynomial coefficients ordered from low to high. If `y` was 2-D,
        the coefficients in column `k` of `coef` represent the polynomial
        fit to the data in `y`'s `k`-th column.

    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Raises
    ------
    RankWarning
        Raised if the matrix in the least-squares fit is rank deficient.
        The warning is only raised if `full` == False. The warnings can be
        turned off by:

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)
    """
    van = np.asarray(van) + 0.0
    y = np.asarray(y) + 0.0

    # check arguments.
    if van.ndim != 2:
        raise TypeError("expected 2D vector for van")
    if van.size == 0:
        raise TypeError("expected non-empty vector for van")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(van) != len(y):
        raise TypeError("expected van and y to have same length")
    order = van.shape[1]

    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
+
+    if w == "AUTO":
+        # Determine the norms of the design matrix rows.
+        if issubclass(van.dtype.type, np.complexfloating):
+            w = np.sqrt((np.square(van.real) + np.square(van.imag)).sum(1))
+        else:
+            w = np.sqrt(np.square(van).sum(1))
+        w[w == 0] = 1
+        w = np.power(w, -1.)
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(van) != len(w):
            raise TypeError("expected van and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(van)*np.finfo(van.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] = 1

    # Solve the least squares problem.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warn(msg, np.polynomial.polyutils.RankWarning, stacklevel = 2)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
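# --- Illustrative sketch, not part of the patch -----------------------------
# The "AUTO" weighting added to customFit above equilibrates the rows of the
# Vandermonde-like matrix before the least-squares solve. A minimal
# numpy-only sketch of the same idea, with illustrative names and toy data:
import numpy as np

x = np.linspace(-1., 1., 20)                       # sample points
y = 1. + 2. * x - x ** 3                           # data from a cubic
van = np.polynomial.polynomial.polyvander(x, 3)    # Vandermonde-like matrix

w = np.sqrt(np.square(van).sum(axis = 1))          # row norms
w[w == 0] = 1
w = 1. / w                                         # reciprocal row norms

coef, *_ = np.linalg.lstsq(van * w[:, None], y * w, rcond = None)
print(coef)                                        # ~ [1., 2., 0., -1.]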
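# --- Illustrative sketch, not part of the patch -----------------------------
# setupApprox now truncates the Pade' denominator degree to the number of
# gramian eigenvalues that are non-negligible relative to the whole spectrum.
# A standalone numpy sketch of that count (the helper name is illustrative):
import numpy as np

def stable_degree(ev, robustTol):
    # eigenvalues kept: those at least robustTol times the spectrum norm
    return int(np.sum(np.abs(ev) >= robustTol * np.linalg.norm(ev)))

ev = np.array([1e-14, 1e-9, 1e-3, 1e-1, 1.])       # toy ascending spectrum
print(stable_degree(ev, robustTol = 1e-8))         # 3: two eigenvalues dropped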
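# --- Illustrative sketch, not part of the patch -----------------------------
# GenericApproximantLagrangeGreedy.errorEstimator returns the residual norm
# divided by the RHS norm at each candidate parameter. A toy affine example,
# with Euclidean norms standing in for the estNormer inner product:
import numpy as np

A0, A1 = np.diag([2., 3., 4.]), np.eye(3)          # A(mu) = A0 + mu * A1
b = np.ones(3)

def rel_residual(mu, uApp):
    res = b - (A0 + mu * A1).dot(uApp)             # residual of the surrogate
    return np.linalg.norm(res) / np.linalg.norm(b)

uApp = np.linalg.solve(A0 + .9 * A1, b)            # surrogate frozen at mu=.9
print(rel_residual(1., uApp))                      # small, nonzero estimate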
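# --- Illustrative sketch, not part of the patch -----------------------------
# The recognized approxParameters keys of the greedy Lagrange approximants,
# including the new 'interactive' flag, collected in one example dict; the
# values are illustrative and would be passed to a concrete subclass.
approxParameters = {
    "POD": True,                # compute POD of snapshots
    "muBounds": [[1.], [10.]],  # bounds for parameter values
    "greedyTol": 1e-2,          # uniform error tolerance
    "interactive": False,       # ask before stopping at maxIter / greedyTol
    "maxIter": 50,              # maximum number of greedy steps
    "refinementRatio": 0.2,     # training-set refinement trigger
    "nTrainingPoints": 250,     # number of training points
    "nTestPoints": 1,           # number of starting test points
    "robustTol": 1e-10,         # robust Pade' denominator tolerance
}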