diff --git a/examples/9_active_remeshing/active_remeshing.py b/examples/9_active_remeshing/active_remeshing.py new file mode 100755 index 0000000..204b8c7 --- /dev/null +++ b/examples/9_active_remeshing/active_remeshing.py @@ -0,0 +1,117 @@ +import numpy as np +from pickle import load +from matplotlib import pyplot as plt +from active_remeshing_engine import ActiveRemeshingEngine +from rrompy.reduction_methods import ( + RationalInterpolantGreedyPivotedGreedyNoMatch as RIGPGNM, + RationalInterpolantGreedyPivotedGreedy as RIGPG) +from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, + SparseGridSampler as SGS) + +zs, ts = [0., 100.], [0., .5] +z0, t0, n = np.mean(zs), np.mean(ts), 150 +solver = ActiveRemeshingEngine(z0, t0, n) + +mus = [[z0, ts[0]], [z0, ts[1]]] +for mu in mus: + u = solver.solve(mu, return_state = True)[0] + Y = solver.applyC(u, mu)[0][0] + _ = solver.plot(u, what = "REAL", name = "u(z={}, t={})".format(*mu), + is_state = True, figsize = (12, 4)) + print("Y(z={}, t={}) = {} (solution norm squared)".format(*mu, Y)) + +fighandles = [] +with open("./active_remeshing_hf_samples.pkl", "rb") as f: + zspace, tspace, Yex = load(f) +# zspace = np.linspace(zs[0], zs[-1], 198) +# tspace = np.linspace(ts[0], ts[-1], 50) +# Yex = np.log10([[solver.solve([z, t]) for t in tspace] for z in zspace]) +# (from a ~2h45m simulation on one node of the EPFL Helvetios cluster) + +for match in [0, 1]: + params = {"POD": True, "S": 5, "SMarginal": 3, "greedyTol": 1e-4, + "nTestPoints": 500, "polybasis": "LEGENDRE", + "maxIterMarginal": 25, "trainSetGenerator": QS(zs, "UNIFORM"), + "samplerPivot":QS(zs, "CHEBYSHEV"), "samplerMarginal":SGS(ts), + "errorEstimatorKind": "LOOK_AHEAD_OUTPUT", + "errorEstimatorKindMarginal": "LOOK_AHEAD_RECOVER"} + if match: + print("\nTesting output-based matching with weight 1.") + params["greedyTolMarginal"] = 1e-2 + params["matchingWeight"] = 1. 
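+ # Roughly: "matchingWeight" weights residue information against pole
+ # location when poles are matched across the marginal variable t, and
+ # "sharedRatio" (set just below) is the required fraction of marginal
+ # samples sharing a resonance for that resonance to be kept.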
+ params["sharedRatio"] = .75 + params["polybasisMarginal"] = "PIECEWISE_LINEAR_UNIFORM" + algo = RIGPG + else: + print("\nTesting matching-free approach.") + params["greedyTolMarginal"] = 2e-2 + algo = RIGPGNM + + approx = algo([0], solver, mu0 = [z0, t0], approxParameters = params, + verbosity = 15) + approx.setupApprox("ALL") + + verb, verbTM = approx.verbosity, approx.trainedModel.verbosity + approx.verbosity, approx.trainedModel.verbosity = 0, 0 + for j, t in enumerate(tspace): + out = approx.getApprox(np.pad(zspace.reshape(-1, 1), [(0, 0), (0, 1)], + "constant", constant_values = t)) + pls = approx.getPoles([None, t]) + pls[np.abs(np.imag(pls)) > 1e-5] = np.nan + if j == 0: + Ys = np.empty((len(zspace), len(tspace))) + poles = np.empty((len(tspace), len(pls))) + Ys[:, j] = np.log10(out.data) + if len(pls) > poles.shape[1]: + poles = np.pad(poles, [(0, 0), (0, len(pls) - poles.shape[1])], + "constant", constant_values = np.nan) + poles[j, : len(pls)] = np.real(pls) + approx.verbosity, approx.trainedModel.verbosity = verb, verbTM + Ymin, Ymax = min(np.min(Ys), np.min(Yex)), max(np.max(Ys), np.max(Yex)) + + fighandles += [plt.figure(figsize = (15, 5))] + ax1 = fighandles[-1].add_subplot(1, 2, 1) + ax2 = fighandles[-1].add_subplot(1, 2, 2) + if match: + ax1.plot(poles, tspace) + else: + ax1.plot(poles, tspace, 'k.') + ax1.set_ylim(ts) + ax1.set_xlabel('z') + ax1.set_ylabel('t') + ax1.grid() + if match: + ax2.plot(poles, tspace) + else: + ax2.plot(poles, tspace, 'k.') + for mm in approx.musMarginal: + ax2.plot(zs, [mm[0, 0]] * 2, 'k--', linewidth = 1) + ax2.set_xlim(zs) + ax2.set_ylim(ts) + ax2.set_xlabel('z') + ax2.set_ylabel('t') + ax2.grid() + plt.show() + print("Approximate poles") + + fighandles += [plt.figure(figsize = (15, 5))] + ax1 = fighandles[-1].add_subplot(1, 2, 1) + ax2 = fighandles[-1].add_subplot(1, 2, 2) + p = ax1.contourf(np.repeat(zspace.reshape(-1, 1), len(tspace), axis = 1), + np.repeat(tspace.reshape(1, -1), len(zspace), axis = 0), + Ys, vmin = Ymin, vmax = Ymax, cmap = "gray_r", + levels = np.linspace(Ymin, Ymax, 50)) + plt.colorbar(p, ax = ax1) + ax1.set_xlabel('z') + ax1.set_ylabel('t') + ax1.grid() + p = ax2.contourf(np.repeat(zspace.reshape(-1, 1), len(tspace), axis = 1), + np.repeat(tspace.reshape(1, -1), len(zspace), axis = 0), + Yex, vmin = Ymin, vmax = Ymax, cmap = "gray_r", + levels = np.linspace(Ymin, Ymax, 50)) + ax2.set_xlabel('z') + ax2.set_ylabel('t') + ax2.grid() + plt.colorbar(p, ax = ax2) + plt.show() + print("Approximate and exact output\n") diff --git a/examples/9_active_remeshing/active_remeshing_engine.py b/examples/9_active_remeshing/active_remeshing_engine.py new file mode 100644 index 0000000..41c09e0 --- /dev/null +++ b/examples/9_active_remeshing/active_remeshing_engine.py @@ -0,0 +1,90 @@ +import numpy as np +import ufl +import fenics as fen +import mshr +from rrompy.utilities.base.decorators import (pivot_affine_construct, + mupivot_independent) +from rrompy.hfengines.fenics_engines import HelmholtzProblemEngine +from rrompy.utilities.numerical.hash_derivative import ( + hashDerivativeToIdx as hashD) +from rrompy.solver.fenics import fenZERO, fenONE, fenics2Vector +from rrompy.parameter import parameterMap as pMap +from rrompy.utilities.exception_manager import RROMPyException + +class ActiveRemeshingEngine(HelmholtzProblemEngine): + def __init__(self, z0:float, t0:float, n:int): + super().__init__(mu0 = [z0, t0]) + self._affinePoly = False + self._nMesh = n + self.meshGen(t0) + self.parameterMap = pMap(1., 2) + self.DirichletBoundary = 
lambda x, on_boundary: (on_boundary + and np.abs(x[0]) >= .75 - 1e-5 + or np.abs(x[1]) >= .5 - 1e-5) + self.NeumannBoundary = "REST" + self.setCQuadratic(2) + self.cutOffPolesIMin, self.cutOffPolesIMax = -1e-2, 1e-2 + + def meshGen(self, t:float): + t = np.real(t) + if (not hasattr(self, "_tMesh") or not np.isclose(self._tMesh, t)): + e, self._tMesh = .01, t + tipx, tipy = .5 * np.sin(t), .5 - .5 * np.cos(t) + tiplx, tiply = tipx + .5 * e * np.cos(t), tipy + .5 * e * np.sin(t) + tiprx, tipry = tipx - .5 * e * np.cos(t), tipy - .5 * e * np.sin(t) + basx, basy = - e * np.sin(t), .5 + e * np.cos(t) + baslx, basly = basx + .5 * e * np.cos(t), basy + .5 * e * np.sin(t) + basrx, basry = basx - .5 * e * np.cos(t), basy - .5 * e * np.sin(t) + mesh = mshr.generate_mesh( + mshr.Rectangle(fen.Point(-.75, -.5), fen.Point(.75, .5)) + - mshr.Polygon([fen.Point(tiprx, tipry), + fen.Point(tiplx, tiply), + fen.Point(baslx, basly), + fen.Point(basrx, basry)]) + - mshr.Circle(fen.Point(tipx, tipy), .5 * e), self._nMesh) + self.V = fen.FunctionSpace(mesh, "P", 1) + self.As, self._C = [None] * 2, None + self.autoSetDS() + if hasattr(self, "energyNormMatrix"): + del self.energyNormMatrix + if hasattr(self, "energyNormDualMatrix"): + del self.energyNormDualMatrix + + def getForcingTerm(self, mu = []): + mu = self.checkParameter(mu) + self.meshGen(mu(0, 1)) + x, y = fen.SpatialCoordinate(self.V.mesh())[:] + rightZone = .1875**-2 * ufl.conditional(ufl.And( + ufl.And(ufl.ge(x, -.5625), ufl.le(x, -.375)), + ufl.And(ufl.ge(y, .125), ufl.le(y, .3125))), + fenONE, fenZERO) + return rightZone, fenZERO + + @pivot_affine_construct + def A(self, mu = [], der = 0): + mu = self.checkParameter(mu) + self.meshGen(mu(0, 1)) + return HelmholtzProblemEngine.A(self, mu, der) + + @pivot_affine_construct + def b(self, mu = [], der = 0): + derI = hashD(der) if hasattr(der, "__len__") else der + if derI < 0: return self.baselineb() + if derI > 0: raise Exception("Derivatives not implemented.") + if self.thbs[0] is None: self.thbs = self.getMonomialWeights(self.nbs) + fen0 = self.getForcingTerm(mu)[0] * self.v * fen.dx + DBC = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) + self.bs = [fenics2Vector(fen0, {}, DBC, 1)] + return HelmholtzProblemEngine.b(self, mu, der) + + @mupivot_independent + def C(self, mu): + mu = self.checkParameterList(mu) + if not np.all(np.isclose(mu(1), mu(0, 1))): + raise RROMPyException(("Simultaneous evaluation of C on multiple " + "meshes not supported.")) + self.meshGen(mu(0, 1)) + if self._C is None: + self.buildEnergyNormForm() + self._C = self.energyNormMatrix + return self._C diff --git a/examples/9_active_remeshing/active_remeshing_hf_samples.pkl b/examples/9_active_remeshing/active_remeshing_hf_samples.pkl new file mode 100644 index 0000000..86fc869 Binary files /dev/null and b/examples/9_active_remeshing/active_remeshing_hf_samples.pkl differ diff --git a/rrompy/hfengines/base/hfengine_base.py b/rrompy/hfengines/base/hfengine_base.py index 3b1992f..bce8ffa 100644 --- a/rrompy/hfengines/base/hfengine_base.py +++ b/rrompy/hfengines/base/hfengine_base.py @@ -1,397 +1,402 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod import numpy as np import scipy.sparse as scsp from numbers import Number from collections.abc import Iterable from copy import copy as softcopy from rrompy.utilities.base.decorators import (nonaffine_construct, mu_independent) from rrompy.utilities.base.types import (Np1D, Np2D, List, DictAny, paramVal, paramList, sampList) from rrompy.utilities.numerical import solve as tsolve, dot from rrompy.utilities.expression import expressionEvaluator from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert from rrompy.sampling.sample_list import sampleList from rrompy.parameter import (checkParameter, checkParameterList, parameterList, parameterMap as pMap) from rrompy.solver.linear_solver import setupSolver from rrompy.utilities.parallel import (poolRank, masterCore, listScatter, matrixGatherv, isend, recv) __all__ = ['HFEngineBase'] class HFEngineBase: """Generic solver for parametric problems.""" def __init__(self, verbosity : int = 10, timestamp : bool = True): self.verbosity = verbosity self.timestamp = timestamp self.setSolver("SPSOLVE", {"use_umfpack" : False}) self.npar = 0 self._C = None self.outputNormMatrix = 1. def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return self.name() def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) def __dir_base__(self): return [x for x in self.__dir__() if x[:2] != "__"] def __deepcopy__(self, memo): return softcopy(self) @property def npar(self): """Value of npar.""" return self._npar @npar.setter def npar(self, npar): nparOld = self._npar if hasattr(self, "_npar") else -1 if npar != nparOld: self.parameterMap = pMap(1., npar) self._npar = npar @property def spacedim(self): return 1 def checkParameter(self, mu:paramVal) -> paramVal: muP = checkParameter(mu, self.npar) if self.npar == 0: muP.reset((1, 0), muP.dtype) return muP def checkParameterList(self, mu:paramList, check_if_single : bool = False) -> paramList: muL = checkParameterList(mu, self.npar, check_if_single) return muL def mapParameterList(self, mu:paramList, direct : str = "F", idx : List[int] = None) -> paramList: if idx is None: idx = np.arange(self.npar) muMapped = checkParameterList(mu, len(idx)) for j, d in enumerate(idx): muMapped.data[:, j] = expressionEvaluator( self.parameterMap[direct][d], muMapped(j)).flatten() return muMapped def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ self.energyNormMatrix = 1. def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ self.energyNormDualMatrix = 1. 
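With the default identity forms set above, innerProduct and norm (defined next) reduce to the plain Euclidean scalar product and 2-norm; concrete engines such as the FEniCS ones override them with actual sparse Gram matrices. A minimal sketch, assuming `engine` is some instantiated HFEngineBase subclass that keeps these defaults:

import numpy as np
u = np.random.rand(engine.spacedim, 3)             # three random real snapshots
gram = engine.innerProduct(u, u)                   # 3 x 3 Gram matrix
diag = engine.innerProduct(u, u, onlyDiag = True)  # its diagonal only
assert np.allclose(gram, u.T @ u)                  # identity energy matrix
assert np.allclose(engine.norm(u) ** 2, diag)      # norm is the Euclidean 2-norm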
def innerProduct(self, u:Np2D, v:Np2D, onlyDiag : bool = False, dual : bool = False, is_state : bool = True) -> Np2D: """Scalar product.""" if is_state or self.isCEye: if dual: if not hasattr(self, "energyNormDualMatrix"): self.buildEnergyNormDualForm() energyMat = self.energyNormDualMatrix else: if not hasattr(self, "energyNormMatrix"): self.buildEnergyNormForm() energyMat = self.energyNormMatrix else: energyMat = self.outputNormMatrix if isinstance(u, (parameterList, sampleList)): u = u.data if isinstance(v, (parameterList, sampleList)): v = v.data if onlyDiag: return np.sum(dot(energyMat, u) * v.conj(), axis = 0) return dot(dot(energyMat, u).T, v.conj()).T def norm(self, u:Np2D, dual : bool = False, is_state : bool = True) -> Np1D: return np.abs(self.innerProduct(u, u, onlyDiag = True, dual = dual, is_state = is_state)) ** .5 def baselineA(self): """Return 0 of shape consistent with operator of linear system.""" if (hasattr(self, "As") and isinstance(self.As, Iterable) and self.As[0] is not None): d = self.As[0].shape[0] else: d = self.spacedim return scsp.csr_matrix((np.zeros(0), np.zeros(0), np.zeros(d + 1)), shape = (d, d), dtype = np.complex) def baselineb(self): """Return 0 of shape consistent with RHS of linear system.""" return np.zeros(self.spacedim, dtype = np.complex) @nonaffine_construct @abstractmethod def A(self, mu : paramVal = [], der : List[int] = 0) -> Np2D: """ Assemble terms of operator of linear system and return it (or its derivative) at a given parameter. """ return @nonaffine_construct @abstractmethod def b(self, mu : paramVal = [], der : List[int] = 0) -> Np1D: """ Assemble terms of RHS of linear system and return it (or its derivative) at a given parameter. """ return @mu_independent def C(self, mu:paramVal): """ Value of C. Should be overridden (with something like return self._C(mu) ) if a mu-dependent C is needed. """ if self._C is None: self._C = 1. return self._C def setCQuadratic(self, quad : bool = 1): if isinstance(quad, (str,)): quad = quad.upper().strip().replace(" ","") if quad == "SELFADJOINT": quad = 2 elif quad in ["STANDARD", "GENERAL"]: quad = 1 else: raise RROMPyException(("String keyword for output symmetry " "not recognized.")) if quad: self._is_C_quadratic = int(quad) elif hasattr(self, "_is_C_quadratic"): del self._is_C_quadratic @property def isCEye(self): """ Whether the action of C can be seen as a scalar multiplication. Should be overridden (with return True ) if a mu-dependent scalar C is used. 
""" return isinstance(self._C, Number) def applyC(self, u:sampList, mu:paramVal): """Apply LHS of linear system.""" if not (hasattr(self, "_is_C_quadratic") and self._is_C_quadratic): return dot(self.C(mu), u) return self._applyCQuadratic(u, mu) - def _applyCQuadratic(self, u:sampList, mu:paramVal, v : sampList = None): + def _applyCQuadratic(self, u:sampList, mu:paramVal, v : sampList = None, + onlyDiag : bool = False): """Apply quadratic LHS of linear system.""" if not (hasattr(self, "_is_C_quadratic") and self._is_C_quadratic): raise RROMPyException(("Cannot call quadratic output routine if " "output is not quadratic.")) C = self.C(mu) is_C_list = isinstance(C, list) if ((is_C_list and np.any([Ci.ndim != 2 for Ci in C])) or not (is_C_list or C.ndim in [2, 3])): raise RROMPyException(("C array for quadratic output must have 2 " "or 3 dimensions or be list of " "2-dimensional arrays.")) symmetry = v is None # computing u^H * C * u selfadjoint = self._is_C_quadratic == 2 and symmetry if isinstance(u, sampleList): u = u.data if isinstance(v, sampleList): v = v.data while u.ndim < 2: u = np.expand_dims(u, -1) if v is None: v = u while v.ndim < 2: v = np.expand_dims(v, -1) N, M = u.shape[1], v.shape[1] - if symmetry: + if onlyDiag: + if N != M: + raise RROMPyException(("Cannot extract diagonal of " + "rectangular output.")) + Ncol = (N,) + elif symmetry: Ncol = (N * (N + 1) // 2,) if selfadjoint else (N ** 2,) else: Ncol = (N, M) for j in range(N): - Rv = v[:, j :] if selfadjoint else v + Rv = v[:, j] if onlyDiag else v[:, j :] if selfadjoint else v if is_C_list: cj = np.array([[dot(dot(Ci, u[:, j]), Rv.conj())] for Ci in C]) else: cj = dot(dot(C, u[:, j]), Rv.conj()) if C.ndim == 2: cj = np.expand_dims(cj, 0) - if j == 0: - if selfadjoint and N == 1: cj = np.real(cj) - res = np.empty((len(cj),) + Ncol, dtype = cj[0].dtype) - if symmetry: # map 2D idx to hierarchical 1D idx + if selfadjoint and (onlyDiag or N == 1): cj = np.real(cj) + if j == 0: res = np.empty((len(cj),) + Ncol, dtype = cj[0].dtype) + if not onlyDiag and symmetry: # map 2D idx to hierarchical 1D idx if self._is_C_quadratic == 2: res[:, j * (j + 1) // 2] = cj[:, 0] iD = np.arange(j + 1, N) * np.arange(j + 4, N + 3) // 2 - j res[:, iD] = 2 * cj[:, 1 :] # exploit symmetry else: res[:, j ** 2 : j * (j + 1)] = cj[:, : j] iD = np.arange(j, N) * np.arange(j + 2, N + 2) - j res[:, iD] = cj[:, j :] else: - res[:, j, :] = cj + res[:, j] = cj return res def setSolver(self, solverType:str, solverArgs : DictAny = {}): """Choose solver type and parameters.""" self._solver, self._solverArgs = setupSolver(solverType, solverArgs) def solve(self, mu : paramList = [], RHS : sampList = None, return_state : bool = False) -> sampList: """ Find solution of linear system. Args: mu: parameter value. RHS: RHS of linear system. If None, defaults to that of parametric system. Defaults to None. return_state: whether to return state before multiplication by c. Defaults to False. 
""" from rrompy.sampling import sampleList, emptySampleList if mu == []: mu = self.mu0 mu = self.checkParameterList(mu) if len(mu) == 0: return emptySampleList() mu = self.checkParameterList(mu) mu_loc, idx, sizes = listScatter(mu, return_sizes = True) mu_loc = self.checkParameterList(mu_loc) req, emptyCores = [], np.where(np.logical_not(sizes))[0] if len(mu_loc) == 0: uL, uT = recv(source = 0, tag = poolRank()) sol = np.empty((uL, 0), dtype = uT) else: applyCglob = (hasattr(self, "_is_C_quadratic") and self._is_C_quadratic) if RHS is None: # build RHSs RHS = sampleList([self.b(m) for m in mu_loc]) else: RHS = sampleList(RHS) if len(RHS) > 1: RHS = sampleList([RHS[i] for i in idx]) mult = 0 if len(RHS) == 1 else 1 RROMPyAssert(mult * (len(mu_loc) - 1) + 1, len(RHS), "Sample size") for j, mj in enumerate(mu_loc): u = tsolve(self.A(mj), RHS[mult * j], self._solver, self._solverArgs) if not (return_state or applyCglob): u = self.applyC(u, mj) if j == 0: sol = np.empty((len(u), len(mu_loc)), dtype = u.dtype) if masterCore(): for dest in emptyCores: req += [isend((len(u), u.dtype), dest = dest, tag = dest)] sol[:, j] = u for r in req: r.wait() sol = matrixGatherv(sol, sizes) if not return_state and applyCglob: sol = self.applyC(sol, mu) return sampleList(sol) def residual(self, mu : paramList = [], u : sampList = None, post_c : bool = True) -> sampList: """ Find residual of linear system for given approximate solution. Args: mu: parameter value. u: numpy complex array with function dofs. If None, set to 0. post_c: whether to post-process using c. Defaults to True. """ from rrompy.sampling import sampleList, emptySampleList if mu == []: mu = self.mu0 mu = self.checkParameterList(mu) if len(mu) == 0: return emptySampleList() mu_loc, idx, sizes = listScatter(mu, return_sizes = True) mu_loc = self.checkParameterList(mu_loc) req, emptyCores = [], np.where(np.logical_not(sizes))[0] if len(mu_loc) == 0: uL, uT = recv(source = 0, tag = poolRank()) res = np.empty((uL, 0), dtype = uT) else: applyCglob = (hasattr(self, "_is_C_quadratic") and self._is_C_quadratic) v = sampleList(np.zeros((self.spacedim, len(mu_loc)))) if u is not None: u = sampleList(u) v = v + sampleList([u[i] for i in idx]) for j, (mj, vj) in enumerate(zip(mu_loc, v)): r = self.b(mj) - dot(self.A(mj), vj) if post_c and not applyCglob: r = self.applyC(r, mj) if j == 0: res = np.empty((len(r), len(mu_loc)), dtype = r.dtype) if masterCore(): for dest in emptyCores: req += [isend((len(r), r.dtype), dest = dest, tag = dest)] res[:, j] = r for r in req: r.wait() res = matrixGatherv(res, sizes) if post_c and applyCglob: res = self.applyC(res, mu) return sampleList(res) cutOffPolesRMax,cutOffPolesRMin = np.inf, - np.inf cutOffPolesRMaxRel, cutOffPolesRMinRel = np.inf, - np.inf cutOffPolesIMax, cutOffPolesIMin = np.inf, - np.inf cutOffPolesIMaxRel, cutOffPolesIMinRel = np.inf, - np.inf cutOffResNormMin = -1 def flagBadPolesResidues(self, poles:Np1D, residues : Np1D = None, relative : bool = False) -> Np1D: """ Flag (numerical) poles/residues which are impossible. Args: poles: poles to be judged. residues: residues to be judged. relative: whether relative values should be used for poles. """ poles = np.array(poles).flatten() flag = np.zeros(len(poles), dtype = bool) if residues is None: self._ignoreResidues = self.cutOffResNormMin <= 0. 
if relative: RMax, RMin = self.cutOffPolesRMaxRel, self.cutOffPolesRMinRel IMax, IMin = self.cutOffPolesIMaxRel, self.cutOffPolesIMinRel else: RMax, RMin = self.cutOffPolesRMax, self.cutOffPolesRMin IMax, IMin = self.cutOffPolesIMax, self.cutOffPolesIMin if not np.isinf(RMax): flag = np.logical_or(flag, np.real(poles) > RMax) if not np.isinf(RMin): flag = np.logical_or(flag, np.real(poles) < RMin) if not np.isinf(IMax): flag = np.logical_or(flag, np.imag(poles) > IMax) if not np.isinf(IMin): flag = np.logical_or(flag, np.imag(poles) < IMin) else: residues = np.array(residues).reshape(len(poles), -1) if self.cutOffResNormMin > 0.: if residues.shape[1] == self.spacedim: resEff = self.norm(residues.T) else: resEff = np.linalg.norm(residues, axis = 1) resEff /= np.max(resEff) flag = np.logical_or(flag, resEff < self.cutOffResNormMin) return flag diff --git a/rrompy/hfengines/fenics_engines/helmholtz_problem_engine.py b/rrompy/hfengines/fenics_engines/helmholtz_problem_engine.py index 2ae4d30..ff062b0 100644 --- a/rrompy/hfengines/fenics_engines/helmholtz_problem_engine.py +++ b/rrompy/hfengines/fenics_engines/helmholtz_problem_engine.py @@ -1,228 +1,230 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import fenics as fen from .laplace_base_problem_engine import LaplaceBaseProblemEngine from rrompy.solver.fenics import fenZERO, fenONE, fenics2Sparse from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import RROMPyWarning from rrompy.parameter import parameterMap as pMap __all__ = ['HelmholtzProblemEngine', 'ScatteringProblemEngine'] class HelmholtzProblemEngine(LaplaceBaseProblemEngine): """ Solver for generic Helmholtz problems with parametric wavenumber. - \nabla \cdot (a \nabla u) - omega^2 * n**2 * u = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real FE space. u: Generic trial functions for variational form evaluation. v: Generic test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. diffusivity: Value of a. refractionIndex: Value of n. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). 
dsToBeSet: Whether ds needs to be set. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._affinePoly = True self.nAs = 2 self.parameterMap = pMap([2.] + [1.] * (self.npar - 1)) self.refractionIndex = fenONE @property def refractionIndex(self): """Value of n.""" return self._refractionIndex @refractionIndex.setter def refractionIndex(self, refractionIndex): self.resetAs() if not isinstance(refractionIndex, (list, tuple,)): refractionIndex = [refractionIndex, fenZERO] self._refractionIndex = refractionIndex def buildA(self): """Build terms of operator of linear system.""" if self.thAs[0] is None: self.thAs = self.getMonomialWeights(self.nAs) if self.As[0] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling operator term A0.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) aRe, aIm = self.diffusivity hRe, hIm = self.RobinDatumH termNames = ["diffusivity", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( [aRe, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [aIm, hIm], [x + "Imag" for x in termNames])) a0Re = (aRe * fen.dot(fen.grad(self.u), fen.grad(self.v)) * fen.dx + hRe * fen.dot(self.u, self.v) * self.ds(1)) a0Im = (aIm * fen.dot(fen.grad(self.u), fen.grad(self.v)) * fen.dx + hIm * fen.dot(self.u, self.v) * self.ds(1)) self.As[0] = (fenics2Sparse(a0Re, parsRe, DirichletBC0, 1) + 1.j * fenics2Sparse(a0Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) if self.As[1] is None: vbMng(self, "INIT", "Assembling operator term A1.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) nRe, nIm = self.refractionIndex n2Re, n2Im = nRe * nRe - nIm * nIm, 2 * nRe * nIm parsRe = self.iterReduceQuadratureDegree(zip([n2Re], ["refractionIndexSquaredReal"])) parsIm = self.iterReduceQuadratureDegree(zip([n2Im], ["refractionIndexSquaredImag"])) a1Re = - n2Re * fen.dot(self.u, self.v) * fen.dx a1Im = - n2Im * fen.dot(self.u, self.v) * fen.dx self.As[1] = (fenics2Sparse(a1Re, parsRe, DirichletBC0, 0) + 1.j * fenics2Sparse(a1Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) class ScatteringProblemEngine(HelmholtzProblemEngine): """ Solver for scattering problems with parametric wavenumber. - \nabla \cdot (a \nabla u) - omega^2 * n**2 * u = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu +- i omega u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real FE space. u: Generic trial functions for variational form evaluation. v: Generic test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. signR: Sign in ABC. omega: Value of omega. diffusivity: Value of a. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. 
""" def __init__(self, *args, **kwargs): self.silenceWarnings = True super().__init__(*args, **kwargs) self._affinePoly = True del self.silenceWarnings self.nAs = 3 self.parameterMap = pMap(1., self.npar) self.signR = - 1. @property def RobinDatumH(self): """Value of h.""" - return self.signR * self.omega + if not hasattr(self, "silenceWarnings"): + RROMPyWarning("Scattering problems do not allow changes of h.") + return self.signR @RobinDatumH.setter def RobinDatumH(self, RobinDatumH): if not hasattr(self, "silenceWarnings"): RROMPyWarning(("Scattering problems do not allow changes of h. " "Ignoring assignment.")) return @property def signR(self): """Value of signR.""" return self._signR @signR.setter def signR(self, signR): self.resetAs() self._signR = signR def buildA(self): """Build terms of operator of linear system.""" if self.thAs[0] is None: self.thAs = self.getMonomialWeights(self.nAs) if self.As[0] is None: vbMng(self, "INIT", "Assembling operator term A0.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) aRe, aIm = self.diffusivity parsRe = self.iterReduceQuadratureDegree(zip([aRe], ["diffusivityReal"])) parsIm = self.iterReduceQuadratureDegree(zip([aIm], ["diffusivityImag"])) a0Re = aRe * fen.dot(fen.grad(self.u), fen.grad(self.v)) * fen.dx a0Im = aIm * fen.dot(fen.grad(self.u), fen.grad(self.v)) * fen.dx self.As[0] = (fenics2Sparse(a0Re, parsRe, DirichletBC0, 1) + 1.j * fenics2Sparse(a0Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) if self.As[1] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling operator term A1.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) a1 = fen.dot(self.u, self.v) * self.ds(1) self.As[1] = (self.signR * 1.j * fenics2Sparse(a1, {}, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) if self.As[2] is None: vbMng(self, "INIT", "Assembling operator term A2.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) nRe, nIm = self.refractionIndex n2Re, n2Im = nRe * nRe - nIm * nIm, 2 * nRe * nIm parsRe = self.iterReduceQuadratureDegree(zip([n2Re], ["refractionIndexSquaredReal"])) parsIm = self.iterReduceQuadratureDegree(zip([n2Im], ["refractionIndexSquaredImag"])) a2Re = - n2Re * fen.dot(self.u, self.v) * fen.dx a2Im = - n2Im * fen.dot(self.u, self.v) * fen.dx self.As[2] = (fenics2Sparse(a2Re, parsRe, DirichletBC0, 0) + 1.j * fenics2Sparse(a2Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) diff --git a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py index ddb8084..d837516 100644 --- a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py +++ b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py @@ -1,765 +1,767 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod from copy import deepcopy as copy import numpy as np from collections.abc import Iterable from matplotlib import pyplot as plt from rrompy.reduction_methods.pivoted.generic_pivoted_approximant import ( GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximant) from rrompy.reduction_methods.pivoted.gather_pivoted_approximant import ( gatherPivotedApproximant) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal, paramList, ListAny) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.point_matching import (pointMatching, buildResiduesForDistance) from rrompy.utilities.numerical.point_distances import chordalMetricAngleMatrix from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import (masterCore, indicesScatter, arrayGatherv, isend) __all__ = ['GenericPivotedGreedyApproximantNoMatch', 'GenericPivotedGreedyApproximant'] class GenericPivotedGreedyApproximantBase(GenericPivotedApproximantBase): _allowedEstimatorKindsMarginal = ["LOOK_AHEAD", "LOOK_AHEAD_RECOVER", "NONE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchingWeightError", "errorEstimatorKindMarginal", "greedyTolMarginal", "maxIterMarginal"], [0., "NONE", 1e-1, 1e2]) super().__init__(*args, **kwargs) self._postInit() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. if self._scaleFactorDer == "AUTO": return self._scaleFactorOldPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif isinstance(scaleFactorDer, Iterable): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'refine' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") GenericPivotedApproximantBase.samplerMarginal.fset(self, samplerMarginal) @property def errorEstimatorKindMarginal(self): """Value of errorEstimatorKindMarginal.""" return self._errorEstimatorKindMarginal @errorEstimatorKindMarginal.setter def errorEstimatorKindMarginal(self, errorEstimatorKindMarginal): errorEstimatorKindMarginal = errorEstimatorKindMarginal.upper() if errorEstimatorKindMarginal not in ( self._allowedEstimatorKindsMarginal): RROMPyWarning(("Marginal error estimator kind not recognized. 
" "Overriding to 'NONE'.")) errorEstimatorKindMarginal = "NONE" self._errorEstimatorKindMarginal = errorEstimatorKindMarginal self._approxParameters["errorEstimatorKindMarginal"] = ( self.errorEstimatorKindMarginal) @property def matchingWeightError(self): """Value of matchingWeightError.""" return self._matchingWeightError @matchingWeightError.setter def matchingWeightError(self, matchingWeightError): self._matchingWeightError = matchingWeightError self._approxParameters["matchingWeightError"] = ( self.matchingWeightError) @property def greedyTolMarginal(self): """Value of greedyTolMarginal.""" return self._greedyTolMarginal @greedyTolMarginal.setter def greedyTolMarginal(self, greedyTolMarginal): if greedyTolMarginal < 0: raise RROMPyException("greedyTolMarginal must be non-negative.") if (hasattr(self, "_greedyTolMarginal") and self.greedyTolMarginal is not None): greedyTolMarginalold = self.greedyTolMarginal else: greedyTolMarginalold = -1 self._greedyTolMarginal = greedyTolMarginal self._approxParameters["greedyTolMarginal"] = self.greedyTolMarginal if greedyTolMarginalold != self.greedyTolMarginal: self.resetSamples() @property def maxIterMarginal(self): """Value of maxIterMarginal.""" return self._maxIterMarginal @maxIterMarginal.setter def maxIterMarginal(self, maxIterMarginal): if maxIterMarginal <= 0: raise RROMPyException("maxIterMarginal must be positive.") if (hasattr(self, "_maxIterMarginal") and self.maxIterMarginal is not None): maxIterMarginalold = self.maxIterMarginal else: maxIterMarginalold = -1 self._maxIterMarginal = maxIterMarginal self._approxParameters["maxIterMarginal"] = self.maxIterMarginal if maxIterMarginalold != self.maxIterMarginal: self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() if not hasattr(self, "_temporaryPivot"): self._mus = emptyParameterList() self._musMarginal = emptyParameterList() if hasattr(self, "samplerMarginal"): self.samplerMarginal.reset() if hasattr(self, "samplingEngine") and self.samplingEngine is not None: self.samplingEngine.resetHistory() def _getDistanceApp(self, polesEx:Np1D, resEx:Np2D, muTest:paramVal, foci:Tuple[float, float], ground:float) -> float: polesAp = self.trainedModel.interpolateMarginalPoles(muTest)[0] if self.matchingWeightError != 0: resAp = self.trainedModel.interpolateMarginalCoeffs(muTest)[0][ : len(polesAp), :] if (hasattr(self.trainedModel.data, "_is_C_quadratic") and self.trainedModel.data._is_C_quadratic): self.trainedModel._setupQuadMapping() projMapping = self.quad_mapping projMappingReal = self.data._is_C_quadratic == 2 else: projMapping, projMappingReal = None, False resEx = buildResiduesForDistance(resEx, self.trainedModel.data.projMat, 0, projMapping, projMappingReal) resAp = buildResiduesForDistance(resAp, self.trainedModel.data.projMat, 0, projMapping, projMappingReal) else: resAp = None dist = chordalMetricAngleMatrix(polesEx, polesAp, self.matchingWeightError, resEx, resAp, self.HFEngine, False) pmR, pmC = pointMatching(dist) return np.mean(dist[pmR, pmC]) def getErrorEstimatorMarginalLookAhead(self) -> Np1D: if not hasattr(self.trainedModel, "_musMExcl"): err = np.zeros(0) err[:] = np.inf self._musMarginalTestIdxs = np.zeros(0, dtype = int) return err self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl, dtype = int) idx, sizes = indicesScatter(len(self.trainedModel._musMExcl), return_sizes = True) err = [] if len(idx) > 0: self.verbosity -= 25 self.trainedModel.verbosity -= 25 foci = self.samplerPivot.normalFoci() ground = 
self.samplerPivot.groundPotential() for j in idx: muTest = self.trainedModel._musMExcl[j] HITest = self.trainedModel._HIsExcl[j] polesEx = HITest.poles idxGood = np.logical_not(np.logical_or(np.isinf(polesEx), np.isnan(polesEx))) polesEx = polesEx[idxGood] if self.matchingWeightError != 0: resEx = HITest.coeffs[np.where(idxGood)[0]] else: resEx = None if len(polesEx) == 0: err += [0.] continue err += [self._getDistanceApp(polesEx, resEx, muTest, foci, ground)] self.verbosity += 25 self.trainedModel.verbosity += 25 return arrayGatherv(np.array(err), sizes) def getErrorEstimatorMarginalNone(self) -> Np1D: nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) return (1. + self.greedyTolMarginal) * np.ones(nErr) def errorEstimatorMarginal(self, return_max : bool = False) -> Np1D: vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format( self.trainedModel.data.musMarginal), 10) if self.errorEstimatorKindMarginal == "NONE": nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) err = (1. + self.greedyTolMarginal) * np.ones(nErr) else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": err = self.getErrorEstimatorMarginalLookAhead() vbMng(self.trainedModel, "DEL", "Done evaluating error estimator", 10) if not return_max: return err idxMaxEst = np.where(err > self.greedyTolMarginal)[0] maxErr = err[idxMaxEst] if self.errorEstimatorKindMarginal == "NONE": maxErr = None return err, idxMaxEst, maxErr def plotEstimatorMarginal(self, est:Np1D, idxMax:List[int], estMax:List[float]): if self.errorEstimatorKindMarginal == "NONE": return if (not (np.any(np.isnan(est)) or np.any(np.isinf(est))) and masterCore() and hasattr(self.trainedModel, "_musMExcl")): fig = plt.figure(figsize = plt.figaspect(1. / self.nparMarginal)) for jpar in range(self.nparMarginal): ax = fig.add_subplot(1, self.nparMarginal, 1 + jpar) musre = np.real(self.trainedModel._musMExcl) if len(idxMax) > 0 and estMax is not None: maxrej = musre[idxMax, jpar] errCP = copy(est) idx = np.delete(np.arange(self.nparMarginal), jpar) while len(musre) > 0: if self.nparMarginal == 1: currIdx = np.arange(len(musre)) else: currIdx = np.where(np.isclose(np.sum( np.abs(musre[:, idx] - musre[0, idx]), 1), 0.))[0] currIdxSorted = currIdx[np.argsort(musre[currIdx, jpar])] ax.semilogy(musre[currIdxSorted, jpar], errCP[currIdxSorted], 'k.-', linewidth = 1) musre = np.delete(musre, currIdx, 0) errCP = np.delete(errCP, currIdx) ax.semilogy(self.musMarginal.re(jpar), (self.greedyTolMarginal,) * len(self.musMarginal), '*m') if len(idxMax) > 0 and estMax is not None: ax.semilogy(maxrej, estMax, 'xr') ax.set_xlim(*list(self.samplerMarginal.lims.re(jpar))) ax.grid() plt.tight_layout() plt.show() def _addMarginalSample(self, mus:paramList): mus = self.checkParameterListMarginal(mus) if len(mus) == 0: return self._nmusOld, nmus = len(self.musMarginal), len(mus) if (hasattr(self, "trainedModel") and self.trainedModel is not None and hasattr(self.trainedModel, "_musMExcl")): self._nmusOld += len(self.trainedModel._musMExcl) vbMng(self, "MAIN", ("Adding marginal sample point{} no. 
{}{} at {} to training " "set.").format("s" * (nmus > 1), self._nmusOld + 1, "--{}".format(self._nmusOld + nmus) * (nmus > 1), mus), 3) self.musMarginal.append(mus) self.setupApproxPivoted(mus) self._poleMatching() del self._nmusOld if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): ubRange = len(self.trainedModel.data.musMarginal) if hasattr(self.trainedModel, "_idxExcl"): shRange = len(self.trainedModel._musMExcl) else: shRange = 0 testIdxs = list(range(ubRange + shRange - len(mus), ubRange + shRange)) for j in testIdxs[::-1]: self.musMarginal.pop(j - shRange) if hasattr(self.trainedModel, "_idxExcl"): testIdxs = self.trainedModel._idxExcl + testIdxs self._updateTrainedModelMarginalSamples(testIdxs) self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = self.SMarginal def greedyNextSampleMarginal(self, muidx:List[int], plotEst : str = "NONE") \ -> Tuple[Np1D, List[int], float, paramVal]: RROMPyAssert(self._mode, message = "Cannot add greedy sample.") muidx = self._musMarginalTestIdxs[muidx] if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): if not hasattr(self.trainedModel, "_idxExcl"): raise RROMPyException(("Sample index to be added not present " "in trained model.")) testIdxs = copy(self.trainedModel._idxExcl) skippedIdx = 0 for cj, j in enumerate(self.trainedModel._idxExcl): if j in muidx: testIdxs.pop(skippedIdx) self.musMarginal.insert(self.trainedModel._musMExcl[cj], j - skippedIdx) else: skippedIdx += 1 if len(self.trainedModel._idxExcl) < (len(muidx) + len(testIdxs)): raise RROMPyException(("Sample index to be added not present " "in trained model.")) self._updateTrainedModelMarginalSamples(testIdxs) self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) self.firstGreedyIterM = False idxAdded = self.samplerMarginal.refine(muidx)[0] self._addMarginalSample(self.samplerMarginal.points[idxAdded]) errorEstTest, muidx, maxErrorEst = self.errorEstimatorMarginal(True) if plotEst == "ALL": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) return (errorEstTest, muidx, maxErrorEst, self.samplerMarginal.points[muidx]) def _preliminaryTrainingMarginal(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if np.sum(self.samplingEngine.nsamples) > 0: return self.resetSamples() self._addMarginalSample(self.samplerMarginal.generatePoints( self.SMarginal)) def _preSetupApproxPivoted(self, mus:paramList) \ -> Tuple[ListAny, ListAny, ListAny]: self.computeScaleFactor() if self.trainedModel is None: self._setupTrainedModel(np.zeros((0, 0))) self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], [] self.trainedModel.data.Psupp = [] self._trainedModelOld = copy(self.trainedModel) self._scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 self._musLoc = copy(self.mus) idx, sizes = indicesScatter(len(mus), return_sizes = True) emptyCores = np.where(np.logical_not(sizes))[0] self.verbosity -= 10 + self.samplingEngine.verbosity -= 10 return idx, sizes, emptyCores def _postSetupApproxPivoted(self, mus:Np2D, pMat:Np2D, Ps:ListAny, Qs:ListAny, sizes:ListAny): self.scaleFactor = self._scaleFactorOldPivot del self._scaleFactorOldPivot, self._temporaryPivot 
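        # Gather the pivoted surrogate pieces (projection matrices, rational
        # numerators and denominators, and pivot samples) computed by the
        # parallel workers before stitching them into the trained model.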
pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs, mus, sizes, self.polybasis) if len(self._musLoc) > 0: self._mus = self.checkParameterList(self._musLoc) self._mus.append(mus) else: self._mus = self.checkParameterList(mus) self.trainedModel = self._trainedModelOld del self._trainedModelOld padLeft = self.trainedModel.data.projMat.shape[1] suppNew = np.append(0, np.cumsum(nsamples)) self._setupTrainedModel(pMat, padLeft > 0) self.trainedModel.data.Qs += Qs self.trainedModel.data.Ps += Ps self.trainedModel.data.Psupp += list(padLeft + suppNew[: -1]) self.trainedModel.data.approxParameters = copy(self.approxParameters) self.verbosity += 10 + self.samplingEngine.verbosity += 10 def _localPivotedResult(self, pMat:Np2D, req:ListAny, emptyCores:ListAny, mus:Np2D) -> Tuple[Np2D, ListAny, Np2D]: pMati = self.samplingEngine.projectionMatrix musi = self.samplingEngine.mus if not hasattr(self, "matchState") or not self.matchState: if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C " "to orthonormalized samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) if (hasattr(self.HFEngine, "_is_C_quadratic") and self.HFEngine._is_C_quadratic): pMati = self.HFEngine.applyC(pMati, musi) else: pMatiEff = None for j, mu in enumerate(musi): pMij = np.expand_dims(self.HFEngine.applyC(pMati[:, j], mu), -1) if pMatiEff is None: pMatiEff = np.array(pMij) else: pMatiEff = np.append(pMatiEff, pMij, axis = 1) pMati = pMatiEff vbMng(self, "DEL", "Done extracting system output.", 35) if pMat is None: mus = copy(musi.data) pMat = copy(pMati) if masterCore(): for dest in emptyCores: req += [isend((len(pMat), pMat.dtype, mus.dtype), dest = dest, tag = dest)] else: mus = np.vstack((mus, musi.data)) pMat = np.hstack((pMat, pMati)) return pMat, req, mus @abstractmethod def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) self._preSetupApproxPivoted() data = [] pass self._postSetupApproxPivoted(mus, data) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 def setupApprox(self, plotEst : str = "NONE") -> int: """Compute greedy snapshots of solution map.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") vbMng(self, "INIT", "Starting computation of snapshots.", 3) max2ErrorEst, self.firstGreedyIterM = np.inf, True self._preliminaryTrainingMarginal() if self.errorEstimatorKindMarginal == "NONE": muidx = [] else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": muidx = np.arange(len(self.trainedModel.data.musMarginal)) self._musMarginalTestIdxs = np.array(muidx) while self.firstGreedyIterM or (max2ErrorEst > self.greedyTolMarginal and self.samplerMarginal.npoints < self.maxIterMarginal): errorEstTest, muidx, maxErrorEst, mu = \ self.greedyNextSampleMarginal(muidx, plotEst) if maxErrorEst is None: max2ErrorEst = 1. 
+ self.greedyTolMarginal else: if len(maxErrorEst) > 0: max2ErrorEst = np.max(maxErrorEst) else: max2ErrorEst = np.max(errorEstTest) vbMng(self, "MAIN", ("Uniform testing error estimate " "{:.4e}.").format(max2ErrorEst), 3) if plotEst == "LAST": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) vbMng(self, "DEL", ("Done computing snapshots (final snapshot count: " "{}).").format(len(self.mus)), 3) if (self.errorEstimatorKindMarginal == "LOOK_AHEAD_RECOVER" and hasattr(self.trainedModel, "_idxExcl") and len(self.trainedModel._idxExcl) > 0): vbMng(self, "INIT", "Recovering {} test models.".format( len(self.trainedModel._idxExcl)), 7) for j, mu in zip(self.trainedModel._idxExcl, self.trainedModel._musMExcl): self.musMarginal.insert(mu, j) self._updateTrainedModelMarginalSamples() self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) vbMng(self, "DEL", "Done recovering test models.", 7) return 0 def checkComputedApproxPivoted(self) -> bool: return (super().checkComputedApprox() and len(self.musMarginal) == len(self.trainedModel.data.musMarginal)) class GenericPivotedGreedyApproximantNoMatch( GenericPivotedGreedyApproximantBase, GenericPivotedApproximantNoMatch): """ ROM pivoted greedy interpolant computation for parametric problems (without pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchingWeightError: Weight for pole matching optimization in error estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def _poleMatching(self): vbMng(self, "INIT", "Compressing poles.", 10) self.trainedModel.initializeFromRational() vbMng(self, "DEL", "Done compressing poles.", 10) def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx) class GenericPivotedGreedyApproximant(GenericPivotedGreedyApproximantBase, GenericPivotedApproximant): """ ROM pivoted greedy interpolant computation for parametric problems (with pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'sharedRatio': required ratio of marginal points to share resonance; defaults to 1.; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - 'sharedRatio': required ratio of marginal points to share resonance; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. 
            - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm;
            - 'maxIterMarginal': maximum number of marginal greedy steps;
            - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant.
        parameterListCritical: Recognized keys of critical approximant parameters:
            - 'S': total number of pivot samples current approximant relies upon;
            - 'samplerPivot': pivot sample point generator;
            - 'SMarginal': total number of marginal samples current approximant relies upon;
            - 'samplerMarginal': marginal sample point generator via sparse grid.
        verbosity: Verbosity level.
        POD: Kind of snapshots orthogonalization.
        scaleFactorDer: Scaling factors for derivative computation.
        matchState: Whether to match the system state rather than the system output.
        matchingWeight: Weight for pole matching optimization.
        sharedRatio: Required ratio of marginal points to share resonance.
        matchingWeightError: Weight for pole matching optimization in error estimation.
        S: Total number of pivot samples current approximant relies upon.
        samplerPivot: Pivot sample point generator.
        SMarginal: Total number of marginal samples current approximant relies upon.
        samplerMarginal: Marginal sample point generator via sparse grid.
        errorEstimatorKindMarginal: Kind of marginal error estimator.
        polybasisMarginal: Type of polynomial basis for marginal interpolation.
        paramsMarginal: Dictionary of parameters for marginal interpolation.
        greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm.
        maxIterMarginal: Maximum number of marginal greedy steps.
        radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant.
        muBounds: list of bounds for pivot parameter values.
        muBoundsMarginal: list of bounds for marginal parameter values.
        samplingEngine: Sampling engine.
        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList.
        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList.
        uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList.
        lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList.
        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList.
        lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList.
    """

    def _poleMatching(self):
        vbMng(self, "INIT", "Compressing and matching poles.", 10)
        self.trainedModel.initializeFromRational(self.matchingWeight,
                                                 self.HFEngine, False)
        vbMng(self, "DEL", "Done compressing and matching poles.", 10)

    def _updateTrainedModelMarginalSamples(self, idx : ListAny = []):
        self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight,
                                                 self.HFEngine, False)

    def setupApprox(self, *args, **kwargs) -> int:
        if self.checkComputedApprox(): return -1
        self.purgeparamsMarginal()
        _polybasisMarginal = self.polybasisMarginal
        self._polybasisMarginal = ("PIECEWISE_LINEAR_"
                                 + self.samplerMarginal.kind)
        setupOK = super().setupApprox(*args, **kwargs)
        self._polybasisMarginal = _polybasisMarginal
        self._finalizeMarginalization()
        if self.matchState: self._postApplyC()
        return setupOK
diff --git a/rrompy/utilities/poly_fitting/heaviside/heaviside_to_from_rational.py b/rrompy/utilities/poly_fitting/heaviside/heaviside_to_from_rational.py
index 1761da7..13d0b66 100644
--- a/rrompy/utilities/poly_fitting/heaviside/heaviside_to_from_rational.py
+++ b/rrompy/utilities/poly_fitting/heaviside/heaviside_to_from_rational.py
@@ -1,98 +1,93 @@
# Copyright (C) 2018-2020 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from rrompy.utilities.poly_fitting.polynomial.polynomial_interpolator import (
                     PolynomialInterpolator, PolynomialInterpolatorNodal)
from rrompy.utilities.poly_fitting.polynomial.vander import polyvander
from rrompy.utilities.poly_fitting.custom_fit import customFit
from rrompy.utilities.base.types import Np1D, Np2D, Tuple, DictAny, interpEng
from rrompy.parameter.parameter_sampling import (RandomSampler as RS,
                                                 QuadratureSampler as QS)
from .val import polyval
from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert

__all__ = ['heaviside2rational', 'rational2heaviside']

def heaviside2rational(c:Np2D, poles:Np1D, murange : Np1D = None,
                       basis : str = "MONOMIAL_HEAVISIDE",
                       basisD : str = None, parameterMap : DictAny = 1.) \
                                           -> Tuple[interpEng, interpEng]:
    basisN = basis.split("_")[0]
    if basisD is None: basisD = basisN
    num = PolynomialInterpolator()
    den = PolynomialInterpolatorNodal()
    den.polybasis, den.nodes = basisD, poles
    if murange is None:
        multiplier = [1., 1.j]
        avgs = [np.mean(np.real(poles)), np.mean(np.imag(poles))]
        ranges = np.array([[np.min(np.real(poles)), np.max(np.real(poles))],
                           [np.min(np.imag(poles)), np.max(np.imag(poles))]])
        domIdx = np.argmax(ranges[:, 1] - ranges[:, 0])
        murange = [multiplier[domIdx] * x
                 + multiplier[1 - domIdx] * avgs[1 - domIdx]
                   for x in ranges[domIdx, :]]
    if basis == "CHEBYSHEV":
        sampler = QS(murange, "CHEBYSHEV", parameterMap)
    elif basis == "LEGENDRE":
        sampler = QS(murange, "GAUSSLEGENDRE", parameterMap)
    else:
        sampler = RS(murange, "HALTON", parameterMap)
    xAux = sampler.generatePoints(len(c))
    valsAux = den(xAux) * polyval(xAux, c, poles, basis)
    num.setupByInterpolation(xAux, valsAux, len(c) - 1, basisN)
    return num, den

def rational2heaviside(num:interpEng, den:interpEng,
                       murange : Np1D = np.array([-1., 1.]),
                       scl : Np1D = None, parameterMap : DictAny = 1.) \
                                           -> Tuple[Np2D, Np1D, str]:
    if (not isinstance(num, PolynomialInterpolator)
     or not isinstance(den, PolynomialInterpolator)):
        raise RROMPyException(("Rational numerator and denominator must be "
                               "polynomial interpolators."))
    RROMPyAssert(num.npar, 1, "Number of parameters")
    RROMPyAssert(den.npar, 1, "Number of parameters")
    basis = num.polybasis + "_HEAVISIDE"
    c = np.zeros_like(num.coeffs)
    poles = den.roots()
    if len(poles) > 0:
        Psp = num(poles)
-        Qsp = den(poles)
        Qspder = den(poles, 1, scl)
-        polesBad = np.abs(Qsp) >= 1e-5
-        Psp[..., polesBad] = 0.
-        Qspder[polesBad] = 1.
        c[: len(poles)] = (Psp / Qspder).T
    if len(c) > len(poles):
        from rrompy.parameter.parameter_sampling import (RandomSampler as RS,
                                                         QuadratureSampler as QS)
        if num.polybasis == "CHEBYSHEV":
            sampler = QS(murange, "CHEBYSHEV", parameterMap)
        elif num.polybasis == "LEGENDRE":
            sampler = QS(murange, "GAUSSLEGENDRE", parameterMap)
        else:
            sampler = RS(murange, "HALTON", parameterMap)
        xAux = sampler.generatePoints(len(c))
        valsAux = num(xAux) / den(xAux)
        if len(poles) > 0: valsAux -= polyval(xAux, c, poles, basis)
        VanAux = polyvander(xAux, [len(c) - len(poles) - 1], num.polybasis)
        c[len(poles) :] = customFit(VanAux, valsAux.T)
-    if len(poles) > 0: poles[polesBad] = np.inf
    return c, poles, basis
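For reference, the residue computation in rational2heaviside is the classical partial-fraction formula: for a rational function P/Q with simple poles p_k, the Heaviside coefficients are c_k = P(p_k) / Q'(p_k). The following is a minimal plain-numpy check of that identity, independent of the patch above; the two polynomials are arbitrary illustrative examples, not RROMPy objects.

import numpy as np

P = np.poly1d([1., 2.])       # P(x) = x + 2
Q = np.poly1d([1., 0., -1.])  # Q(x) = x^2 - 1, simple poles at +1 and -1
poles = Q.roots
residues = P(poles) / Q.deriv()(poles)   # c_k = P(p_k) / Q'(p_k)

# away from the poles, P/Q must equal its pole-residue (Heaviside) expansion
x = np.linspace(2., 3., 7)
direct = P(x) / Q(x)
expansion = sum(c / (x - p) for c, p in zip(residues, poles))
assert np.allclose(direct, expansion)

Here the numerator degree is below the denominator degree, so the expansion has no polynomial part; when it does, rational2heaviside fits the leftover polynomial coefficients by least squares in the len(c) > len(poles) branch above.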