diff --git a/examples/greedy/squareBubbleHomog.py b/examples/greedy/squareBubbleHomog.py
index 7150daa..be15aa4 100644
--- a/examples/greedy/squareBubbleHomog.py
+++ b/examples/greedy/squareBubbleHomog.py
@@ -1,104 +1,108 @@
 import numpy as np
 from rrompy.hfengines.scipy import HelmholtzSquareBubbleProblemEngine as HSBPE
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangePadeGreedy as Pade
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangeRBGreedy as RB
 from rrompy.utilities.base import squareResonances
 
 verb = 2
-timed = False
+timed = True
 algo = "Pade"
 #algo = "RB"
 polyBasis = "LEGENDRE"
 #polyBasis = "CHEBYSHEV"
-polyBasis = "MONOMIAL"
+#polyBasis = "MONOMIAL"
+errorEstimatorKind = "SIMPLIFIED"
+errorEstimatorKind = "EXACT"
 
 k0s = np.power(np.linspace(95, 149, 200), .5)
-k0s = np.power(np.linspace(95, 129, 200), .5)
+k0s = np.power(np.linspace(95, 129, 20), .5)
 k0 = np.mean(np.power(k0s, 2.)) ** .5
 kl, kr = min(k0s), max(k0s)
 
 polesexact = np.unique(np.power(squareResonances(kl**2., kr**2., False), .5))
 
-rescaling = lambda x: np.power(x, 2.)
-rescalingInv = lambda x: np.power(x, .5)
 params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0,
-          'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis}
+          'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis,
+          'errorEstimatorKind':errorEstimatorKind}
+
+if timed:
+    verb = 0
 
 solver = HSBPE(kappa = 12 ** .5, theta = np.pi / 3, n = 30,
                verbosity = verb)
 solver.omega = np.real(k0)
 if algo == "Pade":
     approx = Pade(solver, mu0 = k0, approxParameters = params,
                   verbosity = verb)
 else:
     approx = RB(solver, mu0 = k0, approxParameters = params,
                 verbosity = verb)
 
 if timed:
     from time import clock
     start_time = clock()
     approx.greedy()
     print("Time: ", clock() - start_time)
 else:
     approx.greedy(True)
 
 approx.samplingEngine.verbosity = 0
 approx.verbosity = 0
 from matplotlib import pyplot as plt
 normApp = np.zeros_like(k0s)
 norm = np.zeros_like(k0s)
 res = np.zeros_like(k0s)
 err = np.zeros_like(k0s)
 for j in range(len(k0s)):
     normApp[j] = approx.normApp(k0s[j])
     norm[j] = approx.normHF(k0s[j])
     res[j] = (approx.estNormer.norm(approx.getRes(k0s[j]))
               / approx.estNormer.norm(approx.getRHS(k0s[j])))
     err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j])
 resApp = approx.errorEstimator(k0s)
 
 plt.figure()
 plt.semilogy(k0s, norm)
 plt.semilogy(k0s, normApp, '--')
 plt.semilogy(polesexact,
              2.*np.max(norm)*np.ones_like(polesexact, dtype = float), 'm.')
 plt.semilogy(np.real(approx.mus),
              4.*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, res)
 plt.semilogy(k0s, resApp, '--')
 plt.semilogy(polesexact,
              2.*np.max(resApp)*np.ones_like(polesexact, dtype = float), 'm.')
 plt.semilogy(np.real(approx.mus),
              4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, err)
 plt.semilogy(polesexact,
              2.*np.max(err)*np.ones_like(polesexact, dtype = float), 'm.')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 polesApp = approx.getPoles()
 mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr)
 print("Outliers:", polesApp[mask])
 polesApp = polesApp[~mask]
 plt.figure()
 plt.plot(np.real(polesApp), np.imag(polesApp), 'kx')
 plt.plot(np.real(polesexact), np.imag(polesexact), 'm.')
 plt.axis('equal')
 plt.grid()
 plt.show()
 plt.close()
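A note on the timing pattern used by these example scripts: time.clock() was deprecated in Python 3.3 and removed in Python 3.8, so the timed branch raises an ImportError/AttributeError on newer interpreters. A minimal portable sketch of the same measurement, assuming only the standard library and the approx object built by the script above:

from time import perf_counter  # replacement for the removed time.clock

start_time = perf_counter()
approx.greedy()                # as constructed in the script above
print("Time: ", perf_counter() - start_time)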
diff --git a/examples/greedy/squareScatteringAirfoil.py b/examples/greedy/squareScatteringAirfoil.py
index 17fb055..58b9024 100644
--- a/examples/greedy/squareScatteringAirfoil.py
+++ b/examples/greedy/squareScatteringAirfoil.py
@@ -1,145 +1,145 @@
 import numpy as np
 import fenics as fen
 import ufl
 from rrompy.hfengines.scipy import HelmholtzBoxScatteringProblemEngine as HSP
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangePadeGreedy as Pade
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangeRBGreedy as RB
 from rrompy.utilities.base.fenics import fenONE
 
 verb = 2
 timed = False
 algo = "Pade"
 #algo = "RB"
 polyBasis = "LEGENDRE"
 #polyBasis = "CHEBYSHEV"
-polyBasis = "MONOMIAL"
+#polyBasis = "MONOMIAL"
 homog = True
 homog = False
-k0s = np.linspace(5, 20, 50)
+k0s = np.linspace(5, 20, 25)
 k0 = np.mean(k0s)
 kl, kr = min(k0s), max(k0s)
 
 params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0,
           'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis}
 
 #########
 
 PI = np.pi
 R = 2
 def Dboundary(x, on_boundary):
     return on_boundary and (x[0]**2+x[1]**2)**.5 < .95 * R
 
 kappa = 10
 theta = PI * - 45 / 180.
 mu = 1.1
 epsilon = .1
 
 mesh = fen.Mesh('../data/mesh/airfoil.xml')
 
 c, s = np.cos(theta), np.sin(theta)
 x, y = fen.SpatialCoordinate(mesh)[:]
 u0R = - fen.cos(kappa * (c * x + s * y))
 u0I = - fen.sin(kappa * (c * x + s * y))
 
 checkReal = x**2-x+y**2
 rhop5 = ((x**2+y**2)/((x-1)**2+y**2))**.25
 phiroot1 = fen.atan(-y/(x**2-x+y**2)) / 2
 phiroot2 = fen.atan(-y/(x**2-x+y**2)) / 2 - PI * ufl.sign(-y/(x**2-x+y**2)) / 2
 kappam1 = (((rhop5*fen.cos(phiroot1)+.5)**2.+(rhop5*fen.sin(phiroot1))**2.)/
            ((rhop5*fen.cos(phiroot1)-1.)**2.+(rhop5*fen.sin(phiroot1))**2.)
           )**.5 - mu
 kappam2 = (((rhop5*fen.cos(phiroot2)+.5)**2.+(rhop5*fen.sin(phiroot2))**2.)/
            ((rhop5*fen.cos(phiroot2)-1.)**2.+(rhop5*fen.sin(phiroot2))**2.)
           )**.5 - mu
 Heps1 = .9 * .5 * (1 + kappam1/epsilon + fen.sin(PI*kappam1/epsilon) / PI) + .1
 Heps2 = .9 * .5 * (1 + kappam2/epsilon + fen.sin(PI*kappam2/epsilon) / PI) + .1
 cTT = ufl.conditional(ufl.le(kappam1, epsilon), Heps1, fenONE)
 c_F = fen.Constant(.1)
 cFT = ufl.conditional(ufl.le(kappam2, epsilon), Heps2, fenONE)
 c_F = fen.Constant(.1)
 cT = ufl.conditional(ufl.ge(kappam1, - epsilon), cTT, c_F)
 cF = ufl.conditional(ufl.ge(kappam2, - epsilon), cFT, c_F)
 a = ufl.conditional(ufl.ge(checkReal, 0.), cT, cF)
 
 solver = HSP(R, np.real(k0), theta, n = 1, verbosity = verb,
              degree_threshold = 8)
 solver.omega = np.real(k0)
 solver.V = fen.FunctionSpace(mesh, "P", 3)
 solver.diffusivity = a
 solver.DirichletBoundary = Dboundary
 solver.RobinBoundary = "REST"
 solver.DirichletDatum = [u0R, u0I]
 
 #########
 
 if algo == "Pade":
     approx = Pade(solver, mu0 = k0, approxParameters = params,
                   verbosity = verb, homogeneized = homog)
 else:
     approx = RB(solver, mu0 = k0, approxParameters = params,
                 verbosity = verb, homogeneized = homog)
 
 if timed:
     from time import clock
     start_time = clock()
     approx.greedy()
     print("Time: ", clock() - start_time)
 else:
     approx.greedy(True)
 
 approx.samplingEngine.verbosity = 0
 approx.verbosity = 0
 kl, kr = np.real(kl), np.real(kr)
 from matplotlib import pyplot as plt
 normApp = np.zeros(len(k0s))
 norm = np.zeros_like(normApp)
 res = np.zeros_like(normApp)
 err = np.zeros_like(normApp)
 for j in range(len(k0s)):
     normApp[j] = approx.normApp(k0s[j])
     norm[j] = approx.normHF(k0s[j])
-    res[j] = (approx.estNormer.norm(approx.getRes(k0s[j]))
-              / approx.estNormer.norm(approx.getRHS(k0s[j])))
+    res[j] = (approx.estNormer.norm(approx.getRes(k0s[j], homogeneized=homog))
+              / approx.estNormer.norm(approx.getRHS(k0s[j],
+                                                    homogeneized=homog)))
     err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j])
 resApp = approx.errorEstimator(k0s)
 
 plt.figure()
 plt.plot(k0s, norm)
 plt.plot(k0s, normApp, '--')
 plt.plot(np.real(approx.mus),
          1.05*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, res)
 plt.semilogy(k0s, resApp, '--')
 plt.semilogy(np.real(approx.mus),
              4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, err)
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 polesApp = approx.getPoles()
 mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr)
 print("Outliers:", polesApp[mask])
 polesApp = polesApp[~mask]
 plt.figure()
 plt.plot(np.real(polesApp), np.imag(polesApp), 'kx')
 plt.axis('equal')
 plt.grid()
 plt.show()
 plt.close()
diff --git a/examples/greedy/squareScatteringHomog.py b/examples/greedy/squareScatteringHomog.py
index 8c9270a..d037701 100644
--- a/examples/greedy/squareScatteringHomog.py
+++ b/examples/greedy/squareScatteringHomog.py
@@ -1,91 +1,97 @@
 import numpy as np
 from rrompy.hfengines.scipy import HelmholtzCavityScatteringProblemEngine as \
     HCSPE
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangePadeGreedy as Pade
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangeRBGreedy as RB
 
 verb = 2
 timed = False
 algo = "Pade"
 #algo = "RB"
 polyBasis = "LEGENDRE"
 #polyBasis = "CHEBYSHEV"
-polyBasis = "MONOMIAL"
+#polyBasis = "MONOMIAL"
+errorEstimatorKind = "SIMPLIFIED"
+#errorEstimatorKind = "EXACT"
 
 k0s = np.linspace(5, 12, 100)
 k0 = np.mean(k0s)
 kl, kr = min(k0s), max(k0s)
 
 params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0,
-          'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis}
+          'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis,
+          'errorEstimatorKind':errorEstimatorKind}
+
+if timed:
+    verb = 0
 
 solver = HCSPE(kappa = 5, n = 10, verbosity = verb)
 solver.omega = np.real(k0)
 if algo == "Pade":
     approx = Pade(solver, mu0 = k0, approxParameters = params,
                   verbosity = verb)
 else:
     approx = RB(solver, mu0 = k0, approxParameters = params,
                 verbosity = verb)
 
 if timed:
     from time import clock
     start_time = clock()
     approx.greedy()
     print("Time: ", clock() - start_time)
 else:
     approx.greedy(True)
 
 approx.samplingEngine.verbosity = 0
 approx.verbosity = 0
 from matplotlib import pyplot as plt
 normApp = np.zeros(len(k0s))
 norm = np.zeros_like(normApp)
 res = np.zeros_like(normApp)
 err = np.zeros_like(normApp)
 for j in range(len(k0s)):
     normApp[j] = approx.normApp(k0s[j])
     norm[j] = approx.normHF(k0s[j])
     res[j] = (approx.estNormer.norm(approx.getRes(k0s[j]))
               / approx.estNormer.norm(approx.getRHS(k0s[j])))
     err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j])
 resApp = approx.errorEstimator(k0s)
 
 plt.figure()
 plt.plot(k0s, norm)
 plt.plot(k0s, normApp, '--')
 plt.plot(np.real(approx.mus),
          1.25*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, res)
 plt.semilogy(k0s, resApp, '--')
 plt.semilogy(np.real(approx.mus),
              4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, err)
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 polesApp = approx.getPoles()
 mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr)
 print("Outliers:", polesApp[mask])
 polesApp = polesApp[~mask]
 plt.figure()
 plt.plot(np.real(polesApp), np.imag(polesApp), 'kx')
 plt.axis('equal')
 plt.grid()
 plt.show()
 plt.close()
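The res curves in these scripts compare the true relative residual against the greedy estimate resApp returned by errorEstimator. A minimal sketch of the quantity being plotted, for a generic assembled matrix A, right-hand side b and approximate solution u (the scripts use the problem-specific estNormer norm, not the Euclidean one used here):

import numpy as np

def relative_residual(A, b, u):
    # ||A u - b|| / ||b||; stand-in for
    # estNormer.norm(getRes(k)) / estNormer.norm(getRHS(k)) above
    return np.linalg.norm(A.dot(u) - b) / np.linalg.norm(b)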
diff --git a/examples/greedy/squareTransmissionNonHomog.py b/examples/greedy/squareTransmissionNonHomog.py
index e3d153a..5803a52 100644
--- a/examples/greedy/squareTransmissionNonHomog.py
+++ b/examples/greedy/squareTransmissionNonHomog.py
@@ -1,105 +1,108 @@
 import numpy as np
 from rrompy.hfengines.scipy import HelmholtzSquareTransmissionProblemEngine \
     as HSTPE
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangeRBGreedy as RB
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangePadeGreedy as Pade
 
 timed = False
 verb = 2
 algo = "Pade"
 #algo = "RB"
 polyBasis = "LEGENDRE"
-polyBasis = "CHEBYSHEV"
-polyBasis = "MONOMIAL"
+#polyBasis = "CHEBYSHEV"
+#polyBasis = "MONOMIAL"
 homog = True
-homog = False
+#homog = False
+errorEstimatorKind = "SIMPLIFIED"
+errorEstimatorKind = "EXACT"
 
 k0s = np.power(np.linspace(4, 15, 100), .5)
 k0 = np.mean(np.power(k0s, 2.)) ** .5
 kl, kr = min(k0s), max(k0s)
 
 rescaling = lambda x: np.power(x, 2.)
 rescalingInv = lambda x: np.power(x, .5)
 params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0,
-          'greedyTol':1e-2, 'nTestPoints':5, 'basis':polyBasis}
+          'greedyTol':1e-2, 'nTestPoints':5, 'basis':polyBasis,
+          'errorEstimatorKind':errorEstimatorKind}
 
 solver = HSTPE(nT = 1, nB = 2, theta = np.pi * 20 / 180, kappa = 4.,
                n = 20, verbosity = verb)
 solver.omega = np.real(k0)
 if algo == "Pade":
     approx = Pade(solver, mu0 = k0, approxParameters = params,
                   verbosity = verb, homogeneized = homog)
 else:
     approx = RB(solver, mu0 = k0, approxParameters = params,
                 verbosity = verb, homogeneized = homog)
 
 if timed:
     from time import clock
     start_time = clock()
     approx.greedy()
     print("Time: ", clock() - start_time)
 else:
     approx.greedy(True)
 
 approx.samplingEngine.verbosity = 0
 approx.verbosity = 0
 from matplotlib import pyplot as plt
 normApp = np.zeros(len(k0s))
 norm = np.zeros_like(normApp)
 res = np.zeros_like(normApp)
 err = np.zeros_like(normApp)
 for j in range(len(k0s)):
     normApp[j] = approx.normApp(k0s[j])
     norm[j] = approx.normHF(k0s[j])
-    res[j] = (approx.estNormer.norm(approx.getRes(k0s[j]))
-              / approx.estNormer.norm(approx.getRHS(k0s[j])))
+    res[j] = (approx.estNormer.norm(approx.getRes(k0s[j], homogeneized=homog))
+              / approx.estNormer.norm(approx.getRHS(k0s[j],
+                                                    homogeneized=homog)))
     err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j])
 resApp = approx.errorEstimator(k0s)
 
 polesApp = approx.getPoles()
 polesApp = polesApp[np.abs(np.imag(polesApp)) < 1e-3]
 plt.figure()
 plt.semilogy(k0s, norm)
 plt.semilogy(k0s, normApp, '--')
 plt.semilogy(np.real(approx.mus),
              4.*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.semilogy(np.real(polesApp),
              2.*np.max(norm)*np.ones_like(polesApp, dtype = float), 'k.')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, res)
 plt.semilogy(k0s, resApp, '--')
 plt.semilogy(np.real(approx.mus),
              4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx')
 plt.semilogy(np.real(polesApp),
              2.*np.max(resApp)*np.ones_like(polesApp, dtype = float), 'k.')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 plt.figure()
 plt.semilogy(k0s, err)
 plt.semilogy(np.real(polesApp),
              2.*np.max(err)*np.ones_like(polesApp, dtype = float), 'k.')
 plt.xlim([kl, kr])
 plt.grid()
 plt.show()
 plt.close()
 
 polesApp = approx.getPoles()
 mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr)
 print("Outliers:", polesApp[mask])
 polesApp = polesApp[~mask]
 plt.figure()
 plt.plot(np.real(polesApp), np.imag(polesApp), 'kx')
 plt.axis('equal')
 plt.grid()
 plt.show()
 plt.close()
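The rescaling/rescalingInv pair kept in this script encodes the fact that the Helmholtz operator is affine in k^2 rather than in k, so the approximant is built in the rescaled variable and poles are mapped back afterwards. A self-contained check of the round trip, using the same lambdas as the script:

import numpy as np

rescaling = lambda x: np.power(x, 2.)     # k -> k^2, the affine parameter
rescalingInv = lambda x: np.power(x, .5)  # k^2 -> k

ks = np.linspace(2., 4., 5)
assert np.allclose(rescalingInv(rescaling(ks)), ks)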
diff --git a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py
index 11f1846..096a0f6 100644
--- a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py
+++ b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py
@@ -1,479 +1,479 @@
 # Copyright (C) 2018 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 from copy import copy
 import numpy as np
 from rrompy.reduction_methods.base import (checkRobustTolerance,
                                            setupFitCallables)
 from .generic_approximant_lagrange import GenericApproximantLagrange
 from rrompy.utilities.base.types import Np1D, List, HFEng, DictAny
 from rrompy.utilities.base import verbosityDepth, purgeDict, customFit
 from rrompy.utilities.warning_manager import warn
 
 __all__ = ['ApproximantLagrangePade']
 
 class ApproximantLagrangePade(GenericApproximantLagrange):
     """
     ROM Lagrange Pade' interpolant computation for parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': whether to compute POD of snapshots; defaults to True;
             - 'S': total number of samples current approximant relies upon;
               defaults to 2;
             - 'sampler': sample point generator; defaults to uniform sampler
               on [0, 1];
             - 'basis': type of basis for interpolation; allowed values
               include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to
               'MONOMIAL';
             - 'E': coefficient of interpolant to be minimized; defaults to
               min(S, M + 1);
             - 'M': degree of Pade' interpolant numerator; defaults to 0;
             - 'N': degree of Pade' interpolant denominator; defaults to 0;
             - 'interpRcond': tolerance for interpolation via numpy.polyfit;
               defaults to None;
             - 'robustTol': tolerance for robust Pade' denominator management;
               defaults to 0.
             Defaults to empty dict.
         homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to
             False.
         verbosity(optional): Verbosity level. Defaults to 10.
 
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         mus: Array of snapshot parameters.
         ws: Array of snapshot weigths.
         homogeneized: Whether to homogeneize Dirichlet BCs.
         approxParameters: Dictionary containing values for main parameters
             of approximant. Recognized keys are in parameterList.
         parameterList: Recognized keys of approximant parameters:
             - 'POD': whether to compute POD of snapshots;
             - 'S': total number of samples current approximant relies upon;
             - 'sampler': sample point generator;
             - 'basis': type of basis for interpolation;
             - 'E': coefficient of interpolant to be minimized;
             - 'M': degree of Pade' interpolant numerator;
             - 'N': degree of Pade' interpolant denominator;
             - 'interpRcond': tolerance for interpolation via numpy.polyfit;
             - 'robustTol': tolerance for robust Pade' denominator management.
         extraApproxParameters: List of approxParameters keys in addition to
             mother class's.
         S: Number of solution snapshots over which current approximant is
             based upon.
         sampler: Sample point generator.
         M: Numerator degree of approximant.
         N: Denominator degree of approximant.
         POD: Whether to compute POD of snapshots.
         interpRcond: Tolerance for interpolation via numpy.polyfit.
         robustTol: Tolerance for robust Pade' denominator management.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
             complex vector.
         lastSolvedHF: Wavenumber corresponding to last computed high fidelity
             solution.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D vector whose columns are FE dofs of coefficients of
             approximant numerator.
         uApp: Last evaluated approximant as numpy complex vector.
         lastApproxParameters: List of parameters corresponding to last
             computed approximant.
     """
 
     def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
                  approxParameters : DictAny = {}, homogeneized : bool = False,
                  verbosity : int = 10):
         self._preInit()
         self._addParametersToList(["basis", "E", "M", "N", "interpRcond",
                                    "robustTol"])
         super().__init__(HFEngine = HFEngine, mu0 = mu0,
                          approxParameters = approxParameters,
                          homogeneized = homogeneized,
                          verbosity = verbosity)
         self._postInit()
 
     @property
     def approxParameters(self):
         """
         Value of approximant parameters.
         """
         return self._approxParameters
     @approxParameters.setter
     def approxParameters(self, approxParams):
         approxParameters = purgeDict(approxParams, self.parameterList,
                                   dictname = self.name() + ".approxParameters",
                                   baselevel = 1)
         approxParametersCopy = purgeDict(approxParameters,
                                          ["basis", "E", "M", "N",
                                           "interpRcond", "robustTol"],
                                          True, True, baselevel = 1)
         if hasattr(self, "M"): Mold = self.M
         self._M = 0
         if hasattr(self, "N"): Nold = self.N
         self._N = 0
         if hasattr(self, "E"): self._E = 0
         GenericApproximantLagrange.approxParameters.fset(self,
                                                          approxParametersCopy)
         keyList = list(approxParameters.keys())
         if "basis" in keyList or not hasattr(self, "_val"):
             if "basis" in keyList:
                 kind = approxParameters["basis"]
             else:
                 kind = "MONOMIAL"
             setupFit = setupFitCallables(kind)
             for x in setupFit:
                 super().__setattr__("_" + x, setupFit[x])
         if "interpRcond" in keyList:
             self.interpRcond = approxParameters["interpRcond"]
         elif hasattr(self, "interpRcond"):
             self.interpRcond = self.interpRcond
         else:
             self.interpRcond = None
         if "robustTol" in keyList:
             self.robustTol = approxParameters["robustTol"]
         elif hasattr(self, "robustTol"):
             self.robustTol = self.robustTol
         else:
             self.robustTol = 0
         if "M" in keyList:
             self.M = approxParameters["M"]
         elif hasattr(self, "M"):
             self.M = Mold
         else:
             self.M = 0
         if "N" in keyList:
             self.N = approxParameters["N"]
         elif hasattr(self, "N"):
             self.N = Nold
         else:
             self.N = 0
         if "E" in keyList:
             self.E = approxParameters["E"]
         else:
             self.E = min(self.S - 1, self.M + 1)
 
     @property
     def M(self):
         """Value of M. Its assignment may change S."""
         return self._M
     @M.setter
     def M(self, M):
         if M < 0: raise ArithmeticError("M must be non-negative.")
         self._M = M
         self._approxParameters["M"] = self.M
         if hasattr(self, "S") and self.S < self.M + 1:
             warn("Prescribed S is too small. Updating S to M + 1.")
             self.S = self.M + 1
 
     @property
     def N(self):
         """Value of N. Its assignment may change S."""
         return self._N
     @N.setter
     def N(self, N):
         if N < 0: raise ArithmeticError("N must be non-negative.")
         self._N = N
         self._approxParameters["N"] = self.N
         if hasattr(self, "S") and self.S < self.N + 1:
             warn("Prescribed S is too small. Updating S to N + 1.")
             self.S = self.N + 1
 
     @property
     def E(self):
         """Value of E. Its assignment may change S."""
         return self._E
     @E.setter
     def E(self, E):
         if E < 0: raise ArithmeticError("E must be non-negative.")
         self._E = E
         self._approxParameters["E"] = self.E
         if hasattr(self, "S") and self.S < self.E + 1:
             warn("Prescribed S is too small. Updating S to E + 1.")
             self.S = self.E + 1
 
     @property
     def robustTol(self):
         """Value of tolerance for robust Pade' denominator management."""
         return self._robustTol
     @robustTol.setter
     def robustTol(self, robustTol):
         if robustTol < 0.:
             warn("Overriding prescribed negative robustness tolerance to 0.")
             robustTol = 0.
         self._robustTol = robustTol
         self._approxParameters["robustTol"] = self.robustTol
 
     @property
     def S(self):
         """Value of S."""
         return self._S
     @S.setter
     def S(self, S):
         if S <= 0: raise ArithmeticError("S must be positive.")
         if hasattr(self, "S"): Sold = self.S
         else: Sold = -1
         vals, label = [0] * 3, {0:"M", 1:"N", 2:"E"}
         if hasattr(self, "M"): vals[0] = self.M
         if hasattr(self, "N"): vals[1] = self.N
         if hasattr(self, "E"): vals[2] = self.E
         idxmax = np.argmax(vals)
         if vals[idxmax] + 1 > S:
             warn("Prescribed S is too small. Updating S to {} + 1."\
                  .format(label[idxmax]))
             self.S = vals[idxmax] + 1
         else:
             self._S = S
             self._approxParameters["S"] = self.S
             if Sold != self.S:
                 self.resetSamples()
 
     def setupApprox(self):
         """
         Compute Pade' interpolant. SVD-based robust eigenvalue management.
         """
         if not self.checkComputedApprox():
             if self.verbosity >= 5:
                 verbosityDepth("INIT", "Setting up {}.".format(self.name()))
             self.computeScaleFactor()
             self.computeSnapshots()
             if self.N > 0:
                 if self.verbosity >= 7:
                     verbosityDepth("INIT", ("Starting computation of "
                                             "denominator."))
                 TE = self._vander(self.radiusPade(self.mus), self.E)
                 while self.N > 0:
                     RHS = np.zeros(self.E + 1)
                     RHS[-1] = 1.
                     fitOut = customFit(TE.T, RHS, w = self.ws, full = True,
                                        rcond = self.interpRcond)
                     if self.verbosity >= 5:
                         verbosityDepth("MAIN", ("Fitting {} samples with "
                                                 "degree {} through {}... "
                                                 "Conditioning of LS system: "
                                                 "{:.4e}.").format(
                                           self.S, self.E, self._fitname,
                                           fitOut[1][2][0] / fitOut[1][2][-1]))
                     if fitOut[1][1] < self.E + 1:
                         Enew = fitOut[1][1] - 1
                         Nnew = min(self.N, Enew)
                         Mnew = min(self.M, Enew)
                         if Nnew == self.N:
                             strN = ""
                         else:
                             strN = "N from {} to {} and ".format(self.N, Nnew)
                         if Mnew == self.M:
                             strM = ""
                         else:
                             strM = "M from {} to {} and ".format(self.M, Mnew)
                         warn(("Polyfit is poorly conditioned.\nReducing {}{}E "
                               "from {} to {}.").format(strN, strM,
                                                        self.E, Enew))
                         newParameters = {"N" : Nnew, "M" : Mnew, "E" : Enew}
                         self.approxParameters = newParameters
                         continue
                     G = (TE[:, : self.N + 1].T * fitOut[0]).T
                     if self.POD:
                         if self.verbosity >= 7:
                             verbosityDepth("INIT", ("Solving svd for square "
-                                                    "root of gramian matrix."),
-                                           end = "")
+                                                    "root of gramian matrix."))
                         G = self.samplingEngine.RPOD.dot(G)
                         _, s, eV = np.linalg.svd(G, full_matrices = False)
                         ev = s[::-1]
                         eV = eV[::-1, :].conj().T
                         if self.verbosity >= 5:
                             try: condev = s[0] / s[-1]
                             except: condev = np.inf
                             verbosityDepth("MAIN", ("Solved svd problem of "
                                                     "size {} x {} with "
                                                     "condition number "
                                                     "{:.4e}.").format(
                                                  self.S, self.N + 1, condev))
                     else:
                         if self.verbosity >= 10:
                             verbosityDepth("INIT", "Building gramian matrix.",
                                            end = "")
                         G = self.samplingEngine.samples.dot(G)
                         G2 = self.HFEngine.innerProduct(G, G)
                         if self.verbosity >= 10:
                             verbosityDepth("DEL", "Done building gramian.",
                                            inline = True)
                         if self.verbosity >= 7:
                             verbosityDepth("INIT", ("Solving eigenvalue "
                                                     "problem for gramian "
-                                                    "matrix."), end = "")
+                                                    "matrix."))
                         ev, eV = np.linalg.eigh(G2)
                         if self.verbosity >= 5:
                             try: condev = ev[-1] / ev[0]
                             except: condev = np.inf
                             verbosityDepth("MAIN", ("Solved eigenvalue "
                                                     "problem of size {} with "
                                                     "condition number "
                                                     "{:.4e}.").format(
                                                         self.N + 1, condev))
                     if self.verbosity >= 7:
-                        verbosityDepth("DEL", " Done.", inline = True)
+                        verbosityDepth("DEL", ("Done solving eigenvalue "
+                                               "problem."))
                     newParameters = checkRobustTolerance(ev, self.E,
                                                          self.robustTol)
                     if not newParameters: break
                     self.approxParameters = newParameters
                 if self.N <= 0:
                     self._N = 0
                     eV = np.ones((1, 1))
                 self.Q = eV[:, 0]
                 if self.verbosity >= 7:
                     verbosityDepth("DEL", "Done computing denominator.")
             else:
                 self.Q = np.ones(1, dtype = np.complex)
             if self.verbosity >= 7:
                 verbosityDepth("INIT", "Starting computation of numerator.")
             self.lastApproxParameters = copy(self.approxParameters)
             Qevaldiag = np.diag(self.getQVal(self.mus))
             while self.M >= 0:
                 fitVander = self._vander(self.radiusPade(self.mus), self.M)
                 fitOut = customFit(fitVander, Qevaldiag, w = self.ws,
                                    full = True, rcond = self.interpRcond)
                 if self.verbosity >= 5:
                     verbosityDepth("MAIN", ("Fitting {} samples with degree "
                                             "{} through {}... Conditioning of "
                                             "LS system: {:.4e}.").format(
                                           self.S, self.M, self._fitname,
                                           fitOut[1][2][0] / fitOut[1][2][-1]))
                 if fitOut[1][1] == self.M + 1:
                     P = fitOut[0].T
                     break
                 warn(("Polyfit is poorly conditioned. Reducing M from {} to "
                       "{}. Exact snapshot interpolation not guaranteed.")\
                      .format(self.M, fitOut[1][1] - 1))
                 self.M = fitOut[1][1] - 1
                 if self.M <= 0:
                     raise Exception(("Instability in computation of numerator. "
                                      "Aborting."))
             self.P = np.atleast_2d(P)
             if self.POD:
                 self.P = self.samplingEngine.RPOD.dot(self.P)
             if self.verbosity >= 7:
                 verbosityDepth("DEL", "Done computing numerator.")
             self.lastApproxParameters = copy(self.approxParameters)
             if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp
             if self.verbosity >= 5:
                 verbosityDepth("DEL", "Done setting up approximant.\n")
 
     def radiusPade(self, mu:Np1D, mu0 : float = None) -> float:
         """
         Compute translated radius to be plugged into Pade' approximant.
 
         Args:
             mu: Parameter(s) 1.
             mu0: Parameter(s) 2. If None, set to self.mu0.
 
         Returns:
             Translated radius to be plugged into Pade' approximant.
         """
         if mu0 is None: mu0 = self.mu0
         return ((self.HFEngine.rescaling(mu)
                - self.HFEngine.rescaling(mu0)) / self.scaleFactor)
 
     def getPVal(self, mu:List[complex]):
         """
         Evaluate Pade' numerator at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         self.setupApprox()
         if self.verbosity >= 10:
             verbosityDepth("INIT",
                            "Evaluating numerator at mu = {}.".format(mu),
                            end = "")
         try: len(mu)
         except: mu = [mu]
         p = self._val(self.radiusPade(mu), self.P.T)
         if len(mu) == 1:
             p = p.flatten()
         if self.verbosity >= 10:
             verbosityDepth("DEL", " Done.", inline = True)
         return p
 
     def getQVal(self, mu:List[complex]):
         """
         Evaluate Pade' denominator at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         self.setupApprox()
         if self.verbosity >= 10:
             verbosityDepth("INIT",
                            "Evaluating denominator at mu = {}.".format(mu),
                            end = "")
         q = self._val(self.radiusPade(mu), self.Q)
         if self.verbosity >= 10:
             verbosityDepth("DEL", " Done.", inline = True)
         return q
 
     def evalApproxReduced(self, mu:complex):
         """
         Evaluate Pade' approximant at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         self.setupApprox()
         if (not hasattr(self, "lastSolvedApp")
          or not np.isclose(self.lastSolvedApp, mu)):
             if self.verbosity >= 5:
                 verbosityDepth("INIT",
                              "Evaluating approximant at mu = {}.".format(mu))
             self.uAppReduced = self.getPVal(mu) / self.getQVal(mu)
             self.lastSolvedApp = mu
             if self.verbosity >= 5:
                 verbosityDepth("DEL", "Done evaluating approximant.")
 
     def evalApprox(self, mu:complex):
         """
         Evaluate approximant at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         self.evalApproxReduced(mu)
         self.uApp = self.samplingEngine.samples.dot(self.uAppReduced)
 
     def getPoles(self) -> Np1D:
         """
         Obtain approximant poles.
 
         Returns:
             Numpy complex vector of poles.
         """
         self.setupApprox()
         return self.HFEngine.rescalingInv(
                                  self.scaleFactor * self._roots(self.Q)
                                + self.HFEngine.rescaling(self.mu0))
diff --git a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
index 0b11da6..da631f7 100644
--- a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
+++ b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
@@ -1,402 +1,592 @@
 # Copyright (C) 2018 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 from copy import copy
 import numpy as np
 from rrompy.reduction_methods.base import (checkRobustTolerance,
                                            setupFitCallables)
 from .generic_approximant_lagrange_greedy import (
                                            GenericApproximantLagrangeGreedy)
 from rrompy.reduction_methods.lagrange import ApproximantLagrangePade
 from rrompy.utilities.base.types import DictAny, List, HFEng
 from rrompy.utilities.base import purgeDict, verbosityDepth, customFit
 from rrompy.utilities.warning_manager import warn
 
 __all__ = ['ApproximantLagrangePadeGreedy']
 
 class ApproximantLagrangePadeGreedy(GenericApproximantLagrangeGreedy,
                                     ApproximantLagrangePade):
     """
     ROM greedy Pade' interpolant computation for parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': whether to compute POD of snapshots; defaults to True;
             - 'muBounds': list of bounds for parameter values; defaults to
               [[0], [1]];
             - 'basis': type of basis for interpolation; allowed values
               include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to
               'MONOMIAL';
             - 'Delta': difference between M and N in rational approximant;
               defaults to 0;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
               defaults to 1e-2;
+            - 'errorEstimatorKind': kind of error estimator; available values
+              include 'EXACT' and 'SIMPLIFIED'; defaults to 'SIMPLIFIED';
             - 'maxIter': maximum number of greedy steps; defaults to 1e2;
             - 'refinementRatio': ratio of training points to be exhausted
               before training set refinement; defaults to 0.2;
             - 'nTrainingPoints': number of training points; defaults to
               maxIter / refinementRatio;
             - 'nTestPoints': number of starting test points; defaults to 1;
             - 'trainingSetGenerator': training sample points generator;
               defaults to uniform sampler within muBounds;
             - 'interpRcond': tolerance for interpolation via numpy.polyfit;
               defaults to None;
             - 'robustTol': tolerance for robust Pade' denominator management;
               defaults to 0.
             Defaults to empty dict.
         homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to
             False.
         verbosity(optional): Verbosity level. Defaults to 10.
 
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         mus: Array of snapshot parameters.
         homogeneized: Whether to homogeneize Dirichlet BCs.
         approxParameters: Dictionary containing values for main parameters
             of approximant. Recognized keys are in parameterList.
         parameterList: Recognized keys of approximant parameters:
             - 'POD': whether to compute POD of snapshots;
             - 'muBounds': list of bounds for parameter values;
             - 'basis': type of basis for interpolation;
             - 'Delta': difference between M and N in rational approximant;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
+            - 'errorEstimatorKind': kind of error estimator;
             - 'maxIter': maximum number of greedy steps;
             - 'refinementRatio': ratio of training points to be exhausted
               before training set refinement;
             - 'nTrainingPoints': number of training points;
             - 'nTestPoints': number of starting test points;
             - 'trainingSetGenerator': training sample points generator;
             - 'interpRcond': tolerance for interpolation via numpy.polyfit;
             - 'robustTol': tolerance for robust Pade' denominator management.
         extraApproxParameters: List of approxParameters keys in addition to
             mother class's.
         POD: whether to compute POD of snapshots.
         muBounds: list of bounds for parameter values.
         greedyTol: uniform error tolerance for greedy algorithm.
+        errorEstimatorKind: kind of error estimator.
         maxIter: maximum number of greedy steps.
         refinementRatio: ratio of training points to be exhausted before
             training set refinement.
         nTrainingPoints: number of training points.
         nTestPoints: number of starting test points.
         trainingSetGenerator: training sample points generator.
         interpRcond: Tolerance for interpolation via numpy.polyfit.
         robustTol: Tolerance for robust Pade' denominator management.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
             complex vector.
         lastSolvedHF: Wavenumber corresponding to last computed high fidelity
             solution.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D vector whose columns are FE dofs of coefficients of
             approximant numerator.
         uApp: Last evaluated approximant as numpy complex vector.
         lastApproxParameters: List of parameters corresponding to last
             computed approximant.
     """
 
     def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
                  approxParameters : DictAny = {}, homogeneized : bool = False,
                  verbosity : int = 10):
         self._preInit()
-        self._addParametersToList(["basis", "Delta", "interpRcond",
-                                   "robustTol"])
+        self._addParametersToList(["basis", "Delta", "errorEstimatorKind",
+                                   "interpRcond", "robustTol"])
         super().__init__(HFEngine = HFEngine, mu0 = mu0,
                          approxParameters = approxParameters,
                          homogeneized = homogeneized,
                          verbosity = verbosity)
         self._postInit()
 
     @property
     def approxParameters(self):
         """
         Value of approximant parameters. Its assignment may change robustTol.
         """
         return self._approxParameters
     @approxParameters.setter
     def approxParameters(self, approxParams):
         approxParameters = purgeDict(approxParams, self.parameterList,
                                   dictname = self.name() + ".approxParameters",
                                   baselevel = 1)
-        approxParametersCopy = purgeDict(approxParameters, ["basis",
-                                                            "Delta",
+        approxParametersCopy = purgeDict(approxParameters, ["basis", "Delta",
+                                                         "errorEstimatorKind",
                                                             "interpRcond",
                                                             "robustTol"],
                                          True, True, baselevel = 1)
         if "Delta" in list(approxParameters.keys()):
             self._Delta = approxParameters["Delta"]
         elif hasattr(self, "Delta"):
             self._Delta = self.Delta
         else:
             self._Delta = 0
         GenericApproximantLagrangeGreedy.approxParameters.fset(self,
                                                          approxParametersCopy)
         keyList = list(approxParameters.keys())
         self.Delta = self.Delta
         if "basis" in keyList or not hasattr(self, "_val"):
             if "basis" in keyList:
                 kind = approxParameters["basis"]
             else:
                 kind = "MONOMIAL"
             setupFit = setupFitCallables(kind)
             for x in setupFit:
                 super().__setattr__("_" + x, setupFit[x])
+        if "errorEstimatorKind" in keyList:
+            self.errorEstimatorKind = approxParameters["errorEstimatorKind"]
+        elif hasattr(self, "errorEstimatorKind"):
+            self.errorEstimatorKind = self.errorEstimatorKind
+        else:
+            self.errorEstimatorKind = "SIMPLIFIED"
         if "interpRcond" in keyList:
             self.interpRcond = approxParameters["interpRcond"]
         elif hasattr(self, "interpRcond"):
             self.interpRcond = self.interpRcond
         else:
             self.interpRcond = None
         if "robustTol" in keyList:
             self.robustTol = approxParameters["robustTol"]
         elif hasattr(self, "robustTol"):
             self.robustTol = self.robustTol
         else:
             self.robustTol = 0
 
     @property
     def Delta(self):
         """Value of Delta."""
         return self._Delta
     @Delta.setter
     def Delta(self, Delta):
         if not np.isclose(Delta, np.floor(Delta)):
             raise ArithmeticError("Delta must be an integer.")
         if Delta < 0:
             warn(("Error estimator unreliable for Delta < 0. Overloading of "
                   "errorEstimator is suggested."))
         else:
             Deltamin = (max(self.HFEngine.nbs,
                             self.HFEngine.nAs * self.homogeneized)
                         - 1 - 1 * (self.HFEngine.nAs > 1))
             if Delta < Deltamin:
-                warn(("Method unreliable for selected Delta. Suggested "
+                warn(("Method may be unreliable for selected Delta. Suggested "
                       "minimal value of Delta: {}.").format(Deltamin))
         self._Delta = Delta
         self._approxParameters["Delta"] = self.Delta
 
+    @property
+    def errorEstimatorKind(self):
+        """Value of errorEstimatorKind."""
+        return self._errorEstimatorKind
+    @errorEstimatorKind.setter
+    def errorEstimatorKind(self, errorEstimatorKind):
+        errorEstimatorKind = errorEstimatorKind.upper()
+        if errorEstimatorKind not in ["EXACT", "SIMPLIFIED"]:
+            warn(("Error estimator kind not recognized. Overriding to "
+                  "'SIMPLIFIED'."))
+            errorEstimatorKind = "SIMPLIFIED"
+        self._errorEstimatorKind = errorEstimatorKind
+        self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind
+
     @property
     def nTestPoints(self):
         """Value of nTestPoints."""
         return self._nTestPoints
     @nTestPoints.setter
     def nTestPoints(self, nTestPoints):
         if nTestPoints <= np.abs(self.Delta):
             warn(("nTestPoints must be at least abs(Delta) + 1. Increasing "
                   "value to abs(Delta) + 1."))
             nTestPoints = np.abs(self.Delta) + 1
         if not np.isclose(nTestPoints, np.int(nTestPoints)):
             raise ArithmeticError("nTestPoints must be an integer.")
         nTestPoints = np.int(nTestPoints)
         if hasattr(self, "nTestPoints"):
             nTestPointsold = self.nTestPoints
         else: nTestPointsold = -1
         self._nTestPoints = nTestPoints
         self._approxParameters["nTestPoints"] = self.nTestPoints
         if nTestPointsold != self.nTestPoints:
             self.resetSamples()
 
+    def resetSamples(self):
+        """Reset samples."""
+        super().resetSamples()
+        self.resbb = None
+        self.resAb = None
+        self.resAA = None
+        self.As = None
+        self.bs = None
+
     def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]:
-        """
-        Standard residual-based error estimator. Unreliable for unstable
-        problems.
-        """
+        """Standard residual-based error estimator."""
         self.setupApprox()
         self.initEstNormer()
-        nmus = len(mus)
-        if self.N + 1 == self.S:
-            QSf = self._domcoeff(self.N) * self.Q[-1] * self.HFEngine.b(
-                             self.mu0, 1, homogeneized = self.homogeneized)
-        else:
-            QSf = 0
-        L1P = self._domcoeff(self.M) * self.HFEngine.A(self.mu0, 1).dot(
-                             self.samplingEngine.samples.dot(self.P[:, -1]))
-        jOpt = self.estNormer.norm(QSf - L1P)
-        if np.isnan(jOpt) or np.isinf(jOpt):
-            err = np.empty(nmus)
+        PM = self.P[:, -1]
+        if np.any(np.isnan(PM)) or np.any(np.isinf(PM)):
+            err = np.empty(len(mus))
             err[:] = np.inf
             return err
-        musTile = np.tile(self.HFEngine.rescaling(mus).reshape(-1, 1),
-                          [1, len(self.mus)])
-        smusCol = self.HFEngine.rescaling(self.mus).reshape(1, -1)
-        mussmus = np.abs(musTile - smusCol)
-        num = np.prod(mussmus, axis = 1)
-        den = np.abs(self.getQVal(mus))
-        RHSnorms = np.empty(nmus)
-        if self.HFEngine.nbs == 1:
-            RHS = self.getRHS(mus[0], homogeneized = self.homogeneized)
-            RHSnorms[:] = self.estNormer.norm(RHS)
+        nAs = self.HFEngine.nAs - 1
+        nbs = max(self.HFEngine.nbs - 1, nAs * self.homogeneized)
+        radiusmus = self.radiusPade(mus)
+        radiussmus = self.radiusPade(self.mus)
+        musTile = np.tile(radiusmus.reshape(-1, 1), [1, self.S])
+        smusCol = radiussmus.reshape(1, -1)
+        num = np.prod(musTile[:, : self.S] - smusCol, axis = 1)
+        den = self.getQVal(mus)
+        self.assembleReducedResidualBlocks()
+        vanderBase = np.polynomial.polynomial.polyvander(radiusmus,
+                                                         max(nAs, nbs)).T
+        radiusb0 = vanderBase[: nbs + 1, :]
+        RHSnorms = np.power(np.abs(np.einsum('ij,ij->j',
+                                             self.resbb.dot(radiusb0),
+                                             radiusb0.conj())), .5)
+        vanderBase = vanderBase[: -1, :]
+        delta = self.S - self.N - 1
+        nbsEff = max(0, nbs - delta)
+        computeff = ((self.errorEstimatorKind == "SIMPLIFIED" and delta == 0)
+                  or (self.errorEstimatorKind == "EXACT" and nbsEff > 0))
+        if self.errorEstimatorKind == "SIMPLIFIED":
+            radiusA = np.tensordot(PM, vanderBase[: nAs, :], 0)
+            if delta == 0:
+                radiusb = np.abs(self.Q[-1]) * radiusb0[: -1, :]
+        else: #if self.errorEstimatorKind == "EXACT":
+            momentQ = np.zeros(nbsEff, dtype = np.complex)
+            momentQu = np.zeros((self.S, nAs), dtype = np.complex)
+            radiusbTen = np.zeros((nbsEff, len(mus), nbsEff),
+                                  dtype = np.complex)
+            radiusATen = np.zeros((nAs, nAs, len(mus)), dtype = np.complex)
+            if nbsEff > 0:
+                momentQ[0] = self.Q[-1]
+                radiusbTen[:, :, 0] = vanderBase[: nbsEff, :]
+            momentQu[:, 0] = self.P[:, -1]
+            radiusATen[0, :, :] = vanderBase[: nAs, :]
+            Qvals = self.getQVal(self.mus)
+            for k in range(1, max(nAs, nbs)):
+                Qvals = Qvals * radiussmus
+                if k > delta and k < nbs:
+                    momentQ[k - delta] = self._fitinv.dot(Qvals)
+                    radiusbTen[k :, :, k - delta] = radiusbTen[: delta - k,
+                                                               :, 0]
+                if k < nAs:
+                    momentQu[:, k] = Qvals * self._fitinv
+                    radiusATen[k, k :, :] = radiusATen[0, : - k, :]
+            if self.POD and nAs > 1:
+                momentQu[:, 1 :] = self.samplingEngine.RPOD.dot(momentQu[:,
+                                                                        1 :])
+            radiusA = np.tensordot(momentQu, radiusATen, 1)
+            if nbsEff > 0:
+                radiusb = radiusbTen.dot(momentQ)
+        if computeff:
+            ff = np.einsum('ij,ij->j',
+                        self.resbb[delta + 1 :, delta + 1 :].dot(radiusb),
+                        radiusb.conj())
+            Lf = np.einsum('il,il->l',
+                        np.tensordot(self.resAb[delta :, :, :], radiusA, 2),
+                        radiusb.conj())
         else:
-            for j in range(nmus):
-                RHS = self.getRHS(mus[j], homogeneized = self.homogeneized)
-                RHSnorms[j] = self.estNormer.norm(RHS)
-        return jOpt * num / den / RHSnorms / self.scaleFactor ** (self.S - 1)
+            ff, Lf = 0., 0.
+        LL = np.einsum('ikt,kit->t', np.tensordot(self.resAA, radiusA, 2),
+                       radiusA.conj())
+        jOpt = np.power(np.abs(ff - 2. * np.real(Lf) + LL), .5)
+        return self._domcoeff(self.S - 1) * jOpt * np.abs(num / den) / RHSnorms
 
     def setupApprox(self):
         """
         Compute Pade' interpolant. SVD-based robust eigenvalue management.
         """
         if not self.checkComputedApprox():
             if self.verbosity >= 5:
                 verbosityDepth("INIT", "Setting up {}.".format(self.name()))
             self.computeScaleFactor()
             self.S = len(self.mus)
             self._M = self.S - 1
             self._N = self.S - 1
             if self.Delta < 0:
                 self._M += self.Delta
             else:
                 self._N -= self.Delta
             if min(self.M, self.N) < 0:
                 if self.verbosity >= 5:
                     verbosityDepth("MAIN", "Minimal sample size not achieved.")
                 self.Q = np.empty(max(self.N, 0) + 1, dtype = np.complex)
                 self.P = np.empty((len(self.mus), max(self.M, 0) + 1),
                                   dtype = np.complex)
                 self.Q[:] = np.nan
                 self.P[:] = np.nan
                 self.lastApproxParameters = copy(self.approxParameters)
                 if self.verbosity >= 5:
                     verbosityDepth("DEL", ("Aborting computation of "
                                            "approximant.\n"))
                 return
             self.greedy()
             if self.N > 0:
                 if self.verbosity >= 7:
                     verbosityDepth("INIT", ("Starting computation of "
                                             "denominator."))
                 TS = self._vander(self.radiusPade(self.mus), self.S - 1)
                 while self.N > 0:
                     RHS = np.zeros(self.S)
                     RHS[-1] = 1.
                     fitOut = customFit(TS.T, RHS, full = True,
                                        rcond = self.interpRcond)
                     if self.verbosity >= 2:
                         verbosityDepth("MAIN", ("Fitting {} samples with "
                                                 "degree {} through {}... "
                                                 "Conditioning of system: "
                                                 "{:.4e}.").format(
                                           self.S, self.S - 1, self._fitname,
                                           fitOut[1][2][0] / fitOut[1][2][-1]))
                     if fitOut[1][1] < self.S:
                         warn(("Polyfit is poorly conditioned. Starting "
                               "preemptive termination of computation of "
                               "approximant."))
                         self.Q = np.empty(max(self.N, 0) + 1,
                                           dtype = np.complex)
                         self.P = np.empty((len(self.mus), max(self.M, 0) + 1),
                                           dtype = np.complex)
                         self.Q[:] = np.nan
                         self.P[:] = np.nan
                         self.lastApproxParameters = copy(
                                                         self.approxParameters)
                         if hasattr(self, "lastSolvedApp"):
                             del self.lastSolvedApp
                         if self.verbosity >= 7:
                             verbosityDepth("DEL", ("Aborting computation of "
                                                    "denominator."))
                         if self.verbosity >= 5:
                             verbosityDepth("DEL", ("Aborting computation of "
                                                    "approximant.\n"))
                         return
+                    self._fitinv = fitOut[0]
                     G = (TS[:, : self.N + 1].T * fitOut[0]).T
                     if self.POD:
                         if self.verbosity >= 7:
                             verbosityDepth("INIT", ("Solving svd for square "
-                                                    "root of gramian matrix."),
-                                           end = "")
+                                                    "root of gramian matrix."))
                         G = self.samplingEngine.RPOD.dot(G)
                         _, s, eV = np.linalg.svd(G, full_matrices = False)
                         ev = s[::-1]
                         eV = eV[::-1, :].conj().T
                         if self.verbosity >= 2:
                             try: condev = s[0] / s[-1]
                             except: condev = np.inf
                             verbosityDepth("MAIN", ("Solved svd problem of "
                                                     "size {} x {} with "
                                                     "condition number "
                                                     "{:.4e}.").format(
                                                  self.S, self.N + 1, condev))
                     else:
                         if self.verbosity >= 10:
                             verbosityDepth("INIT", "Building gramian matrix.",
                                            end = "")
                         G = self.samplingEngine.samples.dot(G)
                         G2 = self.HFEngine.innerProduct(G, G)
                         if self.verbosity >= 10:
                             verbosityDepth("DEL", "Done building gramian.",
                                            inline = True)
                         if self.verbosity >= 7:
                             verbosityDepth("INIT", ("Solving eigenvalue "
                                                     "problem for gramian "
-                                                    "matrix."), end = "")
+                                                    "matrix."))
                         ev, eV = np.linalg.eigh(G2)
                         if self.verbosity >= 2:
                             try: condev = ev[-1] / ev[0]
                             except: condev = np.inf
                             verbosityDepth("MAIN", ("Solved eigenvalue "
                                                     "problem of size {} with "
                                                     "condition number "
                                                     "{:.4e}.").format(
                                                         self.N + 1, condev))
                     if self.verbosity >= 7:
-                        verbosityDepth("DEL", " Done.", inline = True)
+                        verbosityDepth("DEL", ("Done solving eigenvalue "
                                                "problem."))
                     newParameters = checkRobustTolerance(ev, self.M,
                                                          self.robustTol)
                     if not newParameters: break
                     self._N = newParameters["N"]
                     self._M = newParameters["E"]
                 if self.N <= 0:
                     self._N = 0
                     eV = np.ones((1, 1))
                 self.Q = eV[:, 0]
                 if self.verbosity >= 7:
                     verbosityDepth("DEL", "Done computing denominator.")
             else:
                 self.Q = np.ones(1, dtype = np.complex)
             if self.verbosity >= 7:
                 verbosityDepth("INIT", "Starting computation of numerator.")
             self.lastApproxParameters = copy(self.approxParameters)
             Qevaldiag = np.diag(self.getQVal(self.mus))
             while self.M >= 0:
                 fitVander = self._vander(self.radiusPade(self.mus), self.M)
                 fitOut = customFit(fitVander, Qevaldiag, full = True,
                                    rcond = self.interpRcond)
                 if fitOut[1][1] == self.M + 1:
                     P = fitOut[0].T
                     break
                 warn(("Polyfit is poorly conditioned. Reducing M from {} to "
                       "{}. Exact snapshot interpolation not guaranteed.")\
                      .format(self.M, fitOut[1][1] - 1))
                 self._M = fitOut[1][1] - 1
                 if self.M <= 0:
                     raise Exception(("Instability in computation of numerator. "
                                      "Aborting."))
             self.P = np.atleast_2d(P)
             if self.POD:
                 self.P = self.samplingEngine.RPOD.dot(self.P)
             if self.verbosity >= 7:
                 verbosityDepth("DEL", "Done computing numerator.")
             self.lastApproxParameters = copy(self.approxParameters)
             if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp
             if self.verbosity >= 5:
                 verbosityDepth("DEL", "Done setting up approximant.\n")
 
+    def assembleReducedResidualBlocks(self):
+        """Build affine blocks of reduced linear system through projections."""
+        self.initEstNormer()
+        if self.As is None or self.bs is None:
+            if self.verbosity >= 7:
+                verbosityDepth("INIT", "Computing Taylor blocks of system.")
+            nAs = self.HFEngine.nAs - 1
+            nbs = max(self.HFEngine.nbs, (nAs + 1) * self.homogeneized)
+            self.As = [self.HFEngine.A(self.mu0, j + 1) for j in range(nAs)]
+            self.bs = [self.HFEngine.b(self.mu0, j, self.homogeneized)
+                       for j in range(nbs)]
+            if self.verbosity >= 7:
+                verbosityDepth("DEL", "Done computing Taylor blocks.")
+        computeResbb = self.resbb is None
+        computeResAb = self.resAb is None or self.resAb.shape[1] != self.S
+        computeResAA = self.resAA is None or self.resAA.shape[1] != self.S
+        samples = self.samplingEngine.samples
+        if computeResbb or computeResAb or computeResAA:
+            if self.verbosity >= 7:
+                verbosityDepth("INIT", "Projecting Taylor terms of residual.")
+            nAs = len(self.As)
+            nbs = len(self.bs) - 1
+            if computeResbb:
+                self.resbb = np.empty((nbs + 1, nbs + 1), dtype = np.complex)
+                for i in range(nbs + 1):
+                    Mbi = self.scaleFactor ** i * self.bs[i]
+                    for j in range(i):
+                        Mbj = self.scaleFactor ** j * self.bs[j]
+                        self.resbb[i, j] = self.estNormer.innerProduct(Mbj,
+                                                                       Mbi)
+                    self.resbb[i, i] = self.estNormer.innerProduct(Mbi, Mbi)
+                for i in range(nbs + 1):
+                    for j in range(i + 1, nbs + 1):
+                        self.resbb[i, j] = self.resbb[j][i].conj()
+            if computeResAb:
+                if self.resAb is None:
+                    self.resAb = np.empty((nbs, self.S, nAs),
+                                          dtype = np.complex)
+                    for i in range(nbs):
+                        Mbi = self.scaleFactor ** (i + 1) * self.bs[i + 1]
+                        for j in range(nAs):
+                            MAj = (self.scaleFactor ** (j + 1)
+                                 * self.As[j].dot(samples))
+                            self.resAb[i, :, j] = self.estNormer.innerProduct(
+                                                                     MAj, Mbi)
+                else:
+                    Sold = self.resAb.shape[1]
+                    if Sold > self.S:
+                        self.resAb = self.resAb[:, : self.S, :]
+                    else:
+                        resAbNew = np.empty((nbs, self.S, nAs),
+                                            dtype = np.complex)
+                        resAbNew[:, : Sold, :] = self.resAb
+                        self.resAb = resAbNew
+                        for i in range(nbs):
+                            Mbi = self.scaleFactor ** (i + 1) * self.bs[i + 1]
+                            for j in range(nAs):
+                                MAj = (self.scaleFactor ** (j + 1)
+                                     * self.As[j].dot(samples[:, Sold :]))
+                                self.resAb[i, Sold :, j] = (
+                                        self.estNormer.innerProduct(MAj, Mbi))
+            if computeResAA:
+                if self.resAA is None:
+                    self.resAA = np.empty((nAs, self.S, self.S, nAs),
+                                          dtype = np.complex)
+                    for i in range(nAs):
+                        MAi = (self.scaleFactor ** (i + 1)
+                             * self.As[i].dot(samples))
+                        for j in range(i):
+                            MAj = (self.scaleFactor ** (j + 1)
+                                 * self.As[j].dot(samples))
+                            self.resAA[i, :, :, j] = (
+                                        self.estNormer.innerProduct(MAj, MAi))
+                        self.resAA[i, :, :, i] = self.estNormer.innerProduct(
+                                                                     MAi, MAi)
+                    for i in range(nAs):
+                        for j in range(i + 1, nAs):
+                            self.resAA[i, :, :, j] = (
+                                               self.resAA[j, :, :, i].conj())
+                else:
+                    Sold = self.resAA.shape[1]
+                    if Sold > self.S:
+                        self.resAA = self.resAA[:, : self.S, : self.S, :]
+                    else:
+                        resAANew = np.empty((nAs, self.S, self.S, nAs),
+                                            dtype = np.complex)
+                        resAANew[:, : Sold, : Sold, :] = self.resAA
+                        self.resAA = resAANew
+                        for i in range(nAs):
+                            MAi = (self.scaleFactor ** (i + 1)
+                                 * self.As[i].dot(samples))
+                            for j in range(i):
+                                MAj = (self.scaleFactor ** (j + 1)
+                                     * self.As[j].dot(samples))
+                                self.resAA[i, : Sold, Sold :, j] = (
+                                        self.estNormer.innerProduct(
+                                                            MAj[:, Sold :],
+                                                            MAi[:, : Sold]))
+                                self.resAA[i, Sold :, : Sold, j] = (
+                                        self.estNormer.innerProduct(
+                                                            MAj[:, : Sold],
+                                                            MAi[:, Sold :]))
+                                self.resAA[i, Sold :, Sold :, j] = (
+                                        self.estNormer.innerProduct(
+                                                            MAj[:, Sold :],
+                                                            MAi[:, Sold :]))
+                            self.resAA[i, : Sold, Sold :, i] = (
+                                        self.estNormer.innerProduct(
+                                                            MAi[:, Sold :],
+                                                            MAi[:, : Sold]))
+                            self.resAA[i, Sold :, : Sold, i] = (
+                                    self.resAA[i, : Sold, Sold :, i].conj().T)
+                            self.resAA[i, Sold :, Sold :, i] = (
+                                        self.estNormer.innerProduct(
+                                                            MAi[:, Sold :],
+                                                            MAi[:, Sold :]))
+                        for i in range(nAs):
+                            for j in range(i + 1, nAs):
+                                self.resAA[i, :, :, j] = (
+                                               self.resAA[j, :, :, i].conj())
+            if self.verbosity >= 7:
+                verbosityDepth("DEL", ("Done setting up Taylor "
+                                       "decomposition of residual."))
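The added assembleReducedResidualBlocks caches the inner products resbb, resAb and resAA once, so that each residual norm in errorEstimator reduces to the quadratic expansion ||r||^2 = ff - 2 Re(Lf) + LL in the reduced coordinates. A minimal self-contained sketch of that expansion for a single affine term, with the Euclidean inner product standing in for estNormer and all names illustrative:

import numpy as np

n, S = 50, 4
rng = np.random.default_rng(0)
A0 = rng.standard_normal((n, n))   # single affine matrix term
b = rng.standard_normal(n)         # single affine RHS term
V = rng.standard_normal((n, S))    # snapshot basis
AV = A0.dot(V)

resbb = b.dot(b)                   # <b, b>
resAb = AV.T.dot(b)                # <A0 V, b>,    shape (S,)
resAA = AV.T.dot(AV)               # <A0 V, A0 V>, shape (S, S)

def res_norm2(theta, uRB):
    # ||theta * A0 V uRB - b||^2 through the cached blocks
    return (resbb - 2. * theta * uRB.dot(resAb)
            + theta ** 2 * uRB.dot(resAA).dot(uRB))

theta, uRB = 1.3, rng.standard_normal(S)
direct = np.linalg.norm(theta * AV.dot(uRB) - b) ** 2
assert np.isclose(res_norm2(theta, uRB), direct)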
approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - 'nTrainingPoints': number of training points; defaults to maxIter / refinementRatio; - 'nTestPoints': number of starting test points; defaults to 1; - 'trainingSetGenerator': training sample points generator; defaults to uniform sampler within muBounds. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; - 'nTrainingPoints': number of training points; - 'nTestPoints': number of starting test points; - 'trainingSetGenerator': training sample points generator; - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. maxIter: maximum number of greedy steps. refinementRatio: ratio of training points to be exhausted before training set refinement. nTrainingPoints: number of training points. nTestPoints: number of starting test points. trainingSetGenerator: training sample points generator. samplingEngine: Sampling engine. projMat: Projection matrix for RB system assembly. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt theta(mu). bs: List of numpy vectors representing coefficients of linear system RHS wrt theta(mu). thetaAs: List of callables representing coefficients of linear system matrix wrt mu. thetabs: List of callables representing coefficients of linear system RHS wrt mu. ARBs: List of sparse matrices (in CSC format) representing coefficients of compressed linear system matrix wrt theta(mu). bRBs: List of numpy vectors representing coefficients of compressed linear system RHS wrt theta(mu). 
""" def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10): self._preInit() super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity) - if self.verbosity >= 10: - verbosityDepth("INIT", "Computing affine blocks of system.") - self.As, self.thetaAs = self.HFEngine.affineBlocksA(self.mu0) - self.bs, self.thetabs = self.HFEngine.affineBlocksb(self.mu0, - self.homogeneized) - if self.verbosity >= 10: - verbosityDepth("DEL", "Done computing affine blocks.") self._postInit() @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): self._S = S @property def R(self): """Value of R.""" return self._S @R.setter def R(self, R): warn(("R is used just to simplify inheritance, and its value cannot " "be changed from that of S.")) def resetSamples(self): """Reset samples.""" super().resetSamples() self.projMat = None self.resbb = None self.resAb = None self.resAA = None + if self.verbosity >= 10: + verbosityDepth("INIT", "Computing affine blocks of system.") + self.As, self.thetaAs = self.HFEngine.affineBlocksA(self.mu0) + self.bs, self.thetabs = self.HFEngine.affineBlocksb(self.mu0, + self.homogeneized) + if self.verbosity >= 10: + verbosityDepth("DEL", "Done computing affine blocks.") def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return (self.projMat is not None and super().checkComputedApprox()) def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """ Standard residual-based error estimator. Unreliable for unstable problems (inf-sup constant is missing). """ nmus = len(mus) err = np.empty(nmus) self.assembleReducedSystem() self.assembleReducedResidualBlocks() - nAs = len(self.As) - nbs = len(self.bs) + nAs = self.resAA.shape[0] + nbs = self.resbb.shape[0] for j in range(nmus): mu = mus[j] uAppReduced = self.getAppReduced(mu) prodbb = 0. prodAb = 0. prodAA = 0. for i1 in range(nbs): rhobi1 = self.thetabs(mu, i1) for i2 in range(nbs): rhobi2 = self.thetabs(mu, i2).conj() - prodbb += rhobi1 * rhobi2 * self.resbb[i1][i2] + prodbb += rhobi1 * rhobi2 * self.resbb[i2, i1] for i1 in range(nAs): rhoAi1 = self.thetaAs(mu, i1) for i2 in range(nbs): rhobi2 = self.thetabs(mu, i2).conj() - prodAb += rhoAi1 * rhobi2 * self.resAb[i1][i2] + prodAb += rhoAi1 * rhobi2 * self.resAb[i2, i1, :] for i1 in range(nAs): rhoAi1 = self.thetaAs(mu, i1) for i2 in range(nAs): rhoAi2 = self.thetaAs(mu, i2).conj() - prodAA += rhoAi1 * rhoAi2 * self.resAA[i1][i2] + prodAA += rhoAi1 * rhoAi2 * self.resAA[i2, i1, :, :] err[j] = np.abs(((uAppReduced.T.conj().dot(prodAA.dot(uAppReduced)) - 2. * np.real(prodAb.dot(uAppReduced))) / prodbb + 1.)[0]) ** .5 return err def setupApprox(self): """Compute RB projection matrix.""" if not self.checkComputedApprox(): if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". 
format(self.name())) self.greedy() self.S = len(self.mus) if self.verbosity >= 7: verbosityDepth("INIT", "Computing projection matrix.", end = "") self.projMat = self.samplingEngine.samples if self.verbosity >= 7: verbosityDepth("DEL", " Done.", inline = True) self.lastApproxParameters = copy(self.approxParameters) if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.\n") def assembleReducedResidualBlocks(self): """Build affine blocks of RB linear system through projections.""" self.initEstNormer() self.assembleReducedSystem() computeResbb = self.resbb is None - computeResAb = (self.resAb is None - or self.resAb[0][0].shape[0] != self.S) - computeResAA = (self.resAA is None - or self.resAA[0][0].shape[0] != self.S) + computeResAb = self.resAb is None or self.resAb.shape[2] != self.S + computeResAA = self.resAA is None or self.resAA.shape[2] != self.S + samples = self.projMat if computeResbb or computeResAb or computeResAA: if self.verbosity >= 7: verbosityDepth("INIT", "Projecting affine terms of residual.", end = "") nAs = len(self.As) - nbs = len(self.bs) + nbs = max(len(self.bs), nAs * self.homogeneized) if computeResbb: - self.resbb = [] + self.resbb = np.empty((nbs, nbs), dtype = np.complex) for i in range(nbs): - resbbi = [] Mbi = self.bs[i] for j in range(i): Mbj = self.bs[j] - resbbi += [self.estNormer.innerProduct(Mbi, Mbj)] - resbbi += [self.estNormer.innerProduct(Mbi, Mbi)] - self.resbb += [resbbi] + self.resbb[i, j] = self.estNormer.innerProduct(Mbj, + Mbi) + self.resbb[i, i] = self.estNormer.innerProduct(Mbi, Mbi) for i in range(nbs): for j in range(i + 1, nbs): - self.resbb[i] += [self.resbb[j][i].conj()] + self.resbb[i, j] = self.resbb[j][i].conj() if computeResAb: if self.resAb is None: - self.resAb = [] - for i in range(nAs): - resAbi = [] - MAi = self.As[i].dot(self.projMat) - for j in range(nbs): - Mbj = self.bs[j] - resAbi += [self.estNormer.innerProduct(MAi, Mbj)] - self.resAb += [resAbi] + self.resAb = np.empty((nbs, nAs, self.S), + dtype = np.complex) + for i in range(nbs): + Mbi = self.bs[i] + for j in range(nAs): + MAj = self.As[j].dot(samples) + self.resAb[i, j, :] = self.estNormer.innerProduct( + MAj, Mbi) else: - Sold = self.resAb[0][0].shape[0] - for i in range(nAs): - MAi = self.As[i].dot(self.projMat[:, Sold :]) - for j in range(nbs): - Mbj = self.bs[j] - self.resAb[i][j] = np.append(self.resAb[i][j], - self.estNormer.innerProduct(MAi, Mbj)) + resAbNew = np.empty((nbs, nAs, self.S), dtype = np.complex) + Sold = self.resAb.shape[2] + resAbNew[:, :, : Sold] = self.resAb + self.resAb = resAbNew + for i in range(nbs): + Mbi = self.bs[i] + for j in range(nAs): + MAj = self.As[j].dot(samples[:, Sold :]) + self.resAb[i, j, Sold :] = ( + self.estNormer.innerProduct(MAj, Mbi)) if computeResAA: if self.resAA is None: - self.resAA = [] + self.resAA = np.empty((nAs, nAs, self.S, self.S), + dtype = np.complex) for i in range(nAs): - resAAi = [] - MAi = self.As[i].dot(self.projMat) + MAi = self.As[i].dot(samples) for j in range(i): - MAj = self.As[j].dot(self.projMat) - resAAi += [self.estNormer.innerProduct(MAi, MAj)] - resAAi += [self.estNormer.innerProduct(MAi, MAi)] - self.resAA += [resAAi] + MAj = self.As[j].dot(samples) + self.resAA[i, j, :, :] = ( + self.estNormer.innerProduct(MAj, MAi)) + self.resAA[i, i, :, :] = self.estNormer.innerProduct( + MAi, MAi) for i in range(nAs): for j in range(i + 1, nAs): - self.resAA[i] += [self.resAA[j][i].conj()] + self.resAA[i, j, :, :] = ( + 
self.resAA[j, i, :, :].conj()) else: - Sold = self.resAA[0][0].shape[0] + resAANew = np.empty((nAs, nAs, self.S, self.S), + dtype = np.complex) + Sold = self.resAA.shape[2] + resAANew[:, :, : Sold, : Sold] = self.resAA + self.resAA = resAANew for i in range(nAs): - MAi = self.As[i].dot(self.projMat) + MAi = self.As[i].dot(samples) for j in range(i): - MAj = self.As[j].dot(self.projMat) - resAAcross1 = self.estNormer.innerProduct( - MAi[:, Sold :], MAj[:, : Sold]) - resAAcross2 = self.estNormer.innerProduct( - MAi[:, : Sold], MAj[:, Sold :]) - resAAdiag = self.estNormer.innerProduct( - MAi[:, Sold :], MAj[:, Sold :]) - self.resAA[i][j] = np.block( - [[self.resAA[i][j], resAAcross1], - [ resAAcross2, resAAdiag]]) - resAAcross = self.estNormer.innerProduct( - MAi[:, Sold :], - MAi[:, : Sold]) - resAAdiag = self.estNormer.innerProduct(MAi[:, Sold :], - MAi[:, Sold :]) - self.resAA[i][i] = np.block( - [[ self.resAA[i][i], resAAcross], - [resAAcross.T.conj(), resAAdiag]]) + MAj = self.As[j].dot(samples) + self.resAA[i, j, : Sold, Sold :] = ( + self.estNormer.innerProduct(MAj[:, Sold :], + MAi[:, : Sold])) + self.resAA[i, j, Sold :, : Sold] = ( + self.estNormer.innerProduct(MAj[:, : Sold], + MAi[:, Sold :])) + self.resAA[i, j, Sold :, Sold :] = ( + self.estNormer.innerProduct(MAj[:, Sold :], + MAi[:, Sold :])) + self.resAA[i, i, : Sold, Sold :] = ( + self.estNormer.innerProduct(MAi[:, Sold :], + MAi[:, : Sold])) + self.resAA[i, i, Sold :, : Sold] = ( + self.resAA[i, i, : Sold, Sold :].conj().T) + self.resAA[i, i, Sold :, Sold :] = ( + self.estNormer.innerProduct(MAi[:, Sold :], + MAi[:, Sold :])) for i in range(nAs): for j in range(i + 1, nAs): - self.resAA[i][j] = self.resAA[j][i].conj() + self.resAA[i, j, :, :] = ( + self.resAA[j, i, :, :].conj()) if self.verbosity >= 7: verbosityDepth("DEL", ("Done setting up affine " "decomposition of residual.")) diff --git a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py index 9ed10f3..fa7936d 100644 --- a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py +++ b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py @@ -1,488 +1,489 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.reduction_methods.lagrange.generic_approximant_lagrange import ( GenericApproximantLagrange) from rrompy.utilities.base.types import DictAny, HFEng, Tuple, List from rrompy.utilities.base import purgeDict, verbosityDepth from rrompy.utilities.warning_manager import warn __all__ = ['GenericApproximantLagrangeGreedy'] class GenericApproximantLagrangeGreedy(GenericApproximantLagrange): """ ROM greedy Lagrange interpolant computation for parametric problems (ABSTRACT). 
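The residual blocks assembled above (resbb, resAb, resAA) are what make the error estimator cheap: once projected, the residual norm at any mu follows from the affine coefficients alone, with no full-order operations. Below is a minimal self-contained numpy sketch of that identity, with randomly generated stand-ins for As, bs and the projected samples; all names and index conventions here are chosen for the sketch and are not the library's API.

import numpy as np

# Sketch of the affine residual-norm expansion used by errorEstimator.
# All data is randomly generated stand-in; in RROMPy the blocks come from
# estNormer.innerProduct applied to As[i].dot(samples) and bs[i].
rng = np.random.default_rng(0)
n, S, nAs, nbs = 40, 5, 2, 2
As = [rng.standard_normal((n, n)) for _ in range(nAs)]
bs = [rng.standard_normal(n) for _ in range(nbs)]
V = rng.standard_normal((n, S))                 # stand-in projection matrix
thetaA = lambda mu, i: mu ** i                  # hypothetical affine coefficients
thetab = lambda mu, i: (1. + mu) ** i

# Precomputed residual blocks (Euclidean inner product for simplicity).
resbb = np.array([[bs[j].conj() @ bs[i] for i in range(nbs)]
                  for j in range(nbs)])
resAb = np.array([[bs[i].conj() @ (As[j] @ V) for j in range(nAs)]
                  for i in range(nbs)])
resAA = np.array([[(As[j] @ V).conj().T @ (As[i] @ V) for i in range(nAs)]
                  for j in range(nAs)])

mu, uRed = 1.3, rng.standard_normal(S)
# Assemble ||A(mu) V uRed - b(mu)||^2 from the blocks only.
prodbb = sum(thetab(mu, i) * np.conj(thetab(mu, j)) * resbb[j, i]
             for i in range(nbs) for j in range(nbs))
prodAb = sum(thetaA(mu, i) * np.conj(thetab(mu, j)) * resAb[j, i]
             for i in range(nAs) for j in range(nbs))
prodAA = sum(thetaA(mu, i) * np.conj(thetaA(mu, j)) * resAA[j, i]
             for i in range(nAs) for j in range(nAs))
res2 = uRed.conj() @ (prodAA @ uRed) - 2. * np.real(prodAb @ uRed) + prodbb

# Direct evaluation for comparison.
Amu = sum(thetaA(mu, i) * As[i] for i in range(nAs))
bmu = sum(thetab(mu, i) * bs[i] for i in range(nbs))
print(np.isclose(res2, np.linalg.norm(Amu @ (V @ uRed) - bmu) ** 2))  # True

The Sold-based updates above follow the same logic incrementally: when new snapshots arrive, only the cross terms and the new diagonal block are assembled, and Hermitian symmetry fills the rest. A stripped-down sketch of that pattern for a single Gramian block, under the same stand-in assumptions:

import numpy as np

# Grow a Gramian block G = (A V)^H (A V) when new columns are appended
# to V, recomputing only the new cross and diagonal parts.
rng = np.random.default_rng(1)
n, Sold, Snew = 30, 4, 6
A = rng.standard_normal((n, n))
V = rng.standard_normal((n, Snew))           # first Sold columns are the old ones
MA = A @ V

G = MA[:, :Sold].conj().T @ MA[:, :Sold]     # old block, assumed already stored
Gnew = np.empty((Snew, Snew), dtype = complex)
Gnew[:Sold, :Sold] = G                                        # reuse old entries
Gnew[:Sold, Sold:] = MA[:, :Sold].conj().T @ MA[:, Sold:]     # cross term
Gnew[Sold:, :Sold] = Gnew[:Sold, Sold:].conj().T              # Hermitian symmetry
Gnew[Sold:, Sold:] = MA[:, Sold:].conj().T @ MA[:, Sold:]     # new diagonal block

print(np.allclose(Gnew, MA.conj().T @ MA))   # True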
Args:
    HFEngine: HF problem solver.
    mu0(optional): Default parameter. Defaults to 0.
    approxParameters(optional): Dictionary containing values for main
        parameters of approximant. Recognized keys are:
        - 'POD': whether to compute POD of snapshots; defaults to True;
        - 'muBounds': list of bounds for parameter values; defaults to
            [[0], [1]];
        - 'greedyTol': uniform error tolerance for greedy algorithm;
            defaults to 1e-2;
        - 'maxIter': maximum number of greedy steps; defaults to 1e2;
        - 'refinementRatio': ratio of training points to be exhausted
            before training set refinement; defaults to 0.2;
        - 'nTrainingPoints': number of training points; defaults to
            maxIter / refinementRatio;
        - 'nTestPoints': number of starting test points; defaults to 1;
        - 'trainingSetGenerator': training sample points generator;
            defaults to uniform sampler within muBounds;
        - 'robustTol': tolerance for robust Pade' denominator
            management; defaults to 0.
        Defaults to empty dict.
    homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to
        False.
    verbosity(optional): Verbosity level. Defaults to 10.
Attributes:
    HFEngine: HF problem solver.
    mu0: Default parameter.
    mus: Array of snapshot parameters.
    homogeneized: Whether to homogeneize Dirichlet BCs.
    approxParameters: Dictionary containing values for main parameters
        of approximant. Recognized keys are in parameterList.
    parameterList: Recognized keys of approximant parameters:
        - 'POD': whether to compute POD of snapshots;
        - 'muBounds': list of bounds for parameter values;
        - 'greedyTol': uniform error tolerance for greedy algorithm;
        - 'maxIter': maximum number of greedy steps;
        - 'refinementRatio': ratio of training points to be exhausted
            before training set refinement;
        - 'nTrainingPoints': number of training points;
        - 'nTestPoints': number of starting test points;
        - 'trainingSetGenerator': training sample points generator;
        - 'robustTol': tolerance for robust Pade' denominator
            management.
    extraApproxParameters: List of approxParameters keys in addition to
        mother class's.
    POD: whether to compute POD of snapshots.
    muBounds: list of bounds for parameter values.
    greedyTol: uniform error tolerance for greedy algorithm.
    maxIter: maximum number of greedy steps.
    refinementRatio: ratio of training points to be exhausted before
        training set refinement.
    nTrainingPoints: number of training points.
    nTestPoints: number of starting test points.
    trainingSetGenerator: training sample points generator.
    robustTol: tolerance for robust Pade' denominator management.
    samplingEngine: Sampling engine.
    uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
        complex vector.
    lastSolvedHF: Wavenumber corresponding to last computed high
        fidelity solution.
    uApp: Last evaluated approximant as numpy complex vector.
    lastApproxParameters: List of parameters corresponding to last
        computed approximant.
"""

    TOL_INSTABILITY = 1e-6

    def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
                 approxParameters : DictAny = {},
                 homogeneized : bool = False, verbosity : int = 10):
        self._preInit()
        self._addParametersToList(["muBounds", "greedyTol", "maxIter",
                                   "refinementRatio", "nTrainingPoints",
                                   "nTestPoints", "trainingSetGenerator"])
        super(GenericApproximantLagrange, self).__init__(
                                        HFEngine = HFEngine, mu0 = mu0,
                                        approxParameters = approxParameters,
                                        homogeneized = homogeneized,
                                        verbosity = verbosity)
        self._postInit()

    @property
    def approxParameters(self):
        """Value of approximant parameters.
Its assignment may change S.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["muBounds","greedyTol", "maxIter", "refinementRatio", "nTrainingPoints", "nTestPoints", "trainingSetGenerator"], True, True, baselevel = 1) GenericApproximant.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "muBounds" in keyList: self.muBounds = approxParameters["muBounds"] elif hasattr(self, "muBounds"): self.muBounds = self.muBounds else: self.muBounds = [[0.], [1.]] if "greedyTol" in keyList: self.greedyTol = approxParameters["greedyTol"] elif hasattr(self, "greedyTol"): self.greedyTol = self.greedyTol else: self.greedyTol = 1e-2 if "maxIter" in keyList: self.maxIter = approxParameters["maxIter"] elif hasattr(self, "maxIter"): self.maxIter = self.maxIter else: self.maxIter = 1e2 if "refinementRatio" in keyList: self.refinementRatio = approxParameters["refinementRatio"] elif hasattr(self, "refinementRatio"): self.refinementRatio = self.refinementRatio else: self.refinementRatio = 0.2 if "nTrainingPoints" in keyList: self.nTrainingPoints = approxParameters["nTrainingPoints"] elif hasattr(self, "nTrainingPoints"): self.nTrainingPoints = self.nTrainingPoints else: self.nTrainingPoints = np.int(np.ceil(self.maxIter / self.refinementRatio)) if "nTestPoints" in keyList: self.nTestPoints = approxParameters["nTestPoints"] elif hasattr(self, "nTestPoints"): self.nTestPoints = self.nTestPoints else: self.nTestPoints = 1 if "trainingSetGenerator" in keyList: self.trainingSetGenerator = ( approxParameters["trainingSetGenerator"]) elif hasattr(self, "trainingSetGenerator"): self.trainingSetGenerator = self.trainingSetGenerator else: from rrompy.utilities.parameter_sampling import QuadratureSampler self.trainingSetGenerator = QuadratureSampler(self.muBounds, "UNIFORM") del QuadratureSampler @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): self._S = S @property def mus(self): """Value of mus.""" return self._mus @mus.setter def mus(self, mus): self._mus = np.array(mus, dtype = np.complex) @property def muBounds(self): """Value of muBounds.""" return self._muBounds @muBounds.setter def muBounds(self, muBounds): if len(muBounds) != 2: raise Exception("2 limits must be specified.") try: muBounds = muBounds.tolist() except: muBounds = list(muBounds) for j in range(2): try: len(muBounds[j]) except: muBounds[j] = np.array([muBounds[j]]) if len(muBounds[0]) != len(muBounds[1]): raise Exception("The bounds must have the same length.") self._muBounds = muBounds @property def greedyTol(self): """Value of greedyTol.""" return self._greedyTol @greedyTol.setter def greedyTol(self, greedyTol): if greedyTol < 0: raise ArithmeticError("greedyTol must be non-negative.") if hasattr(self, "greedyTol"): greedyTolold = self.greedyTol else: greedyTolold = -1 self._greedyTol = greedyTol self._approxParameters["greedyTol"] = self.greedyTol if greedyTolold != self.greedyTol: self.resetSamples() @property def maxIter(self): """Value of maxIter.""" return self._maxIter @maxIter.setter def maxIter(self, maxIter): if maxIter <= 0: raise ArithmeticError("maxIter must be positive.") if hasattr(self, "maxIter"): maxIterold = self.maxIter else: maxIterold = -1 self._maxIter = maxIter self._approxParameters["maxIter"] = self.maxIter if maxIterold != 
self.maxIter: self.resetSamples() @property def refinementRatio(self): """Value of refinementRatio.""" return self._refinementRatio @refinementRatio.setter def refinementRatio(self, refinementRatio): if refinementRatio <= 0. or refinementRatio > 1.: raise ArithmeticError(("refinementRatio must be between 0 " "(excluded) and 1.")) if hasattr(self, "refinementRatio"): refinementRatioold = self.refinementRatio else: refinementRatioold = -1 self._refinementRatio = refinementRatio self._approxParameters["refinementRatio"] = self.refinementRatio if refinementRatioold != self.refinementRatio: self.resetSamples() @property def nTrainingPoints(self): """Value of nTrainingPoints.""" return self._nTrainingPoints @nTrainingPoints.setter def nTrainingPoints(self, nTrainingPoints): if nTrainingPoints <= 1: raise ArithmeticError("nTrainingPoints must be greater than 1.") if not np.isclose(nTrainingPoints, np.int(nTrainingPoints)): raise ArithmeticError("nTrainingPoints must be an integer.") nTrainingPoints = np.int(nTrainingPoints) if hasattr(self, "nTrainingPoints"): nTrainingPointsold = self.nTrainingPoints else: nTrainingPointsold = -1 self._nTrainingPoints = nTrainingPoints self._approxParameters["nTrainingPoints"] = self.nTrainingPoints if nTrainingPointsold != self.nTrainingPoints: self.resetSamples() @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= 0: raise ArithmeticError("nTestPoints must be positive.") if not np.isclose(nTestPoints, np.int(nTestPoints)): raise ArithmeticError("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "nTestPoints"): nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() @property def trainingSetGenerator(self): """Value of trainingSetGenerator.""" return self._trainingSetGenerator @trainingSetGenerator.setter def trainingSetGenerator(self, trainingSetGenerator): if 'generatePoints' not in dir(trainingSetGenerator): raise Exception("trainingSetGenerator type not recognized.") if hasattr(self, '_trainingSetGenerator'): trainingSetGeneratorOld = self.trainingSetGenerator self._trainingSetGenerator = trainingSetGenerator self._approxParameters["trainingSetGenerator"] = ( self.trainingSetGenerator) if (not 'trainingSetGeneratorOld' in locals() or trainingSetGeneratorOld != self.trainingSetGenerator): self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() self._mus = [] def initEstNormer(self): """Initialize estimator norm engine.""" if not hasattr(self, "estNormer"): from rrompy.hfengines.base import ProblemEngineBase as PEB # class L2InverseNormer(PEB): # def innerProduct(self, u:Np1D, v:Np1D) -> float: # if not hasattr(self, "energyNormMatrix"): # self.buildEnergyNormForm() # return v.conj().T.dot(self.energyNormMatrix.solve(u)) # def buildEnergyNormForm(self): # super().buildEnergyNormForm() # from scipy.sparse import csc_matrix, linalg as sla # Mass = csc_matrix(self.energyNormMatrix, # dtype = np.complex) # self.energyNormMatrix = sla.spilu(Mass) # self.estNormer = L2InverseNormer() # inverse L2 norm self.estNormer = PEB() # L2 norm self.estNormer.V = self.HFEngine.V self.estNormer.buildEnergyNormForm() def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """ Standard residual-based error estimator with explicit residual 
computation. """ self.setupApprox() nmus = len(mus) err = np.empty(nmus) if self.HFEngine.nbs == 1: RHS = self.getRHS(mus[0], homogeneized = self.homogeneized) RHSNorm = self.estNormer.norm(RHS) for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / RHSNorm else: for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) RHS = self.getRHS(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / self.estNormer.norm(RHS) return np.abs(err) def getMaxErrorEstimator(self) -> Tuple[float, int]: """ Compute maximum of (and index of maximum of) error estimator over training set. """ errorEstTrain = self.errorEstimator(self.muTrain) idxMaxEst = np.argmax(errorEstTrain) maxEst = errorEstTrain[idxMaxEst] return maxEst, idxMaxEst def greedyNextSample(self, muidx:int, plotEst : bool = False): """Compute next greedy snapshot of solution map.""" mu = self.muTrain[muidx] if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} to test " "set.").format( self.samplingEngine.nsamples + 1, mu)) self.mus = np.append(self.mus, mu) idxs = np.arange(len(self.muTrain)) mask = np.ones_like(idxs, dtype = bool) mask[muidx] = False idxs = idxs[mask] self.muTrain = self.muTrain[idxs] self.samplingEngine.nextSample(mu, homogeneized = self.homogeneized) errorEstTrain = self.errorEstimator(self.muTrain) muidx = np.argmax(errorEstTrain) maxErrorEst = errorEstTrain[muidx] mu = self.muTrain[muidx] if plotEst and not np.all(np.isinf(errorEstTrain)): from matplotlib import pyplot as plt plt.figure() plt.semilogy(np.real(self.muTrain), errorEstTrain, 'k') plt.semilogy(np.real(self.muTrain), self.greedyTol * np.ones(len(self.muTrain)), 'r--') plt.semilogy(np.real(self.mus), 2. * self.greedyTol * np.ones(len(self.mus)), '*m') plt.semilogy(np.real(mu), maxErrorEst, 'xr') plt.grid() plt.show() plt.close() return errorEstTrain, muidx, maxErrorEst, mu def greedy(self, plotEst : bool = False): """Compute greedy snapshots of solution map.""" if self.samplingEngine.samples is None: if self.verbosity >= 2: verbosityDepth("INIT", "Starting computation of snapshots.") self.resetSamples() self.mus, _ = self.trainingSetGenerator.generatePoints( self.nTestPoints) self.mus = np.array([x[0] for x in self.mus], dtype = np.complex) nTrain = self.nTrainingPoints muTrainBase, _ = self.trainingSetGenerator.generatePoints(nTrain) self.muTrain = np.empty(len(muTrainBase) + 1, dtype = np.complex) j = 0 for mu in muTrainBase: if not np.any(np.isclose(self.mus, mu)): self.muTrain[j] = mu[0] j += 1 self.muTrain[j] = self.mus[-1] self.muTrain = self.muTrain[: j + 1] self.mus = self.mus[:-1] for j in range(len(self.mus)): if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} " "to test set.").format( self.samplingEngine.nsamples + 1, self.mus[j])) self.samplingEngine.nextSample(self.mus[j], homogeneized = self.homogeneized) errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample(-1, plotEst) if self.verbosity >= 2: verbosityDepth("MAIN", ("Uniform error estimate {:.4e}.")\ .format(maxErrorEst)) while (self.samplingEngine.nsamples < self.maxIter and (maxErrorEst > self.greedyTol or self.samplingEngine.nsamples < self.nTestPoints)): if (1. 
- self.refinementRatio) * nTrain > len(self.muTrain): muTrainExtra, _ = self.trainingSetGenerator.refine(nTrain) self.muTrain = np.sort(np.append(self.muTrain, muTrainExtra)) nTrain += len(muTrainExtra) if self.verbosity >= 5: verbosityDepth("MAIN", ("Enriching training set by {} " "elements.").format( len(muTrainExtra))) muTrainOld, maxErrorEstOld = self.muTrain, maxErrorEst errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample( muidx, plotEst) if self.verbosity >= 2: verbosityDepth("MAIN", ("Uniform error estimate {:.4e}.")\ .format(maxErrorEst)) - if maxErrorEstOld < maxErrorEst * self.TOL_INSTABILITY: + if (np.isnan(maxErrorEst) or np.isinf(maxErrorEst) + or maxErrorEstOld < maxErrorEst * self.TOL_INSTABILITY): warn(("Instability in a posteriori estimator. Starting " "preemptive greedy loop termination.")) maxErrorEst = maxErrorEstOld self.muTrain = muTrainOld self.mus = self.mus[:-1] self.samplingEngine.popSample() self.setupApprox() break if self.verbosity >= 2: verbosityDepth("DEL", ("Done computing snapshots (final " "snapshot count: {}).").format( self.samplingEngine.nsamples)) def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return (hasattr(self, "_S") and self.S == len(self.mus) and super().checkComputedApprox()) def computeScaleFactor(self): """Compute parameter rescaling factor.""" self.scaleFactor = .5 * np.abs(self.HFEngine.rescaling( self.trainingSetGenerator.lims[0][0]) - self.HFEngine.rescaling( self.trainingSetGenerator.lims[1][0])) diff --git a/rrompy/reduction_methods/taylor/approximant_taylor_pade.py b/rrompy/reduction_methods/taylor/approximant_taylor_pade.py index 7a2a162..647a1d5 100644 --- a/rrompy/reduction_methods/taylor/approximant_taylor_pade.py +++ b/rrompy/reduction_methods/taylor/approximant_taylor_pade.py @@ -1,546 +1,546 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import numpy as np from rrompy.reduction_methods.base import checkRobustTolerance from .generic_approximant_taylor import GenericApproximantTaylor from rrompy.sampling.base.pod_engine import PODEngine from rrompy.utilities.base.types import Np1D, Np2D, List, Tuple, DictAny from rrompy.utilities.base.types import HFEng from rrompy.utilities.base import purgeDict, verbosityDepth from rrompy.utilities.warning_manager import warn __all__ = ['ApproximantTaylorPade'] class ApproximantTaylorPade(GenericApproximantTaylor): """ ROM single-point fast Pade' approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. 
Recognized keys are:
    - 'POD': whether to compute POD of snapshots; defaults to True;
    - 'rho': weight for computation of original Pade' approximant;
        defaults to np.inf, i.e. fast approximant;
    - 'M': degree of Pade' approximant numerator; defaults to 0;
    - 'N': degree of Pade' approximant denominator; defaults to 0;
    - 'E': total number of derivatives current approximant relies upon;
        defaults to Emax;
    - 'Emax': total number of derivatives of solution map to be
        computed; defaults to E;
    - 'robustTol': tolerance for robust Pade' denominator management;
        defaults to 0;
    - 'sampleType': label of sampling type; available values are:
        - 'ARNOLDI': orthogonalization of solution derivatives through
            Arnoldi algorithm;
        - 'KRYLOV': standard computation of solution derivatives.
        Defaults to 'KRYLOV'.
    Defaults to empty dict.
homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False.
verbosity(optional): Verbosity level. Defaults to 10.
Attributes:
    HFEngine: HF problem solver.
    mu0: Default parameter.
    homogeneized: Whether to homogeneize Dirichlet BCs.
    approxParameters: Dictionary containing values for main parameters
        of approximant. Recognized keys are in parameterList.
    parameterList: Recognized keys of approximant parameters:
        - 'POD': whether to compute POD of snapshots;
        - 'rho': weight for computation of original Pade' approximant;
        - 'M': degree of Pade' approximant numerator;
        - 'N': degree of Pade' approximant denominator;
        - 'E': total number of derivatives current approximant relies
            upon;
        - 'Emax': total number of derivatives of solution map to be
            computed;
        - 'robustTol': tolerance for robust Pade' denominator
            management;
        - 'sampleType': label of sampling type.
    POD: Whether to compute QR factorization of derivatives.
    rho: Weight of approximant.
    M: Numerator degree of approximant.
    N: Denominator degree of approximant.
    E: Number of solution derivatives the current approximant is based
        upon.
    Emax: Total number of solution derivatives to be computed.
    robustTol: Tolerance for robust Pade' denominator management.
    sampleType: Label of sampling type.
    initialHFData: HF problem initial data.
    uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
        complex vector.
    lastSolvedHF: Wavenumber corresponding to last computed high
        fidelity solution.
    G: Square Numpy 2D array of size (N+1) corresponding to Pade'
        denominator matrix (see paper).
    Q: Numpy 1D vector containing complex coefficients of approximant
        denominator.
    P: Numpy 2D array whose columns are FE dofs of coefficients of
        approximant numerator.
    uApp: Last evaluated approximant as numpy complex vector.
    lastApproxParameters: List of parameters corresponding to last
        computed approximant.
""" def __init__(self, HFEngine:HFEng, mu0 : complex = 0, approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10): self._preInit() self._addParametersToList(["M", "N", "robustTol", "rho"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity) self._postInit() @property def approxParameters(self): """Value of approximant parameters.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["M", "N", "robustTol", "rho"], True, True, baselevel = 1) keyList = list(approxParameters.keys()) if "rho" in keyList: self._rho = approxParameters["rho"] elif hasattr(self, "rho"): self._rho = self.rho else: self._rho = np.inf GenericApproximantTaylor.approxParameters.fset(self, approxParametersCopy) self.rho = self._rho if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif hasattr(self, "robustTol"): self.robustTol = self.robustTol else: self.robustTol = 0 self._ignoreParWarnings = True if "M" in keyList: self.M = approxParameters["M"] elif hasattr(self, "M"): self.M = self.M else: self.M = 0 del self._ignoreParWarnings if "N" in keyList: self.N = approxParameters["N"] elif hasattr(self, "N"): self.N = self.N else: self.N = 0 @property def rho(self): """Value of rho.""" return self._rho @rho.setter def rho(self, rho): self._rho = np.abs(rho) self._approxParameters["rho"] = self.rho @property def M(self): """Value of M. Its assignment may change Emax and E.""" return self._M @M.setter def M(self, M): if M < 0: raise ArithmeticError("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M if not hasattr(self, "_ignoreParWarnings"): self.checkMNEEmax() @property def N(self): """Value of N. Its assignment may change Emax.""" return self._N @N.setter def N(self, N): if N < 0: raise ArithmeticError("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N self.checkMNEEmax() def checkMNEEmax(self): """Check consistency of M, N, E, and Emax.""" M = self.M if hasattr(self, "_M") else 0 N = self.N if hasattr(self, "_N") else 0 E = self.E if hasattr(self, "_E") else M + N Emax = self.Emax if hasattr(self, "_Emax") else M + N if self.rho == np.inf: if Emax < max(N, M): warn(("Prescribed Emax is too small. Updating Emax to " "max(M, N).")) self.Emax = max(N, M) if E < max(N, M): warn("Prescribed E is too small. Updating E to max(M, N).") self.E = max(N, M) else: if Emax < N + M: warn("Prescribed Emax is too small. Updating Emax to M + N.") self.Emax = self.N + M if E < N + M: warn("Prescribed E is too small. Updating E to M + N.") self.E = self.N + M @property def robustTol(self): """Value of tolerance for robust Pade' denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: warn("Overriding prescribed negative robustness tolerance to 0.") robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol @property def E(self): """Value of E. Its assignment may change Emax.""" return self._E @E.setter def E(self, E): if E < 0: raise ArithmeticError("E must be non-negative.") self._E = E self.checkMNEEmax() self._approxParameters["E"] = self.E if hasattr(self, "Emax") and self.Emax < self.E: warn("Prescribed Emax is too small. 
Updating Emax to E.") self.Emax = self.E @property def Emax(self): """Value of Emax. Its assignment may reset computed derivatives.""" return self._Emax @Emax.setter def Emax(self, Emax): if Emax < 0: raise ArithmeticError("Emax must be non-negative.") if hasattr(self, "Emax"): EmaxOld = self.Emax else: EmaxOld = -1 if hasattr(self, "_N"): N = self.N else: N = 0 if hasattr(self, "_M"): M = self.M else: M = 0 if hasattr(self, "_E"): E = self.E else: E = 0 if self.rho == np.inf: if max(N, M, E) > Emax: warn(("Prescribed Emax is too small. Updating Emax to " "max(N, M, E).")) Emax = max(N, M, E) else: if max(N + M, E) > Emax: warn(("Prescribed Emax is too small. Updating Emax to " "max(N + M, E).")) Emax = max(N + M, E) self._Emax = Emax self._approxParameters["Emax"] = Emax if EmaxOld >= self.Emax and self.samplingEngine.samples is not None: self.samplingEngine.samples = self.samplingEngine.samples[:, : self.Emax + 1] if (self.sampleType == "ARNOLDI" and self.samplingEngine.HArnoldi is not None): self.samplingEngine.HArnoldi = self.samplingEngine.HArnoldi[ : self.Emax + 1, : self.Emax + 1] self.samplingEngine.RArnoldi = self.samplingEngine.RArnoldi[ : self.Emax + 1, : self.Emax + 1] def setupApprox(self): """ Compute Pade' approximant. SVD-based robust eigenvalue management. """ if not self.checkComputedApprox(): if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". format(self.name())) self.computeDerivatives() if self.N > 0: if self.verbosity >= 7: verbosityDepth("INIT", ("Starting computation of " "denominator.")) while self.N > 0: if self.POD: ev, eV = self.findeveVGQR() else: ev, eV = self.findeveVGExplicit() newParameters = checkRobustTolerance(ev, self.E, self.robustTol) if not newParameters: break self.approxParameters = newParameters if self.N <= 0: eV = np.ones((1, 1)) self.Q = np.poly1d(eV[:, 0]) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.") else: self.Q = np.poly1d([1]) if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.") self.P = np.zeros((self.Emax + 1, self.M + 1), dtype = np.complex) for i in range(self.Q.order): rng = np.arange(self.M + 1 - i) self.P[rng, - 1 - rng - i] = self.Q[i] if self.sampleType == "ARNOLDI": self.P = self.samplingEngine.RArnoldi.dot(self.P) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.") self.lastApproxParameters = copy(self.approxParameters) if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.\n") def rescaleParameter(self, R:Np2D, A : Np2D = None, exponent : float = 1.) -> Np2D: """ Prepare parameter rescaling. Args: R: Matrix whose columns need rescaling. A(optional): Matrix whose diagonal defines scaling factor. If None, previous value of scaleFactor is used. Defaults to None. exponent(optional): Exponent of scaling factor in matrix diagonal. Defaults to 1. Returns: Rescaled matrix. """ if A is not None: aDiag = np.diag(A) scaleCoeffs = np.polyfit(np.arange(A.shape[1]), np.log(aDiag), 1) self.scaleFactor = np.exp(- scaleCoeffs[0] / exponent) return np.multiply(R, np.power(self.scaleFactor,np.arange(R.shape[1]))) def buildG(self): """Assemble Pade' denominator matrix.""" self.computeDerivatives() if self.rho == np.inf: Nmin = self.E - self.N else: Nmin = self.M - self.N + 1 if self.sampleType == "KRYLOV": DerE = self.samplingEngine.samples[:, Nmin : self.E + 1] G = self.HFEngine.innerProduct(DerE, DerE) DerE = self.rescaleParameter(DerE, G, 2.) 
G = self.HFEngine.innerProduct(DerE, DerE) else: RArnE = self.samplingEngine.RArnoldi[: self.E + 1, Nmin : self.E + 1] RArnE = self.rescaleParameter(RArnE, RArnE[Nmin :, :]) G = RArnE.conj().T.dot(RArnE) if self.rho == np.inf: self.G = G else: Gbig = G self.G = np.zeros((self.N + 1, self.N + 1), dtype = np.complex) for k in range(self.E - self.M): self.G += self.rho ** (2 * k) * Gbig[k : k + self.N + 1, k : k + self.N + 1] def findeveVGExplicit(self) -> Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of Pade' denominator matrix. """ if self.verbosity >= 10: verbosityDepth("INIT", "Building gramian matrix.") self.buildG() if self.verbosity >= 10: verbosityDepth("DEL", "Done building gramian.") if self.verbosity >= 7: - verbosityDepth("INIT", + verbosityDepth("INIT", "Solving eigenvalue problem for gramian matrix.") ev, eV = np.linalg.eigh(self.G) eV = self.rescaleParameter(eV.T).T if self.verbosity >= 5: try: condev = ev[-1] / ev[0] except: condev = np.inf verbosityDepth("MAIN", ("Solved eigenvalue problem of size {} " "with condition number {:.4e}.").format( self.N + 1, condev)) if self.verbosity >= 7: verbosityDepth("DEL", "Done solving eigenvalue problem.") return ev, eV def findeveVGQR(self) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of Pade' denominator matrix through SVD of R factor. See ``Householder triangularization of a quasimatrix'', L.Trefethen, 2008 for QR algorithm. Returns: Eigenvalues in ascending order and corresponding eigenvector matrix. """ self.computeDerivatives() if self.rho == np.inf: Nmin = self.E - self.N else: Nmin = self.M - self.N + 1 if self.sampleType == "KRYLOV": A = copy(self.samplingEngine.samples[:, Nmin : self.E + 1]) self.PODEngine = PODEngine(self.HFEngine) if self.verbosity >= 10: verbosityDepth("INIT", "Orthogonalizing samples.", end = "") R = self.PODEngine.QRHouseholder(A, only_R = True) if self.verbosity >= 10: verbosityDepth("DEL", " Done.", inline = True) else: R = self.samplingEngine.RArnoldi[: self.E + 1, Nmin : self.E + 1] R = self.rescaleParameter(R, R[Nmin :, :]) if self.rho == np.inf: - if self.verbosity >= 10: + if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square root of " - "gramian matrix."), end = "") + "gramian matrix.")) sizeI = R.shape[0] _, s, V = np.linalg.svd(R, full_matrices = False) else: if self.verbosity >= 10: verbosityDepth("INIT", ("Building matrix stack for square " "root of gramian."), end = "") Rtower = np.zeros((R.shape[0] * (self.E - self.M), self.N + 1), dtype = np.complex) for k in range(self.E - self.M): Rtower[k * R.shape[0] : (k + 1) * R.shape[0], :] = ( self.rho ** k * R[:, self.M - self.N + 1 + k : self.M + 1 + k]) if self.verbosity >= 10: verbosityDepth("DEL", " Done.", inline = True) if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square root of " - "gramian matrix."), end = "") + "gramian matrix.")) sizeI = Rtower.shape[0] _, s, V = np.linalg.svd(Rtower, full_matrices = False) eV = V.conj().T[:, ::-1] eV = self.rescaleParameter(eV.T).T if self.verbosity >= 5: try: condev = s[0] / s[-1] except: condev = np.inf verbosityDepth("MAIN", ("Solved svd problem of size {} x {} with " "condition number {:.4e}.").format( sizeI, self.N + 1, condev)) if self.verbosity >= 7: - verbosityDepth("DEL", " Done.", inline = True) + verbosityDepth("DEL", "Done solving eigenvalue problem.") return s[::-1], eV def radiusPade(self, mu:Np1D, mu0 : float = None) -> float: """ Compute translated radius to be plugged into Pade' approximant. 
        Args:
            mu: Parameter(s) 1.
            mu0: Parameter(s) 2. If None, set to self.mu0.

        Returns:
            Translated radius to be plugged into Pade' approximant.
        """
        if mu0 is None: mu0 = self.mu0
        return self.HFEngine.rescaling(mu) - self.HFEngine.rescaling(mu0)

    def getPVal(self, mu:List[complex]):
        """
        Evaluate Pade' numerator at arbitrary parameter.

        Args:
            mu: Target parameter.
        """
        self.setupApprox()
        if self.verbosity >= 10:
            verbosityDepth("INIT",
                           "Evaluating numerator at mu = {}.".format(mu),
                           end = "")
        try:
            len(mu)
        except:
            mu = [mu]
        powerlist = np.vander(self.radiusPade(mu), self.M + 1).T
        p = self.P.dot(powerlist)
        if len(mu) == 1:
            p = p.flatten()
        if self.verbosity >= 10:
            verbosityDepth("DEL", " Done.", inline = True)
        return p

    def getQVal(self, mu:List[complex]):
        """
        Evaluate Pade' denominator at arbitrary parameter.

        Args:
            mu: Target parameter.
        """
        self.setupApprox()
        if self.verbosity >= 10:
            verbosityDepth("INIT",
                           "Evaluating denominator at mu = {}.".format(mu),
                           end = "")
        q = self.Q(self.radiusPade(mu))
        if self.verbosity >= 10:
            verbosityDepth("DEL", " Done.", inline = True)
        return q

    def evalApproxReduced(self, mu:complex):
        """
        Evaluate Pade' approximant at arbitrary parameter.

        Args:
            mu: Target parameter.
        """
        self.setupApprox()
        if (not hasattr(self, "lastSolvedApp")
         or not np.isclose(self.lastSolvedApp, mu)):
            if self.verbosity >= 5:
                verbosityDepth("INIT",
                           "Evaluating approximant at mu = {}.".format(mu))
            self.uAppReduced = self.getPVal(mu) / self.getQVal(mu)
            self.lastSolvedApp = mu
            if self.verbosity >= 5:
                verbosityDepth("DEL", "Done evaluating approximant.")

    def evalApprox(self, mu:complex):
        """
        Evaluate approximant at arbitrary parameter.

        Args:
            mu: Target parameter.
        """
        self.evalApproxReduced(mu)
        self.uApp = self.samplingEngine.samples.dot(self.uAppReduced)

    def getPoles(self) -> Np1D:
        """
        Obtain approximant poles.

        Returns:
            Numpy complex vector of poles.
        """
        self.setupApprox()
        return self.HFEngine.rescalingInv(self.Q.r
                                        + self.HFEngine.rescaling(self.mu0))
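Two of the steps above admit short closing illustrations. First, findeveVGQR never forms G = R^H R explicitly: the eigenvalues of G are the squared singular values of R and the eigenvectors are its right singular vectors, so an SVD of R yields the same spectral data at better conditioning. A quick numpy check of that equivalence, on stand-in data:

import numpy as np

# Eigenpairs of G = R^H R via an SVD of R: eigenvalues of G are the
# squared singular values of R, eigenvectors its right singular vectors.
rng = np.random.default_rng(4)
R = rng.standard_normal((30, 5)) + 1j * rng.standard_normal((30, 5))
_, s, V = np.linalg.svd(R, full_matrices = False)
ev, eV = np.linalg.eigh(R.conj().T @ R)          # ascending eigenvalues

print(np.allclose(ev, s[::-1] ** 2))             # True
# Columns agree up to a unit-modulus phase factor.
overlaps = np.sum(eV.conj() * V.conj().T[:, ::-1], axis = 0)
print(np.allclose(np.abs(overlaps), 1.))         # True

Second, getPoles reads the poles off the roots of the denominator: Q is expressed in the translated variable radiusPade(mu) = rescaling(mu) - rescaling(mu0), so each root is shifted back by rescaling(mu0) and mapped through rescalingInv. A sketch assuming, for illustration only, rescaling(mu) = mu**2; the denominator coefficients and expansion point below are hypothetical.

import numpy as np

# Sketch of getPoles: poles are roots of the denominator polynomial,
# shifted back by rescaling(mu0) and mapped through rescalingInv.
rescaling = lambda x: np.power(x, 2.)
rescalingInv = lambda x: np.power(x, .5)

mu0 = 10.                                   # hypothetical expansion point
Q = np.poly1d([1., -2., -3.])               # hypothetical denominator in radiusPade
poles = rescalingInv(Q.r + rescaling(mu0))  # Q.r: roots of Q
print(poles)                                # approximant poles in the mu variable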