diff --git a/logo/RROMPY_logo_small.png b/logo/RROMPY_logo_small.png index 44cc532..5eee8f5 100644 Binary files a/logo/RROMPY_logo_small.png and b/logo/RROMPY_logo_small.png differ diff --git a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py index 6125e40..8a1a2e1 100644 --- a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py +++ b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py @@ -1,520 +1,519 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import numpy as np from scipy.special import factorial as fact from rrompy.reduction_methods.base import checkRobustTolerance from .generic_approximant_lagrange import GenericApproximantLagrange from rrompy.utilities.poly_fitting import (polybases, polyvander, polyfitname, customFit) from rrompy.reduction_methods.trained_model import TrainedModelPade as tModel from rrompy.reduction_methods.trained_model import TrainedModelData from rrompy.utilities.base.types import Np1D, Np2D, HFEng, DictAny, Tuple from rrompy.utilities.base import verbosityDepth, purgeDict from rrompy.utilities.exception_manager import (RROMPyException, modeAssert, RROMPyWarning) __all__ = ['ApproximantLagrangePade'] class ApproximantLagrangePade(GenericApproximantLagrange): """ ROM Lagrange Pade' interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'S': total number of samples current approximant relies upon; defaults to 2; - 'sampler': sample point generator; defaults to uniform sampler on [0, 1]; - 'polybasis': type of polynomial basis for interpolation; allowed values include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to 'MONOMIAL'; - 'E': coefficient of interpolant to be minimized; defaults to min(S, M + 1); - 'M': degree of Pade' interpolant numerator; defaults to 0; - 'N': degree of Pade' interpolant denominator; defaults to 0; - - 'interpRcond': tolerance for interpolation via numpy.polyfit; - defaults to None; + - 'interpRcond': tolerance for interpolation; defaults to None; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. ws: Array of snapshot weigths. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; - 'E': coefficient of interpolant to be minimized; - 'M': degree of Pade' interpolant numerator; - 'N': degree of Pade' interpolant denominator; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. M: Numerator degree of approximant. N: Denominator degree of approximant. POD: Whether to compute POD of snapshots. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust Pade' denominator management. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. uApp: Last evaluated approximant as numpy complex vector. """ def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["polybasis", "E", "M", "N", "interpRcond", "robustTol"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) self._postInit() @property def approxParameters(self): """ Value of approximant parameters. 
""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["polybasis", "E", "M", "N", "interpRcond", "robustTol"], True, True, baselevel = 1) if hasattr(self, "_M") and self.M is not None: Mold = self.M self._M = 0 if hasattr(self, "_N") and self.N is not None: Nold = self.N self._N = 0 if hasattr(self, "_E") and self.E is not None: self._E = 0 GenericApproximantLagrange.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "polybasis" in keyList: self.polybasis = approxParameters["polybasis"] elif not hasattr(self, "_polybasis") or self._polybasis is None: self.polybasis = "MONOMIAL" if "interpRcond" in keyList: self.interpRcond = approxParameters["interpRcond"] elif not hasattr(self, "interpRcond") or self.interpRcond is None: self.interpRcond = None if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif not hasattr(self, "_robustTol") or self._robustTol is None: self.robustTol = 0 if "M" in keyList: self.M = approxParameters["M"] elif hasattr(self, "_M") and self.M is not None: self.M = Mold else: self.M = 0 if "N" in keyList: self.N = approxParameters["N"] elif hasattr(self, "_N") and self.N is not None: self.N = Nold else: self.N = 0 if "E" in keyList: self.E = approxParameters["E"] else: self.E = min(self.S - 1, self.M + 1) @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in polybases: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._sampleType = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def interpRcond(self): """Value of interpRcond.""" return self._interpRcond @interpRcond.setter def interpRcond(self, interpRcond): self._interpRcond = interpRcond self._approxParameters["interpRcond"] = self.interpRcond @property def M(self): """Value of M. Its assignment may change S.""" return self._M @M.setter def M(self, M): if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M if hasattr(self, "_S") and self.S < self.M + 1: RROMPyWarning("Prescribed S is too small. Updating S to M + 1.") self.S = self.M + 1 @property def N(self): """Value of N. Its assignment may change S.""" return self._N @N.setter def N(self, N): if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N if hasattr(self, "_S") and self.S < self.N + 1: RROMPyWarning("Prescribed S is too small. Updating S to N + 1.") self.S = self.N + 1 @property def E(self): """Value of E. Its assignment may change S.""" return self._E @E.setter def E(self, E): if E < 0: raise RROMPyException("E must be non-negative.") self._E = E self._approxParameters["E"] = self.E if hasattr(self, "_S") and self.S < self.E + 1: RROMPyWarning("Prescribed S is too small. 
Updating S to E + 1.") self.S = self.E + 1 @property def robustTol(self): """Value of tolerance for robust Pade' denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): if S <= 0: raise RROMPyException("S must be positive.") if hasattr(self, "_S"): Sold = self.S else: Sold = -1 vals, label = [0] * 3, {0:"M", 1:"N", 2:"E"} if hasattr(self, "_M") and self._M is not None: vals[0] = self.M if hasattr(self, "_N") and self._N is not None: vals[1] = self.N if hasattr(self, "_E") and self._E is not None: vals[2] = self.E idxmax = np.argmax(vals) if vals[idxmax] + 1 > S: RROMPyWarning(("Prescribed S is too small. Updating S to {} + " "1.").format(label[idxmax])) self.S = vals[idxmax] + 1 else: self._S = S self._approxParameters["S"] = self.S if Sold != self.S: self.resetSamples() def _setupDenominator(self): """Compute Pade' denominator.""" if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of denominator.", timestamp = self.timestamp) while self.N > 0: TE = polyvander[self.polybasis](self.radiusPade(self.mus), self.E, scl = 1. / self.scaleFactor) TE = (TE.T * self.ws).T RHS = np.zeros(self.E + 1) RHS[-1] = 1. fitOut = customFit(TE.T, RHS, full = True, rcond = self.interpRcond) if self.verbosity >= 5: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree {} " "through {}... Conditioning of LS " "system: {:.4e}.").format( self.S, self.E, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] < self.E + 1: Enew = fitOut[1][1] - 1 Nnew = min(self.N, Enew) Mnew = min(self.M, Enew) if Nnew == self.N: strN = "" else: strN = "N from {} to {} and ".format(self.N, Nnew) if Mnew == self.M: strM = "" else: strM = "M from {} to {} and ".format(self.M, Mnew) RROMPyWarning(("Polyfit is poorly conditioned.\nReducing {}{}" "E from {} to {}.").format(strN, strM, self.E, Enew)) newParams = {"N" : Nnew, "M" : Mnew, "E" : Enew} self.approxParameters = newParams continue mus_un, idx_un, cnt_un = np.unique(self.mus, return_inverse = True, return_counts = True) TE = polyvander[self.polybasis](self.radiusPade(self.mus), self.N, scl = 1. 
/ self.scaleFactor) TE = (TE.T * self.ws).T if len(mus_un) == len(self.mus): Ghalf = (TE.T * fitOut[0]).T else: pseudoInv = np.zeros((len(self.mus), len(self.mus)), dtype = np.complex) for j in range(len(mus_un)): pseudoInv_loc = np.zeros((cnt_un[j], cnt_un[j]), dtype = np.complex) mask = np.arange(len(self.mus))[idx_un == j] for der in range(cnt_un[j]): fitderj = fitOut[0][mask[der]] pseudoInv_loc = (pseudoInv_loc + fitderj * np.diag(np.ones(1 + der), k = der - cnt_un[j] + 1)) I = np.ix_(mask, mask) pseudoInv[I] = np.flipud(pseudoInv_loc) Ghalf = pseudoInv.dot(TE) if self.POD: self.Ghalf = self.samplingEngine.RPOD.dot(Ghalf) ev, eV = self.findeveVGQR() else: self.Ghalf = self.samplingEngine.samples.dot(Ghalf) ev, eV = self.findeveVGExplicit() newParams = checkRobustTolerance(ev, self.E, self.robustTol) if not newParams: break self.approxParameters = newParams if self.N <= 0: self._N = 0 eV = np.ones((1, 1)) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.", timestamp = self.timestamp) return eV[:, 0] def _setupNumerator(self): """Compute Pade' numerator.""" if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.", timestamp = self.timestamp) Qevaldiag = np.diag(self.trainedModel.getQVal(self.mus)) verb = self.trainedModel.verbosity self.trainedModel.verbosity = 0 mus_un, idx_un, cnt_un = np.unique(self.mus, return_inverse = True, return_counts = True) for j in range(len(mus_un)): if cnt_un[j] > 1: Q_loc = np.zeros((cnt_un[j], cnt_un[j]), dtype = np.complex) for der in range(1, cnt_un[j]): Qderj = (self.trainedModel.getQVal(mus_un[j], der, scl = 1. / self.scaleFactor) / fact(der)) Q_loc = Q_loc + Qderj * np.diag(np.ones(cnt_un[j] - der), k = - der) I = np.ix_(idx_un == j, idx_un == j) Qevaldiag[I] = Qevaldiag[I] + Q_loc self.trainedModel.verbosity = verb while self.M >= 0: fitVander = polyvander[self.polybasis](self.radiusPade(self.mus), self.M, scl = 1. / self.scaleFactor) fitOut = customFit(fitVander, Qevaldiag, w = self.ws, full = True, rcond = self.interpRcond) if self.verbosity >= 5: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree {} " "through {}... Conditioning of LS " "system: {:.4e}.").format( self.S, self.M, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] == self.M + 1: P = fitOut[0].T break RROMPyWarning(("Polyfit is poorly conditioned. Reducing M from {} " "to {}. Exact snapshot interpolation not " "guaranteed.").format(self.M, fitOut[1][1] - 1)) self.M = fitOut[1][1] - 1 if self.M <= 0: raise RROMPyException(("Instability in computation of numerator. " "Aborting.")) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.", timestamp = self.timestamp) return np.atleast_2d(P) def setupApprox(self): """ Compute Pade' interpolant. SVD-based robust eigenvalue management. """ if self.checkComputedApprox(): return modeAssert(self._mode, message = "Cannot setup approximant.") if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". 
format(self.name()), timestamp = self.timestamp) self.computeScaleFactor() self.computeSnapshots() if self.trainedModel is None: self.trainedModel = tModel() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data = TrainedModelData(self.trainedModel.name(), self.mu0, np.copy(self.samplingEngine.samples), self.HFEngine.rescalingExp) data.polytype = self.polybasis data.scaleFactor = self.scaleFactor data.mus = np.copy(self.mus) self.trainedModel.data = data else: self.trainedModel.data.projMat = np.copy( self.samplingEngine.samples) if self.N > 0: Q = self._setupDenominator() else: Q = np.ones(1, dtype = np.complex) self.trainedModel.data.Q = np.copy(Q) P = self._setupNumerator() if self.POD: P = self.samplingEngine.RPOD.dot(P) self.trainedModel.data.P = np.copy(P) self.trainedModel.data.approxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.", timestamp = self.timestamp) def findeveVGExplicit(self, verbOutput : int = 5) -> Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of Pade' denominator matrix. """ modeAssert(self._mode, message = "Cannot solve eigenvalue problem.") if self.verbosity >= 10: verbosityDepth("INIT", "Building gramian matrix.", timestamp = self.timestamp) self.G = self.HFEngine.innerProduct(self.Ghalf, self.Ghalf) if self.verbosity >= 10: verbosityDepth("DEL", "Done building gramian.", timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("INIT", ("Solving eigenvalue problem for gramian " "matrix."), timestamp = self.timestamp) ev, eV = np.linalg.eigh(self.G) if self.verbosity >= verbOutput: try: condev = ev[-1] / ev[0] except: condev = np.inf verbosityDepth("MAIN", ("Solved eigenvalue problem of size {} " "with condition number {:.4e}.").format( self.N + 1, condev), timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("DEL", "Done solving eigenvalue problem.", timestamp = self.timestamp) return ev, eV def findeveVGQR(self, verbOutput : int = 5) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of Pade' denominator matrix through SVD of R factor. """ if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square root of gramian " "matrix."), timestamp = self.timestamp) _, s, eV = np.linalg.svd(self.Ghalf, full_matrices = False) ev = s[::-1] eV = eV[::-1, :].T.conj() if self.verbosity >= verbOutput: try: condev = s[0] / s[-1] except: condev = np.inf verbosityDepth("MAIN", ("Solved svd problem of size {} x {} with " "condition number {:.4e}.").format( self.S, self.N + 1, condev), timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("DEL", "Done solving eigenvalue problem.", timestamp = self.timestamp) return ev, eV def radiusPade(self, mu:Np1D, mu0 : float = None) -> float: """ Compute translated radius to be plugged into Pade' approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.mu0. Returns: Translated radius to be plugged into Pade' approximant. """ return self.trainedModel.radiusPade(mu, mu0) def getResidues(self) -> Np1D: """ Obtain approximant residues. Returns: Matrix with residues as columns. 
""" return self.trainedModel.getResidues() diff --git a/rrompy/reduction_methods/taylor/approximant_taylor_pade.py b/rrompy/reduction_methods/taylor/approximant_taylor_pade.py index a291344..6dd34af 100644 --- a/rrompy/reduction_methods/taylor/approximant_taylor_pade.py +++ b/rrompy/reduction_methods/taylor/approximant_taylor_pade.py @@ -1,456 +1,455 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import numpy as np from rrompy.reduction_methods.base import checkRobustTolerance from rrompy.reduction_methods.trained_model import (TrainedModelData, TrainedModelPade as tModel) from .generic_approximant_taylor import GenericApproximantTaylor from rrompy.sampling.base.pod_engine import PODEngine from rrompy.utilities.base.types import Np1D, Np2D, Tuple, DictAny, HFEng from rrompy.utilities.base import verbosityDepth, purgeDict from rrompy.utilities.exception_manager import (RROMPyException, modeAssert, RROMPyWarning) __all__ = ['ApproximantTaylorPade'] class ApproximantTaylorPade(GenericApproximantTaylor): """ ROM single-point fast Pade' approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'rho': weight for computation of original Pade' approximant; defaults to np.inf, i.e. fast approximant; - 'M': degree of Pade' approximant numerator; defaults to 0; - 'N': degree of Pade' approximant denominator; defaults to 0; - 'E': total number of derivatives current approximant relies upon; defaults to 1; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0; - 'sampleType': label of sampling type; available values are: - 'ARNOLDI': orthogonalization of solution derivatives through Arnoldi algorithm; - 'KRYLOV': standard computation of solution derivatives. Defaults to 'KRYLOV'. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'rho': weight for computation of original Pade' approximant; - 'M': degree of Pade' approximant numerator; - 'N': degree of Pade' approximant denominator; - 'E': total number of derivatives current approximant relies upon; - 'robustTol': tolerance for robust Pade' denominator management; - 'sampleType': label of sampling type. POD: Whether to compute QR factorization of derivatives. rho: Weight of approximant. 
M: Numerator degree of approximant. N: Denominator degree of approximant. E: Number of solution derivatives over which current approximant is based upon. robustTol: Tolerance for robust Pade' denominator management. sampleType: Label of sampling type. initialHFData: HF problem initial data. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. G: Square Numpy 2D vector of size (N+1) corresponding to Pade' denominator matrix (see paper). uApp: Last evaluated approximant as numpy complex vector. """ def __init__(self, HFEngine:HFEng, mu0 : complex = 0, approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["M", "N", "robustTol", "rho"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) self._postInit() @property def approxParameters(self): """Value of approximant parameters.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["M", "N", "robustTol", "rho"], True, True, baselevel = 1) keyList = list(approxParameters.keys()) if "rho" in keyList: self._rho = approxParameters["rho"] elif not hasattr(self, "_rho") or self.rho is None: self._rho = np.inf GenericApproximantTaylor.approxParameters.fset(self, approxParametersCopy) self.rho = self._rho if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif not hasattr(self, "_robustTol") or self._robustTol is None: self.robustTol = 0 self._ignoreParWarnings = True if "M" in keyList: self.M = approxParameters["M"] elif hasattr(self, "_M") and self._M is not None: self.M = self.M else: self.M = 0 del self._ignoreParWarnings if "N" in keyList: self.N = approxParameters["N"] elif hasattr(self, "_N") and self._N is not None: self.N = self.N else: self.N = 0 @property def rho(self): """Value of rho.""" return self._rho @rho.setter def rho(self, rho): self._rho = np.abs(rho) self._approxParameters["rho"] = self.rho @property def M(self): """Value of M. Its assignment may change E.""" return self._M @M.setter def M(self, M): if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M if not hasattr(self, "_ignoreParWarnings"): self.checkMNE() @property def N(self): """Value of N. Its assignment may change E.""" return self._N @N.setter def N(self, N): if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N if not hasattr(self, "_ignoreParWarnings"): self.checkMNE() def checkMNE(self): """Check consistency of M, N, and E.""" if not hasattr(self, "_E") or self.E is None: return M = self.M if (hasattr(self, "_M") and self.M is not None) else 0 N = self.N if (hasattr(self, "_N") and self.N is not None) else 0 msg = "max(M, N)" if self.rho == np.inf else "M + N" bound = eval(msg) if self.E < bound: RROMPyWarning(("Prescribed E is too small. 
Updating E to " "{}.").format(msg)) self.E = bound del M, N @property def robustTol(self): """Value of tolerance for robust Pade' denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol @property def E(self): """Value of E.""" return self._E @E.setter def E(self, E): GenericApproximantTaylor.E.fset(self, E) self.checkMNE() def _setupDenominator(self): """Compute Pade' denominator.""" if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of denominator.", timestamp = self.timestamp) while self.N > 0: if self.POD: ev, eV = self.findeveVGQR() else: ev, eV = self.findeveVGExplicit() newParameters = checkRobustTolerance(ev, self.E, self.robustTol) if not newParameters: break self.approxParameters = newParameters if self.N <= 0: eV = np.ones((1, 1)) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.", timestamp = self.timestamp) return eV[::-1, 0] def _setupNumerator(self): """Compute Pade' numerator.""" if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.", timestamp = self.timestamp) P = np.zeros((self.E + 1, self.M + 1), dtype = np.complex) for i in range(self.E + 1): l = min(self.M + 1, i + self.N + 1) - P[i, i : l] = self.trainedModel.data.Q[: l - i] + if i < l: + P[i, i : l] = self.trainedModel.data.Q[: l - i] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.", timestamp = self.timestamp) return self.rescaleParameter(P.T).T def setupApprox(self): """ Compute Pade' approximant. SVD-based robust eigenvalue management. """ if self.checkComputedApprox(): return if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". format(self.name()), timestamp = self.timestamp) self.computeDerivatives() if self.trainedModel is None: self.trainedModel = tModel() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data = TrainedModelData(self.trainedModel.name(), self.mu0, - self.samplingEngine.samples[:,:self.E + 1], - self.HFEngine.rescalingExp) + None, self.HFEngine.rescalingExp) data.polytype = "MONOMIAL" self.trainedModel.data = data - else: - self.trainedModel.data.projMat = ( - self.samplingEngine.samples[:, : self.E + 1]) - if self.N > 0: Q = self._setupDenominator() else: Q = np.ones(1, dtype = np.complex) self.trainedModel.data.Q = np.copy(Q) self.trainedModel.data.scaleFactor = self.scaleFactor + self.trainedModel.data.projMat = ( + self.samplingEngine.samples[:, : self.E + 1]) P = self._setupNumerator() if self.sampleType == "ARNOLDI": P = self.samplingEngine.RArnoldi.dot(P) self.trainedModel.data.P = np.copy(P) self.trainedModel.data.approxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.", timestamp = self.timestamp) def rescaleParameter(self, R:Np2D, A : Np2D = None, exponent : float = 1.) -> Np2D: """ Prepare parameter rescaling. Args: R: Matrix whose columns need rescaling. A(optional): Matrix whose diagonal defines scaling factor. If None, previous value of scaleFactor is used. Defaults to None. exponent(optional): Exponent of scaling factor in matrix diagonal. Defaults to 1. Returns: Rescaled matrix. 
""" modeAssert(self._mode, message = "Cannot compute rescaling factor.") if A is not None: aDiag = np.diag(A) scaleCoeffs = np.polyfit(np.arange(A.shape[1]), np.log(aDiag), 1) self.scaleFactor = np.exp(- scaleCoeffs[0] / exponent) return np.multiply(R, np.power(self.scaleFactor,np.arange(R.shape[1]))) def buildG(self): """Assemble Pade' denominator matrix.""" modeAssert(self._mode, message = "Cannot compute G matrix.") self.computeDerivatives() if self.verbosity >= 10: verbosityDepth("INIT", "Building gramian matrix.", timestamp = self.timestamp) if self.rho == np.inf: Nmin = self.E - self.N else: Nmin = self.M - self.N + 1 if self.sampleType == "KRYLOV": DerE = self.samplingEngine.samples[:, Nmin : self.E + 1] G = self.HFEngine.innerProduct(DerE, DerE) DerE = self.rescaleParameter(DerE, G, 2.) G = self.HFEngine.innerProduct(DerE, DerE) else: RArnE = self.samplingEngine.RArnoldi[: self.E + 1, Nmin : self.E + 1] RArnE = self.rescaleParameter(RArnE, RArnE[Nmin :, :]) G = RArnE.T.conj().dot(RArnE) if self.rho == np.inf: self.G = G else: Gbig = G self.G = np.zeros((self.N + 1, self.N + 1), dtype = np.complex) for k in range(self.E - self.M): self.G += self.rho ** (2 * k) * Gbig[k : k + self.N + 1, k : k + self.N + 1] if self.verbosity >= 10: verbosityDepth("DEL", "Done building gramian.", timestamp = self.timestamp) def findeveVGExplicit(self) -> Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of Pade' denominator matrix. """ modeAssert(self._mode, message = "Cannot solve eigenvalue problem.") self.buildG() if self.verbosity >= 7: verbosityDepth("INIT", "Solving eigenvalue problem for gramian matrix.", timestamp = self.timestamp) ev, eV = np.linalg.eigh(self.G) if self.verbosity >= 5: try: condev = ev[-1] / ev[0] except: condev = np.inf verbosityDepth("MAIN", ("Solved eigenvalue problem of size {} " "with condition number {:.4e}.").format( self.N + 1, condev), timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("DEL", "Done solving eigenvalue problem.", timestamp = self.timestamp) return ev, eV def findeveVGQR(self) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of Pade' denominator matrix through SVD of R factor. See ``Householder triangularization of a quasimatrix'', L.Trefethen, 2008 for QR algorithm. Returns: Eigenvalues in ascending order and corresponding eigenvector matrix. 
""" modeAssert(self._mode, message = "Cannot solve eigenvalue problem.") self.computeDerivatives() if self.rho == np.inf: Nmin = self.E - self.N else: Nmin = self.M - self.N + 1 if self.sampleType == "KRYLOV": A = copy(self.samplingEngine.samples[:, Nmin : self.E + 1]) self.PODEngine = PODEngine(self.HFEngine) if self.verbosity >= 10: verbosityDepth("INIT", "Orthogonalizing samples.", timestamp = self.timestamp) R = self.PODEngine.QRHouseholder(A, only_R = True) if self.verbosity >= 10: verbosityDepth("DEL", "Done orthogonalizing samples.", timestamp = self.timestamp) else: R = self.samplingEngine.RArnoldi[: self.E + 1, Nmin : self.E + 1] R = self.rescaleParameter(R, R[R.shape[0] - R.shape[1] :, :]) if self.rho == np.inf: if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square root of " "gramian matrix."), timestamp = self.timestamp) sizeI = R.shape[0] _, s, V = np.linalg.svd(R, full_matrices = False) else: if self.verbosity >= 10: verbosityDepth("INIT", ("Building matrix stack for square " "root of gramian."), timestamp = self.timestamp) Rtower = np.zeros((R.shape[0] * (self.E - self.M), self.N + 1), dtype = np.complex) for k in range(self.E - self.M): - Rtower[k * R.shape[0] : (k + 1) * R.shape[0], :] = ( - self.rho ** k - * R[:, self.M - self.N + 1 + k : self.M + 1 + k]) + RTleft = max(0, self.N - self.M - k) + Rleft = max(0, self.M - self.N + k) + Rtower[k * R.shape[0] : (k + 1) * R.shape[0], RTleft :] = ( + self.rho ** k * R[:, Rleft : self.M + 1 + k]) if self.verbosity >= 10: verbosityDepth("DEL", "Done building matrix stack.", timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square root of " "gramian matrix."), timestamp = self.timestamp) sizeI = Rtower.shape[0] _, s, V = np.linalg.svd(Rtower, full_matrices = False) eV = V[::-1, :].T.conj() if self.verbosity >= 5: try: condev = s[0] / s[-1] except: condev = np.inf verbosityDepth("MAIN", ("Solved svd problem of size {} x {} with " "condition number {:.4e}.").format(sizeI, self.N + 1, condev), timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("DEL", "Done solving eigenvalue problem.", timestamp = self.timestamp) return s[::-1], eV def radiusPade(self, mu:Np1D, mu0 : float = None) -> float: """ Compute translated radius to be plugged into Pade' approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.mu0. Returns: Translated radius to be plugged into Pade' approximant. """ return self.trainedModel.radiusPade(mu, mu0) def getResidues(self) -> Np1D: """ Obtain approximant residues. Returns: Matrix with residues as columns. """ return self.trainedModel.getResidues() diff --git a/tests/test_1_utilities/basic_routines.py b/tests/test_1_utilities/basic_routines.py index b86670f..dfb251e 100644 --- a/tests/test_1_utilities/basic_routines.py +++ b/tests/test_1_utilities/basic_routines.py @@ -1,66 +1,66 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import pytest import numpy as np from rrompy.utilities.exception_manager import RROMPyException, RROMPyWarning from rrompy.utilities.base import (findDictStrKey, purgeDict, purgeList, lowDiscrepancy, verbosityDepth) def test_exception(): with pytest.raises(Exception): raise RROMPyException("This is an exception!") def test_warning(capsys): - RROMPyWarning() + RROMPyWarning("This is a warning.") out, err = capsys.readouterr() - assert len(out) > 0 + assert "This is a warning." in out assert len(err) == 0 def test_dict_list(): dictBase = {str(x): str(x**2) for x in range(10)} assert findDictStrKey("-1", dictBase.keys()) is None assert findDictStrKey("5", dictBase.keys()) == "5" dictPurged = purgeDict(dictBase, [str(x) for x in range(4)], silent = True) assert len(dictPurged.keys()) == 4 listBase = list(dictBase.values()) listPurged = purgeList(listBase, [str(x**2) for x in range(4,6)], silent = True) assert len(listPurged) == 2 def test_vandercorput(): idxs = lowDiscrepancy(8) assert np.all(idxs == np.array([0,4,2,6,1,5,3,7])) def test_verbositydepth(capsys): verbosityDepth("INIT", "Start message", timestamp = False) out, err = capsys.readouterr() assert out == "┌Start message\n" assert len(err) == 0 verbosityDepth("MAIN", "Main message", end = "\n\n", timestamp = False) out, err = capsys.readouterr() assert out == "├Main message\n\n" assert len(err) == 0 verbosityDepth("INIT", "Start message2") verbosityDepth("DEL", "Delete message2") verbosityDepth("DEL", "Delete message") with pytest.raises(Exception): verbosityDepth("DEL", "Wrong deletion") - \ No newline at end of file + diff --git a/tests/test_2_hfengines/helmholtz_internal.py b/tests/test_2_hfengines/helmholtz_internal.py index 934ccba..03ffba1 100644 --- a/tests/test_2_hfengines/helmholtz_internal.py +++ b/tests/test_2_hfengines/helmholtz_internal.py @@ -1,92 +1,95 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import os, shutil import numpy as np from rrompy.hfengines.linear_problem import ( HelmholtzSquareBubbleDomainProblemEngine, HelmholtzSquareBubbleProblemEngine, HelmholtzSquareTransmissionProblemEngine) def test_helmholtz_square(): solver = HelmholtzSquareBubbleProblemEngine(kappa = 4, theta = 1., n = 50, verbosity = 0) mu = 5 uh = solver.solve(mu) assert np.isclose(solver.norm(uh), 70762597.99694124, rtol = 1e-3) assert np.isclose(solver.norm(solver.residual(uh, mu)), 2.1855986e-06, rtol = 1e-1) + if not os.path.isdir("./.pytest_cache"): os.mkdir("./.pytest_cache") filesOut = [x for x in os.listdir("./.pytest_cache") if (x[-4:] == ".pvd" and x[:9] == "outSquare")] filesOutData = [x for x in os.listdir("./.pytest_cache") if (x[-4:] == ".vtu" and x[:9] == "outSquare")] for fileOut in filesOut: os.remove("./.pytest_cache/" + fileOut) for fileOut in filesOutData: os.remove("./.pytest_cache/" + fileOut) solver.outParaview(uh, what = ["MESH", "ABS"], filename = ".pytest_cache/outSquare", forceNewFile = False) filesOut = [x for x in os.listdir("./.pytest_cache") if (x[-4:] == ".pvd" and x[:9] == "outSquare")] filesOutData = [x for x in os.listdir("./.pytest_cache") if (x[-4:] == ".vtu" and x[:9] == "outSquare")] assert len(filesOut) == 1 assert len(filesOutData) == 1 os.remove("./.pytest_cache/" + filesOut[0]) os.remove("./.pytest_cache/" + filesOutData[0]) def test_helmholtz_transmission(): solver = HelmholtzSquareTransmissionProblemEngine(nT = 1, nB = 2, theta = np.pi * 40 / 180, kappa = 4., n = 50, verbosity = 0) mu = 5. uh = solver.solve(mu) assert np.isclose(solver.norm(uh), 46.4528217234862, rtol = 1e-5) assert np.isclose(solver.norm(solver.residual(uh, mu)), 3.7288565e-12, rtol = 1e-1) + if not os.path.isdir("./.pytest_cache"): os.mkdir("./.pytest_cache") solver.outParaviewTimeDomain(uh, omega = mu, filename = ".pytest_cache/outSquare", forceNewFile = False, folder = True) filesOut = [x for x in os.listdir("./.pytest_cache/outSquare") if (x[-4:] == ".pvd" and x[:9] == "outSquare")] filesOutData = [x for x in os.listdir("./.pytest_cache/outSquare") if (x[-4:] == ".vtu" and x[:9] == "outSquare")] assert len(filesOut) == 1 assert len(filesOutData) == 20 shutil.rmtree("./.pytest_cache/outSquare") def test_helmholtz_domain(): solver = HelmholtzSquareBubbleDomainProblemEngine(kappa = 4, theta = 1., n = 50, mu0 = 1.5, verbosity = 0) mu = 1.5 uh = solver.solve(mu) + if not os.path.isdir("./.pytest_cache"): os.mkdir("./.pytest_cache") solver.plot(uh, save = "./.pytest_cache/outDomain", show = False) filesOut = [x for x in os.listdir("./.pytest_cache") if (x[-4:] == ".eps" and x[:9] == "outDomain")] assert len(filesOut) == 1 os.remove("./.pytest_cache/" + filesOut[0]) assert np.isclose(solver.norm(uh), 263.91673976964546, rtol = 1e-5) assert np.isclose(solver.norm(solver.residual(uh, mu)), 1.734638595e-11, rtol = 1e-1) diff --git a/tests/test_3_reduction_methods/lagrange_pade.py b/tests/test_3_reduction_methods/lagrange_pade.py new file mode 100644 index 0000000..7d62e72 --- /dev/null +++ b/tests/test_3_reduction_methods/lagrange_pade.py @@ -0,0 +1,68 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import numpy as np +from matrix_fft import matrixFFT +from rrompy.reduction_methods.lagrange import ApproximantLagrangePade as ALP +from rrompy.utilities.parameter_sampling import (QuadratureSampler as QS, + ManualSampler as MS) + +def test_monomials(capsys): + mu = 1.5 + solver = matrixFFT() + params = {"POD": False, "M": 9, "N": 9, "S": 10, "robustTol": 1e-6, + "interpRcond": 1e-3, "polybasis": "MONOMIAL"} + approx = ALP(solver, 4., params, verbosity = 0) + approx.sampler = QS([1.5, 6.5], "UNIFORM") + approx.setupApprox() + + out, err = capsys.readouterr() + assert (("poorly conditioned.\nReducing N from 9 to 8 and M" in out) + and ("eigenvalues below tolerance. Reducing N from" in out)) + assert len(err) == 0 + assert np.isclose(approx.normErr(mu), .032042037, rtol = 1e-3) + +def test_well_cond(): + mu = 1.5 + solver = matrixFFT() + params = {"POD": False, "M": 9, "N": 9, "S": 10, "robustTol": 1e-6, + "interpRcond": 1e-3, "polybasis": "CHEBYSHEV"} + approx = ALP(solver, 4., params, verbosity = 0) + approx.sampler = QS([1., 7.], "CHEBYSHEV") + approx.setupApprox() + for mu in approx.mus: + assert np.isclose(approx.normErr(mu), 0., atol = 1e-8) + poles = approx.getPoles() + for lambda_ in np.arange(1, 8): + assert np.isclose(np.min(np.abs(poles - lambda_)), 0., atol = 1e-4) + +def test_hermite(): + mu = 1.5 + solver = matrixFFT() + params = {"POD": True, "M": 11, "N": 11, "S": 12, "polybasis": "CHEBYSHEV"} + approx = ALP(solver, 4., params, verbosity = 0) + sampler0 = QS([1., 7.], "CHEBYSHEV") + points = np.tile(sampler0.generatePoints(4)[0], 3) + approx.sampler = MS([1., 7.], points = points) + approx.setupApprox() + for mu in approx.mus: + assert np.isclose(approx.normErr(mu), 0., atol = 1e-8) + poles = approx.getPoles() + for lambda_ in np.arange(1, 8): + assert np.isclose(np.min(np.abs(poles - lambda_)), 0., atol = 1e-4) + \ No newline at end of file diff --git a/tests/test_3_reduction_methods/lagrange_rb.py b/tests/test_3_reduction_methods/lagrange_rb.py new file mode 100644 index 0000000..db275bd --- /dev/null +++ b/tests/test_3_reduction_methods/lagrange_rb.py @@ -0,0 +1,58 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . 
+# + +import numpy as np +from matrix_fft import matrixFFT +from rrompy.reduction_methods.lagrange import ApproximantLagrangeRB as ALR +from rrompy.utilities.parameter_sampling import (QuadratureSampler as QS, + ManualSampler as MS) + +def test_LS(capsys): + solver = matrixFFT() + params = {"POD": True, "R": 5, "S": 10} + approx = ALR(solver, 4., params, verbosity = 0) + approx.sampler = QS([1., 7.], "CHEBYSHEV") + approx.setupApprox() + for mu in approx.mus: + assert not np.isclose(approx.normErr(mu), 0., atol = 1e-7) + + approx.POD = False + approx.setupApprox() + for mu in approx.mus[approx.R :]: + assert not np.isclose(approx.normErr(mu), 0., atol = 1e-3) + +def test_interp(): + solver = matrixFFT() + params = {"POD": False, "S": 10} + approx = ALR(solver, 4., params, verbosity = 0) + approx.sampler = QS([1., 7.], "CHEBYSHEV") + approx.setupApprox() + for mu in approx.mus: + assert np.isclose(approx.normErr(mu), 0., atol = 1e-7) + +def test_hermite(): + mu = 1.5 + solver = matrixFFT() + params = {"POD": True, "S": 12} + approx = ALR(solver, 4., params, verbosity = 0) + sampler0 = QS([1., 7.], "CHEBYSHEV") + points = np.tile(sampler0.generatePoints(4)[0], 3) + approx.sampler = MS([1., 7.], points = points) + approx.setupApprox() + for mu in approx.mus: + assert np.isclose(approx.normErr(mu), 0., atol = 1e-8) diff --git a/tests/test_3_reduction_methods/bla.py_ b/tests/test_3_reduction_methods/matrix_fft.py similarity index 60% rename from tests/test_3_reduction_methods/bla.py_ rename to tests/test_3_reduction_methods/matrix_fft.py index ab0e8f7..cb54bd9 100644 --- a/tests/test_3_reduction_methods/bla.py_ +++ b/tests/test_3_reduction_methods/matrix_fft.py @@ -1,23 +1,33 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # -import pytest - -def test_exception(): - pass +import numpy as np +from rrompy.hfengines.base import MatrixEngineBase as MEB +def matrixFFT(): + N = 100 + solver = MEB(verbosity = 0) + np.random.seed(420) + solver.setSolver("SOLVE") + fftB = np.fft.fft(np.eye(N)) * N**-.5 + solver.nAs = 2 + solver.As = [fftB.dot(np.multiply(np.arange(1, 1 + N), fftB.conj()).T), + - np.eye(N)] + solver.nbs = 1 + solver.bs = [np.random.randn(N) + 1.j * np.random.randn(N)] + return solver diff --git a/tests/test_3_reduction_methods/taylor_pade.py b/tests/test_3_reduction_methods/taylor_pade.py new file mode 100644 index 0000000..83176c8 --- /dev/null +++ b/tests/test_3_reduction_methods/taylor_pade.py @@ -0,0 +1,93 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import os +import numpy as np +from matrix_fft import matrixFFT +from rrompy.reduction_methods.taylor import ApproximantTaylorPade as ATP + +def test_rho(capsys): + mu = 1.5 + mu0 = 2. + 1.j + solver = matrixFFT() + uh = solver.solve(mu) + params = {"POD": False, "rho": 3., "M": 4, "N": 5, "E": 10, + "robustTol": 1e-6, "sampleType": "Krylov"} + approx = ATP(solver, mu0, params, verbosity = 0) + approx.setupApprox() + + out, err = capsys.readouterr() + assert ("Smallest 2 eigenvalues below tolerance. Reducing N from 5 to 4 " + "and E from 10 to 9.") in out + assert len(err) == 0 + + if not os.path.isdir("./.pytest_cache"): os.mkdir("./.pytest_cache") + filesOut = [x for x in os.listdir("./.pytest_cache") if + (x[-4:] == ".pkl" and x[:6] == "outRho")] + for fileOut in filesOut: os.remove("./.pytest_cache/" + fileOut) + fileStored = approx.storeTrainedModel(".pytest_cache/outRho") + filesOut = [x for x in os.listdir("./.pytest_cache") if + (x[-4:] == ".pkl" and x[:6] == "outRho")] + assert len(filesOut) == 1 + assert filesOut[0] == fileStored[- len(filesOut[0]) :] + uhP1 = approx.getApprox(mu) + errP = approx.getErr(mu) + errNP = approx.normErr(mu) + assert np.allclose(np.abs(errP - (uhP1 - uh)), 0., rtol = 1e-3) + assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) + resP = approx.getRes(mu) + resNP = approx.normRes(mu) + assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) + assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), + 0., rtol = 1e-3) + del approx + approx = ATP(solver, mu0, {"E": 3}, verbosity = 0) + approx.loadTrainedModel(fileStored) + for fileOut in filesOut: os.remove("./.pytest_cache/" + fileOut) + uhP2 = approx.getApprox(mu) + assert np.allclose(np.abs(uhP1 - uhP2), 0., rtol = 1e-3) + +def test_E_warn(capsys): + mu = 1.5 + mu0 = 2. + 1.j + solver = matrixFFT() + uh = solver.solve(mu) + params = {"POD": True, "rho": 3., "M": 4, "N": 5, "E": 2} + approx = ATP(solver, mu0, params, verbosity = 0) + approx.setupApprox() + + out, err = capsys.readouterr() + assert "Prescribed E is too small. Updating E to M + N." in out + assert len(err) == 0 + + uhP = approx.getApprox(mu) + errP = approx.getErr(mu) + errNP = approx.normErr(mu) + assert np.allclose(np.abs(errP - (uhP - uh)), 0., rtol = 1e-3) + assert np.isclose(errNP, 0.1372966, rtol = 1e-1) + + ress = approx.getResidues() + condres = np.linalg.cond(solver.innerProduct(ress, ress)) + assert np.isclose(condres, 36.63625, rtol = 1e-3) + + poles = approx.getPoles() + assert np.isclose(np.min(np.abs(poles - 2.)), 0., atol = 1e-5) + assert np.isclose(np.min(np.abs(poles - 1.)), 0., atol = 1e-3) + assert np.isclose(np.min(np.abs(poles - 3.)), 0., atol = 1e-3) + + \ No newline at end of file diff --git a/tests/test_3_reduction_methods/taylor_rb.py b/tests/test_3_reduction_methods/taylor_rb.py new file mode 100644 index 0000000..42f0f9d --- /dev/null +++ b/tests/test_3_reduction_methods/taylor_rb.py @@ -0,0 +1,50 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. 
+# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import numpy as np +from matrix_fft import matrixFFT +from rrompy.reduction_methods.taylor import ApproximantTaylorRB as ATR + +def test_R(): + mu = 1.5 + mu0 = 2. + 1.j + solver = matrixFFT() + uh = solver.solve(mu) + params = {"POD": True, "R": 5, "E": 10, "sampleType": "Krylov"} + approx = ATR(solver, mu0, params, verbosity = 0) + approx.setupApprox() + + uhP = approx.getApprox(mu) + errP = approx.getErr(mu) + errNP = approx.normErr(mu) + assert np.allclose(np.abs(errP - (uhP - uh)), 0., rtol = 1e-3) + assert np.isclose(errNP, 0.023691832, rtol = 1e-1) + + poles = approx.getPoles() + assert np.isclose(np.min(np.abs(poles - 2.)), 0., atol = 1e-4) + assert np.isclose(np.min(np.abs(poles - 1.)), 0., atol = 1e-2) + assert np.isclose(np.min(np.abs(poles - 3.)), 0., atol = 1e-2) + +def test_moments(): + mu0 = 2. + 1.j + solver = matrixFFT() + params = {"POD": True, "E": 10, "sampleType": "Krylov"} + approx = ATR(solver, mu0, params, verbosity = 0) + approx.setupApprox() + assert np.isclose(approx.normErr(mu0), 0., atol = 1e-10) +
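Both _setupDenominator implementations above take the Pade' denominator from the eigenvector of a Gramian-type matrix associated with its smallest eigenvalue (or singular value), and checkRobustTolerance lowers N whenever several eigenvalues fall below robustTol relative to the largest one; this is what produces the "Smallest 2 eigenvalues below tolerance. Reducing N from 5 to 4" warning asserted in test_rho. A schematic version of that decision rule, with hypothetical names and not the actual RROMPy signature, is:

import numpy as np

def robust_denominator_degree(ev, N, robustTol):
    # Schematic robustness check (illustrative, not RROMPy's
    # checkRobustTolerance): ev holds the eigenvalues of the denominator
    # Gramian. If more than one eigenvalue is negligible relative to the
    # largest, the prescribed N is deemed too large and is reduced by the
    # number of extra near-zero eigenvalues.
    nSmall = int(np.sum(np.abs(ev) < robustTol * np.max(np.abs(ev))))
    if nSmall <= 1:
        return N                      # smallest eigenvalue well separated
    return max(N - nSmall + 1, 0)     # drop the redundant null directions

ev = np.array([1e-16, 1e-12, 1e-3, 1.])
print(robust_denominator_degree(ev, N = 3, robustTol = 1e-6))   # prints 2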
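The matrixFFT fixture shared by the new tests builds, up to the engine's affine parameter convention, the shifted operator A(mu) = F diag(1, ..., 100) F^H - mu I, with F the unitary DFT matrix, so the solution map mu -> A(mu)^{-1} b has poles exactly at the integers 1, ..., 100; this is why the pole checks in lagrange_pade.py, lagrange_rb.py and taylor_rb.py expect poles clustering near 1, ..., 7 inside the sampled interval. A short numpy-only check of that spectral claim (it does not use RROMPy):

import numpy as np

# Rebuild the operator used by the matrixFFT fixture and verify that its
# eigenvalues are exactly 1, ..., N, i.e. the poles of mu -> A(mu)^{-1} b.
N = 100
F = np.fft.fft(np.eye(N)) * N ** -.5                       # unitary DFT matrix
A0 = F.dot(np.multiply(np.arange(1, 1 + N), F.conj()).T)   # F diag(1..N) F^H
ev = np.linalg.eigvalsh(A0)                                # ascending, real
assert np.allclose(ev, np.arange(1, 1 + N))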