diff --git a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py index 04a1b12..966a028 100644 --- a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py +++ b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py @@ -1,514 +1,518 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>. # from copy import copy import numpy as np from .generic_approximant_lagrange_greedy import ( GenericApproximantLagrangeGreedy) from rrompy.reduction_methods.base.fit_utils import (polybases, polyvander, polydomcoeff, polyfitname) from rrompy.reduction_methods.lagrange import ApproximantLagrangePade from rrompy.reduction_methods.trained_model import TrainedModelPade as tModel from rrompy.reduction_methods.trained_model import TrainedModelData from rrompy.utilities.base.types import DictAny, List, HFEng from rrompy.utilities.base import purgeDict, verbosityDepth, customFit from rrompy.utilities.exception_manager import RROMPyWarning from rrompy.utilities.exception_manager import RROMPyException __all__ = ['ApproximantLagrangePadeGreedy'] class ApproximantLagrangePadeGreedy(GenericApproximantLagrangeGreedy, ApproximantLagrangePade): """ ROM greedy Pade' interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'polybasis': type of basis for interpolation; allowed values include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to 'MONOMIAL'; - 'Delta': difference between M and N in rational approximant; defaults to 0; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'errorEstimatorKind': kind of error estimator; available values include 'EXACT' and 'SIMPLIFIED'; defaults to 'SIMPLIFIED'; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - 'nTrainingPoints': number of training points; defaults to maxIter / refinementRatio; - 'nTestPoints': number of starting test points; defaults to 1; - 'trainingSetGenerator': training sample points generator; defaults to uniform sampler within muBounds; - 'interpRcond': tolerance for interpolation via numpy.polyfit; defaults to None; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter.
mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'polybasis': type of basis for interpolation; - 'Delta': difference between M and N in rational approximant; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'errorEstimatorKind': kind of error estimator; - 'maxIter': maximum number of greedy steps; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; - 'nTrainingPoints': number of training points; - 'nTestPoints': number of starting test points; - 'trainingSetGenerator': training sample points generator; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. errorEstimatorKind: kind of error estimator. maxIter: maximum number of greedy steps. refinementRatio: ratio of training points to be exhausted before training set refinement. nTrainingPoints: number of training points. nTestPoints: number of starting test points. trainingSetGenerator: training sample points generator. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust Pade' denominator management. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. """ def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["polybasis", "Delta", "errorEstimatorKind", "interpRcond", "robustTol"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) if self.verbosity >= 7: verbosityDepth("INIT", "Computing Taylor blocks of system.", timestamp = self.timestamp) nAs = self.HFEngine.nAs - 1 nbs = max(self.HFEngine.nbs, (nAs + 1) * self.homogeneized) self.As = [self.HFEngine.A(self.mu0, j + 1) for j in range(nAs)] self.bs = [self.HFEngine.b(self.mu0, j, self.homogeneized) for j in range(nbs)] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing Taylor blocks.", timestamp = self.timestamp) self._postInit() @property def approxParameters(self): """ Value of approximant parameters. Its assignment may change robustTol.
""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["polybasis", "Delta", "errorEstimatorKind", "interpRcond", "robustTol"], True, True, baselevel = 1) if "Delta" in list(approxParameters.keys()): self._Delta = approxParameters["Delta"] elif not hasattr(self, "_Delta") or self._Delta is None: self._Delta = 0 GenericApproximantLagrangeGreedy.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) self.Delta = self.Delta if "polybasis" in keyList: self.polybasis = approxParameters["polybasis"] elif not hasattr(self, "_polybasis") or self._polybasis is None: self.polybasis = "MONOMIAL" if "errorEstimatorKind" in keyList: self.errorEstimatorKind = approxParameters["errorEstimatorKind"] elif (not hasattr(self, "_errorEstimatorKind") or self.errorEstimatorKind is None): self.errorEstimatorKind = "SIMPLIFIED" if "interpRcond" in keyList: self.interpRcond = approxParameters["interpRcond"] elif not hasattr(self, "interpRcond") or self.interpRcond is None: self.interpRcond = None if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif not hasattr(self, "_robustTol") or self._robustTol is None: self.robustTol = 0 @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in polybases: raise RROMPyException("Sample type not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def Delta(self): """Value of Delta.""" return self._Delta @Delta.setter def Delta(self, Delta): if not np.isclose(Delta, np.floor(Delta)): raise RROMPyException("Delta must be an integer.") if Delta < 0: RROMPyWarning(("Error estimator unreliable for Delta < 0. " "Overloading of errorEstimator is suggested.")) else: Deltamin = (max(self.HFEngine.nbs, self.HFEngine.nAs * self.homogeneized) - 1 - 1 * (self.HFEngine.nAs > 1)) if Delta < Deltamin: RROMPyWarning(("Method may be unreliable for selected Delta. " "Suggested minimal value of Delta: {}.").format( Deltamin)) self._Delta = Delta self._approxParameters["Delta"] = self.Delta @property def errorEstimatorKind(self): """Value of errorEstimatorKind.""" return self._errorEstimatorKind @errorEstimatorKind.setter def errorEstimatorKind(self, errorEstimatorKind): errorEstimatorKind = errorEstimatorKind.upper() if errorEstimatorKind not in ["EXACT", "SIMPLIFIED"]: RROMPyWarning(("Error estimator kind not recognized. Overriding " "to 'SIMPLIFIED'.")) errorEstimatorKind = "SIMPLIFIED" self._errorEstimatorKind = errorEstimatorKind self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= np.abs(self.Delta): RROMPyWarning(("nTestPoints must be at least abs(Delta) + 1. 
" "Increasing value to abs(Delta) + 1.")) nTestPoints = np.abs(self.Delta) + 1 if not np.isclose(nTestPoints, np.int(nTestPoints)): raise RROMPyException("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "_nTestPoints") and self.nTestPoints is not None: nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """Standard residual-based error estimator.""" self.setupApprox() PM = self.trainedModel.data.P[:, -1] if np.any(np.isnan(PM)) or np.any(np.isinf(PM)): err = np.empty(len(mus)) err[:] = np.inf return err nAs = self.HFEngine.nAs - 1 nbs = max(self.HFEngine.nbs - 1, nAs * self.homogeneized) radiusmus = self.radiusPade(mus) radiussmus = self.radiusPade(self.mus) musTile = np.tile(radiusmus.reshape(-1, 1), [1, self.S]) smusCol = radiussmus.reshape(1, -1) num = np.prod(musTile[:, : self.S] - smusCol, axis = 1) den = self.trainedModel.getQVal(mus) self.assembleReducedResidualBlocks() vanderBase = np.polynomial.polynomial.polyvander(radiusmus, max(nAs, nbs)).T radiusb0 = vanderBase[: nbs + 1, :] # 'ij,jk,ik->k', resbb, radiusb0, radiusb0.conj() - b0resb0 = np.sum(self.resbb.dot(radiusb0) * radiusb0.conj(), axis = 0) + b0resb0 = np.sum(self.trainedModel.data.resbb.dot(radiusb0) + * radiusb0.conj(), axis = 0) RHSnorms = np.power(np.abs(b0resb0), .5) vanderBase = vanderBase[: -1, :] delta = self.S - len(self.trainedModel.data.Q) nbsEff = max(0, nbs - delta) if self.errorEstimatorKind == "SIMPLIFIED": radiusA = np.tensordot(PM, vanderBase[: nAs, :], 0) if delta == 0: radiusb = (np.abs(self.trainedModel.data.Q[-1]) * radiusb0[: -1, :]) else: #if self.errorEstimatorKind == "EXACT": momentQ = np.zeros(nbsEff, dtype = np.complex) momentQu = np.zeros((self.S, nAs), dtype = np.complex) radiusbTen = np.zeros((nbsEff, nbsEff, len(mus)), dtype = np.complex) radiusATen = np.zeros((nAs, nAs, len(mus)), dtype = np.complex) if nbsEff > 0: momentQ[0] = self.trainedModel.data.Q[-1] radiusbTen[0, :, :] = vanderBase[: nbsEff, :] momentQu[:, 0] = self.trainedModel.data.P[:, -1] radiusATen[0, :, :] = vanderBase[: nAs, :] Qvals = self.trainedModel.getQVal(self.mus) for k in range(1, max(nAs, nbs * (nbsEff > 0))): Qvals = Qvals * radiussmus if k > delta and k < nbs: momentQ[k - delta] = self._fitinv.dot(Qvals) radiusbTen[k - delta, k :, :] = ( radiusbTen[0, : delta - k, :]) if k < nAs: momentQu[:, k] = Qvals * self._fitinv radiusATen[k, k :, :] = radiusATen[0, : - k, :] if self.POD and nAs > 1: momentQu[:, 1 :] = self.samplingEngine.RPOD.dot( momentQu[:, 1 :]) radiusA = np.tensordot(momentQu, radiusATen, 1) if nbsEff > 0: radiusb = np.tensordot(momentQ, radiusbTen, 1) if ((self.errorEstimatorKind == "SIMPLIFIED" and delta == 0) or (self.errorEstimatorKind == "EXACT" and nbsEff > 0)): # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj() - ff = np.sum(self.resbb[delta + 1 :, delta + 1 :].dot(radiusb) - * radiusb.conj(), axis = 0) + ff = np.sum(self.trainedModel.data.resbb[delta + 1 :, delta + 1 :]\ + .dot(radiusb) * radiusb.conj(), axis = 0) # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj() - Lf = np.sum(np.tensordot(self.resAb[delta :, :, :], radiusA, 2) + Lf = np.sum(np.tensordot( + self.trainedModel.data.resAb[delta :, :, :], radiusA, 2) * radiusb.conj(), axis = 0) else: ff, Lf = 0., 0. 
# 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj() - LL = np.sum(np.tensordot(self.resAA, radiusA, 2) * radiusA.conj(), - axis = (0, 1)) + LL = np.sum(np.tensordot(self.trainedModel.data.resAA, radiusA, 2) + * radiusA.conj(), axis = (0, 1)) jOpt = np.power(np.abs(ff - 2. * np.real(Lf) + LL), .5) return (polydomcoeff[self.polybasis](self.S - 1) * jOpt * np.abs(num / den) / RHSnorms) def setupApprox(self): """ Compute Pade' interpolant. SVD-based robust eigenvalue management. """ if self.checkComputedApprox(): return if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". format(self.name()), timestamp = self.timestamp) self.computeScaleFactor() self.greedy() self._M = self.S - 1 self._N = self.S - 1 if self.Delta < 0: self._M += self.Delta else: self._N -= self.Delta if self.trainedModel is None: self.trainedModel = tModel() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data = TrainedModelData(self.trainedModel.name(), self.mu0, np.copy(self.samplingEngine.samples), self.HFEngine.rescalingExp) data.polytype = self.polybasis data.scaleFactor = self.scaleFactor data.mus = np.copy(self.mus) self.trainedModel.data = data else: self.trainedModel.data.projMat = np.copy( self.samplingEngine.samples) self.trainedModel.data.mus = np.copy(self.mus) if min(self.M, self.N) < 0: if self.verbosity >= 5: verbosityDepth("MAIN", "Minimal sample size not achieved.", timestamp = self.timestamp) Q = np.empty(max(self.N, 0) + 1, dtype = np.complex) P = np.empty((len(self.mus), max(self.M, 0) + 1), dtype = np.complex) Q[:] = np.nan P[:] = np.nan self.trainedModel.data.Q = np.copy(Q) self.trainedModel.data.P = np.copy(P) self.trainedModel.data.approxParameters = copy( self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Aborting computation of approximant.", timestamp = self.timestamp) return if self.N > 0: if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of denominator.", timestamp = self.timestamp) TS = polyvander[self.polybasis](self.radiusPade(self.mus), self.S - 1).T RHS = np.zeros(self.S) RHS[-1] = 1. fitOut = customFit(TS, RHS, full = True, rcond = self.interpRcond) if self.verbosity >= 2: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree {} " "through {}... Conditioning of " "system: {:.4e}.").format( self.S, self.S - 1, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] < self.S: RROMPyWarning(("Polyfit is poorly conditioned. 
Starting " "preemptive termination of computation of " "approximant.")) Q = np.empty(max(self.N, 0) + 1, dtype = np.complex) P = np.empty((len(self.mus), max(self.M, 0) + 1), dtype = np.complex) Q[:] = np.nan P[:] = np.nan self.trainedModel.data.Q = np.copy(Q) self.trainedModel.data.P = np.copy(P) self.trainedModel.data.approxParameters = copy( self.approxParameters) if self.verbosity >= 7: verbosityDepth("DEL", "Aborting computation of denominator.", timestamp = self.timestamp) if self.verbosity >= 5: verbosityDepth("DEL", "Aborting computation of approximant.", timestamp = self.timestamp) return self._fitinv = fitOut[0] while self.N > 0: Ghalf = (TS[: self.N + 1, :] * self._fitinv).T if self.POD: self.Ghalf = self.samplingEngine.RPOD.dot(Ghalf) ev, eV = self.findeveVGQR(2) else: self.Ghalf = self.samplingEngine.samples.dot(Ghalf) ev, eV = self.findeveVGQR(2) Nstable = np.sum(np.abs(ev) >= self.robustTol * np.linalg.norm(ev)) if self.N <= Nstable: break if self.verbosity >= 2: verbosityDepth("MAIN", ("Smallest {} eigenvalues below " "tolerance. Reducing N to {}.")\ .format(self.N - Nstable + 1, Nstable), timestamp = self.timestamp) self._N = Nstable if self.N <= 0: self._N = 0 eV = np.ones((1, 1)) Q = eV[:, 0] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.", timestamp = self.timestamp) else: Q = np.ones((1,), dtype = np.complex) self.trainedModel.data.Q = np.copy(Q) if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.", timestamp = self.timestamp) Qevaldiag = np.diag(self.trainedModel.getQVal(self.mus)) while self.M >= 0: fitVander = polyvander[self.polybasis](self.radiusPade(self.mus), self.M) w = None if self.M == self.S - 1: w = "AUTO" fitOut = customFit(fitVander, Qevaldiag, full = True, w = w, rcond = self.interpRcond) if self.verbosity >= 2: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree {} " "through {}... Conditioning of " "system: {:.4e}.").format( self.S, self.M, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] == self.M + 1: P = fitOut[0].T break RROMPyWarning(("Polyfit is poorly conditioned. Reducing M from {} " "to {}. Exact snapshot interpolation not " "guaranteed.").format(self.M, fitOut[1][1] - 1)) self._M = fitOut[1][1] - 1 if self.M < 0: raise RROMPyException(("Instability in computation of numerator. 
" "Aborting.")) P = np.atleast_2d(P) if self.POD: P = self.samplingEngine.RPOD.dot(P) self.trainedModel.data.P = np.copy(P) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.", timestamp = self.timestamp) self.trainedModel.data.approxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.", timestamp = self.timestamp) def assembleReducedResidualBlocks(self): """Build affine blocks of reduced linear system through projections.""" - computeResbb = self.resbb is None - computeResAb = self.resAb is None or self.resAb.shape[1] != self.S - computeResAA = self.resAA is None or self.resAA.shape[0] != self.S + computeResbb = not hasattr(self.trainedModel.data, "resbb") + computeResAb = (not hasattr(self.trainedModel.data, "resAb") + or self.trainedModel.data.resAb.shape[1] != self.S) + computeResAA = (not hasattr(self.trainedModel.data, "resAA") + or self.trainedModel.data.resAA.shape[0] != self.S) pMat = self.trainedModel.data.projMat scaling = self.trainedModel.data.scaleFactor if computeResbb or computeResAb or computeResAA: if self.verbosity >= 7: verbosityDepth("INIT", "Projecting Taylor terms of residual.", timestamp = self.timestamp) if computeResbb: self.assembleReducedResidualBlocksbb(self.bs, pMat, scaling) if computeResAb: self.assembleReducedResidualBlocksAb(self.As, self.bs[1 :], pMat, scaling) if computeResAA: self.assembleReducedResidualBlocksAA(self.As, pMat, scaling) if self.verbosity >= 7: verbosityDepth("DEL", ("Done setting up Taylor " "decomposition of residual."), timestamp = self.timestamp) diff --git a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py index 0d24112..48283ac 100644 --- a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py +++ b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py @@ -1,241 +1,245 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from copy import copy from .generic_approximant_lagrange_greedy import ( GenericApproximantLagrangeGreedy) from rrompy.reduction_methods.lagrange import ApproximantLagrangeRB from rrompy.reduction_methods.trained_model import TrainedModelRB as tModel from rrompy.reduction_methods.trained_model import TrainedModelData from rrompy.utilities.base.types import DictAny, HFEng, List from rrompy.utilities.base import verbosityDepth from rrompy.utilities.exception_manager import RROMPyException __all__ = ['ApproximantLagrangeRBGreedy'] class ApproximantLagrangeRBGreedy(GenericApproximantLagrangeGreedy, ApproximantLagrangeRB): """ ROM greedy RB approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. 
approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - 'nTrainingPoints': number of training points; defaults to maxIter / refinementRatio; - 'nTestPoints': number of starting test points; defaults to 1; - 'trainingSetGenerator': training sample points generator; defaults to uniform sampler within muBounds. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; - 'nTrainingPoints': number of training points; - 'nTestPoints': number of starting test points; - 'trainingSetGenerator': training sample points generator; - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. maxIter: maximum number of greedy steps. refinementRatio: ratio of training points to be exhausted before training set refinement. nTrainingPoints: number of training points. nTestPoints: number of starting test points. trainingSetGenerator: training sample points generator. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt theta(mu). bs: List of numpy vectors representing coefficients of linear system RHS wrt theta(mu). thetaAs: List of callables representing coefficients of linear system matrix wrt mu. thetabs: List of callables representing coefficients of linear system RHS wrt mu. ARBs: List of sparse matrices (in CSC format) representing coefficients of compressed linear system matrix wrt theta(mu). bRBs: List of numpy vectors representing coefficients of compressed linear system RHS wrt theta(mu). 
""" def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) if self.verbosity >= 10: verbosityDepth("INIT", "Computing affine blocks of system.", timestamp = self.timestamp) self.As = self.HFEngine.affineLinearSystemA(self.mu0) self.bs = self.HFEngine.affineLinearSystemb(self.mu0, self.homogeneized) if self.verbosity >= 10: verbosityDepth("DEL", "Done computing affine blocks.", timestamp = self.timestamp) self._postInit() @property def R(self): """Value of R.""" return self._S @R.setter def R(self, R): raise RROMPyException(("R is used just to simplify inheritance, and " "its value cannot be changed from that of S.")) def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """ Standard residual-based error estimator. Unreliable for unstable problems (inf-sup constant is missing). """ self.setupApprox() self.assembleReducedResidualBlocks() nmus = len(mus) - nAs = self.resAA.shape[1] - nbs = self.resbb.shape[0] + nAs = self.trainedModel.data.resAA.shape[1] + nbs = self.trainedModel.data.resbb.shape[0] thetaAs = self.trainedModel.data.thetaAs thetabs = self.trainedModel.data.thetabs radiusA = np.empty((self.S, nAs, nmus), dtype = np.complex) radiusb = np.empty((nbs, nmus), dtype = np.complex) verb = self.trainedModel.verbosity self.trainedModel.verbosity = 0 if verb >= 5: mustr = mus if nmus > 2: mustr = "[{} ..({}).. {}]".format(mus[0], nmus - 2, mus[-1]) verbosityDepth("INIT", ("Computing RB solution at mu = " "{}.").format(mustr), timestamp = self.timestamp) for j in range(nmus): mu = mus[j] + uApp = self.getApproxReduced(mu) for i in range(nAs): - radiusA[:, i, j] = eval(thetaAs[i]) * self.getApproxReduced(mu) + radiusA[:, i, j] = eval(thetaAs[i]) * uApp for i in range(nbs): radiusb[i, j] = eval(thetabs[i]) if verb >= 5: verbosityDepth("DEL", "Done computing RB solution.", timestamp = self.timestamp) self.trainedModel.verbosity = verb # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj() - ff = np.sum(self.resbb.dot(radiusb) * radiusb.conj(), axis = 0) - # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj() - Lf = np.sum(np.tensordot(self.resAb, radiusA, 2) * radiusb.conj(), + ff = np.sum(self.trainedModel.data.resbb.dot(radiusb) * radiusb.conj(), axis = 0) + # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj() + Lf = np.sum(np.tensordot(self.trainedModel.data.resAb, radiusA, 2) + * radiusb.conj(), axis = 0) # 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj() - LL = np.sum(np.tensordot(self.resAA, radiusA, 2) * radiusA.conj(), - axis = (0, 1)) + LL = np.sum(np.tensordot(self.trainedModel.data.resAA, radiusA, 2) + * radiusA.conj(), axis = (0, 1)) return np.abs((LL - 2. * np.real(Lf) + ff) / ff) ** .5 def setupApprox(self): """Compute RB projection matrix.""" if self.checkComputedApprox(): return if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". 
format(self.name()), timestamp = self.timestamp) self.greedy() if self.verbosity >= 7: verbosityDepth("INIT", "Computing projection matrix.", timestamp = self.timestamp) pMat = self.samplingEngine.samples if self.trainedModel is None: self.trainedModel = tModel() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data = TrainedModelData(self.trainedModel.name(), self.mu0, np.copy(pMat), self.HFEngine.rescalingExp) data.thetaAs = self.HFEngine.affineWeightsA(self.mu0) data.thetabs = self.HFEngine.affineWeightsb(self.mu0, self.homogeneized) data.ARBs, data.bRBs = self.assembleReducedSystem(pMat) data.mus = np.copy(self.mus) self.trainedModel.data = data else: pMatOld = self.trainedModel.data.projMat Sold = pMatOld.shape[1] ARBs, bRBs = self.assembleReducedSystem(pMat[:, Sold :], pMatOld) self.trainedModel.data.ARBs = ARBs self.trainedModel.data.bRBs = bRBs self.trainedModel.data.projMat = np.copy(pMat) self.trainedModel.data.mus = np.copy(self.mus) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing projection matrix.", timestamp = self.timestamp) self.trainedModel.data.approxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.", timestamp = self.timestamp) def assembleReducedResidualBlocks(self): """Build affine blocks of RB linear system through projections.""" - computeResbb = self.resbb is None - computeResAb = self.resAb is None or self.resAb.shape[1] != self.S - computeResAA = self.resAA is None or self.resAA.shape[0] != self.S + computeResbb = not hasattr(self.trainedModel.data, "resbb") + computeResAb = (not hasattr(self.trainedModel.data, "resAb") + or self.trainedModel.data.resAb.shape[1] != self.S) + computeResAA = (not hasattr(self.trainedModel.data, "resAA") + or self.trainedModel.data.resAA.shape[0] != self.S) if computeResbb or computeResAb or computeResAA: pMat = self.trainedModel.data.projMat if self.verbosity >= 7: verbosityDepth("INIT", "Projecting affine terms of residual.", timestamp = self.timestamp) if computeResbb: self.assembleReducedResidualBlocksbb(self.bs, pMat) if computeResAb: self.assembleReducedResidualBlocksAb(self.As, self.bs, pMat) if computeResAA: self.assembleReducedResidualBlocksAA(self.As, pMat) if self.verbosity >= 7: verbosityDepth("DEL", ("Done setting up affine decomposition " "of residual."), timestamp = self.timestamp) diff --git a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py index d2ac3a5..e2b7417 100644 --- a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py +++ b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py @@ -1,627 +1,625 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
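# --- Editor's sketch (illustrative only, not part of this patch) -----------
# The class below drives a standard greedy loop: take a starting snapshot,
# evaluate an a posteriori error estimator on a training grid, sample where
# the estimate peaks, and stop once the estimate is uniformly below the
# tolerance. A minimal self-contained version, with made-up names
# (snapshot, estimator), could read:
#
#     import numpy as np
#
#     def greedy_skeleton(snapshot, estimator, muTrain,
#                         greedyTol = 1e-2, maxIter = 100):
#         """Greedily pick snapshot parameters from a training grid."""
#         mus = [muTrain[0]]                 # starting sample point
#         muTrain = np.delete(muTrain, 0)
#         snapshot(mus[-1])                  # HF solve + storage
#         for _ in range(maxIter - 1):
#             estTrain = estimator(muTrain)  # estimate error on grid
#             j = int(np.argmax(estTrain))
#             if estTrain[j] <= greedyTol:   # uniform tolerance reached
#                 break
#             mus.append(muTrain[j])         # enrich at the peak
#             muTrain = np.delete(muTrain, j)
#             snapshot(mus[-1])
#         return np.array(mus)
# ----------------------------------------------------------------------------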
# from copy import deepcopy as copy import numpy as np from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.reduction_methods.lagrange.generic_approximant_lagrange import ( GenericApproximantLagrange) from rrompy.utilities.base.types import Np1D, Np2D, DictAny, HFEng, Tuple, List from rrompy.utilities.base import purgeDict, verbosityDepth from rrompy.utilities.exception_manager import (RROMPyException, modeAssert, RROMPyWarning) __all__ = ['GenericApproximantLagrangeGreedy'] class GenericApproximantLagrangeGreedy(GenericApproximantLagrange): """ ROM greedy Lagrange interpolant computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'interactive': whether to interactively terminate greedy algorithm; defaults to False; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - 'nTrainingPoints': number of training points; defaults to maxIter / refinementRatio; - 'nTestPoints': number of starting test points; defaults to 1; - 'trainingSetGenerator': training sample points generator; defaults to uniform sampler within muBounds; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; - 'nTrainingPoints': number of training points; - 'nTestPoints': number of starting test points; - 'trainingSetGenerator': training sample points generator. - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. maxIter: maximum number of greedy steps. refinementRatio: ratio of training points to be exhausted before training set refinement. nTrainingPoints: number of training points. nTestPoints: number of starting test points. trainingSetGenerator: training sample points generator. robustTol: tolerance for robust Pade' denominator management. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. 
""" TOL_INSTABILITY = 1e-6 def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["muBounds", "greedyTol", "interactive", "maxIter", "refinementRatio", "nTrainingPoints", "nTestPoints", "trainingSetGenerator"]) super(GenericApproximantLagrange, self).__init__( HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) self._postInit() @property def approxParameters(self): """Value of approximant parameters. Its assignment may change S.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["muBounds", "greedyTol", "interactive", "maxIter", "refinementRatio", "nTrainingPoints", "nTestPoints", "trainingSetGenerator"], True, True, baselevel = 1) GenericApproximant.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "muBounds" in keyList: self.muBounds = approxParameters["muBounds"] elif not hasattr(self, "_muBounds") or self.muBounds is None: self.muBounds = [[0.], [1.]] if "greedyTol" in keyList: self.greedyTol = approxParameters["greedyTol"] elif not hasattr(self, "_greedyTol") or self.greedyTol is None: self.greedyTol = 1e-2 if "interactive" in keyList: self.interactive = approxParameters["interactive"] elif not hasattr(self, "interactive") or self.interactive is None: self.interactive = False if "maxIter" in keyList: self.maxIter = approxParameters["maxIter"] elif not hasattr(self, "_maxIter") or self.maxIter is None: self.maxIter = 1e2 if "refinementRatio" in keyList: self.refinementRatio = approxParameters["refinementRatio"] elif (not hasattr(self, "_refinementRatio") or self.refinementRatio is None): self.refinementRatio = 0.2 if "nTrainingPoints" in keyList: self.nTrainingPoints = approxParameters["nTrainingPoints"] elif (not hasattr(self, "_nTrainingPoints") or self.nTrainingPoints is None): self.nTrainingPoints = np.int(np.ceil(self.maxIter / self.refinementRatio)) if "nTestPoints" in keyList: self.nTestPoints = approxParameters["nTestPoints"] elif not hasattr(self, "_nTestPoints") or self.nTestPoints is None: self.nTestPoints = 1 if "trainingSetGenerator" in keyList: self.trainingSetGenerator = ( approxParameters["trainingSetGenerator"]) elif (not hasattr(self, "_trainingSetGenerator") or self.trainingSetGenerator is None): from rrompy.utilities.parameter_sampling import QuadratureSampler self.trainingSetGenerator = QuadratureSampler(self.muBounds, "UNIFORM") del QuadratureSampler @property def S(self): """Value of S.""" if not hasattr(self, "_mus") or self.mus is None: return 0 return len(self.mus) @S.setter def S(self, S): raise RROMPyException(("S is used just to simplify inheritance, and " "its value cannot be changed.")) @property def mus(self): """Value of mus.""" return self._mus @mus.setter def mus(self, mus): self._mus = np.array(mus, dtype = np.complex) @property def muBounds(self): """Value of muBounds.""" return self._muBounds @muBounds.setter def muBounds(self, muBounds): if len(muBounds) != 2: raise RROMPyException("2 limits must be specified.") try: muBounds = muBounds.tolist() except: muBounds = list(muBounds) for j in range(2): try: len(muBounds[j]) except: muBounds[j] = 
np.array([muBounds[j]]) if len(muBounds[0]) != len(muBounds[1]): raise RROMPyException("The bounds must have the same length.") self._muBounds = muBounds @property def greedyTol(self): """Value of greedyTol.""" return self._greedyTol @greedyTol.setter def greedyTol(self, greedyTol): if greedyTol < 0: raise RROMPyException("greedyTol must be non-negative.") if hasattr(self, "_greedyTol") and self.greedyTol is not None: greedyTolold = self.greedyTol else: greedyTolold = -1 self._greedyTol = greedyTol self._approxParameters["greedyTol"] = self.greedyTol if greedyTolold != self.greedyTol: self.resetSamples() @property def maxIter(self): """Value of maxIter.""" return self._maxIter @maxIter.setter def maxIter(self, maxIter): if maxIter <= 0: raise RROMPyException("maxIter must be positive.") if hasattr(self, "_maxIter") and self.maxIter is not None: maxIterold = self.maxIter else: maxIterold = -1 self._maxIter = maxIter self._approxParameters["maxIter"] = self.maxIter if maxIterold != self.maxIter: self.resetSamples() @property def refinementRatio(self): """Value of refinementRatio.""" return self._refinementRatio @refinementRatio.setter def refinementRatio(self, refinementRatio): if refinementRatio <= 0. or refinementRatio > 1.: raise RROMPyException(("refinementRatio must be between 0 " "(excluded) and 1.")) if (hasattr(self, "_refinementRatio") and self.refinementRatio is not None): refinementRatioold = self.refinementRatio else: refinementRatioold = -1 self._refinementRatio = refinementRatio self._approxParameters["refinementRatio"] = self.refinementRatio if refinementRatioold != self.refinementRatio: self.resetSamples() @property def nTrainingPoints(self): """Value of nTrainingPoints.""" return self._nTrainingPoints @nTrainingPoints.setter def nTrainingPoints(self, nTrainingPoints): if nTrainingPoints <= 1: raise RROMPyException("nTrainingPoints must be greater than 1.") if not np.isclose(nTrainingPoints, np.int(nTrainingPoints)): raise RROMPyException("nTrainingPoints must be an integer.") nTrainingPoints = np.int(nTrainingPoints) if (hasattr(self, "_nTrainingPoints") and self.nTrainingPoints is not None): nTrainingPointsold = self.nTrainingPoints else: nTrainingPointsold = -1 self._nTrainingPoints = nTrainingPoints self._approxParameters["nTrainingPoints"] = self.nTrainingPoints if nTrainingPointsold != self.nTrainingPoints: self.resetSamples() @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= 0: raise RROMPyException("nTestPoints must be positive.") if not np.isclose(nTestPoints, np.int(nTestPoints)): raise RROMPyException("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "_nTestPoints") and self.nTestPoints is not None: nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() @property def trainingSetGenerator(self): """Value of trainingSetGenerator.""" return self._trainingSetGenerator @trainingSetGenerator.setter def trainingSetGenerator(self, trainingSetGenerator): if 'generatePoints' not in dir(trainingSetGenerator): raise RROMPyException("trainingSetGenerator type not recognized.") if (hasattr(self, '_trainingSetGenerator') and self.trainingSetGenerator is not None): trainingSetGeneratorOld = self.trainingSetGenerator self._trainingSetGenerator = trainingSetGenerator 
self._approxParameters["trainingSetGenerator"] = ( self.trainingSetGenerator) if (not 'trainingSetGeneratorOld' in locals() or trainingSetGeneratorOld != self.trainingSetGenerator): self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() - self.resbb = None - self.resAb = None - self.resAA = None self._mus = [] def initEstNormer(self): """Initialize estimator norm engine.""" if not hasattr(self, "estNormer"): from rrompy.hfengines.base import ProblemEngineBase as PEB self.estNormer = PEB() # L2 norm self.estNormer.V = self.HFEngine.V self.estNormer.buildEnergyNormForm() def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """ Standard residual-based error estimator with explicit residual computation. """ self.setupApprox() nmus = len(mus) err = np.empty(nmus) if self.HFEngine.nbs == 1: RHS = self.getRHS(mus[0], homogeneized = self.homogeneized) RHSNorm = self.estNormer.norm(RHS) for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / RHSNorm else: for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) RHS = self.getRHS(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / self.estNormer.norm(RHS) return np.abs(err) def getMaxErrorEstimator(self, mus, plot : bool = False)\ -> Tuple[Np1D, int, float]: """ Compute maximum of (and index of maximum of) error estimator over given parameters. """ errorEstTrain = self.errorEstimator(mus) idxMaxEst = np.argmax(errorEstTrain) maxEst = errorEstTrain[idxMaxEst] if plot and not np.all(np.isinf(errorEstTrain)): from matplotlib import pyplot as plt onemus = np.ones(self.S) plt.figure() plt.semilogy(np.real(mus), errorEstTrain, 'k') plt.semilogy(np.real(mus[[0, -1]]), [self.greedyTol] * 2, 'r--') plt.semilogy(np.real(self.mus), 2. 
* self.greedyTol * onemus, '*m') plt.semilogy(np.real(mus[idxMaxEst]), maxEst, 'xr') plt.grid() plt.show() plt.close() return errorEstTrain, idxMaxEst, maxEst def greedyNextSample(self, muidx:int, plotEst : bool = False)\ -> Tuple[Np1D, int, float, complex]: """Compute next greedy snapshot of solution map.""" modeAssert(self._mode, message = "Cannot add greedy sample.") mu = self.muTrain[muidx] if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} to " "training set.").format( self.samplingEngine.nsamples + 1, mu), timestamp = self.timestamp) self.mus = np.append(self.mus, mu) idxs = np.arange(len(self.muTrain)) mask = np.ones_like(idxs, dtype = bool) mask[muidx] = False idxs = idxs[mask] self.muTrain = self.muTrain[idxs] self.samplingEngine.nextSample(mu, homogeneized = self.homogeneized) errorEstTrain, muidx, maxErrorEst = self.getMaxErrorEstimator( self.muTrain, plotEst) return errorEstTrain, muidx, maxErrorEst, self.muTrain[muidx] def greedy(self, plotEst : bool = False): """Compute greedy snapshots of solution map.""" modeAssert(self._mode, message = "Cannot start greedy algorithm.") if self.samplingEngine.samples is not None: return if self.verbosity >= 2: verbosityDepth("INIT", "Starting computation of snapshots.", timestamp = self.timestamp) self.resetSamples() self.mus, _ = self.trainingSetGenerator.generatePoints( self.nTestPoints) self.mus = np.array([x[0] for x in self.mus], dtype = np.complex) nTrain = self.nTrainingPoints muTrainBase, _ = self.trainingSetGenerator.generatePoints(nTrain) self.muTrain = np.empty(len(muTrainBase) + 1, dtype = np.complex) j = 0 for mu in muTrainBase: if not np.any(np.isclose(self.mus, mu)): self.muTrain[j] = mu[0] j += 1 self.muTrain[j] = self.mus[-1] self.muTrain = self.muTrain[: j + 1] self.mus = self.mus[:-1] for j in range(len(self.mus)): if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} to " "training set.").format( self.samplingEngine.nsamples + 1, self.mus[j]), timestamp = self.timestamp) self.samplingEngine.nextSample(self.mus[j], homogeneized = self.homogeneized) errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample(-1, plotEst) if self.verbosity >= 2: verbosityDepth("MAIN", "Uniform error estimate {:.4e}.".format( maxErrorEst), timestamp = self.timestamp) trainedModelOld = copy(self.trainedModel) while (self.samplingEngine.nsamples < self.maxIter and maxErrorEst > self.greedyTol): if (1. - self.refinementRatio) * nTrain > len(self.muTrain): muTrainExtra, _ = self.trainingSetGenerator.refine(nTrain) self.muTrain = np.sort(np.append(self.muTrain, muTrainExtra)) nTrain += len(muTrainExtra) if self.verbosity >= 5: verbosityDepth("MAIN", ("Enriching training set by {} " "elements.").format( len(muTrainExtra)), timestamp = self.timestamp) muTrainOld, maxErrorEstOld = self.muTrain, maxErrorEst errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample( muidx, plotEst) if self.verbosity >= 2: verbosityDepth("MAIN", "Uniform error estimate {:.4e}.".format( maxErrorEst), timestamp = self.timestamp) if (np.isnan(maxErrorEst) or np.isinf(maxErrorEst) or maxErrorEstOld < maxErrorEst * self.TOL_INSTABILITY): RROMPyWarning(("Instability in a posteriori estimator. 
" "Starting preemptive greedy loop termination.")) maxErrorEst = maxErrorEstOld self.muTrain = muTrainOld self.mus = self.mus[:-1] self.samplingEngine.popSample() self.trainedModel.data = copy(trainedModelOld.data) break trainedModelOld.data = copy(self.trainedModel.data) if (self.interactive and self.samplingEngine.nsamples >= self.maxIter): verbosityDepth("MAIN", ("Maximum number of iterations {} " "reached. Want to increase maxIter " "and continue? Y/N").format( self.maxIter), timestamp = self.timestamp, end = "") increasemaxIter = input() if increasemaxIter.upper() == "Y": verbosityDepth("MAIN", "Doubling value of maxIter...", timestamp = self.timestamp) self._maxIter *= 2 if (self.interactive and maxErrorEst <= self.greedyTol): verbosityDepth("MAIN", ("Required tolerance {} achieved. Want " "to decrease greedyTol and continue? " "Y/N").format(self.greedyTol), timestamp = self.timestamp, end = "") increasemaxIter = input() if increasemaxIter.upper() == "Y": verbosityDepth("MAIN", "Halving value of greedyTol...", timestamp = self.timestamp) self._greedyTol *= .5 if self.verbosity >= 2: verbosityDepth("DEL", ("Done computing snapshots (final snapshot " "count: {}).").format( self.samplingEngine.nsamples), timestamp = self.timestamp) def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return (super().checkComputedApprox() and self.S == self.trainedModel.data.projMat.shape[1]) def computeScaleFactor(self): """Compute parameter rescaling factor.""" modeAssert(self._mode, message = "Cannot compute rescaling factor.") self.scaleFactor= np.abs(np.power(self.trainingSetGenerator.lims[0][0], self.HFEngine.rescalingExp) - np.power(self.trainingSetGenerator.lims[1][0], self.HFEngine.rescalingExp)) / 2. def assembleReducedResidualBlocksbb(self, bs:List[Np1D], pMat:Np2D, scaling : float = 1.): """ Build blocks (of type bb) of reduced linear system through projections. """ self.initEstNormer() nbs = len(bs) - self.resbb = np.empty((nbs, nbs), dtype = np.complex) + resbb = np.empty((nbs, nbs), dtype = np.complex) for i in range(nbs): Mbi = scaling ** i * bs[i] - self.resbb[i, i] = self.estNormer.innerProduct(Mbi, Mbi) + resbb[i, i] = self.estNormer.innerProduct(Mbi, Mbi) for j in range(i): Mbj = scaling ** j * bs[j] - self.resbb[i, j] = self.estNormer.innerProduct(Mbj, Mbi) + resbb[i, j] = self.estNormer.innerProduct(Mbj, Mbi) for i in range(nbs): for j in range(i + 1, nbs): - self.resbb[i, j] = self.resbb[j, i].conj() + resbb[i, j] = resbb[j, i].conj() + self.trainedModel.data.resbb = resbb def assembleReducedResidualBlocksAb(self, As:List[Np2D], bs:List[Np1D], pMat:Np2D, scaling : float = 1.): """ Build blocks (of type Ab) of reduced linear system through projections. 
""" self.initEstNormer() nAs = len(As) nbs = len(bs) - if not hasattr(self, "resAb") or self.resAb is None: - self.resAb = np.empty((nbs, self.S, nAs), dtype = np.complex) + if (not hasattr(self.trainedModel.data, "resAb") + or self.trainedModel.data.resAb is None): + resAb = np.empty((nbs, self.S, nAs), dtype = np.complex) for j in range(nAs): MAj = scaling ** (j + 1) * As[j].dot(pMat) for i in range(nbs): Mbi = scaling ** (i + 1) * bs[i] - self.resAb[i, :, j] = self.estNormer.innerProduct(MAj, Mbi) + resAb[i, :, j] = self.estNormer.innerProduct(MAj, Mbi) else: - Sold = self.resAb.shape[1] + Sold = self.trainedModel.data.resAb.shape[1] if Sold > self.S: - self.resAb = self.resAb[:, : self.S, :] + resAb = self.trainedModel.data.resAb[:, : self.S, :] else: - resAbNew = np.empty((nbs, self.S, nAs), dtype = np.complex) - resAbNew[:, : Sold, :] = self.resAb - self.resAb = resAbNew + resAb = np.empty((nbs, self.S, nAs), dtype = np.complex) + resAb[:, : Sold, :] = self.trainedModel.data.resAb for j in range(nAs): MAj = scaling ** (j + 1) * As[j].dot(pMat[:, Sold :]) for i in range(nbs): Mbi = scaling ** (i + 1) * bs[i] - self.resAb[i, Sold :, j] = self.estNormer.innerProduct( - MAj, Mbi) + resAb[i, Sold :, j] = self.estNormer.innerProduct(MAj, + Mbi) + self.trainedModel.data.resAb = resAb def assembleReducedResidualBlocksAA(self, As:List[Np2D], pMat:Np2D, scaling : float = 1.): """ Build blocks (of type AA) of reduced linear system through projections. """ self.initEstNormer() nAs = len(As) - if not hasattr(self, "resAA") or self.resAA is None: - self.resAA = np.empty((self.S, nAs, self.S, nAs), - dtype = np.complex) + if (not hasattr(self.trainedModel.data, "resAA") + or self.trainedModel.data.resAA is None): + resAA = np.empty((self.S, nAs, self.S, nAs), dtype = np.complex) for i in range(nAs): MAi = scaling ** (i + 1) * As[i].dot(pMat) - self.resAA[:, i, :, i] = self.estNormer.innerProduct(MAi, MAi) + resAA[:, i, :, i] = self.estNormer.innerProduct(MAi, MAi) for j in range(i): MAj = scaling ** (j + 1) * As[j].dot(pMat) - self.resAA[:, i, :, j] = self.estNormer.innerProduct(MAj, - MAi) + resAA[:, i, :, j] = self.estNormer.innerProduct(MAj, MAi) for i in range(nAs): for j in range(i + 1, nAs): - self.resAA[:, i, :, j] = self.resAA[:, j, :, i].T.conj() + resAA[:, i, :, j] = resAA[:, j, :, i].T.conj() else: - Sold = self.resAA.shape[0] + Sold = self.trainedModel.data.resAA.shape[0] if Sold > self.S: - self.resAA = self.resAA[: self.S, :, : self.S, :] + resAA = self.trainedModel.data.resAA[: self.S, :, : self.S, :] else: - resAANew = np.empty((self.S, nAs, self.S, nAs), - dtype = np.complex) - resAANew[: Sold, :, : Sold, :] = self.resAA - self.resAA = resAANew + resAA = np.empty((self.S, nAs, self.S, nAs), + dtype = np.complex) + resAA[: Sold, :, : Sold, :] = self.trainedModel.data.resAA for i in range(nAs): MAi = scaling ** (i + 1) * As[i].dot(pMat) - self.resAA[: Sold, i, Sold :, i] = ( - self.estNormer.innerProduct(MAi[:, Sold :], - MAi[:, : Sold])) - self.resAA[Sold :, i, : Sold, i] = ( - self.resAA[: Sold, i, Sold :, i].T.conj()) - self.resAA[Sold :, i, Sold :, i] = ( - self.estNormer.innerProduct(MAi[:, Sold :], - MAi[:, Sold :])) + resAA[: Sold, i, Sold :, i] = self.estNormer.innerProduct( + MAi[:, Sold :], + MAi[:, : Sold]) + resAA[Sold :, i, : Sold, i] = resAA[: Sold, i, + Sold :, i].T.conj() + resAA[Sold :, i, Sold :, i] = self.estNormer.innerProduct( + MAi[:, Sold :], + MAi[:, Sold :]) for j in range(i): MAj = scaling ** (j + 1) * As[j].dot(pMat) - self.resAA[: Sold, i, Sold :, j] = ( + 
resAA[: Sold, i, Sold :, j] = ( self.estNormer.innerProduct(MAj[:, Sold :], MAi[:, : Sold])) - self.resAA[Sold :, i, : Sold, j] = ( + resAA[Sold :, i, : Sold, j] = ( self.estNormer.innerProduct(MAj[:, : Sold], MAi[:, Sold :])) - self.resAA[Sold :, i, Sold :, j] = ( + resAA[Sold :, i, Sold :, j] = ( self.estNormer.innerProduct(MAj[:, Sold :], MAi[:, Sold :])) for i in range(nAs): for j in range(i + 1, nAs): - self.resAA[: Sold, i, Sold :, j] = ( - self.resAA[Sold :, j, : Sold, i].T.conj()) - self.resAA[Sold :, i, : Sold, j] = ( - self.resAA[: Sold, j, Sold :, i].T.conj()) - self.resAA[Sold :, i, Sold :, j] = ( - self.resAA[Sold :, j, Sold :, i].T.conj()) + resAA[: Sold, i, Sold :, j] = resAA[Sold :, j, + : Sold, i].T.conj() + resAA[Sold :, i, : Sold, j] = resAA[: Sold, j, + Sold :, i].T.conj() + resAA[Sold :, i, Sold :, j] = resAA[Sold :, j, + Sold :, i].T.conj() + self.trainedModel.data.resAA = resAA
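# --- Editor's note (hedged) -------------------------------------------------
# The Sold-based branches in assembleReducedResidualBlocksAb/AA above apply
# an incremental Gramian update: when the greedy loop appends columns to the
# projection matrix, only the new cross and diagonal blocks are computed and
# Hermitian symmetry fills in the rest. A minimal self-contained sketch of
# the pattern (grow_gramian and inner are illustrative names, not RROMPy
# API):

import numpy as np

def inner(U, V):
    # inner(U, V)[a, b] = <U[:, b], V[:, a]>, shape (V columns, U columns)
    return V.conj().T.dot(U)

def grow_gramian(Gold, Pold, Pnew):
    """Extend Gold = inner(Pold, Pold) to the enlarged basis [Pold, Pnew],
    recomputing only the new blocks (mirrors the Sold branches above)."""
    Sold, Snew = Gold.shape[0], Pnew.shape[1]
    G = np.empty((Sold + Snew, Sold + Snew), dtype = complex)
    G[: Sold, : Sold] = Gold                        # reuse cached block
    G[: Sold, Sold :] = inner(Pnew, Pold)           # new cross block
    G[Sold :, : Sold] = G[: Sold, Sold :].T.conj()  # Hermitian symmetry
    G[Sold :, Sold :] = inner(Pnew, Pnew)           # new diagonal block
    return G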
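# --- Editor's note (hedged) -------------------------------------------------
# The recurring "- self.resbb ... + self.trainedModel.data.resbb" hunks all
# apply one refactoring: the residual blocks migrate from the approximant
# onto trainedModel.data, so the trained model is self-contained and
# staleness is detected via hasattr/shape checks instead of None sentinels.
# A sketch of that guard under made-up names (ensure_blocks, build_*):

from types import SimpleNamespace
import numpy as np

def ensure_blocks(data, S, build_bb, build_Ab, build_AA):
    """Rebuild cached residual blocks only when missing or stale."""
    if not hasattr(data, "resbb"):
        data.resbb = build_bb()                       # size-independent
    if not hasattr(data, "resAb") or data.resAb.shape[1] != S:
        data.resAb = build_Ab(S)                      # stale if basis grew
    if not hasattr(data, "resAA") or data.resAA.shape[0] != S:
        data.resAA = build_AA(S)

# usage: blocks persist on the trained model data across greedy iterations
data = SimpleNamespace()
ensure_blocks(data, 3, lambda: np.eye(2), lambda S: np.zeros((2, S, 2)),
              lambda S: np.zeros((S, 2, S, 2)))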