diff --git a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py index 37bb2e8..e41b930 100644 --- a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py +++ b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py @@ -1,537 +1,548 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from copy import deepcopy as copy from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.utilities.poly_fitting.polynomial import (polybases as ppb, PolynomialInterpolator as PI) from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.poly_fitting.moving_least_squares import ( polybases as mlspb, MovingLeastSquaresInterpolator as MLSI) from rrompy.sampling.pivoted import (SamplingEnginePivoted, SamplingEnginePivotedPOD) from rrompy.utilities.base.types import ListAny, DictAny, HFEng, paramVal from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import (fullDegreeN, totalDegreeN, nextDerivativeIndices) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList __all__ = ['GenericPivotedApproximant'] class GenericPivotedApproximant(GenericApproximant): """ ROM pivoted approximant (with pole matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'cutOffTolerance': tolerance for ignoring parasitic poles; defaults to np.inf; - 'cutOffType': rule for tolerance computation for parasitic poles; defaults to 'MAGNITUDE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to 'MONOMIAL'; - 'MMarginal': degree of marginal interpolant; defaults to 0; - 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 0, i.e. 
identity; - 'nNearestNeighborMarginal': number of marginal nearest neighbors considered if polybasisMarginal allows; defaults to -1; - 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'matchingWeight': weight for pole matching optimization; - 'cutOffTolerance': tolerance for ignoring parasitic poles; - 'cutOffType': rule for tolerance computation for parasitic poles; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'MMarginal': degree of marginal interpolant; - 'polydegreetypeMarginal': type of polynomial degree for marginal; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'nNearestNeighborMarginal': number of marginal nearest neighbors considered if polybasisMarginal allows; - 'interpRcondMarginal': tolerance for marginal interpolation. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. matchingWeight: Weight for pole matching optimization. cutOffTolerance: Tolerance for ignoring parasitic poles. cutOffType: Rule for tolerance computation for parasitic poles. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasisMarginal: Type of polynomial basis for marginal interpolation. MMarginal: Degree of marginal interpolant. polydegreetypeMarginal: Type of polynomial degree for marginal. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. nNearestNeighborMarginal: Number of marginal nearest neighbors considered if polybasisMarginal allows. interpRcondMarginal: Tolerance for marginal interpolation. muBoundsPivot: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. 
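# Illustrative sketch (not part of RROMPy): how the pivot/marginal split described
# above turns S pivot samples and SMarginal marginal samples into the full array
# mus of S * SMarginal snapshot parameters. This mirrors the loop in
# computeSnapshots below; plain numpy arrays stand in for RROMPy's parameterList
# objects, and the sample values are made up for illustration.
import numpy as np
directionPivot, directionMarginal = [0], [1]    # 2-parameter problem, pivot = component 0
musPivot = np.array([10., 20., 30.])            # S = 3 pivot sample points
musMarginal = np.array([.1, .2])                # SMarginal = 2 marginal sample points
S, SMarginal = len(musPivot), len(musMarginal)
mus = np.empty((S * SMarginal, 2))              # one row per full parameter sample
for j, muMarg in enumerate(musMarginal):
    for k in range(j * S, (j + 1) * S):
        mus[k, directionPivot] = musPivot[k - j * S]
        mus[k, directionMarginal] = muMarg
# mus now holds one block of S pivot samples per marginal point.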
""" def __init__(self, HFEngine:HFEng, mu0 : paramVal = None, directionPivot : ListAny = [0], approxParameters : DictAny = {}, approx_state : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() if len(directionPivot) > 1: raise RROMPyException(("Exactly 1 pivot parameter allowed in pole " "matching.")) from rrompy.parameter.parameter_sampling import QuadratureSampler as QS QSBase = QS([[0], [1]], "UNIFORM") self._addParametersToList(["matchingWeight", "cutOffTolerance", "cutOffType", "polybasisMarginal", "MMarginal", "polydegreetypeMarginal", "radialDirectionalWeightsMarginal", "nNearestNeighborMarginal", "interpRcondMarginal"], [1, np.inf, "MAGNITUDE", "MONOMIAL", 0, "TOTAL", 1, -1, -1], ["samplerPivot", "SMarginal", "samplerMarginal"], [QSBase, [1], QSBase]) del QS self._directionPivot = directionPivot super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, approx_state = approx_state, verbosity = verbosity, timestamp = timestamp) self._postInit() def setupSampling(self): """Setup sampling engine.""" RROMPyAssert(self._mode, message = "Cannot setup sampling engine.") if not hasattr(self, "_POD") or self._POD is None: return if self.POD: SamplingEngine = SamplingEnginePivotedPOD else: SamplingEngine = SamplingEnginePivoted self.samplingEngine = SamplingEngine(self.HFEngine, self.directionPivot, sample_state = self.approx_state, verbosity = self.verbosity) + @property + def mus(self): + """Value of mus. Its assignment may reset snapshots.""" + return self._mus + @mus.setter + def mus(self, mus): + musOld = copy(self.mus) if hasattr(self, '_mus') else None + if (musOld is None or len(mus) != len(musOld) or not mus == musOld): + self.resetSamples() + self._mus = mus + @property def matchingWeight(self): """Value of matchingWeight.""" return self._matchingWeight @matchingWeight.setter def matchingWeight(self, matchingWeight): self._matchingWeight = matchingWeight self._approxParameters["matchingWeight"] = self.matchingWeight @property def cutOffTolerance(self): """Value of cutOffTolerance.""" return self._cutOffTolerance @cutOffTolerance.setter def cutOffTolerance(self, cutOffTolerance): self._cutOffTolerance = cutOffTolerance self._approxParameters["cutOffTolerance"] = self.cutOffTolerance @property def cutOffType(self): """Value of cutOffType.""" return self._cutOffType @cutOffType.setter def cutOffType(self, cutOffType): try: cutOffType = cutOffType.upper().strip().replace(" ","") if cutOffType not in ["MAGNITUDE", "POTENTIAL"]: raise RROMPyException("Prescribed cutOffType not recognized.") self._cutOffType = cutOffType except: RROMPyWarning(("Prescribed cutOffType not recognized. 
Overriding " "to 'MAGNITUDE'.")) self._cutOffType = "MAGNITUDE" self._approxParameters["cutOffType"] = self.cutOffType @property def SMarginal(self): """Value of SMarginal.""" return self._SMarginal @SMarginal.setter def SMarginal(self, SMarginal): if SMarginal <= 0: raise RROMPyException("SMarginal must be positive.") if hasattr(self, "_SMarginal") and self._SMarginal is not None: Sold = self.SMarginal else: Sold = -1 self._SMarginal = SMarginal self._approxParameters["SMarginal"] = self.SMarginal if Sold != self.SMarginal: self.resetSamples() @property def polybasisMarginal(self): """Value of polybasisMarginal.""" return self._polybasisMarginal @polybasisMarginal.setter def polybasisMarginal(self, polybasisMarginal): try: polybasisMarginal = polybasisMarginal.upper().strip().replace(" ", "") if polybasisMarginal not in ppb + rbpb + mlspb: raise RROMPyException( "Prescribed marginal polybasis not recognized.") self._polybasisMarginal = polybasisMarginal except: RROMPyWarning(("Prescribed marginal polybasis not recognized. " "Overriding to 'MONOMIAL'.")) self._polybasisMarginal = "MONOMIAL" self._approxParameters["polybasisMarginal"] = self.polybasisMarginal @property def MMarginal(self): """Value of MMarginal.""" return self._MMarginal @MMarginal.setter def MMarginal(self, MMarginal): if MMarginal < 0: raise RROMPyException("MMarginal must be non-negative.") self._MMarginal = MMarginal self._approxParameters["MMarginal"] = self.MMarginal @property def polydegreetypeMarginal(self): """Value of polydegreetypeMarginal.""" return self._polydegreetypeMarginal @polydegreetypeMarginal.setter def polydegreetypeMarginal(self, polydegreetypeM): try: polydegreetypeM = polydegreetypeM.upper().strip().replace(" ","") if polydegreetypeM not in ["TOTAL", "FULL"]: raise RROMPyException(("Prescribed polydegreetypeMarginal not " "recognized.")) self._polydegreetypeMarginal = polydegreetypeM except: RROMPyWarning(("Prescribed polydegreetypeMarginal not recognized. " "Overriding to 'TOTAL'.")) self._polydegreetypeMarginal = "TOTAL" self._approxParameters["polydegreetypeMarginal"] = ( self.polydegreetypeMarginal) @property def radialDirectionalWeightsMarginal(self): """Value of radialDirectionalWeightsMarginal.""" return self._radialDirectionalWeightsMarginal @radialDirectionalWeightsMarginal.setter def radialDirectionalWeightsMarginal(self, radialDirWeightsMarginal): self._radialDirectionalWeightsMarginal = radialDirWeightsMarginal self._approxParameters["radialDirectionalWeightsMarginal"] = ( self.radialDirectionalWeightsMarginal) @property def nNearestNeighborMarginal(self): """Value of nNearestNeighborMarginal.""" return self._nNearestNeighborMarginal @nNearestNeighborMarginal.setter def nNearestNeighborMarginal(self, nNearestNeighborMarginal): self._nNearestNeighborMarginal = nNearestNeighborMarginal self._approxParameters["nNearestNeighborMarginal"] = ( self.nNearestNeighborMarginal) @property def interpRcondMarginal(self): """Value of interpRcondMarginal.""" return self._interpRcondMarginal @interpRcondMarginal.setter def interpRcondMarginal(self, interpRcondMarginal): self._interpRcondMarginal = interpRcondMarginal self._approxParameters["interpRcondMarginal"] = ( self.interpRcondMarginal) @property def directionPivot(self): """Value of directionPivot. 
Its assignment may reset snapshots.""" return self._directionPivot @directionPivot.setter def directionPivot(self, directionPivot): if hasattr(self, '_directionPivot'): directionPivotOld = copy(self.directionPivot) else: directionPivotOld = None if (directionPivotOld is None or len(directionPivot) != len(directionPivotOld) or not directionPivot == directionPivotOld): self.resetSamples() self._directionPivot = directionPivot @property def directionMarginal(self): return [x for x in range(self.HFEngine.npar) \ if x not in self.directionPivot] @property def nparPivot(self): return len(self.directionPivot) @property def nparMarginal(self): return self.npar - self.nparPivot @property def rescalingExpPivot(self): return [self.HFEngine.rescalingExp[x] for x in self.directionPivot] @property def rescalingExpMarginal(self): return [self.HFEngine.rescalingExp[x] for x in self.directionMarginal] @property def muBoundsPivot(self): """Value of muBoundsPivot.""" return self.samplerPivot.lims @property def muBoundsMarginal(self): """Value of muBoundsMarginal.""" return self.samplerMarginal.lims @property def samplerPivot(self): """Value of samplerPivot.""" return self._samplerPivot @samplerPivot.setter def samplerPivot(self, samplerPivot): if 'generatePoints' not in dir(samplerPivot): raise RROMPyException("Pivot sampler type not recognized.") if hasattr(self, '_samplerPivot') and self._samplerPivot is not None: samplerOld = self.samplerPivot self._samplerPivot = samplerPivot self._approxParameters["samplerPivot"] = self.samplerPivot.__str__() if not 'samplerOld' in locals() or samplerOld != self.samplerPivot: self.resetSamples() @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'generatePoints' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") if (hasattr(self, '_samplerMarginal') and self._samplerMarginal is not None): samplerOld = self.samplerMarginal self._samplerMarginal = samplerMarginal self._approxParameters["samplerMarginal"] = ( self.samplerMarginal.__str__()) if not 'samplerOld' in locals() or samplerOld != self.samplerMarginal: self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() self._musMUniqueCN = None self._derMIdxs = None self._reorderM = None def setSamples(self, samplingEngine): """Copy samplingEngine and samples.""" super().setSamples(samplingEngine) self.mus = copy(self.samplingEngine[0].mus) for sEj in self.samplingEngine[1:]: self.mus.append(sEj.mus) def computeSnapshots(self): """Compute snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start snapshot computation.") if self.samplingEngine.nsamplesTot != self.S * self.SMarginal: self.computeScaleFactor() self.resetSamples() vbMng(self, "INIT", "Starting computation of snapshots.", 5) self.musPivot = self.samplerPivot.generatePoints(self.S) self.musMarginal = self.samplerMarginal.generatePoints( self.SMarginal) self.mus = emptyParameterList() self.mus.reset((self.S * self.SMarginal, self.HFEngine.npar)) self.samplingEngine.resetHistory(self.SMarginal) for j, muMarg in enumerate(self.musMarginal): for k in range(j * self.S, (j + 1) * self.S): self.mus.data[k, self.directionPivot] = ( self.musPivot[k - j * self.S].data) self.mus.data[k, self.directionMarginal] = muMarg.data self.samplingEngine.iterSample(self.musPivot, self.musMarginal) if self.POD: self.samplingEngine.coalesceSamples(self.interpRcondMarginal) else: 
self.samplingEngine.coalesceSamples() vbMng(self, "DEL", "Done computing snapshots.", 5) def _setupMarginalInterpolationIndices(self): """Setup parameters for polyvander.""" RROMPyAssert(self._mode, message = "Cannot setup interpolation indices.") if (self._musMUniqueCN is None or len(self._reorderM) != len(self.musMarginal)): self._musMUniqueCN, musMIdxsTo, musMIdxs, musMCount = ( self.trainedModel.centerNormalizeMarginal(self.musMarginal)\ .unique(return_index = True, return_inverse = True, return_counts = True)) self._musMUnique = self.musMarginal[musMIdxsTo] self._derMIdxs = [None] * len(self._musMUniqueCN) self._reorderM = np.empty(len(musMIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musMCount): self._derMIdxs[j] = nextDerivativeIndices([], self.nparMarginal, cnt) jIdx = np.nonzero(musMIdxs == j)[0] self._reorderM[jIdx] = np.arange(filled, filled + cnt) filled += cnt def _setupMarginalInterp(self): """Compute marginal interpolator.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of marginal interpolator.", 7) self._setupMarginalInterpolationIndices() if self.polydegreetypeMarginal == "TOTAL": cfun = totalDegreeN else: cfun = fullDegreeN MM = copy(self.MMarginal) while len(self.musMarginal) < cfun(MM, self.nparMarginal): MM -= 1 if MM < self.MMarginal: RROMPyWarning( ("MMarginal too large compared to SMarginal. " "Reducing MMarginal by {}").format(self.MMarginal - MM)) self.MMarginal = MM mI = [] for j in range(len(self.musMarginal)): canonicalj = 1. * (np.arange(len(self.musMarginal)) == j) self._MMarginal = MM while self.MMarginal >= 0: if self.polybasisMarginal in ppb: p = PI() wellCond, msg = p.setupByInterpolation( self._musMUniqueCN, canonicalj, self.MMarginal, self.polybasisMarginal, self.verbosity >= 5, self.polydegreetypeMarginal == "TOTAL", {"derIdxs": self._derMIdxs, "reorder": self._reorderM, "scl": np.power(self.scaleFactorMarginal, -1.)}, {"rcond": self.interpRcondMarginal}) elif self.polybasisMarginal in rbpb: p = RBI() wellCond, msg = p.setupByInterpolation( self._musMUniqueCN, canonicalj, self.MMarginal, self.polybasisMarginal, self.radialDirectionalWeightsMarginal, self.verbosity >= 5, self.polydegreetypeMarginal == "TOTAL", {"derIdxs": self._derMIdxs, "reorder": self._reorderM, "scl": np.power(self.scaleFactorMarginal, -1.), "nNearestNeighbor" : self.nNearestNeighborMarginal}, {"rcond": self.interpRcondMarginal}) else:# if self.polybasisMarginal in mlspb: p = MLSI() wellCond, msg = p.setupByInterpolation( self._musMUniqueCN, canonicalj, self.MMarginal, self.polybasisMarginal, self.radialDirectionalWeightsMarginal, self.verbosity >= 5, self.polydegreetypeMarginal == "TOTAL", {"derIdxs": self._derMIdxs, "reorder": self._reorderM, "scl": np.power(self.scaleFactorMarginal, -1.), "nNearestNeighbor" : self.nNearestNeighborMarginal}) vbMng(self, "MAIN", msg, 5) if wellCond: break RROMPyWarning(("Polyfit is poorly conditioned. 
Reducing " "MMarginal by 1.")) self.MMarginal = self.MMarginal - 1 mI = mI + [copy(p)] vbMng(self, "DEL", "Done computing marginal interpolator.", 7) return mI def computeScaleFactor(self): """Compute parameter rescaling factor.""" RROMPyAssert(self._mode, message = "Cannot compute rescaling factor.") self.scaleFactorPivot = .5 * np.abs( self.muBoundsPivot[0] ** self.rescalingExpPivot - self.muBoundsPivot[1] ** self.rescalingExpPivot) self.scaleFactorMarginal = .5 * np.abs( self.muBoundsMarginal[0] ** self.rescalingExpMarginal - self.muBoundsMarginal[1] ** self.rescalingExpMarginal) self.scaleFactor = np.empty(self.npar) self.scaleFactor[self.directionPivot] = self.scaleFactorPivot self.scaleFactor[self.directionMarginal] = self.scaleFactorMarginal diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py index 60ffa70..909b403 100644 --- a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py +++ b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py @@ -1,641 +1,376 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import deepcopy as copy import numpy as np -from rrompy.reduction_methods.base import checkRobustTolerance from .generic_pivoted_approximant import GenericPivotedApproximant from rrompy.reduction_methods.standard.rational_interpolant import ( - RationalInterpolant as RI) -from rrompy.utilities.poly_fitting.polynomial import ( - polybases as ppb, polyfitname, - polyvander as pvP, polyvanderTotal as pvTP, - PolynomialInterpolator as PI) -from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, - RadialBasisInterpolator as RBI) + RationalInterpolant) +from rrompy.utilities.poly_fitting.polynomial import polybases as ppb +from rrompy.utilities.poly_fitting.radial_basis import polybases as rbpb from rrompy.utilities.poly_fitting.moving_least_squares import ( - polybases as mlspb, - MovingLeastSquaresInterpolator as MLSI) -from rrompy.utilities.base.types import (Np1D, Np2D, HFEng, DictAny, Tuple, - List, ListAny, paramVal) -from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp -from rrompy.utilities.numerical import (multifactorial, customPInv, dot, - fullDegreeN, totalDegreeN, - degreeTotalToFull, fullDegreeMaxMask, - totalDegreeMaxMask, - nextDerivativeIndices, - hashDerivativeToIdx as hashD, - hashIdxToDerivative as hashI) + polybases as mlspb) +from rrompy.utilities.base.types import HFEng, DictAny, ListAny, paramVal +from rrompy.utilities.base import verbosityManager as vbMng +from rrompy.utilities.numerical import dot, nextDerivativeIndices from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) -from rrompy.parameter import checkParameter __all__ = ['RationalInterpolantPivoted'] -class RationalInterpolantPivoted(GenericPivotedApproximant): +class RationalInterpolantPivoted(GenericPivotedApproximant, + RationalInterpolant): """ ROM pivoted rational interpolant (with pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. 
Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'cutOffTolerance': tolerance for ignoring parasitic poles; defaults to np.inf; - 'cutOffType': rule for tolerance computation for parasitic poles; defaults to 'MAGNITUDE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - - 'polybasisPivot': type of polynomial basis for pivot + - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 0; - 'N': degree of rational interpolant denominator; defaults to 0; - 'polydegreetype': type of polynomial degree; defaults to 'TOTAL'; - 'MMarginal': degree of marginal interpolant; defaults to 0; - 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; - - 'radialDirectionalWeightsPivot': radial basis weights for pivot + - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 0, i.e. identity; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 0, i.e. identity; - - 'nNearestNeighborPivot': number of pivot nearest neighbors - considered if polybasisPivot allows; defaults to -1; + - 'nNearestNeighbor': number of pivot nearest neighbors considered + if polybasis allows; defaults to -1; - 'nNearestNeighborMarginal': number of marginal nearest neighbors considered if polybasisMarginal allows; defaults to -1; - - 'interpRcondPivot': tolerance for pivot interpolation; defaults - to None; + - 'interpRcond': tolerance for pivot interpolation; defaults to + None; - 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0. + management; defaults to 0; + - 'correctorForce': whether corrector should forcefully delete bad + poles; defaults to False; + - 'correctorTol': tolerance for corrector step; defaults to 0., + i.e. no bad poles; + - 'correctorMaxIter': maximum number of corrector iterations; + defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'matchingWeight': weight for pole matching optimization; - 'cutOffTolerance': tolerance for ignoring parasitic poles; - 'cutOffType': rule for tolerance computation for parasitic poles; - - 'polybasisPivot': type of polynomial basis for pivot + - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'polydegreetype': type of polynomial degree; - 'MMarginal': degree of marginal interpolant; - 'polydegreetypeMarginal': type of polynomial degree for marginal; - - 'radialDirectionalWeightsPivot': radial basis weights for pivot + - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - - 'nNearestNeighborPivot': number of pivot nearest neighbors - considered if polybasisPivot allows; + - 'nNearestNeighbor': number of pivot nearest neighbors considered + if polybasis allows; - 'nNearestNeighborMarginal': number of marginal nearest neighbors considered if polybasisMarginal allows; - - 'interpRcondPivot': tolerance for pivot interpolation; + - 'interpRcond': tolerance for pivot interpolation; - 'interpRcondMarginal': tolerance for marginal interpolation; - 'robustTol': tolerance for robust rational denominator - management. + management; + - 'correctorForce': whether corrector should forcefully delete bad + poles; + - 'correctorTol': tolerance for corrector step; + - 'correctorMaxIter': maximum number of corrector iterations. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. matchingWeight: Weight for pole matching optimization. cutOffTolerance: Tolerance for ignoring parasitic poles. cutOffType: Rule for tolerance computation for parasitic poles. S: Total number of pivot samples current approximant relies upon. sampler: Pivot sample point generator. - polybasisPivot: Type of polynomial basis for pivot interpolation. + polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. polydegreetype: Type of polynomial degree. MMarginal: Degree of marginal interpolant. polydegreetypeMarginal: Type of polynomial degree for marginal. - radialDirectionalWeightsPivot: Radial basis weights for pivot - numerator. + radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. - nNearestNeighborPivot: Number of pivot nearest neighbors considered if - polybasisPivot allows. + nNearestNeighbor: Number of pivot nearest neighbors considered if + polybasis allows. nNearestNeighborMarginal: Number of marginal nearest neighbors considered if polybasisMarginal allows. - interpRcondPivot: Tolerance for pivot interpolation. + interpRcond: Tolerance for pivot interpolation. 
interpRcondMarginal: Tolerance for marginal interpolation. robustTol: Tolerance for robust rational denominator management. + correctorForce: Whether corrector should forcefully delete bad poles. + correctorTol: Tolerance for corrector step. + correctorMaxIter: Maximum number of corrector iterations. muBoundsPivot: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ def __init__(self, HFEngine:HFEng, mu0 : paramVal = None, directionPivot : ListAny = [0], approxParameters : DictAny = {}, approx_state : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() - self._addParametersToList(["polybasisPivot", "M", "N", - "polydegreetype", - "radialDirectionalWeightsPivot", - "nNearestNeighborPivot", - "interpRcondPivot", "robustTol"], - ["MONOMIAL", 0, 0, "TOTAL", 1, -1, -1, 0]) + self._addParametersToList(toBeExcluded = ["sampler", "centeredLike"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, directionPivot = directionPivot, approxParameters = approxParameters, approx_state = approx_state, verbosity = verbosity, timestamp = timestamp) self._postInit() @property def tModelType(self): from rrompy.reduction_methods.trained_model import \ TrainedModelPivotedRational return TrainedModelPivotedRational def initializeModelData(self, datadict): from rrompy.reduction_methods.trained_model import \ TrainedModelPivotedData return (TrainedModelPivotedData(datadict["mu0"], datadict.pop("projMat"), datadict["scaleFactor"], datadict.pop("rescalingExp"), datadict["directionPivot"]), ["mu0", "scaleFactor", "directionPivot", "mus"]) @property - def polybasisPivot(self): - """Value of polybasisPivot.""" - return self._polybasisPivot - @polybasisPivot.setter - def polybasisPivot(self, polybasisPivot): + def npar(self): + """Number of parameters.""" + if hasattr(self, "_temporaryPivot"): return self.nparPivot + return super().npar + + @property + def centeredLike(self): + """Value of centeredLike.""" + return False + @centeredLike.setter + def centeredLike(self, centeredLike): + RROMPyWarning(("centeredLike is used just to simplify inheritance, " + "and its value cannot be changed from False.")) + + @property + def polybasis(self): + """Value of polybasis.""" + return self._polybasis + @polybasis.setter + def polybasis(self, polybasis): try: - polybasisPivot = polybasisPivot.upper().strip().replace(" ","") - if polybasisPivot not in ppb + rbpb + mlspb: + polybasis = polybasis.upper().strip().replace(" ","") + if polybasis not in ppb + rbpb + mlspb: raise RROMPyException( "Prescribed pivot polybasis not recognized.") - self._polybasisPivot = polybasisPivot + self._polybasis = polybasis except: RROMPyWarning(("Prescribed pivot polybasis not recognized. 
" "Overriding to 'MONOMIAL'.")) - self._polybasisPivot = "MONOMIAL" - self._approxParameters["polybasisPivot"] = self.polybasisPivot - - @property - def polybasisPivot0(self): - if "_" in self.polybasisPivot: - return self.polybasisPivot.split("_")[0] - return self.polybasisPivot + self._polybasis = "MONOMIAL" + self._approxParameters["polybasis"] = self.polybasis @property - def radialDirectionalWeightsPivot(self): - """Value of radialDirectionalWeightsPivot.""" - return self._radialDirectionalWeightsPivot - @radialDirectionalWeightsPivot.setter - def radialDirectionalWeightsPivot(self, radialDirectionalWeightsPivot): - self._radialDirectionalWeightsPivot = radialDirectionalWeightsPivot - self._approxParameters["radialDirectionalWeightsPivot"] = ( - self.radialDirectionalWeightsPivot) + def polybasis0(self): + if "_" in self.polybasis: + return self.polybasis.split("_")[0] + return self.polybasis @property - def nNearestNeighborPivot(self): - """Value of nNearestNeighborPivot.""" - return self._nNearestNeighborPivot - @nNearestNeighborPivot.setter - def nNearestNeighborPivot(self, nNearestNeighborPivot): - self._nNearestNeighborPivot = nNearestNeighborPivot - self._approxParameters["nNearestNeighborPivot"] = ( - self.nNearestNeighborPivot) + def correctorTol(self): + """Value of correctorTol.""" + return self._correctorTol + @correctorTol.setter + def correctorTol(self, correctorTol): + if correctorTol < 0. or (correctorTol > 0. and self.nparPivot > 1): + RROMPyWarning(("Overriding prescribed corrector tolerance " + "to 0.")) + correctorTol = 0. + self._correctorTol = correctorTol + self._approxParameters["correctorTol"] = self.correctorTol @property - def interpRcondPivot(self): - """Value of interpRcondPivot.""" - return self._interpRcondPivot - @interpRcondPivot.setter - def interpRcondPivot(self, interpRcondPivot): - self._interpRcondPivot = interpRcondPivot - self._approxParameters["interpRcondPivot"] = self.interpRcondPivot - - @property - def M(self): - """Value of M.""" - return self._M - @M.setter - def M(self, M): - if M < 0: raise RROMPyException("M must be non-negative.") - self._M = M - self._approxParameters["M"] = self.M - - @property - def N(self): - """Value of N.""" - return self._N - @N.setter - def N(self, N): - if N < 0: raise RROMPyException("N must be non-negative.") - self._N = N - self._approxParameters["N"] = self.N - - @property - def polydegreetype(self): - """Value of polydegreetype.""" - return self._polydegreetype - @polydegreetype.setter - def polydegreetype(self, polydegreetype): - try: - polydegreetype = polydegreetype.upper().strip().replace(" ","") - if polydegreetype not in ["TOTAL", "FULL"]: - raise RROMPyException(("Prescribed polydegreetype not " - "recognized.")) - self._polydegreetype = polydegreetype - except: - RROMPyWarning(("Prescribed polydegreetype not recognized. " - "Overriding to 'TOTAL'.")) - self._polydegreetype = "TOTAL" - self._approxParameters["polydegreetype"] = self.polydegreetype - - @property - def robustTol(self): - """Value of tolerance for robust rational denominator management.""" - return self._robustTol - @robustTol.setter - def robustTol(self, robustTol): - if robustTol < 0.: - RROMPyWarning(("Overriding prescribed negative robustness " - "tolerance to 0.")) - robustTol = 0. 
- self._robustTol = robustTol - self._approxParameters["robustTol"] = self.robustTol - - def resetSamples(self): - """Reset samples.""" - super().resetSamples() - self._musPUniqueCN = None - self._derPIdxs = None - self._reorderP = None - - def _setupPivotInterpolationIndices(self): + def correctorMaxIter(self): + """Value of correctorMaxIter.""" + return self._correctorMaxIter + @correctorMaxIter.setter + def correctorMaxIter(self, correctorMaxIter): + if correctorMaxIter < 1 or (correctorMaxIter > 1 + and self.nparPivot > 1): + RROMPyWarning(("Overriding prescribed max number of corrector " + "iterations to 1.")) + correctorMaxIter = 1 + self._correctorMaxIter = correctorMaxIter + self._approxParameters["correctorMaxIter"] = self.correctorMaxIter + + def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" RROMPyAssert(self._mode, message = "Cannot setup interpolation indices.") - if (self._musPUniqueCN is None - or len(self._reorderP) != len(self.musPivot)): - self._musPUniqueCN, musPIdxsTo, musPIdxs, musPCount = ( + if (self._musUniqueCN is None + or len(self._reorder) != len(self.musPivot)): + self._musUniqueCN, musIdxsTo, musIdxs, musCount = ( self.trainedModel.centerNormalizePivot(self.musPivot).unique( return_index = True, return_inverse = True, return_counts = True)) - self._musPUnique = self.mus[musPIdxsTo] - self._derPIdxs = [None] * len(self._musPUniqueCN) - self._reorderP = np.empty(len(musPIdxs), dtype = int) + self._musUnique = self.musPivot[musIdxsTo] + self._derIdxs = [None] * len(self._musUniqueCN) + self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 - for j, cnt in enumerate(musPCount): - self._derPIdxs[j] = nextDerivativeIndices([], - self.nparPivot, cnt) - jIdx = np.nonzero(musPIdxs == j)[0] - self._reorderP[jIdx] = np.arange(filled, filled + cnt) + for j, cnt in enumerate(musCount): + self._derIdxs[j] = nextDerivativeIndices([], self.nparPivot, + cnt) + jIdx = np.nonzero(musIdxs == j)[0] + self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt - def _setupDenominator(self): - """Compute rational denominator.""" - RROMPyAssert(self._mode, message = "Cannot setup denominator.") - vbMng(self, "INIT", "Starting computation of denominator.", 7) - NinvD = None - N0 = copy(self.N) - qs = [] - self.verbosity -= 10 - for j in range(len(self.musMarginal)): - self._N = N0 - while self.N > 0: - if NinvD != self.N: - invD, fitinvP = self._computeInterpolantInverseBlocks() - NinvD = self.N - if self.POD: - ev, eV = RI.findeveVGQR(self, self.samplingEngine.RPOD[j], - invD) - else: - ev, eV = RI.findeveVGExplicit(self, - self.samplingEngine.samples[j], invD) - nevBad = checkRobustTolerance(ev, self.robustTol) - if nevBad <= 1: break - if self.catchInstability: - raise RROMPyException(("Instability in denominator " - "computation: eigenproblem is " - "poorly conditioned.")) - RROMPyWarning(("Smallest {} eigenvalues below tolerance. 
" - "Reducing N by 1.").format(nevBad)) - self.N = self.N - 1 - if self.N <= 0: - self._N = 0 - eV = np.ones((1, 1)) - q = PI() - q.npar = self.nparPivot - q.polybasis = self.polybasisPivot0 - if self.polydegreetype == "TOTAL": - q.coeffs = degreeTotalToFull(tuple([self.N + 1] * q.npar), - q.npar, eV[:, 0]) - else: - q.coeffs = eV[:, 0].reshape([self.N + 1] * q.npar) - qs = qs + [copy(q)] - self.verbosity += 10 - vbMng(self, "DEL", "Done computing denominator.", 7) - return qs, fitinvP - - def _setupNumerator(self): - """Compute rational numerator.""" - RROMPyAssert(self._mode, message = "Cannot setup numerator.") - vbMng(self, "INIT", "Starting computation of numerator.", 7) - Qevaldiag = np.zeros((len(self.musPivot), len(self.musPivot)), - dtype = np.complex) - verb = self.trainedModel.verbosity - self.trainedModel.verbosity = 0 - self._setupPivotInterpolationIndices() - cfun = totalDegreeN if self.polydegreetype == "TOTAL" else fullDegreeN - M = copy(self.M) - while len(self.musPivot) < cfun(M, self.nparPivot): M -= 1 - if M < self.M: - RROMPyWarning(("M too large compared to S. Reducing M by " - "{}").format(self.M - M)) - self.M = M - tensor_idx = 0 - ps = [] - for k, muM in enumerate(self.musMarginal): - self._M = M - idxGlob = 0 - for j, derIdxs in enumerate(self._derPIdxs): - mujEff = [fp] * self.npar - for jj, kk in enumerate(self.directionPivot): - mujEff[kk] = self._musPUnique[j, jj] - for jj, kk in enumerate(self.directionMarginal): - mujEff[kk] = muM(0, jj) - mujEff = checkParameter(mujEff, self.npar) - nder = len(derIdxs) - idxLoc = np.arange(len(self.musPivot))[ - (self._reorderP >= idxGlob) - * (self._reorderP < idxGlob + nder)] - idxGlob += nder - Qval = [0] * nder - for der in range(nder): - derIdx = hashI(der, self.nparPivot) - derIdxEff = [0] * self.npar - sclEff = [0] * self.npar - for jj, kk in enumerate(self.directionPivot): - derIdxEff[kk] = derIdx[jj] - sclEff[kk] = self.scaleFactorPivot[jj] ** -1. 
- Qval[der] = (self.trainedModel.getQVal(mujEff, derIdxEff, - scl = sclEff) - / multifactorial(derIdx)) - for derU, derUIdx in enumerate(derIdxs): - for derQ, derQIdx in enumerate(derIdxs): - diffIdx = [x - y for (x, y) in zip(derUIdx, derQIdx)] - if all([x >= 0 for x in diffIdx]): - diffj = hashD(diffIdx) - Qevaldiag[idxLoc[derU], idxLoc[derQ]] = Qval[diffj] - while self.M >= 0: - if self.polybasisPivot in ppb: - p = PI() - wellCond, msg = p.setupByInterpolation( - self._musPUniqueCN, Qevaldiag, self.M, - self.polybasisPivot, self.verbosity >= 5, - self.polydegreetype == "TOTAL", - {"derIdxs": self._derPIdxs, - "reorder": self._reorderP, - "scl": np.power(self.scaleFactorPivot, -1.)}, - {"rcond": self.interpRcondPivot}) - elif self.polybasisPivot in rbpb: - p = RBI() - wellCond, msg = p.setupByInterpolation( - self._musPUniqueCN, Qevaldiag, self.M, - self.polybasisPivot, - self.radialDirectionalWeightsPivot, - self.verbosity >= 5, - self.polydegreetype == "TOTAL", - {"derIdxs": self._derPIdxs, - "reorder": self._reorderP, - "scl": np.power(self.scaleFactorPivot, -1.), - "nNearestNeighbor" : self.nNearestNeighborPivot}, - {"rcond": self.interpRcondPivot}) - else:# if self.polybasisPivot in mlspb: - p = MLSI() - wellCond, msg = p.setupByInterpolation( - self._musPUniqueCN, Qevaldiag, self.M, - self.polybasisPivot, - self.radialDirectionalWeightsPivot, - self.verbosity >= 5, - self.polydegreetype == "TOTAL", - {"derIdxs": self._derPIdxs, - "reorder": self._reorderP, - "scl": np.power(self.scaleFactorPivot, -1.), - "nNearestNeighbor" : self.nNearestNeighborPivot}) - vbMng(self, "MAIN", msg, 5) - if wellCond: break - if self.catchInstability: - raise RROMPyException(("Instability in numerator " - "computation: polyfit is " - "poorly conditioned.")) - RROMPyWarning(("Polyfit is poorly conditioned. " - "Reducing M by 1.")) - self.M = self.M - 1 - tensor_idx_new = tensor_idx + Qevaldiag.shape[1] - if self.POD: - p.postmultiplyTensorize(self.samplingEngine.RPODCoalesced.T[ - tensor_idx : tensor_idx_new, :]) - else: - p.pad(tensor_idx, len(self.mus) - tensor_idx_new) - tensor_idx = tensor_idx_new - ps = ps + [copy(p)] - self.trainedModel.verbosity = verb - vbMng(self, "DEL", "Done computing numerator.", 7) - return ps - def setupApprox(self): """ Compute rational interpolant. SVD-based robust eigenvalue management. """ if self.checkComputedApprox(): return RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". 
format(self.name()), 5) self.computeSnapshots() pMat = self.samplingEngine.samplesCoalesced.data pMatEff = dot(self.HFEngine.C, pMat) if self.approx_state else pMat if self.trainedModel is None: self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp datadict = {"mu0": self.mu0, "projMat": pMatEff, "scaleFactor": self.scaleFactor, "rescalingExp": self.HFEngine.rescalingExp, "directionPivot": self.directionPivot} self.trainedModel.data = self.initializeModelData(datadict)[0] else: self.trainedModel = self.trainedModel self.trainedModel.data.projMat = copy(pMatEff) self.trainedModel.data.musPivot = copy(self.musPivot) self.trainedModel.data.musMarginal = copy(self.musMarginal) self.trainedModel.data.marginalInterp = self._setupMarginalInterp() - if self.N > 0: - Qs = self._setupDenominator()[0] + N0 = copy(self.N) + Qs, Ps = [], [] + self._temporaryPivot = 1 + if self.POD: + self._RPODOldPivot = copy(self.samplingEngine.RPODCoalesced) else: - Q = PI() - Q.npar = self.nparPivot - Q.coeffs = np.ones(tuple([1] * Q.npar), - dtype = self.musMarginal.dtype) - Q.polybasis = self.polybasisPivot0 - Qs = [Q for _ in range(len(self.musMarginal))] - self.trainedModel.data._temporary = 1 + self._samplesOldPivot = copy(self.samplingEngine.samples) + self._scaleFactorOldPivot = copy(self.scaleFactor) + self.scaleFactor = self.scaleFactorPivot + for j in range(len(self.musMarginal)): + self._N = N0 + if self.POD: + self.samplingEngine.RPOD = ( + self._RPODOldPivot[:, j * self.S : (j + 1) * self.S]) + else: + self.samplingEngine.samples = self._samplesOldPivot[j] + self._iterCorrector() + Qs = Qs + [copy(self.trainedModel.data.Q)] + Ps = Ps + [copy(self.trainedModel.data.P)] + del self.trainedModel.data.Q, self.trainedModel.data.P + if self.POD: + self.samplingEngine.RPODCoalesced = self._RPODOldPivot + else: + self.samplingEngine.samples = self._samplesOldPivot + self.scaleFactor = self._scaleFactorOldPivot + del self._temporaryPivot self.trainedModel.data.Qs = Qs - self.trainedModel.data.Ps = self._setupNumerator() + self.trainedModel.data.Ps = Ps vbMng(self, "INIT", "Matching poles.", 10) self.trainedModel.initializeFromRational(self.HFEngine, self.matchingWeight, self.POD, self.approx_state) - del self.trainedModel.data._temporary vbMng(self, "DEL", "Done matching poles.", 10) if not np.isinf(self.cutOffTolerance): vbMng(self, "INIT", "Recompressing by cut-off.", 10) msg = self.trainedModel.recompressByCutOff([-1., 1.], self.cutOffTolerance, self.cutOffType) vbMng(self, "DEL", "Done recompressing." + msg, 10) self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) - - def _computeInterpolantInverseBlocks(self) -> Tuple[List[Np2D], Np2D]: - """ - Compute inverse factors for minimal interpolant target functional. - """ - RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") - self._setupPivotInterpolationIndices() - cfun = totalDegreeN if self.polydegreetype == "TOTAL" else fullDegreeN - N = copy(self.N) - while len(self.musPivot) < cfun(N, self.nparPivot): N -= 1 - if N < self.N: - RROMPyWarning(("N too large compared to S. 
Reducing N by " - "{}").format(self.N - N)) - self.N = N - while self.N >= 0: - if self.polydegreetype == "TOTAL": - TE = pvTP(self._musPUniqueCN, self.N, self.polybasisPivot0, - self._derPIdxs, self._reorderP, - scl = np.power(self.scaleFactorPivot, -1.)) - idxsB = totalDegreeMaxMask(self.N, self.nparPivot) - else: #if self.polydegreetype == "FULL": - TE = pvP(self._musPUniqueCN, [self.N] * self.nparPivot, - self.polybasisPivot0, self._derPIdxs, self._reorderP, - scl = np.power(self.scaleFactorPivot, -1.)) - idxsB = fullDegreeMaxMask(self.N, self.nparPivot) - fitOut = customPInv(TE, rcond = self.interpRcondPivot, - full = True) - vbMng(self, "MAIN", - ("Fitting {} samples with degree {} through {}... " - "Conditioning of pseudoinverse system: {:.4e}.").format( - TE.shape[0], self.N, - polyfitname(self.polybasisPivot0), - fitOut[1][1][0] / fitOut[1][1][-1]), - 5) - if fitOut[1][0] == TE.shape[1]: - fitinvP = fitOut[0][idxsB, :] - break - RROMPyWarning("Polyfit is poorly conditioned. Reducing N by 1.") - self.N -= 1 - if self.N < 0: - raise RROMPyException(("Instability in computation of " - "denominator. Aborting.")) - TN = pvTP(self._musPUniqueCN, self.N, self.polybasisPivot0, - self._derPIdxs, self._reorderP, - scl = np.power(self.scaleFactorPivot, -1.)) - invD = [None] * (len(idxsB)) - for k in range(len(idxsB)): - pseudoInv = np.diag(fitinvP[k, :]) - idxGlob = 0 - for j, derIdxs in enumerate(self._derPIdxs): - nder = len(derIdxs) - idxGlob += nder - if nder > 1: - idxLoc = np.arange(len(self.musPivot))[ - (self._reorderP >= idxGlob - nder) - * (self._reorderP < idxGlob)] - invLoc = fitinvP[k, idxLoc] - pseudoInv[np.ix_(idxLoc, idxLoc)] = 0. - for diffj, diffjIdx in enumerate(derIdxs): - for derQ, derQIdx in enumerate(derIdxs): - derUIdx = [x - y for (x, y) in - zip(diffjIdx, derQIdx)] - if all([x >= 0 for x in derUIdx]): - derU = hashD(derUIdx) - pseudoInv[idxLoc[derU], idxLoc[derQ]] = ( - invLoc[diffj]) - invD[k] = dot(pseudoInv, TN) - return invD, fitinvP - - def getResidues(self, *args, **kwargs) -> Np1D: - """ - Obtain approximant residues. - - Returns: - Matrix with residues as columns. - """ - return self.trainedModel.getResidues(*args, **kwargs) diff --git a/rrompy/reduction_methods/standard/rational_interpolant.py b/rrompy/reduction_methods/standard/rational_interpolant.py index d93444d..1026f37 100644 --- a/rrompy/reduction_methods/standard/rational_interpolant.py +++ b/rrompy/reduction_methods/standard/rational_interpolant.py @@ -1,720 +1,721 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import deepcopy as copy import numpy as np from rrompy.reduction_methods.base import checkRobustTolerance from .generic_standard_approximant import GenericStandardApproximant from rrompy.utilities.poly_fitting.polynomial import ( polybases as ppb, polyfitname, polyvander as pvP, polyvanderTotal as pvTP, polyTimes, polyTimesTable, PolynomialInterpolator as PI) from rrompy.utilities.poly_fitting.heaviside import rational2heaviside from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.poly_fitting.moving_least_squares import ( polybases as mlspb, MovingLeastSquaresInterpolator as MLSI) from rrompy.utilities.base.types import (Np1D, Np2D, HFEng, DictAny, Tuple, List, paramVal, sampList) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import (customPInv, dot, fullDegreeN, totalDegreeN, degreeTotalToFull, fullDegreeMaxMask, totalDegreeMaxMask, nextDerivativeIndices, hashDerivativeToIdx as hashD) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) __all__ = ['RationalInterpolant'] class RationalInterpolant(GenericStandardApproximant): """ ROM rational interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 0; - 'N': degree of rational interpolant denominator; defaults to 0; - 'polydegreetype': type of polynomial degree; defaults to 'TOTAL'; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; defaults to 0, i.e. identity; - 'nNearestNeighbor': mumber of nearest neighbors considered in numerator if polybasis allows; defaults to -1; - 'interpRcond': tolerance for interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator management; defaults to 0; - 'centeredLike': whether samples should be managed as if centered; involves making svd and interpolation problems square; defaults - to False. + to False; + - 'correctorForce': whether corrector should forcefully delete bad + poles; defaults to False; + - 'correctorTol': tolerance for corrector step; defaults to 0., + i.e. no bad poles; + - 'correctorMaxIter': maximum number of corrector iterations; + defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'polybasis': type of polynomial basis for interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'polydegreetype': type of polynomial degree; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; - 'nNearestNeighbor': mumber of nearest neighbors considered in numerator if polybasis allows; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust rational denominator management; - 'centeredLike': whether samples should be managed as if centered; - involves making svd and interpolation problems square. + involves making svd and interpolation problems square; + - 'correctorForce': whether corrector should forcefully delete bad + poles; + - 'correctorTol': tolerance for corrector step; + - 'correctorMaxIter': maximum number of corrector iterations. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. polybasis: type of polynomial basis for interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. polydegreetype: Type of polynomial degree. radialDirectionalWeights: Radial basis weights for interpolant numerator. nNearestNeighbor: Number of nearest neighbors considered in numerator if polybasis allows. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust rational denominator management. centeredLike: Whether samples should be managed as if centered; involves making svd and interpolation problems square. + correctorForce: Whether corrector should forcefully delete bad poles. + correctorTol: Tolerance for corrector step. + correctorMaxIter: Maximum number of corrector iterations. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. 
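# Illustrative sketch (hypothetical stand-in, not RROMPy's implementation): how
# robustTol is used by _setupDenominator below. checkRobustTolerance's exact rule
# is not shown in this diff; the stand-in assumes it counts eigenvalues whose
# magnitude falls below robustTol times the largest one. If more than one
# eigenvalue is that small, the fit is close to rank-deficient and N is reduced
# by 1 before retrying.
import numpy as np

def check_robust_tolerance(ev, robustTol):
    # hypothetical stand-in for rrompy.reduction_methods.base.checkRobustTolerance
    return int(np.sum(np.abs(ev) < robustTol * np.max(np.abs(ev))))

ev = np.array([1e-14, 1e-13, 1e-2, 1e-1, 1.])   # made-up eigenvalues from findeveVGQR
nevBad = check_robust_tolerance(ev, robustTol = 1e-8)
# nevBad == 2 here, so the denominator degree N would be lowered by 1.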
""" def __init__(self, HFEngine:HFEng, mu0 : paramVal = None, approxParameters : DictAny = {}, approx_state : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["polybasis", "M", "N", "polydegreetype", "radialDirectionalWeights", "nNearestNeighbor", "interpRcond", "robustTol", "centeredLike", - "correctorKind", "correctorTol", + "correctorForce", "correctorTol", "correctorMaxIter"], ["MONOMIAL", 0, 0, "TOTAL", 1, -1, -1, 0, - False, "CONSERVATIVE", 0., 1]) + False, False, 0., 1]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, approx_state = approx_state, verbosity = verbosity, timestamp = timestamp) self.catchInstability = False self._postInit() @property def tModelType(self): from rrompy.reduction_methods.trained_model import TrainedModelRational return TrainedModelRational @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in ppb + rbpb + mlspb: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def polybasis0(self): if "_" in self.polybasis: return self.polybasis.split("_")[0] return self.polybasis @property def interpRcond(self): """Value of interpRcond.""" return self._interpRcond @interpRcond.setter def interpRcond(self, interpRcond): self._interpRcond = interpRcond self._approxParameters["interpRcond"] = self.interpRcond @property def radialDirectionalWeights(self): """Value of radialDirectionalWeights.""" return self._radialDirectionalWeights @radialDirectionalWeights.setter def radialDirectionalWeights(self, radialDirectionalWeights): self._radialDirectionalWeights = radialDirectionalWeights self._approxParameters["radialDirectionalWeights"] = ( self.radialDirectionalWeights) @property def nNearestNeighbor(self): """Value of nNearestNeighbor.""" return self._nNearestNeighbor @nNearestNeighbor.setter def nNearestNeighbor(self, nNearestNeighbor): self._nNearestNeighbor = nNearestNeighbor self._approxParameters["nNearestNeighbor"] = self.nNearestNeighbor @property def M(self): """Value of M.""" return self._M @M.setter def M(self, M): if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M @property def N(self): """Value of N.""" return self._N @N.setter def N(self, N): if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N @property def polydegreetype(self): """Value of polydegreetype.""" return self._polydegreetype @polydegreetype.setter def polydegreetype(self, polydegreetype): try: polydegreetype = polydegreetype.upper().strip().replace(" ","") if polydegreetype not in ["TOTAL", "FULL"]: raise RROMPyException(("Prescribed polydegreetype not " "recognized.")) self._polydegreetype = polydegreetype except: RROMPyWarning(("Prescribed polydegreetype not recognized. 
" "Overriding to 'TOTAL'.")) self._polydegreetype = "TOTAL" self._approxParameters["polydegreetype"] = self.polydegreetype @property def robustTol(self): """Value of tolerance for robust rational denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol @property def centeredLike(self): """Whether samples should be managed as if centered.""" return self._centeredLike @centeredLike.setter def centeredLike(self, centeredLike): if centeredLike and not hasattr(self, "_centeredLike"): RROMPyWarning(("Centered-like method is unstable for more than " "one parameter.")) self._centeredLike = centeredLike self._approxParameters["centeredLike"] = self.centeredLike @property - def correctorKind(self): - """Value of correctorKind.""" - return self._correctorKind - @correctorKind.setter - def correctorKind(self, correctorKind): - try: - correctorKind = correctorKind.upper().strip().replace(" ","") - if correctorKind not in ["CONSERVATIVE", "MINIMAL"]: - raise RROMPyException(("Prescribed correctorKind not " - "recognized.")) - self._correctorKind = correctorKind - except: - RROMPyWarning(("Overriding prescribed corrector tolerance " - "to 'CONSERVATIVE'.")) - self._correctorKind = "CONSERVATIVE" - self._approxParameters["correctorKind"] = self.correctorKind + def correctorForce(self): + """Value of correctorForce.""" + return self._correctorForce + @correctorForce.setter + def correctorForce(self, correctorForce): + self._correctorForce = correctorForce + self._approxParameters["correctorForce"] = self.correctorForce @property def correctorTol(self): """Value of correctorTol.""" return self._correctorTol @correctorTol.setter def correctorTol(self, correctorTol): if correctorTol < 0. or (correctorTol > 0. and self.npar > 1): RROMPyWarning(("Overriding prescribed corrector tolerance " "to 0.")) correctorTol = 0. 
self._correctorTol = correctorTol self._approxParameters["correctorTol"] = self.correctorTol - + @property def correctorMaxIter(self): """Value of correctorMaxIter.""" return self._correctorMaxIter @correctorMaxIter.setter def correctorMaxIter(self, correctorMaxIter): if correctorMaxIter < 1 or (correctorMaxIter > 1 and self.npar > 1): RROMPyWarning(("Overriding prescribed max number of corrector " "iterations to 1.")) correctorMaxIter = 1 self._correctorMaxIter = correctorMaxIter self._approxParameters["correctorMaxIter"] = self.correctorMaxIter def resetSamples(self): """Reset samples.""" super().resetSamples() self._musUniqueCN = None self._derIdxs = None self._reorder = None def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" if self._musUniqueCN is None or len(self._reorder) != len(self.mus): self._musUniqueCN, musIdxsTo, musIdxs, musCount = ( self.trainedModel.centerNormalize(self.mus).unique( return_index = True, return_inverse = True, return_counts = True)) if self.centeredLike and len(self._musUniqueCN) > 1: raise RROMPyException(("Cannot apply centered-like method " "with more than one distinct sample " "point.")) self._musUnique = self.mus[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.mus.shape[1], cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt def _setupDenominator(self): """Compute rational denominator.""" RROMPyAssert(self._mode, message = "Cannot setup denominator.") vbMng(self, "INIT", "Starting computation of denominator.", 7) while self.N > 0: invD, fitinv = self._computeInterpolantInverseBlocks() if self.centeredLike: if self.polydegreetype == "TOTAL": Seff = totalDegreeN(self.N, self.npar) else: Seff = fullDegreeN(self.N, self.npar) else: Seff = self.S idxSamplesEff = list(range(self.S - Seff, self.S)) if self.POD: ev, eV = self.findeveVGQR( self.samplingEngine.RPOD[:, idxSamplesEff], invD) else: ev, eV = self.findeveVGExplicit( self.samplingEngine.samples(idxSamplesEff), invD) nevBad = checkRobustTolerance(ev, self.robustTol) if nevBad <= 1: break if self.catchInstability: raise RROMPyException(("Instability in denominator " "computation: eigenproblem is poorly " "conditioned.")) RROMPyWarning(("Smallest {} eigenvalues below tolerance. 
Reducing " "N by 1.").format(nevBad)) self.N = self.N - 1 if self.N <= 0: self._N = 0 eV = np.ones((1, 1)) q = PI() q.npar = self.npar q.polybasis = self.polybasis0 if self.polydegreetype == "TOTAL": q.coeffs = degreeTotalToFull(tuple([self.N + 1] * self.npar), self.npar, eV[:, 0]) else: q.coeffs = eV[:, 0].reshape([self.N + 1] * self.npar) vbMng(self, "DEL", "Done computing denominator.", 7) return q, fitinv def _setupNumerator(self): """Compute rational numerator.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of numerator.", 7) - verb = self.trainedModel.verbosity - self.trainedModel.verbosity = 0 self._setupInterpolationIndices() Qevaldiag = polyTimesTable(self.trainedModel.data.Q, self._musUniqueCN, self._reorder, self._derIdxs, np.power(self.scaleFactor, -1.)) if self.POD: Qevaldiag = Qevaldiag.dot(self.samplingEngine.RPOD.T) - self.trainedModel.verbosity = verb cfun = totalDegreeN if self.polydegreetype == "TOTAL" else fullDegreeN M = copy(self.M) while self.S < cfun(M, self.npar): M -= 1 if M < self.M: RROMPyWarning(("M too large compared to S. Reducing M by " "{}").format(self.M - M)) self.M = M while self.M >= 0: if self.centeredLike: Seff = cfun(self.M, self.npar) derIdxsEff = [self._derIdxs[0][: Seff]] reorder = self._reorder[: Seff] QevaldiagEff = Qevaldiag[: Seff, : Seff] else: derIdxsEff = self._derIdxs reorder = self._reorder QevaldiagEff = Qevaldiag if self.polybasis in ppb: p = PI() wellCond, msg = p.setupByInterpolation( self._musUniqueCN, QevaldiagEff, self.M, self.polybasis, self.verbosity >= 5, self.polydegreetype == "TOTAL", {"derIdxs": derIdxsEff, "reorder": reorder, "scl": np.power(self.scaleFactor, -1.)}, {"rcond": self.interpRcond}) elif self.polybasis in rbpb: p = RBI() wellCond, msg = p.setupByInterpolation( self._musUniqueCN, QevaldiagEff, self.M, self.polybasis, self.radialDirectionalWeights, self.verbosity >= 5, self.polydegreetype == "TOTAL", {"derIdxs": derIdxsEff, "reorder": reorder, "scl": np.power(self.scaleFactor, -1.), "nNearestNeighbor": self.nNearestNeighbor}, {"rcond": self.interpRcond}) else:# if self.polybasis in mlspb: p = MLSI() wellCond, msg = p.setupByInterpolation( self._musUniqueCN, QevaldiagEff, self.M, self.polybasis, self.radialDirectionalWeights, self.verbosity >= 5, self.polydegreetype == "TOTAL", {"derIdxs": derIdxsEff, "reorder": reorder, "scl": np.power(self.scaleFactor, -1.), "nNearestNeighbor": self.nNearestNeighbor}) vbMng(self, "MAIN", msg, 5) if wellCond: break if self.catchInstability: raise RROMPyException(("Instability in numerator computation: " "polyfit is poorly conditioned.")) RROMPyWarning("Polyfit is poorly conditioned. Reducing M by 1.") self.M = self.M - 1 if self.M < 0: raise RROMPyException(("Instability in computation of numerator. " "Aborting.")) vbMng(self, "DEL", "Done computing numerator.", 7) return p def setupApprox(self): """Compute rational interpolant.""" if self.checkComputedApprox(): return RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". 
format(self.name()), 5) self.computeSnapshots() pMat = self.samplingEngine.samples.data pMatEff = dot(self.HFEngine.C, pMat) if self.approx_state else pMat if self.trainedModel is None: self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp datadict = {"mu0": self.mu0, "projMat": pMatEff, "scaleFactor": self.scaleFactor, "rescalingExp": self.HFEngine.rescalingExp} self.trainedModel.data = self.initializeModelData(datadict)[0] else: self.trainedModel = self.trainedModel self.trainedModel.data.projMat = copy(pMatEff) self.trainedModel.data.mus = copy(self.mus) self._iterCorrector() self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) def _iterCorrector(self): self._N0 = self.N if self.correctorTol > 0. and (self.correctorMaxIter > 1 - or self.correctorKind == "MINIMAL"): - vbMng(self, "INIT", "Starting correction iterations.", 15) + or self.correctorForce): + vbMng(self, "INIT", "Starting correction iterations.", 7) self._Qhat = PI() self._Qhat.npar = self.npar self._Qhat.polybasis = "MONOMIAL" self._Qhat.coeffs = np.ones(1, dtype = np.complex) if self.POD: self._RPODOld = copy(self.samplingEngine.RPOD) else: self._samplesOld = copy(self.samplingEngine.samples) for j in range(self.correctorMaxIter): if self.N > 0: Q = self._setupDenominator()[0] else: Q = PI() Q.coeffs = np.ones((1,) * self.npar, dtype = np.complex) Q.npar = self.npar Q.polybasis = self.polybasis self.trainedModel.data.Q = Q self.trainedModel.data.P = self._setupNumerator() self._applyCorrector(j) if self.N <= 0: break self._N = self._N0 del self._N0 if self.correctorTol <= 0. or (self.correctorMaxIter <= 1 - and self.correctorKind == "CONSERVATIVE"): + and not self.correctorForce): return if self.POD: self.samplingEngine.RPOD = self._RPODOld del self._RPODOld else: self.samplingEngine.samples = self._samplesOld del self._samplesOld - if self.correctorKind == "CONSERVATIVE": - QOld, QOldBasis = self.trainedModel.data.Q.coeffs, self.polybasis - else: + if self.correctorForce: QOld, QOldBasis = [1.], "MONOMIAL" + else: + QOld, QOldBasis = Q.coeffs, self.polybasis Q = polyTimes(self._Qhat.coeffs, QOld, Pbasis = self._Qhat.polybasis, Qbasis = QOldBasis, Rbasis = self.polybasis) del self._Qhat gamma = np.linalg.norm(Q) self.trainedModel.data.Q.coeffs = np.pad(Q, (0, self._N - len(Q) + 1), "constant") / gamma - if self.correctorKind == "CONSERVATIVE": - self.trainedModel.data.P.coeffs /= gamma - else: + if self.correctorForce: self.trainedModel.data.P = self._setupNumerator() - vbMng(self, "DEL", "Terminated correction iterations.", 15) + else: + self.trainedModel.data.P.coeffs /= gamma + vbMng(self, "DEL", "Terminated correction iterations.", 7) def _applyCorrector(self, j:int): if self.correctorTol <= 0. or (j >= self.correctorMaxIter - 1 - and self.correctorKind == "CONSERVATIVE"): + and not self.correctorForce): self._N = 0 return cfs, pls, _ = rational2heaviside(self.trainedModel.data.P, self.trainedModel.data.Q) cfs = cfs[: self.N] if self.POD: resEff = np.linalg.norm(cfs, axis = 1) else: resEff = self.HFEngine.norm(self.samplingEngine.samples.dot(cfs.T), is_state = self.approx_state) resEff /= np.max(np.hstack((np.ones((self.N, 1)), np.abs(pls).reshape(-1, 1))), axis = 1) - resEff /= np.linalg.norm(resEff) + resEff /= np.mean(resEff) idxKeep = np.where(resEff >= self.correctorTol)[0] vbMng(self, "MAIN", ("Correction iteration no. 
{}: {} out of {} residuals satisfy " - "tolerance.").format(j + 1, len(idxKeep), self.N), 15) + "tolerance.").format(j + 1, len(idxKeep), self.N), 10) self._N -= len(idxKeep) - if self.N <= 0 and self.correctorKind == "CONSERVATIVE": return + if self.N <= 0 and not self.correctorForce: return for i in idxKeep: self._Qhat.coeffs = polyTimes(self._Qhat.coeffs, [- pls[i], 1.], Pbasis = self._Qhat.polybasis, Rbasis = self._Qhat.polybasis) self._Qhat.coeffs /= np.linalg.norm(self._Qhat.coeffs) if self.N <= 0: return vbMng(self, "MAIN", ("Removing poles at (normalized positions): " - "{}.").format(pls[resEff < self.correctorTol]), 75) + "{}.").format(pls[resEff < self.correctorTol]), 65) That = polyTimesTable(self._Qhat, self._musUniqueCN, self._reorder, self._derIdxs, np.power(self.scaleFactor, -1.)).T if self.POD: self.samplingEngine.RPOD = self._RPODOld.dot(That) else: self.samplingEngine.samples = self._samplesOld.dot(That) def _computeInterpolantInverseBlocks(self) -> Tuple[List[Np2D], Np2D]: """ Compute inverse factors for minimal interpolant target functional. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") self._setupInterpolationIndices() cfun = totalDegreeN if self.polydegreetype == "TOTAL" else fullDegreeN N = copy(self.N) while self.S < cfun(N, self.npar): N -= 1 if N < self.N: RROMPyWarning(("N too large compared to S. Reducing N by " "{}").format(self.N - N)) self.N = N while self.N >= 0: if self.centeredLike: Seff = cfun(self.N, self.npar) derIdxsEff = [self._derIdxs[0][: Seff]] reorder = self._reorder[: Seff] else: Seff = self.S derIdxsEff = self._derIdxs reorder = self._reorder if self.polydegreetype == "TOTAL": TE = pvTP(self._musUniqueCN, self.N, self.polybasis0, derIdxsEff, reorder, scl = np.power(self.scaleFactor, -1.)) idxsB = totalDegreeMaxMask(self.N, self.npar) else: #if self.polydegreetype == "FULL": TE = pvP(self._musUniqueCN, [self.N] * self.npar, self.polybasis0, derIdxsEff, reorder, scl = np.power(self.scaleFactor, -1.)) idxsB = fullDegreeMaxMask(self.N, self.npar) fitOut = customPInv(TE, rcond = self.interpRcond, full = True) vbMng(self, "MAIN", ("Fitting {} samples with degree {} through {}... " "Conditioning of pseudoinverse system: {:.4e}.").format( TE.shape[0], self.N, polyfitname(self.polybasis0), fitOut[1][1][0] / fitOut[1][1][-1]), 5) if fitOut[1][0] == TE.shape[1]: fitinv = fitOut[0][idxsB, :] break if self.catchInstability: raise RROMPyException(("Instability in denominator " "computation: polyfit is poorly " "conditioned.")) RROMPyWarning("Polyfit is poorly conditioned. Reducing N by 1.") self.N = self.N - 1 if self.polydegreetype == "TOTAL": TN = pvTP(self._musUniqueCN, self.N, self.polybasis0, derIdxsEff, reorder, scl = np.power(self.scaleFactor, -1.)) else: #if self.polydegreetype == "FULL": TN = pvP(self._musUniqueCN, [self.N] * self.npar, self.polybasis0, derIdxsEff, reorder, scl = np.power(self.scaleFactor, -1.)) invD = [None] * (len(idxsB)) for k in range(len(idxsB)): pseudoInv = np.diag(fitinv[k, :]) idxGlob = 0 for j, derIdxs in enumerate(derIdxsEff): nder = len(derIdxs) idxGlob += nder if nder > 1: idxLoc = np.arange(Seff)[(reorder >= idxGlob - nder) * (reorder < idxGlob)] invLoc = fitinv[k, idxLoc] pseudoInv[np.ix_(idxLoc, idxLoc)] = 0. 
for diffj, diffjIdx in enumerate(derIdxs): for derQ, derQIdx in enumerate(derIdxs): derUIdx = [x - y for (x, y) in zip(diffjIdx, derQIdx)] if all([x >= 0 for x in derUIdx]): derU = hashD(derUIdx) pseudoInv[idxLoc[derU], idxLoc[derQ]] = ( invLoc[diffj]) invD[k] = dot(pseudoInv, TN) return invD, fitinv def findeveVGExplicit(self, sampleE:sampList, invD:List[Np2D]) -> Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of rational denominator matrix. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") nEnd = invD[0].shape[1] eWidth = len(invD) vbMng(self, "INIT", "Building gramian matrix.", 10) gramian = self.HFEngine.innerProduct(sampleE, sampleE, is_state = self.approx_state) G = np.zeros((nEnd, nEnd), dtype = np.complex) for k in range(eWidth): G += dot(dot(gramian, invD[k]).T, invD[k].conj()).T vbMng(self, "DEL", "Done building gramian.", 10) vbMng(self, "INIT", "Solving eigenvalue problem for gramian matrix.", 7) ev, eV = np.linalg.eigh(G) vbMng(self, "MAIN", ("Solved eigenvalue problem of size {} with condition number " "{:.4e}.").format(nEnd, ev[-1] / ev[0]), 5) vbMng(self, "DEL", "Done solving eigenvalue problem.", 7) return ev, eV def findeveVGQR(self, RPODE:Np2D, invD:List[Np2D]) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of rational denominator matrix through SVD of R factor. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") nEnd = invD[0].shape[1] S = RPODE.shape[0] eWidth = len(invD) vbMng(self, "INIT", "Building half-gramian matrix stack.", 10) Rstack = np.zeros((S * eWidth, nEnd), dtype = np.complex) for k in range(eWidth): Rstack[k * S : (k + 1) * S, :] = dot(RPODE, invD[k]) vbMng(self, "DEL", "Done building half-gramian.", 10) vbMng(self, "INIT", "Solving svd for square root of gramian matrix.", 7) _, s, eV = np.linalg.svd(Rstack, full_matrices = False) ev = s[::-1] eV = eV[::-1, :].T.conj() vbMng(self, "MAIN", ("Solved svd problem of size {} x {} with condition number " "{:.4e}.").format(*Rstack.shape, s[0] / s[-1]), 5) vbMng(self, "DEL", "Done solving svd.", 7) return ev, eV def getResidues(self, *args, **kwargs) -> Np1D: """ Obtain approximant residues. Returns: Matrix with residues as columns. """ return self.trainedModel.getResidues(*args, **kwargs) diff --git a/rrompy/reduction_methods/standard/rational_moving_least_squares.py b/rrompy/reduction_methods/standard/rational_moving_least_squares.py index 8cd3013..cb0377c 100644 --- a/rrompy/reduction_methods/standard/rational_moving_least_squares.py +++ b/rrompy/reduction_methods/standard/rational_moving_least_squares.py @@ -1,340 +1,340 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import deepcopy as copy import numpy as np from .rational_interpolant import RationalInterpolant from rrompy.utilities.poly_fitting.polynomial import (polybases as ppb, polyvander as pvP, polyvanderTotal as pvTP) from rrompy.utilities.base.types import Np2D, HFEng, DictAny, paramVal from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import (fullDegreeMaxMask, totalDegreeMaxMask, dot) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) __all__ = ['RationalMovingLeastSquares'] class RationalMovingLeastSquares(RationalInterpolant): """ ROM rational moving LS interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 0; - 'N': degree of rational interpolant denominator; defaults to 0; - 'polydegreetype': type of polynomial degree; defaults to 'TOTAL'; - 'radialBasis': numerator radial basis type; defaults to 'GAUSSIAN'; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; defaults to 0, i.e. identity; - 'nNearestNeighbor': number of nearest neighbors considered in numerator if radialBasis allows; defaults to -1; - 'radialBasisDen': denominator radial basis type; defaults to 'GAUSSIAN'; - 'radialDirectionalWeightsDen': radial basis weights for interpolant denominator; defaults to 0, i.e. identity; - 'nNearestNeighborDen': number of nearest neighbors considered in denominator if radialBasisDen allows; defaults to -1; - 'interpRcond': tolerance for interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'polybasis': type of polynomial basis for interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'polydegreetype': type of polynomial degree; - 'radialBasis': numerator radial basis type; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; - 'nNearestNeighbor': number of nearest neighbors considered in numerator if radialBasis allows; - 'radialBasisDen': denominator radial basis type; - 'radialDirectionalWeightsDen': radial basis weights for interpolant denominator; - 'nNearestNeighborDen': number of nearest neighbors considered in denominator if radialBasisDen allows; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust rational denominator management. 
parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. polybasis: type of polynomial basis for interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. polydegreetype: Type of polynomial degree. radialBasis: Numerator radial basis type. radialDirectionalWeights: Radial basis weights for interpolant numerator. nNearestNeighbor: Number of nearest neighbors considered in numerator if radialBasis allows. radialBasisDen: Denominator radial basis type. radialDirectionalWeightsDen: Radial basis weights for interpolant denominator. nNearestNeighborDen: Number of nearest neighbors considered in denominator if radialBasisDen allows. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust rational denominator management. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ def __init__(self, HFEngine:HFEng, mu0 : paramVal = None, approxParameters : DictAny = {}, approx_state : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["radialBasis", "radialBasisDen", "radialDirectionalWeightsDen", "nNearestNeighborDen"], ["GAUSSIAN", "GAUSSIAN", 1, -1], - toBeExcluded = ["correctorKind", + toBeExcluded = ["correctorForce", "correctorTol", "correctorMaxIter"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, approx_state = approx_state, verbosity = verbosity, timestamp = timestamp) self.catchInstability = False self._postInit() @property - def correctorKind(self): - """Value of correctorKind.""" - return "CONSERVATIVE" - @correctorKind.setter - def correctorKind(self, correctorKind): - RROMPyWarning(("correctorKind is used just to simplify inheritance, " - "and its value cannot be changed from 'CONSERVATIVE'.")) + def correctorForce(self): + """Value of correctorForce.""" + return False + @correctorForce.setter + def correctorForce(self, correctorForce): + RROMPyWarning(("correctorForce is used just to simplify inheritance, " + "and its value cannot be changed from False.")) @property def correctorTol(self): """Value of correctorTol.""" return 0. 
@correctorTol.setter def correctorTol(self, correctorTol): RROMPyWarning(("correctorTol is used just to simplify inheritance, " "and its value cannot be changed from 0.")) @property def correctorMaxIter(self): """Value of correctorMaxIter.""" return 1 @correctorMaxIter.setter def correctorMaxIter(self, correctorMaxIter): RROMPyWarning(("correctorMaxIter is used just to simplify " "inheritance, and its value cannot be changed from 1.")) @property def tModelType(self): from rrompy.reduction_methods.trained_model import \ TrainedModelRationalMLS return TrainedModelRationalMLS @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in ppb: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def radialBasis(self): """Value of radialBasis.""" return self._radialBasis @radialBasis.setter def radialBasis(self, radialBasis): self._radialBasis = radialBasis self._approxParameters["radialBasis"] = self.radialBasis @property def radialBasisDen(self): """Value of radialBasisDen.""" return self._radialBasisDen @radialBasisDen.setter def radialBasisDen(self, radialBasisDen): self._radialBasisDen = radialBasisDen self._approxParameters["radialBasisDen"] = self.radialBasisDen @property def radialDirectionalWeightsDen(self): """Value of radialDirectionalWeightsDen.""" return self._radialDirectionalWeightsDen @radialDirectionalWeightsDen.setter def radialDirectionalWeightsDen(self, radialDirectionalWeightsDen): self._radialDirectionalWeightsDen = radialDirectionalWeightsDen self._approxParameters["radialDirectionalWeightsDen"] = ( self.radialDirectionalWeightsDen) @property def nNearestNeighborDen(self): """Value of nNearestNeighborDen.""" return self._nNearestNeighborDen @nNearestNeighborDen.setter def nNearestNeighborDen(self, nNearestNeighborDen): self._nNearestNeighborDen = nNearestNeighborDen self._approxParameters["nNearestNeighborDen"] = ( self.nNearestNeighborDen) def _setupDenominator(self) -> Np2D: """Compute rational denominator.""" RROMPyAssert(self._mode, message = "Cannot setup denominator.") vbMng(self, "INIT", "Starting computation of denominator-related blocks.", 7) self._setupInterpolationIndices() if self.polydegreetype == "TOTAL": TN = pvTP(self._musUniqueCN, self.N, self.polybasis0, self._derIdxs, self._reorder, scl = np.power(self.scaleFactor, -1.)) else: #if self.polydegreetype == "FULL": TN = pvP(self._musUniqueCN, [self.N] * self.npar, self.polybasis0, self._derIdxs, self._reorder, scl = np.power(self.scaleFactor, -1.)) TNTen = np.zeros((self.S, self.S, TN.shape[1]), dtype = TN.dtype) TNTen[np.arange(self.S), np.arange(self.S)] = TN if self.POD: TNTen = dot(self.samplingEngine.RPOD, TNTen) vbMng(self, "DEL", "Done computing denominator-related blocks.", 7) return TN, TNTen def _setupNumerator(self) -> Np2D: """Compute rational numerator.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of denominator-related blocks.", 7) self._setupInterpolationIndices() if self.polydegreetype == "TOTAL": TM = pvTP(self._musUniqueCN, self.M, self.polybasis0, self._derIdxs, self._reorder, scl = np.power(self.scaleFactor, -1.)) else: #if self.polydegreetype == "FULL": TM = 
pvP(self._musUniqueCN, [self.M] * self.npar, self.polybasis0, self._derIdxs, self._reorder, scl = np.power(self.scaleFactor, -1.)) vbMng(self, "DEL", "Done computing denominator-related blocks.", 7) return TM def setupApprox(self): """ Compute rational interpolant. SVD-based robust eigenvalue management. """ if self.checkComputedApprox(): return RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) self.computeSnapshots() pMat = self.samplingEngine.samples.data pMatEff = dot(self.HFEngine.C, pMat) if self.approx_state else pMat if self.trainedModel is None: self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp datadict = {"mu0": self.mu0, "projMat": pMatEff, "scaleFactor": self.scaleFactor, "rescalingExp": self.HFEngine.rescalingExp} data = self.initializeModelData(datadict)[0] data.POD = self.POD data.polybasis = self.polybasis data.polydegreetype = self.polydegreetype data.radialBasis = self.radialBasis data.radialWeights = self.radialDirectionalWeights data.nNearestNeighbor = self.nNearestNeighbor data.radialBasisDen = self.radialBasisDen data.radialWeightsDen = self.radialDirectionalWeightsDen data.nNearestNeighborDen = self.nNearestNeighborDen data.interpRcond = self.interpRcond self.trainedModel.data = data else: self.trainedModel = self.trainedModel self.trainedModel.data.projMat = copy(pMatEff) if not self.POD: self.trainedModel.data.gramian = self.HFEngine.innerProduct( self.samplingEngine.samples, self.samplingEngine.samples, is_state = self.approx_state) self.trainedModel.data.mus = copy(self.mus) self.trainedModel.data.M = self.M self.trainedModel.data.N = self.N QVan, self.trainedModel.data.QBlocks = self._setupDenominator() self.trainedModel.data.PVan = self._setupNumerator() if self.polydegreetype == "TOTAL": degreeMaxMask = totalDegreeMaxMask else: #if self.polydegreetype == "FULL": degreeMaxMask = fullDegreeMaxMask if self.N > self.M: self.trainedModel.data.QVan = QVan self.trainedModel.data.domQIdxs = degreeMaxMask(self.N, self.npar) else: self.trainedModel.data.domQIdxs = degreeMaxMask(self.M, self.npar) self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) diff --git a/rrompy/reduction_methods/trained_model/trained_model_pivoted_general.py b/rrompy/reduction_methods/trained_model/trained_model_pivoted_general.py index 4607347..4aeb57b 100644 --- a/rrompy/reduction_methods/trained_model/trained_model_pivoted_general.py +++ b/rrompy/reduction_methods/trained_model/trained_model_pivoted_general.py @@ -1,382 +1,386 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import numpy as np from scipy.special import factorial as fact from itertools import combinations from .trained_model import TrainedModel from rrompy.utilities.base.types import (Np1D, Tuple, List, ListAny, paramVal, paramList, sampList, HFEng) from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp -from rrompy.utilities.numerical import pointMatching +from rrompy.utilities.numerical import pointMatching, chordalMetricTable from rrompy.utilities.poly_fitting.heaviside import HeavisideInterpolator as HI from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert from rrompy.parameter import checkParameter, checkParameterList from rrompy.sampling import emptySampleList, sampleList __all__ = ['TrainedModelPivotedGeneral'] class TrainedModelPivotedGeneral(TrainedModel): """ ROM approximant evaluation for pivoted approximants (with pole matching). Attributes: Data: dictionary with all that can be pickled. """ def centerNormalizePivot(self, mu : paramList = [], mu0 : paramVal = None) -> paramList: """ Compute normalized parameter to be plugged into approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.data.mu0Pivot. Returns: Normalized parameter. """ mu = checkParameterList(mu, self.data.nparPivot)[0] if mu0 is None: mu0 = self.data.mu0Pivot rad = ((mu ** self.data.rescalingExpPivot - mu0 ** self.data.rescalingExpPivot) / self.data.scaleFactorPivot) return rad def centerNormalizeMarginal(self, mu : paramList = [], mu0 : paramVal = None) -> paramList: """ Compute normalized parameter to be plugged into approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.data.mu0Marginal. Returns: Normalized parameter. """ mu = checkParameterList(mu, self.data.nparMarginal)[0] if mu0 is None: mu0 = self.data.mu0Marginal rad = ((mu ** self.data.rescalingExpMarginal - mu0 ** self.data.rescalingExpMarginal) / self.data.scaleFactorMarginal) return rad def initializeFromLists(self, poles:ListAny, coeffs:ListAny, basis:str, HFEngine:HFEng, matchingWeight : float = 1., POD : bool = True, is_state : bool = True): """Initialize Heaviside representation.""" musM = self.data.musMarginal margAbsDist = np.sum(np.abs(np.repeat(musM.data, len(musM), 0) - np.tile(musM.data, [len(musM), 1]) ), axis = 1).reshape(len(musM), len(musM)) N = len(poles[0]) explored = [0] unexplored = list(range(1, len(musM))) + badPoles = list(np.where(np.isinf(poles[0]))[0]) for _ in range(1, len(musM)): minIdx = np.argmin(np.concatenate([margAbsDist[ex, unexplored] \ for ex in explored])) minIex = explored[minIdx // len(unexplored)] minIunex = unexplored[minIdx % len(unexplored)] - dist = np.abs(np.tile(poles[minIex].reshape(-1, 1), N) - - poles[minIunex].reshape(1, -1)) - # chordal metric - dist = dist.T * (np.abs(poles[minIex]) ** 2. + 1.) ** -.5 - dist = dist.T * (np.abs(poles[minIunex]) ** 2. + 1.) 
** -.5 + dist = chordalMetricTable(poles[minIex], poles[minIunex]) + yInf = np.where(np.isinf(poles[minIunex]))[0] + badPoles += list(yInf) if matchingWeight != 0: resex = coeffs[minIex][: N] resunex = coeffs[minIunex][: N] + xInf = np.where(np.isinf(poles[minIex]))[0] if POD: - distR = resex.dot(resunex.T.conj()) - distR = (distR.T / np.linalg.norm(resex, axis = 1)).T - distR = distR / np.linalg.norm(resunex, axis = 1) + distR = resunex.dot(resex.T.conj()) + normex = np.linalg.norm(resex, axis = 1) + normunex = np.linalg.norm(resunex, axis = 1) else: resex = self.data.projMat.dot(resex.T) resunex = self.data.projMat.dot(resunex.T) distR = HFEngine.innerProduct(resex, resunex, - is_state = is_state).T - distR = (distR.T / HFEngine.norm(resex, - is_state = is_state)).T - distR = distR / HFEngine.norm(resunex, is_state = is_state) + is_state = is_state) + normex = HFEngine.norm(resex, is_state = is_state) + normunex = HFEngine.norm(resunex, is_state = is_state) + normex[xInf], normunex[yInf] = 1., 1. + distR = (distR / normex).T / normunex distR = np.abs(distR) distR[distR > 1.] = 1. + distR[xInf], distR[:, yInf] = 0., 0. dist += 2. / np.pi * matchingWeight * np.arccos(distR) reordering = pointMatching(dist) poles[minIunex] = poles[minIunex][reordering] coeffs[minIunex][: N] = coeffs[minIunex][reordering] explored += [minIunex] unexplored.remove(minIunex) + goodPoles = [i for i in range(N) if i not in badPoles] + goodPolesExt = goodPoles + list(range(N, len(coeffs[0]))) HIs = [] for pls, cfs in zip(poles, coeffs): hsi = HI() - hsi.poles = pls - hsi.coeffs = cfs + hsi.poles = pls[goodPoles] + hsi.coeffs = cfs[goodPolesExt] hsi.npar = 1 hsi.polybasis = basis HIs += [hsi] self.data.HIs = HIs def recompressByCutOff(self, murange : Tuple[float, float] = [- 1., 1.], tol : float = np.inf, rtype : str = "MAGNITUDE"): if np.isinf(tol): return " No poles erased." N = len(self.data.HIs[0].poles) mu0 = np.mean(murange) musig = murange[0] - mu0 if np.isclose(musig, 0.): radius = lambda x: np.abs(x - mu0) else: if rtype == "MAGNITUDE": murdir = (murange[0] - mu0) / np.abs(musig) def radius(x): scalprod = (x - mu0) * murdir.conj() / np.abs(musig) rescalepar = np.abs(np.real(scalprod)) rescaleort = np.abs(np.imag(scalprod)) return ((rescalepar - 1.) ** 2. * (rescalepar > 1.) + rescaleort ** 2.) ** .5 else:#if rtype == "POTENTIAL": def radius(x): rescale = (x - mu0) / musig return np.max(np.abs(rescale * np.array([-1., 1.]) + (rescale ** 2. - 1) ** .5)) - 1. keepPole, removePole = [], [] for j in range(N): for hi in self.data.HIs: if radius(hi.poles[j]) <= tol: keepPole += [j] break if len(keepPole) == 0 or keepPole[-1] != j: removePole += [j] if len(keepPole) == N: return " No poles erased." keepCoeff = keepPole + [N] keepCoeff = keepCoeff + list(range(N + 1,len(self.data.HIs[0].coeffs))) for hi in self.data.HIs: polyCorrection = np.zeros_like(hi.coeffs[0, :]) for j in removePole: polyCorrection += hi.coeffs[j, :] / (mu0 - hi.poles[j]) if len(hi.coeffs) == N: hi.coeffs = np.vstack((hi.coeffs, polyCorrection)) else: hi.coeffs[N, :] += polyCorrection hi.poles = hi.poles[keepPole] hi.coeffs = hi.coeffs[keepCoeff, :] return (" Erased {} pole".format(len(removePole)) + "s" * (len(removePole) > 1) + ".") def interpolateMarginal(self, mu : paramList = [], samples : ListAny = [], der : List[int] = None, scl : Np1D = None) -> sampList: """ Evaluate marginal interpolator at arbitrary marginal parameter. Args: mu: Target parameter. samples: Objects to interpolate. der(optional): Derivatives to take before evaluation. 
""" mu = checkParameterList(mu, self.data.nparMarginal)[0] sList = isinstance(samples[0], sampleList) sEff = [None] * len(samples) for j in range(len(samples)): if sList: sEff[j] = samples[j].data else: sEff[j] = samples[j] try: dtype = sEff[0].dtype except: dtype = sEff[0][0].dtype vbMng(self, "INIT", "Interpolating marginal at mu = {}.".format(mu), 95) muC = self.centerNormalizeMarginal(mu) p = emptySampleList() p.reset((len(sEff[0]), len(muC)), dtype = dtype) p.data[:] = 0. if len(sEff[0]) > 0: for mIj, spj in zip(self.data.marginalInterp, sEff): p = p + spj.reshape(len(sEff[0]), - 1) * mIj(muC, der, scl) vbMng(self, "DEL", "Done interpolating marginal.", 95) if not sList: p = p.data.flatten() return p def interpolateMarginalInterpolator(self, mu : paramVal = []) -> Np1D: """Obtain interpolated approximant interpolator.""" mu = checkParameter(mu, self.data.nparMarginal)[0] hsi = HI() hsi.poles = self.interpolateMarginalPoles(mu) hsi.coeffs = self.interpolateMarginalCoeffs(mu) hsi.npar = 1 hsi.polybasis = self.data.HIs[0].polybasis return hsi def interpolateMarginalPoles(self, mu : paramList = []) -> Np1D: """Obtain interpolated approximant poles.""" mu = checkParameterList(mu, self.data.nparMarginal)[0] return self.interpolateMarginal(mu, [hi.poles for hi in self.data.HIs]) def interpolateMarginalCoeffs(self, mu : paramList = []) -> Np1D: """Obtain interpolated approximant coefficients.""" mu = checkParameterList(mu, self.data.nparMarginal)[0] cs = self.interpolateMarginal(mu, [hi.coeffs for hi in self.data.HIs]) if isinstance(cs, (list, tuple,)): cs = np.array(cs) return cs.reshape(self.data.HIs[0].coeffs.shape) def getApproxReduced(self, mu : paramList = []) -> sampList: """ Evaluate reduced representation of approximant at arbitrary parameter. Args: mu: Target parameter. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = checkParameterList(mu, self.data.npar)[0] if (not hasattr(self, "lastSolvedApproxReduced") or self.lastSolvedApproxReduced != mu): vbMng(self, "INIT", "Evaluating approximant at mu = {}.".format(mu), 12) self.uApproxReduced = emptySampleList() for i, muPL in enumerate(mu): muL = self.centerNormalizePivot([muPL(0, x) \ for x in self.data.directionPivot]) muM = [muPL(0, x) for x in self.data.directionMarginal] vbMng(self, "INIT", "Assembling reduced model for mu = {}.".format(muPL), 87) hsL = self.interpolateMarginalInterpolator(muM) vbMng(self, "DEL", "Done assembling reduced model.", 87) uAppR = hsL(muL) if i == 0: #self.data.HIs[0].coeffs.shape[1], len(mu) self.uApproxReduced.reset((len(uAppR), len(mu)), dtype = uAppR.dtype) self.uApproxReduced[i] = uAppR vbMng(self, "DEL", "Done evaluating approximant.", 12) self.lastSolvedApproxReduced = mu return self.uApproxReduced def getPVal(self, mu : paramList = []) -> sampList: """ Evaluate rational numerator at arbitrary parameter. Args: mu: Target parameter. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = checkParameterList(mu, self.data.npar)[0] p = emptySampleList() p.reset((len(self.data.HIs[0].coeffs.shape[1]), len(mu))) for i, muPL in enumerate(mu): muL = self.centerNormalizePivot([muPL(0, x) \ for x in self.data.directionPivot]) muM = [muPL(0, x) for x in self.data.directionMarginal] hsL = self.interpolateMarginalInterpolator(muM) p[i] = hsL(muL) * np.prod(muL(0, 0) - hsL.poles) return p def getQVal(self, mu:Np1D, der : List[int] = None, scl : Np1D = None) -> Np1D: """ Evaluate rational denominator at arbitrary parameter. Args: mu: Target parameter. 
der(optional): Derivatives to take before evaluation. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = checkParameterList(mu, self.data.npar)[0] muP = self.centerNormalizePivot(checkParameterList( mu.data[:, self.data.directionPivot], self.data.nparPivot)[0]) muM = checkParameterList(mu.data[:, self.data.directionMarginal], self.data.nparMarginal)[0] if der is None: derP, derM = 0, [0] else: derP = der[self.data.directionPivot[0]] derM = [der[x] for x in self.data.directionMarginal] if np.any(np.array(derM) != 0): raise RROMPyException(("Derivatives of Q with respect to marginal " "parameters not allowed.")) sclP = 1 if scl is None else scl[self.data.directionPivot[0]] derVal = np.zeros(len(mu), dtype = np.complex) N = len(self.data.HIs[0].poles) if derP == N: derVal[:] = 1. elif derP >= 0 and derP < N: pls = self.interpolateMarginalPoles(muM).reshape(-1, len(mu)).T plsDist = muP.data.reshape(-1, 1) - pls for terms in combinations(np.arange(N), N - derP): derVal += np.prod(plsDist[:, list(terms)], axis = 1) return sclP ** derP * fact(derP) * derVal def getPoles(self, *args, **kwargs) -> Np1D: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") if len(args) + len(kwargs) > 1: raise RROMPyException(("Wrong number of parameters passed. " "Only 1 available.")) elif len(args) + len(kwargs) == 1: if len(args) == 1: mVals = args[0] else: mVals = kwargs["marginalVals"] if not hasattr(mVals, "__len__"): mVals = [mVals] mVals = list(mVals) else: mVals = [fp] try: rDim = mVals.index(fp) if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]: raise except: raise RROMPyException(("Exactly 1 'freepar' entry in " "marginalVals must be provided.")) if rDim != self.data.directionPivot[0]: raise RROMPyException(("'freepar' entry in marginalVals must " "coincide with pivot direction.")) mVals[rDim] = self.data.mu0(rDim) mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim] roots = np.sort(np.array(self.interpolateMarginalPoles(mMarg))) return np.power(self.data.mu0(rDim) ** self.data.rescalingExp[rDim] + self.data.scaleFactor[rDim] * roots, 1. / self.data.rescalingExp[rDim]) def getResidues(self, *args, **kwargs) -> Np1D: """ Obtain approximant residues. Returns: Numpy matrix with residues as columns. """ pls = self.getPoles(*args, **kwargs) if len(args) == 1: mVals = args[0] elif len(args) == 0: mVals = [None] else: mVals = kwargs["marginalVals"] if not hasattr(mVals, "__len__"): mVals = [mVals] mVals = list(mVals) rDim = mVals.index(fp) mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim] residues = self.interpolateMarginalCoeffs(mMarg)[: len(pls)] res = self.data.projMat.dot(residues.T) return pls, res diff --git a/rrompy/reduction_methods/trained_model/trained_model_pivoted_rational.py b/rrompy/reduction_methods/trained_model/trained_model_pivoted_rational.py index 200264e..c36f233 100644 --- a/rrompy/reduction_methods/trained_model/trained_model_pivoted_rational.py +++ b/rrompy/reduction_methods/trained_model/trained_model_pivoted_rational.py @@ -1,110 +1,124 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from .trained_model_pivoted_general import TrainedModelPivotedGeneral from .trained_model_rational import TrainedModelRational from rrompy.utilities.base.types import Np1D, List, paramList, sampList, HFEng from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.poly_fitting.heaviside import rational2heaviside from rrompy.utilities.exception_manager import RROMPyAssert from rrompy.parameter import checkParameterList from rrompy.sampling import sampleList __all__ = ['TrainedModelPivotedRational'] class TrainedModelPivotedRational(TrainedModelPivotedGeneral, TrainedModelRational): """ ROM approximant evaluation for pivoted Rational approximant (with pole matching). Attributes: Data: dictionary with all that can be pickled. """ def initializeFromRational(self, HFEngine:HFEng, matchingWeight : float = 1., POD : bool = True, is_state : bool = True): """Initialize Heaviside representation.""" RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") poles, coeffs = [], [] for Q, P in zip(self.data.Qs, self.data.Ps): cfs, pls, basis = rational2heaviside( P, Q, scalingExp = self.data.rescalingExpPivot) poles += [pls] coeffs += [cfs] + NEff = max([len(pls) for pls in poles]) + for j in range(len(poles)): + dN = NEff - len(poles[j]) + if dN > 0: + coeffs[j] = np.vstack((coeffs[j][: len(poles[j])], + np.zeros((dN, coeffs[j].shape[1])), + coeffs[j][len(poles[j]) :])) + poles[j] = np.append(poles[j], [np.inf] * dN) + cEff = max([len(cfs) for cfs in coeffs]) + for j in range(len(coeffs)): + dc = cEff - len(coeffs[j]) + if dc > 0: + coeffs[j] = np.vstack((coeffs[j], + np.zeros((dc, coeffs[j].shape[1])))) self.initializeFromLists(poles, coeffs, basis, HFEngine, matchingWeight, POD, is_state) def getPVal(self, mu : paramList = []) -> sampList: """ Evaluate rational numerator at arbitrary parameter. Args: mu: Target parameter. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") if not hasattr(self.data, "_temporary"): return super().getPVal(mu) mu = checkParameterList(mu, self.data.npar)[0] muP = checkParameterList(mu.data[:, self.data.directionPivot], self.data.nparPivot)[0] muM = checkParameterList(mu.data[:, self.data.directionMarginal], self.data.nparMarginal)[0] muP = checkParameterList(muP, self.data.nparPivot)[0] vbMng(self, "INIT", "Evaluating numerator at mu = {}.".format(muP), 17) muPC = self.centerNormalizePivot(muP) pP = [sampleList(P(muPC)) for P in self.data.Ps] vbMng(self, "DEL", "Done evaluating numerator.", 17) return self.interpolateMarginal(muM, pP) def getQVal(self, mu:Np1D, der : List[int] = None, scl : Np1D = None) -> Np1D: """ Evaluate rational denominator at arbitrary parameter. Args: mu: Target parameter. der(optional): Derivatives to take before evaluation. 
""" RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") if not hasattr(self.data, "_temporary"): return super().getQVal(mu, der, scl) mu = checkParameterList(mu, self.data.npar)[0] muP = checkParameterList(mu.data[:, self.data.directionPivot], self.data.nparPivot)[0] muM = checkParameterList(mu.data[:, self.data.directionMarginal], self.data.nparMarginal)[0] if der is None: derP, derM = None, None else: derP = [der[x] for x in self.data.directionPivot] derM = [der[x] for x in self.data.directionMarginal] if scl is None: sclP, sclM = None, None else: sclP = [scl[x] for x in self.data.directionPivot] sclM = [scl[x] for x in self.data.directionMarginal] muP = checkParameterList(muP, self.data.nparPivot)[0] vbMng(self, "INIT", "Evaluating denominator at mu = {}.".format(muP), 17) muPC = self.centerNormalizePivot(muP) qP = [Q(muPC, derP, sclP).reshape(1, -1) for Q in self.data.Qs] vbMng(self, "DEL", "Done evaluating denominator.", 17) return self.interpolateMarginal(muM, qP, derM, sclM) diff --git a/rrompy/utilities/numerical/__init__.py b/rrompy/utilities/numerical/__init__.py index d015ec8..2f4b5cd 100644 --- a/rrompy/utilities/numerical/__init__.py +++ b/rrompy/utilities/numerical/__init__.py @@ -1,69 +1,70 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
 #
 from .custom_pinv import customPInv
 from .degree import (fullDegreeN, totalDegreeN, fullDegreeSet, totalDegreeSet,
                      degreeTotalToFull, fullDegreeMaxMask, totalDegreeMaxMask)
 from .factorials import multibinom, multifactorial
 from .halton import haltonGenerate
 from .hash_derivative import (nextDerivativeIndices, hashDerivativeToIdx,
                               hashIdxToDerivative)
 from .kroneckerer import kroneckerer
 from .low_discrepancy import lowDiscrepancy
 from .marginalize_poly_list import marginalizePolyList
 from .nonlinear_eigenproblem import (linearizeDense, eigNonlinearDense,
                                      eigvalsNonlinearDense)
 from .number_theory import squareResonances
-from .point_matching import pointMatching
+from .point_matching import pointMatching, chordalMetricTable
 from .rayleigh_quotient_iteration import rayleighQuotientIteration
 from .sobol import sobolGenerate
 from .tensor_la import dot, solve

 freepar = None

 __all__ = [
         'freepar',
         'customPInv',
         'fullDegreeN',
         'totalDegreeN',
         'fullDegreeSet',
         'totalDegreeSet',
         'degreeTotalToFull',
         'fullDegreeMaxMask',
         'totalDegreeMaxMask',
         'multibinom',
         'multifactorial',
         'haltonGenerate',
         'nextDerivativeIndices',
         'hashDerivativeToIdx',
         'hashIdxToDerivative',
         'kroneckerer',
         'lowDiscrepancy',
         'marginalizePolyList',
         'linearizeDense',
         'eigNonlinearDense',
         'eigvalsNonlinearDense',
         'squareResonances',
         'pointMatching',
+        'chordalMetricTable',
         'rayleighQuotientIteration',
         'sobolGenerate',
         'dot',
         'solve'
           ]
diff --git a/rrompy/utilities/numerical/point_matching.py b/rrompy/utilities/numerical/point_matching.py
index 599e938..211eebd 100644
--- a/rrompy/utilities/numerical/point_matching.py
+++ b/rrompy/utilities/numerical/point_matching.py
@@ -1,26 +1,37 @@
 # Copyright (C) 2018 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
+import numpy as np
 from scipy.optimize import linear_sum_assignment as LSA
 from rrompy.utilities.base.types import Np1D, Np2D

-__all__ = ['pointMatching']
+__all__ = ['pointMatching', 'chordalMetricTable']

 def pointMatching(distanceMatrix:Np2D) -> Np1D:
     return LSA(distanceMatrix)[1]

+def chordalMetricTable(x:Np1D, y:Np1D) -> Np2D:
+    x, y = np.array(x), np.array(y)
+    xInf, yInf = np.where(np.isinf(x))[0], np.where(np.isinf(y))[0]
+    x[xInf], y[yInf] = 0., 0.
+    T = np.abs(np.tile(x.reshape(-1, 1), len(y)) - y.reshape(1, -1))
+    T[xInf, :], T[:, yInf] = 1., 1.
+    T[np.ix_(xInf, yInf)] = 0.
+    T = T.T * (np.abs(x) ** 2. + 1.) ** -.5
+    T = T.T * (np.abs(y) ** 2. + 1.) ** -.5
+    return T
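
A minimal usage sketch of the two utilities above; it is not part of the patch itself and assumes this branch of RROMPy is installed, so that chordalMetricTable and pointMatching are importable from rrompy.utilities.numerical as per the __init__.py change. It matches two pole configurations the way initializeFromLists does, except that the residue-based term weighted by matchingWeight is left out.

import numpy as np
from rrompy.utilities.numerical import chordalMetricTable, pointMatching

# Two pole sets at neighboring marginal parameters; np.inf plays the role of
# the padding that initializeFromRational appends when a denominator has
# fewer roots than its siblings.
polesA = np.array([-1. + 1.j, 2. + 0.j, np.inf])
polesB = np.array([2.1 + 0.j, np.inf, -1.05 + .95j])

# Pairwise chordal distances on the Riemann sphere: finite pairs give
# |x - y| / sqrt((|x|^2 + 1) (|y|^2 + 1)), a finite pole x against np.inf
# gives 1 / sqrt(|x|^2 + 1), and two infinite poles give 0.
dist = chordalMetricTable(polesA, polesB)

# Linear-sum-assignment matching: reordering[i] is the index of the pole of
# polesB matched to polesA[i]; infinite (padding) poles get paired together.
reordering = pointMatching(dist)
print(reordering)            # [2 0 1]
print(polesB[reordering])    # poles of B reordered to follow polesA

For the renamed corrector knobs changed earlier in this patch, a hypothetical configuration dictionary follows; the numeric values and the placeholder sampler are illustrative only, while the keys and their constraints (correctorTol forced to 0 and correctorMaxIter capped at 1 when there is more than one parameter) come from the setters shown above.

# Hypothetical approxParameters for a RationalInterpolant on this branch; the
# sample point generator is left as a placeholder since none is defined here.
sampler = None  # in a real run, a sample point generator from RROMPy
params = {"POD": True, "S": 20, "sampler": sampler,
          "polybasis": "CHEBYSHEV", "M": 19, "N": 19,
          "correctorForce": False,  # replaces the old 'correctorKind' switch
          "correctorTol": 1e-8,     # > 0 enables the pole-removal iterations
          "correctorMaxIter": 3}    # only effective for a single parameter
# approximant = RationalInterpolant(HFEngine, mu0 = 0., approxParameters = params)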