diff --git a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py index e1cbbfd..dc1a33b 100644 --- a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py +++ b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py @@ -1,981 +1,981 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from abc import abstractmethod from os import mkdir, remove, rmdir import numpy as np from collections.abc import Iterable from copy import deepcopy as copy from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from .trained_model.convert_trained_model_pivoted import ( convertTrainedModelPivoted) from rrompy.utilities.base.data_structures import purgeDict, getNewFilename from rrompy.utilities.poly_fitting.polynomial import polybases as ppb from rrompy.utilities.poly_fitting.radial_basis import polybases as rbpb from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk from rrompy.utilities.base.types import Np2D, paramList, List, ListAny from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.degree import reduceDegreeN from rrompy.utilities.exception_manager import (RROMPyException, RROMPyWarning, RROMPy_FRAGILE) from rrompy.parameter import checkParameterList from rrompy.utilities.parallel import poolRank, bcast __all__ = ['GenericPivotedApproximantNoMatch', 'GenericPivotedApproximantPolyMatch', 'GenericPivotedApproximantPoleMatch'] class GenericPivotedApproximantBase(GenericApproximant): def __init__(self, directionPivot:ListAny, *args, storeAllSamples : bool = False, **kwargs): self._preInit() if len(directionPivot) > 1: raise RROMPyException(("Exactly 1 pivot parameter allowed in pole " "matching.")) from rrompy.parameter.parameter_sampling import (EmptySampler as ES, SparseGridSampler as SG) self._addParametersToList(["radialDirectionalWeightsMarginal"], [-1], ["samplerPivot", "SMarginal", "samplerMarginal"], [ES(), 1, SG([[-1.], [1.]])], toBeExcluded = ["sampler"]) self._directionPivot = directionPivot self.storeAllSamples = storeAllSamples if not hasattr(self, "_output_lvl"): self._output_lvl = [] self._output_lvl += [1 / 2] super().__init__(*args, **kwargs) self._postInit() def setupSampling(self): super().setupSampling(False) def initializeModelData(self, datadict): if 
"directionPivot" in datadict.keys(): from .trained_model.trained_model_pivoted_data import ( TrainedModelPivotedData) data = TrainedModelPivotedData(datadict["mu0"], datadict["mus"], datadict.pop("projMat"), datadict["scaleFactor"], datadict.pop("parameterMap"), datadict["directionPivot"]) return (data, ["mu0", "scaleFactor", "directionPivot", "mus"]) else: return super().initializeModelData(datadict) @property def npar(self): """Number of parameters.""" if hasattr(self, "_temporaryPivot"): return self.nparPivot return super().npar def checkParameterListPivot(self, mu:paramList, check_if_single : bool = False) -> paramList: return checkParameterList(mu, self.nparPivot, check_if_single) def checkParameterListMarginal(self, mu:paramList, check_if_single : bool = False) -> paramList: return checkParameterList(mu, self.nparMarginal, check_if_single) def mapParameterList(self, *args, **kwargs): if hasattr(self, "_temporaryPivot"): return self.mapParameterListPivot(*args, **kwargs) return super().mapParameterList(*args, **kwargs) def mapParameterListPivot(self, mu:paramList, direct : str = "F", idx : List[int] = None): if idx is None: idx = self.directionPivot else: idx = [self.directionPivot[j] for j in idx] return super().mapParameterList(mu, direct, idx) def mapParameterListMarginal(self, mu:paramList, direct : str = "F", idx : List[int] = None): if idx is None: idx = self.directionMarginal else: idx = [self.directionMarginal[j] for j in idx] return super().mapParameterList(mu, direct, idx) @property def mu0(self): """Value of mu0.""" if hasattr(self, "_temporaryPivot"): return self.checkParameterListPivot(self._mu0(self.directionPivot)) return self._mu0 @mu0.setter def mu0(self, mu0): GenericApproximant.mu0.fset(self, mu0) @property def mus(self): """Value of mus. 
Its assignment may reset snapshots.""" return self._mus @mus.setter def mus(self, mus): mus = self.checkParameterList(mus) musOld = copy(self.mus) if hasattr(self, '_mus') else None if (musOld is None or len(mus) != len(musOld) or not mus == musOld): self.resetSamples() self._mus = mus @property def musMarginal(self): """Value of musMarginal. Its assignment may reset snapshots.""" return self._musMarginal @musMarginal.setter def musMarginal(self, musMarginal): musMarginal = self.checkParameterListMarginal(musMarginal) if hasattr(self, '_musMarginal'): musMOld = copy(self.musMarginal) else: musMOld = None if (musMOld is None or len(musMarginal) != len(musMOld) or not musMarginal == musMOld): self.resetSamples() self._musMarginal = musMarginal @property def SMarginal(self): """Value of SMarginal.""" return self._SMarginal @SMarginal.setter def SMarginal(self, SMarginal): if SMarginal <= 0: raise RROMPyException("SMarginal must be positive.") if hasattr(self, "_SMarginal") and self._SMarginal is not None: Sold = self.SMarginal else: Sold = -1 self._SMarginal = SMarginal self._approxParameters["SMarginal"] = self.SMarginal if Sold != self.SMarginal: self.resetSamples() @property def radialDirectionalWeightsMarginal(self): """Value of radialDirectionalWeightsMarginal.""" return self._radialDirectionalWeightsMarginal @radialDirectionalWeightsMarginal.setter def radialDirectionalWeightsMarginal(self, radialDirWeightsMarg): if radialDirWeightsMarg == -1: radialDirWeightsMarg = [1.] * self.nparMarginal if isinstance(radialDirWeightsMarg, Iterable): radialDirWeightsMarg = list(radialDirWeightsMarg) else: radialDirWeightsMarg = [radialDirWeightsMarg] self._radialDirectionalWeightsMarginal = radialDirWeightsMarg self._approxParameters["radialDirectionalWeightsMarginal"] = ( self.radialDirectionalWeightsMarginal) @property def directionPivot(self): """Value of directionPivot. 
Its assignment may reset snapshots.""" return self._directionPivot @directionPivot.setter def directionPivot(self, directionPivot): if hasattr(self, '_directionPivot'): directionPivotOld = copy(self.directionPivot) else: directionPivotOld = None if (directionPivotOld is None or len(directionPivot) != len(directionPivotOld) or not directionPivot == directionPivotOld): self.resetSamples() self._directionPivot = directionPivot @property def directionMarginal(self): return [x for x in range(self.HFEngine.npar) \ if x not in self.directionPivot] @property def nparPivot(self): return len(self.directionPivot) @property def nparMarginal(self): return self.npar - self.nparPivot @property def muBounds(self): """Value of muBounds.""" return self.samplerPivot.lims @property def muBoundsMarginal(self): """Value of muBoundsMarginal.""" return self.samplerMarginal.lims @property def sampler(self): """Proxy of samplerPivot.""" return self._samplerPivot @property def samplerPivot(self): """Value of samplerPivot.""" return self._samplerPivot @samplerPivot.setter def samplerPivot(self, samplerPivot): if 'generatePoints' not in dir(samplerPivot): raise RROMPyException("Pivot sampler type not recognized.") if hasattr(self, '_samplerPivot') and self._samplerPivot is not None: samplerOld = self.samplerPivot self._samplerPivot = copy(samplerPivot) self._approxParameters["samplerPivot"] = self.samplerPivot if not 'samplerOld' in locals() or samplerOld != self.samplerPivot: self.resetSamples() @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'generatePoints' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") if (hasattr(self, '_samplerMarginal') and self._samplerMarginal is not None): samplerOld = self.samplerMarginal self._samplerMarginal = copy(samplerMarginal) self._approxParameters["samplerMarginal"] = self.samplerMarginal if 
not 'samplerOld' in locals() or samplerOld != self.samplerMarginal: self.resetSamples() @property def matchState(self): """Utility value of matchState.""" return False def computeScaleFactor(self): """Compute parameter rescaling factor.""" self.scaleFactorPivot = .5 * np.abs(( self.mapParameterListPivot(self.muBounds[0]) - self.mapParameterListPivot(self.muBounds[1]))[0]) self.scaleFactorMarginal = .5 * np.abs(( self.mapParameterListMarginal(self.muBoundsMarginal[0]) - self.mapParameterListMarginal(self.muBoundsMarginal[1]))[0]) self.scaleFactor = np.empty(self.npar) self.scaleFactor[self.directionPivot] = self.scaleFactorPivot self.scaleFactor[self.directionMarginal] = self.scaleFactorMarginal def _setupTrainedModel(self, pMat:Np2D, pMatUpdate : bool = False, pMatOld : Np2D = None, forceNew : bool = False): if forceNew or self.trainedModel is None: self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp datadict = {"mu0": self.mu0, "mus": copy(self.mus), "projMat": pMat, "scaleFactor": self.scaleFactor, "parameterMap": self.HFEngine.parameterMap, "directionPivot": self.directionPivot} self.trainedModel.data = self.initializeModelData(datadict)[0] else: self.trainedModel = self.trainedModel if pMatUpdate: self.trainedModel.data.projMat = np.hstack( (self.trainedModel.data.projMat, pMat)) else: self.trainedModel.data.projMat = copy(pMat) self.trainedModel.data.mus = copy(self.mus) self.trainedModel.data.musMarginal = copy(self.musMarginal) def addSamplePoints(self, mus:paramList): """Add global sample points to reduced model.""" raise RROMPyException(("Cannot add global samples to pivoted reduced " "model.")) def normApprox(self, mu:paramList) -> float: _PODOld, self._POD = self.POD, 0 result = super().normApprox(mu) self._POD = _PODOld return result @property def storedSamplesFilenames(self) -> List[str]: if not hasattr(self, "_sampleBaseFilename"): return [] return [self._sampleBaseFilename + 
"{}_{}.pkl" .format(idx + 1, self.name()) for idx in range(len(self.musMarginal))] def purgeStoredSamples(self): if not hasattr(self, "_sampleBaseFilename"): return for file in self.storedSamplesFilenames: remove(file) rmdir(self._sampleBaseFilename[: -8]) def storeSamples(self, idx : int = None): """Store samples to file.""" if not hasattr(self, "_sampleBaseFilename"): filenameBase = None if poolRank() == 0: foldername = getNewFilename(self.name(), "samples") mkdir(foldername) filenameBase = foldername + "/sample_" self._sampleBaseFilename = bcast(filenameBase, force = True) if idx is not None: super().storeSamples(self._sampleBaseFilename + str(idx + 1), False) def loadTrainedModel(self, filename:str): """Load trained reduced model from file.""" super().loadTrainedModel(filename) self._musMarginal = self.trainedModel.data.musMarginal def setTrainedModel(self, model): """Deepcopy approximation from trained model.""" super().setTrainedModel(model) self.trainedModel = convertTrainedModelPivoted(self.trainedModel, self.tModelType, True) self._preliminaryMarginalFinalization() self._finalizeMarginalization() self.trainedModel.data.approxParameters = self.approxParameters class GenericPivotedApproximantNoMatch(GenericPivotedApproximantBase): """ ROM pivoted approximant (without matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. 
samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ @property def tModelType(self): from .trained_model.trained_model_pivoted_rational_nomatch import ( TrainedModelPivotedRationalNoMatch) return TrainedModelPivotedRationalNoMatch def _finalizeMarginalization(self): self.trainedModel.setupMarginalInterp( [self.radialDirectionalWeightsMarginal]) self.trainedModel.data.approxParameters = copy(self.approxParameters) def _preliminaryMarginalFinalization(self): pass class GenericPivotedApproximantPolyMatch(GenericPivotedApproximantBase): """ ROM pivoted approximant (with polynomial matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for matching; defaults to 1; - 'matchingKind': kind of matching; allowed values include 'ROTATE' and 'PROJECT'; defaults to 'ROTATE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. 
Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for matching; - 'matchingKind': kind of matching; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for matching. matchingKind: Kind of matching. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. 
radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ _allowedBadPoleCorrectionKinds = ["ERASE", "RATIONAL", "POLYNOMIAL"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchState", "matchingWeight", "matchingKind", "polybasisMarginal", "paramsMarginal"], [False, 1., "ROTATE", "MONOMIAL", {}]) self.parameterMarginalList = ["MMarginal", "nNeighborsMarginal", "polydegreetypeMarginal", "interpTolMarginal", "radialDirectionalWeightsMarginalAdapt"] super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_pivoted_rational_polymatch import ( TrainedModelPivotedRationalPolyMatch) return TrainedModelPivotedRationalPolyMatch @property def matchState(self): """Value of matchState.""" return self._matchState @matchState.setter def matchState(self, matchState): self._matchState = matchState self._approxParameters["matchState"] = self.matchState @property def matchingWeight(self): """Value of matchingWeight.""" return self._matchingWeight @matchingWeight.setter def matchingWeight(self, matchingWeight): self._matchingWeight = matchingWeight self._approxParameters["matchingWeight"] = self.matchingWeight @property def matchingKind(self): 
"""Value of matchingKind.""" return self._matchingKind @matchingKind.setter def matchingKind(self, matchingKind): try: matchingKind = matchingKind.upper().strip().replace(" ", "") if matchingKind not in ["ROTATE", "PROJECT"]: raise RROMPyException( "Prescribed matching kind not recognized.") self._matchingKind = matchingKind except: RROMPyWarning(("Prescribed matching kind not recognized. " "Overriding to 'ROTATE'.")) self._matchingKind = "ROTATE" self._approxParameters["matchingKind"] = self.matchingKind @property def polybasisMarginal(self): """Value of polybasisMarginal.""" return self._polybasisMarginal @polybasisMarginal.setter def polybasisMarginal(self, polybasisMarginal): try: polybasisMarginal = polybasisMarginal.upper().strip().replace(" ", "") if polybasisMarginal not in ppb + rbpb + ["NEARESTNEIGHBOR"] + sk: raise RROMPyException( "Prescribed marginal polybasis not recognized.") self._polybasisMarginal = polybasisMarginal except: RROMPyWarning(("Prescribed marginal polybasis not recognized. 
" "Overriding to 'MONOMIAL'.")) self._polybasisMarginal = "MONOMIAL" self._approxParameters["polybasisMarginal"] = self.polybasisMarginal @property def paramsMarginal(self): """Value of paramsMarginal.""" return self._paramsMarginal @paramsMarginal.setter def paramsMarginal(self, paramsMarginal): paramsMarginal = purgeDict(paramsMarginal, self.parameterMarginalList, dictname = self.name() + ".paramsMarginal", baselevel = 1) keyList = list(paramsMarginal.keys()) if not hasattr(self, "_paramsMarginal"): self._paramsMarginal = {} if "MMarginal" in keyList: MMarg = paramsMarginal["MMarginal"] elif ("MMarginal" in self.paramsMarginal and not hasattr(self, "_MMarginal_isauto")): MMarg = self.paramsMarginal["MMarginal"] else: MMarg = "AUTO" if isinstance(MMarg, str): MMarg = MMarg.strip().replace(" ","") if "-" not in MMarg: MMarg = MMarg + "-0" self._MMarginal_isauto = True self._MMarginal_shift = int(MMarg.split("-")[-1]) MMarg = 0 if MMarg < 0: raise RROMPyException("MMarginal must be non-negative.") self._paramsMarginal["MMarginal"] = MMarg if "nNeighborsMarginal" in keyList: self._paramsMarginal["nNeighborsMarginal"] = max(1, paramsMarginal["nNeighborsMarginal"]) elif "nNeighborsMarginal" not in self.paramsMarginal: self._paramsMarginal["nNeighborsMarginal"] = 1 if "polydegreetypeMarginal" in keyList: try: polydegtypeM = paramsMarginal["polydegreetypeMarginal"]\ .upper().strip().replace(" ","") - if polydegtypeM not in ["TOTAL", "FULL"]: + if polydegtypeM not in ["TOTAL", "TENSOR"]: raise RROMPyException(("Prescribed polydegreetypeMarginal " "not recognized.")) self._paramsMarginal["polydegreetypeMarginal"] = polydegtypeM except: RROMPyWarning(("Prescribed polydegreetypeMarginal not " "recognized. 
Overriding to 'TOTAL'.")) self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL" elif "polydegreetypeMarginal" not in self.paramsMarginal: self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL" if "interpTolMarginal" in keyList: self._paramsMarginal["interpTolMarginal"] = ( paramsMarginal["interpTolMarginal"]) elif "interpTolMarginal" not in self.paramsMarginal: self._paramsMarginal["interpTolMarginal"] = -1 if "radialDirectionalWeightsMarginalAdapt" in keyList: self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = ( paramsMarginal["radialDirectionalWeightsMarginalAdapt"]) elif "radialDirectionalWeightsMarginalAdapt" not in self.paramsMarginal: self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = [ -1., -1.] self._approxParameters["paramsMarginal"] = self.paramsMarginal def _setMMarginalAuto(self): if (self.polybasisMarginal not in ppb + rbpb or "MMarginal" not in self.paramsMarginal or "polydegreetypeMarginal" not in self.paramsMarginal): raise RROMPyException(("Cannot set MMarginal if " "polybasisMarginal does not allow it.")) self.paramsMarginal["MMarginal"] = max(0, reduceDegreeN( len(self.musMarginal), len(self.musMarginal), self.nparMarginal, self.paramsMarginal["polydegreetypeMarginal"]) - self._MMarginal_shift) vbMng(self, "MAIN", ("Automatically setting MMarginal to {}.").format( self.paramsMarginal["MMarginal"]), 25) def purgeparamsMarginal(self): self.paramsMarginal = {} paramsMbadkeys = [] if self.polybasisMarginal in ppb + rbpb + sk: paramsMbadkeys += ["nNeighborsMarginal"] if self.polybasisMarginal not in rbpb: paramsMbadkeys += ["radialDirectionalWeightsMarginalAdapt"] if self.polybasisMarginal in ["NEARESTNEIGHBOR"] + sk: paramsMbadkeys += ["MMarginal", "polydegreetypeMarginal", "interpTolMarginal"] if hasattr(self, "_MMarginal_isauto"): del self._MMarginal_isauto if hasattr(self, "_MMarginal_shift"): del self._MMarginal_shift for key in paramsMbadkeys: if key in self._paramsMarginal: del self._paramsMarginal[key] 
self._approxParameters["paramsMarginal"] = self.paramsMarginal def _finalizeMarginalization(self): if self.polybasisMarginal in rbpb + ["NEARESTNEIGHBOR"]: self.computeScaleFactor() rDWMEff = np.array([w * f for w, f in zip( self.radialDirectionalWeightsMarginal, self.scaleFactorMarginal)]) if self.polybasisMarginal in ppb + rbpb + sk: interpPars = [self.polybasisMarginal] if self.polybasisMarginal in ppb + rbpb: if self.polybasisMarginal in rbpb: interpPars += [rDWMEff] interpPars += [self.verbosity >= 5, - self.paramsMarginal["polydegreetypeMarginal"] == "TOTAL"] + self.paramsMarginal["polydegreetypeMarginal"]] if self.polybasisMarginal in ppb: interpPars += [{}] else: # if self.polybasisMarginal in rbpb: interpPars += [{"optimizeScalingBounds":self.paramsMarginal[ "radialDirectionalWeightsMarginalAdapt"]}] interpPars += [ {"rcond":self.paramsMarginal["interpTolMarginal"]}] extraPar = hasattr(self, "_MMarginal_isauto") else: # if self.polybasisMarginal in sk: idxEff = [x for x in range(self.samplerMarginal.npoints) if not hasattr(self.trainedModel, "_idxExcl") or x not in self.trainedModel._idxExcl] extraPar = self.samplerMarginal.depth[idxEff] else: # if self.polybasisMarginal == "NEARESTNEIGHBOR": interpPars = [self.paramsMarginal["nNeighborsMarginal"], rDWMEff] extraPar = None self.trainedModel.setupMarginalInterp(self, interpPars, extraPar) self.trainedModel.data.approxParameters = copy(self.approxParameters) def _preliminaryMarginalFinalization(self): if self._mode == RROMPy_FRAGILE: self._matchState = False vbMng(self, "INIT", "Matching rational functions.", 10) self.trainedModel.initializeFromRational(self.matchingWeight, self.matchingKind, self.HFEngine, self.matchState) vbMng(self, "DEL", "Done matching rational functions.", 10) def _postApplyC(self): if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C to " "orthonormalized 
samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) pMat = None for j, mu in enumerate(self.trainedModel.data.mus): pMatj = self.trainedModel.data.projMat[:, j] pMatj = np.expand_dims(self.HFEngine.applyC(pMatj, mu), -1) if pMat is None: pMat = np.array(pMatj) else: pMat = np.append(pMat, pMatj, axis = 1) vbMng(self, "DEL", "Done extracting system output.", 35) self.trainedModel.data.projMat = pMat @abstractmethod def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() setupOK = super().setupApprox(*args, **kwargs) if self.matchState: self._postApplyC() return setupOK class GenericPivotedApproximantPoleMatch(GenericPivotedApproximantPolyMatch): """ ROM pivoted approximant (with pole matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. 
mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. 
S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ _allowedBadPoleCorrectionKinds = ["ERASE", "RATIONAL", "POLYNOMIAL"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchingShared", "badPoleCorrection"], [1., "ERASE"], toBeExcluded = ["matchingKind"]) super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_pivoted_rational_polematch import ( TrainedModelPivotedRationalPoleMatch) return TrainedModelPivotedRationalPoleMatch @property def matchingShared(self): """Value of matchingShared.""" return self._matchingShared @matchingShared.setter def matchingShared(self, matchingShared): if matchingShared > 1.: RROMPyWarning("Shared ratio too large. Clipping to 1.") matchingShared = 1. elif matchingShared < 0.: RROMPyWarning("Shared ratio too small. 
Clipping to 0.") matchingShared = 0. self._matchingShared = matchingShared self._approxParameters["matchingShared"] = self.matchingShared @property def badPoleCorrection(self): """Value of badPoleCorrection.""" return self._badPoleCorrection @badPoleCorrection.setter def badPoleCorrection(self, badPoleC): try: badPoleC = badPoleC.upper().strip().replace(" ","") if badPoleC not in self._allowedBadPoleCorrectionKinds: raise RROMPyException(("Prescribed badPoleCorrection not " "recognized.")) self._badPoleCorrection = badPoleC except: RROMPyWarning(("Prescribed badPoleCorrection not recognized. " "Overriding to 'ERASE'.")) self._badPoleCorrection = "ERASE" self._approxParameters["badPoleCorrection"] = self.badPoleCorrection def _finalizeMarginalization(self): vbMng(self, "INIT", "Checking shared ratio.", 10) msg = self.trainedModel.checkShared(self.matchingShared, self.badPoleCorrection) vbMng(self, "DEL", "Done checking. " + msg, 10) super()._finalizeMarginalization() def _preliminaryMarginalFinalization(self): if self._mode == RROMPy_FRAGILE: self._matchState = False vbMng(self, "INIT", "Matching poles and residues.", 10) self.trainedModel.initializeFromRational(self.matchingWeight, self.HFEngine, self.matchState) vbMng(self, "DEL", "Done matching poles and residues.", 10) diff --git a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py index 0f16b95..d41da90 100644 --- a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py +++ b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py @@ -1,914 +1,915 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod from copy import deepcopy as copy import numpy as np from collections.abc import Iterable from matplotlib import pyplot as plt from rrompy.reduction_methods.pivoted.generic_pivoted_approximant import ( GenericPivotedApproximantBase, GenericPivotedApproximantPolyMatch, GenericPivotedApproximantPoleMatch) from rrompy.reduction_methods.pivoted.gather_pivoted_approximant import ( gatherPivotedApproximant) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal, paramList, ListAny) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import dot from rrompy.utilities.numerical.point_matching import pointMatching from rrompy.utilities.numerical.point_distances import doubleDistanceMatrix from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import (masterCore, indicesScatter, arrayGatherv, isend) __all__ = ['GenericPivotedGreedyApproximantPolyMatch', 'GenericPivotedGreedyApproximantPoleMatch'] class GenericPivotedGreedyApproximantBase(GenericPivotedApproximantBase): _allowedEstimatorKindsMarginal = ["LOOK_AHEAD", "LOOK_AHEAD_RECOVER", "NONE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchingWeightError", "matchingErrorRelative", 
"errorEstimatorKindMarginal", "greedyTolMarginal", "maxIterMarginal", "autoCollapse"], [0., False, "NONE", 1e-1, 1e2, False]) super().__init__(*args, **kwargs) self._postInit() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. if self._scaleFactorDer == "AUTO": return self._scaleFactorOldPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif isinstance(scaleFactorDer, Iterable): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'refine' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") GenericPivotedApproximantBase.samplerMarginal.fset(self, samplerMarginal) @property def errorEstimatorKindMarginal(self): """Value of errorEstimatorKindMarginal.""" return self._errorEstimatorKindMarginal @errorEstimatorKindMarginal.setter def errorEstimatorKindMarginal(self, errorEstimatorKindMarginal): errorEstimatorKindMarginal = errorEstimatorKindMarginal.upper() if errorEstimatorKindMarginal not in ( self._allowedEstimatorKindsMarginal): RROMPyWarning(("Marginal error estimator kind not recognized. 
" "Overriding to 'NONE'.")) errorEstimatorKindMarginal = "NONE" self._errorEstimatorKindMarginal = errorEstimatorKindMarginal self._approxParameters["errorEstimatorKindMarginal"] = ( self.errorEstimatorKindMarginal) @property def matchingWeightError(self): """Value of matchingWeightError.""" return self._matchingWeightError @matchingWeightError.setter def matchingWeightError(self, matchingWeightError): self._matchingWeightError = matchingWeightError self._approxParameters["matchingWeightError"] = ( self.matchingWeightError) @property def matchingErrorRelative(self): """Value of matchingErrorRelative.""" return self._matchingErrorRelative @matchingErrorRelative.setter def matchingErrorRelative(self, matchingErrorRelative): self._matchingErrorRelative = matchingErrorRelative self._approxParameters["matchingErrorRelative"] = ( self.matchingErrorRelative) @property def greedyTolMarginal(self): """Value of greedyTolMarginal.""" return self._greedyTolMarginal @greedyTolMarginal.setter def greedyTolMarginal(self, greedyTolMarginal): if greedyTolMarginal < 0: raise RROMPyException("greedyTolMarginal must be non-negative.") if (hasattr(self, "_greedyTolMarginal") and self.greedyTolMarginal is not None): greedyTolMarginalold = self.greedyTolMarginal else: greedyTolMarginalold = -1 self._greedyTolMarginal = greedyTolMarginal self._approxParameters["greedyTolMarginal"] = self.greedyTolMarginal if greedyTolMarginalold != self.greedyTolMarginal: self.resetSamples() @property def maxIterMarginal(self): """Value of maxIterMarginal.""" return self._maxIterMarginal @maxIterMarginal.setter def maxIterMarginal(self, maxIterMarginal): if maxIterMarginal <= 0: raise RROMPyException("maxIterMarginal must be positive.") if (hasattr(self, "_maxIterMarginal") and self.maxIterMarginal is not None): maxIterMarginalold = self.maxIterMarginal else: maxIterMarginalold = -1 self._maxIterMarginal = maxIterMarginal self._approxParameters["maxIterMarginal"] = self.maxIterMarginal if 
maxIterMarginalold != self.maxIterMarginal: self.resetSamples() @property def autoCollapse(self): """Value of autoCollapse.""" return self._autoCollapse @autoCollapse.setter def autoCollapse(self, autoCollapse): self._autoCollapse = autoCollapse self._approxParameters["autoCollapse"] = self.autoCollapse def resetSamples(self): """Reset samples.""" super().resetSamples() if not hasattr(self, "_temporaryPivot"): self._mus = emptyParameterList() self._musMarginal = emptyParameterList() if hasattr(self, "samplerMarginal"): self.samplerMarginal.reset() if hasattr(self, "samplingEngine") and self.samplingEngine is not None: self.samplingEngine.resetHistory() @abstractmethod def getErrorEstimatorMarginalLookAhead(self) -> Np1D: pass def getErrorEstimatorMarginalNone(self) -> Np1D: nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) return (1. + self.greedyTolMarginal) * np.ones(nErr) def errorEstimatorMarginal(self, return_max : bool = False) -> Np1D: vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format( self.trainedModel.data.musMarginal), 10) if self.errorEstimatorKindMarginal == "NONE": nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) err = (1. + self.greedyTolMarginal) * np.ones(nErr) else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": err = self.getErrorEstimatorMarginalLookAhead() vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10) if not return_max: return err idxMaxEst = np.where(err > self.greedyTolMarginal)[0] maxErr = err[idxMaxEst] if self.errorEstimatorKindMarginal == "NONE": maxErr = None return err, idxMaxEst, maxErr def plotEstimatorMarginal(self, est:Np1D, idxMax:List[int], estMax:List[float]): if self.errorEstimatorKindMarginal == "NONE": return if (not (np.any(np.isnan(est)) or np.any(np.isinf(est))) and masterCore() and hasattr(self.trainedModel, "_musMExcl")): fig = plt.figure(figsize = plt.figaspect(1. 
                                                          / self.nparMarginal))
            for jpar in range(self.nparMarginal):
                ax = fig.add_subplot(1, self.nparMarginal, 1 + jpar)
                musre = np.real(self.trainedModel._musMExcl)
                if len(idxMax) > 0 and estMax is not None:
                    maxrej = musre[idxMax, jpar]
                errCP = copy(est)
                # indices of all marginal directions other than jpar
                idx = np.delete(np.arange(self.nparMarginal), jpar)
                # group points sharing all coordinates except jpar, plot each
                # group as one curve, then drop it from the working copies
                while len(musre) > 0:
                    if self.nparMarginal == 1:
                        currIdx = np.arange(len(musre))
                    else:
                        currIdx = np.where(np.isclose(np.sum(
                                        np.abs(musre[:, idx] - musre[0, idx]),
                                        1), 0., atol = 1e-15))[0]
                    currIdxSorted = currIdx[np.argsort(musre[currIdx, jpar])]
                    ax.semilogy(musre[currIdxSorted, jpar],
                                errCP[currIdxSorted], 'k.-', linewidth = 1)
                    musre = np.delete(musre, currIdx, 0)
                    errCP = np.delete(errCP, currIdx)
                ax.semilogy(self.musMarginal.re(jpar),
                            (self.greedyTolMarginal,) * len(self.musMarginal),
                            '*m')
                if len(idxMax) > 0 and estMax is not None:
                    ax.semilogy(maxrej, estMax, 'xr')
                ax.set_xlim(*list(self.samplerMarginal.lims.re(jpar)))
                ax.grid()
            plt.tight_layout()
            plt.show()

    @abstractmethod
    def _updateTrainedModelMarginalSamples(self, idx : ListAny = []):
        pass

    def _addMarginalSample(self, mus:paramList):
        """Add marginal sample(s), build pivoted approximants there, and
        refresh matching/marginalization."""
        mus = self.checkParameterListMarginal(mus)
        if len(mus) == 0: return
        self._nmusOld, nmus = len(self.musMarginal), len(mus)
        if (hasattr(self, "trainedModel") and self.trainedModel is not None
        and hasattr(self.trainedModel, "_musMExcl")):
            # excluded (look-ahead test) samples count towards numbering
            self._nmusOld += len(self.trainedModel._musMExcl)
        vbMng(self, "MAIN",
              ("Adding marginal sample point{} no. {}{} at {} to training "
               "set.").format("s" * (nmus > 1), self._nmusOld + 1,
                              "--{}".format(self._nmusOld + nmus) * (nmus > 1),
                              mus), 3)
        self.musMarginal.append(mus)
        self.setupApproxPivoted(mus)
        self._preliminaryMarginalFinalization()
        del self._nmusOld
        if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD"
        and not self.firstGreedyIterM):
            # after the first iteration, the newly added points are set aside
            # as look-ahead test samples rather than kept in the training set
            ubRange = len(self.trainedModel.data.musMarginal)
            if hasattr(self.trainedModel, "_idxExcl"):
                shRange = len(self.trainedModel._musMExcl)
            else:
                shRange = 0
            testIdxs = list(range(ubRange + shRange - len(mus),
                                  ubRange + shRange))
            for j in testIdxs[::-1]:
                self.musMarginal.pop(j - shRange)
            if hasattr(self.trainedModel, "_idxExcl"):
                testIdxs = self.trainedModel._idxExcl + testIdxs
            self._updateTrainedModelMarginalSamples(testIdxs)
        self._finalizeMarginalization()
        self._SMarginal = len(self.musMarginal)
        self._approxParameters["SMarginal"] = self.SMarginal
        self.trainedModel.data.approxParameters["SMarginal"] = self.SMarginal

    def greedyNextSampleMarginal(self, muidx:List[int],
                                 plotEst : str = "NONE") \
                                   -> Tuple[Np1D, List[int], float, paramVal]:
        """Perform one marginal greedy step: promote selected test samples to
        the training set, refine the sampler, and re-estimate the error."""
        RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
        muidx = self._musMarginalTestIdxs[muidx]
        if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD"
        and not self.firstGreedyIterM):
            if not hasattr(self.trainedModel, "_idxExcl"):
                raise RROMPyException(("Sample index to be added not present "
                                       "in trained model."))
            testIdxs = copy(self.trainedModel._idxExcl)
            skippedIdx = 0
            # move the selected excluded samples back into musMarginal,
            # keeping track of the shift induced by earlier re-insertions
            for cj, j in enumerate(self.trainedModel._idxExcl):
                if j in muidx:
                    testIdxs.pop(skippedIdx)
                    self.musMarginal.insert(self.trainedModel._musMExcl[cj],
                                            j - skippedIdx)
                else:
                    skippedIdx += 1
            if len(self.trainedModel._idxExcl) < (len(muidx)
                                                + len(testIdxs)):
                raise RROMPyException(("Sample index to be added not present "
                                       "in trained model."))
            self._updateTrainedModelMarginalSamples(testIdxs)
            self._SMarginal = len(self.musMarginal)
            self._approxParameters["SMarginal"] = self.SMarginal
            self.trainedModel.data.approxParameters["SMarginal"] = (
                                                                self.SMarginal)
        self.firstGreedyIterM = False
        idxAdded = self.samplerMarginal.refine(muidx)[0]
        self._addMarginalSample(self.samplerMarginal.points[idxAdded])
        errorEstTest, muidx, maxErrorEst = self.errorEstimatorMarginal(True)
        if plotEst == "ALL":
            self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst)
        return (errorEstTest, muidx, maxErrorEst,
                self.samplerMarginal.points[muidx])

    def _preliminaryTrainingMarginal(self):
        """Initialize starting snapshots of solution map."""
        RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
        # skip if snapshots already exist
        if np.sum(self.samplingEngine.nsamples) > 0: return
        self.resetSamples()
        self._addMarginalSample(self.samplerMarginal.generatePoints(
                                                               self.SMarginal))

    def _preSetupApproxPivoted(self, mus:paramList) \
                                          -> Tuple[ListAny, ListAny, ListAny]:
        """Prepare state (scale factors, trained model backup, MPI scatter)
        before building pivoted approximants at the given marginal points."""
        self.computeScaleFactor()
        if self.trainedModel is None:
            self._setupTrainedModel(np.zeros((0, 0)))
            self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], []
            self.trainedModel.data.Psupp = []
        self._trainedModelOld = copy(self.trainedModel)
        self._scaleFactorOldPivot = copy(self.scaleFactor)
        self.scaleFactor = self.scaleFactorPivot
        self._temporaryPivot = 1
        self._musLoc = copy(self.mus)
        # distribute the marginal points across MPI cores
        idx, sizes = indicesScatter(len(mus), return_sizes = True)
        emptyCores = np.where(sizes == 0)[0]
        self.verbosity -= 10
        self.samplingEngine.verbosity -= 10
        return idx, sizes, emptyCores

    def _postSetupApproxPivoted(self, mus:Np2D, pMat:Np2D, Ps:ListAny,
                                Qs:ListAny, sizes:ListAny):
        """Gather per-core pivoted approximants and merge them into the
        trained model; restores state saved in _preSetupApproxPivoted."""
        self.scaleFactor = self._scaleFactorOldPivot
        del self._scaleFactorOldPivot, self._temporaryPivot
        pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs,
                                                               mus, sizes,
                                                              self.polybasis)
        if len(self._musLoc) > 0:
            self._mus = self.checkParameterList(self._musLoc)
            self._mus.append(mus)
        else:
            self._mus = self.checkParameterList(mus)
        self.trainedModel = self._trainedModelOld
        del self._trainedModelOld
        if not self.matchState and self.autoCollapse:
            # collapsed model: projection matrix degenerates to a scalar
            pMat, padLeft, suppNew = 1., 0, [0]
                                                               * len(nsamples)
        else:
            padLeft = self.trainedModel.data.projMat.shape[1]
            # support offsets of the newly appended sample columns
            suppNew = list(padLeft + np.append(0, np.cumsum(nsamples[: -1])))
        self._setupTrainedModel(pMat, padLeft > 0)
        if not self.matchState and self.autoCollapse:
            self.trainedModel.data._collapsed = True
        self.trainedModel.data.Qs += Qs
        self.trainedModel.data.Ps += Ps
        self.trainedModel.data.Psupp += suppNew
        self.trainedModel.data.approxParameters = copy(self.approxParameters)
        self.verbosity += 10
        self.samplingEngine.verbosity += 10

    def _localPivotedResult(self, pMat:Np2D, req:ListAny, emptyCores:ListAny,
                            mus:Np2D) -> Tuple[Np2D, ListAny, Np2D]:
        """Collect this core's snapshot data, mapping state to output via C
        when matching outputs rather than states."""
        pMati = self.samplingEngine.projectionMatrix
        musi = self.samplingEngine.mus
        if not self.matchState:
            # mu-dependent C cannot be applied to orthonormalized samples
            if self.POD == 1 and not (
                              hasattr(self.HFEngine.C, "is_mu_independent")
                  and self.HFEngine.C.is_mu_independent in self._output_lvl):
                raise RROMPyException(("Cannot apply mu-dependent C "
                                       "to orthonormalized samples."))
            vbMng(self, "INIT", "Extracting system output from state.", 35)
            pMatiEff = None
            for j, mu in enumerate(musi):
                pMij = np.expand_dims(self.HFEngine.applyC(pMati[:, j], mu),
                                      -1)
                if pMatiEff is None:
                    pMatiEff = np.array(pMij)
                else:
                    pMatiEff = np.append(pMatiEff, pMij, axis = 1)
            pMati = pMatiEff
            vbMng(self, "DEL", "Done extracting system output.", 35)
        if pMat is None:
            mus = copy(musi.data)
            pMat = copy(pMati)
            # notify idle cores of the local sample count
            if masterCore():
                for dest in emptyCores:
                    req += [isend(len(pMat), dest = dest, tag = dest)]
        else:
            mus = np.vstack((mus, musi.data))
            if not self.matchState and self.autoCollapse:
                pMat = copy(pMati)
            else:
                pMat = np.hstack((pMat, pMati))
        return pMat, req, mus

    @abstractmethod
    def setupApproxPivoted(self, mus:paramList) -> int:
        # NOTE(review): abstract template only — the calls below do not match
        # the helpers' signatures (_preSetupApproxPivoted takes mus,
        # _postSetupApproxPivoted takes five arguments); concrete subclasses
        # are expected to override this entirely.
        if self.checkComputedApproxPivoted(): return -1
        RROMPyAssert(self._mode, message = "Cannot setup approximant.")
        vbMng(self, "INIT", "Setting up pivoted approximant.", 10)
        self._preSetupApproxPivoted()
        data = []
        pass
        self._postSetupApproxPivoted(mus, data)
        vbMng(self, "DEL", "Done setting up pivoted approximant.", 10)
        return 0

    def setupApprox(self, plotEst : str = "NONE") -> int:
        """Compute greedy snapshots of solution map."""
        if self.checkComputedApprox(): return -1
        RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
        vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5)
        vbMng(self, "INIT", "Starting computation of snapshots.", 5)
        max2ErrorEst, self.firstGreedyIterM = np.inf, True
        self._preliminaryTrainingMarginal()
        if self.errorEstimatorKindMarginal == "NONE":
            muidx = []
        else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD":
            muidx = np.arange(len(self.trainedModel.data.musMarginal))
        self._musMarginalTestIdxs = np.array(muidx)
        # greedy loop: stop when estimate drops below tolerance or the
        # sampler exhausts its iteration budget
        while self.firstGreedyIterM or (max2ErrorEst > self.greedyTolMarginal
                          and self.samplerMarginal.npoints
                            < self.maxIterMarginal):
            errorEstTest, muidx, maxErrorEst, mu = \
                                 self.greedyNextSampleMarginal(muidx, plotEst)
            if maxErrorEst is None:
                max2ErrorEst = 1. + self.greedyTolMarginal
            else:
                if len(maxErrorEst) > 0:
                    max2ErrorEst = np.max(maxErrorEst)
                else:
                    max2ErrorEst = np.max(errorEstTest)
                vbMng(self, "MAIN", ("Uniform testing error estimate "
                                     "{:.4e}.").format(max2ErrorEst), 5)
        if plotEst == "LAST":
            self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst)
        vbMng(self, "DEL",
              ("Done computing snapshots (final snapshot count: "
               "{}).").format(len(self.mus)), 5)
        if (self.errorEstimatorKindMarginal == "LOOK_AHEAD_RECOVER"
        and hasattr(self.trainedModel, "_idxExcl")
        and len(self.trainedModel._idxExcl) > 0):
            # fold the look-ahead test models back into the training set
            vbMng(self, "INIT", "Recovering {} test models.".format(
                                         len(self.trainedModel._idxExcl)), 7)
            for j, mu in zip(self.trainedModel._idxExcl,
                             self.trainedModel._musMExcl):
                self.musMarginal.insert(mu, j)
            self._preliminaryMarginalFinalization()
            self._updateTrainedModelMarginalSamples()
            self._finalizeMarginalization()
            self._SMarginal = len(self.musMarginal)
            self._approxParameters["SMarginal"] = self.SMarginal
            self.trainedModel.data.approxParameters["SMarginal"] = (
                                                                self.SMarginal)
            vbMng(self, "DEL", "Done recovering test models.", 7)
        vbMng(self, "DEL", "Done setting up approximant.", 5)
        return 0

    def checkComputedApproxPivoted(self) -> bool:
        """Whether approximants exist at all current marginal samples."""
        return (super().checkComputedApprox()
            and len(self.musMarginal) == len(self.trainedModel.data.musMarginal))

class GenericPivotedGreedyApproximantPolyMatch(
                                       GenericPivotedGreedyApproximantBase,
                                       GenericPivotedApproximantPolyMatch):
    """
    ROM pivoted greedy interpolant computation for parametric problems (with
        polynomial matching) (ABSTRACT).

    Args:
        HFEngine: HF problem solver.
        mu0(optional): Default parameter. Defaults to 0.
        directionPivot(optional): Pivot components. Defaults to [0].
        approxParameters(optional): Dictionary containing values for main
            parameters of approximant. Recognized keys are:
            - 'POD': kind of snapshots orthogonalization; allowed values
                include 0, 1/2, and 1; defaults to 1, i.e. POD;
            - 'scaleFactorDer': scaling factors for derivative computation;
                defaults to 'AUTO';
            - 'matchState': whether to match the system state rather than the
                system output; defaults to False;
            - 'matchingWeight': weight for pole matching optimization;
                defaults to 1;
            - 'matchingKind': kind of matching; allowed values include
                'ROTATE' and 'PROJECT'; defaults to 'ROTATE';
            - 'matchingWeightError': weight for matching in error estimation;
                defaults to 0;
            - 'matchingErrorRelative': whether error estimation is relative;
                defaults to False, i.e.
absolute error; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'autoCollapse': whether to collapse trained reduced model as soon as it is built; defaults to False. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. 
approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for matching; - 'matchingKind': kind of matching; - 'matchingWeightError': weight for matching in error estimation; - 'matchingErrorRelative': whether error estimation is relative; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'autoCollapse': whether to collapse trained reduced model as soon as it is built. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. 
matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. matchingKind: Kind of matching. matchingWeightError: Weight for pole matching optimization in error estimation. matchingErrorRelative: Whether error estimation is relative. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. autoCollapse: Whether to collapse trained reduced model as soon as it is built. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. 
""" def getErrorEstimatorMarginalLookAhead(self) -> Np1D: if not hasattr(self.trainedModel, "_musMExcl"): err = np.zeros(0) err[:] = np.inf self._musMarginalTestIdxs = np.zeros(0, dtype = int) return err self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl, dtype = int) idx, sizes = indicesScatter(len(self.trainedModel._musMExcl), return_sizes = True) err = [] if len(idx) > 0: self.verbosity -= 25 self.trainedModel.verbosity -= 25 for j in idx: muTest = self.trainedModel._musMExcl[j] QEx = self.trainedModel._QsExcl[j].coeffs QAp = self.trainedModel.interpolateMarginalQ(muTest)[0] no2Ex = np.sum(np.abs(QEx) ** 2.) no2Ap = np.sum(np.abs(QAp) ** 2.) inner = np.sum([QEx[j] * QAp[j].conj() for j in range(min(len(QEx), len(QAp)))]) if self.matchingWeightError != 0: PEx = self.trainedModel._PsExcl[j].coeffs.T PAp = self.trainedModel.interpolateMarginalP(muTest)[0].T if not self.trainedModel.data._collapsed: PsuppTest = self.trainedModel._PsuppExcl[j] PEx = dot(self.trainedModel.data.projMat[:, PsuppTest : PsuppTest + PEx.shape[0]], PEx) PAp = dot(self.trainedModel.data.projMat, PAp) - no2PEx = self.HFEngine.norm(PEx, - is_state = self.matchState) ** 2. - no2PAp = self.HFEngine.norm(PAp, - is_state = self.matchState) ** 2. - innerP = [self.HFEngine.innerProduct(PEx[:, j], PAp[:, j], - is_state = self.matchState) - for j in range(min(PEx.shape[1], PAp.shape[1]))] - no2Ex = no2Ex + self.matchingWeightError * np.sum(no2PEx) - no2Ap = no2Ap + self.matchingWeightError * np.sum(no2PAp) - inner = inner + self.matchingWeightError * np.sum(innerP) + no2PEx = np.sum(self.HFEngine.norm(PEx, + is_state = self.matchState) ** 2.) + no2PAp = np.sum(self.HFEngine.norm(PAp, + is_state = self.matchState) ** 2.) 
+ innerP = np.sum([self.HFEngine.innerProduct( + PEx[:, j], PAp[:, j], is_state = self.matchState) + for j in range(min(PEx.shape[1], PAp.shape[1]))]) + no2Ex = no2Ex + self.matchingWeightError * no2PEx + no2Ap = no2Ap + self.matchingWeightError * no2PAp + inner = inner + self.matchingWeightError * innerP + #component of QEx orthogonal to QAp dist2 = no2Ex - np.abs(inner) ** 2. / no2Ap if self.matchingErrorRelative: dist2 /= no2Ex else: dist2 /= 1. + self.matchingWeightError err += [dist2 ** .5] self.verbosity += 25 self.trainedModel.verbosity += 25 return arrayGatherv(np.array(err), sizes) def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight, self.matchingKind, self.HFEngine, self.matchState) def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() _polybasisMarginal = self.polybasisMarginal self._polybasisMarginal = ("PIECEWISE_LINEAR_" + self.samplerMarginal.kind) setupOK = super().setupApprox(*args, **kwargs) self._polybasisMarginal = _polybasisMarginal if self.matchState: self._postApplyC() return setupOK class GenericPivotedGreedyApproximantPoleMatch( GenericPivotedGreedyApproximantPolyMatch, GenericPivotedApproximantPoleMatch): """ ROM pivoted greedy interpolant computation for parametric problems (with pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'matchingErrorRelative': whether error estimation is relative; defaults to False, i.e. absolute error; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 
'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'autoCollapse': whether to collapse trained reduced model as soon as it is built; defaults to False. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'matchingErrorRelative': whether error estimation is relative; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 
'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'autoCollapse': whether to collapse trained reduced model as soon as it is built. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. matchingWeightError: Weight for pole matching optimization in error estimation. matchingErrorRelative: Whether error estimation is relative. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. 
autoCollapse: Whether to collapse trained reduced model as soon as it is built. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def getErrorEstimatorMarginalLookAhead(self) -> Np1D: if not hasattr(self.trainedModel, "_musMExcl"): err = np.zeros(0) err[:] = np.inf self._musMarginalTestIdxs = np.zeros(0, dtype = int) return err self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl, dtype = int) idx, sizes = indicesScatter(len(self.trainedModel._musMExcl), return_sizes = True) err = [] if len(idx) > 0: self.verbosity -= 25 self.trainedModel.verbosity -= 25 for j in idx: muTest = self.trainedModel._musMExcl[j] HITest = self.trainedModel._HIsExcl[j] polesEx = HITest.poles idxGood = np.isinf(polesEx) + np.isnan(polesEx) == False polesEx = polesEx[idxGood] if len(polesEx) == 0: err += [0.] 
continue polesAp = self.trainedModel.interpolateMarginalPoles(muTest)[0] if self.matchingWeightError != 0: resEx = HITest.coeffs[np.where(idxGood)[0]].T resAp = self.trainedModel.interpolateMarginalCoeffs( muTest)[0][: len(polesAp), :].T if not self.trainedModel.data._collapsed: PsuppTest = self.trainedModel._PsuppExcl[j] resEx = dot(self.trainedModel.data.projMat[:, PsuppTest : PsuppTest + resEx.shape[0]], resEx) resAp = dot(self.trainedModel.data.projMat, resAp) else: resEx, resAp = None, None #match Ap to Ex distMat = doubleDistanceMatrix(polesEx, polesAp, self.matchingWeightError, resEx, resAp, self.HFEngine, self.matchState) pmR, pmC = pointMatching(distMat) dist = np.linalg.norm(distMat[pmR, pmC].flatten()) if self.matchingErrorRelative: if self.matchingWeightError != 0: resEx0 = resEx[:, pmR] res0 = np.zeros_like(resEx[:, [0]]) else: resEx0, res0 = None, None dist0 = doubleDistanceMatrix(polesEx[pmR], [0.], self.matchingWeightError, resEx0, res0, self.HFEngine, self.matchState).flatten() dist /= np.linalg.norm(dist0) err += [dist] self.verbosity += 25 self.trainedModel.verbosity += 25 return arrayGatherv(np.array(err), sizes) def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight, self.HFEngine, self.matchState) diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py index 4b5167e..c1f9618 100755 --- a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py +++ b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py @@ -1,696 +1,700 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from collections.abc import Iterable from copy import deepcopy as copy from .generic_pivoted_approximant import (GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximantPolyMatch, GenericPivotedApproximantPoleMatch) from .gather_pivoted_approximant import gatherPivotedApproximant from rrompy.reduction_methods.standard.rational_interpolant import ( RationalInterpolant) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.base.types import paramList from rrompy.utilities.numerical.hash_derivative import nextDerivativeIndices from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, indicesScatter, isend, recv __all__ = ['RationalInterpolantPivotedNoMatch', 'RationalInterpolantPivotedPolyMatch', 'RationalInterpolantPivotedPoleMatch'] class RationalInterpolantPivotedBase(GenericPivotedApproximantBase, RationalInterpolant): def __init__(self, *args, **kwargs): self._preInit() - self._addParametersToList(toBeExcluded = ["indexPrimary", "MAuxiliary", + self._addParametersToList(toBeExcluded = ["polydegreetype", + "indexPrimary", "MAuxiliary", "NAuxiliary"]) super().__init__(*args, **kwargs) if self.nparPivot > 1: self.HFEngine._ignoreResidues = 1 self._postInit() @property def indexPrimary(self): return 0 @property def MAuxiliary(self): return 0 @property def NAuxiliary(self): return 0 + @property + def polydegreetype(self): return 
"TENSOR" + @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. if self._scaleFactorDer == "AUTO": return self.scaleFactorPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif isinstance(scaleFactorDer, Iterable): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" RROMPyAssert(self._mode, message = "Cannot setup interpolation indices.") if (self._musUniqueCN is None or len(self._reorder) != len(self.musPivot)): try: muPC = self.trainedModel.centerNormalizePivot(self.musPivot) except: muPC = self.trainedModel.centerNormalize(self.musPivot) self._musUniqueCN, musIdxsTo, musIdxs, musCount = (muPC.unique( return_index = True, return_inverse = True, return_counts = True)) self._musUnique = self.musPivot[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.nparPivot, cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt def addMarginalSamplePoints(self, musMarginal:paramList) -> int: """Add marginal sample points to reduced model.""" RROMPyAssert(self._mode, message = "Cannot add sample points.") musMarginal = self.checkParameterListMarginal(musMarginal) vbMng(self, "INIT", "Adding marginal sample point{} at {}.".format( "s" * (len(musMarginal) > 1), musMarginal), 5) if (self.SMarginal > 0 and hasattr(self, "polybasisMarginal") and self.polybasisMarginal in sk): RROMPyWarning(("Manually adding new samples with piecewise linear " "marginal interpolation is dangerous. 
Sample depth " "in samplerMarginal must be managed correctly.")) mus = np.empty((self.S * len(musMarginal), self.HFEngine.npar), dtype = np.complex) mus[:, self.directionPivot] = np.tile(self.musPivot.data, (len(musMarginal), 1)) mus[:, self.directionMarginal] = np.repeat(musMarginal.data, self.S, axis = 0) self._mus.append(mus) self._musMarginal.append(musMarginal) N0 = copy(self.N) idx, sizes = indicesScatter(len(musMarginal), return_sizes = True) pMat, Ps, Qs = None, [], [] req, emptyCores = [], np.where(sizes == 0)[0] if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 30) if self.storeAllSamples: self.storeSamples() pL = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = np.complex) else: _scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 for i in idx: musi = self.mus[self.S * (i + self.SMarginal) : self.S * (i + self.SMarginal + 1)] vbMng(self, "MAIN", "Building marginal model no. {} at {}.".format( i + self.SMarginal + 1, self.musMarginal[i + self.SMarginal]), 5) vbMng(self, "INIT", "Starting computation of snapshots.", 10) self.samplingEngine.resetHistory() self.samplingEngine.iterSample(musi) vbMng(self, "DEL", "Done computing snapshots.", 10) self.verbosity -= 5 self.samplingEngine.verbosity -= 5 self._setupRational(self._setupDenominator()) self.verbosity += 5 self.samplingEngine.verbosity += 5 if self.storeAllSamples: self.storeSamples(i + self.SMarginal) pMati = self.samplingEngine.projectionMatrix if not self.matchState: if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C " "to orthonormalized samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) pMatiEff = None for j, mu in enumerate(musi): pMij = np.expand_dims(self.HFEngine.applyC( pMati[:, j], mu), -1) if pMatiEff is None: pMatiEff = np.array(pMij) else: pMatiEff = 
np.append(pMatiEff, pMij, axis = 1) pMati = pMatiEff vbMng(self, "DEL", "Done extracting system output.", 35) if pMat is None and i == 0: for dest in emptyCores: req += [isend(len(pMati), dest = dest, tag = dest)] if self.trainedModel.data._collapsed: pMat = 1. else: if pMat is None: pMat = copy(pMati) else: pMat = np.hstack((pMat, pMati)) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] if self.trainedModel.data._collapsed: Ps[-1].postmultiplyTensorize(pMati.T) del self.trainedModel.data.Q, self.trainedModel.data.P self.N = N0 del self._temporaryPivot self.scaleFactor = _scaleFactorOldPivot for r in req: r.wait() if self.trainedModel.data._collapsed: pMat = pMati[:, : 0] pMat, Ps, Qs, _, _ = gatherPivotedApproximant(pMat, Ps, Qs, self.mus.data, sizes, self.polybasis, False) if self.trainedModel.data._collapsed: self._setupTrainedModel(1.) Psupp = [0] * len(musMarginal) else: self._setupTrainedModel(pMat, len(self.trainedModel.data.projMat) > 0) Psupp = (self.SMarginal + np.arange(0, len(musMarginal))) * self.S self._SMarginal += len(musMarginal) self.trainedModel.data.Qs += Qs self.trainedModel.data.Ps += Ps self.trainedModel.data.Psupp += list(Psupp) self._preliminaryMarginalFinalization() self._finalizeMarginalization() vbMng(self, "DEL", "Done adding marginal sample points.", 5) return 0 def setupApprox(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". 
format(self.name()), 5) self.computeScaleFactor() self.resetSamples() self.samplingEngine.scaleFactor = self.scaleFactorDer self._mus = emptyParameterList() self._musMarginal = emptyParameterList() self.musPivot = self.samplerPivot.generatePoints(self.S) while len(self.musPivot) > self.S: self.musPivot.pop() musMarginal = self.samplerMarginal.generatePoints(self.SMarginal) while len(musMarginal) > self.SMarginal: musMarginal.pop() self._setupTrainedModel(np.zeros((0, 0)), forceNew = True) self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], [] self.trainedModel.data.Psupp = [] self._SMarginal = 0 val = self.addMarginalSamplePoints(musMarginal) vbMng(self, "DEL", "Done setting up approximant.", 5) return val class RationalInterpolantPivotedNoMatch(RationalInterpolantPivotedBase, GenericPivotedApproximantNoMatch): """ ROM pivoted rational interpolant (without matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. 
maximum allowed; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for pivot interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. 
parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. 
Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ class RationalInterpolantPivotedPolyMatch(RationalInterpolantPivotedBase, GenericPivotedApproximantPolyMatch): """ ROM pivoted rational interpolant (with polynomial matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for matching; defaults to 1; - 'matchingKind': kind of matching; allowed values include 'ROTATE' and 'PROJECT'; defaults to 'ROTATE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 
'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for matching; - 'matchingKind': kind of matching; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. 
matchingWeight: Weight for matching. matchingKind: Kind of matching. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. 
""" def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() setupOK = super().setupApprox(*args, **kwargs) if self.matchState: self._postApplyC() return setupOK class RationalInterpolantPivotedPoleMatch(RationalInterpolantPivotedBase, GenericPivotedApproximantPoleMatch): """ ROM pivoted rational interpolant (with pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. 
maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. 
scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. 
        Q: Numpy 1D vector containing complex coefficients of approximant
            denominator.
        P: Numpy 2D vector whose columns are FE dofs of coefficients of
            approximant numerator.
    """

    def setupApprox(self, *args, **kwargs) -> int:
        # Fast path: nothing to do if an up-to-date approximant already exists.
        if self.checkComputedApprox(): return -1
        # NOTE(review): presumably prunes entries of paramsMarginal that do
        # not apply to the selected marginal basis — confirm against the
        # definition of purgeparamsMarginal in the base class.
        self.purgeparamsMarginal()
        setupOK = super().setupApprox(*args, **kwargs)
        # matchState means poles were matched on the full system state; the
        # post-application of the output map is deferred until after setup.
        if self.matchState: self._postApplyC()
        return setupOK
diff --git a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py
index 7ef5e46..6b3ea95 100644
--- a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py
+++ b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py
@@ -1,540 +1,570 @@
# Copyright (C) 2018-2020 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see .
# import warnings import numpy as np from scipy.special import factorial as fact -from scipy.sparse import csr_matrix, hstack, SparseEfficiencyWarning +from scipy.sparse import csr_matrix, hstack, vstack, SparseEfficiencyWarning from copy import deepcopy as copy from itertools import combinations from .trained_model_pivoted_rational_nomatch import ( TrainedModelPivotedRationalNoMatch) from .trained_model_pivoted_rational_polymatch import ( TrainedModelPivotedRationalPolyMatch) from rrompy.utilities.base.types import (Tuple, Np1D, Np2D, List, ListAny, paramList, sampList, HFEng) from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp from rrompy.utilities.numerical import dot from rrompy.utilities.numerical.point_matching import rationalFunctionMatching from rrompy.utilities.numerical.degree import reduceDegreeN from rrompy.utilities.poly_fitting.polynomial import (polybases as ppb, PolynomialInterpolator as PI) from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.poly_fitting.nearest_neighbor import ( NearestNeighborInterpolator as NNI) from rrompy.utilities.poly_fitting.heaviside import (rational2heaviside, polyval as heavival, heavisideUniformShape, HeavisideInterpolator as HI) from rrompy.utilities.poly_fitting.piecewise_linear import (sparsekinds, PiecewiseLinearInterpolator as PLI) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.sampling import sampleList, emptySampleList __all__ = ['TrainedModelPivotedRationalPoleMatch'] class TrainedModelPivotedRationalPoleMatch( TrainedModelPivotedRationalPolyMatch): """ ROM approximant evaluation for pivoted approximants based on interpolation of rational approximants (with pole matching). Attributes: Data: dictionary with all that can be pickled. 
""" def compress(self, collapse : bool = False, tol : float = 0., returnRMat : bool = False, **compressMatrixkwargs): Psupp = copy(self.data.Psupp) RMat = super().compress(collapse, tol, True, **compressMatrixkwargs) if RMat is None: return for j in range(len(self.data.coeffsEff)): self.data.coeffsEff[j] = dot(self.data.coeffsEff[j], RMat.T) for obj, suppj in zip(self.data.HIs, Psupp): obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]]) if hasattr(self, "_HIsExcl"): for obj, suppj in zip(self._HIsExcl, Psupp): obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]]) if not hasattr(self, "_PsExcl"): self._PsuppExcl = [0] * len(self._PsuppExcl) if returnRMat: return RMat - def setupMarginalInterp(self, approx, interpPars:ListAny, extraPar = None): + def setupMarginalInterp(self, approx, interpPars:ListAny, extraPar = None, + correction : str = None): vbMng(self, "INIT", "Starting computation of marginal interpolator.", 12) musMCN = self.centerNormalizeMarginal(self.data.musMarginal) nM, pbM = len(musMCN), approx.polybasisMarginal if pbM in ppb + rbpb: if extraPar: approx._setMMarginalAuto() _MMarginalEff = approx.paramsMarginal["MMarginal"] if pbM in ppb: p = PI() elif pbM in rbpb: p = RBI() else: # if pbM in sparsekinds + ["NEARESTNEIGHBOR"]: if pbM == "NEARESTNEIGHBOR": p = NNI() else: # if pbM in sparsekinds: pllims = [[-1.] * self.data.nparMarginal, [1.] 
* self.data.nparMarginal] p = PLI() for ipts, pts in enumerate(self.data.suppEffPts): if len(pts) == 0: raise RROMPyException("Empty list of support points.") musMCNEff, valsEff = musMCN[pts], np.eye(len(pts)) if pbM in ppb + rbpb: if extraPar: if ipts > 0: verb = approx.verbosity approx.verbosity = 0 _musM = approx.musMarginal approx.musMarginal = musMCNEff approx._setMMarginalAuto() approx.musMarginal = _musM approx.verbosity = verb else: approx.paramsMarginal["MMarginal"] = reduceDegreeN( _MMarginalEff, len(musMCNEff), self.data.nparMarginal, approx.paramsMarginal["polydegreetypeMarginal"]) MMEff = approx.paramsMarginal["MMarginal"] while MMEff >= 0: wellCond, msg = p.setupByInterpolation(musMCNEff, valsEff, MMEff, *interpPars) vbMng(self, "MAIN", msg, 30) if wellCond: break vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing " "MMarginal by 1."), 35) MMEff -= 1 if MMEff < 0: raise RROMPyException(("Instability in computation of " "interpolant. Aborting.")) if (pbM in rbpb and len(interpPars) > 4 and "optimizeScalingBounds" in interpPars[4].keys()): interpPars[4]["optimizeScalingBounds"] = [-1., -1.] 
elif pbM == "NEARESTNEIGHBOR": if ipts > 0: interpPars[0] = 1 p.setupByInterpolation(musMCNEff, valsEff, *interpPars) elif ipts == 0: # and pbM in sparsekinds: p.setupByInterpolation(musMCNEff, valsEff, pllims, extraPar[pts], *interpPars) if ipts == 0: self.data.marginalInterp = copy(p) self.data.coeffsEff, self.data.polesEff = [], [] N = len(self.data.suppEffIdx) goodIdx = np.where(self.data.suppEffIdx != -1)[0] for hi, sup in zip(self.data.HIs, self.data.Psupp): pEff, cEff = hi.poles.reshape(-1, 1), hi.coeffs cEffH = np.empty((cEff.shape[0], 0)) if (self.data._collapsed or self.data.projMat.shape[1] == cEff.shape[1]): cEff = np.hstack([cEff, cEffH]) else: supC = self.data.projMat.shape[1] - sup - cEff.shape[1] cEff = hstack((csr_matrix((len(cEff), sup)), csr_matrix(cEff), csr_matrix((len(cEff), supC)), cEffH), "csr") goodIdxC = np.append(goodIdx, np.arange(N, cEff.shape[0])) self.data.coeffsEff += [cEff[goodIdxC, :]] self.data.polesEff += [pEff[goodIdx]] else: + if correction is None: correction = approx.badPoleCorrection + correction = correction.upper().strip().replace(" ","") + if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]: + RROMPyWarning(("Correction kind not recognized. 
" + "Overriding to 'ERASE'.")) + correction = "ERASE" ptsBad = [i for i in range(nM) if i not in pts] idxBad = np.where(self.data.suppEffIdx[goodIdx] == ipts)[0] warnings.simplefilter('ignore', SparseEfficiencyWarning) + if (self.data._collapsed + or self.data.projMat.shape[1] == cEff.shape[1]): + cfBase = np.zeros((len(idxBad), cEff.shape[1]), + dtype = np.complex) + else: + cfBase = csr_matrix((len(idxBad), + self.data.coeffsEff[0].shape[1]), + dtype = np.complex) if pbM in sparsekinds: - for ij, j in enumerate(ptsBad): - nearest = pts[np.argmin(np.sum(np.abs(musMCNEff.data - - np.tile(musMCN[j], [len(pts), 1]) - ), axis = 1).flatten())] - self.data.coeffsEff[j][idxBad] = copy( - self.data.coeffsEff[nearest][idxBad]) - self.data.polesEff[j][idxBad] = copy( - self.data.polesEff[nearest][idxBad]) + valMuMBad = np.zeros((len(pts), len(ptsBad))) + for ijb, jb in enumerate(ptsBad): + dists = np.sum(np.abs(musMCNEff.data + - np.tile(musMCN[jb], + [len(pts), 1])), + axis = 1).flatten() + idxs = np.argsort(dists) + dists /= dists[idxs[0]] + idxs = idxs[np.where(dists[idxs] < 1. + 1e-8)[0]] + valMuMBad[idxs, ijb] = (dists[idxs] ** -1. + / np.sum(dists[idxs] ** -1.)) else: - if (self.data._collapsed - or self.data.projMat.shape[1] == cEff.shape[1]): - cfBase = np.zeros((len(idxBad), cEff.shape[1]), - dtype = np.complex) - else: - cfBase = csr_matrix((len(idxBad), - self.data.coeffsEff[0].shape[1]), - dtype = np.complex) valMuMBad = p(musMCN[ptsBad]) - for ijb, jb in enumerate(ptsBad): - self.data.coeffsEff[jb][idxBad] = copy(cfBase) - self.data.polesEff[jb][idxBad] = 0. - for ij, j in enumerate(pts): - val = valMuMBad[ij][ijb] - if not np.isclose(val, 0., atol = 1e-15): + for ijb, jb in enumerate(ptsBad): + self.data.coeffsEff[jb][idxBad] = copy(cfBase) + self.data.polesEff[jb][idxBad] = 0. 
+ for ij, j in enumerate(pts): + val = valMuMBad[ij][ijb] + if not np.isclose(val, 0., atol = 1e-15): + if correction != "RATIONAL": self.data.coeffsEff[jb][idxBad] += (val * self.data.coeffsEff[j][idxBad]) - self.data.polesEff[jb][idxBad] += (val + self.data.polesEff[jb][idxBad] += (val * self.data.polesEff[j][idxBad]) + if correction == "POLYNOMIAL": + N = len(self.data.polesEff[jb]) + idxR = N + np.arange(len(idxBad)) + if isinstance(self.data.coeffsEff[jb], csr_matrix): + self.data.coeffsEff[jb] = vstack([ + self.data.coeffsEff[jb][: N], + - self.data.coeffsEff[jb][idxBad], + self.data.coeffsEff[jb][N :]]) + else: + self.data.coeffsEff[jb] = np.vstack([ + self.data.coeffsEff[jb][: N], + - self.data.coeffsEff[jb][idxBad], + self.data.coeffsEff[jb][N :]]) + self.data.polesEff[jb] = np.vstack([ + self.data.polesEff[jb], + self.data.polesEff[jb][idxBad]]) + self.removePoleResLocal(idxR, jb, None, correction) warnings.filters.pop(0) if pbM in ppb + rbpb: approx.paramsMarginal["MMarginal"] = _MMarginalEff vbMng(self, "DEL", "Done computing marginal interpolator.", 12) def updateEffectiveSamples(self, exclude:List[int], *args, **kwargs): if hasattr(self, "_idxExcl"): for j, excl in enumerate(self._idxExcl): self.data.HIs.insert(excl, self._HIsExcl[j]) TrainedModelPivotedRationalNoMatch.updateEffectiveSamples(self, exclude) self._HIsExcl = [] for excl in self._idxExcl[::-1]: self._HIsExcl = [self.data.HIs.pop(excl)] + self._HIsExcl poles = [hi.poles for hi in self.data.HIs] coeffs = [hi.coeffs for hi in self.data.HIs] self.initializeFromLists(poles, coeffs, self.data.Psupp, self.data.HIs[0].polybasis, *args, **kwargs) def initializeFromRational(self, *args, **kwargs): """Initialize Heaviside representation.""" RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") poles, coeffs = [], [] for Q, P in zip(self.data.Qs, self.data.Ps): cfs, pls, basis = rational2heaviside(P, Q) poles += [pls] coeffs += [cfs] self.initializeFromLists(poles, coeffs, 
self.data.Psupp, basis, *args, **kwargs) def initializeFromLists(self, poles:ListAny, coeffs:ListAny, supps:ListAny, basis:str, matchingWeight:float, HFEngine:HFEng, is_state:bool): """Initialize Heaviside representation.""" poles, coeffs = heavisideUniformShape(poles, coeffs) poles, coeffs = rationalFunctionMatching(poles, coeffs, self.data.musMarginal.data, matchingWeight, supps, self.data.projMat, HFEngine, is_state, None) self.data.HIs = [] for pls, cfs in zip(poles, coeffs): hsi = HI() hsi.poles = pls if len(cfs) == len(pls): cfs = np.pad(cfs, ((0, 1), (0, 0)), "constant") hsi.coeffs = cfs hsi.npar = 1 hsi.polybasis = basis self.data.HIs += [hsi] self.data.suppEffPts = [np.arange(len(self.data.HIs))] self.data.suppEffIdx = np.zeros(len(poles[0]), dtype = int) def checkShared(self, shared:float, correction : str = "ERASE") -> str: N = len(self.data.HIs[0].poles) M = len(self.data.HIs) correction = correction.upper().strip().replace(" ","") if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]: RROMPyWarning(("Correction kind not recognized. Overriding to " "'ERASE'.")) correction = "ERASE" goodLocPoles = np.array([np.isinf(hi.poles) == False for hi in self.data.HIs]) self.data.suppEffPts = [np.arange(len(self.data.HIs))] self.data.suppEffIdx = - np.ones(N, dtype = int) goodGlobPoles = np.sum(goodLocPoles, axis = 0) goodEnoughPoles = goodGlobPoles >= max(1., 1. 
* shared * M) keepPole = np.where(goodEnoughPoles)[0] halfPole = np.where(goodEnoughPoles * (goodGlobPoles < M))[0] self.data.suppEffIdx[keepPole] = 0 for idxR in halfPole: pts = np.where(goodLocPoles[:, idxR])[0] idxEff = len(self.data.suppEffPts) for idEff, prevPts in enumerate(self.data.suppEffPts): if len(prevPts) == len(pts): if np.allclose(prevPts, pts): idxEff = idEff break if idxEff == len(self.data.suppEffPts): self.data.suppEffPts += [pts] self.data.suppEffIdx[idxR] = idxEff degBad = len(self.data.HIs[0].coeffs) - N - 1 for pt in range(len(self.data.HIs)): idxR = np.where(goodLocPoles[pt] * (goodEnoughPoles == False))[0] self.removePoleResLocal(idxR, pt, degBad, correction, True) return ("Hard-erased {} pole".format(N - len(keepPole)) + "s" * (N - len(keepPole) != 1) + " and soft-erased {} pole".format(len(halfPole)) + "s" * (len(halfPole) != 1) + ".") def removePoleResLocal(self, badidx:List[int], margidx:int, degcorr : int = None, correction : str = "ERASE", hidden : bool = False): if not hasattr(badidx, "__len__"): badidx = [badidx] badidx = np.array(badidx) if len(badidx) == 0: return correction = correction.upper().strip().replace(" ","") if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]: RROMPyWarning(("Correction kind not recognized. 
Overriding to " "'ERASE'.")) correction = "ERASE" if hidden: N = len(self.data.HIs[margidx].poles) else: N = len(self.data.polesEff[margidx]) goodidx = [j for j in range(N) if j not in badidx] if correction != "ERASE": if degcorr is None: if hidden: degcorr = len(self.data.HIs[margidx].coeffs) - N - 1 else: degcorr = self.data.coeffsEff[margidx].shape[0] - N - 1 muM, musEff = self.data.musMarginal[margidx], [] polybasis = self.data.HIs[margidx].polybasis for mu in self.data.mus: if np.allclose(mu(self.data.directionMarginal), muM): musEff += [mu(self.data.directionPivot[0])] musEff = self.centerNormalizePivot(musEff) if hidden: plsBad = self.data.HIs[margidx].poles[badidx] else: plsBad = self.data.polesEff[margidx][badidx, 0] plsBadEff = np.isinf(plsBad) == False plsBad, badidx = plsBad[plsBadEff], badidx[plsBadEff] if hidden: plsGood = self.data.HIs[margidx].poles[goodidx] corrVals = heavival(musEff, self.data.HIs[margidx].coeffs[badidx], plsBad, polybasis).T else: plsGood = self.data.polesEff[margidx][goodidx] - corrVals = heavival(musEff, - self.data.coeffsEff[margidx].toarray()[badidx], - plsBad, polybasis).T + if isinstance(self.data.coeffsEff[margidx], csr_matrix): + cEff = self.data.coeffsEff[margidx].toarray()[badidx] + else: + cEff = self.data.coeffsEff[margidx][badidx] + corrVals = heavival(musEff, cEff, plsBad, polybasis).T if correction == "RATIONAL": hi = HI() hi.setupByInterpolation(musEff, plsGood, corrVals, degcorr, polybasis) if hidden: self.data.HIs[margidx].coeffs[goodidx] += ( hi.coeffs[: len(goodidx)]) else: self.data.coeffsEff[margidx][goodidx, :] += ( hi.coeffs[: len(goodidx)]) polyCorr = hi.coeffs[len(goodidx) :] elif correction == "POLYNOMIAL": pi = PI() pi.setupByInterpolation(musEff, corrVals, degcorr, polybasis.split("_")[0]) polyCorr = pi.coeffs if hidden: self.data.HIs[margidx].coeffs[N : N + degcorr + 1] += polyCorr else: self.data.coeffsEff[margidx][N : N + degcorr + 1, :] += ( polyCorr) if hidden: 
            # Hidden mode: soft-erase in the Heaviside representation — mark
            # the bad poles as infinite and zero their residues, keeping array
            # shapes unchanged.
            self.data.HIs[margidx].poles[badidx] = np.inf
            self.data.HIs[margidx].coeffs[badidx] = 0.
        else:
            # Visible mode: physically drop the bad rows from the effective
            # pole/coefficient arrays (polynomial part rows, past N, are kept).
            self.data.polesEff[margidx] = self.data.polesEff[margidx][goodidx]
            goodidx += list(range(N, self.data.coeffsEff[margidx].shape[0]))
            self.data.coeffsEff[margidx] = (
                                      self.data.coeffsEff[margidx][goodidx, :])

    def removePoleResGlobal(self, badidx:List[int], degcorr : int = None,
                            correction : str = "ERASE", hidden : bool = False):
        # Apply the same local pole/residue removal to every marginal sample.
        if not hasattr(badidx, "__len__"): badidx = [badidx]
        if len(badidx) == 0: return
        # Normalize the correction keyword; unrecognized values fall back to
        # 'ERASE' with a warning.
        correction = correction.upper().strip().replace(" ","")
        if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]:
            RROMPyWarning(("Correction kind not recognized. Overriding to "
                           "'ERASE'."))
            correction = "ERASE"
        for margidx in range(len(self.data.HIs)):
            self.removePoleResLocal(badidx, margidx, degcorr, correction,
                                    hidden)

    def getApproxReduced(self, mu : paramList = []) -> sampList:
        """
        Evaluate reduced representation of approximant at arbitrary parameter.

        Args:
            mu: Target parameter.
        """
        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
        mu = self.checkParameterList(mu)
        # Memoization: re-evaluate only if the requested parameters differ
        # from the last solved ones.
        if (not hasattr(self, "lastSolvedApproxReduced")
         or self.lastSolvedApproxReduced != mu):
            vbMng(self, "INIT",
                  "Evaluating approximant at mu = {}.".format(mu), 12)
            muP = self.centerNormalizePivot(mu(self.data.directionPivot))
            muM = mu(self.data.directionMarginal)
            # One interpolated Heaviside model per requested parameter.
            his = self.interpolateMarginalInterpolator(muM)
            for i, (mP, hi) in enumerate(zip(muP, his)):
                uAppR = hi(mP)[:, 0]
                if i == 0:
                    # Allocate the output on first iteration, once the reduced
                    # dimension is known.
                    uApproxR = np.empty((len(uAppR), len(mu)),
                                        dtype = np.complex)
                uApproxR[:, i] = uAppR
            self.uApproxReduced = sampleList(uApproxR)
            vbMng(self, "DEL", "Done evaluating approximant.", 12)
            self.lastSolvedApproxReduced = mu
        return self.uApproxReduced

    def interpolateMarginalInterpolator(self, mu : paramList = []) -> ListAny:
        """Obtain interpolated approximant interpolator."""
        mu = self.checkParameterListMarginal(mu)
        vbMng(self, "INIT",
              "Interpolating marginal models at mu = {}.".format(mu), 95)
        his = []
        muC =
self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) verb, self.verbosity = self.verbosity, 0 poless = self.interpolateMarginalPoles(mu, mIvals) coeffss = self.interpolateMarginalCoeffs(mu, mIvals) self.verbosity = verb for j in range(len(mu)): his += [HI()] his[-1].poles = poless[j] his[-1].coeffs = coeffss[j] his[-1].npar = 1 his[-1].polybasis = self.data.HIs[0].polybasis vbMng(self, "DEL", "Done interpolating marginal models.", 95) return his def interpolateMarginalPoles(self, mu : paramList = [], mIvals : Np2D = None) -> ListAny: """Obtain interpolated approximant poles.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal poles at mu = {}.".format(mu), 95) intMPoles = np.zeros((len(mu),) + self.data.polesEff[0].shape, dtype = np.complex) if mIvals is None: muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) for pEff, mI in zip(self.data.polesEff, mIvals): for j, m in enumerate(mI): intMPoles[j] += m * pEff vbMng(self, "DEL", "Done interpolating marginal poles.", 95) return intMPoles[..., 0] def interpolateMarginalCoeffs(self, mu : paramList = [], mIvals : Np2D = None) -> ListAny: """Obtain interpolated approximant coefficients.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal coefficients at mu = {}.".format(mu), 95) intMCoeffs = np.zeros((len(mu),) + self.data.coeffsEff[0].shape, dtype = np.complex) if mIvals is None: muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) for cEff, mI in zip(self.data.coeffsEff, mIvals): for j, m in enumerate(mI): intMCoeffs[j] += m * cEff vbMng(self, "DEL", "Done interpolating marginal coefficients.", 95) return intMCoeffs def getPVal(self, mu : paramList = []) -> sampList: """ Evaluate rational numerator at arbitrary parameter. Args: mu: Target parameter. 
""" RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = self.checkParameterList(mu) p = emptySampleList() muP = self.centerNormalizePivot(mu(self.data.directionPivot)) muM = mu(self.data.directionMarginal) his = self.interpolateMarginalInterpolator(muM) for i, (mP, hi) in enumerate(zip(muP, his)): Pval = hi(mP) * np.prod(mP[0] - hi.poles) if i == 0: p.reset((len(Pval), len(mu)), dtype = np.complex) p[i] = Pval return p def getQVal(self, mu:Np1D, der : List[int] = None, scl : Np1D = None) -> Np1D: """ Evaluate rational denominator at arbitrary parameter. Args: mu: Target parameter. der(optional): Derivatives to take before evaluation. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = self.checkParameterList(mu) muP = self.centerNormalizePivot(mu(self.data.directionPivot)) muM = mu(self.data.directionMarginal) if der is None: derP, derM = 0, [0] else: derP = der[self.data.directionPivot[0]] derM = [der[x] for x in self.data.directionMarginal] if np.any(np.array(derM) != 0): raise RROMPyException(("Derivatives of Q with respect to marginal " "parameters not allowed.")) sclP = 1 if scl is None else scl[self.data.directionPivot[0]] derVal = np.zeros(len(mu), dtype = np.complex) pls = self.interpolateMarginalPoles(muM) for i, (mP, pl) in enumerate(zip(muP, pls)): N = len(pl) if derP == N: derVal[i] = 1. elif derP >= 0 and derP < N: plDist = mP[0] - pl for terms in combinations(np.arange(N), N - derP): derVal[i] += np.prod(plDist[list(terms)]) return sclP ** derP * fact(derP) * derVal def getPoles(self, marginalVals : ListAny = [fp]) -> paramList: """ Obtain approximant poles. Returns: Numpy complex vector of poles. 
""" RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") RROMPyAssert(self.data.npar, len(marginalVals), "Number of parameters") mVals = list(marginalVals) rDim = mVals.index(fp) if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]: raise RROMPyException(("Exactly 1 'freepar' entry in " "marginalVals must be provided.")) if rDim != self.data.directionPivot[0]: raise RROMPyException(("'freepar' entry in marginalVals must " "coincide with pivot direction.")) mVals[rDim] = self.data.mu0(0, rDim) mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim] roots = (self.data.scaleFactor[rDim] * self.interpolateMarginalPoles(mMarg)[0]) return self.mapParameterList(self.mapParameterList(self.data.mu0(rDim), idx = [rDim])(0, 0) + roots, "B", [rDim])(0) def getResidues(self, marginalVals : ListAny = [fp]) -> Tuple[paramList, Np2D]: """ Obtain approximant residues. Returns: Numpy matrix with residues as columns. """ mVals = list(marginalVals) pls = self.getPoles(mVals) rDim = mVals.index(fp) mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim] res = (self.interpolateMarginalCoeffs(mMarg)[0][: len(pls), :].T * self.data.scaleFactor[self.data.directionPivot[0]]) if not self.data._collapsed: res = dot(self.data.projMat, res) return pls, res.T diff --git a/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py b/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py index 1bdc5f7..288b71d 100755 --- a/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py +++ b/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py @@ -1,483 +1,487 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from rrompy.hfengines.base.linear_affine_engine import checkIfAffine from .generic_greedy_approximant import GenericGreedyApproximant from rrompy.utilities.poly_fitting.polynomial import (polybases, polyfitname, PolynomialInterpolator as PI, polyvander) from rrompy.utilities.numerical import dot from rrompy.utilities.expression import expressionEvaluator from rrompy.reduction_methods.standard import RationalInterpolant from rrompy.utilities.base.types import Np1D, Tuple, paramVal, List from rrompy.utilities.base.verbosity_depth import verbosityManager as vbMng from rrompy.utilities.poly_fitting import customFit from rrompy.utilities.exception_manager import (RROMPyWarning, RROMPyException, RROMPyAssert, RROMPy_FRAGILE) from rrompy.sampling import sampleList, emptySampleList __all__ = ['RationalInterpolantGreedy'] class RationalInterpolantGreedy(GenericGreedyApproximant, RationalInterpolant): """ ROM greedy rational interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': number of starting training points; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. 
maximum allowed; - 'sampler': sample point generator; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'samplerTrainSet': training sample points generator; defaults to sampler; - 'polybasis': type of basis for interpolation; defaults to 'MONOMIAL'; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', 'LOOK_AHEAD_OUTPUT', and 'NONE'; defaults to 'NONE'; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'N': degree of rational interpolant denominator; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'samplerTrainSet': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: number of test points. N: Denominator degree of approximant. sampler: Sample point generator. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. samplerTrainSet: training sample points generator. errorEstimatorKind: kind of error estimator. functionalSolve: Strategy for minimization of denominator functional. interpTol: tolerance for interpolation. QTol: tolerance for robust rational denominator management. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. 
lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ _allowedEstimatorKinds = ["AFFINE", "DISCREPANCY", "LOOK_AHEAD", "LOOK_AHEAD_RES", "LOOK_AHEAD_OUTPUT", "NONE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["errorEstimatorKind"], ["DISCREPANCY"], - toBeExcluded = ["M", "indexPrimary", - "MAuxiliary", "NAuxiliary", + toBeExcluded = ["M", "polydegreetype", + "indexPrimary", "MAuxiliary", + "NAuxiliary", "radialDirectionalWeights"]) super().__init__(*args, **kwargs) self.M = "AUTO" self._postInit() def _setMAuto(self): self.M = self.S - 1 @property def N(self): """Value of N.""" return self._N _N_fix = None @N.setter def N(self, N): self._N_isauto, self._N_shift = True, 0 if isinstance(N, str): N = N.strip().replace(" ","") if "-" in N: self._N_shift = int(N.split("-")[-1]) self._N_fix, N = -1, 0 elif self._N_fix is None: self._N_fix = N if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N def _setNAuto(self): N = max(0, self.S - self._N_shift - 1) if self._N_fix >= 0: N = min(N, self._N_fix) self.N = N @property def indexPrimary(self): return 0 @property def MAuxiliary(self): return 0 @property def NAuxiliary(self): return 0 + @property + def polydegreetype(self): return "TENSOR" + @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in polybases: raise RROMPyException("Sample type not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. 
Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def errorEstimatorKind(self): """Value of errorEstimatorKind.""" return self._errorEstimatorKind @errorEstimatorKind.setter def errorEstimatorKind(self, errorEstimatorKind): errorEstimatorKind = errorEstimatorKind.upper() if errorEstimatorKind not in self._allowedEstimatorKinds: RROMPyWarning(("Error estimator kind not recognized. Overriding " "to 'NONE'.")) errorEstimatorKind = "NONE" self._errorEstimatorKind = errorEstimatorKind self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind def _polyvanderAuxiliary(self, mus, deg, *args): return polyvander(mus, deg, *args) def getErrorEstimatorDiscrepancy(self, mus:Np1D) -> Np1D: """Discrepancy-based residual estimator.""" checkIfAffine(self.HFEngine, "apply discrepancy-based error estimator", False, self._affine_lvl) mus = self.checkParameterList(mus) muCTest = self.trainedModel.centerNormalize(mus) tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0 QTest = self.trainedModel.getQVal(mus) QTzero = np.where(QTest == 0.)[0] if len(QTzero) > 0: RROMPyWarning(("Adjusting estimator to avoid division by " "numerically zero denominator.")) QTest[QTzero] = np.finfo(np.complex).eps / (1. + self.N) self.HFEngine.buildA() self.HFEngine.buildb() nAs, nbs = self.HFEngine.nAs, self.HFEngine.nbs muTrainEff = self.mapParameterList(self.mus) muTestEff = self.mapParameterList(mus) PTrain = self.trainedModel.getPVal(self.mus).data.T QTrain = self.trainedModel.getQVal(self.mus) QTzero = np.where(QTrain == 0.)[0] if len(QTzero) > 0: RROMPyWarning(("Adjusting estimator to avoid division by " "numerically zero denominator.")) QTrain[QTzero] = np.finfo(np.complex).eps / (1. 
+ self.N) PTest = self.trainedModel.getPVal(mus).data self.trainedModel.verbosity = tMverb radiusAbTrain = np.empty((self.S, nAs * self.S + nbs), dtype = np.complex) radiusA = np.empty((self.S, nAs, len(mus)), dtype = np.complex) radiusb = np.empty((nbs, len(mus)), dtype = np.complex) for j, thA in enumerate(self.HFEngine.thAs): idxs = j * self.S + np.arange(self.S) radiusAbTrain[:, idxs] = expressionEvaluator(thA[0], muTrainEff, (self.S, 1)) * PTrain radiusA[:, j] = PTest * expressionEvaluator(thA[0], muTestEff, (len(mus),)) for j, thb in enumerate(self.HFEngine.thbs): idx = nAs * self.S + j radiusAbTrain[:, idx] = QTrain * expressionEvaluator(thb[0], muTrainEff, (self.S,)) radiusb[j] = QTest * expressionEvaluator(thb[0], muTestEff, (len(mus),)) QRHSNorm2 = self._affineResidualMatricesContraction(radiusb) vanTrain = self._polyvanderAuxiliary(self._musUniqueCN, self.S - 1, self.polybasis0, self._derIdxs, self._reorder) interpPQ = customFit(vanTrain, radiusAbTrain, rcond = self.interpTol) vanTest = self._polyvanderAuxiliary(muCTest, self.S - 1, self.polybasis0) DradiusAb = vanTest.dot(interpPQ) radiusA = (radiusA - DradiusAb[:, : - nbs].reshape(len(mus), -1, self.S).T) radiusb = radiusb - DradiusAb[:, - nbs :].T ff, Lf, LL = self._affineResidualMatricesContraction(radiusb, radiusA) err = np.abs((LL - 2. 
* np.real(Lf) + ff) / QRHSNorm2) ** .5 return err def getErrorEstimatorLookAhead(self, mus:Np1D, what : str = "") -> Tuple[Np1D, List[int]]: """Residual estimator based on look-ahead idea.""" errTest, QTest, idxMaxEst = self._EIMStep(mus) mu_muTestS = mus[idxMaxEst] app_muTestSample = self.trainedModel.getApproxReduced(mu_muTestS) if self._mode == RROMPy_FRAGILE: if what == "RES" and not self.HFEngine.isCEye: raise RROMPyException(("Cannot compute LOOK_AHEAD_RES " "estimator in fragile mode for " "non-scalar C.")) app_muTestSample = dot(self.trainedModel.data.projMat[:, : app_muTestSample.shape[0]], app_muTestSample) else: app_muTestSample = dot(self.samplingEngine.projectionMatrix, app_muTestSample) app_muTestSample = sampleList(app_muTestSample) if what == "RES": errmu = self.HFEngine.residual(mu_muTestS, app_muTestSample, post_c = False) solmu = self.HFEngine.residual(mu_muTestS, None, post_c = False) normSol = self.HFEngine.norm(solmu, dual = True) normErr = self.HFEngine.norm(errmu, dual = True) else: for j, mu in enumerate(mu_muTestS): uEx = self.samplingEngine.nextSample(mu) if what == "OUTPUT": uEx = self.HFEngine.applyC(uEx, mu) app_muTS = self.HFEngine.applyC(app_muTestSample[j], mu) if j == 0: app_muTestS = emptySampleList() app_muTestS.reset((len(app_muTS), len(mu_muTestS)), dtype = np.complex) app_muTestS[j] = app_muTS if j == 0: solmu = emptySampleList() solmu.reset((len(uEx), len(mu_muTestS)), dtype = np.complex) solmu[j] = uEx if what == "OUTPUT": app_muTestSample = app_muTestS errmu = solmu - app_muTestSample normSol = self.HFEngine.norm(solmu, is_state = what != "OUTPUT") normErr = self.HFEngine.norm(errmu, is_state = what != "OUTPUT") errsamples = normErr / normSol musT = copy(self.mus) musT.append(mu_muTestS) musT = self.trainedModel.centerNormalize(musT) musC = self.trainedModel.centerNormalize(mus) errT = np.zeros((len(musT), len(mu_muTestS)), dtype = np.complex) errT[np.arange(len(self.mus), len(musT)), np.arange(len(mu_muTestS))] = 
errsamples * QTest[idxMaxEst] vanT = self._polyvanderAuxiliary(musT, self.S, self.polybasis) fitOut = customFit(vanT, errT, full = True, rcond = self.interpTol) vbMng(self, "MAIN", ("Fitting {} samples with degree {} through {}... Conditioning " "of LS system: {:.4e}.").format(len(vanT), self.S, polyfitname(self.polybasis), fitOut[1][2][0] / fitOut[1][2][-1]), 15) vanC = self._polyvanderAuxiliary(musC, self.S, self.polybasis) err = np.sum(np.abs(vanC.dot(fitOut[0])), axis = -1) / QTest return err, idxMaxEst def getErrorEstimatorNone(self, mus:Np1D) -> Np1D: """EIM-based residual estimator.""" err = np.max(self._EIMStep(mus, True), axis = 1) err *= self.greedyTol / np.mean(err) return err def _EIMStep(self, mus:Np1D, only_one : bool = False) -> Tuple[Np1D, Np1D, List[int]]: """EIM step to find next magic point.""" mus = self.checkParameterList(mus) tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0 QTest = self.trainedModel.getQVal(mus) QTzero = np.where(QTest == 0.)[0] if len(QTzero) > 0: RROMPyWarning(("Adjusting estimator to avoid division by " "numerically zero denominator.")) QTest[QTzero] = np.finfo(np.complex).eps / (1. 
+ self.N) QTest = np.abs(QTest) muCTest = self.trainedModel.centerNormalize(mus) muCTrain = self.trainedModel.centerNormalize(self.mus) self.trainedModel.verbosity = tMverb vanTest = self._polyvanderAuxiliary(muCTest, self.S - 1, self.polybasis) vanTestNext = self._polyvanderAuxiliary(muCTest, self.S, self.polybasis)[:, vanTest.shape[1] :] idxsTest = np.arange(vanTestNext.shape[1]) basis = np.zeros((len(idxsTest), 0), dtype = float) idxMaxEst = [] while len(idxsTest) > 0: vanTrial = self._polyvanderAuxiliary(muCTrain, self.S - 1, self.polybasis) vanTrialNext = self._polyvanderAuxiliary(muCTrain, self.S, self.polybasis)[:, vanTrial.shape[1] :] vanTrial = np.hstack((vanTrial, vanTrialNext.dot(basis).reshape( len(vanTrialNext), basis.shape[1]))) valuesTrial = vanTrialNext[:, idxsTest] vanTestEff = np.hstack((vanTest, vanTestNext.dot(basis).reshape( len(vanTestNext), basis.shape[1]))) vanTestNextEff = vanTestNext[:, idxsTest] coeffTest = np.linalg.solve(vanTrial, valuesTrial) errTest = (np.abs(vanTestNextEff - vanTestEff.dot(coeffTest)) / np.expand_dims(QTest, 1)) if only_one: return errTest idxMaxErr = np.unravel_index(np.argmax(errTest), errTest.shape) idxMaxEst += [idxMaxErr[0]] muCTrain.append(muCTest[idxMaxErr[0]]) basis = np.pad(basis, [(0, 0), (0, 1)], "constant") basis[idxsTest[idxMaxErr[1]], -1] = 1. 
idxsTest = np.delete(idxsTest, idxMaxErr[1]) return errTest, QTest, idxMaxEst def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D: """Standard residual-based error estimator.""" setupOK = self.setupApproxLocal() if setupOK > 0: err = np.empty(len(mus)) err[:] = np.nan if not return_max: return err return err, [- setupOK], np.nan mus = self.checkParameterList(mus) vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format(mus), 10) if self.errorEstimatorKind == "AFFINE": err = self.getErrorEstimatorAffine(mus) else: self._setupInterpolationIndices() if self.errorEstimatorKind == "DISCREPANCY": err = self.getErrorEstimatorDiscrepancy(mus) elif self.errorEstimatorKind[: 10] == "LOOK_AHEAD": err, idxMaxEst = self.getErrorEstimatorLookAhead(mus, self.errorEstimatorKind[11 :]) else: #if self.errorEstimatorKind == "NONE": err = self.getErrorEstimatorNone(mus) vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10) if not return_max: return err if self.errorEstimatorKind[: 10] != "LOOK_AHEAD": idxMaxEst = [np.argmax(err)] return err, idxMaxEst, err[idxMaxEst] _warnPlottingNormalization = 1 def plotEstimator(self, *args, **kwargs): super().plotEstimator(*args, **kwargs) if (self.errorEstimatorKind == "NONE" and self._warnPlottingNormalization): RROMPyWarning(("Error estimator arbitrarily normalized before " "plotting.")) self._warnPlottingNormalization = 0 def greedyNextSample(self, *args, **kwargs) -> Tuple[Np1D, int, float, paramVal]: """Compute next greedy snapshot of solution map.""" RROMPyAssert(self._mode, message = "Cannot add greedy sample.") err, muidx, maxErr, muNext = super().greedyNextSample(*args, **kwargs) if (self.errorEstimatorKind == "NONE" and not np.isnan(maxErr) and not np.isinf(maxErr)): maxErr = None return err, muidx, maxErr, muNext def _preliminaryTraining(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if 
self.samplingEngine.nsamples > 0: return if self.npar > 1: raise RROMPyException(("Cannot apply minimal rational interpolant " "in multivariate case.")) super()._preliminaryTraining() def setupApproxLocal(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") self.verbosity -= 10 vbMng(self, "INIT", "Setting up local approximant.", 5) pMat = self.samplingEngine.projectionMatrix firstRun = self.trainedModel is None if not firstRun: pMat = pMat[:, len(self.trainedModel.data.mus) :] self._setupTrainedModel(pMat, not firstRun) unstable = 0 if self.S > 1: Q = self._setupDenominator() else: Q = PI() Q.coeffs = np.ones(1, dtype = np.complex) Q.npar, Q.polybasis = 1, self.polybasis if not unstable: self._setupRational(Q) if self.M < self.S - 1: RROMPyWarning(("Instability in numerator computation. " "Aborting.")) unstable = 1 if not unstable: self.trainedModel.data.approxParameters = copy( self.approxParameters) vbMng(self, "DEL", "Done setting up local approximant.", 5) self.verbosity += 10 return unstable def setupApprox(self, plotEst : str = "NONE") -> int: val = super().setupApprox(plotEst) if val == 0: self._setupRational(self.trainedModel.data.Q, self.trainedModel.data.P) self.trainedModel.data.approxParameters = copy( self.approxParameters) return val diff --git a/rrompy/reduction_methods/standard/rational_interpolant.py b/rrompy/reduction_methods/standard/rational_interpolant.py index 7e25809..f329faa 100755 --- a/rrompy/reduction_methods/standard/rational_interpolant.py +++ b/rrompy/reduction_methods/standard/rational_interpolant.py @@ -1,748 +1,821 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np +from scipy.sparse import coo_matrix from scipy.linalg import eig from collections.abc import Iterable from .generic_standard_approximant import GenericStandardApproximant from rrompy.utilities.poly_fitting.polynomial import ( polybases as ppb, polyfitname, polyvander as pvP, polyTimes, PolynomialInterpolator as PI, PolynomialInterpolatorNodal as PIN) from rrompy.utilities.poly_fitting.heaviside import rational2heaviside from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramList, interpEng) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import pseudoInverse, dot, baseDistanceMatrix from rrompy.utilities.numerical.factorials import multifactorial -from rrompy.utilities.numerical.degree import marginalDegreeMaxMask +from rrompy.utilities.numerical.degree import (mapTotalToTensorMixed, + reduceDegreeNMixed, + degreeSetMixed) from rrompy.utilities.numerical.hash_derivative import (nextDerivativeIndices, hashDerivativeToIdx as hashD, hashIdxToDerivative as hashI) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) __all__ = ['RationalInterpolant'] def polyTimesTable(P:interpEng, mus:Np1D, reorder:List[int], 
derIdxs:List[List[List[int]]], scl : Np1D = None) -> Np2D: """Table of polynomial products.""" if not isinstance(P, PI): raise RROMPyException(("Polynomial to evaluate must be a polynomial " "interpolator.")) Pvals = [[0.] * len(derIdx) for derIdx in derIdxs] for j, derIdx in enumerate(derIdxs): nder = len(derIdx) for der in range(nder): derI = hashI(der, P.npar) Pvals[j][der] = P([mus[j]], derI, scl) / multifactorial(derI) - return blockDiagDer(Pvals, reorder, derIdxs) + return blockDiagDer(Pvals, reorder, derIdxs, format = "dense") -def vanderInvTable(vanInv:Np2D, idxs:List[int], reorder:List[int], +def vanderInvTable(vanInv:Np2D, reorder:List[int], derIdxs:List[List[List[int]]]) -> Np2D: """Table of Vandermonde pseudo-inverse.""" S = len(reorder) - Ts = [None] * len(idxs) - for k in range(len(idxs)): + Ts = [None] * len(vanInv) + for k in range(len(vanInv)): invLocs = [None] * len(derIdxs) idxGlob = 0 for j, derIdx in enumerate(derIdxs): nder = len(derIdx) idxGlob += nder idxLoc = np.arange(S)[(reorder >= idxGlob - nder) * (reorder < idxGlob)] invLocs[j] = vanInv[k, idxLoc] Ts[k] = blockDiagDer(invLocs, reorder, derIdxs, [2, 1, 0]) return Ts def blockDiagDer(vals:List[Np1D], reorder:List[int], derIdxs:List[List[List[int]]], - permute : List[int] = None) -> Np2D: + permute : List[int] = None, format : str = "sparse") -> Np2D: """Table of derivative values for point confluence.""" S = len(reorder) - T = np.zeros((S, S), dtype = np.complex) + if format == "sparse": + data, idxI, idxJ = [], [], [] + else: #format == "dense": + T = np.zeros((S, S), dtype = np.complex) if permute is None: permute = [0, 1, 2] idxGlob = 0 for j, derIdx in enumerate(derIdxs): nder = len(derIdx) idxGlob += nder idxLoc = np.arange(S)[(reorder >= idxGlob - nder) * (reorder < idxGlob)] val = vals[j] for derI, derIdxI in enumerate(derIdx): for derJ, derIdxJ in enumerate(derIdx): diffIdx = [x - y for (x, y) in zip(derIdxI, derIdxJ)] if all([x >= 0 for x in diffIdx]): diffj = hashD(diffIdx) i1, 
i2, i3 = np.array([derI, derJ, diffj])[permute] - T[idxLoc[i1], idxLoc[i2]] = val[i3] + if format == "sparse": + data += [val[i3]] + idxI += [idxLoc[i1]] + idxJ += [idxLoc[i2]] + else: + T[idxLoc[i1], idxLoc[i2]] = val[i3] + if format == "sparse": + T = coo_matrix((data, (idxI, idxJ)), shape = (S, S)).tocsr() return T +def fullDegreeMaxMaskMixed(deg:int, degS:int, npar:int, + index:int) -> List[int]: + nLeft = (1 + degS) ** (npar - index - 1) + nRight = (1 + degS) ** (index) + baseidx = deg * nLeft + np.arange(nLeft) + return (np.tile(baseidx.reshape(-1, 1), (1, nRight)) + + np.arange(0, (deg + 1) * nLeft * nRight, (deg + 1) * nLeft) + ).T.flatten() + +def totalDegreeMaxMaskMixed(deg:int, degS:int, npar:int, + index:int) -> List[int]: + degSet = degreeSetMixed(deg, degS, npar, index, "TOTAL") + return np.where(np.sum(degSet, axis = 1) == deg)[0] + class RationalInterpolant(GenericStandardApproximant): """ ROM rational interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; defaults to 'MONOMIAL'; + - 'polydegreetype': type of polynomial degree; allowed values + include 'TENSOR' and 'MIXED'; defaults to 'MIXED'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. 
maximum allowed; - 'indexPrimary': index of pivot parameter; defaults to 0; - 'MAuxiliary': degree of rational interpolant numerator with respect to non-pivot parameters; defaults to 0; - 'NAuxiliary': degree of rational interpolant denominator with respect to non-pivot parameters; defaults to 0; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for interpolation; + - 'polydegreetype': type of polynomial degree; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'indexPrimary': index of pivot parameter; defaults to 0; - 'MAuxiliary': degree of rational interpolant numerator with respect to non-pivot parameters; defaults to 0; - 'NAuxiliary': degree of rational interpolant denominator with respect to non-pivot parameters; defaults to 0; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for interpolation via numpy.polyfit; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. - polybasis: type of polynomial basis for interpolation. + polybasis: Type of polynomial basis for interpolation. + polydegreetype: Type of polynomial degree. M: Numerator degree of approximant. N: Denominator degree of approximant. indexPrimary: Index of pivot parameter. MAuxiliary: Degree of rational interpolant numerator with respect to non-pivot parameters. NAuxiliary: Degree of rational interpolant denominator with respect to non-pivot parameters. radialDirectionalWeights: Radial basis weights for interpolant numerator. 
radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for interpolation via numpy.polyfit. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. 
""" _allowedFunctionalSolveKinds = ["NORM", "DOMINANT", "BARYCENTRIC_NORM", "BARYCENTRIC_AVERAGE"] def __init__(self, *args, **kwargs): self._preInit() - self._addParametersToList(["polybasis", "M", "N", "indexPrimary", - "MAuxiliary", "NAuxiliary", + self._addParametersToList(["polybasis", "M", "N", "polydegreetype", + "indexPrimary", "MAuxiliary", "NAuxiliary", "radialDirectionalWeights", "radialDirectionalWeightsAdapt", "functionalSolve", "interpTol", "QTol"], - ["MONOMIAL", "AUTO", "AUTO", 0, 0, 0, 1., - [-1., -1.], "NORM", -1, 0.]) + ["MONOMIAL", "AUTO", "AUTO", "MIXED", 0, 0, + 0, 1., [-1., -1.], "NORM", -1, 0.]) super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_rational import TrainedModelRational return TrainedModelRational @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in ppb + rbpb: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def polybasis0(self): if "_" in self.polybasis: return self.polybasis.split("_")[0] return self.polybasis @property def functionalSolve(self): """Value of functionalSolve.""" return self._functionalSolve @functionalSolve.setter def functionalSolve(self, functionalSolve): try: functionalSolve = functionalSolve.upper().strip().replace(" ","") if functionalSolve == "BARYCENTRIC": functionalSolve += "_NORM" if functionalSolve not in self._allowedFunctionalSolveKinds: raise RROMPyException(("Prescribed functionalSolve not " "recognized.")) self._functionalSolve = functionalSolve except: RROMPyWarning(("Prescribed functionalSolve not recognized. 
" "Overriding to 'NORM'.")) self._functionalSolve = "NORM" self._approxParameters["functionalSolve"] = self.functionalSolve @property def interpTol(self): """Value of interpTol.""" return self._interpTol @interpTol.setter def interpTol(self, interpTol): self._interpTol = interpTol self._approxParameters["interpTol"] = self.interpTol @property def radialDirectionalWeights(self): """Value of radialDirectionalWeights.""" return self._radialDirectionalWeights @radialDirectionalWeights.setter def radialDirectionalWeights(self, radialDirectionalWeights): if isinstance(radialDirectionalWeights, Iterable): radialDirectionalWeights = list(radialDirectionalWeights) else: radialDirectionalWeights = [radialDirectionalWeights] self._radialDirectionalWeights = radialDirectionalWeights self._approxParameters["radialDirectionalWeights"] = ( self.radialDirectionalWeights) @property def radialDirectionalWeightsAdapt(self): """Value of radialDirectionalWeightsAdapt.""" return self._radialDirectionalWeightsAdapt @radialDirectionalWeightsAdapt.setter def radialDirectionalWeightsAdapt(self, radialDirectionalWeightsAdapt): self._radialDirectionalWeightsAdapt = radialDirectionalWeightsAdapt self._approxParameters["radialDirectionalWeightsAdapt"] = ( self.radialDirectionalWeightsAdapt) @property def M(self): """Value of M.""" return self._M @M.setter def M(self, M): if isinstance(M, str): M = M.strip().replace(" ","") if "-" not in M: M = M + "-0" self._M_isauto, self._M_shift = True, int(M.split("-")[-1]) M = 0 if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M def _setMAuto(self): - self.M = max(0, self.S // (self.MAuxiliary + 1) ** (self.npar - 1) - - self._M_shift - 1) + self.M = max(0, reduceDegreeNMixed(self.S - 1, self.MAuxiliary, self.S, + self.npar, self.polydegreetype) + - self._M_shift) vbMng(self, "MAIN", "Automatically setting M to {}.".format(self.M), 25) @property def N(self): """Value of N.""" return self._N @N.setter 
def N(self, N): if isinstance(N, str): N = N.strip().replace(" ","") if "-" not in N: N = N + "-0" self._N_isauto, self._N_shift = True, int(N.split("-")[-1]) N = 0 if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N def _setNAuto(self): - self.N = max(0, self.S // (self.NAuxiliary + 1) ** (self.npar - 1) - - self._N_shift - 1) + self.N = max(0, reduceDegreeNMixed(self.S - 1, self.NAuxiliary, self.S, + self.npar, self.polydegreetype) + - self._N_shift) vbMng(self, "MAIN", "Automatically setting N to {}.".format(self.N), 25) @property def indexPrimary(self): """Value of indexPrimary.""" return self._indexPrimary @indexPrimary.setter def indexPrimary(self, indexPrimary): if indexPrimary < 0 or indexPrimary > self.npar: raise RROMPyException("indexPrimary must be between 0 and npar.") self._indexPrimary = indexPrimary self._approxParameters["indexPrimary"] = self.indexPrimary @property def MAuxiliary(self): """Value of MAuxiliary.""" return self._MAuxiliary @MAuxiliary.setter def MAuxiliary(self, MAuxiliary): if MAuxiliary < 0: raise RROMPyException("MAuxiliary must be non-negative.") self._MAuxiliary = MAuxiliary self._approxParameters["MAuxiliary"] = self.MAuxiliary @property def NAuxiliary(self): """Value of NAuxiliary.""" return self._NAuxiliary @NAuxiliary.setter def NAuxiliary(self, NAuxiliary): if NAuxiliary < 0: raise RROMPyException("NAuxiliary must be non-negative.") self._NAuxiliary = NAuxiliary self._approxParameters["NAuxiliary"] = self.NAuxiliary + @property + def polydegreetype(self): + """Value of polydegreetype.""" + return self._polydegreetype + @polydegreetype.setter + def polydegreetype(self, polydegreetype): + try: + polydegreetype = polydegreetype.upper().strip().replace(" ","") + if polydegreetype not in ["MIXED", "TOTAL", "TENSOR"]: + raise RROMPyException(("Prescribed polydegreetype not " + "recognized.")) + if polydegreetype == "TOTAL": polydegreetype = "MIXED" + self._polydegreetype = 
polydegreetype + except: + RROMPyWarning(("Prescribed polydegreetype not recognized. " + "Overriding to 'MIXED'.")) + self._polydegreetype = "MIXED" + self._approxParameters["polydegreetype"] = self.polydegreetype + @property def QTol(self): """Value of tolerance for robust rational denominator management.""" return self._QTol @QTol.setter def QTol(self, QTol): if QTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) QTol = 0. self._QTol = QTol self._approxParameters["QTol"] = self.QTol def resetSamples(self): """Reset samples.""" super().resetSamples() self._musUniqueCN = None self._derIdxs = None self._reorder = None def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" if self._musUniqueCN is None or len(self._reorder) != len(self.mus): self._musUniqueCN, musIdxsTo, musIdxs, musCount = ( self.trainedModel.centerNormalize(self.mus).unique( return_index = True, return_inverse = True, return_counts = True)) self._musUnique = self.mus[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.mus.shape[1], cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt def _setupDenominator(self): """Compute rational denominator.""" RROMPyAssert(self._mode, message = "Cannot setup denominator.") vbMng(self, "INIT", "Starting computation of denominator.", 7) if hasattr(self, "_N_isauto"): self._setNAuto() else: - Nmax = self.S // (1 + self.NAuxiliary) ** (self.npar - 1) - 1 - if Nmax < self.N: + N = reduceDegreeNMixed(self.N, self.NAuxiliary, self.S, self.npar, + self.polydegreetype) + if N < self.N: RROMPyWarning(("N too large compared to S. 
Reducing N by " - "{}").format(self.N - Nmax)) - self.N = Nmax + "{}").format(self.N - N)) + self.N = N while self.N > 0: if self.functionalSolve != "NORM" and self.npar > 1: RROMPyWarning(("Strategy for functional optimization must be " "'NORM' for more than one parameter. " "Overriding to 'NORM'.")) self.functionalSolve = "NORM" if (self.functionalSolve[:11] == "BARYCENTRIC" and self.N + 1 < self.S): RROMPyWarning(("Barycentric strategy cannot be applied with " "Least Squares. Overriding to 'NORM'.")) self.functionalSolve = "NORM" if self.functionalSolve[:11] == "BARYCENTRIC": invD, TN = None, None self._setupInterpolationIndices() if len(self._musUnique) != self.S: RROMPyWarning(("Barycentric functional optimization " "cannot be applied to repeated samples. " "Overriding to 'NORM'.")) self.functionalSolve = "NORM" if self.functionalSolve[:11] != "BARYCENTRIC": invD, TN = self._computeInterpolantInverseBlocks() if self.POD == 1: sampleE = self.samplingEngine.Rscale Rscaling = None elif self.POD == 1/2: sampleE = self.samplingEngine.samples_normal Rscaling = self.samplingEngine.Rscale else: sampleE = self.samplingEngine.samples Rscaling = None ev, eV = self.findeveVG(sampleE, invD, TN, Rscaling) if self.functionalSolve[:11] == "BARYCENTRIC": break nevBad = np.sum(np.abs(ev / ev[-1]) < self.QTol) if not nevBad: break dN = int(np.ceil(nevBad / (1 + self.NAuxiliary) ** (self.npar - 1))) vbMng(self, "MAIN", ("Smallest {} eigenvalue{} below tolerance. Reducing N by " "{}.").format(nevBad, "s" * (nevBad > 1), dN), 10) self.N = self.N - dN if hasattr(self, "_gram"): del self._gram if self.N <= 0: - self.N, eV = 0, np.ones((1,) * self.npar, dtype = np.complex) + self.N = 0 + deg = [self.NAuxiliary + 1] * self.npar + deg[self.indexPrimary] = 1 + eV = np.zeros(deg, dtype = np.complex) + eV[(0,) * self.npar] = 1. 
if self.N > 0 and self.functionalSolve[:11] == "BARYCENTRIC": q = PIN() q.polybasis, q.nodes = self.polybasis0, eV else: q = PI() q.npar, q.polybasis = self.npar, self.polybasis0 deg = [self.NAuxiliary + 1] * self.npar deg[self.indexPrimary] = self.N + 1 - q.coeffs = eV.reshape(deg) + if self.polydegreetype == "TENSOR": + q.coeffs = eV.reshape(deg) + else: #if self.polydegreetype == "MIXED": + q.coeffs = mapTotalToTensorMixed(deg, self.npar, eV, + self.indexPrimary) vbMng(self, "DEL", "Done computing denominator.", 7) return q def _setupNumerator(self): """Compute rational numerator.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of numerator.", 7) self._setupInterpolationIndices() Qevaldiag = polyTimesTable(self.trainedModel.data.Q, self._musUniqueCN, self._reorder, self._derIdxs, self.scaleFactorRel) if self.POD == 1: Qevaldiag = Qevaldiag.dot(self.samplingEngine.Rscale.T) elif self.POD == 1/2: Qevaldiag = Qevaldiag * self.samplingEngine.Rscale if hasattr(self, "_M_isauto"): self._setMAuto() else: - Mmax = self.S // (1 + self.MAuxiliary) ** (self.npar - 1) - 1 - if Mmax < self.M: + M = reduceDegreeNMixed(self.M, self.MAuxiliary, self.S, self.npar, + self.polydegreetype) + if M < self.M: RROMPyWarning(("M too large compared to S. 
Reducing M by " - "{}").format(self.M - Mmax)) - self.M = Mmax - M = self.M + "{}").format(self.M - M)) + self.M = M while self.M >= 0: deg = [self.MAuxiliary] * self.npar deg[self.indexPrimary] = self.M - pParRest = [deg, self.polybasis, self.verbosity >= 5, 0, + pParRest = [deg, self.polybasis, self.verbosity >= 5, + self.polydegreetype, {"derIdxs": self._derIdxs, "reorder": self._reorder, - "scl": self.scaleFactorRel}] + "scl": self.scaleFactorRel, + "degreetype": self.polydegreetype, + "mixedIdx": self.indexPrimary}] if self.polybasis in ppb: p = PI() else: self.computeScaleFactor() rDWEff = np.array([w * f for w, f in zip( self.radialDirectionalWeights, self.scaleFactor)]) pParRest = pParRest[: 2] + [rDWEff] + pParRest[2 :] pParRest[-1]["optimizeScalingBounds"] = ( self.radialDirectionalWeightsAdapt) p = RBI() if self.polybasis in ppb + rbpb: pParRest += [{"rcond": self.interpTol}] wellCond, msg = p.setupByInterpolation(self._musUniqueCN, Qevaldiag, *pParRest) vbMng(self, "MAIN", msg, 5) if wellCond: break vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing M " "by 1."), 10) - self._M = self.M - 1 + self.M = self.M - 1 if self.M < 0: raise RROMPyException(("Instability in computation of numerator. 
" "Aborting.")) - self.M = M vbMng(self, "DEL", "Done computing numerator.", 7) return p def setupApprox(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.".format(self.name()), 5) self.computeSnapshots() self._setupTrainedModel(self.samplingEngine.projectionMatrix) self._setupRational(self._setupDenominator()) self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def _setupRational(self, Q:interpEng, P : interpEng = None): vbMng(self, "INIT", "Starting approximant finalization.", 5) self.trainedModel.data.Q = Q if P is None: P = self._setupNumerator() while self.N > 0 and self.npar == 1: if self.HFEngine._ignoreResidues: pls = Q.roots() cfs, projMat = None, None else: cfs, pls, _ = rational2heaviside(P, Q) cfs = cfs[: len(pls)].T if self.POD != 1: projMat = self.samplingEngine.projectionMatrix else: projMat = None foci = self.sampler.normalFoci() plsA = self.mapParameterList(self.mapParameterList(self.mu0)(0, 0) + self.scaleFactor * pls, "B")(0) idxBad = self.HFEngine.flagBadPolesResiduesAbsolute(plsA, cfs, projMat) if not self.HFEngine._ignoreResidues: cfs[:, idxBad] = 0. 
idxBad += self.HFEngine.flagBadPolesResiduesRelative(pls, cfs, projMat, foci) idxBad = idxBad > 0 if not np.any(idxBad): break vbMng(self, "MAIN", "Removing {} spurious pole{} out of {}.".format( np.sum(idxBad), "s" * (np.sum(idxBad) > 1), self.N), 10) if isinstance(Q, PIN): Q.nodes = Q.nodes[idxBad == False] else: Q = PI() Q.npar, Q.polybasis = 1, self.polybasis0 Q.coeffs = np.ones(1, dtype = np.complex) for pl in pls[idxBad == False]: Q.coeffs = polyTimes(Q.coeffs, [- pl, 1.], Pbasis = Q.polybasis, Rbasis = Q.polybasis) Q.coeffs /= np.linalg.norm(Q.coeffs) self.trainedModel.data.Q = Q self.N = Q.deg[0] P = self._setupNumerator() self.trainedModel.data.P = P vbMng(self, "DEL", "Terminated approximant finalization.", 5) def _computeInterpolantInverseBlocks(self) -> Tuple[List[Np2D], Np2D]: """ Compute inverse factors for minimal interpolant target functional. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") self._setupInterpolationIndices() pvPPar = [self.polybasis0, self._derIdxs, self._reorder, - self.scaleFactorRel] + self.scaleFactorRel, self.polydegreetype, self.indexPrimary] full = self.N + 1 == self.S == len(self._musUniqueCN) if full: mus = self._musUniqueCN[self._reorder] dist = baseDistanceMatrix(mus, magnitude = False)[..., 0] dist[np.arange(self.N + 1), np.arange(self.N + 1)] = multifactorial([self.N]) fitinvE = np.prod(dist, axis = 1) ** -1 vbMng(self, "MAIN", ("Evaluating quasi-Lagrangian basis of degree {} at {} " "sample points.").format(self.N, self.N + 1), 5) invD = [np.diag(fitinvE)] TN = pvP(self._musUniqueCN, self.N, *pvPPar) else: - deg, TN = [self.NAuxiliary] * self.npar, None - E = self.S // (1 + self.NAuxiliary) ** (self.npar - 1) - 1 + TN = None + E = reduceDegreeNMixed(self.S - 1, self.NAuxiliary, self.S, + self.npar, self.polydegreetype) while self.N >= 0: if TN is None: + deg = [self.NAuxiliary] * self.npar deg[self.indexPrimary] = self.N TN = pvP(self._musUniqueCN, deg, *pvPPar) if self.N == E: TE = TN else: 
deg[self.indexPrimary] = E TE = pvP(self._musUniqueCN, deg, *pvPPar) - idxsB = marginalDegreeMaxMask(deg, self.indexPrimary) + if self.polydegreetype == "TENSOR": + idxsB = fullDegreeMaxMaskMixed(E, self.NAuxiliary, + self.npar, + self.indexPrimary) + else: #if self.polydegreetype == "MIXED": + idxsB = totalDegreeMaxMaskMixed(E, self.NAuxiliary, + self.npar, + self.indexPrimary) fitOut = pseudoInverse(TE, rcond = self.interpTol, full = True) + degE = deg[0] if self.npar == 1 else deg vbMng(self, "MAIN", ("Fitting {} samples with degree {} through {}... " "Conditioning of pseudoinverse system: {:.4e}.").format( - TE.shape[0], E, + TE.shape[0], degE, polyfitname(self.polybasis0), fitOut[1][1][0] / fitOut[1][1][-1]), 5) if fitOut[1][0] == TE.shape[1]: fitinv = fitOut[0][idxsB, :] break E -= 1 if E < self.N: vbMng(self, "MAIN", "Polyfit is poorly conditioned. Reducing N by 1.", 10) self.N, TN = E, None if self.N < 0: raise RROMPyException(("Instability in computation of " "denominator. Aborting.")) - invD = vanderInvTable(fitinv, idxsB, self._reorder, self._derIdxs) + invD = vanderInvTable(fitinv, self._reorder, self._derIdxs) return invD, TN def findeveVG(self, sampleE:Np2D, invD:List[Np2D], TN:Np2D, Rscaling : Np1D = None) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of rational denominator matrix, or of its right chol factor if POD. 
""" RROMPyAssert(self._mode, message = "Cannot solve spectral problem.") if self.POD == 1: if self.functionalSolve[:11] == "BARYCENTRIC": Rstack = sampleE else: vbMng(self, "INIT", "Building generalized half-gramian.", 10) S, eWidth = sampleE.shape[0], len(invD) Rstack = np.zeros((S * eWidth, TN.shape[1]), dtype = np.complex) for k in range(eWidth): Rstack[k * S : (k + 1) * S, :] = dot(sampleE, dot(invD[k], TN)) vbMng(self, "DEL", "Done building half-gramian.", 10) _, s, Vh = np.linalg.svd(Rstack, full_matrices = False) evG, eVG = s[::-1], Vh[::-1].T.conj() evExp, probKind = -2., "svd " else: if not hasattr(self, "_gram"): vbMng(self, "INIT", "Building gramian matrix.", 10) self._gram = self.HFEngine.innerProduct(sampleE, sampleE, is_state = True) if Rscaling is not None: self._gram = (self._gram.T * Rscaling.conj()).T * Rscaling vbMng(self, "DEL", "Done building gramian.", 10) if self.functionalSolve[:11] == "BARYCENTRIC": G = self._gram else: vbMng(self, "INIT", "Building generalized gramian.", 10) G = np.zeros((TN.shape[1],) * 2, dtype = np.complex) for k in range(len(invD)): iDkN = dot(invD[k], TN) G += dot(dot(self._gram, iDkN).T, iDkN.conj()).T vbMng(self, "DEL", "Done building gramian.", 10) evG, eVG = np.linalg.eigh(G) evExp, probKind = -1., "eigen" if (self.functionalSolve in ["NORM", "BARYCENTRIC_NORM"] or np.sum(np.abs(evG) < np.finfo(float).eps * np.abs(evG[-1]) * len(evG)) == 1): eV = eVG[:, 0] elif self.functionalSolve == "BARYCENTRIC_AVERAGE": eV = eVG.dot(evG ** evExp * np.sum(eVG, axis = 0).conj()) else: eV = eVG.dot(evG ** evExp * eVG[0].conj()) vbMng(self, "MAIN", ("Solved {}problem of size {} with condition number " "{:.4e}.").format(probKind, len(evG) - 1, evG[-1] / evG[1]), 5) if self.functionalSolve[:11] == "BARYCENTRIC": S, mus = len(eV), self._musUniqueCN[self._reorder].flatten() arrow = np.zeros((S + 1,) * 2, dtype = np.complex) arrow[1 :, 0] = 1. 
arrow[0, 1 :] = eV arrow[np.arange(1, S + 1), np.arange(1, S + 1)] = mus active = np.eye(S + 1) active[0, 0] = 0. poles, qTm1 = eig(arrow, active) eVgood = np.isinf(poles) + np.isnan(poles) == False poles = poles[eVgood] self.N = len(poles) if self.QTol > 0: # compare optimal score with self.N poles to those obtained # by removing one of the poles qTm1 = qTm1[1 :, eVgood].conj() ** -1. dists = mus.reshape(-1, 1) - mus dists[np.arange(S), np.arange(S)] = multifactorial([self.N]) dists = np.prod(dists, axis = 1).conj() ** -1. qComp = np.empty((self.N + 1, S), dtype = np.complex) qComp[0] = dists * np.prod(qTm1, axis = 1) for j in range(self.N): qTmj = np.prod(qTm1[:, np.arange(self.N) != j], axis = 1) qComp[j + 1] = dists * qTmj Lqs = qComp.dot(eVG) scores = np.real(np.sum(Lqs * evG ** -evExp * Lqs.conj(), axis = 1)) evBad = scores[1 :] < self.QTol * scores[0] nevBad = np.sum(evBad) if nevBad: vbMng(self, "MAIN", ("Suboptimal pole{} detected. Reducing N by " "{}.").format("s" * (nevBad > 1), nevBad), 10) self.N = self.N - nevBad poles = poles[evBad == False] eV = poles return evG[1 :], eV def getResidues(self, *args, **kwargs) -> Tuple[paramList, Np2D]: """ Obtain approximant residues. Returns: Matrix with residues as columns. """ return self.trainedModel.getResidues(*args, **kwargs) diff --git a/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py b/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py index 9c23253..10e8df4 100644 --- a/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py +++ b/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py @@ -1,173 +1,172 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.reduction_methods.base.trained_model.trained_model import ( TrainedModel) from rrompy.utilities.numerical import dot from rrompy.utilities.numerical.compress_matrix import compressMatrix from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, ListAny, paramVal, paramList, sampList) from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.sampling import sampleList __all__ = ['TrainedModelRational'] class TrainedModelRational(TrainedModel): """ ROM approximant evaluation for rational approximant. Attributes: Data: dictionary with all that can be pickled. """ def compress(self, collapse : bool = False, tol : float = 0., *args, **kwargs): if not collapse and tol <= 0.: return RMat = self.data.projMat if not collapse: if hasattr(self.data, "_compressTol"): RROMPyWarning(("Recompressing already compressed model is " "ineffective. Aborting.")) return self.data.projMat, RMat, _ = compressMatrix(RMat, tol, *args, **kwargs) self.data.P.postmultiplyTensorize(RMat.T) super().compress(collapse, tol) def centerNormalize(self, mu : paramList = [], mu0 : paramVal = None) -> paramList: """ Compute normalized parameter to be plugged into approximant. Args: mu: Parameter(s) 1. 
mu0: Parameter(s) 2. If None, set to self.data.mu0. Returns: Normalized parameter. """ mu = self.checkParameterList(mu) if mu0 is None: mu0 = self.data.mu0 return (self.mapParameterList(mu) - self.mapParameterList(mu0)) / self.data.scaleFactor def getPVal(self, mu : paramList = []) -> sampList: """ Evaluate rational numerator at arbitrary parameter. Args: mu: Target parameter. """ mu = self.checkParameterList(mu) vbMng(self, "INIT", "Evaluating numerator at mu = {}.".format(mu), 17) p = sampleList(self.data.P(self.centerNormalize(mu))) vbMng(self, "DEL", "Done evaluating numerator.", 17) return p def getQVal(self, mu:Np1D, der : List[int] = None, scl : Np1D = None) -> Np1D: """ Evaluate rational denominator at arbitrary parameter. Args: mu: Target parameter. der(optional): Derivatives to take before evaluation. """ mu = self.checkParameterList(mu) vbMng(self, "INIT", "Evaluating denominator at mu = {}.".format(mu), 17) q = self.data.Q(self.centerNormalize(mu), der, scl) vbMng(self, "DEL", "Done evaluating denominator.", 17) return q def getApproxReduced(self, mu : paramList = []) -> sampList: """ Evaluate reduced representation of approximant at arbitrary parameter. Args: mu: Target parameter. """ mu = self.checkParameterList(mu) if (not hasattr(self, "lastSolvedApproxReduced") or self.lastSolvedApproxReduced != mu): vbMng(self, "INIT", "Evaluating approximant at mu = {}.".format(mu), 12) QV = self.getQVal(mu) QVzero = np.where(QV == 0.)[0] if len(QVzero) > 0: QV[QVzero] = np.finfo(np.complex).eps / (1. + self.data.Q.deg[0]) self.uApproxReduced = self.getPVal(mu) / QV vbMng(self, "DEL", "Done evaluating approximant.", 12) self.lastSolvedApproxReduced = mu return self.uApproxReduced def getPoles(self, marginalVals : ListAny = [fp]) -> paramList: """ Obtain approximant poles. Returns: Numpy complex vector of poles. 
""" RROMPyAssert(self.data.npar, len(marginalVals), "Number of parameters") mVals = list(marginalVals) rDim = mVals.index(fp) if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]: raise RROMPyException(("Exactly 1 'freepar' entry in " "marginalVals must be provided.")) mVals[rDim] = self.data.mu0(0, rDim) mVals = list(self.centerNormalize(mVals).data.flatten()) mVals[rDim] = fp roots = self.data.scaleFactor[rDim] * self.data.Q.roots(mVals) return self.mapParameterList(self.mapParameterList(self.data.mu0(rDim), idx = [rDim])(0, 0) + roots, "B", [rDim])(0) def getResidues(self, marginalVals : ListAny = [fp]) -> Tuple[paramList, Np2D]: """ Obtain approximant residues. Returns: Numpy matrix with residues as columns. """ mVals = list(marginalVals) pls = self.getPoles(mVals) if len(pls) == 0: return pls, np.empty((0, 0), dtype = np.complex) rDim = mVals.index(fp) poles = emptyParameterList() poles.reset((len(pls), self.data.npar), dtype = np.complex) for k, pl in enumerate(pls): mValsLoc = list(mVals) mValsLoc[rDim] = pl poles[k] = mValsLoc QV = self.getQVal(poles, list(1 * (np.arange(self.data.npar) == rDim))) QVzero = np.where(QV == 0.)[0] if len(QVzero) > 0: RROMPyWarning(("Adjusting residues to avoid division by " "numerically zero denominator.")) QV[QVzero] = np.finfo(np.complex).eps / (1. + self.data.Q.deg[0]) - res = (self.getPVal(poles).data / QV - * self.data.scaleFactor[self.data.directionPivot[0]]) + res = self.getPVal(poles).data / QV * self.data.scaleFactor[rDim] if not self.data._collapsed: res = dot(self.data.projMat, res) - return pls, res.T \ No newline at end of file + return pls, res.T diff --git a/rrompy/utilities/numerical/degree.py b/rrompy/utilities/numerical/degree.py index 447ed92..28d35b7 100644 --- a/rrompy/utilities/numerical/degree.py +++ b/rrompy/utilities/numerical/degree.py @@ -1,78 +1,124 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from scipy.special import binom from rrompy.utilities.base.types import Np2D, Tuple, List from .kroneckerer import kroneckerer +from rrompy.utilities.exception_manager import RROMPyException -__all__ = ['fullDegreeN', 'totalDegreeN', 'reduceDegreeN', 'fullDegreeSet', - 'totalDegreeSet', 'degreeTotalToFull', 'fullDegreeMaxMask', - 'totalDegreeMaxMask', 'marginalDegreeMaxMask'] +__all__ = ['degreeN', 'degreeNMixed', 'reduceDegreeN', 'reduceDegreeNMixed', + 'degreeSet', 'degreeSetMixed', 'mapTotalToTensor', + 'mapTotalToTensorMixed'] -def fullDegreeN(deg:int, npar:int) -> int: - return (1 + deg) ** npar +def degreeN(deg:int, npar:int, degtype:str) -> int: + degtype = degtype.upper().strip().replace(" ","") + if degtype == "TENSOR": return degreeNMixed(deg, deg, npar, degtype) + if degtype == "TOTAL": return int(binom(deg + npar, npar)) + raise RROMPyException("Degree type not recognized.") -def totalDegreeN(deg:int, npar:int) -> int: - return int(binom(deg + npar, npar)) +def degreeNMixed(deg:int, degS:int, npar:int, degtype:str) -> int: + degtype = degtype.upper().strip().replace(" ","") + if npar < 0: return 0 + if npar < 2: return 1 + deg * npar + if degtype == "TENSOR": return (1 + deg) * (1 + degS) ** (npar - 1) + if degtype in ["TOTAL", "MIXED"]: + degS = min(deg, degS) + tot = ((1 + degS) ** (npar - 1) * (1 + deg - degS) + + np.sum((1 + 
np.arange(degS)) ** (npar - 1))) + return tot + raise RROMPyException("Degree type not recognized.") def reduceDegreeN(deg:int, S:int, npar:int, degtype:str) -> int: - cfun = totalDegreeN if degtype == "TOTAL" else fullDegreeN - while S < cfun(deg, npar): deg -= 1 + while deg > 0 and S < degreeN(deg, npar, degtype): deg -= 1 return deg -def fullDegreeSet(deg:int, npar:int) -> List[List[int]]: - idxs = np.empty((fullDegreeN(deg, npar), npar), dtype = int) - for j in range(npar): - idxs[:, j] = kroneckerer(np.arange(deg + 1), fullDegreeN(deg, j), - fullDegreeN(deg, npar - j - 1)) - return idxs +def reduceDegreeNMixed(deg:int, degS:int, S:int, npar:int, degtype:str) -> int: + while deg > 0 and S < degreeNMixed(deg, degS, npar, degtype): deg -= 1 + return deg + +def degreeSet(deg:int, npar:int, degtype:str, + return_mask : bool = False) -> List[List[int]]: + degtype = degtype.upper().strip().replace(" ","") + if degtype == "TENSOR": + return degreeSetMixed(deg, deg, npar, 0, degtype, return_mask) + if degtype == "TOTAL": + idxs = degreeSet(deg, npar, "TENSOR") + mask = np.sum(idxs, axis = 1) <= deg + if return_mask: return idxs[mask], mask + return idxs[mask] + raise RROMPyException("Degree type not recognized.") -def totalDegreeSet(deg:int, npar:int, +def degreeSetMixed(deg:int, degS:int, npar:int, index:int, degtype:str, return_mask : bool = False) -> List[List[int]]: - remN = fullDegreeSet(deg, npar) - mask = np.sum(remN, axis = 1) <= deg - if return_mask: return remN[mask], mask - return remN[mask] + degtype = degtype.upper().strip().replace(" ","") + if degtype == "TENSOR": + idxs = np.empty((degreeNMixed(deg, degS, npar, degtype), npar), + dtype = int) + for j in range(npar): + if j <= index: + nL = degreeN(degS, j, "TENSOR") + else: + nL = degreeNMixed(deg, degS, j, "TENSOR") + if j >= index: + nR = degreeN(degS, npar - j - 1, "TENSOR") + else: + nR = degreeNMixed(deg, degS, npar - j - 1, "TENSOR") + d = deg if j == index else degS + idxs[:, j] = 
kroneckerer(np.arange(d + 1), nL, nR) + if return_mask: return idxs, np.ones(len(idxs), dtype = bool) + return idxs + if degtype in ["TOTAL", "MIXED"]: + idxs, mask = degreeSetMixed(deg, degS, npar, index, "TENSOR", + return_mask = True) + if npar > 1: + mask = (np.max(idxs[:, np.arange(npar) != index], axis = 1) + <= np.clip(deg - idxs[:, index], -1, degS)) + idxs = idxs[mask] + if return_mask: return idxs, mask + return idxs + raise RROMPyException("Degree type not recognized.") -def degreeTotalToFull(shapeFull:Tuple[int], dim:int, coeffs:Np2D) -> Np2D: +def mapTotalToTensor(shapeTensor:Tuple[int], npar:int, coeffs:Np2D) -> Np2D: + if npar < 2: return coeffs.reshape(shapeTensor) from .hash_derivative import hashIdxToDerivative as hashI - full = np.zeros(shapeFull, dtype = coeffs.dtype) + deg = np.max(shapeTensor) + full = np.zeros(shapeTensor, dtype = coeffs.dtype) for j in range(len(coeffs)): - full[tuple(hashI(j, dim))] = coeffs[j] + idx = hashI(j, npar) + if np.sum(idx) >= deg: + raise RROMPyException("Too many coefficients for prescribed size.") + full[tuple(idx)] = coeffs[j] return full -def fullDegreeMaxMask(deg:int, npar:int) -> List[int]: - return np.where(np.any(fullDegreeSet(deg, npar) == deg, axis = 1))[0] - -def totalDegreeMaxMask(deg:int, npar:int) -> List[int]: - if deg == 0: return np.zeros(1, dtype = int) - return np.arange(totalDegreeN(deg - 1, npar), totalDegreeN(deg, npar)) - -def marginalDegreeMaxMask(degs:List[int], index:int) -> List[int]: - degP, npar = degs[index], len(degs) - if index: - degM = degs[0] - else: - degM = degs[1] if npar > 1 else 1 - nLeft = fullDegreeN(degM, npar - index - 1) - nRight = fullDegreeN(degM, index) - baseidx = np.arange(degP * nLeft, (degP + 1) * nLeft) - return (np.tile(baseidx.reshape(-1, 1), (1, nRight)) - + np.arange(0, (degP + 1) * nLeft * nRight, (degP + 1) * nLeft) - ).T.flatten() \ No newline at end of file +def mapTotalToTensorMixed(shapeTensor:Tuple[int], npar:int, coeffs:Np2D, + index:int) -> Np2D: + 
if npar < 2: return coeffs.reshape(shapeTensor) + from .hash_derivative import hashIdxToDerivative as hashI + deg = shapeTensor[index] - 1 + degS = shapeTensor[0] - 1 if index else shapeTensor[1] - 1 + mask = np.arange(npar) != index + full = np.zeros(shapeTensor, dtype = coeffs.dtype) + ij, j = 0, 0 + while j < len(coeffs): + idx = np.array(hashI(ij, npar)) + if np.all(np.max(idx[mask]) <= np.clip(deg - idx[index], -1, degS)): + full[tuple(idx)] = coeffs[j] + j += 1 + ij += 1 + return full diff --git a/rrompy/utilities/numerical/hash_derivative.py b/rrompy/utilities/numerical/hash_derivative.py index 5488031..5a836b5 100644 --- a/rrompy/utilities/numerical/hash_derivative.py +++ b/rrompy/utilities/numerical/hash_derivative.py @@ -1,94 +1,93 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import deepcopy as copy import numpy as np from rrompy.utilities.base.types import List from rrompy.utilities.exception_manager import RROMPyException -from .degree import totalDegreeN +from .degree import degreeN __all__ = ['nextDerivativeIndices', 'hashDerivativeToIdx', 'hashIdxToDerivative'] def nextDerivativeIndices(derIdxs:List[List[int]], dim:int, count : int = 1) -> List[List[int]]: out = [] if count <= 0: return out derIdxs = copy(derIdxs) sumDer, sumInverse, sumCount = np.unique( [np.sum(derIdx) for derIdx in derIdxs], return_inverse = True, return_counts = True) if len(derIdxs) == 0 or 0 not in sumDer: out += [[0] * dim] count -= 1 if count <= 0: return out derIdxs += [[0] * dim] shellIncomplete = 1 _, sumInverse = np.unique([np.sum(derIdx) for derIdx in derIdxs], return_inverse = True) else: sumCount = np.cumsum(sumCount) shellIncomplete = 1 for shellIncomplete in range(1, len(sumDer) + 1): - theoreticalCount = totalDegreeN(shellIncomplete, dim) + theoreticalCount = degreeN(shellIncomplete, dim, "TOTAL") if (shellIncomplete not in sumDer or theoreticalCount > sumCount[shellIncomplete]): break if theoreticalCount < sumCount[shellIncomplete]: raise RROMPyException("Starting index list is redundant.") shell_previous = [derIdxs[x] for x in np.nonzero(sumInverse == shellIncomplete - 1)[0]] while count > 0: shell_current = [derIdxs[x] for x in np.nonzero(sumInverse == shellIncomplete)[0]] for prev in shell_previous: prevC = copy(prev) for d in range(dim): prevC[d] += 1 if prevC not in shell_current: out += [copy(prevC)] shell_current += [copy(prevC)] derIdxs += [copy(prevC)] count -= 1 if count <= 0: return out prevC[d] -= 1 shell_previous = copy(shell_current) _, sumInverse = np.unique([np.sum(derIdx) for derIdx in derIdxs], return_inverse = True) shellIncomplete += 1 def hashDerivativeToIdx(derIdx:List[int]) -> int: dim = len(derIdx) if dim == 0: return 0 derMag = sum(derIdx) - base = totalDegreeN(derMag - 1, dim) + base = degreeN(derMag - 1, 
dim, "TOTAL") if derMag == derIdx[0]: return base return base + hashDerivativeToIdx(derIdx[1:]) def hashIdxToDerivative(n:int, dim:int) -> List[int]: if n == 0: return [0] * dim shell = 0 shellOld = -1 shellNew = 1 while shellNew <= n: shell += 1 - shellOld = shellNew - shellNew = totalDegreeN(shell, dim) + shellOld, shellNew = shellNew, degreeN(shell, dim, "TOTAL") rest = hashIdxToDerivative(n - shellOld, dim - 1) return [shell - sum(rest)] + rest diff --git a/rrompy/utilities/numerical/point_matching.py b/rrompy/utilities/numerical/point_matching.py index 8ee1a29..1ef9eb3 100644 --- a/rrompy/utilities/numerical/point_matching.py +++ b/rrompy/utilities/numerical/point_matching.py @@ -1,192 +1,190 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import deepcopy as copy import numpy as np from scipy.optimize import linear_sum_assignment as LSA from .tensor_la import dot from .point_distances import baseDistanceMatrix, doubleDistanceMatrix from rrompy.utilities.base.types import Tuple, List, ListAny, Np1D, Np2D, HFEng from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert __all__ = ['pointMatching', 'polynomialMatching', 'rationalFunctionMatching'] def pointMatching(distMatrix:Np2D) -> Tuple[Np1D, Np1D]: return LSA(distMatrix) def polynomialMatching(Qs:List[Np1D], Ps:List[Np2D], featPts:Np2D, matchingWeight:float, supps:ListAny, projMat:Np2D, HFEngine : HFEng = None, is_state : bool = True, root : int = 0, kind : str = "ROTATE") -> Tuple[List[Np1D], List[Np2D]]: """ Match poles and residues of a set of rational functions. Args: Qs: List of denominator coefficients. Ps: List of numerator coefficients. featPts: Marginal parameters corresponding to rational models. matchingWeight: Matching weight in distance computation. supps: Support indices for projection matrix. projMat: Projection matrix for residues. HFEngine(optional): Engine for distance evaluation. Defaults to None, i.e. Euclidean metric. is_state(optional): Whether residues are of system state. Defaults to True. root(optional): Root of search tree. Defaults to 0. kind(optional): Kind of matching, either 'ROTATE' or 'PROJECT'. Defaults to 'ROTATE'. Returns: Matched list of (lists of) poles and list of (lists of) residues. 
""" M = len(featPts) RROMPyAssert(len(Qs), M, "Number of rational functions to be matched") RROMPyAssert(len(Ps), M, "Number of rational functions to be matched") if M <= 1: return Qs, Ps kind = kind.upper().strip().replace(" ","") if kind not in ["ROTATE", "PROJECT"]: raise RROMPyException("Matching kind not recognized.") degQ = np.max([Q.shape[0] for Q in Qs]) degP = np.max([P.shape[0] for P in Ps]) for j in range(M): if Qs[j].shape[0] < degQ: Qs[j] = np.pad(Qs[j], (0, degQ - Qs[j].shape[0]), "constant") if Ps[j].shape[0] < degP: Ps[j] = np.pad(Ps[j], [(0, degP - Ps[j].shape[0]), (0, 0)], "constant") featDist = baseDistanceMatrix(featPts) free = list(range(M)) if matchingWeight != 0: if hasattr(projMat, "shape"): PsC = [dot(projMat[:, supps[j] : supps[j] + Ps[j].shape[1]], Ps[j].T) for j in range(M)] else: PsC = [dot(projMat, Ps[j].T) for j in range(M)] fixed = [free.pop(root)] for j in range(M - 1, 0, -1): #find closest point idx = np.argmin(featDist[np.ix_(fixed, free)].flatten()) Ifix = fixed[idx // j] fixed += [free.pop(idx % j)] Ifree = fixed[-1] - if kind == "PROJECT": norm2 = np.sum(np.abs(Qs[Ifree]) ** 2.) + if kind == "PROJECT": scale = np.sum(np.abs(Qs[Ifree]) ** 2.) inner = np.sum(Qs[Ifix] * Qs[Ifree].conj()) if matchingWeight != 0: if HFEngine is None: - if kind == "PROJECT": - norm2P = np.sum(np.abs(Ps[Ifree]) ** 2.) + if kind == "PROJECT": scaleP = np.sum(np.abs(Ps[Ifree]) ** 2.) 
                 innerP = np.sum(Ps[Ifix] * Ps[Ifree].conj())
             else:
                 if kind == "PROJECT":
-                    norm2P = HFEngine.norm(PsC[Ifree], is_state = is_state)
-                    norm2P = np.sum(norm2P)
-                innerP = [HFEngine.innerProduct(
-                                          PsC[Ifix][:, j], PsC[Ifree][:, j],
-                                          is_state = is_state) for j in range(degP)]
-                innerP = np.sum(innerP)
-            if kind == "PROJECT": norm2 = norm2 + matchingWeight * norm2P
+                    scaleP = np.sum(HFEngine.norm(PsC[Ifree],
+                                                  is_state = is_state))
+                innerP = np.sum([HFEngine.innerProduct(
+                                             PsC[Ifix][:, j], PsC[Ifree][:, j],
+                                             is_state = is_state) for j in range(degP)])
+            if kind == "PROJECT": scale = scale + matchingWeight * scaleP
             inner = inner + matchingWeight * innerP
-            scale = np.abs(inner) if kind == "ROTATE" else norm2
-            if scale >= 1e-15:
+            if kind == "ROTATE": scale = np.abs(inner)
+            if scale > 1e-15 * np.abs(inner):
                 w = inner / scale
                 Qs[Ifree], Ps[Ifree] = Qs[Ifree] * w, Ps[Ifree] * w
                 if matchingWeight != 0: PsC[Ifree] = PsC[Ifree] * w
     return Qs, Ps
 
 def rationalFunctionMatching(poles:List[Np1D], coeffs:List[Np2D],
                              featPts:Np2D, matchingWeight:float, supps:ListAny,
                              projMat:Np2D, HFEngine : HFEng = None,
                              is_state : bool = True, root : int = None) \
                                               -> Tuple[List[Np1D], List[Np2D]]:
     """
     Match poles and residues of a set of rational functions.
 
     Args:
         poles: List of (lists of) poles.
         coeffs: List of (lists of) residues.
         featPts: Marginal parameters corresponding to rational models.
         matchingWeight: Matching weight in distance computation.
         supps: Support indices for projection matrix.
         projMat: Projection matrix for residues.
         HFEngine(optional): Engine for distance evaluation. Defaults to None,
             i.e. Euclidean metric.
         is_state(optional): Whether residues are of system state. Defaults to
             True.
         root(optional): Root of search tree. Defaults to None, i.e.
             automatically chosen.
 
     Returns:
         Matched list of (lists of) poles and list of (lists of) residues.
""" M, N = len(featPts), len(poles[0]) RROMPyAssert(len(poles), M, "Number of rational functions to be matched") RROMPyAssert(len(coeffs), M, "Number of rational functions to be matched") if M <= 1: return poles, coeffs featDist = baseDistanceMatrix(featPts) free = list(range(M)) if root is None: #start from sample point with closest neighbor, #among those with no inf pole notInfPls = np.where([np.any(np.isinf(p)) == False for p in poles])[0] MEff = len(notInfPls) if MEff == 1: root = notInfPls[0] else: featDistEff = featDist[notInfPls][:, notInfPls] root = notInfPls[np.argpartition(featDistEff.flatten(), MEff)[MEff] % MEff] polesC = copy(poles) if matchingWeight != 0: if hasattr(projMat, "shape"): resC = [dot(projMat[:, supps[j] : supps[j] + coeffs[j].shape[1]], coeffs[j][: N].T) for j in range(M)] else: resC = [dot(projMat, coeffs[j][: N].T) for j in range(M)] fixed = [free.pop(root)] for j in range(M - 1, 0, -1): #find closest point idx = np.argmin(featDist[np.ix_(fixed, free)].flatten()) Ifix = fixed[idx // j] fixed += [free.pop(idx % j)] Ifree = fixed[-1] plsfix, plsfree = polesC[Ifix], polesC[Ifree] freeInf = np.where(np.isinf(plsfree))[0] freeNotInf = np.where(np.isinf(plsfree) == False)[0] plsfree = plsfree[freeNotInf] if matchingWeight == 0: resfix, resfree = None, None else: resfix, resfree = resC[Ifix], resC[Ifree][:, freeNotInf] #build assignment distance matrix distj = doubleDistanceMatrix(plsfree, plsfix, matchingWeight, resfree, resfix, HFEngine, is_state) reordering = pointMatching(distj)[1] reorderingInf = [x for x in range(N) if x not in reordering] #reorder good poles poles[Ifree][reordering], poles[Ifree][reorderingInf] = ( poles[Ifree][freeNotInf], poles[Ifree][freeInf]) coeffs[Ifree][reordering], coeffs[Ifree][reorderingInf] = ( coeffs[Ifree][freeNotInf], coeffs[Ifree][freeInf]) #transfer missing poles over polesC[Ifree][reordering], polesC[Ifree][reorderingInf] = ( polesC[Ifree][freeNotInf], polesC[Ifix][reorderingInf]) if matchingWeight != 
0: resC[Ifree][:, reordering], resC[Ifree][:, reorderingInf] = ( resC[Ifree][:, freeNotInf], resC[Ifix][:, reorderingInf]) return poles, coeffs diff --git a/rrompy/utilities/poly_fitting/heaviside/vander.py b/rrompy/utilities/poly_fitting/heaviside/vander.py index a3cfe66..14b065b 100644 --- a/rrompy/utilities/poly_fitting/heaviside/vander.py +++ b/rrompy/utilities/poly_fitting/heaviside/vander.py @@ -1,63 +1,63 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.utilities.poly_fitting.polynomial.vander import polyvander as pvP from rrompy.utilities.base.types import Np1D, Np2D, List, paramList from rrompy.parameter import checkParameterList from rrompy.utilities.exception_manager import RROMPyException __all__ = ['heavisidevander', 'polyvander'] def heavisidevander(x:paramList, poles:Np1D, reorder : List[int] = None) -> Np2D: """Compute Heaviside-Vandermonde matrix.""" x = checkParameterList(x, 1) x_un, idx_un = x.unique(return_inverse = True) nx = len(x) if len(x_un) < nx: raise RROMPyException("Sample points must be distinct.") del x_un x = x.data.flatten() if reorder is not None: x = x[reorder] poles = poles.flatten() Van = np.empty((len(x), len(poles)), dtype = np.complex) for j in range(len(x)): Van[j, :] = 1. 
/ (x[j] - poles) return Van def polyvander(x:paramList, poles:Np1D, degs : List[int] = None, basis : str = "MONOMIAL_HEAVISIDE", derIdxs : List[List[List[int]]] = None, reorder : List[int] = None, scl : Np1D = None, - forceTotalDegree : bool = False) -> Np2D: + degreetype : str = "TENSOR") -> Np2D: """ Compute full Hermite-Vandermonde matrix with specified derivative directions. """ if derIdxs is not None and np.sum(np.sum(derIdxs)) > 0: raise RROMPyException(("Cannot take derivatives of heaviside " "function.")) basisp = basis.split("_")[0] VanH = heavisidevander(x, poles, reorder = reorder) if degs is None or np.sum(degs) < 0: VanP = np.empty((len(x), 0)) else: VanP = pvP(x, degs, basisp, derIdxs = derIdxs, reorder = reorder, - scl = scl, forceTotalDegree = forceTotalDegree) + scl = scl, degreetype = degreetype) return np.block([[VanH, VanP]]) diff --git a/rrompy/utilities/poly_fitting/polynomial/polynomial_interpolator.py b/rrompy/utilities/poly_fitting/polynomial/polynomial_interpolator.py index f498158..3aa7de3 100644 --- a/rrompy/utilities/poly_fitting/polynomial/polynomial_interpolator.py +++ b/rrompy/utilities/poly_fitting/polynomial/polynomial_interpolator.py @@ -1,235 +1,239 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import deepcopy as copy import numpy as np from scipy.special import factorial as fact from collections.abc import Iterable from itertools import combinations from rrompy.utilities.base.types import (List, ListAny, DictAny, Np1D, Np2D, paramList) from rrompy.utilities.base import freepar as fp from rrompy.utilities.poly_fitting.interpolator import GenericInterpolator from rrompy.utilities.poly_fitting.custom_fit import customFit from .base import polyfitname from .val import polyval from .roots import polyroots from .vander import polyvander as pv from .polynomial_algebra import changePolyBasis, polyTimes from rrompy.utilities.numerical import dot, baseDistanceMatrix -from rrompy.utilities.numerical.degree import degreeTotalToFull +from rrompy.utilities.numerical.degree import (mapTotalToTensor, + mapTotalToTensorMixed) from rrompy.utilities.exception_manager import RROMPyAssert, RROMPyException from rrompy.parameter import checkParameterList __all__ = ['PolynomialInterpolator', 'PolynomialInterpolatorNodal'] class PolynomialInterpolator(GenericInterpolator): """Function class with setup by polynomial interpolation.""" def __init__(self, other = None): if other is None: return self.coeffs = other.coeffs self.npar = other.npar self.polybasis = other.polybasis @property def shape(self): if self.coeffs.ndim > self.npar: sh = self.coeffs.shape[self.npar :] else: sh = tuple([1]) return sh @property def deg(self): return [x - 1 for x in self.coeffs.shape[: self.npar]] def __getitem__(self, key): return self.coeffs[key] def __call__(self, mu:paramList, der : List[int] = None, scl : Np1D = None): if hasattr(self, "_dirPivot"): mu = checkParameterList(mu)(self._dirPivot) return polyval(mu, self.coeffs, self.polybasis, der, scl) def __copy__(self): return PolynomialInterpolator(self) def __deepcopy__(self, memo): other = PolynomialInterpolator() other.coeffs, other.npar, other.polybasis = copy( (self.coeffs, self.npar, self.polybasis), memo) return other def 
pad(self, nleft : List[int] = None, nright : List[int] = None): if nleft is None: nleft = [0] * len(self.shape) if nright is None: nright = [0] * len(self.shape) if not isinstance(nleft, Iterable): nleft = [nleft] if not isinstance(nright, Iterable): nright = [nright] RROMPyAssert(len(self.shape), len(nleft), "Shape of output") RROMPyAssert(len(self.shape), len(nright), "Shape of output") padwidth = [(0, 0)] * self.npar padwidth = padwidth + [(l, r) for l, r in zip(nleft, nright)] self.coeffs = np.pad(self.coeffs, padwidth, "constant", constant_values = (0., 0.)) def postmultiplyTensorize(self, A:Np2D): RROMPyAssert(A.shape[0], self.shape[-1], "Shape of output") self.coeffs = dot(self.coeffs, A) def setupByInterpolation(self, support:paramList, values:ListAny, deg:int, polybasis : str = "MONOMIAL", - verbose : bool = True, totalDegree : bool = True, + verbose : bool = True, degreetype : str = "TOTAL", vanderCoeffs : DictAny = {}, fitCoeffs : DictAny = {}): support = checkParameterList(support) + degreetype = degreetype.upper().strip().replace(" ","") + if degreetype not in ["TENSOR", "TOTAL", "MIXED"]: + raise RROMPyException("Degree type not recognized.") self.npar = support.shape[1] self.polybasis = polybasis - if not totalDegree and not isinstance(deg, Iterable): - deg = [deg] * self.npar + if not isinstance(deg, Iterable): deg = [deg] * self.npar vander = pv(support, deg, basis = polybasis, **vanderCoeffs) RROMPyAssert(len(vander), len(values), "Number of support values") outDim = values.shape[1:] values = values.reshape(values.shape[0], -1) fitOut = customFit(vander, values, full = True, **fitCoeffs) P = fitOut[0] if verbose: - if self.npar == 1 and isinstance(deg, Iterable): - degM = deg[0] - else: - degM = deg + degM = deg[0] if self.npar == 1 else deg msg = ("Fitting {} samples with degree {} through {}... 
" "Conditioning of LS system: {:.4e}.").format( len(vander), degM, polyfitname(self.polybasis), fitOut[1][2][0] / fitOut[1][2][-1]) else: msg = None - if totalDegree: - self.coeffs = degreeTotalToFull(tuple([deg + 1] * self.npar) - + outDim, self.npar, P) - else: + if degreetype == "TENSOR": self.coeffs = P.reshape(tuple([d + 1 for d in deg]) + outDim) + elif degreetype == "TOTAL": + self.coeffs = mapTotalToTensor(tuple([d + 1 for d in deg]) + + outDim, self.npar, P) + else: #if degreetype == "MIXED": + mixedIdx = vanderCoeffs["mixedIdx"] + self.coeffs = mapTotalToTensorMixed(tuple([d + 1 for d in deg]) + + outDim, self.npar, P, mixedIdx) return fitOut[1][1] == vander.shape[1], msg def roots(self, marginalVals : ListAny = [fp]): RROMPyAssert(self.shape, (1,), "Shape of output") RROMPyAssert(len(marginalVals), self.npar, "Number of parameters") rDim = marginalVals.index(fp) if rDim < len(marginalVals) - 1 and fp in marginalVals[rDim + 1 :]: raise RROMPyException(("Exactly 1 'freepar' entry in " "marginalVals must be provided.")) return polyroots(self.coeffs, self.polybasis, marginalVals) class PolynomialInterpolatorNodal(PolynomialInterpolator): """ Function class with setup by polynomial interpolation. Stores roots of monomial polynomial instead of coefficients. Only for 1D. 
""" def __init__(self, other = None): self.npar = 1 if other is None: return self.nodes = other.nodes self.polybasis = other.polybasis @property def nodes(self): return self._nodes @nodes.setter def nodes(self, nodes): self.coeffs = None self._nodes = nodes @property def coeffs(self): if self._coeffs is None: self.buildCoeffs() return self._coeffs @coeffs.setter def coeffs(self, coeffs): self._coeffs = coeffs @property def shape(self): return (1,) @property def deg(self): return [len(self.nodes)] def __getitem__(self, key): return self.coeffs[key] def __call__(self, mu:paramList, der : List[int] = None, scl : Np1D = None): dirPivot = self._dirPivot if hasattr(self, "_dirPivot") else 0 if der is None: der = 0 elif isinstance(der, (list,tuple,np.ndarray,)): der = der[dirPivot] if scl is None: scl = 1. elif isinstance(scl, (list,tuple,np.ndarray,)): scl = scl[dirPivot] mu = checkParameterList(mu)(dirPivot) val = np.zeros(len(mu), dtype = np.complex) if der == self.deg[0]: val[:] = 1. elif der >= 0 and der < self.deg[0]: plDist = baseDistanceMatrix(mu, self.nodes, magnitude = False)[..., 0] for terms in combinations(np.arange(self.deg[0]), self.deg[0] - der): val += np.prod(plDist[:, list(terms)], axis = 1) return scl ** der * fact(der) * val def __copy__(self): return PolynomialInterpolatorNodal(self) def __deepcopy__(self, memo): other = PolynomialInterpolatorNodal() other.nodes, other.polybasis = copy((self.nodes, self.polybasis), memo) return other def buildCoeffs(self): N = len(self.nodes) if N > 0: local = [np.array([- pl, 1.], dtype = np.complex) for pl in self.nodes] while N > 1: for j in range(N // 2): local[j] = polyTimes(local[j], local[- 1 - j]) local = local[(N - 1) // 2 :: -1] N = len(local) else: local = [np.ones(1, dtype = np.complex)] self._coeffs = changePolyBasis(local[0], None, "MONOMIAL", self.polybasis) def pad(self, *args, **kwargs): raise RROMPyException(("Padding not allowed for polynomials in nodal " "form")) def postmultiplyTensorize(self, 
*args, **kwargs): raise RROMPyException(("Post-multiply not allowed for polynomials in " "nodal form")) def setupByInterpolation(self, support:paramList, *args, **kwargs): support = checkParameterList(support) self.npar = support.shape[1] if self.npar > 1: raise RROMPyException(("Polynomial in nodal form must have " "scalar input")) output = super().setupByInterpolation(support, *args, **kwargs) self._nodes = super().roots() return output def roots(self, marginalVals : ListAny = [fp]): return np.array(self.nodes) diff --git a/rrompy/utilities/poly_fitting/polynomial/vander.py b/rrompy/utilities/poly_fitting/polynomial/vander.py index 50368e3..0a4e8e9 100644 --- a/rrompy/utilities/poly_fitting/polynomial/vander.py +++ b/rrompy/utilities/poly_fitting/polynomial/vander.py @@ -1,132 +1,145 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import numpy as np from collections.abc import Iterable from .base import polybases from .der import polyder from rrompy.utilities.base.types import Np1D, Np2D, List, paramList -from rrompy.utilities.numerical.degree import totalDegreeSet +from rrompy.utilities.numerical.degree import degreeSet, degreeSetMixed from rrompy.parameter import checkParameterList from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert __all__ = ['polyvander'] -def firstDerTransition(dim:int, TDirac:List[Np2D], basis:str, +def firstDerTransition(npar:int, TDirac:List[Np2D], basis:str, scl : Np1D = None) -> Np2D: """Manage step from function samples to function derivatives.""" - C_m = np.zeros((dim, len(TDirac), len(TDirac)), dtype = float) + C_m = np.zeros((npar, len(TDirac), len(TDirac)), dtype = float) for j, Tj in enumerate(TDirac): - m, om = [0] * dim, [(0, 0)] * dim - for idx in range(dim): + m, om = [0] * npar, [(0, 0)] * npar + for idx in range(npar): m[idx], om[idx] = 1, (0, 1) J_der = polyder(Tj, basis, m, scl) if J_der.size != len(TDirac): J_der = np.pad(J_der, mode = "constant", pad_width = om) C_m[idx, :, j] = np.ravel(J_der) m[idx], om[idx] = 0, (0, 0) return C_m def polyvander(x:paramList, degs:List[int], basis:str, derIdxs : List[List[List[int]]] = None, reorder : List[int] = None, scl : Np1D = None, - forceTotalDegree : bool = False) -> Np2D: + degreetype : str = "TENSOR", mixedIdx : int = -1) -> Np2D: """ Compute full Hermite-Vandermonde matrix with specified derivative directions. E.g. assume that we want to obtain the Vandermonde matrix for (value, derx, derx2) at x = [0, 0], (value, dery) at x = [1, 0], (dery, derxy) at x = [0, 0], of degree 3 in x and 1 in y, using Chebyshev polynomials. 
This can be done by polyvander([[0, 0], [1, 0]], # unique sample points [3, 1], # polynomial degree "chebyshev", # polynomial family [ [[0, 0], [1, 0], [2, 0], [0, 1], [1, 1]], # derivative directions at first point [[0, 0], [0, 1]] # derivative directions at second point ], [0, 1, 2, 5, 6, 3, 4] # reorder indices ) """ x = checkParameterList(x) - dim = x.shape[1] - totalDeg = (forceTotalDegree - or not isinstance(degs, (list, tuple, np.ndarray,))) - if forceTotalDegree and isinstance(degs, (list, tuple, np.ndarray,)): + npar = x.shape[1] + degreetype = degreetype.upper().strip().replace(" ","") + if degreetype not in ["TENSOR", "TOTAL", "MIXED"]: + raise RROMPyException("Degree type not recognized.") + if degreetype == "MIXED" and npar == 1: degreetype = "TOTAL" + if not isinstance(degs, (list, tuple, np.ndarray,)): + degreetype, degs = "TOTAL", [degs] * npar + if degreetype == "TOTAL": if np.any(np.array(degs) != degs[0]): raise RROMPyException(("Cannot force total degree if prescribed " "degrees are different")) - degs = degs[0] - if not isinstance(degs, (list, tuple, np.ndarray,)): degs = [degs] * dim - RROMPyAssert(len(degs), dim, "Number of parameters") + if degreetype == "MIXED": + if mixedIdx == -1: + raise RROMPyException("Must specify mixedIdx for mixed vander.") + RROMPyAssert(len(degs), npar, "Number of parameters") x_un, idx_un = x.unique(return_inverse = True) if len(x_un) < len(x): raise RROMPyException("Sample points must be distinct.") del x_un if basis.upper() not in polybases: raise RROMPyException("Polynomial basis not recognized.") vanderbase = {"CHEBYSHEV" : np.polynomial.chebyshev.chebvander, "LEGENDRE" : np.polynomial.legendre.legvander, "MONOMIAL" : np.polynomial.polynomial.polyvander }[basis.upper()] VanBase = vanderbase(x(0), degs[0]) - for j in range(1, dim): + for j in range(1, npar): VNext = vanderbase(x(j), degs[j]) for jj in range(j): VNext = np.expand_dims(VNext, 1) VanBase = VanBase[..., None] * VNext VanBase = 
VanBase.reshape((len(x), -1))
     if derIdxs is None or VanBase.shape[-1] <= 1:
         Van = VanBase
     else:
         derFlat, idxRep = [], []
         for j, derIdx in enumerate(derIdxs):
             derFlat += derIdx[:]
             idxRep += [j] * len(derIdx[:])
         for j in range(len(derFlat)):
             if not isinstance(derFlat[j], Iterable): derFlat[j] = [derFlat[j]]
-            RROMPyAssert(len(derFlat[j]), dim, "Number of dimensions")
+            RROMPyAssert(len(derFlat[j]), npar, "Number of dimensions")
         #manage mixed derivatives
         TDirac = [y.reshape([d + 1 for d in degs]) for y in
                                     np.eye(VanBase.shape[-1], dtype = int)]
-        Cs_loc = firstDerTransition(dim, TDirac, basis, scl)
+        Cs_loc = firstDerTransition(npar, TDirac, basis, scl)
         Van = np.empty((len(derFlat), VanBase.shape[-1]),
                        dtype = VanBase.dtype)
         for j in range(len(derFlat)):
             Van[j, :] = VanBase[idxRep[j], :]
-            for k in range(dim):
+            for k in range(npar):
                 for der in range(derFlat[j][k]):
                     Van[j, :] = Van[j, :].dot(Cs_loc[k]) / (der + 1)
     if reorder is not None: Van = Van[reorder, :]
-    if not totalDeg: return Van
-    derIdxs, mask = totalDegreeSet(degs[0], dim, return_mask = True)
-    ordIdxs = np.empty(len(derIdxs), dtype = int)
-    derTotal = np.array([np.sum(y) for y in derIdxs])
-    idxPrev = 0
-    rangeAux = np.arange(len(derIdxs))
-    for j in range(degs[0] + 1):
-        idxLocal = rangeAux[derTotal == j][::-1]
-        idxPrev += len(idxLocal)
-        ordIdxs[idxPrev - len(idxLocal) : idxPrev] = idxLocal
-    return Van[:, mask][:, ordIdxs]
+    if degreetype != "TENSOR":
+        if degreetype == "TOTAL":
+            derIdxs, mask = degreeSet(degs[0], npar, degreetype, True)
+            degM = degs[0]
+        else: #if degreetype == "MIXED":
+            degS = degs[0] if mixedIdx else degs[1]
+            derIdxs, mask = degreeSetMixed(degs[mixedIdx], degS, npar,
+                                           mixedIdx, degreetype, True)
+            degM = max(degs)
+        ordIdxs = np.empty(len(derIdxs), dtype = int)
+        derTotal = np.array([np.sum(y) for y in derIdxs])
+        idxPrev = 0
+        rangeAux = np.arange(len(derIdxs))
+        for j in range(degM + 1):
+            idxLocal = rangeAux[derTotal == j][::-1]
+            idxPrev += len(idxLocal)
+            ordIdxs[idxPrev - len(idxLocal) : idxPrev] = idxLocal
+        Van = Van[:, mask][:, ordIdxs]
+    return Van
diff --git a/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py b/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py
index b65fd6e..6a57251 100644
--- a/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py
+++ b/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py
@@ -1,139 +1,144 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
# from copy import deepcopy as copy import numpy as np from collections.abc import Iterable from rrompy.utilities.base.types import (List, ListAny, DictAny, Np1D, Np2D, paramList) from rrompy.utilities.poly_fitting.interpolator import GenericInterpolator from rrompy.utilities.poly_fitting.custom_fit import customFit from .base import polyfitname from .val import polyval from .vander import polyvander as pv from rrompy.utilities.numerical import dot -from rrompy.utilities.numerical.degree import degreeTotalToFull +from rrompy.utilities.numerical.degree import (mapTotalToTensor, + mapTotalToTensorMixed) from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert from rrompy.parameter import checkParameterList __all__ = ['RadialBasisInterpolator'] class RadialBasisInterpolator(GenericInterpolator): """Function class with setup by radial basis interpolation.""" def __init__(self, other = None): if other is None: return self.support = other.support self.coeffsGlobal = other.coeffsGlobal self.coeffsLocal = other.coeffsLocal self.directionalWeights = other.directionalWeights self.npar = other.npar self.polybasis = other.polybasis @property def shape(self): sh = self.coeffsLocal.shape[1 :] if self.coeffsLocal.ndim > 1 else 1 return sh @property def deg(self): return [x - 1 for x in self.coeffsGlobal.shape[: self.npar]] def __call__(self, mu:paramList, der : List[int] = None, scl : Np1D = None): if der is not None and np.sum(der) > 0: raise RROMPyException(("Cannot take derivatives of radial basis " "function.")) return polyval(mu, self.coeffsGlobal, self.coeffsLocal, self.support, self.directionalWeights, self.polybasis) def __copy__(self): return RadialBasisInterpolator(self) def __deepcopy__(self, memo): other = RadialBasisInterpolator() (other.support, other.coeffsGlobal, other.coeffsLocal, other.directionalWeights, other.npar, other.polybasis) = copy( (self.support, self.coeffsGlobal, self.coeffsLocal, self.directionalWeights, self.npar, 
self.polybasis), memo) return other def postmultiplyTensorize(self, A:Np2D): RROMPyAssert(A.shape[0], self.shape[-1], "Shape of output") self.coeffsLocal = dot(self.coeffsLocal, A) self.coeffsGlobal = dot(self.coeffsGlobal, A) def pad(self, nleft : List[int] = None, nright : List[int] = None): if nleft is None: nleft = [0] * len(self.shape) if nright is None: nright = [0] * len(self.shape) if not isinstance(nleft, Iterable): nleft = [nleft] if not isinstance(nright, Iterable): nright = [nright] RROMPyAssert(len(self.shape), len(nleft), "Shape of output") RROMPyAssert(len(self.shape), len(nright), "Shape of output") padwidth = [(0, 0)] + [(l, r) for l, r in zip(nleft, nright)] self.coeffsLocal = np.pad(self.coeffsLocal, padwidth, "constant", constant_values = (0., 0.)) padwidth = [(0, 0)] * (self.npar - 1) + padwidth self.coeffsGlobal = np.pad(self.coeffsGlobal, padwidth, "constant", constant_values = (0., 0.)) def setupByInterpolation(self, support:paramList, values:ListAny, deg:int, polybasis : str = "MONOMIAL_GAUSSIAN", directionalWeights : Np1D = None, - verbose : bool = True, totalDegree : bool = True, + verbose : bool = True, degreetype : str = "TOTAL", vanderCoeffs : DictAny = {}, fitCoeffs : DictAny = {}): support = checkParameterList(support) RROMPyAssert(len(support), len(values), "Number of support values") + degreetype = degreetype.upper().strip().replace(" ","") + if degreetype not in ["TENSOR", "TOTAL", "MIXED"]: + raise RROMPyException("Degree type not recognized.") self.support = copy(support) if "reorder" in vanderCoeffs.keys(): self.support = self.support[vanderCoeffs["reorder"]] self.npar = support.shape[1] if directionalWeights is None: directionalWeights = [1.] 
* self.npar directionalWeights = np.array(directionalWeights) self.polybasis = polybasis - if not totalDegree and not isinstance(deg, Iterable): - deg = [deg] * self.npar + if not isinstance(deg, Iterable): deg = [deg] * self.npar vander, self.directionalWeights = pv(support, deg, basis = polybasis, directionalWeights = directionalWeights, **vanderCoeffs) outDim = values.shape[1:] values = values.reshape(values.shape[0], -1) values = np.pad(values, ((0, len(vander) - len(values)), (0, 0)), "constant") fitOut = customFit(vander, values, full = True, **fitCoeffs) P = fitOut[0][len(support) :] if verbose: - if self.npar == 1 and isinstance(deg, Iterable): - degM = deg[0] - else: - degM = deg + degM = deg[0] if self.npar == 1 else deg msg = ("Fitting {}+{} samples with degree {} through {}... " "Conditioning of LS system: {:.4e}.").format( len(support), len(vander) - len(support), degM, polyfitname(self.polybasis), fitOut[1][2][0] / fitOut[1][2][-1]) else: msg = None self.coeffsLocal = fitOut[0][: len(support)].reshape((len(support),) + outDim) - if totalDegree: - self.coeffsGlobal = degreeTotalToFull(tuple([deg + 1] * self.npar) - + outDim, self.npar, P) - else: + if degreetype == "TENSOR": self.coeffsGlobal = P.reshape(tuple([d + 1 for d in deg]) + outDim) + elif degreetype == "TOTAL": + self.coeffsGlobal = mapTotalToTensor(tuple([d + 1 for d in deg]) + + outDim, self.npar, P) + else: #if degreetype == "MIXED": + mixedIdx = vanderCoeffs["mixedIdx"] + self.coeffsGlobal = mapTotalToTensorMixed( + tuple([d + 1 for d in deg]) + outDim, + self.npar, P, mixedIdx) return fitOut[1][1] == vander.shape[1], msg diff --git a/rrompy/utilities/poly_fitting/radial_basis/vander.py b/rrompy/utilities/poly_fitting/radial_basis/vander.py index 602c7fd..8ae22bf 100644 --- a/rrompy/utilities/poly_fitting/radial_basis/vander.py +++ b/rrompy/utilities/poly_fitting/radial_basis/vander.py @@ -1,96 +1,96 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from collections.abc import Iterable from .kernel import kernels from rrompy.utilities.poly_fitting.polynomial.vander import polyvander as pvP from rrompy.utilities.base.types import Np1D, Np2D, Tuple, List, paramList from rrompy.parameter import checkParameterList from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert __all__ = ['rbvander', 'polyvander'] def rbvander(x:paramList, basis:str, reorder : List[int] = None, directionalWeights : Np1D = None) -> Np2D: """Compute radial-basis-Vandermonde matrix.""" x = checkParameterList(x) x_un = x.unique() nx = len(x) if len(x_un) < nx: raise RROMPyException("Sample points must be distinct.") del x_un x = x.data if directionalWeights is None: directionalWeights = np.ones(x.shape[1]) elif not isinstance(directionalWeights, Iterable): directionalWeights = directionalWeights * np.ones(x.shape[1]) RROMPyAssert(len(directionalWeights), x.shape[1], "Number of directional weights") basis = basis.upper() if basis not in kernels.keys(): raise RROMPyException("Radial basis not recognized.") radialker = kernels[basis] Van = np.zeros((nx, nx)) if reorder is not None: x = x[reorder] xDiff2V, xDiff2I, xDiff2J = np.zeros(0), [], [] for i in range(nx - 1): xiD2Loc = np.sum(np.abs((x[i + 1 :] - x[i]) * directionalWeights) ** 2., axis = 1) xiD2Good = np.where(xiD2Loc <= radialker.threshold)[0] xDiff2V 
= np.append(xDiff2V, xiD2Loc[xiD2Good]) xDiff2I += [i] * len(xiD2Good) xDiff2J += list(i + 1 + xiD2Good) kernelV = radialker(xDiff2V, apply_threshold = False) Van = np.eye(nx) Van[xDiff2I, xDiff2J] = kernelV Van[xDiff2J, xDiff2I] = kernelV return Van def polyvander(x:paramList, degs:List[int], basis:str, derIdxs : List[List[List[int]]] = None, reorder : List[int] = None, directionalWeights : Np1D = None, - scl : Np1D = None, forceTotalDegree : bool = False, + scl : Np1D = None, degreetype : str = "TENSOR", optimizeScalingBounds : List[float] = [-1., -1.], maxOptimizeIter : int = 10) -> Tuple[Np2D, Np1D]: """ Compute full Hermite-Vandermonde matrix with specified derivative directions. """ if derIdxs is not None and np.sum(np.sum(derIdxs)) > 0: raise RROMPyException(("Cannot take derivatives of radial basis " "function.")) basisp, basisr = basis.split("_") VanP = pvP(x, degs, basisp, derIdxs = derIdxs, reorder = reorder, - scl = scl, forceTotalDegree = forceTotalDegree) + scl = scl, degreetype = degreetype) VanZ = np.zeros([VanP.shape[1]] * 2) optDir, optFact = 0, 100. for it in range(maxOptimizeIter): VanR = rbvander(x, basisr, reorder = reorder, directionalWeights = directionalWeights) Van = np.block([[VanR, VanP], [VanP.T.conj(), VanZ]]) if optimizeScalingBounds[0] < 0. or it == maxOptimizeIter - 1: break ncond = np.linalg.cond(Van) if ncond < optimizeScalingBounds[0]: if optDir != -1: optFact **= .5 optDir, directionalWeights = -1, directionalWeights / optFact elif ncond > optimizeScalingBounds[1]: if optDir != 1: optFact **= .5 optDir, directionalWeights = 1, directionalWeights * optFact else: break return Van, directionalWeights diff --git a/tests/1_utilities/radial_fitting.py b/tests/1_utilities/radial_fitting.py index d79fcd3..d22f3a8 100644 --- a/tests/1_utilities/radial_fitting.py +++ b/tests/1_utilities/radial_fitting.py @@ -1,129 +1,129 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.utilities.poly_fitting import customFit from rrompy.utilities.poly_fitting.radial_basis import (kernels, polybases, polyfitname, polydomcoeff, polyval, polyvander) -from rrompy.utilities.numerical.degree import degreeTotalToFull +from rrompy.utilities.numerical.degree import mapTotalToTensor from rrompy.parameter import checkParameterList def test_monomial_gaussian(): polyrbname = "MONOMIAL_GAUSSIAN" assert polyrbname in polybases fitname = polyfitname(polyrbname) domcoeff = polydomcoeff(5, polyrbname) assert fitname == "polyfit_gaussian" assert np.isclose(domcoeff, 1., rtol = 1e-5) radialGaussian = kernels["GAUSSIAN"] directionalWeights = np.array([5.]) xSupp = checkParameterList(np.arange(-1, 3), 1) cRBCoeffs = np.array([-1., 3., -3., 1., 1., 2., -.5]) globalCoeffs = cRBCoeffs[4 :] localCoeffs = cRBCoeffs[: 4] ySupp = 1 + 2. * xSupp.data - .5 * xSupp.data ** 2. xx = np.linspace(-2., 3., 100) yy = polyval(checkParameterList(xx, 1), globalCoeffs, localCoeffs, xSupp, directionalWeights, polyrbname) yyman = 1. + 2. * xx - .5 * xx ** 2. for j, xc in enumerate(np.arange(-1, 3)): r2j = (5. * (xx - xc)) ** 2. rbj = radialGaussian(r2j) assert np.allclose(rbj, np.exp(-.5 * r2j)) yyman += localCoeffs[j] * rbj ySupp += localCoeffs[j] * radialGaussian((directionalWeights[0] * (xSupp.data - xc)) ** 2.) 
assert np.allclose(yy, yyman, atol = 1e-5) VanT = polyvander(xSupp, [2], polyrbname, directionalWeights = directionalWeights)[0] ySupp = np.pad(ySupp.flatten(), (0, len(VanT) - len(xSupp)), "constant") out = customFit(VanT, ySupp) assert np.allclose(out, cRBCoeffs, atol = 1e-8) def test_chebyshev_multiquadric(): polyrbname = "CHEBYSHEV_MULTIQUADRIC" assert polyrbname in polybases fitname = polyfitname(polyrbname) domcoeff = polydomcoeff(5, polyrbname) assert fitname == "chebfit_multiquadric" assert np.isclose(domcoeff, 16, rtol = 1e-5) multiQuadric = kernels["MULTIQUADRIC"] directionalWeights = np.array([1.]) xSupp = checkParameterList(np.arange(-1, 3), 1) cRBCoeffs = np.array([-1., 3., -3., 1., 1., 2., -.5]) localCoeffs = cRBCoeffs[: 4] globalCoeffs = cRBCoeffs[4 :] ySupp = 1 + 2. * xSupp.data - .5 * (2. * xSupp.data ** 2. - 1.) xx = np.linspace(-2., 3., 100) yy = polyval(checkParameterList(xx, 1), globalCoeffs, localCoeffs, xSupp, directionalWeights, polyrbname) yyman = 1. + 2. * xx - .5 * (2. * xx ** 2. - 1.) for j, xc in enumerate(np.arange(-1, 3)): r2j = (directionalWeights[0] * (xx - xc)) ** 2. rbj = multiQuadric(r2j) assert np.allclose(rbj, np.power(r2j + 1, -.5)) yyman += localCoeffs[j] * rbj ySupp += localCoeffs[j] * multiQuadric((directionalWeights[0] * (xSupp.data - xc)) ** 2.) assert np.allclose(yy, yyman, atol = 1e-5) VanT = polyvander(xSupp, [2], polyrbname, directionalWeights = directionalWeights)[0] ySupp = np.pad(ySupp.flatten(), (0, len(VanT) - len(xSupp)), "constant") out = customFit(VanT, ySupp) assert np.allclose(out, cRBCoeffs, atol = 1e-8) def test_total_degree_2d(): values = lambda x, y: (x - 3.) ** 2. * y - (x + 1.) * y ** 2. polyrbname = "CHEBYSHEV_GAUSSIAN" xs, ys = np.meshgrid(np.linspace(0., 4., 5), np.linspace(0., 4., 4)) xySupp = np.concatenate((xs.reshape(-1, 1), ys.reshape(-1, 1)), axis = 1) zs = values(xs, ys) zSupp = zs.flatten() deg = 3 directionalWeights = [2., 1.] 
VanT = polyvander(xySupp, deg, polyrbname, directionalWeights = directionalWeights)[0] cFit = np.linalg.solve(VanT, np.pad(zSupp, (0, len(VanT) - len(zSupp)), 'constant')) - globCoeff = degreeTotalToFull([deg + 1] * 2, 2, cFit[len(zSupp) :]) + globCoeff = mapTotalToTensor([deg + 1] * 2, 2, cFit[len(zSupp) :]) localCoeffs = cFit[: len(zSupp)] globalCoeffs = globCoeff xx, yy = np.meshgrid(np.linspace(0., 4., 100), np.linspace(0., 4., 100)) xxyy = np.concatenate((xx.reshape(-1, 1), yy.reshape(-1, 1)), axis = 1) zz = polyval(xxyy, globalCoeffs, localCoeffs, xySupp, directionalWeights, polyrbname).reshape(xx.shape) zzex = values(xx, yy) error = np.abs(zz - zzex) print(np.max(error)) assert np.max(error) < 1e-10 diff --git a/tests/2_hfengines/laplace_disk_gaussian.py b/tests/2_hfengines/laplace_disk_gaussian.py index 3af1aed..3eeab62 100644 --- a/tests/2_hfengines/laplace_disk_gaussian.py +++ b/tests/2_hfengines/laplace_disk_gaussian.py @@ -1,150 +1,150 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import numpy as np from numpy.polynomial.polynomial import polyfit as fit import fenics as fen from rrompy.utilities.base.types import paramVal from rrompy.hfengines.fenics_engines import LaplaceBaseProblemEngine from rrompy.solver.fenics import fenZERO from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.poly_fitting.polynomial import ( PolynomialInterpolator as PI) from rrompy.solver.fenics import fenics2Vector class LaplaceDiskGaussian(LaplaceBaseProblemEngine): """ Solver for disk Laplace problems with parametric forcing term center. - \Delta u = C exp(-.5 * ||\cdot - (mu, 0)||^2) in \Omega = B(0, 5) u = 0 on \partial\Omega. """ def __init__(self, n:int, mu0 : paramVal = [0.], *args, **kwargs): super().__init__(mu0, *args, **kwargs) self.nbs = 19 import mshr mesh = mshr.generate_mesh(mshr.Circle(fen.Point(0., 0.), 5.), 3 * n) self.V = fen.FunctionSpace(mesh, "P", 1) def buildb(self): """Build terms of operator of linear system.""" if self.thbs[0] is None: self.thbs = self.getMonomialWeights(self.nbs) bDEIMCoeffs = None for j in range(self.nbs): if self.bs[j] is None: vbMng(self, "INIT", "Assembling forcing term b{}.".format(j), 20) if bDEIMCoeffs is None: muDEIM = 3. * np.linspace(0., 1., self.nbs // 2 + 1) ** 2. muDEIM = np.concatenate((-muDEIM[:0:-1], muDEIM)) for jj, muD in enumerate(muDEIM): x, y = fen.SpatialCoordinate(self.V.mesh())[:] C = np.exp(-.5 * muD ** 2.) CR, CI = np.real(C), np.imag(C) f0 = ((2 * np.pi) ** -.5 * fen.exp(-.5 * (x ** 2. 
+ y ** 2.))) muR, muI = np.real(muD), np.imag(muD) f1R = fen.exp(muR * x) * fen.cos(muI * x) f1I = fen.exp(muR * x) * fen.sin(muI * x) fRe = f0 * (CR * f1R - CI * f1I) + fenZERO fIm = f0 * (CR * f1I + CI * f1R) + fenZERO parsRe = self.iterReduceQuadratureDegree(zip([fRe], ["forcingTerm{}Real".format(jj)])) parsIm = self.iterReduceQuadratureDegree(zip([fIm], ["forcingTerm{}Imag".format(jj)])) LR = fen.dot(fRe, self.v) * fen.dx LI = fen.dot(fIm, self.v) * fen.dx DBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) bjj = (fenics2Vector(LR, parsRe, DBC0, 1) + 1.j * fenics2Vector(LI, parsIm, DBC0, 1)) if jj == 0: bDEIM = np.empty((self.nbs, len(bjj)), dtype = np.complex) bDEIM[jj] = bjj bDEIMCoeffs = (fit(muDEIM / 3., bDEIM, self.nbs - 1).T * np.power(3., - np.arange(self.nbs))).T self.bs[j] = bDEIMCoeffs[j] vbMng(self, "DEL", "Done assembling forcing term.", 20) class LaplaceDiskGaussian2(LaplaceDiskGaussian): """ Solver for disk Laplace problems with parametric forcing term center. - \Delta u = C exp(-.5 * ||\cdot - (mu1, mu2)||^2) in \Omega = B(0, 5) u = 0 on \partial\Omega. """ def __init__(self, n:int, mu0 : paramVal = [0., 0.], *args, **kwargs): super().__init__(n = n, mu0 = mu0, *args, **kwargs) self.nbs = 16 self.npar = 2 def buildb(self): """Build terms of operator of linear system.""" if self.thbs[0] is None: self.thbs = [None] * self.nbs bDEIMCoeffs = None for j in range(self.nbs): j1, j2 = j % int(self.nbs ** .5), j // int(self.nbs ** .5) if self.bs[j] is None: vbMng(self, "INIT", "Assembling forcing term b{}.".format(j), 20) if bDEIMCoeffs is None: muD1 = np.linspace(-2., 2., int(self.nbs ** .5)) muDEIM = np.empty((self.nbs, 2)) muDEIM[:, 0] = np.repeat(muD1, int(self.nbs ** .5)) muDEIM[:, 1] = np.tile(muD1, int(self.nbs ** .5)) for jj, muD in enumerate(muDEIM): x, y = fen.SpatialCoordinate(self.V.mesh())[:] C = np.exp(-.5 * (muD[0] ** 2. + muD[1] ** 2.)) CR, CI = np.real(C), np.imag(C) f0 = ((2 * np.pi) ** -.5 * fen.exp(-.5 * (x ** 2. 
+ y ** 2.))) muxR, muxI = np.real(muD[0]), np.imag(muD[0]) muyR, muyI = np.real(muD[1]), np.imag(muD[1]) f1R = (fen.exp(muxR * x + muyR * y) * fen.cos(muxI * x + muyI * y)) f1I = (fen.exp(muxR * x + muyR * y) * fen.sin(muxI * x + muyI * y)) fRe = f0 * (CR * f1R - CI * f1I) + fenZERO fIm = f0 * (CR * f1I + CI * f1R) + fenZERO parsRe = self.iterReduceQuadratureDegree(zip([fRe], ["forcingTerm{}Real".format(jj)])) parsIm = self.iterReduceQuadratureDegree(zip([fIm], ["forcingTerm{}Imag".format(jj)])) LR = fen.dot(fRe, self.v) * fen.dx LI = fen.dot(fIm, self.v) * fen.dx DBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) bjj = (fenics2Vector(LR, parsRe, DBC0, 1) + 1.j * fenics2Vector(LI, parsIm, DBC0, 1)) if jj == 0: bDEIM = np.empty((self.nbs, len(bjj)), dtype = np.complex) bDEIM[jj] = bjj p = PI() wellCond, _ = p.setupByInterpolation(muDEIM, bDEIM, - int(self.nbs ** .5) - 1, - "MONOMIAL", False, False) + int(self.nbs ** .5) - 1, + "MONOMIAL", 0, "TENSOR") bDEIMCoeffs = p.coeffs self.bs[j1 + int(self.nbs ** .5) * j2] = bDEIMCoeffs[j1, j2] self.thbs[j1 + int(self.nbs ** .5) * j2] = ( self.getMonomialSingleWeight([j1, j2])) vbMng(self, "DEL", "Done assembling forcing term.", 20) diff --git a/tests/4_reduction_methods_multiD/rational_interpolant_2d.py b/tests/4_reduction_methods_multiD/rational_interpolant_2d.py index d55471c..f211eeb 100644 --- a/tests/4_reduction_methods_multiD/rational_interpolant_2d.py +++ b/tests/4_reduction_methods_multiD/rational_interpolant_2d.py @@ -1,78 +1,77 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from matrix_random import matrixRandom from rrompy.reduction_methods import RationalInterpolant as RI from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, ManualSampler as MS) def test_monomials(capsys): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": False, "S": 16, "MAuxiliary":3, "NAuxiliary":3, "QTol": 1e-6, "interpTol": 1e-3, "polybasis": "MONOMIAL", "sampler": QS([[4.9, 6.85], [5.1, 7.15]], "UNIFORM")} approx = RI(solver, mu0, params, verbosity = 100) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) - assert np.isclose(errNP / solver.norm(uh), 3.269e-02, rtol = 1) + assert np.isclose(errNP / solver.norm(uh), 5.04e-03, rtol = 1) out, err = capsys.readouterr() - assert ("below tolerance. Reducing N " in out) + assert (" Reducing N " in out) assert len(err) == 0 def test_well_cond(): mu = [5.05, 7.1] mu0 = [5., 7.] 
solver = matrixRandom() params = {"POD": True, "M": 3, "N": 3, "MAuxiliary": 3, "NAuxiliary": 3, "S": 16, "interpTol": 1e-10, "polybasis": "CHEBYSHEV", "sampler": QS([[4.9, 6.85], [5.1, 7.15]], "UNIFORM")} approx = RI(solver, mu0, params, verbosity = 0) approx.setupApprox() assert np.isclose(approx.normErr(mu)[0] / approx.normHF(mu)[0], - 1.3098e-01, rtol = 1e-1) + 6.327e-03, rtol = 1e-1) def test_hermite(): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() sampler0 = QS([[4.9, 6.85], [5.1, 7.15]], "UNIFORM") params = {"POD": True, "M": 3, "N": 3, "MAuxiliary": 3, "NAuxiliary": 3, "S": 25, "polybasis": "CHEBYSHEV", "sampler": MS([[4.9, 6.85], [5.1, 7.15]], points = sampler0.generatePoints(9))} approx = RI(solver, mu0, params, verbosity = 0) approx.setupApprox() assert np.isclose(approx.normErr(mu)[0] / approx.normHF(mu)[0], - 8.905e-03, rtol = 5e-1) - + 4.622e-05, rtol = 5e-1)