rational_interpolant_greedy.py

# Copyright (C) 2018-2020 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy as copy
import numpy as np
from rrompy.hfengines.base.linear_affine_engine import checkIfAffine
from .generic_greedy_approximant import GenericGreedyApproximant
from rrompy.utilities.poly_fitting.polynomial import polyvander
from rrompy.utilities.numerical import dot
from rrompy.utilities.numerical.degree import degreeToDOFs
from rrompy.utilities.expression import expressionEvaluator
from rrompy.reduction_methods.standard import RationalInterpolant
from rrompy.utilities.base.types import Np1D, Tuple, paramVal, List
from rrompy.utilities.base.verbosity_depth import verbosityManager as vbMng
from rrompy.utilities.poly_fitting import customFit
from rrompy.utilities.exception_manager import (RROMPyWarning, RROMPyException,
RROMPyAssert, RROMPy_FRAGILE)
from rrompy.sampling import sampleList, emptySampleList
__all__ = ['RationalInterpolantGreedy']
class RationalInterpolantGreedy(GenericGreedyApproximant, RationalInterpolant):
"""
ROM greedy rational interpolant computation for parametric problems.
Args:
HFEngine: HF problem solver.
mu0(optional): Default parameter. Defaults to 0.
approxParameters(optional): Dictionary containing values for main
parameters of approximant. Recognized keys are:
- 'POD': kind of snapshots orthogonalization; allowed values
include 0, 1/2, and 1; defaults to 1, i.e. POD;
- 'scaleFactorDer': scaling factors for derivative computation;
defaults to 'AUTO';
- 'S': number of starting training points;
- 'polydegreetype': type of polynomial degree; allowed values are
'*' or '*_*', with * either 'TENSOR' or 'TOTAL'; defaults to
'TOTAL';
- 'N': degree of rational interpolant denominator; defaults to
'AUTO', i.e. maximum allowed;
- 'MAuxiliary': degree of rational interpolant numerator with
respect to non-pivot parameters; defaults to 0;
- 'NAuxiliary': degree of rational interpolant denominator with
respect to non-pivot parameters; defaults to 0;
- 'sampler': sample point generator;
- 'rationalMode': mode of rational approximation; allowed values
include 'MINIMAL[_STATE]' and 'BARYCENTRIC[_STATE]'; defaults
to 'MINIMAL';
- 'greedyTol': uniform error tolerance for greedy algorithm;
defaults to 1e-2;
- 'collinearityTol': collinearity tolerance for greedy algorithm;
defaults to 0.;
- 'maxIter': maximum number of greedy steps; defaults to 1e2;
- 'nTestPoints': number of test points; defaults to 5e2;
- 'samplerTrainSet': training sample points generator; defaults to
sampler;
- 'polybasis': type of basis for interpolation; defaults to
'MONOMIAL';
- 'errorEstimatorKind': kind of error estimator; available values
include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
'LOOK_AHEAD_RES', 'LOOK_AHEAD_OUTPUT', and 'NONE'; defaults to
                'DISCREPANCY';
- 'functionalSolve': strategy for minimization of denominator
functional; allowed values include 'NORM' and 'DOMINANT';
defaults to 'NORM';
- 'interpTol': tolerance for interpolation; defaults to None;
- 'forceQReal': force denominator to have real coefficients;
defaults to False;
- 'polyTruncateTol': tolerance for truncation of rational terms;
defaults to 0;
- 'QTol': tolerance for robust rational denominator management;
defaults to 0.
Defaults to empty dict.
verbosity(optional): Verbosity level. Defaults to 10.
Attributes:
HFEngine: HF problem solver.
mu0: Default parameter.
mus: Array of snapshot parameters.
approxParameters: Dictionary containing values for main parameters of
approximant. Recognized keys are in parameterList.
parameterListSoft: Recognized keys of soft approximant parameters:
- 'POD': kind of snapshots orthogonalization;
- 'scaleFactorDer': scaling factors for derivative computation;
- 'rationalMode': mode of rational approximation;
- 'polydegreetype': type of polynomial degree;
- 'N': degree of rational interpolant denominator;
- 'MAuxiliary': degree of rational interpolant numerator with
                respect to non-pivot parameters;
- 'NAuxiliary': degree of rational interpolant denominator with
                respect to non-pivot parameters;
- 'greedyTol': uniform error tolerance for greedy algorithm;
- 'collinearityTol': collinearity tolerance for greedy algorithm;
- 'maxIter': maximum number of greedy steps;
- 'nTestPoints': number of test points;
- 'samplerTrainSet': training sample points generator;
- 'polybasis': type of polynomial basis for interpolation;
- 'errorEstimatorKind': kind of error estimator;
- 'functionalSolve': strategy for minimization of denominator
functional;
- 'interpTol': tolerance for interpolation;
- 'forceQReal': force denominator to have real coefficients;
- 'polyTruncateTol': tolerance for truncation of rational terms;
- 'QTol': tolerance for robust rational denominator management.
parameterListCritical: Recognized keys of critical approximant
parameters:
- 'S': total number of samples current approximant relies upon;
- 'sampler': sample point generator.
verbosity: Verbosity level.
POD: Kind of snapshots orthogonalization.
scaleFactorDer: Scaling factors for derivative computation.
        S: number of starting training points.
polydegreetype: Type of polynomial degree.
N: Denominator degree of approximant.
MAuxiliary: Degree of rational interpolant numerator with respect to
non-pivot parameters.
NAuxiliary: Degree of rational interpolant denominator with respect to
non-pivot parameters.
sampler: Sample point generator.
rationalMode: Mode of rational approximation.
greedyTol: uniform error tolerance for greedy algorithm.
collinearityTol: Collinearity tolerance for greedy algorithm.
maxIter: maximum number of greedy steps.
        nTestPoints: number of test points.
samplerTrainSet: training sample points generator.
polybasis: Type of polynomial basis for interpolation.
errorEstimatorKind: kind of error estimator.
functionalSolve: Strategy for minimization of denominator functional.
interpTol: tolerance for interpolation.
forceQReal: Force denominator to have real coefficients.
polyTruncateTol: Tolerance for truncation of rational terms.
QTol: tolerance for robust rational denominator management.
muBounds: list of bounds for parameter values.
samplingEngine: Sampling engine.
uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
sampleList.
lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
solution(s) as parameterList.
uApproxReduced: Reduced approximate solution(s) with parameter(s)
lastSolvedApprox as sampleList.
lastSolvedApproxReduced: Parameter(s) corresponding to last computed
reduced approximate solution(s) as parameterList.
uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
sampleList.
lastSolvedApprox: Parameter(s) corresponding to last computed
approximate solution(s) as parameterList.
"""
_allowedEstimatorKinds = ["AFFINE", "DISCREPANCY", "LOOK_AHEAD",
"LOOK_AHEAD_RES", "LOOK_AHEAD_OUTPUT", "NONE"]
def __init__(self, *args, **kwargs):
self._preInit()
self._addParametersToList(["errorEstimatorKind"], ["DISCREPANCY"],
toBeExcluded = ["M"])
super().__init__(*args, **kwargs)
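        # M is excluded from the user-facing parameters above and kept at
        # "AUTO" so that the numerator degree can track the growing sample set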
self.M = "AUTO"
self._postInit()
@property
def rationalMode(self):
"""Value of rationalMode."""
return self._rationalMode
@rationalMode.setter
def rationalMode(self, rationalMode):
try:
rationalMode = rationalMode.upper().strip().replace(" ","")
if rationalMode in ["MINIMAL", "BARYCENTRIC"]:
rationalMode += "_STATE"
if rationalMode not in ["MINIMAL_STATE", "BARYCENTRIC_STATE"]:
raise RROMPyException("Prescribed mode not recognized.")
except Exception as E:
RROMPyWarning(str(E) + " Overriding to 'MINIMAL'.")
rationalMode = "MINIMAL_STATE"
self._rationalMode = rationalMode
self._approxParameters["rationalMode"] = self.rationalMode
@property
def errorEstimatorKind(self):
"""Value of errorEstimatorKind."""
return self._errorEstimatorKind
@errorEstimatorKind.setter
def errorEstimatorKind(self, errorEstimatorKind):
errorEstimatorKind = errorEstimatorKind.upper()
if errorEstimatorKind not in self._allowedEstimatorKinds:
RROMPyWarning(("Error estimator kind not recognized. Overriding "
"to 'NONE'."))
errorEstimatorKind = "NONE"
self._errorEstimatorKind = errorEstimatorKind
self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind
def _polyvanderAuxiliary(self, mus, deg, *args, **kwargs):
return polyvander(mus, deg, *args, **kwargs)
def getErrorEstimatorDiscrepancy(self, mus:Np1D) -> Np1D:
"""Discrepancy-based residual estimator."""
checkIfAffine(self.HFEngine, "apply discrepancy-based error estimator",
False, self._affine_lvl)
mus = self.checkParameterList(mus)
muCTest = self.trainedModel.centerNormalize(mus)
tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0
QTest = self.trainedModel.getQVal(mus)
QTzero = np.where(QTest == 0.)[0]
if len(QTzero) > 0:
RROMPyWarning(("Adjusting estimator to avoid division by "
"numerically zero denominator."))
            QTest[QTzero] = (np.finfo(np.complex128).eps
/ len(self.trainedModel.data.Q))
self.HFEngine.buildA()
self.HFEngine.buildb()
nAs, nbs = self.HFEngine.nAs, self.HFEngine.nbs
muTrainEff = self.mapParameterList(self.mus)
muTestEff = self.mapParameterList(mus)
PTrain = self.trainedModel.getPVal(self.mus).data.T
QTrain = self.trainedModel.getQVal(self.mus)
QTzero = np.where(QTrain == 0.)[0]
if len(QTzero) > 0:
RROMPyWarning(("Adjusting estimator to avoid division by "
"numerically zero denominator."))
            QTrain[QTzero] = (np.finfo(np.complex128).eps
/ len(self.trainedModel.data.Q))
PTest = self.trainedModel.getPVal(mus).data
self.trainedModel.verbosity = tMverb
        radiusAbTrain = np.empty((self.S, nAs * self.S + nbs),
                                  dtype = np.complex128)
        radiusA = np.empty((self.S, nAs, len(mus)), dtype = np.complex128)
        radiusb = np.empty((nbs, len(mus)), dtype = np.complex128)
for j, thA in enumerate(self.HFEngine.thAs):
idxs = j * self.S + np.arange(self.S)
radiusAbTrain[:, idxs] = expressionEvaluator(thA[0], muTrainEff,
(self.S, 1)) * PTrain
radiusA[:, j] = PTest * expressionEvaluator(thA[0], muTestEff,
(len(mus),))
for j, thb in enumerate(self.HFEngine.thbs):
idx = nAs * self.S + j
radiusAbTrain[:, idx] = QTrain * expressionEvaluator(thb[0],
muTrainEff, (self.S,))
radiusb[j] = QTest * expressionEvaluator(thb[0], muTestEff,
(len(mus),))
QRHSNorm2 = self._affineResidualMatricesContraction(radiusb)
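        # fit the P- and Q-weighted affine terms collected on the training set
        # with a polynomial in mu, evaluate the fit at the test points, and
        # keep only the mismatch with respect to the exact weighted terms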
deg = [self.M] + [self.MAuxiliary] * (self.npar - 1)
vanTrain = self._polyvanderAuxiliary(self._musUniqueCN, deg,
self.polybasis, self._derIdxs,
self._reorder)
interpPQ = customFit(vanTrain, radiusAbTrain, rcond = self.interpTol)
vanTest = self._polyvanderAuxiliary(muCTest, deg, self.polybasis)
DradiusAb = vanTest.dot(interpPQ)
radiusA = (radiusA
- DradiusAb[:, : - nbs].reshape(len(mus), -1, self.S).T)
radiusb = radiusb - DradiusAb[:, - nbs :].T
ff, Lf, LL = self._affineResidualMatricesContraction(radiusb, radiusA)
err = np.abs((LL - 2. * np.real(Lf) + ff) / QRHSNorm2) ** .5
return err
def getErrorEstimatorLookAhead(self, mus:Np1D,
what : str = "") -> Tuple[Np1D, List[int]]:
"""Residual estimator based on look-ahead idea."""
err, idxMaxEst = self._EIMStep(mus)
mu_muTestS = mus[idxMaxEst]
app_muTestSample = self.trainedModel.getApproxReduced(mu_muTestS)
if self._mode == RROMPy_FRAGILE:
if what == "RES" and not self.HFEngine.isCEye:
raise RROMPyException(("Cannot compute LOOK_AHEAD_RES "
"estimator in fragile mode for "
"non-scalar C."))
app_muTestSample = dot(self.trainedModel.data.projMat[:,
: app_muTestSample.shape[0]],
app_muTestSample)
else:
app_muTestSample = dot(self.samplingEngine.projectionMatrix,
app_muTestSample)
app_muTestSample = sampleList(app_muTestSample)
if what == "RES":
errmu = self.HFEngine.residual(mu_muTestS, app_muTestSample,
post_c = False)
solmu = self.HFEngine.residual(mu_muTestS, None, post_c = False)
normSol = self.HFEngine.norm(solmu, dual = True)
normErr = self.HFEngine.norm(errmu, dual = True)
else:
for j, mu in enumerate(mu_muTestS):
uEx = self.samplingEngine.nextSample(mu)
if what == "OUTPUT":
uEx = self.HFEngine.applyC(uEx, mu)
app_muTS = self.HFEngine.applyC(app_muTestSample[j], mu)
if j == 0:
app_muTestS = emptySampleList()
app_muTestS.reset((len(app_muTS), len(mu_muTestS)),
                                           dtype = np.complex128)
app_muTestS[j] = app_muTS
if j == 0:
solmu = emptySampleList()
solmu.reset((len(uEx), len(mu_muTestS)),
                                dtype = np.complex128)
solmu[j] = uEx
if what == "OUTPUT": app_muTestSample = app_muTestS
errmu = solmu - app_muTestSample
normSol = self.HFEngine.norm(solmu, is_state = what != "OUTPUT")
normErr = self.HFEngine.norm(errmu, is_state = what != "OUTPUT")
errTest, errRel = err[idxMaxEst], normErr / normSol
errMean = errTest.conj().dot(errRel) / errTest.conj().dot(errTest)
return errMean * err, idxMaxEst
def getErrorEstimatorNone(self, mus:Np1D) -> Np1D:
"""EIM-based residual estimator."""
err = self._EIMStep(mus, True)
err *= self.greedyTol / np.mean(err)
return err
def _EIMStep(self, mus:Np1D,
only_one : bool = False) -> Tuple[Np1D, List[int]]:
"""EIM step to find next magic point."""
mus = self.checkParameterList(mus)
tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0
QTest = self.trainedModel.getQVal(mus)
QTzero = np.where(QTest == 0.)[0]
if len(QTzero) > 0:
RROMPyWarning(("Adjusting estimator to avoid division by "
"numerically zero denominator."))
            QTest[QTzero] = (np.finfo(np.complex128).eps
/ len(self.trainedModel.data.Q))
muCTest = self.trainedModel.centerNormalize(mus)
muCTrain = self.trainedModel.centerNormalize(self.mus)
self.trainedModel.verbosity = tMverb
deg = [self.M] + [self.MAuxiliary] * (self.npar - 1)
S0 = len(muCTrain)
if S0 != degreeToDOFs(deg, self.polydegreetypeM):
if self.npar > 1:
raise RROMPyException(("Evaluating error estimator without "
"interpolation is not allowed."))
RROMPyWarning(("Evaluating error estimator without "
"interpolation might lead to instabilities."))
deg[0] = self.S - 1
deg[0] = deg[0] + 1
vanTest = self._polyvanderAuxiliary(muCTest, deg, self.polybasis,
degreetype = self.polydegreetypeM)
vanTest, vanTestNext = vanTest[:, : S0], vanTest[:, S0 :]
idxsTest = np.arange(vanTestNext.shape[1])
basis = np.zeros((len(idxsTest), 0), dtype = float)
idxMaxEst = np.empty(len(idxsTest), dtype = int)
while len(idxsTest) > 0:
vanTrial = self._polyvanderAuxiliary(muCTrain, deg, self.polybasis,
degreetype = self.polydegreetypeM)
vanTrial, vanTrialNext = vanTrial[:, : S0], vanTrial[:, S0 :]
vanTrial = np.hstack((vanTrial, vanTrialNext.dot(basis).reshape(
len(vanTrialNext), basis.shape[1])))
valuesTrial = vanTrialNext[:, idxsTest]
vanTestEff = np.hstack((vanTest, vanTestNext.dot(basis).reshape(
len(vanTestNext), basis.shape[1])))
vanTestNextEff = vanTestNext[:, idxsTest]
coeffTest = customFit(vanTrial, valuesTrial)
errTest = np.abs((vanTestNextEff - vanTestEff.dot(coeffTest))
/ np.expand_dims(QTest, 1))
if only_one:
return errTest
elif basis.shape[1] == 0:
errTest0 = errTest[:, 0]
idxMaxErr = np.unravel_index(np.argmax(errTest), errTest.shape)
idxMaxEst[idxsTest[idxMaxErr[1]]] = idxMaxErr[0]
muCTrain.append(muCTest[idxMaxErr[0]])
basis = np.pad(basis, [(0, 0), (0, 1)], "constant")
basis[idxsTest[idxMaxErr[1]], -1] = 1.
idxsTest = np.delete(idxsTest, idxMaxErr[1])
return errTest0, idxMaxEst
def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D:
"""Standard residual-based error estimator."""
setupOK = self.setupApproxLocal()
if setupOK > 0:
err = np.empty(len(mus))
err[:] = np.nan
if not return_max: return err
return err, [- setupOK], np.nan
mus = self.checkParameterList(mus)
vbMng(self.trainedModel, "INIT",
"Evaluating error estimator at mu = {}.".format(mus), 10)
if self.errorEstimatorKind == "AFFINE":
err = self.getErrorEstimatorAffine(mus)
else:
self._setupInterpolationIndices()
if self.errorEstimatorKind == "DISCREPANCY":
err = self.getErrorEstimatorDiscrepancy(mus)
elif self.errorEstimatorKind[: 10] == "LOOK_AHEAD":
err, idxMaxEst = self.getErrorEstimatorLookAhead(mus,
self.errorEstimatorKind[11 :])
else: #if self.errorEstimatorKind == "NONE":
err = self.getErrorEstimatorNone(mus)
vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10)
if not return_max: return err
if self.errorEstimatorKind[: 10] != "LOOK_AHEAD":
idxMaxEst = [np.argmax(err)]
return err, idxMaxEst, err[idxMaxEst]
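    # one-shot flag: warn only once that the 'NONE' estimator is plotted with
    # an arbitrary normalization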
_warnPlottingNormalization = 1
def plotEstimator(self, *args, **kwargs):
super().plotEstimator(*args, **kwargs)
if (self.errorEstimatorKind == "NONE"
and self._warnPlottingNormalization):
RROMPyWarning(("Error estimator arbitrarily normalized before "
"plotting."))
self._warnPlottingNormalization = 0
def greedyNextSample(self, *args,
**kwargs) -> Tuple[Np1D, int, float, paramVal]:
"""Compute next greedy snapshot of solution map."""
RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
err, muidx, maxErr, muNext = super().greedyNextSample(*args, **kwargs)
if (self.errorEstimatorKind == "NONE" and not np.isnan(maxErr)
and not np.isinf(maxErr)):
maxErr = None
return err, muidx, maxErr, muNext
def _preliminaryTraining(self):
"""Initialize starting snapshots of solution map."""
RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
if self.samplingEngine.nsamples > 0: return
super()._preliminaryTraining()
def setupApproxLocal(self) -> int:
"""Compute rational interpolant."""
if self.checkComputedApprox(): return -1
RROMPyAssert(self._mode, message = "Cannot setup approximant.")
self.verbosity -= 10
vbMng(self, "INIT", "Setting up local approximant.", 5)
pMat = self.samplingEngine.projectionMatrix
firstRun = self.trainedModel is None
if not firstRun: pMat = pMat[:, len(self.trainedModel.data.mus) :]
self._setupTrainedModel(pMat, not firstRun)
Q = self._setupDenominator()
unstable = 0
if not firstRun: _M = self.M
self._setupRational(Q)
if not firstRun and self.M < _M:
RROMPyWarning(("Instability in numerator computation. "
"Aborting."))
unstable = 1
if not unstable:
self.trainedModel.data.approxParameters = copy(
self.approxParameters)
vbMng(self, "DEL", "Done setting up local approximant.", 5)
self.verbosity += 10
return unstable
def setupApprox(self, plotEst : str = "NONE") -> int:
val = super().setupApprox(plotEst)
if val == 0:
self._setupRational(self.trainedModel.data.Q,
self.trainedModel.data.P)
self.trainedModel.data.approxParameters = copy(
self.approxParameters)
return val
