rational_interpolant_greedy.py

# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy as copy
import numpy as np
from .generic_distributed_greedy_approximant import \
GenericDistributedGreedyApproximant
from rrompy.utilities.poly_fitting.polynomial import polybases
from rrompy.reduction_methods.distributed import RationalInterpolant
from rrompy.reduction_methods.trained_model import TrainedModelPade as tModel
from rrompy.reduction_methods.trained_model import TrainedModelData
from rrompy.utilities.base.types import Np1D, Np2D, DictAny, HFEng, paramVal
from rrompy.utilities.base import verbosityManager as vbMng
from rrompy.utilities.exception_manager import RROMPyWarning
from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert
__all__ = ['RationalInterpolantGreedy']
class RationalInterpolantGreedy(GenericDistributedGreedyApproximant,
RationalInterpolant):
"""
ROM greedy rational interpolant computation for parametric problems.
Args:
HFEngine: HF problem solver.
mu0(optional): Default parameter. Defaults to 0.
approxParameters(optional): Dictionary containing values for main
parameters of approximant. Recognized keys are:
- 'POD': whether to compute POD of snapshots; defaults to True;
- 'S': number of starting training points;
- 'sampler': sample point generator;
- 'radialBasis': radial basis family for interpolant numerator;
defaults to 0, i.e. no radial basis;
- 'radialBasisWeights': radial basis weights for interpolant
numerator; defaults to 0, i.e. identity;
- 'greedyTol': uniform error tolerance for greedy algorithm;
defaults to 1e-2;
- 'interactive': whether to interactively terminate greedy
algorithm; defaults to False;
- 'maxIter': maximum number of greedy steps; defaults to 1e2;
            - 'refinementRatio': ratio of test points to be exhausted before
              test set refinement; defaults to 0.2;
- 'nTestPoints': number of test points; defaults to 5e2;
- 'trainSetGenerator': training sample points generator; defaults
to Chebyshev sampler within muBounds;
- 'polybasis': type of basis for interpolation; allowed values
include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to
'MONOMIAL';
- 'Delta': difference between M and N in rational approximant;
defaults to 0;
- 'errorEstimatorKind': kind of error estimator; available values
include 'EXACT', 'BASIC', and 'BARE'; defaults to 'EXACT';
- 'interpRcond': tolerance for interpolation; defaults to None;
- 'robustTol': tolerance for robust Pade' denominator management;
defaults to 0.
Defaults to empty dict.
homogeneized(optional): Whether to homogeneize Dirichlet BCs. Defaults
to False.
verbosity(optional): Verbosity level. Defaults to 10.
Attributes:
HFEngine: HF problem solver.
mu0: Default parameter.
mus: Array of snapshot parameters.
homogeneized: Whether to homogeneize Dirichlet BCs.
approxParameters: Dictionary containing values for main parameters of
approximant. Recognized keys are in parameterList.
parameterListSoft: Recognized keys of soft approximant parameters:
- 'POD': whether to compute POD of snapshots.
- 'radialBasis': radial basis family for interpolant numerator;
defaults to 0, i.e. no radial basis;
- 'radialBasisWeights': radial basis weights for interpolant
numerator; defaults to 0, i.e. identity;
- 'greedyTol': uniform error tolerance for greedy algorithm;
- 'interactive': whether to interactively terminate greedy
algorithm;
- 'maxIter': maximum number of greedy steps;
- 'refinementRatio': ratio of test points to be exhausted before
test set refinement;
- 'nTestPoints': number of test points;
- 'trainSetGenerator': training sample points generator;
- 'Delta': difference between M and N in rational approximant;
- 'errorEstimatorKind': kind of error estimator;
- 'interpRcond': tolerance for interpolation;
- 'robustTol': tolerance for robust Pade' denominator management.
parameterListCritical: Recognized keys of critical approximant
parameters:
- 'S': total number of samples current approximant relies upon;
- 'sampler': sample point generator.
POD: whether to compute POD of snapshots.
        S: total number of samples current approximant relies upon.
sampler: Sample point generator.
radialBasis: Radial basis family for interpolant numerator.
radialBasisWeights: Radial basis weights for interpolant numerator.
greedyTol: uniform error tolerance for greedy algorithm.
interactive: whether to interactively terminate greedy algorithm.
maxIter: maximum number of greedy steps.
        refinementRatio: ratio of test points to be exhausted before test set
            refinement.
        nTestPoints: number of test points.
trainSetGenerator: training sample points generator.
        polybasis: type of basis for interpolation.
Delta: difference between M and N in rational approximant.
errorEstimatorKind: kind of error estimator.
interpRcond: tolerance for interpolation.
robustTol: tolerance for robust Pade' denominator management.
muBounds: list of bounds for parameter values.
samplingEngine: Sampling engine.
estimatorNormEngine: Engine for estimator norm computation.
uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
sampleList.
lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
solution(s) as parameterList.
uApproxReduced: Reduced approximate solution(s) with parameter(s)
lastSolvedApprox as sampleList.
lastSolvedApproxReduced: Parameter(s) corresponding to last computed
reduced approximate solution(s) as parameterList.
uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
sampleList.
lastSolvedApprox: Parameter(s) corresponding to last computed
approximate solution(s) as parameterList.
"""
_allowedEstimatorKinds = ["EXACT", "BASIC", "BARE"]
def __init__(self, HFEngine:HFEng, mu0 : paramVal = None,
approxParameters : DictAny = {}, homogeneized : bool = False,
verbosity : int = 10, timestamp : bool = True):
self._preInit()
self._addParametersToList(["Delta", "polybasis", "errorEstimatorKind",
"interpRcond", "robustTol"],
[0, "MONOMIAL", "EXACT", -1, 0],
toBeExcluded = ["E"])
super().__init__(HFEngine = HFEngine, mu0 = mu0,
approxParameters = approxParameters,
homogeneized = homogeneized,
verbosity = verbosity, timestamp = timestamp)
vbMng(self, "INIT", "Computing Taylor blocks of system.", 7)
nAs = self.HFEngine.nAs - 1
nbs = max(self.HFEngine.nbs, (nAs + 1) * self.homogeneized)
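        # As collects the Taylor (derivative) blocks of A of order >= 1 at
        # mu0; bs collects the Taylor blocks of the RHS b, with extra terms
        # when Dirichlet BCs are homogeneized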
self.As = [self.HFEngine.A(self.mu0, j + 1) for j in range(nAs)]
self.bs = [self.HFEngine.b(self.mu0, j, self.homogeneized)
for j in range(nbs)]
vbMng(self, "DEL", "Done computing Taylor blocks.", 7)
self._postInit()
@property
def polybasis(self):
"""Value of polybasis."""
return self._polybasis
@polybasis.setter
def polybasis(self, polybasis):
try:
polybasis = polybasis.upper().strip().replace(" ","")
if polybasis not in polybases:
raise RROMPyException("Sample type not recognized.")
self._polybasis = polybasis
except:
RROMPyWarning(("Prescribed polybasis not recognized. Overriding "
"to 'MONOMIAL'."))
self._polybasis = "MONOMIAL"
self._approxParameters["polybasis"] = self.polybasis
@property
def Delta(self):
"""Value of Delta."""
return self._Delta
@Delta.setter
def Delta(self, Delta):
if not np.isclose(Delta, np.floor(Delta)):
raise RROMPyException("Delta must be an integer.")
if Delta < 0:
RROMPyWarning(("Error estimator unreliable for Delta < 0. "
"Overloading of errorEstimator is suggested."))
else:
Deltamin = (max(self.HFEngine.nbs,
self.HFEngine.nAs * self.homogeneized)
- 1 - 1 * (self.HFEngine.nAs > 1))
if Delta < Deltamin:
RROMPyWarning(("Method may be unreliable for selected Delta. "
"Suggested minimal value of Delta: {}.").format(
Deltamin))
self._Delta = Delta
self._approxParameters["Delta"] = self.Delta
@property
def errorEstimatorKind(self):
"""Value of errorEstimatorKind."""
return self._errorEstimatorKind
@errorEstimatorKind.setter
def errorEstimatorKind(self, errorEstimatorKind):
errorEstimatorKind = errorEstimatorKind.upper()
if errorEstimatorKind not in self._allowedEstimatorKinds:
RROMPyWarning(("Error estimator kind not recognized. Overriding "
"to 'EXACT'."))
errorEstimatorKind = "EXACT"
self._errorEstimatorKind = errorEstimatorKind
self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind
@property
def nTestPoints(self):
"""Value of nTestPoints."""
return self._nTestPoints
@nTestPoints.setter
def nTestPoints(self, nTestPoints):
if nTestPoints <= np.abs(self.Delta):
RROMPyWarning(("nTestPoints must be at least abs(Delta) + 1. "
"Increasing value to abs(Delta) + 1."))
nTestPoints = np.abs(self.Delta) + 1
        if not np.isclose(nTestPoints, int(nTestPoints)):
            raise RROMPyException("nTestPoints must be an integer.")
        nTestPoints = int(nTestPoints)
if hasattr(self, "_nTestPoints") and self.nTestPoints is not None:
nTestPointsold = self.nTestPoints
else: nTestPointsold = -1
self._nTestPoints = nTestPoints
self._approxParameters["nTestPoints"] = self.nTestPoints
if nTestPointsold != self.nTestPoints:
self.resetSamples()
def _errorSamplingRatio(self, mus:Np1D, muRTest:Np1D,
muRTrain:Np1D) -> Np1D:
"""Scalar ratio in explicit error estimator."""
self.setupApprox()
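        # ratio |prod_j (mu - mu_j)| / |Q(mu)| between the nodal polynomial of
        # the training set and the rational denominator, at the test points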
testTile = np.tile(np.reshape(muRTest, (-1, 1)), [1, len(muRTrain)])
nodalVals = np.prod(testTile - np.reshape(muRTrain, (1, -1)), axis = 1)
denVals = self.trainedModel.getQVal(mus)
return np.abs(nodalVals / denVals)
def _RHSNorms(self, radiusb0:Np2D) -> Np1D:
"""High fidelity system RHS norms."""
self.assembleReducedResidualBlocks(full = False)
# 'ij,jk,ik->k', resbb, radiusb0, radiusb0.conj()
b0resb0 = np.sum(self.trainedModel.data.resbb.dot(radiusb0)
* radiusb0.conj(), axis = 0)
RHSnorms = np.power(np.abs(b0resb0), .5)
return RHSnorms
def _errorEstimatorBare(self) -> Np1D:
"""Bare residual-based error estimator."""
self.setupApprox()
self.assembleReducedResidualGramian(self.trainedModel.data.projMat)
pDom = self.trainedModel.data.P.domCoeff
LL = pDom.conj().dot(self.trainedModel.data.gramian.dot(pDom))
Adiag = self.As[0].diagonal()
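        # crude rescaling (assumed interpretation): scaleFactor^2 times the
        # mean squared diagonal entry of the first derivative block of A, as a
        # cheap surrogate for the norm of A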
LL = ((self.scaleFactor[0] * np.linalg.norm(Adiag)) ** 2. * LL
/ np.size(Adiag))
return np.power(np.abs(LL), .5)
def _errorEstimatorBasic(self, muTest:paramVal, ratioTest:complex) -> Np1D:
"""Basic residual-based error estimator."""
resmu = self.HFEngine.residual(self.trainedModel.getApprox(muTest),
muTest, self.homogeneized,
duality = False)[0]
return np.abs(self.estimatorNormEngine.norm(resmu) / ratioTest)
def _errorEstimatorExact(self, muRTrain:Np1D, vanderBase:Np2D) -> Np1D:
"""Exact residual-based error estimator."""
self.setupApprox()
self.assembleReducedResidualBlocks(full = True)
nAs = self.HFEngine.nAs - 1
nbs = max(self.HFEngine.nbs - 1, nAs * self.homogeneized)
delta = len(self.mus) - self.trainedModel.data.Q.deg[0]
nbsEff = max(0, nbs - delta)
        momentQ = np.zeros(nbsEff, dtype = complex)
        momentQu = np.zeros((len(self.mus), nAs), dtype = complex)
        radiusbTen = np.zeros((nbsEff, nbsEff, vanderBase.shape[1]),
                              dtype = complex)
        radiusATen = np.zeros((nAs, nAs, vanderBase.shape[1]),
                              dtype = complex)
if nbsEff > 0:
momentQ[0] = self.trainedModel.data.Q.domCoeff
radiusbTen[0, :, :] = vanderBase[: nbsEff, :]
momentQu[:, 0] = self.trainedModel.data.P.domCoeff
radiusATen[0, :, :] = vanderBase[: nAs, :]
Qvals = self.trainedModel.getQVal(self.mus)
for k in range(1, max(nAs, nbs * (nbsEff > 0))):
Qvals = Qvals * muRTrain
if k > delta and k < nbs:
momentQ[k - delta] = self._fitinv.dot(Qvals)
radiusbTen[k - delta, k :, :] = (
radiusbTen[0, : delta - k, :])
if k < nAs:
momentQu[:, k] = Qvals * self._fitinv
radiusATen[k, k :, :] = radiusATen[0, : - k, :]
if self.POD and nAs > 1:
momentQu[:, 1 :] = self.samplingEngine.RPOD.dot(
momentQu[:, 1 :])
radiusA = np.tensordot(momentQu, radiusATen, 1)
if nbsEff > 0:
radiusb = np.tensordot(momentQ, radiusbTen, 1)
# 'ij,jk,ik->k', resbb, radiusb, radiusb.conj()
ff = np.sum(self.trainedModel.data.resbb[delta + 1 :, delta + 1 :]\
.dot(radiusb) * radiusb.conj(), axis = 0)
# 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj()
Lf = np.sum(np.tensordot(
self.trainedModel.data.resAb[delta :, :, :], radiusA, 2)
* radiusb.conj(), axis = 0)
else:
ff, Lf = 0., 0.
# 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj()
LL = np.sum(np.tensordot(self.trainedModel.data.resAA, radiusA, 2)
* radiusA.conj(), axis = (0, 1))
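        # squared residual norm ||b - A uApp||^2 = (b, b) - 2 Re (A uApp, b)
        # + (A uApp, A uApp), assembled from the reduced blocks resbb, resAb
        # and resAA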
return np.power(np.abs(ff - 2. * np.real(Lf) + LL), .5)
def errorEstimator(self, mus:Np1D) -> Np1D:
"""Standard residual-based error estimator."""
self.setupApprox()
if (np.any(np.isnan(self.trainedModel.data.P.domCoeff))
or np.any(np.isinf(self.trainedModel.data.P.domCoeff))):
err = np.empty(len(mus))
err[:] = np.inf
return err
nAs = self.HFEngine.nAs - 1
nbs = max(self.HFEngine.nbs - 1, nAs * self.homogeneized)
muRTest = self.centerNormalize(mus)(0)
muRTrain = self.centerNormalize(self.mus)(0)
samplingRatio = self._errorSamplingRatio(mus, muRTest, muRTrain)
vanderBase = np.polynomial.polynomial.polyvander(muRTest,
max(nAs, nbs)).T
RHSnorms = self._RHSNorms(vanderBase[: nbs + 1, :])
if self.errorEstimatorKind == "BARE":
jOpt = self._errorEstimatorBare()
elif self.errorEstimatorKind == "BASIC":
idx_muTestSample = np.argmax(samplingRatio)
jOpt = self._errorEstimatorBasic(mus[idx_muTestSample],
samplingRatio[idx_muTestSample])
else: #if self.errorEstimatorKind == "EXACT":
jOpt = self._errorEstimatorExact(muRTrain, vanderBase[: -1, :])
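        # error estimate: kind-specific factor jOpt, scaled by the nodal-over-
        # denominator sampling ratio and normalized by the RHS norms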
return jOpt * samplingRatio / RHSnorms
def setupApprox(self, plotEst : bool = False):
"""
Compute Pade' interpolant.
SVD-based robust eigenvalue management.
"""
if self.checkComputedApprox():
return
RROMPyAssert(self._mode, message = "Cannot setup approximant.")
vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5)
self.computeScaleFactor()
self.greedy(plotEst)
self._S = len(self.mus)
self._N, self._M, self._E = [self._S - 1] * 3
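        # degrees follow the greedy sample count: M = N = E = S - 1, then
        # Delta = M - N is enforced by lowering M (Delta < 0) or N (Delta >= 0)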
if self.Delta < 0:
self._M += self.Delta
else:
self._N -= self.Delta
if self.trainedModel is None:
self.trainedModel = tModel()
self.trainedModel.verbosity = self.verbosity
self.trainedModel.timestamp = self.timestamp
data = TrainedModelData(self.trainedModel.name(), self.mu0,
self.samplingEngine.samples,
self.HFEngine.rescalingExp)
data.scaleFactor = self.scaleFactor
data.mus = copy(self.mus)
self.trainedModel.data = data
else:
self.trainedModel = self.trainedModel
self.trainedModel.data.projMat = copy(self.samplingEngine.samples)
self.trainedModel.data.mus = copy(self.mus)
if min(self.M, self.N) < 0:
vbMng(self, "MAIN", "Minimal sample size not achieved.", 5)
            Q = np.empty(tuple([max(self.N, 0) + 1] * self.npar),
                         dtype = complex)
            P = np.empty(tuple([max(self.M, 0) + 1] * self.npar)
                         + (len(self.mus),), dtype = complex)
Q[:] = np.nan
P[:] = np.nan
self.trainedModel.data.Q = copy(Q)
self.trainedModel.data.P = copy(P)
self.trainedModel.data.approxParameters = copy(
self.approxParameters)
vbMng(self, "DEL", "Aborting computation of approximant.", 5)
return
if self.N > 0:
Q = self._setupDenominator()
else:
            Q = np.ones(tuple([1] * self.npar), dtype = complex)
self.trainedModel.data.Q = copy(Q)
self.trainedModel.data.P = copy(self._setupNumerator())
self.trainedModel.data.approxParameters = copy(self.approxParameters)
vbMng(self, "DEL", "Done setting up approximant.", 5)
def assembleReducedResidualBlocks(self, full : bool = False):
"""Build affine blocks of reduced linear system through projections."""
scaling = self.trainedModel.data.scaleFactor[0]
self.assembleReducedResidualBlocksbb(self.bs, scaling)
if full:
pMat = self.trainedModel.data.projMat
self.assembleReducedResidualBlocksAb(self.As, self.bs[1 :],
pMat, scaling)
self.assembleReducedResidualBlocksAA(self.As, pMat, scaling)
