Page MenuHomec4science

rational_pade.py
No OneTemporary

File Metadata

Created
Mon, Nov 4, 07:57

rational_pade.py

# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy as copy
import numpy as np
from rrompy.reduction_methods.base import checkRobustTolerance
from rrompy.reduction_methods.trained_model import (TrainedModelData,
TrainedModelPade as tModel)
from .generic_centered_approximant import GenericCenteredApproximant
from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, DictAny, HFEng,
paramVal, paramList, sampList)
from rrompy.utilities.base import verbosityDepth
from rrompy.utilities.poly_fitting.polynomial import (nextDerivativeIndices,
hashDerivativeToIdx as hashD, hashIdxToDerivative as hashI)
from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
RROMPyWarning)
__all__ = ['RationalPade']
class RationalPade(GenericCenteredApproximant):
    """
    ROM single-point fast Pade' approximant computation for parametric
        problems.

    Args:
        HFEngine: HF problem solver.
        mu0(optional): Default parameter. Defaults to 0.
        approxParameters(optional): Dictionary containing values for main
            parameters of approximant. Recognized keys are:
            - 'POD': whether to compute POD of snapshots; defaults to True;
            - 'S': total number of samples current approximant relies upon;
            - 'M': degree of Pade' approximant numerator; defaults to 0;
            - 'N': degree of Pade' approximant denominator; defaults to 0;
            - 'robustTol': tolerance for robust Pade' denominator management;
                defaults to 0.
            Defaults to empty dict.
        homogeneized(optional): Whether to homogeneize Dirichlet BCs. Defaults
            to False.
        verbosity(optional): Verbosity level. Defaults to 10.

    Attributes:
        HFEngine: HF problem solver.
        mu0: Default parameter.
        homogeneized: Whether to homogeneize Dirichlet BCs.
        approxParameters: Dictionary containing values for main parameters of
            approximant. Recognized keys are in parameterList.
        parameterListSoft: Recognized keys of soft approximant parameters:
            - 'POD': whether to compute POD of snapshots;
            - 'M': degree of Pade' approximant numerator;
            - 'N': degree of Pade' approximant denominator;
            - 'robustTol': tolerance for robust Pade' denominator management.
        parameterListCritical: Recognized keys of critical approximant
            parameters:
            - 'S': total number of samples current approximant relies upon.
        POD: Whether to compute QR factorization of derivatives.
        S: Number of solution snapshots over which current approximant is
            based upon.
        M: Numerator degree of approximant.
        N: Denominator degree of approximant.
        robustTol: Tolerance for robust Pade' denominator management.
        E: Complete derivative depth level of S.
        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
            sampleList.
        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
            solution(s) as parameterList.
        uAppReduced: Reduced approximate solution(s) with parameter(s)
            lastSolvedApp as sampleList.
        lastSolvedAppReduced: Parameter(s) corresponding to last computed
            reduced approximate solution(s) as parameterList.
        uApp: Approximate solution(s) with parameter(s) lastSolvedApp as
            sampleList.
        lastSolvedApp: Parameter(s) corresponding to last computed approximate
            solution(s) as parameterList.
        G: Square Numpy 2D vector of size (N+1) corresponding to Pade'
            denominator matrix (see paper).
    """
def __init__(self, HFEngine:HFEng, mu0 : paramVal = None,
             approxParameters : DictAny = {}, homogeneized : bool = False,
             verbosity : int = 10, timestamp : bool = True):
    # NOTE(review): mutable default for approxParameters is shared across
    # calls; presumably the base class copies it before mutating -- confirm.
    # _preInit/_postInit bracket parameter registration: the extra keys
    # ("M", "N", "robustTol", defaulting to 0) must be declared before the
    # base-class constructor parses approxParameters.
    self._preInit()
    self._addParametersToList(["M", "N", "robustTol"], [0, 0, 0])
    super().__init__(HFEngine = HFEngine, mu0 = mu0,
                     approxParameters = approxParameters,
                     homogeneized = homogeneized,
                     verbosity = verbosity, timestamp = timestamp)
    self._postInit()
@property
def M(self):
    """Numerator degree M of the Pade' approximant."""
    return self._M
@M.setter
def M(self, M):
    if M < 0:
        raise RROMPyException("M must be non-negative.")
    self._M = M
    self._approxParameters["M"] = self._M
    # Clamp against the derivative depth E when available; re-assigning
    # through the property re-records the clamped value.
    if hasattr(self, "E") and self.E < self._M:
        RROMPyWarning("Prescribed S is too small. Decreasing M.")
        self.M = self.E
@property
def N(self):
    """Denominator degree N of the Pade' approximant."""
    return self._N
@N.setter
def N(self, N):
    if N < 0:
        raise RROMPyException("N must be non-negative.")
    self._N = N
    self._approxParameters["N"] = self._N
    # Clamp against the derivative depth E when available; re-assigning
    # through the property re-records the clamped value.
    if hasattr(self, "E") and self.E < self._N:
        RROMPyWarning("Prescribed S is too small. Decreasing N.")
        self.N = self.E
@property
def robustTol(self):
    """Tolerance for robust Pade' denominator management."""
    return self._robustTol
@robustTol.setter
def robustTol(self, robustTol):
    # Negative tolerances are meaningless: warn and fall back to 0.
    if robustTol < 0.:
        RROMPyWarning(("Overriding prescribed negative robustness "
                       "tolerance to 0."))
        robustTol = 0.
    self._robustTol = robustTol
    self._approxParameters["robustTol"] = self._robustTol
@property
def S(self):
    """Value of S."""
    return self._S
@S.setter
def S(self, S):
    # Validation and storage are delegated to the parent property setter.
    GenericCenteredApproximant.S.fset(self, S)
    # E = total derivative order reachable with prod(S) samples
    # (sum of the multi-index of the prod(S)-th derivative, minus 1).
    self.E = np.sum(hashI(np.prod(self.S), self.npar)) - 1
    # Re-run the M/N setters so they get re-clamped against the new E.
    if hasattr(self, "_M"): self.M = self.M
    if hasattr(self, "_N"): self.N = self.N
def _setupDenominator(self):
    """
    Compute Pade' denominator.

    Returns:
        Numpy array of denominator coefficients, one axis per parameter,
        each axis of length N + 1, indexed by multi-derivative.
    """
    RROMPyAssert(self._mode, message = "Cannot setup denominator.")
    if self.verbosity >= 7:
        verbosityDepth("INIT", "Starting computation of denominator.",
                       timestamp = self.timestamp)
    # Shrink N until the gramian spectrum passes the robustness check.
    while self.N > 0:
        if self.POD:
            ev, eV = self.findeveVGQR()
        else:
            ev, eV = self.findeveVGExplicit()
        newParameters = checkRobustTolerance(ev, self.N, self.robustTol)
        if not newParameters:
            break
        self.approxParameters = newParameters
    if self.N <= 0:
        # Degenerate case: constant (unit) denominator.
        eV = np.ones((1, 1))
    # np.complex was removed in NumPy 1.24; builtin complex is equivalent.
    q = np.zeros(tuple([self.N + 1] * self.npar), dtype = complex)
    for j in range(eV.shape[0]):
        q[tuple(hashI(j, self.npar))] = eV[j, 0]
    if self.verbosity >= 7:
        verbosityDepth("DEL", "Done computing denominator.",
                       timestamp = self.timestamp)
    return q
def _setupNumerator(self):
    """
    Compute Pade' numerator.

    Returns:
        Rescaled, transposed numerator coefficient array (samples axis
        first after the transpose).
    """
    RROMPyAssert(self._mode, message = "Cannot setup numerator.")
    if self.verbosity >= 7:
        verbosityDepth("INIT", "Starting computation of numerator.",
                       timestamp = self.timestamp)
    # np.complex was removed in NumPy 1.24; builtin complex is equivalent.
    P = np.zeros(tuple([self.M + 1] * self.npar) + (np.prod(self.S),),
                 dtype = complex)
    mEnd = hashD([self.M + 1] + [0] * (self.npar - 1))
    nEnd = hashD([self.N + 1] + [0] * (self.npar - 1))
    mnIdxs = nextDerivativeIndices([], self.npar, max(mEnd, nEnd))
    for j in range(mEnd):
        mIdx = mnIdxs[j]
        for n in range(nEnd):
            # Convolution of denominator with Taylor data: keep only
            # non-negative difference multi-indices.
            diffIdx = [x - y for (x, y) in zip(mIdx, mnIdxs[n])]
            if all([x >= 0 for x in diffIdx]):
                P[tuple(mIdx) + (hashD(diffIdx),)] = (
                                self.trainedModel.data.Q[tuple(mnIdxs[n])])
    if self.verbosity >= 7:
        # Bugfix: the INIT message above had no matching DEL, leaving the
        # verbosity depth unbalanced (cf. _setupDenominator).
        verbosityDepth("DEL", "Done computing numerator.",
                       timestamp = self.timestamp)
    return self.rescaleByParameter(P).T
def setupApprox(self):
    """
    Compute Pade' approximant. SVD-based robust eigenvalue management.

    Populates self.trainedModel with denominator Q, numerator P,
    projection matrix, scale factors, and the parameters used.
    """
    if self.checkComputedApprox():
        return
    RROMPyAssert(self._mode, message = "Cannot setup approximant.")
    if self.verbosity >= 5:
        verbosityDepth("INIT", "Setting up {}.".format(self.name()),
                       timestamp = self.timestamp)
    self.computeDerivatives()
    if self.trainedModel is None:
        self.trainedModel = tModel()
        self.trainedModel.verbosity = self.verbosity
        self.trainedModel.timestamp = self.timestamp
        data = TrainedModelData(self.trainedModel.name(), self.mu0,
                                None, self.HFEngine.rescalingExp)
        data.polytype = "MONOMIAL"
        self.trainedModel.data = data
    else:
        # NOTE(review): looks like a no-op; presumably trainedModel is a
        # property whose setter refreshes internal state -- confirm before
        # removing.
        self.trainedModel = self.trainedModel
    if self.N > 0:
        Q = self._setupDenominator()
    else:
        # Constant denominator: scale factors are still needed for the
        # numerator rescaling performed in _setupNumerator.
        self.setScaleParameter()
        # np.complex was removed in NumPy 1.24; builtin complex works.
        Q = np.ones(1, dtype = complex)
    self.trainedModel.data.Q = copy(Q)
    self.trainedModel.data.scaleFactor = self.scaleFactor
    self.trainedModel.data.projMat = copy(self.samplingEngine.samples(
                                              list(range(np.prod(self.S)))))
    P = self._setupNumerator()
    if self.POD:
        # Map numerator coefficients from the POD basis back to samples.
        P = np.tensordot(self.samplingEngine.RPOD, P, axes = ([-1], [0]))
    self.trainedModel.data.P = copy(P)
    self.trainedModel.data.approxParameters = copy(self.approxParameters)
    if self.verbosity >= 5:
        verbosityDepth("DEL", "Done setting up approximant.",
                       timestamp = self.timestamp)
def setScaleParameter(self):
    """
    Compute parameter for rescaling.

    Fits the exponential growth of the derivative norms along each
    parameter direction and stores the per-direction factors in
    self.scaleFactor (mutates state; returns None -- the previous
    ``-> Np2D`` annotation was incorrect).
    """
    RROMPyAssert(self._mode, message = "Cannot compute rescaling factor.")
    self.computeDerivatives()
    self.scaleFactor = [1.] * self.npar
    for d in range(self.npar):
        hashesd = [0]
        for n in range(1, self.E + 1):
            # Bugfix: was [0] * (d - 1), which for d >= 1 produced a
            # multi-index of length npar - 1 with n on the wrong
            # coordinate. The n-th pure derivative along direction d is
            # [0]*d + [n] + [0]*(npar - d - 1).
            hashesd += [hashD([0] * d + [n]
                              + [0] * (self.npar - d - 1))]
        if self.POD:
            # Squared norms of the selected derivatives from the R factor.
            Rd = self.samplingEngine.RPOD[: hashesd[-1] + 1, hashesd]
            Gd = np.diag(Rd.T.conj().dot(Rd))
        else:
            DerEd = self.samplingEngine.samples(hashesd)
            Gd = self.HFEngine.norm(DerEd)
        # Linear fit of log-norms; slope gives the geometric growth rate.
        scaleCoeffs = np.polyfit(np.arange(len(Gd)), np.log(Gd), 1)
        self.scaleFactor[d] = np.exp(- scaleCoeffs[0] / 2.)
def rescaleByParameter(self, R:Np2D) -> Np2D:
    """
    Rescale by scale parameter.

    Args:
        R: Matrix whose columns need rescaling.

    Returns:
        Rescaled matrix.
    """
    derIdxs = nextDerivativeIndices([], self.npar, R.shape[-1])
    scaled = copy(R)
    # Column j corresponds to derivative multi-index derIdxs[j]; scale it
    # by prod_d scaleFactor[d] ** derIdxs[j][d].
    for col, derIdx in enumerate(derIdxs):
        factor = 1.
        for base, exponent in zip(self.scaleFactor, derIdx):
            factor *= base ** exponent
        scaled[..., col] *= factor
    return scaled
def buildG(self):
    """
    Assemble Pade' denominator matrix.

    Accumulates into self.G (size nEnd x nEnd) the gramian contributions
    of all derivative multi-indices of total order exactly E.
    """
    RROMPyAssert(self._mode, message = "Cannot compute G matrix.")
    self.computeDerivatives()
    if self.verbosity >= 10:
        verbosityDepth("INIT", "Building gramian matrix.",
                       timestamp = self.timestamp)
    # Hash range [eStart, eEnd) covers all multi-indices of total order E.
    eStart = hashD([self.E] + [0] * (self.npar - 1))
    eEnd = hashD([self.E + 1] + [0] * (self.npar - 1))
    eIdxs = [hashI(e, self.npar) for e in range(eStart, eEnd)]
    nEnd = hashD([self.N + 1] + [0] * (self.npar - 1))
    nIdxs = nextDerivativeIndices([], self.npar, nEnd)
    self.setScaleParameter()
    if self.POD:
        RPODE = self.rescaleByParameter(self.samplingEngine.RPOD[: eEnd,
                                                                 : eEnd])
    else:
        DerE = self.rescaleByParameter(self.samplingEngine.samples(
                                                  list(range(eEnd))).data)
    # np.complex was removed in NumPy 1.24; builtin complex is equivalent.
    self.G = np.zeros((nEnd, nEnd), dtype = complex)
    for eIdx in eIdxs:
        nLoc = []
        samplesIdxs = []
        # Keep only denominator indices not exceeding this derivative
        # index componentwise.
        for n, nIdx in enumerate(nIdxs):
            diffIdx = [x - y for (x, y) in zip(eIdx, nIdx)]
            if all([x >= 0 for x in diffIdx]):
                nLoc += [n]
                samplesIdxs += [hashD(diffIdx)]
        if self.POD:
            RPODELoc = RPODE[: samplesIdxs[-1] + 1, samplesIdxs]
            GLoc = RPODELoc.T.conj().dot(RPODELoc)
        else:
            DerELoc = DerE[:, samplesIdxs]
            GLoc = self.HFEngine.innerProduct(DerELoc, DerELoc)
        # Scatter-add the local gramian into the rows/columns in nLoc.
        for j in range(len(nLoc)):
            self.G[nLoc[j], nLoc] = self.G[nLoc[j], nLoc] + GLoc[j]
    if self.verbosity >= 10:
        verbosityDepth("DEL", "Done building gramian.",
                       timestamp = self.timestamp)
def findeveVGExplicit(self) -> Tuple[Np1D, Np2D]:
    """
    Compute explicitly eigenvalues and eigenvectors of Pade' denominator
    matrix.

    Returns:
        Eigenvalues in ascending order and corresponding eigenvector
        matrix (as returned by np.linalg.eigh on self.G).
    """
    RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.")
    self.buildG()
    if self.verbosity >= 7:
        verbosityDepth("INIT",
                       "Solving eigenvalue problem for gramian matrix.",
                       timestamp = self.timestamp)
    ev, eV = np.linalg.eigh(self.G)
    if self.verbosity >= 5:
        # Condition number report is best-effort only; never bare except
        # (it would also swallow KeyboardInterrupt/SystemExit).
        try:
            condev = ev[-1] / ev[0]
        except Exception:
            condev = np.inf
        verbosityDepth("MAIN", ("Solved eigenvalue problem of size {} "
                                "with condition number {:.4e}.").format(
                                                          self.G.shape[0],
                                                          condev),
                       timestamp = self.timestamp)
    if self.verbosity >= 7:
        verbosityDepth("DEL", "Done solving eigenvalue problem.",
                       timestamp = self.timestamp)
    return ev, eV
def findeveVGQR(self) -> Tuple[Np1D, Np2D]:
    """
    Compute eigenvalues and eigenvectors of Pade' denominator matrix
    through SVD of R factor.

    Returns:
        Eigenvalues in ascending order and corresponding eigenvector
        matrix.
    """
    RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.")
    RROMPyAssert(self.POD, True, "POD value")
    self.computeDerivatives()
    # Hash range [eStart, eEnd) covers all multi-indices of total order E.
    eStart = hashD([self.E] + [0] * (self.npar - 1))
    eEnd = hashD([self.E + 1] + [0] * (self.npar - 1))
    eIdxs = [hashI(e, self.npar) for e in range(eStart, eEnd)]
    nEnd = hashD([self.N + 1] + [0] * (self.npar - 1))
    nIdxs = nextDerivativeIndices([], self.npar, nEnd)
    self.setScaleParameter()
    RPODE = self.rescaleByParameter(self.samplingEngine.RPOD[: eEnd,
                                                             : eEnd])
    # Stack one R-factor slice per order-E derivative so a single SVD
    # yields the square root of the gramian.
    # np.complex was removed in NumPy 1.24; builtin complex is equivalent.
    Rstack = np.zeros((RPODE.shape[0] * (eEnd - eStart), nEnd),
                      dtype = complex)
    for k, eIdx in enumerate(eIdxs):
        nLoc = []
        samplesIdxs = []
        # Keep only denominator indices not exceeding this derivative
        # index componentwise.
        for n, nIdx in enumerate(nIdxs):
            diffIdx = [x - y for (x, y) in zip(eIdx, nIdx)]
            if all([x >= 0 for x in diffIdx]):
                nLoc += [n]
                samplesIdxs += [hashD(diffIdx)]
        RPODELoc = RPODE[:, samplesIdxs]
        for j in range(len(nLoc)):
            Rstack[k * RPODE.shape[0] : (k + 1) * RPODE.shape[0],
                   nLoc[j]] = RPODELoc[:, j]
    if self.verbosity >= 7:
        verbosityDepth("INIT", ("Solving svd for square root of "
                                "gramian matrix."),
                       timestamp = self.timestamp)
    sizeI = Rstack.shape
    _, s, V = np.linalg.svd(Rstack, full_matrices = False)
    # Reverse so singular values/vectors are in ascending order,
    # matching findeveVGExplicit's eigh convention.
    eV = V[::-1, :].T.conj()
    if self.verbosity >= 5:
        # Condition number report is best-effort only; never bare except
        # (it would also swallow KeyboardInterrupt/SystemExit).
        try:
            condev = s[0] / s[-1]
        except Exception:
            condev = np.inf
        verbosityDepth("MAIN", ("Solved svd problem of size {} x {} with "
                                "condition number {:.4e}.").format(*sizeI,
                                                                   condev),
                       timestamp = self.timestamp)
    if self.verbosity >= 7:
        verbosityDepth("DEL", "Done solving eigenvalue problem.",
                       timestamp = self.timestamp)
    return s[::-1], eV
def centerNormalize(self, mu : paramList = [],
                    mu0 : paramVal = None) -> paramList:
    """
    Compute normalized parameter to be plugged into approximant.

    Args:
        mu: Parameter(s) 1.
        mu0: Parameter(s) 2. If None, set to self.mu0.

    Returns:
        Normalized parameter.
    """
    # The trained model owns the centering/scaling data, so delegate.
    normalized = self.trainedModel.centerNormalize(mu, mu0)
    return normalized
def getResidues(self) -> sampList:
    """
    Obtain approximant residues.

    Returns:
        Matrix with residues as columns.
    """
    # Residues are computed and stored by the trained model; delegate.
    residues = self.trainedModel.getResidues()
    return residues

Event Timeline