approximant_taylor_pade.py

# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from copy import copy
import numpy as np
from rrompy.reduction_methods.base import checkRobustTolerance
from rrompy.reduction_methods.trained_model import (TrainedModelData,
TrainedModelPade as tModel)
from .generic_approximant_taylor import GenericApproximantTaylor
from rrompy.sampling.base.pod_engine import PODEngine
from rrompy.utilities.base.types import Np1D, Np2D, Tuple, DictAny, HFEng
from rrompy.utilities.base import verbosityDepth, purgeDict
from rrompy.utilities.exception_manager import (RROMPyException, modeAssert,
RROMPyWarning)
__all__ = ['ApproximantTaylorPade']
class ApproximantTaylorPade(GenericApproximantTaylor):
"""
ROM single-point fast Pade' approximant computation for parametric
problems.
Args:
HFEngine: HF problem solver.
mu0(optional): Default parameter. Defaults to 0.
approxParameters(optional): Dictionary containing values for main
parameters of approximant. Recognized keys are:
- 'POD': whether to compute POD of snapshots; defaults to True;
- 'rho': weight for computation of original Pade' approximant;
defaults to np.inf, i.e. fast approximant;
- 'M': degree of Pade' approximant numerator; defaults to 0;
- 'N': degree of Pade' approximant denominator; defaults to 0;
- 'E': total number of derivatives current approximant relies upon;
defaults to 1;
- 'robustTol': tolerance for robust Pade' denominator management;
defaults to 0;
- 'sampleType': label of sampling type; available values are:
- 'ARNOLDI': orthogonalization of solution derivatives through
Arnoldi algorithm;
- 'KRYLOV': standard computation of solution derivatives.
Defaults to 'KRYLOV'.
Defaults to empty dict.
homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False.
        verbosity(optional): Verbosity level. Defaults to 10.
        timestamp(optional): Whether to add a timestamp to verbosity output.
            Defaults to True.
Attributes:
HFEngine: HF problem solver.
mu0: Default parameter.
homogeneized: Whether to homogeneize Dirichlet BCs.
approxParameters: Dictionary containing values for main parameters of
approximant. Recognized keys are in parameterList.
parameterList: Recognized keys of approximant parameters:
- 'POD': whether to compute POD of snapshots;
- 'rho': weight for computation of original Pade' approximant;
- 'M': degree of Pade' approximant numerator;
- 'N': degree of Pade' approximant denominator;
- 'E': total number of derivatives current approximant relies upon;
- 'robustTol': tolerance for robust Pade' denominator management;
- 'sampleType': label of sampling type.
POD: Whether to compute QR factorization of derivatives.
rho: Weight of approximant.
M: Numerator degree of approximant.
N: Denominator degree of approximant.
        E: Number of solution derivatives the current approximant is based
            upon.
robustTol: Tolerance for robust Pade' denominator management.
sampleType: Label of sampling type.
initialHFData: HF problem initial data.
uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
complex vector.
lastSolvedHF: Wavenumber corresponding to last computed high fidelity
solution.
        G: Square Numpy 2D matrix of size N + 1 corresponding to Pade'
            denominator matrix (see paper).
uApp: Last evaluated approximant as numpy complex vector.
"""
def __init__(self, HFEngine:HFEng, mu0 : complex = 0,
approxParameters : DictAny = {}, homogeneized : bool = False,
verbosity : int = 10, timestamp : bool = True):
self._preInit()
self._addParametersToList(["M", "N", "robustTol", "rho"])
super().__init__(HFEngine = HFEngine, mu0 = mu0,
approxParameters = approxParameters,
homogeneized = homogeneized,
verbosity = verbosity, timestamp = timestamp)
self._postInit()
@property
def approxParameters(self):
"""Value of approximant parameters."""
return self._approxParameters
@approxParameters.setter
def approxParameters(self, approxParams):
approxParameters = purgeDict(approxParams, self.parameterList,
dictname = self.name() + ".approxParameters",
baselevel = 1)
approxParametersCopy = purgeDict(approxParameters,
["M", "N", "robustTol", "rho"],
True, True, baselevel = 1)
keyList = list(approxParameters.keys())
if "rho" in keyList:
self._rho = approxParameters["rho"]
elif not hasattr(self, "_rho") or self.rho is None:
self._rho = np.inf
GenericApproximantTaylor.approxParameters.fset(self,
approxParametersCopy)
self.rho = self._rho
if "robustTol" in keyList:
self.robustTol = approxParameters["robustTol"]
elif not hasattr(self, "_robustTol") or self._robustTol is None:
self.robustTol = 0
self._ignoreParWarnings = True
if "M" in keyList:
self.M = approxParameters["M"]
elif hasattr(self, "_M") and self._M is not None:
self.M = self.M
else:
self.M = 0
del self._ignoreParWarnings
if "N" in keyList:
self.N = approxParameters["N"]
elif hasattr(self, "_N") and self._N is not None:
self.N = self.N
else:
self.N = 0
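    # Note on the setter above: each parameter takes its value from the
    # incoming dictionary if present, otherwise from its previous value,
    # otherwise from its default (rho = inf, robustTol = 0, M = N = 0). The
    # M/N consistency check is suppressed while M is assigned and only runs
    # when N is assigned, so that checkMNE sees both degrees (and E, set by
    # the parent setter) in their final state.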
@property
def rho(self):
"""Value of rho."""
return self._rho
@rho.setter
def rho(self, rho):
self._rho = np.abs(rho)
self._approxParameters["rho"] = self.rho
@property
def M(self):
"""Value of M. Its assignment may change E."""
return self._M
@M.setter
def M(self, M):
if M < 0: raise RROMPyException("M must be non-negative.")
self._M = M
self._approxParameters["M"] = self.M
if not hasattr(self, "_ignoreParWarnings"):
self.checkMNE()
@property
def N(self):
"""Value of N. Its assignment may change E."""
return self._N
@N.setter
def N(self, N):
if N < 0: raise RROMPyException("N must be non-negative.")
self._N = N
self._approxParameters["N"] = self.N
if not hasattr(self, "_ignoreParWarnings"):
self.checkMNE()
def checkMNE(self):
"""Check consistency of M, N, and E."""
if not hasattr(self, "_E") or self.E is None: return
M = self.M if (hasattr(self, "_M") and self.M is not None) else 0
N = self.N if (hasattr(self, "_N") and self.N is not None) else 0
msg = "max(M, N)" if self.rho == np.inf else "M + N"
bound = eval(msg)
if self.E < bound:
RROMPyWarning(("Prescribed E is too small. Updating E to "
"{}.").format(msg))
self.E = bound
del M, N
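    # Worked example of the rule enforced by checkMNE: with the default
    # rho = inf (fast Pade') the approximant needs E >= max(M, N) solution
    # derivatives, while a finite rho (weighted Pade') needs E >= M + N.
    # So M = 3, N = 2 requires E >= 3 in the first case and E >= 5 in the
    # second; a smaller prescribed E is bumped up with a warning.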
@property
def robustTol(self):
"""Value of tolerance for robust Pade' denominator management."""
return self._robustTol
@robustTol.setter
def robustTol(self, robustTol):
if robustTol < 0.:
RROMPyWarning(("Overriding prescribed negative robustness "
"tolerance to 0."))
robustTol = 0.
self._robustTol = robustTol
self._approxParameters["robustTol"] = self.robustTol
@property
def E(self):
"""Value of E."""
return self._E
@E.setter
def E(self, E):
GenericApproximantTaylor.E.fset(self, E)
self.checkMNE()
def setupApprox(self):
"""
Compute Pade' approximant. SVD-based robust eigenvalue management.
"""
if self.checkComputedApprox():
return
if self.verbosity >= 5:
verbosityDepth("INIT", "Setting up {}.". format(self.name()),
timestamp = self.timestamp)
self.computeDerivatives()
if self.N > 0:
if self.verbosity >= 7:
verbosityDepth("INIT", "Starting computation of denominator.",
timestamp = self.timestamp)
while self.N > 0:
if self.POD:
ev, eV = self.findeveVGQR()
else:
ev, eV = self.findeveVGExplicit()
newParameters = checkRobustTolerance(ev, self.E,
self.robustTol)
if not newParameters:
break
self.approxParameters = newParameters
if self.N <= 0:
eV = np.ones((1, 1))
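            # The denominator coefficients are taken from the eigenvector
            # (with POD: right singular vector) associated with the smallest
            # eigenvalue/singular value of the Gramian, reversed into the
            # monomial coefficient ordering used by the trained model
            # (polytype "MONOMIAL").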
Q = eV[::-1, 0]
if self.verbosity >= 7:
verbosityDepth("DEL", "Done computing denominator.",
timestamp = self.timestamp)
else:
Q = np.ones(1, dtype = np.complex)
if self.verbosity >= 7:
verbosityDepth("INIT", "Starting computation of numerator.",
timestamp = self.timestamp)
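        # Numerator assembly: row i of P spreads the denominator coefficients
        # so that the coefficient of mu^j in the numerator is
        # sum_k Q[k] * u_{j-k}, k = 0, ..., min(j, N), i.e. the degree-M
        # truncation of Q(mu) times the Taylor expansion of the solution,
        # with u_i the i-th (possibly orthogonalized) solution derivative.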
P = np.zeros((self.E + 1, self.M + 1), dtype = np.complex)
for i in range(self.E + 1):
l = min(self.M + 1, i + self.N + 1)
P[i, i : l] = Q[: l - i]
P = self.rescaleParameter(P.T).T
if self.sampleType == "ARNOLDI":
P = self.samplingEngine.RArnoldi.dot(P)
if self.verbosity >= 7:
verbosityDepth("DEL", "Done computing numerator.",
timestamp = self.timestamp)
if self.trainedModel is None:
self.trainedModel = tModel()
self.trainedModel.verbosity = self.verbosity
self.trainedModel.timestamp = self.timestamp
data = TrainedModelData(self.trainedModel.name(), self.mu0,
self.samplingEngine.samples[:,:self.E + 1],
self.HFEngine.rescalingExp)
data.P, data.Q = np.copy(P), np.copy(Q)
data.polytype = "MONOMIAL"
data.scaleFactor = self.scaleFactor
self.trainedModel.data = data
else:
self.trainedModel.data.P = np.copy(P)
self.trainedModel.data.Q = np.copy(Q)
self.trainedModel.data.projMat = (
self.samplingEngine.samples[:, : self.E + 1])
self.trainedModel.data.approxParameters = copy(self.approxParameters)
if self.verbosity >= 5:
verbosityDepth("DEL", "Done setting up approximant.",
timestamp = self.timestamp)
def rescaleParameter(self, R:Np2D, A : Np2D = None,
exponent : float = 1.) -> Np2D:
"""
Prepare parameter rescaling.
Args:
R: Matrix whose columns need rescaling.
A(optional): Matrix whose diagonal defines scaling factor. If None,
previous value of scaleFactor is used. Defaults to None.
exponent(optional): Exponent of scaling factor in matrix diagonal.
Defaults to 1.
Returns:
Rescaled matrix.
"""
modeAssert(self._mode, message = "Cannot compute rescaling factor.")
if A is not None:
aDiag = np.diag(A)
scaleCoeffs = np.polyfit(np.arange(A.shape[1]),
np.log(aDiag), 1)
self.scaleFactor = np.exp(- scaleCoeffs[0] / exponent)
return np.multiply(R, np.power(self.scaleFactor,np.arange(R.shape[1])))
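    # Rescaling rationale (illustrative sketch): if the diagonal of A grows
    # roughly geometrically, a_kk ~ c * r ** (exponent * k), the linear fit
    # of log(a_kk) has slope exponent * log(r), so scaleFactor ~ 1 / r and
    # multiplying column k of R by scaleFactor ** k equilibrates the column
    # magnitudes before the eigenvalue/SVD solves. For instance:
    #
    #     a = 3. ** (2. * np.arange(5))                  # a_kk = 9 ** k
    #     slope = np.polyfit(np.arange(5), np.log(a), 1)[0]
    #     np.exp(- slope / 2.)                           # approximately 1 / 3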
def buildG(self):
"""Assemble Pade' denominator matrix."""
modeAssert(self._mode, message = "Cannot compute G matrix.")
self.computeDerivatives()
if self.verbosity >= 10:
verbosityDepth("INIT", "Building gramian matrix.",
timestamp = self.timestamp)
if self.rho == np.inf:
Nmin = self.E - self.N
else:
Nmin = self.M - self.N + 1
if self.sampleType == "KRYLOV":
DerE = self.samplingEngine.samples[:, Nmin : self.E + 1]
G = self.HFEngine.innerProduct(DerE, DerE)
DerE = self.rescaleParameter(DerE, G, 2.)
G = self.HFEngine.innerProduct(DerE, DerE)
else:
RArnE = self.samplingEngine.RArnoldi[: self.E + 1,
Nmin : self.E + 1]
RArnE = self.rescaleParameter(RArnE, RArnE[Nmin :, :])
G = RArnE.T.conj().dot(RArnE)
if self.rho == np.inf:
self.G = G
else:
Gbig = G
self.G = np.zeros((self.N + 1, self.N + 1), dtype = np.complex)
for k in range(self.E - self.M):
self.G += self.rho ** (2 * k) * Gbig[k : k + self.N + 1,
k : k + self.N + 1]
if self.verbosity >= 10:
verbosityDepth("DEL", "Done building gramian.",
timestamp = self.timestamp)
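    # In formulas, buildG assembles (in the notation of the paper)
    #     G = sum_{k = 0}^{E-M-1} rho ** (2 k) * Gbig[k : k+N+1, k : k+N+1],
    # where Gbig collects inner products of the rescaled solution derivatives
    # u_{M-N+1}, ..., u_E; for rho = inf only the derivatives u_{E-N}, ..., u_E
    # enter, which gives the fast Pade' variant.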
    def findeveVGExplicit(self) -> Tuple[Np1D, Np2D]:
        """
        Compute eigenvalues and eigenvectors of Pade' denominator matrix
        explicitly.
        Returns:
            Eigenvalues in ascending order and corresponding eigenvector
            matrix.
        """
modeAssert(self._mode, message = "Cannot solve eigenvalue problem.")
self.buildG()
if self.verbosity >= 7:
verbosityDepth("INIT",
"Solving eigenvalue problem for gramian matrix.",
timestamp = self.timestamp)
ev, eV = np.linalg.eigh(self.G)
if self.verbosity >= 5:
try: condev = ev[-1] / ev[0]
except: condev = np.inf
verbosityDepth("MAIN", ("Solved eigenvalue problem of size {} "
"with condition number {:.4e}.").format(
self.N + 1,
condev),
timestamp = self.timestamp)
if self.verbosity >= 7:
verbosityDepth("DEL", "Done solving eigenvalue problem.",
timestamp = self.timestamp)
return ev, eV
def findeveVGQR(self) -> Tuple[Np1D, Np2D]:
"""
Compute eigenvalues and eigenvectors of Pade' denominator matrix
through SVD of R factor. See ``Householder triangularization of a
        quasimatrix'', L. Trefethen, 2008, for the QR algorithm.
Returns:
Eigenvalues in ascending order and corresponding eigenvector
matrix.
"""
modeAssert(self._mode, message = "Cannot solve eigenvalue problem.")
self.computeDerivatives()
if self.rho == np.inf:
Nmin = self.E - self.N
else:
Nmin = self.M - self.N + 1
if self.sampleType == "KRYLOV":
A = copy(self.samplingEngine.samples[:, Nmin : self.E + 1])
self.PODEngine = PODEngine(self.HFEngine)
if self.verbosity >= 10:
verbosityDepth("INIT", "Orthogonalizing samples.",
timestamp = self.timestamp)
R = self.PODEngine.QRHouseholder(A, only_R = True)
if self.verbosity >= 10:
verbosityDepth("DEL", "Done orthogonalizing samples.",
timestamp = self.timestamp)
else:
R = self.samplingEngine.RArnoldi[: self.E + 1, Nmin : self.E + 1]
R = self.rescaleParameter(R, R[R.shape[0] - R.shape[1] :, :])
if self.rho == np.inf:
if self.verbosity >= 7:
verbosityDepth("INIT", ("Solving svd for square root of "
"gramian matrix."),
timestamp = self.timestamp)
sizeI = R.shape[0]
_, s, V = np.linalg.svd(R, full_matrices = False)
else:
if self.verbosity >= 10:
verbosityDepth("INIT", ("Building matrix stack for square "
"root of gramian."),
timestamp = self.timestamp)
Rtower = np.zeros((R.shape[0] * (self.E - self.M), self.N + 1),
dtype = np.complex)
for k in range(self.E - self.M):
Rtower[k * R.shape[0] : (k + 1) * R.shape[0], :] = (
self.rho ** k
* R[:, self.M - self.N + 1 + k : self.M + 1 + k])
if self.verbosity >= 10:
verbosityDepth("DEL", "Done building matrix stack.",
timestamp = self.timestamp)
if self.verbosity >= 7:
verbosityDepth("INIT", ("Solving svd for square root of "
"gramian matrix."),
timestamp = self.timestamp)
sizeI = Rtower.shape[0]
_, s, V = np.linalg.svd(Rtower, full_matrices = False)
eV = V[::-1, :].T.conj()
if self.verbosity >= 5:
try: condev = s[0] / s[-1]
except: condev = np.inf
verbosityDepth("MAIN", ("Solved svd problem of size {} x {} with "
"condition number {:.4e}.").format(sizeI,
self.N + 1,
condev),
timestamp = self.timestamp)
if self.verbosity >= 7:
verbosityDepth("DEL", "Done solving eigenvalue problem.",
timestamp = self.timestamp)
return s[::-1], eV
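    # Relation exploited above: since G = R^H R, the right singular vectors
    # of R (or of the rho-weighted stack Rtower) are the eigenvectors of G,
    # and the singular values are the square roots of its eigenvalues, so the
    # SVD reproduces findeveVGExplicit without squaring the condition number.
    # A quick numerical check (illustrative only):
    #
    #     R = np.random.randn(6, 4) + 1j * np.random.randn(6, 4)
    #     _, s, V = np.linalg.svd(R, full_matrices = False)
    #     np.allclose(np.sort(s ** 2),
    #                 np.linalg.eigvalsh(R.T.conj().dot(R)))   # True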
def radiusPade(self, mu:Np1D, mu0 : float = None) -> float:
"""
Compute translated radius to be plugged into Pade' approximant.
Args:
            mu: Parameter(s) where the radius is evaluated.
            mu0: Reference parameter. If None, set to self.mu0.
Returns:
Translated radius to be plugged into Pade' approximant.
"""
return self.trainedModel.radiusPade(mu, mu0)
def getResidues(self) -> Np1D:
"""
        Obtain approximant residues.
        Returns:
            Numpy 2D array with the (vector-valued) residue at each pole as
            a column.
"""
self.setupApprox()
if self.verbosity >= 20:
verbosityDepth("INIT", "Computing residues of model.",
timestamp = self.timestamp)
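        # At a simple pole mu_j of the rational surrogate, the residue of
        # P / Q is P(mu_j) / Q'(mu_j); below, P is evaluated through the
        # projection matrix and Q' through the polynomial derivative helper,
        # so each column of residues is the (vector-valued) residue at one
        # pole.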
poles = self.getPoles()
Pvals = self.trainedModel.data.projMat.dot(self.getPVal(poles))
Qder = self._valder(self.radiusPade(poles), self.trainedModel.data.Q)
residues = Pvals / Qder
if self.verbosity >= 20:
verbosityDepth("DEL", "Done computing residues.",
timestamp = self.timestamp)
return residues
