approximant_lagrange_pade.py

# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from copy import copy
import numpy as np
from rrompy.reduction_methods.base import checkRobustTolerance
from .generic_approximant_lagrange import GenericApproximantLagrange
from rrompy.utilities.base.types import Np1D, DictAny, List, HFEng
from rrompy.utilities.base import purgeDict, verbosityDepth
from rrompy.utilities.warning_manager import warn
__all__ = ['ApproximantLagrangePade']
class ApproximantLagrangePade(GenericApproximantLagrange):
"""
ROM Lagrange Pade' interpolant computation for parametric problems.
Args:
HFEngine: HF problem solver.
mu0(optional): Default parameter. Defaults to 0.
approxParameters(optional): Dictionary containing values for main
parameters of approximant. Recognized keys are:
- 'POD': whether to compute POD of snapshots; defaults to True;
- 'S': total number of samples current approximant relies upon;
defaults to 2;
- 'sampler': sample point generator; defaults to uniform sampler on
[0, 1];
- 'E': coefficient of interpolant to be minimized; defaults to
                min(S - 1, M + 1);
- 'M': degree of Pade' interpolant numerator; defaults to 0;
- 'N': degree of Pade' interpolant denominator; defaults to 0;
- 'interpRcond': tolerance for interpolation via numpy.polyfit;
defaults to None;
- 'robustTol': tolerance for robust Pade' denominator management;
defaults to 0.
            Defaults to empty dict.
        homogeneized(optional): Whether to homogeneize Dirichlet BCs.
            Defaults to False.
        verbosity(optional): Verbosity level. Defaults to 10.
Attributes:
HFEngine: HF problem solver.
mu0: Default parameter.
mus: Array of snapshot parameters.
        ws: Array of snapshot weights.
approxParameters: Dictionary containing values for main parameters of
approximant. Recognized keys are in parameterList.
parameterList: Recognized keys of approximant parameters:
- 'POD': whether to compute POD of snapshots;
- 'S': total number of samples current approximant relies upon;
- 'sampler': sample point generator;
- 'E': coefficient of interpolant to be minimized;
- 'M': degree of Pade' interpolant numerator;
- 'N': degree of Pade' interpolant denominator;
- 'interpRcond': tolerance for interpolation via numpy.polyfit;
- 'robustTol': tolerance for robust Pade' denominator management.
extraApproxParameters: List of approxParameters keys in addition to
mother class's.
        S: Number of solution snapshots current approximant is based upon.
sampler: Sample point generator.
M: Numerator degree of approximant.
N: Denominator degree of approximant.
POD: Whether to compute POD of snapshots.
interpRcond: Tolerance for interpolation via numpy.polyfit.
robustTol: Tolerance for robust Pade' denominator management.
samplingEngine: Sampling engine.
uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
complex vector.
lastSolvedHF: Wavenumber corresponding to last computed high fidelity
solution.
Q: Numpy 1D vector containing complex coefficients of approximant
denominator.
        P: Numpy 2D array whose columns are FE dofs of coefficients of
            approximant numerator.
uApp: Last evaluated approximant as numpy complex vector.
lastApproxParameters: List of parameters corresponding to last
computed approximant.
"""
def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
approxParameters : DictAny = {}, homogeneized : bool = False,
verbosity : int = 10):
self._preInit()
self._addParametersToList(["E", "M", "N", "interpRcond", "robustTol"])
super().__init__(HFEngine = HFEngine, mu0 = mu0,
approxParameters = approxParameters,
homogeneized = homogeneized,
verbosity = verbosity)
self._postInit()
@property
def approxParameters(self):
"""
Value of approximant parameters. Its assignment may change E, M, N,
robustTol and S.
"""
return self._approxParameters
@approxParameters.setter
def approxParameters(self, approxParams):
approxParameters = purgeDict(approxParams, self.parameterList,
dictname = self.name() + ".approxParameters",
baselevel = 1)
approxParametersCopy = purgeDict(approxParameters, ["E", "M", "N",
"interpRcond",
"robustTol"],
True, True, baselevel = 1)
if hasattr(self, "M"):
Mold = self.M
self._M = 0
if hasattr(self, "N"):
Nold = self.N
self._N = 0
if hasattr(self, "E"):
self._E = 0
GenericApproximantLagrange.approxParameters.fset(self,
approxParametersCopy)
keyList = list(approxParameters.keys())
if "interpRcond" in keyList:
self.interpRcond = approxParameters["interpRcond"]
elif hasattr(self, "interpRcond"):
self.interpRcond = self.interpRcond
else:
self.interpRcond = None
if "robustTol" in keyList:
self.robustTol = approxParameters["robustTol"]
elif hasattr(self, "robustTol"):
self.robustTol = self.robustTol
else:
self.robustTol = 0
if "M" in keyList:
self.M = approxParameters["M"]
elif hasattr(self, "M"):
self.M = Mold
else:
self.M = 0
if "N" in keyList:
self.N = approxParameters["N"]
elif hasattr(self, "N"):
self.N = Nold
else:
self.N = 0
if "E" in keyList:
self.E = approxParameters["E"]
else:
self.E = min(self.S - 1, self.M + 1)
@property
def M(self):
"""Value of M. Its assignment may change S."""
return self._M
@M.setter
def M(self, M):
if M < 0: raise ArithmeticError("M must be non-negative.")
self._M = M
self._approxParameters["M"] = self.M
if hasattr(self, "S") and self.S < self.M + 1:
warn("Prescribed S is too small. Updating S to M + 1.")
self.S = self.M + 1
@property
def N(self):
"""Value of N. Its assignment may change S."""
return self._N
@N.setter
def N(self, N):
if N < 0: raise ArithmeticError("N must be non-negative.")
self._N = N
self._approxParameters["N"] = self.N
if hasattr(self, "S") and self.S < self.N + 1:
warn("Prescribed S is too small. Updating S to N + 1.")
self.S = self.N + 1
@property
def E(self):
"""Value of E. Its assignment may change S."""
return self._E
@E.setter
def E(self, E):
if E < 0: raise ArithmeticError("E must be non-negative.")
self._E = E
self._approxParameters["E"] = self.E
if hasattr(self, "S") and self.S < self.E + 1:
warn("Prescribed S is too small. Updating S to E + 1.")
self.S = self.E + 1
@property
def robustTol(self):
"""Value of tolerance for robust Pade' denominator management."""
return self._robustTol
@robustTol.setter
def robustTol(self, robustTol):
if robustTol < 0.:
warn("Overriding prescribed negative robustness tolerance to 0.")
robustTol = 0.
self._robustTol = robustTol
self._approxParameters["robustTol"] = self.robustTol
@property
def S(self):
"""Value of S."""
return self._S
@S.setter
def S(self, S):
if S <= 0: raise ArithmeticError("S must be positive.")
if hasattr(self, "S"): Sold = self.S
else: Sold = -1
vals, label = [0] * 3, {0:"M", 1:"N", 2:"E"}
if hasattr(self, "M"): vals[0] = self.M
if hasattr(self, "N"): vals[1] = self.N
if hasattr(self, "E"): vals[2] = self.E
idxmax = np.argmax(vals)
if vals[idxmax] + 1 > S:
warn("Prescribed S is too small. Updating S to {} + 1."\
.format(label[idxmax]))
self.S = vals[idxmax] + 1
else:
self._S = S
self._approxParameters["S"] = self.S
if Sold != self.S:
self.resetSamples()
def setupApprox(self):
"""
Compute Pade' interpolant.
SVD-based robust eigenvalue management.
"""
if not self.checkComputedApprox():
if self.verbosity >= 5:
verbosityDepth("INIT", "Setting up {}.". format(self.name()))
self.computeRescaleParameter()
self.computeSnapshots()
if self.N > 0:
if self.verbosity >= 7:
verbosityDepth("INIT", ("Starting computation of "
"denominator."))
TNE = np.vander(self.radiusPade(self.mus), N = self.N + 1)
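                # TNE holds the monomial basis (decreasing powers, as
                # returned by np.vander) of the shifted sample radii up to
                # the current degree N; each pass of the loop below keeps
                # only its trailing N + 1 columns, so degree reductions can
                # reuse the same matrix.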
while self.N > 0:
TN = TNE[:, TNE.shape[1] - self.N - 1 :]
if self.POD:
data = self.samplingEngine.RPOD
else:
data = self.samplingEngine.samples
RHSFull = np.empty((self.S, data.shape[0] * (self.N + 1)),
dtype = np.complex)
for j in range(self.S):
RHSFull[j, :] = np.kron(data[:, j], TN[j, :])
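                    # Row j of RHSFull is the Kronecker product of snapshot
                    # j's (POD) coefficients with the denominator basis at
                    # sample j; the degree-E coefficient block of the
                    # weighted fit below is what the denominator is chosen
                    # to minimize.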
fitOut = np.polyfit(self.radiusPade(self.mus), RHSFull,
self.E, w = self.ws, full = True,
rcond = self.interpRcond)
if self.verbosity >= 5:
verbosityDepth("MAIN", ("Fitting {} samples with "
"degree {} through {}... "
"Conditioning of LS system: "
"{:.4e}.").format(
self.S, self.E, "polyfit",
fitOut[3][0] / fitOut[3][-1]))
if fitOut[2] < self.E + 1:
Enew = fitOut[2] - 1
Nnew = min(self.N, Enew)
Mnew = min(self.M, Enew)
if Nnew == self.N:
strN = ""
else:
strN = "N from {} to {} and ".format(self.N, Nnew)
if Mnew == self.M:
strM = ""
else:
strM = "M from {} to {} and ".format(self.M, Mnew)
warn(("Polyfit is poorly conditioned.\nReducing {}{}E "
"from {} to {}.").format(strN, strM,
self.E, Enew))
newParameters = {"N" : Nnew, "M" : Mnew, "E" : Enew}
self.approxParameters = newParameters
continue
G = fitOut[0][0, :].reshape((self.N + 1, data.shape[0]))
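                    # G collects the degree-E coefficients of the fit. The
                    # denominator coefficients are the vector associated
                    # with the smallest singular value of G (POD case) or
                    # the smallest eigenvalue of its Gramian with respect to
                    # the HF inner product (non-POD case).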
if self.POD:
if self.verbosity >= 7:
verbosityDepth("INIT", ("Solving svd for square "
"root of gramian matrix."),
end = "")
_, ev, eV = np.linalg.svd(G, full_matrices = False)
ev = ev[::-1]
eV = eV[::-1, :].conj().T
else:
if self.verbosity >= 10:
verbosityDepth("INIT", "Building gramian matrix.",
end = "")
G2 = self.HFEngine.innerProduct(G, G)
if self.verbosity >= 10:
verbosityDepth("DEL", "Done building gramian.",
inline = True)
if self.verbosity >= 7:
verbosityDepth("INIT", ("Solving eigenvalue "
"problem for gramian "
"matrix."), end = "")
ev, eV = np.linalg.eigh(G2)
if self.verbosity >= 7:
verbosityDepth("DEL", " Done.", inline = True)
newParameters = checkRobustTolerance(ev, self.E,
self.robustTol)
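                    # If checkRobustTolerance asks for reduced degrees,
                    # update the parameters and refit; otherwise accept the
                    # current denominator candidate.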
if not newParameters:
break
self.approxParameters = newParameters
if self.N <= 0:
eV = np.ones((1, 1))
self.Q = np.poly1d(eV[:, 0])
if self.verbosity >= 7:
verbosityDepth("DEL", "Done computing denominator.")
else:
self.Q = np.poly1d([1])
if self.verbosity >= 7:
verbosityDepth("INIT", "Starting computation of numerator.")
self.lastApproxParameters = copy(self.approxParameters)
Qevaldiag = np.diag(self.getQVal(self.mus))
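            # The numerator is fitted so that, in the reduced (snapshot/POD)
            # basis, P(mu_j) matches Q(mu_j) times the j-th unit vector, so
            # that the rational approximant P(mu) / Q(mu) interpolates the
            # snapshots at the sample points (up to fit conditioning).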
while self.M >= 0:
fitOut = np.polyfit(self.radiusPade(self.mus), Qevaldiag,
self.M, w = self.ws, full = True,
rcond = self.interpRcond)
if self.verbosity >= 5:
verbosityDepth("MAIN", ("Fitting {} samples with degree "
"{} through {}... Conditioning of "
"LS system: {:.4e}.").format(
self.S, self.M, "polyfit",
fitOut[3][0] / fitOut[3][-1]))
if fitOut[2] == self.M + 1:
P = fitOut[0].T
break
warn(("Polyfit is poorly conditioned. Reducing M from {} to "
"{}. Exact snapshot interpolation not guaranteed.")\
.format(self.M, fitOut[2] - 1))
self.M = fitOut[2] - 1
self.P = np.atleast_2d(P)
if self.POD:
self.P = self.samplingEngine.RPOD.dot(self.P)
if self.verbosity >= 7:
verbosityDepth("DEL", "Done computing numerator.")
self.lastApproxParameters = copy(self.approxParameters)
if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp
if self.verbosity >= 5:
verbosityDepth("DEL", "Done setting up approximant.\n")
def radiusPade(self, mu:Np1D, mu0 : float = None) -> float:
"""
Compute translated radius to be plugged into Pade' approximant.
Args:
            mu: Parameter(s) to be translated.
            mu0: Reference parameter. If None, set to self.mu0.
Returns:
Translated radius to be plugged into Pade' approximant.
"""
if mu0 is None: mu0 = self.mu0
return ((self.HFEngine.rescaling(mu) - self.HFEngine.rescaling(mu0))
/ self.scaleFactor)
def getPVal(self, mu:List[complex]):
"""
Evaluate Pade' numerator at arbitrary parameter.
Args:
mu: Target parameter.
"""
self.setupApprox()
if self.verbosity >= 10:
verbosityDepth("INIT",
"Evaluating numerator at mu = {}.".format(mu),
end = "")
try:
len(mu)
except:
mu = [mu]
powerlist = np.vander(self.radiusPade(mu), self.P.shape[1])
p = self.P.dot(powerlist.T)
if len(mu) == 1:
p = p.flatten()
if self.verbosity >= 10:
verbosityDepth("DEL", " Done.", inline = True)
return p
def getQVal(self, mu:List[complex]):
"""
Evaluate Pade' denominator at arbitrary parameter.
Args:
mu: Target parameter.
"""
self.setupApprox()
if self.verbosity >= 10:
verbosityDepth("INIT",
"Evaluating denominator at mu = {}.".format(mu),
end = "")
q = self.Q(self.radiusPade(mu))
if self.verbosity >= 10:
verbosityDepth("DEL", " Done.", inline = True)
return q
def evalApproxReduced(self, mu:complex):
"""
Evaluate Pade' approximant at arbitrary parameter.
Args:
mu: Target parameter.
"""
self.setupApprox()
if (not hasattr(self, "lastSolvedApp")
or not np.isclose(self.lastSolvedApp, mu)):
if self.verbosity >= 5:
verbosityDepth("INIT",
"Evaluating approximant at mu = {}.".format(mu))
self.uAppReduced = self.getPVal(mu) / self.getQVal(mu)
self.lastSolvedApp = mu
if self.verbosity >= 5:
verbosityDepth("DEL", "Done evaluating approximant.")
def evalApprox(self, mu:complex):
"""
Evaluate approximant at arbitrary parameter.
Args:
mu: Target parameter.
"""
self.evalApproxReduced(mu)
self.uApp = self.samplingEngine.samples.dot(self.uAppReduced)
def getPoles(self) -> Np1D:
"""
Obtain approximant poles.
Returns:
Numpy complex vector of poles.
"""
self.setupApprox()
return self.HFEngine.rescalingInv(self.scaleFactor * self.Q.r
+ self.HFEngine.rescaling(self.mu0))
