# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy as copy
import numpy as np
from rrompy.reduction_methods.standard.generic_standard_approximant \
import GenericStandardApproximant
from rrompy.utilities.base.types import (Np1D, Np2D, DictAny, HFEng, Tuple,
List, normEng, paramVal, paramList,
sampList)
from rrompy.utilities.poly_fitting.polynomial import (polyvanderTotal as pvT,
hashIdxToDerivative as hashI,
totalDegreeN)
from rrompy.utilities.base import verbosityManager as vbMng
from rrompy.solver import normEngine
from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
RROMPyWarning)
from rrompy.parameter import checkParameterList, emptyParameterList
__all__ = ['GenericGreedyApproximant']
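# localL2Distance returns the matrix of pairwise Euclidean distances: entry
# (i, j) is the distance between the i-th row of mus and the j-th row of
# badmus.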
def localL2Distance(mus:Np2D, badmus:Np2D) -> Np2D:
return np.linalg.norm(np.tile(mus[..., np.newaxis], [1, 1, len(badmus)])
- badmus[..., np.newaxis].T, axis = 1)
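# pruneSamples is used below to drop test/training candidates that numerically
# coincide with points already in the sample set; the indices it returns are
# then removed from the candidate list via its pop method by the caller.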
def pruneSamples(mus:paramList, badmus:paramList,
tol : float = 1e-8) -> Np1D:
"""Remove from mus all the elements which are too close to badmus."""
if len(badmus) == 0: return mus
musA = mus.data
badmusA = badmus.data
proximity = np.min(localL2Distance(musA, badmusA), axis = 1)
return np.arange(len(mus))[proximity <= tol]
class GenericGreedyApproximant(GenericStandardApproximant):
"""
ROM greedy interpolant computation for parametric problems
(ABSTRACT).
Args:
HFEngine: HF problem solver.
mu0(optional): Default parameter. Defaults to 0.
approxParameters(optional): Dictionary containing values for main
parameters of approximant. Recognized keys are:
- 'POD': whether to compute POD of snapshots; defaults to True;
- 'S': number of starting training points;
- 'sampler': sample point generator;
- 'greedyTol': uniform error tolerance for greedy algorithm;
defaults to 1e-2;
- 'collinearityTol': collinearity tolerance for greedy algorithm;
defaults to 0.;
- 'interactive': whether to interactively terminate greedy
algorithm; defaults to False;
- 'maxIter': maximum number of greedy steps; defaults to 1e2;
- 'refinementRatio': ratio of test points to be exhausted before
test set refinement; defaults to 0.2;
- 'nTestPoints': number of test points; defaults to 5e2;
- 'trainSetGenerator': training sample points generator; defaults
to sampler.
Defaults to empty dict.
        verbosity(optional): Verbosity level. Defaults to 10.
        timestamp(optional): Whether to print timestamp when logging.
            Defaults to True.
Attributes:
HFEngine: HF problem solver.
mu0: Default parameter.
mus: Array of snapshot parameters.
approxParameters: Dictionary containing values for main parameters of
approximant. Recognized keys are in parameterList.
parameterListSoft: Recognized keys of soft approximant parameters:
            - 'POD': whether to compute POD of snapshots;
- 'greedyTol': uniform error tolerance for greedy algorithm;
- 'collinearityTol': collinearity tolerance for greedy algorithm;
- 'interactive': whether to interactively terminate greedy
algorithm;
- 'maxIter': maximum number of greedy steps;
- 'refinementRatio': ratio of test points to be exhausted before
test set refinement;
- 'nTestPoints': number of test points;
- 'trainSetGenerator': training sample points generator.
parameterListCritical: Recognized keys of critical approximant
parameters:
- 'S': total number of samples current approximant relies upon;
- 'sampler': sample point generator.
POD: whether to compute POD of snapshots.
        S: number of starting training points.
sampler: Sample point generator.
greedyTol: Uniform error tolerance for greedy algorithm.
collinearityTol: Collinearity tolerance for greedy algorithm.
interactive: whether to interactively terminate greedy algorithm.
maxIter: maximum number of greedy steps.
        refinementRatio: ratio of test points to be exhausted before
            test set refinement.
        nTestPoints: number of test points.
trainSetGenerator: training sample points generator.
muBounds: list of bounds for parameter values.
samplingEngine: Sampling engine.
estimatorNormEngine: Engine for estimator norm computation.
uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
sampleList.
lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
solution(s) as parameterList.
uApproxReduced: Reduced approximate solution(s) with parameter(s)
lastSolvedApprox as sampleList.
lastSolvedApproxReduced: Parameter(s) corresponding to last computed
reduced approximate solution(s) as parameterList.
uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
sampleList.
lastSolvedApprox: Parameter(s) corresponding to last computed
approximate solution(s) as parameterList.
"""
TOL_INSTABILITY = 1e-6
def __init__(self, HFEngine:HFEng, mu0 : paramVal = None,
approxParameters : DictAny = {}, verbosity : int = 10,
timestamp : bool = True):
self._preInit()
self._addParametersToList(["greedyTol", "collinearityTol",
"interactive", "maxIter", "refinementRatio",
"nTestPoints"],
[1e-2, 0., False, 1e2, .2, 5e2],
["trainSetGenerator"], ["AUTO"])
super().__init__(HFEngine = HFEngine, mu0 = mu0,
approxParameters = approxParameters,
verbosity = verbosity, timestamp = timestamp)
self._postInit()
@property
def greedyTol(self):
"""Value of greedyTol."""
return self._greedyTol
@greedyTol.setter
def greedyTol(self, greedyTol):
if greedyTol < 0:
raise RROMPyException("greedyTol must be non-negative.")
if hasattr(self, "_greedyTol") and self.greedyTol is not None:
greedyTolold = self.greedyTol
else:
greedyTolold = -1
self._greedyTol = greedyTol
self._approxParameters["greedyTol"] = self.greedyTol
if greedyTolold != self.greedyTol:
self.resetSamples()
@property
def collinearityTol(self):
"""Value of collinearityTol."""
return self._collinearityTol
@collinearityTol.setter
def collinearityTol(self, collinearityTol):
if collinearityTol < 0:
raise RROMPyException("collinearityTol must be non-negative.")
if (hasattr(self, "_collinearityTol")
and self.collinearityTol is not None):
collinearityTolold = self.collinearityTol
else:
collinearityTolold = -1
self._collinearityTol = collinearityTol
self._approxParameters["collinearityTol"] = self.collinearityTol
if collinearityTolold != self.collinearityTol:
self.resetSamples()
@property
def interactive(self):
"""Value of interactive."""
return self._interactive
@interactive.setter
def interactive(self, interactive):
self._interactive = interactive
@property
def maxIter(self):
"""Value of maxIter."""
return self._maxIter
@maxIter.setter
def maxIter(self, maxIter):
if maxIter <= 0: raise RROMPyException("maxIter must be positive.")
if hasattr(self, "_maxIter") and self.maxIter is not None:
maxIterold = self.maxIter
else:
maxIterold = -1
self._maxIter = maxIter
self._approxParameters["maxIter"] = self.maxIter
if maxIterold != self.maxIter:
self.resetSamples()
@property
def refinementRatio(self):
"""Value of refinementRatio."""
return self._refinementRatio
@refinementRatio.setter
def refinementRatio(self, refinementRatio):
if refinementRatio <= 0. or refinementRatio > 1.:
raise RROMPyException(("refinementRatio must be between 0 "
"(excluded) and 1."))
if (hasattr(self, "_refinementRatio")
and self.refinementRatio is not None):
refinementRatioold = self.refinementRatio
else:
refinementRatioold = -1
self._refinementRatio = refinementRatio
self._approxParameters["refinementRatio"] = self.refinementRatio
if refinementRatioold != self.refinementRatio:
self.resetSamples()
@property
def nTestPoints(self):
"""Value of nTestPoints."""
return self._nTestPoints
@nTestPoints.setter
def nTestPoints(self, nTestPoints):
if nTestPoints <= 0:
raise RROMPyException("nTestPoints must be positive.")
        if not np.isclose(nTestPoints, int(nTestPoints)):
            raise RROMPyException("nTestPoints must be an integer.")
        nTestPoints = int(nTestPoints)
if hasattr(self, "_nTestPoints") and self.nTestPoints is not None:
nTestPointsold = self.nTestPoints
else:
nTestPointsold = -1
self._nTestPoints = nTestPoints
self._approxParameters["nTestPoints"] = self.nTestPoints
if nTestPointsold != self.nTestPoints:
self.resetSamples()
@property
def trainSetGenerator(self):
"""Value of trainSetGenerator."""
return self._trainSetGenerator
@trainSetGenerator.setter
def trainSetGenerator(self, trainSetGenerator):
if (isinstance(trainSetGenerator, (str,))
and trainSetGenerator.upper() == "AUTO"):
trainSetGenerator = self.sampler
if 'generatePoints' not in dir(trainSetGenerator):
raise RROMPyException("trainSetGenerator type not recognized.")
if (hasattr(self, '_trainSetGenerator')
and self.trainSetGenerator not in [None, "AUTO"]):
trainSetGeneratorOld = self.trainSetGenerator
self._trainSetGenerator = trainSetGenerator
self._approxParameters["trainSetGenerator"] = self.trainSetGenerator
if (not 'trainSetGeneratorOld' in locals()
or trainSetGeneratorOld != self.trainSetGenerator):
self.resetSamples()
def resetSamples(self):
"""Reset samples."""
super().resetSamples()
self._mus = emptyParameterList()
def initEstimatorNormEngine(self, normEngn : normEng = None):
"""Initialize estimator norm engine."""
if (normEngn is not None or not hasattr(self, "estimatorNormEngine")
or self.estimatorNormEngine is None):
if normEngn is None:
if not hasattr(self.HFEngine, "energyNormPartialDualMatrix"):
self.HFEngine.buildEnergyNormPartialDualForm()
estimatorEnergyMatrix = (
self.HFEngine.energyNormPartialDualMatrix)
else:
if hasattr(normEngn, "buildEnergyNormPartialDualForm"):
if not hasattr(normEngn, "energyNormPartialDualMatrix"):
normEngn.buildEnergyNormPartialDualForm()
estimatorEnergyMatrix = (
normEngn.energyNormPartialDualMatrix)
else:
estimatorEnergyMatrix = normEngn
self.estimatorNormEngine = normEngine(estimatorEnergyMatrix)
def errorEstimator(self, mus:Np1D) -> Np1D:
"""Standard residual-based error estimator."""
self.setupApprox()
self.assembleReducedResidualBlocks(full = True)
nmus = len(mus)
nAs = self.trainedModel.data.resAA.shape[1]
nbs = self.trainedModel.data.resbb.shape[0]
mustr = mus
if nmus > 2:
mustr = "[{} ..({}).. {}]".format(mus[0], nmus - 2, mus[-1])
vbMng(self.trainedModel, "INIT",
"Computing reduced solution at mu = {}.".format(mustr), 5)
verb = self.trainedModel.verbosity
self.trainedModel.verbosity = 0
mus = checkParameterList(mus, self.npar)[0]
uApproxRs = self.getApproxReduced(mus)
muRTest = ((mus ** self.HFEngine.rescalingExp
- self.mu0 ** self.HFEngine.rescalingExp)
/ self.scaleFactor)
vanderBase, _, vanderBaseIdxs = pvT(muRTest,
np.sum(hashI(max(nAs, nbs) - 1, self.npar)),
"MONOMIAL")
vanderBase = vanderBase[:, vanderBaseIdxs].T
radiusA = np.expand_dims(uApproxRs.data, 1) * vanderBase[: nAs, :]
radiusb = vanderBase[: nbs, :]
self.trainedModel.verbosity = verb
vbMng(self.trainedModel, "DEL", "Done computing reduced solution.", 5)
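        # Squared residual norm of the reduced solutions in the estimator
        # inner product: ||A(mu) u - b(mu)||^2 = ||A u||^2 - 2 Re<A u, b>
        # + ||b||^2. The three contractions below (LL, Lf, ff) assemble these
        # terms from the precomputed affine blocks resAA, resAb, resbb; the
        # value returned is the square root of this quantity relative to
        # ||b||^2.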
# 'ij,jk,ik->k', resbb, radiusb, radiusb.conj()
ff = np.sum(self.trainedModel.data.resbb.dot(radiusb) * radiusb.conj(),
axis = 0)
# 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj()
Lf = np.sum(np.tensordot(self.trainedModel.data.resAb, radiusA, 2)
* radiusb.conj(), axis = 0)
# 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj()
LL = np.sum(np.tensordot(self.trainedModel.data.resAA, radiusA, 2)
* radiusA.conj(), axis = (0, 1))
return np.abs((LL - 2. * np.real(Lf) + ff) / ff) ** .5
def getMaxErrorEstimator(self, mus:paramList, n : int = 1,
plot : bool = False) -> Tuple[Np1D, int, float]:
"""
Compute maximum of (and index of maximum of) error estimator over given
parameters.
"""
errorEstTest = self.errorEstimator(mus)
errCP = copy(errorEstTest)
idxMaxEst = np.empty(n, dtype = int)
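        # Pick n estimator maxima; after each pick the remaining estimates are
        # rescaled by their (rescaled-parameter) distance from the point just
        # selected, so that later picks are steered away from it.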
for j in range(n):
k = np.argmax(errCP)
idxMaxEst[j] = k
if j + 1 < n:
errCP *= np.linalg.norm((mus.data ** self.HFEngine.rescalingExp
- mus[k] ** self.HFEngine.rescalingExp)
/ self.scaleFactor, axis = 1)
maxEst = errorEstTest[idxMaxEst]
if plot and not np.any(np.isinf(errorEstTest)):
musre = copy(mus.re.data)
from matplotlib import pyplot as plt
plt.figure()
plt.semilogy([musre[0, 0], musre[-1, 0]], [self.greedyTol] * 2,
'r--')
plt.semilogy(self.mus.re(0),
2. * self.greedyTol * np.ones(len(self.mus)), '*m')
plt.semilogy(musre[idxMaxEst, 0], maxEst, 'xr')
while len(musre) > 0:
if self.npar == 1:
currIdx = np.arange(len(musre))
else:
currIdx = np.where(np.isclose(np.sum(
np.abs(musre[:, 1 :] - musre[0, 1 :]), 1), 0.))[0]
plt.semilogy(musre[currIdx, 0], errorEstTest[currIdx], 'k')
musre = np.delete(musre, currIdx, 0)
plt.grid()
plt.show()
plt.close()
return errorEstTest, idxMaxEst, maxEst
def _isLastSampleCollinear(self) -> bool:
"""Check collinearity of last sample."""
if self.collinearityTol <= 0.: return False
if self.POD:
reff = self.samplingEngine.RPOD[:, -1]
else:
RROMPyWarning(("Repeated orthogonalization of the samples for "
"collinearity check. Consider setting POD to "
"True."))
if not hasattr(self, "_PODEngine"):
from rrompy.sampling.base.pod_engine import PODEngine
self._PODEngine = PODEngine(self.HFEngine)
reff = self._PODEngine.generalizedQR(self.samplingEngine.samples,
only_R = True)[:, -1]
return np.abs(reff[-1]) < self.collinearityTol * np.linalg.norm(reff)
def greedyNextSample(self, muidx:int, n : int = 1, plotEst : bool = False)\
-> Tuple[Np1D, int, float, paramVal]:
"""Compute next greedy snapshot of solution map."""
RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
mus = copy(self.muTest[muidx])
self.muTest.pop(muidx)
for mu in mus:
vbMng(self, "MAIN",
("Adding {}-th sample point at {} to training "
"set.").format(self.samplingEngine.nsamples + 1, mu), 2)
self.mus.append(mu)
self.samplingEngine.nextSample(mu)
if self._isLastSampleCollinear():
RROMPyWarning("Collinearity above tolerance detected.")
errorEstTest = np.empty(len(self.muTest))
errorEstTest[:] = np.nan
return errorEstTest, [-1], np.nan, np.nan
errorEstTest, muidx, maxErrorEst = self.getMaxErrorEstimator(
self.muTest, n, plotEst)
self.derivativeShell += 1
self.derivativeShellSize = totalDegreeN(self.npar - 1,
self.derivativeShell)
return errorEstTest, muidx, maxErrorEst, self.muTest[muidx]
def _preliminaryTraining(self):
"""Initialize starting snapshots of solution map."""
RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
self.computeScaleFactor()
if self.samplingEngine.nsamples > 0:
return
self.resetSamples()
self.mus = self.trainSetGenerator.generatePoints(self.S)
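        # Trim the starting training set so that its size matches a whole
        # number of total-degree "shells" of derivative multi-indices; the
        # size of the next shell (derivativeShellSize) is also the number of
        # new sample points added per greedy iteration (one per iteration in
        # the one-parameter case).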
self.derivativeShell, self.derivativeShellSize, shellTot = -1, 0, 0
while shellTot <= len(self.mus):
self.derivativeShellSize = totalDegreeN(self.npar - 1,
self.derivativeShell + 1)
shellTot += self.derivativeShellSize
self.derivativeShell += 1
self._S = shellTot - self.derivativeShellSize
self.mus = self.mus[list(range(self.S))]
muTestBase = self.sampler.generatePoints(self.nTestPoints)
idxPop = pruneSamples(muTestBase ** self.HFEngine.rescalingExp,
self.mus ** self.HFEngine.rescalingExp,
1e-10 * self.scaleFactor[0])
muTestBase.pop(idxPop)
muTestBase = muTestBase.sort()
muLast = copy(self.mus[-1])
self.mus.pop()
if len(self.mus) > 0:
vbMng(self, "MAIN",
("Adding first {} sample point{} at {} to training "
"set.").format(self.S - 1, "" + "s" * (self.S > 2),
self.mus), 2)
self.samplingEngine.iterSample(self.mus)
self.muTest = emptyParameterList()
self.muTest.reset((len(muTestBase) + 1, self.mus.shape[1]))
self.muTest[: -1] = muTestBase
self.muTest[-1] = muLast
def _enrichTestSet(self, nTest:int):
"""Add extra elements to test set."""
RROMPyAssert(self._mode, message = "Cannot enrich test set.")
muTestExtra = self.sampler.generatePoints(2 * nTest)
muTotal = copy(self.mus)
muTotal.append(self.muTest)
idxPop = pruneSamples(muTestExtra ** self.HFEngine.rescalingExp,
muTotal ** self.HFEngine.rescalingExp,
1e-10 * self.scaleFactor[0])
muTestExtra.pop(idxPop)
        muTestNew = np.empty(len(self.muTest) + len(muTestExtra),
                             dtype = complex)
muTestNew[: len(self.muTest)] = self.muTest(0)
muTestNew[len(self.muTest) :] = muTestExtra(0)
self.muTest = checkParameterList(np.sort(muTestNew), self.npar)[0]
vbMng(self, "MAIN",
"Enriching test set by {} elements.".format(len(muTestExtra)), 5)
def greedy(self, plotEst : bool = False):
"""Compute greedy snapshots of solution map."""
RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
if self.samplingEngine.nsamples > 0:
return
vbMng(self, "INIT", "Starting computation of snapshots.", 2)
self._preliminaryTraining()
nTest = self.nTestPoints
muT0 = copy(self.muTest[-1])
errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample(
[len(self.muTest) - 1], self.derivativeShellSize, plotEst)
if np.any(np.isnan(maxErrorEst)):
RROMPyWarning(("Instability in a posteriori estimator. "
"Starting preemptive greedy loop termination."))
self.muTest.append(muT0)
self.mus.pop(-1)
self.samplingEngine.popSample()
self.setupApprox()
else:
vbMng(self, "MAIN", ("Uniform testing error estimate "
"{:.4e}.").format(np.max(maxErrorEst)), 2)
trainedModelOld = copy(self.trainedModel)
while (self.samplingEngine.nsamples < self.maxIter
and np.max(maxErrorEst) > self.greedyTol):
if (1. - self.refinementRatio) * nTest > len(self.muTest):
self._enrichTestSet(nTest)
nTest = len(self.muTest)
muTestOld, maxErrorEstOld = self.muTest, np.max(maxErrorEst)
errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample(
muidx, self.derivativeShellSize, plotEst)
vbMng(self, "MAIN", ("Uniform testing error estimate "
"{:.4e}.").format(np.max(maxErrorEst)), 2)
if (np.any(np.isnan(maxErrorEst))
or np.any(np.isinf(maxErrorEst))
or maxErrorEstOld < (np.max(maxErrorEst)
* self.TOL_INSTABILITY)):
RROMPyWarning(("Instability in a posteriori estimator. "
"Starting preemptive greedy loop "
"termination."))
self.muTest = muTestOld
self.mus.pop(-1)
self.samplingEngine.popSample()
self.trainedModel.data = copy(trainedModelOld.data)
break
trainedModelOld.data = copy(self.trainedModel.data)
if (self.interactive and np.max(maxErrorEst) <= self.greedyTol):
vbMng(self, "MAIN",
("Required tolerance {} achieved. Want to decrease "
"greedyTol and continue? "
"Y/N").format(self.greedyTol), 0, end = "")
increasemaxIter = input()
if increasemaxIter.upper() == "Y":
vbMng(self, "MAIN", "Reducing value of greedyTol...",
0)
while np.max(maxErrorEst) <= self._greedyTol:
self._greedyTol *= .5
if (self.interactive
and self.samplingEngine.nsamples >= self.maxIter):
vbMng(self, "MAIN",
("Maximum number of iterations {} reached. Want to "
"increase maxIter and continue? "
"Y/N").format(self.maxIter), 0, end = "")
increasemaxIter = input()
if increasemaxIter.upper() == "Y":
vbMng(self, "MAIN", "Doubling value of maxIter...", 0)
self._maxIter *= 2
vbMng(self, "DEL",
("Done computing snapshots (final snapshot count: "
"{}).").format(self.samplingEngine.nsamples), 2)
def checkComputedApprox(self) -> bool:
"""
Check if setup of new approximant is not needed.
Returns:
True if new setup is not needed. False otherwise.
"""
return (super().checkComputedApprox()
and len(self.mus) == self.trainedModel.data.projMat.shape[1])
def assembleReducedResidualGramian(self, pMat:sampList):
"""
Build residual gramian of reduced linear system through projections.
"""
self.initEstimatorNormEngine()
if (not hasattr(self.trainedModel.data, "gramian")
or self.trainedModel.data.gramian is None):
gramian = self.estimatorNormEngine.innerProduct(pMat, pMat)
else:
Sold = self.trainedModel.data.gramian.shape[0]
S = len(self.mus)
if Sold > S:
gramian = self.trainedModel.data.gramian[: S, : S]
else:
idxOld = list(range(Sold))
idxNew = list(range(Sold, S))
                gramian = np.empty((S, S), dtype = complex)
gramian[: Sold, : Sold] = self.trainedModel.data.gramian
gramian[: Sold, Sold :] = (
self.estimatorNormEngine.innerProduct(pMat(idxNew),
pMat(idxOld)))
gramian[Sold :, : Sold] = gramian[: Sold, Sold :].T.conj()
gramian[Sold :, Sold :] = (
self.estimatorNormEngine.innerProduct(pMat(idxNew),
pMat(idxNew)))
self.trainedModel.data.gramian = gramian
def assembleReducedResidualBlocksbb(self, bs:List[Np1D]):
"""
Build blocks (of type bb) of reduced linear system through projections.
"""
self.initEstimatorNormEngine()
nbs = len(bs)
if (not hasattr(self.trainedModel.data, "resbb")
or self.trainedModel.data.resbb is None):
            resbb = np.empty((nbs, nbs), dtype = complex)
for i in range(nbs):
Mbi = bs[i]
resbb[i, i] = self.estimatorNormEngine.innerProduct(Mbi, Mbi)
for j in range(i):
Mbj = bs[j]
resbb[i, j] = self.estimatorNormEngine.innerProduct(Mbj,
Mbi)
for i in range(nbs):
for j in range(i + 1, nbs):
resbb[i, j] = resbb[j, i].conj()
self.trainedModel.data.resbb = resbb
def assembleReducedResidualBlocksAb(self, As:List[Np2D], bs:List[Np1D],
pMat:sampList):
"""
Build blocks (of type Ab) of reduced linear system through projections.
"""
self.initEstimatorNormEngine()
nAs = len(As)
nbs = len(bs)
S = len(self.mus)
if (not hasattr(self.trainedModel.data, "resAb")
or self.trainedModel.data.resAb is None):
if not isinstance(pMat, (np.ndarray,)): pMat = pMat.data
            resAb = np.empty((nbs, S, nAs), dtype = complex)
for j in range(nAs):
MAj = As[j].dot(pMat)
for i in range(nbs):
Mbi = bs[i]
resAb[i, :, j] = self.estimatorNormEngine.innerProduct(MAj,
Mbi)
else:
Sold = self.trainedModel.data.resAb.shape[1]
if Sold == S: return
if Sold > S:
resAb = self.trainedModel.data.resAb[:, : S, :]
else:
if not isinstance(pMat, (np.ndarray,)): pMat = pMat.data
                resAb = np.empty((nbs, S, nAs), dtype = complex)
resAb[:, : Sold, :] = self.trainedModel.data.resAb
for j in range(nAs):
MAj = As[j].dot(pMat[:, Sold :])
for i in range(nbs):
Mbi = bs[i]
resAb[i, Sold :, j] = (
self.estimatorNormEngine.innerProduct(MAj, Mbi))
self.trainedModel.data.resAb = resAb
def assembleReducedResidualBlocksAA(self, As:List[Np2D], pMat:sampList):
"""
Build blocks (of type AA) of reduced linear system through projections.
"""
self.initEstimatorNormEngine()
nAs = len(As)
S = len(self.mus)
if (not hasattr(self.trainedModel.data, "resAA")
or self.trainedModel.data.resAA is None):
if not isinstance(pMat, (np.ndarray,)): pMat = pMat.data
            resAA = np.empty((S, nAs, S, nAs), dtype = complex)
for i in range(nAs):
MAi = As[i].dot(pMat)
resAA[:, i, :, i] = (
self.estimatorNormEngine.innerProduct(MAi, MAi))
for j in range(i):
MAj = As[j].dot(pMat)
resAA[:, i, :, j] = (
self.estimatorNormEngine.innerProduct(MAj, MAi))
for i in range(nAs):
for j in range(i + 1, nAs):
resAA[:, i, :, j] = resAA[:, j, :, i].T.conj()
else:
Sold = self.trainedModel.data.resAA.shape[0]
if Sold == S: return
if Sold > S:
resAA = self.trainedModel.data.resAA[: S, :, : S, :]
else:
if not isinstance(pMat, (np.ndarray,)): pMat = pMat.data
                resAA = np.empty((S, nAs, S, nAs), dtype = complex)
resAA[: Sold, :, : Sold, :] = self.trainedModel.data.resAA
for i in range(nAs):
MAi = As[i].dot(pMat)
resAA[: Sold, i, Sold :, i] = (
self.estimatorNormEngine.innerProduct(MAi[:, Sold :],
MAi[:, : Sold]))
resAA[Sold :, i, : Sold, i] = resAA[: Sold, i,
Sold :, i].T.conj()
resAA[Sold :, i, Sold :, i] = (
self.estimatorNormEngine.innerProduct(MAi[:, Sold :],
MAi[:, Sold :]))
for j in range(i):
MAj = As[j].dot(pMat)
resAA[: Sold, i, Sold :, j] = (
self.estimatorNormEngine.innerProduct(MAj[:, Sold :],
MAi[:, : Sold]))
resAA[Sold :, i, : Sold, j] = (
self.estimatorNormEngine.innerProduct(MAj[:, : Sold],
MAi[:, Sold :]))
resAA[Sold :, i, Sold :, j] = (
self.estimatorNormEngine.innerProduct(MAj[:, Sold :],
MAi[:, Sold :]))
for i in range(nAs):
for j in range(i + 1, nAs):
resAA[: Sold, i, Sold :, j] = (
resAA[Sold :, j, : Sold, i].T.conj())
resAA[Sold :, i, : Sold, j] = (
resAA[: Sold, j, Sold :, i].T.conj())
resAA[Sold :, i, Sold :, j] = (
resAA[Sold :, j, Sold :, i].T.conj())
self.trainedModel.data.resAA = resAA
def _setupAffineBlocks(self, full : bool = True):
bmsg = "" if full else " of RHS"
if (full and not hasattr(self, "As")) or not hasattr(self, "bs"):
vbMng(self, "INIT",
"Computing affine blocks{} of system.".format(bmsg), 10)
if full:
self.As = self.HFEngine.affineLinearSystemA(self.mu0,
self.scaleFactor)
self.bs = self.HFEngine.affineLinearSystemb(self.mu0,
self.scaleFactor)
vbMng(self, "DEL",
"Done computing affine blocks{}.".format(bmsg), 10)
def assembleReducedResidualBlocks(self, full : bool = False):
"""Build affine blocks of affine decomposition of residual."""
self._setupAffineBlocks(full)
self.assembleReducedResidualBlocksbb(self.bs)
if full:
pMat = self.trainedModel.data.projMat
self.assembleReducedResidualBlocksAb(self.As, self.bs, pMat)
self.assembleReducedResidualBlocksAA(self.As, pMat)
