rb_distributed_greedy.py

# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from copy import deepcopy as copy
from .generic_distributed_greedy_approximant import \
GenericDistributedGreedyApproximant
from rrompy.reduction_methods.distributed import RBDistributed
from rrompy.reduction_methods.trained_model import TrainedModelRB as tModel
from rrompy.reduction_methods.trained_model import TrainedModelData
from rrompy.utilities.base.types import Np1D, DictAny, HFEng
from rrompy.utilities.base import verbosityDepth
from rrompy.utilities.exception_manager import RROMPyException
from rrompy.parameter import parameter, checkParameterList
__all__ = ['RBDistributedGreedy']
class RBDistributedGreedy(GenericDistributedGreedyApproximant, RBDistributed):
"""
ROM greedy RB approximant computation for parametric problems.
Args:
HFEngine: HF problem solver.
mu0(optional): Default parameter. Defaults to 0.
approxParameters(optional): Dictionary containing values for main
parameters of approximant. Recognized keys are:
- 'POD': whether to compute POD of snapshots; defaults to True;
- 'muBounds': list of bounds for parameter values; defaults to
[0, 1];
- 'S': number of starting training points; defaults to 2;
- 'sampler': sample point generator; defaults to uniform sampler on
muBounds;
- 'greedyTol': uniform error tolerance for greedy algorithm;
defaults to 1e-2;
- 'maxIter': maximum number of greedy steps; defaults to 1e2;
- 'refinementRatio': ratio of training points to be exhausted
before training set refinement; defaults to 0.2;
- 'nTestPoints': number of test points; defaults to maxIter /
refinementRatio;
- 'trainSetGenerator': training sample points generator; defaults
to Chebyshev sampler within muBounds.
Defaults to empty dict.
homogeneized(optional): Whether to homogeneize Dirichlet BCs. Defaults
to False.
        verbosity(optional): Verbosity level. Defaults to 10.
        timestamp(optional): Whether to print timestamp when printing to
            screen. Defaults to True.
Attributes:
HFEngine: HF problem solver.
mu0: Default parameter.
mus: Array of snapshot parameters.
homogeneized: Whether to homogeneize Dirichlet BCs.
approxParameters: Dictionary containing values for main parameters of
approximant. Recognized keys are in parameterList.
parameterList: Recognized keys of approximant parameters:
- 'POD': whether to compute POD of snapshots;
- 'muBounds': list of bounds for parameter values;
- 'S': number of starting training points;
- 'sampler': sample point generator;
- 'greedyTol': uniform error tolerance for greedy algorithm;
- 'maxIter': maximum number of greedy steps;
- 'refinementRatio': ratio of training points to be exhausted
before training set refinement;
- 'nTestPoints': number of test points;
- 'trainSetGenerator': training sample points generator.
extraApproxParameters: List of approxParameters keys in addition to
            parent class's.
POD: whether to compute POD of snapshots.
muBounds: list of bounds for parameter values.
        S: number of starting training points.
sampler: Sample point generator.
greedyTol: uniform error tolerance for greedy algorithm.
maxIter: maximum number of greedy steps.
refinementRatio: ratio of training points to be exhausted before
training set refinement.
        nTestPoints: number of test points.
trainSetGenerator: training sample points generator.
samplingEngine: Sampling engine.
estimatorNormEngine: Engine for estimator norm computation.
uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
complex vector.
lastSolvedHF: Wavenumber corresponding to last computed high fidelity
solution.
uApp: Last evaluated approximant as numpy complex vector.
As: List of sparse matrices (in CSC format) representing coefficients
of linear system matrix wrt theta(mu).
bs: List of numpy vectors representing coefficients of linear system
RHS wrt theta(mu).
thetaAs: List of callables representing coefficients of linear system
matrix wrt mu.
thetabs: List of callables representing coefficients of linear system
RHS wrt mu.
ARBs: List of sparse matrices (in CSC format) representing coefficients
of compressed linear system matrix wrt theta(mu).
bRBs: List of numpy vectors representing coefficients of compressed
linear system RHS wrt theta(mu).
"""
def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
approxParameters : DictAny = {}, homogeneized : bool = False,
verbosity : int = 10, timestamp : bool = True):
self._preInit()
super().__init__(HFEngine = HFEngine, mu0 = mu0,
approxParameters = approxParameters,
homogeneized = homogeneized,
verbosity = verbosity, timestamp = timestamp)
if self.verbosity >= 10:
verbosityDepth("INIT", "Computing affine blocks of system.",
timestamp = self.timestamp)
self.As = self.HFEngine.affineLinearSystemA(self.mu0)
self.bs = self.HFEngine.affineLinearSystemb(self.mu0,
self.homogeneized)
if self.verbosity >= 10:
verbosityDepth("DEL", "Done computing affine blocks.",
timestamp = self.timestamp)
self._postInit()
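    # The inherited parameter R is tied to S and cannot be set independently;
    # the property below exists only to keep the RBDistributed interface
    # consistent (see the setter's exception message).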
@property
def R(self):
"""Value of R."""
return self._S
@R.setter
def R(self, R):
raise RROMPyException(("R is used just to simplify inheritance, and "
"its value cannot be changed from that of S."))
def errorEstimator(self, mus:Np1D) -> Np1D:
"""
Standard residual-based error estimator. Unreliable for unstable
problems (inf-sup constant is missing).
"""
self.setupApprox()
self.assembleReducedResidualBlocks()
nmus = len(mus)
nAs = self.trainedModel.data.resAA.shape[1]
nbs = self.trainedModel.data.resbb.shape[0]
thetaAs = self.trainedModel.data.thetaAs
thetabs = self.trainedModel.data.thetabs
        radiusA = np.empty((len(self.mus), nAs, nmus), dtype = complex)
        radiusb = np.empty((nbs, nmus), dtype = complex)
verb = self.trainedModel.verbosity
self.trainedModel.verbosity = 0
if verb >= 5:
mustr = mus
if nmus > 2:
mustr = "[{} ..({}).. {}]".format(mus[0], nmus - 2, mus[-1])
verbosityDepth("INIT", ("Computing RB solution at mu = "
"{}.").format(mustr),
timestamp = self.timestamp)
parmus, _ = checkParameterList(mus)
uApps = self.getApproxReduced(parmus)
for j, mu in enumerate(parmus):
uApp = uApps[j]
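            # thetaAs / thetabs hold affine coefficient expressions that are
            # evaluated at the current parameter; eval relies on the loop
            # variable mu being in the local scope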
for i in range(nAs):
radiusA[:, i, j] = eval(thetaAs[i]) * uApp
for i in range(nbs):
radiusb[i, j] = eval(thetabs[i])
if verb >= 5:
verbosityDepth("DEL", "Done computing RB solution.",
timestamp = self.timestamp)
self.trainedModel.verbosity = verb
# 'ij,jk,ik->k', resbb, radiusb, radiusb.conj()
ff = np.sum(self.trainedModel.data.resbb.dot(radiusb) * radiusb.conj(),
axis = 0)
# 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj()
Lf = np.sum(np.tensordot(self.trainedModel.data.resAb, radiusA, 2)
* radiusb.conj(), axis = 0)
# 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj()
LL = np.sum(np.tensordot(self.trainedModel.data.resAA, radiusA, 2)
* radiusA.conj(), axis = (0, 1))
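        # the squared residual norm ||A(mu) uApp - b(mu)||^2 expands as
        # LL - 2 Re(Lf) + ff; dividing by ff = ||b(mu)||^2 yields a relative
        # error estimate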
return np.abs((LL - 2. * np.real(Lf) + ff) / ff) ** .5
def setupApprox(self, plotEst : bool = False):
"""Compute RB projection matrix."""
if self.checkComputedApprox():
return
if self.verbosity >= 5:
            verbosityDepth("INIT", "Setting up {}.".format(self.name()),
timestamp = self.timestamp)
self.greedy(plotEst)
if self.verbosity >= 7:
verbosityDepth("INIT", "Computing projection matrix.",
timestamp = self.timestamp)
pMat = self.samplingEngine.samples
if self.trainedModel is None:
self.trainedModel = tModel()
self.trainedModel.verbosity = self.verbosity
self.trainedModel.timestamp = self.timestamp
data = TrainedModelData(self.trainedModel.name(), self.mu0,
pMat, self.HFEngine.rescalingExp)
data.thetaAs = self.HFEngine.affineWeightsA(self.mu0)
data.thetabs = self.HFEngine.affineWeightsb(self.mu0,
self.homogeneized)
data.ARBs, data.bRBs = self.assembleReducedSystem(pMat)
data.mus = copy(self.mus)
self.trainedModel.data = data
else:
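            # incremental update: only the snapshot columns added since the
            # last call are passed as new, together with the old projection
            # matrix, so the reduced system is updated rather than rebuilt
            # from scratch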
pMatOld = self.trainedModel.data.projMat
Sold = pMatOld.shape[1]
idxNew = list(range(Sold, pMat.shape[1]))
ARBs, bRBs = self.assembleReducedSystem(pMat(idxNew), pMatOld)
self.trainedModel.data.ARBs = ARBs
self.trainedModel.data.bRBs = bRBs
self.trainedModel.data.projMat = copy(pMat)
self.trainedModel.data.mus = copy(self.mus)
if self.verbosity >= 7:
verbosityDepth("DEL", "Done computing projection matrix.",
timestamp = self.timestamp)
self.trainedModel.data.approxParameters = copy(self.approxParameters)
if self.verbosity >= 5:
verbosityDepth("DEL", "Done setting up approximant.",
timestamp = self.timestamp)
def assembleReducedResidualBlocks(self):
"""Build affine blocks of RB linear system through projections."""
computeResbb = not hasattr(self.trainedModel.data, "resbb")
computeResAb = (not hasattr(self.trainedModel.data, "resAb")
or self.trainedModel.data.resAb.shape[1] != len(self.mus))
computeResAA = (not hasattr(self.trainedModel.data, "resAA")
or self.trainedModel.data.resAA.shape[0] != len(self.mus))
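        # the shape checks detect snapshots added by later greedy iterations,
        # in which case the corresponding residual blocks must be reassembled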
if computeResbb or computeResAb or computeResAA:
if self.verbosity >= 7:
verbosityDepth("INIT", "Projecting affine terms of residual.",
timestamp = self.timestamp)
if computeResAb or computeResAA:
pMat = self.trainedModel.data.projMat
if computeResbb:
self.assembleReducedResidualBlocksbb(self.bs)
if computeResAb:
self.assembleReducedResidualBlocksAb(self.As, self.bs, pMat)
if computeResAA:
self.assembleReducedResidualBlocksAA(self.As, pMat)
if self.verbosity >= 7:
verbosityDepth("DEL", ("Done setting up affine decomposition "
"of residual."),
timestamp = self.timestamp)
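# A minimal usage sketch (illustrative only): "engine" below stands for a
# user-supplied HF solver exposing the affine interface required above
# (affineLinearSystemA/b and affineWeightsA/b), and the parameter values are
# arbitrary examples of the keys documented in the class docstring.
#
#     engine = ...  # hypothetical affine-parametric HF engine
#     approx = RBDistributedGreedy(engine, mu0 = 1.,
#                                  approxParameters = {'S': 2,
#                                                      'muBounds': [0., 1.],
#                                                      'greedyTol': 1e-2,
#                                                      'maxIter': 50},
#                                  verbosity = 10)
#     approx.setupApprox()               # greedy loop + RB projection
#     estimates = approx.errorEstimator(np.linspace(0., 1., 100))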
