Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F63988524
generic_approximant_lagrange_greedy.py
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Thu, May 23, 19:55
Size
34 KB
Mime Type
text/x-python
Expires
Sat, May 25, 19:55 (2 d)
Engine
blob
Format
Raw Data
Handle
17822769
Attached To
R6746 RationalROMPy
generic_approximant_lagrange_greedy.py
View Options
# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from
copy
import
deepcopy
as
copy
import
numpy
as
np
from
rrompy.reduction_methods.base.generic_approximant
import
(
GenericApproximant
)
from
rrompy.reduction_methods.lagrange.generic_approximant_lagrange
import
(
GenericApproximantLagrange
)
from
rrompy.utilities.base.types
import
Np1D
,
Np2D
,
DictAny
,
HFEng
,
Tuple
,
List
from
rrompy.utilities.base
import
purgeDict
,
verbosityDepth
from
rrompy.utilities.exception_manager
import
(
RROMPyException
,
modeAssert
,
RROMPyWarning
)
# Public API of this module.
__all__ = ['GenericApproximantLagrangeGreedy']
class GenericApproximantLagrangeGreedy(GenericApproximantLagrange):
    """
    ROM greedy Lagrange interpolant computation for parametric problems
        (ABSTRACT).

    Args:
        HFEngine: HF problem solver.
        mu0(optional): Default parameter. Defaults to 0.
        approxParameters(optional): Dictionary containing values for main
            parameters of approximant. Recognized keys are:
            - 'POD': whether to compute POD of snapshots; defaults to True;
            - 'muBounds': list of bounds for parameter values; defaults to
                [[0], [1]];
            - 'greedyTol': uniform error tolerance for greedy algorithm;
                defaults to 1e-2;
            - 'interactive': whether to interactively terminate greedy
                algorithm; defaults to False;
            - 'maxIter': maximum number of greedy steps; defaults to 1e2;
            - 'refinementRatio': ratio of test points to be exhausted before
                test set refinement; defaults to 0.2;
            - 'nTestPoints': number of test points; defaults to maxIter /
                refinementRatio;
            - 'nTrainPoints': number of starting training points; defaults to
                1;
            - 'trainSetGenerator': training sample points generator; defaults
                to Chebyshev sampler within muBounds;
            - 'testSetGenerator': test sample points generator; defaults to
                uniform sampler within muBounds.
            Defaults to empty dict.
        homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False.
        verbosity(optional): Verbosity level. Defaults to 10.

    Attributes:
        HFEngine: HF problem solver.
        mu0: Default parameter.
        mus: Array of snapshot parameters.
        homogeneized: Whether to homogeneize Dirichlet BCs.
        approxParameters: Dictionary containing values for main parameters of
            approximant. Recognized keys are in parameterList.
        parameterList: Recognized keys of approximant parameters:
            - 'POD': whether to compute POD of snapshots;
            - 'muBounds': list of bounds for parameter values;
            - 'greedyTol': uniform error tolerance for greedy algorithm;
            - 'interactive': whether to interactively terminate greedy
                algorithm;
            - 'maxIter': maximum number of greedy steps;
            - 'refinementRatio': ratio of test points to be exhausted before
                test set refinement;
            - 'nTestPoints': number of test points;
            - 'nTrainPoints': number of starting training points;
            - 'trainSetGenerator': training sample points generator;
            - 'testSetGenerator': test sample points generator.
        extraApproxParameters: List of approxParameters keys in addition to
            mother class's.
        POD: whether to compute POD of snapshots.
        muBounds: list of bounds for parameter values.
        greedyTol: uniform error tolerance for greedy algorithm.
        maxIter: maximum number of greedy steps.
        refinementRatio: ratio of training points to be exhausted before
            training set refinement.
        nTrainPoints: number of starting training points.
        nTestPoints: number of test points.
        trainSetGenerator: training sample points generator.
        testSetGenerator: test sample points generator.
        robustTol: tolerance for robust Pade' denominator management.
        samplingEngine: Sampling engine.
        uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
            complex vector.
        lastSolvedHF: Wavenumber corresponding to last computed high fidelity
            solution.
        uApp: Last evaluated approximant as numpy complex vector.
    """

    # Relative growth factor of the error estimator that triggers preemptive
    # termination of the greedy loop (see greedy()): the loop is rolled back
    # when maxErrorEstOld < maxErrorEst * TOL_INSTABILITY.
    TOL_INSTABILITY = 1e-6
    def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
                 approxParameters : DictAny = {}, homogeneized : bool = False,
                 verbosity : int = 10, timestamp : bool = True):
        self.__preInit()
        # Register the greedy-specific keys recognized by approxParameters.
        self.__addParametersToList(["muBounds", "greedyTol", "interactive",
                                    "maxIter", "refinementRatio",
                                    "nTestPoints", "nTrainPoints",
                                    "trainSetGenerator", "testSetGenerator"])
        # NOTE(review): super() is anchored at GenericApproximantLagrange, so
        # this runs the initializer of GenericApproximantLagrange's own base
        # class, deliberately skipping GenericApproximantLagrange.__init__ —
        # confirm this skip is intended.
        super(GenericApproximantLagrange, self).__init__(
                                HFEngine = HFEngine, mu0 = mu0,
                                approxParameters = approxParameters,
                                homogeneized = homogeneized,
                                verbosity = verbosity,
                                timestamp = timestamp)
        self.__postInit()
@property
def
approxParameters
(
self
):
"""Value of approximant parameters. Its assignment may change S."""
return
self
.
_approxParameters
@approxParameters.setter
def
approxParameters
(
self
,
approxParams
):
approxParameters
=
purgeDict
(
approxParams
,
self
.
parameterList
,
dictname
=
self
.
name
()
+
".approxParameters"
,
baselevel
=
1
)
approxParametersCopy
=
purgeDict
(
approxParameters
,
[
"muBounds"
,
"greedyTol"
,
"interactive"
,
"maxIter"
,
"refinementRatio"
,
"nTestPoints"
,
"nTrainPoints"
,
"trainSetGenerator"
,
"testSetGenerator"
],
True
,
True
,
baselevel
=
1
)
GenericApproximant
.
approxParameters
.
fset
(
self
,
approxParametersCopy
)
keyList
=
list
(
approxParameters
.
keys
())
if
"muBounds"
in
keyList
:
self
.
muBounds
=
approxParameters
[
"muBounds"
]
elif
not
hasattr
(
self
,
"_muBounds"
)
or
self
.
muBounds
is
None
:
self
.
muBounds
=
[[
0.
],
[
1.
]]
if
"greedyTol"
in
keyList
:
self
.
greedyTol
=
approxParameters
[
"greedyTol"
]
elif
not
hasattr
(
self
,
"_greedyTol"
)
or
self
.
greedyTol
is
None
:
self
.
greedyTol
=
1e-2
if
"interactive"
in
keyList
:
self
.
interactive
=
approxParameters
[
"interactive"
]
elif
not
hasattr
(
self
,
"interactive"
)
or
self
.
interactive
is
None
:
self
.
interactive
=
False
if
"maxIter"
in
keyList
:
self
.
maxIter
=
approxParameters
[
"maxIter"
]
elif
not
hasattr
(
self
,
"_maxIter"
)
or
self
.
maxIter
is
None
:
self
.
maxIter
=
1e2
if
"refinementRatio"
in
keyList
:
self
.
refinementRatio
=
approxParameters
[
"refinementRatio"
]
elif
(
not
hasattr
(
self
,
"_refinementRatio"
)
or
self
.
refinementRatio
is
None
):
self
.
refinementRatio
=
0.2
if
"nTestPoints"
in
keyList
:
self
.
nTestPoints
=
approxParameters
[
"nTestPoints"
]
elif
(
not
hasattr
(
self
,
"_nTestPoints"
)
or
self
.
nTestPoints
is
None
):
self
.
nTestPoints
=
np
.
int
(
np
.
ceil
(
self
.
maxIter
/
self
.
refinementRatio
))
if
"nTrainPoints"
in
keyList
:
self
.
nTrainPoints
=
approxParameters
[
"nTrainPoints"
]
elif
not
hasattr
(
self
,
"_nTrainPoints"
)
or
self
.
nTrainPoints
is
None
:
self
.
nTrainPoints
=
1
if
"trainSetGenerator"
in
keyList
:
self
.
trainSetGenerator
=
approxParameters
[
"trainSetGenerator"
]
elif
(
not
hasattr
(
self
,
"_trainSetGenerator"
)
or
self
.
trainSetGenerator
is
None
):
from
rrompy.utilities.parameter_sampling
import
QuadratureSampler
self
.
trainSetGenerator
=
QuadratureSampler
(
self
.
muBounds
,
"CHEBYSHEV"
)
del
QuadratureSampler
if
"testSetGenerator"
in
keyList
:
self
.
testSetGenerator
=
approxParameters
[
"testSetGenerator"
]
elif
(
not
hasattr
(
self
,
"_testSetGenerator"
)
or
self
.
testSetGenerator
is
None
):
from
rrompy.utilities.parameter_sampling
import
QuadratureSampler
self
.
testSetGenerator
=
QuadratureSampler
(
self
.
muBounds
,
"UNIFORM"
)
del
QuadratureSampler
@property
def
S
(
self
):
"""Value of S."""
if
not
hasattr
(
self
,
"_mus"
)
or
self
.
mus
is
None
:
return
0
return
len
(
self
.
mus
)
@S.setter
def
S
(
self
,
S
):
raise
RROMPyException
((
"S is used just to simplify inheritance, and "
"its value cannot be changed."
))
@property
def
mus
(
self
):
"""Value of mus."""
return
self
.
_mus
@mus.setter
def
mus
(
self
,
mus
):
self
.
_mus
=
np
.
array
(
mus
,
dtype
=
np
.
complex
)
@property
def
muBounds
(
self
):
"""Value of muBounds."""
return
self
.
_muBounds
@muBounds.setter
def
muBounds
(
self
,
muBounds
):
if
len
(
muBounds
)
!=
2
:
raise
RROMPyException
(
"2 limits must be specified."
)
try
:
muBounds
=
muBounds
.
tolist
()
except
:
muBounds
=
list
(
muBounds
)
for
j
in
range
(
2
):
try
:
len
(
muBounds
[
j
])
except
:
muBounds
[
j
]
=
np
.
array
([
muBounds
[
j
]])
if
len
(
muBounds
[
0
])
!=
len
(
muBounds
[
1
]):
raise
RROMPyException
(
"The bounds must have the same length."
)
self
.
_muBounds
=
muBounds
@property
def
greedyTol
(
self
):
"""Value of greedyTol."""
return
self
.
_greedyTol
@greedyTol.setter
def
greedyTol
(
self
,
greedyTol
):
if
greedyTol
<
0
:
raise
RROMPyException
(
"greedyTol must be non-negative."
)
if
hasattr
(
self
,
"_greedyTol"
)
and
self
.
greedyTol
is
not
None
:
greedyTolold
=
self
.
greedyTol
else
:
greedyTolold
=
-
1
self
.
_greedyTol
=
greedyTol
self
.
_approxParameters
[
"greedyTol"
]
=
self
.
greedyTol
if
greedyTolold
!=
self
.
greedyTol
:
self
.
resetSamples
()
@property
def
maxIter
(
self
):
"""Value of maxIter."""
return
self
.
_maxIter
@maxIter.setter
def
maxIter
(
self
,
maxIter
):
if
maxIter
<=
0
:
raise
RROMPyException
(
"maxIter must be positive."
)
if
hasattr
(
self
,
"_maxIter"
)
and
self
.
maxIter
is
not
None
:
maxIterold
=
self
.
maxIter
else
:
maxIterold
=
-
1
self
.
_maxIter
=
maxIter
self
.
_approxParameters
[
"maxIter"
]
=
self
.
maxIter
if
maxIterold
!=
self
.
maxIter
:
self
.
resetSamples
()
@property
def
refinementRatio
(
self
):
"""Value of refinementRatio."""
return
self
.
_refinementRatio
@refinementRatio.setter
def
refinementRatio
(
self
,
refinementRatio
):
if
refinementRatio
<=
0.
or
refinementRatio
>
1.
:
raise
RROMPyException
((
"refinementRatio must be between 0 "
"(excluded) and 1."
))
if
(
hasattr
(
self
,
"_refinementRatio"
)
and
self
.
refinementRatio
is
not
None
):
refinementRatioold
=
self
.
refinementRatio
else
:
refinementRatioold
=
-
1
self
.
_refinementRatio
=
refinementRatio
self
.
_approxParameters
[
"refinementRatio"
]
=
self
.
refinementRatio
if
refinementRatioold
!=
self
.
refinementRatio
:
self
.
resetSamples
()
@property
def
nTestPoints
(
self
):
"""Value of nTestPoints."""
return
self
.
_nTestPoints
@nTestPoints.setter
def
nTestPoints
(
self
,
nTestPoints
):
if
nTestPoints
<=
0
:
raise
RROMPyException
(
"nTestPoints must be positive."
)
if
not
np
.
isclose
(
nTestPoints
,
np
.
int
(
nTestPoints
)):
raise
RROMPyException
(
"nTestPoints must be an integer."
)
nTestPoints
=
np
.
int
(
nTestPoints
)
if
hasattr
(
self
,
"_nTestPoints"
)
and
self
.
nTestPoints
is
not
None
:
nTestPointsold
=
self
.
nTestPoints
else
:
nTestPointsold
=
-
1
self
.
_nTestPoints
=
nTestPoints
self
.
_approxParameters
[
"nTestPoints"
]
=
self
.
nTestPoints
if
nTestPointsold
!=
self
.
nTestPoints
:
self
.
resetSamples
()
@property
def
nTrainPoints
(
self
):
"""Value of nTrainPoints."""
return
self
.
_nTrainPoints
@nTrainPoints.setter
def
nTrainPoints
(
self
,
nTrainPoints
):
if
nTrainPoints
<=
1
:
raise
RROMPyException
(
"nTrainPoints must be greater than 1."
)
if
not
np
.
isclose
(
nTrainPoints
,
np
.
int
(
nTrainPoints
)):
raise
RROMPyException
(
"nTrainPoints must be an integer."
)
nTrainPoints
=
np
.
int
(
nTrainPoints
)
if
(
hasattr
(
self
,
"_nTrainPoints"
)
and
self
.
nTrainPoints
is
not
None
):
nTrainPointsOld
=
self
.
nTrainPoints
else
:
nTrainPointsOld
=
-
1
self
.
_nTrainPoints
=
nTrainPoints
self
.
_approxParameters
[
"nTrainPoints"
]
=
self
.
nTrainPoints
if
nTrainPointsOld
!=
self
.
nTrainPoints
:
self
.
resetSamples
()
    @property
    def trainSetGenerator(self):
        """Value of trainSetGenerator."""
        return self._trainSetGenerator

    @trainSetGenerator.setter
    def trainSetGenerator(self, trainSetGenerator):
        # Duck-typed validation: any object exposing generatePoints works.
        if 'generatePoints' not in dir(trainSetGenerator):
            raise RROMPyException("trainSetGenerator type not recognized.")
        if (hasattr(self, '_trainSetGenerator')
        and self.trainSetGenerator is not None):
            trainSetGeneratorOld = self.trainSetGenerator
        self._trainSetGenerator = trainSetGenerator
        self._approxParameters["trainSetGenerator"] = self.trainSetGenerator
        # locals() is probed to tell whether an old value was bound above;
        # first-ever assignment always resets the samples.
        if (not 'trainSetGeneratorOld' in locals()
         or trainSetGeneratorOld != self.trainSetGenerator):
            self.resetSamples()
    @property
    def testSetGenerator(self):
        """Value of testSetGenerator."""
        return self._testSetGenerator

    @testSetGenerator.setter
    def testSetGenerator(self, testSetGenerator):
        # Duck-typed validation: any object exposing generatePoints works.
        if 'generatePoints' not in dir(testSetGenerator):
            raise RROMPyException("testSetGenerator type not recognized.")
        if (hasattr(self, '_testSetGenerator')
        and self.testSetGenerator is not None):
            testSetGeneratorOld = self.testSetGenerator
        self._testSetGenerator = testSetGenerator
        self._approxParameters["testSetGenerator"] = self.testSetGenerator
        # locals() is probed to tell whether an old value was bound above;
        # first-ever assignment always resets the samples.
        if (not 'testSetGeneratorOld' in locals()
         or testSetGeneratorOld != self.testSetGenerator):
            self.resetSamples()
def
resetSamples
(
self
):
"""Reset samples."""
super
()
.
resetSamples
()
self
.
_mus
=
[]
    def initEstNormer(self):
        """Initialize estimator norm engine (idempotent: built only once)."""
        if not hasattr(self, "estNormer"):
            # Local import of the project engine used purely for norms.
            from rrompy.hfengines.base import ProblemEngineBase as PEB
            self.estNormer = PEB() # L2 norm
            # Share the HF engine's function space, then assemble the norm.
            self.estNormer.V = self.HFEngine.V
            self.estNormer.buildEnergyNormForm()
def
errorEstimator
(
self
,
mus
:
List
[
np
.
complex
])
->
List
[
np
.
complex
]:
"""
Standard residual-based error estimator with explicit residual
computation.
"""
self
.
setupApprox
()
nmus
=
len
(
mus
)
err
=
np
.
empty
(
nmus
)
if
self
.
HFEngine
.
nbs
==
1
:
RHS
=
self
.
getRHS
(
mus
[
0
],
homogeneized
=
self
.
homogeneized
)
RHSNorm
=
self
.
estNormer
.
norm
(
RHS
)
for
j
in
range
(
nmus
):
res
=
self
.
getRes
(
mus
[
j
],
homogeneized
=
self
.
homogeneized
)
err
[
j
]
=
self
.
estNormer
.
norm
(
res
)
/
RHSNorm
else
:
for
j
in
range
(
nmus
):
res
=
self
.
getRes
(
mus
[
j
],
homogeneized
=
self
.
homogeneized
)
RHS
=
self
.
getRHS
(
mus
[
j
],
homogeneized
=
self
.
homogeneized
)
err
[
j
]
=
self
.
estNormer
.
norm
(
res
)
/
self
.
estNormer
.
norm
(
RHS
)
return
np
.
abs
(
err
)
    def getMaxErrorEstimator(self, mus, plot : bool = False) \
                                               -> Tuple[Np1D, int, float]:
        """
        Compute maximum of (and index of maximum of) error estimator over given
            parameters.

        Args:
            mus: Parameter values at which to evaluate the estimator.
            plot: Whether to plot the estimator profile. Defaults to False.

        Returns:
            (estimator values, index of maximum, maximum value).
        """
        errorEstTest = self.errorEstimator(mus)
        idxMaxEst = np.argmax(errorEstTest)
        maxEst = errorEstTest[idxMaxEst]
        # Skip plotting when every estimate is infinite (nothing meaningful
        # to show on a log axis).
        if plot and not np.all(np.isinf(errorEstTest)):
            from matplotlib import pyplot as plt
            onemus = np.ones(self.S)
            plt.figure()
            # Estimator profile over the test parameters.
            plt.semilogy(np.real(mus), errorEstTest, 'k')
            # Horizontal line marking the greedy tolerance.
            plt.semilogy(np.real(mus[[0, -1]]), [self.greedyTol] * 2, 'r--')
            # Current snapshot locations (plotted at 2 * greedyTol).
            plt.semilogy(np.real(self.mus), 2. * self.greedyTol * onemus,
                         '*m')
            # Location of the estimator maximum.
            plt.semilogy(np.real(mus[idxMaxEst]), maxEst, 'xr')
            plt.grid()
            plt.show()
            plt.close()
        return errorEstTest, idxMaxEst, maxEst
    def greedyNextSample(self, muidx:int, plotEst : bool = False) \
                                          -> Tuple[Np1D, int, float, complex]:
        """
        Compute next greedy snapshot of solution map.

        Args:
            muidx: Index (into muTest) of the parameter to promote to the
                training set.
            plotEst: Whether to plot the estimator profile. Defaults to False.

        Returns:
            (estimator values over the updated test set, index of the new
            estimator maximum, maximum estimate, corresponding parameter).
        """
        modeAssert(self._mode, message = "Cannot add greedy sample.")
        mu = self.muTest[muidx]
        if self.verbosity >= 2:
            verbosityDepth("MAIN", ("Adding {}-th sample point at {} to "
                                    "training set.").format(
                                       self.samplingEngine.nsamples + 1, mu),
                           timestamp = self.timestamp)
        # Move mu from the test set to the training set.
        self.mus = np.append(self.mus, mu)
        idxs = np.arange(len(self.muTest))
        mask = np.ones_like(idxs, dtype = bool)
        mask[muidx] = False
        idxs = idxs[mask]
        self.muTest = self.muTest[idxs]
        # Solve the HF problem at the new sample point.
        self.samplingEngine.nextSample(mu,
                                       homogeneized = self.homogeneized)
        # Re-evaluate the estimator on the reduced test set.
        errorEstTest, muidx, maxErrorEst = self.getMaxErrorEstimator(
                                                        self.muTest, plotEst)
        return errorEstTest, muidx, maxErrorEst, self.muTest[muidx]
def
__preliminaryTraining
(
self
):
"""Initialize starting snapshots of solution map."""
modeAssert
(
self
.
_mode
,
message
=
"Cannot start greedy algorithm."
)
if
self
.
samplingEngine
.
samples
is
not
None
:
return
if
self
.
verbosity
>=
2
:
verbosityDepth
(
"INIT"
,
"Starting computation of snapshots."
,
timestamp
=
self
.
timestamp
)
self
.
resetSamples
()
self
.
mus
,
_
=
self
.
trainSetGenerator
.
generatePoints
(
self
.
nTrainPoints
)
muTestBase
,
_
=
self
.
testSetGenerator
.
generatePoints
(
self
.
nTestPoints
)
proxVal
=
np
.
min
(
np
.
abs
(
muTestBase
.
reshape
(
-
1
,
1
)
-
np
.
tile
(
self
.
mus
.
reshape
(
1
,
-
1
),
[
self
.
nTestPoints
,
1
])),
axis
=
1
)
proxMask
=
~
(
proxVal
<
1e-12
*
np
.
abs
(
muTestBase
[
0
]
-
muTestBase
[
-
1
]))
self
.
muTest
=
np
.
empty
(
np
.
sum
(
proxMask
)
+
1
,
dtype
=
np
.
complex
)
self
.
muTest
[:
-
1
]
=
np
.
sort
(
muTestBase
[
proxMask
])
.
flatten
()
self
.
muTest
[
-
1
]
=
self
.
mus
[
-
1
]
self
.
mus
=
self
.
mus
[:
-
1
]
for
j
in
range
(
len
(
self
.
mus
)):
if
self
.
verbosity
>=
2
:
verbosityDepth
(
"MAIN"
,
(
"Adding {}-th sample point at {} to training "
"set."
)
.
format
(
self
.
samplingEngine
.
nsamples
+
1
,
self
.
mus
[
j
]),
timestamp
=
self
.
timestamp
)
self
.
samplingEngine
.
nextSample
(
self
.
mus
[
j
],
homogeneized
=
self
.
homogeneized
)
def
__enrichTestSet
(
self
,
nTest
:
int
):
"""Double number of elements of test set."""
muTestExtra
,
_
=
self
.
testSetGenerator
.
generatePoints
(
2
*
nTest
)
muGiven
=
np
.
append
(
self
.
mus
,
self
.
muTest
)
.
reshape
(
1
,
-
1
)
proxVal
=
np
.
min
(
np
.
abs
(
muTestExtra
.
reshape
(
-
1
,
1
)
-
np
.
tile
(
muGiven
,
[
2
*
nTest
,
1
])),
axis
=
1
)
proxMask
=
~
(
proxVal
<
1e-12
*
np
.
abs
(
muTestExtra
[
0
]
-
muTestExtra
[
-
1
]))
muTestNew
=
np
.
empty
(
len
(
self
.
muTest
)
+
np
.
sum
(
proxMask
),
dtype
=
np
.
complex
)
muTestNew
[:
len
(
self
.
muTest
)]
=
self
.
muTest
muTestNew
[
len
(
self
.
muTest
)
:]
=
muTestExtra
[
proxMask
]
self
.
muTest
=
np
.
sort
(
muTestNew
)
if
self
.
verbosity
>=
5
:
verbosityDepth
(
"MAIN"
,
"Enriching test set by {} elements."
.
format
(
np
.
sum
(
proxMask
)),
timestamp
=
self
.
timestamp
)
    def greedy(self, plotEst : bool = False):
        """
        Compute greedy snapshots of solution map.

        Iteratively adds the test point with the largest estimated error to
        the training set until the tolerance or iteration budget is met; rolls
        back the last step if the estimator becomes unstable.

        Args:
            plotEst: Whether to plot the estimator profile at each step.
                Defaults to False.
        """
        modeAssert(self._mode, message = "Cannot start greedy algorithm.")
        if self.samplingEngine.samples is not None:
            return
        self.__preliminaryTraining()
        nTest = self.nTestPoints
        # -1 picks the last training point queued by __preliminaryTraining.
        errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample(-1,
                                                                     plotEst)
        if self.verbosity >= 2:
            verbosityDepth("MAIN", ("Uniform testing error estimate "
                                    "{:.4e}.").format(maxErrorEst),
                           timestamp = self.timestamp)
        # Keep a copy of the trained model for rollback on instability.
        trainedModelOld = copy(self.trainedModel)
        while (self.samplingEngine.nsamples < self.maxIter
           and maxErrorEst > self.greedyTol):
            # Refill the test set once refinementRatio of it is exhausted.
            if (1. - self.refinementRatio) * nTest > len(self.muTest):
                self.__enrichTestSet(nTest)
                nTest = len(self.muTest)
            muTestOld, maxErrorEstOld = self.muTest, maxErrorEst
            errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample(
                                                               muidx, plotEst)
            if self.verbosity >= 2:
                verbosityDepth("MAIN", ("Uniform testing error estimate "
                                        "{:.4e}.").format(maxErrorEst),
                               timestamp = self.timestamp)
            # Instability guard: NaN/inf estimate, or the estimate blew up by
            # more than 1 / TOL_INSTABILITY; undo the last sample and stop.
            if (np.isnan(maxErrorEst) or np.isinf(maxErrorEst)
             or maxErrorEstOld < maxErrorEst * self.TOL_INSTABILITY):
                RROMPyWarning(("Instability in a posteriori estimator. "
                               "Starting preemptive greedy loop termination."))
                maxErrorEst = maxErrorEstOld
                self.muTest = muTestOld
                self.mus = self.mus[: -1]
                self.samplingEngine.popSample()
                self.trainedModel.data = copy(trainedModelOld.data)
                break
            trainedModelOld.data = copy(self.trainedModel.data)
            # Interactive hooks: optionally tighten the tolerance...
            if (self.interactive and maxErrorEst <= self.greedyTol):
                verbosityDepth("MAIN", ("Required tolerance {} achieved. Want "
                                        "to decrease greedyTol and continue? "
                                        "Y/N").format(self.greedyTol),
                               timestamp = self.timestamp, end = "")
                increasemaxIter = input()
                if increasemaxIter.upper() == "Y":
                    verbosityDepth("MAIN", "Reducing value of greedyTol...",
                                   timestamp = self.timestamp)
                    # Halve until the current estimate exceeds the tolerance.
                    while maxErrorEst <= self._greedyTol:
                        self._greedyTol *= .5
            # ...or extend the iteration budget.
            if (self.interactive
            and self.samplingEngine.nsamples >= self.maxIter):
                verbosityDepth("MAIN", ("Maximum number of iterations {} "
                                        "reached. Want to increase maxIter "
                                        "and continue? Y/N").format(
                                                                self.maxIter),
                               timestamp = self.timestamp, end = "")
                increasemaxIter = input()
                if increasemaxIter.upper() == "Y":
                    verbosityDepth("MAIN", "Doubling value of maxIter...",
                                   timestamp = self.timestamp)
                    self._maxIter *= 2
        if self.verbosity >= 2:
            verbosityDepth("DEL", ("Done computing snapshots (final snapshot "
                                   "count: {}).").format(
                                              self.samplingEngine.nsamples),
                           timestamp = self.timestamp)
def
checkComputedApprox
(
self
)
->
bool
:
"""
Check if setup of new approximant is not needed.
Returns:
True if new setup is not needed. False otherwise.
"""
return
(
super
()
.
checkComputedApprox
()
and
self
.
S
==
self
.
trainedModel
.
data
.
projMat
.
shape
[
1
])
def
computeScaleFactor
(
self
):
"""Compute parameter rescaling factor."""
modeAssert
(
self
.
_mode
,
message
=
"Cannot compute rescaling factor."
)
self
.
scaleFactor
=
np
.
abs
(
np
.
power
(
self
.
trainSetGenerator
.
lims
[
0
],
self
.
HFEngine
.
rescalingExp
)
-
np
.
power
(
self
.
trainSetGenerator
.
lims
[
1
],
self
.
HFEngine
.
rescalingExp
))
/
2.
def
assembleReducedResidualGramian
(
self
,
pMat
:
Np2D
):
"""
Build residual gramian of reduced linear system through projections.
"""
self
.
initEstNormer
()
if
(
not
hasattr
(
self
.
trainedModel
.
data
,
"gramian"
)
or
self
.
trainedModel
.
data
.
gramian
is
None
):
gramian
=
self
.
estNormer
.
innerProduct
(
pMat
,
pMat
)
else
:
Sold
=
self
.
trainedModel
.
data
.
gramian
.
shape
[
0
]
if
Sold
>
self
.
S
:
gramian
=
self
.
trainedModel
.
data
.
gramian
[:
self
.
S
,
:
self
.
S
]
else
:
gramian
=
np
.
empty
((
self
.
S
,
self
.
S
),
dtype
=
np
.
complex
)
gramian
[:
Sold
,
:
Sold
]
=
self
.
trainedModel
.
data
.
gramian
gramian
[:
Sold
,
Sold
:]
=
(
self
.
estNormer
.
innerProduct
(
pMat
[:,
Sold
:],
pMat
[:,
:
Sold
]))
gramian
[
Sold
:,
:
Sold
]
=
gramian
[:
Sold
,
Sold
:]
.
T
.
conj
()
gramian
[
Sold
:,
Sold
:]
=
(
self
.
estNormer
.
innerProduct
(
pMat
[:,
Sold
:],
pMat
[:,
Sold
:]))
self
.
trainedModel
.
data
.
gramian
=
gramian
def
assembleReducedResidualBlocksbb
(
self
,
bs
:
List
[
Np1D
],
pMat
:
Np2D
,
scaling
:
float
=
1.
):
"""
Build blocks (of type bb) of reduced linear system through projections.
"""
self
.
initEstNormer
()
nbs
=
len
(
bs
)
if
(
not
hasattr
(
self
.
trainedModel
.
data
,
"resbb"
)
or
self
.
trainedModel
.
data
.
resbb
is
None
):
resbb
=
np
.
empty
((
nbs
,
nbs
),
dtype
=
np
.
complex
)
for
i
in
range
(
nbs
):
Mbi
=
scaling
**
i
*
bs
[
i
]
resbb
[
i
,
i
]
=
self
.
estNormer
.
innerProduct
(
Mbi
,
Mbi
)
for
j
in
range
(
i
):
Mbj
=
scaling
**
j
*
bs
[
j
]
resbb
[
i
,
j
]
=
self
.
estNormer
.
innerProduct
(
Mbj
,
Mbi
)
for
i
in
range
(
nbs
):
for
j
in
range
(
i
+
1
,
nbs
):
resbb
[
i
,
j
]
=
resbb
[
j
,
i
]
.
conj
()
self
.
trainedModel
.
data
.
resbb
=
resbb
def
assembleReducedResidualBlocksAb
(
self
,
As
:
List
[
Np2D
],
bs
:
List
[
Np1D
],
pMat
:
Np2D
,
scaling
:
float
=
1.
):
"""
Build blocks (of type Ab) of reduced linear system through projections.
"""
self
.
initEstNormer
()
nAs
=
len
(
As
)
nbs
=
len
(
bs
)
if
(
not
hasattr
(
self
.
trainedModel
.
data
,
"resAb"
)
or
self
.
trainedModel
.
data
.
resAb
is
None
):
resAb
=
np
.
empty
((
nbs
,
self
.
S
,
nAs
),
dtype
=
np
.
complex
)
for
j
in
range
(
nAs
):
MAj
=
scaling
**
(
j
+
1
)
*
As
[
j
]
.
dot
(
pMat
)
for
i
in
range
(
nbs
):
Mbi
=
scaling
**
(
i
+
1
)
*
bs
[
i
]
resAb
[
i
,
:,
j
]
=
self
.
estNormer
.
innerProduct
(
MAj
,
Mbi
)
else
:
Sold
=
self
.
trainedModel
.
data
.
resAb
.
shape
[
1
]
if
Sold
==
self
.
S
:
return
if
Sold
>
self
.
S
:
resAb
=
self
.
trainedModel
.
data
.
resAb
[:,
:
self
.
S
,
:]
else
:
resAb
=
np
.
empty
((
nbs
,
self
.
S
,
nAs
),
dtype
=
np
.
complex
)
resAb
[:,
:
Sold
,
:]
=
self
.
trainedModel
.
data
.
resAb
for
j
in
range
(
nAs
):
MAj
=
scaling
**
(
j
+
1
)
*
As
[
j
]
.
dot
(
pMat
[:,
Sold
:])
for
i
in
range
(
nbs
):
Mbi
=
scaling
**
(
i
+
1
)
*
bs
[
i
]
resAb
[
i
,
Sold
:,
j
]
=
self
.
estNormer
.
innerProduct
(
MAj
,
Mbi
)
self
.
trainedModel
.
data
.
resAb
=
resAb
def
assembleReducedResidualBlocksAA
(
self
,
As
:
List
[
Np2D
],
pMat
:
Np2D
,
scaling
:
float
=
1.
,
basic
:
bool
=
False
):
"""
Build blocks (of type AA) of reduced linear system through projections.
"""
self
.
initEstNormer
()
nAs
=
len
(
As
)
if
(
not
hasattr
(
self
.
trainedModel
.
data
,
"resAA"
)
or
self
.
trainedModel
.
data
.
resAA
is
None
):
if
basic
:
MAEnd
=
scaling
**
nAs
*
As
[
-
1
]
.
dot
(
pMat
)
resAA
=
self
.
estNormer
.
innerProduct
(
MAEnd
,
MAEnd
)
else
:
resAA
=
np
.
empty
((
self
.
S
,
nAs
,
self
.
S
,
nAs
),
dtype
=
np
.
complex
)
for
i
in
range
(
nAs
):
MAi
=
scaling
**
(
i
+
1
)
*
As
[
i
]
.
dot
(
pMat
)
resAA
[:,
i
,
:,
i
]
=
self
.
estNormer
.
innerProduct
(
MAi
,
MAi
)
for
j
in
range
(
i
):
MAj
=
scaling
**
(
j
+
1
)
*
As
[
j
]
.
dot
(
pMat
)
resAA
[:,
i
,
:,
j
]
=
self
.
estNormer
.
innerProduct
(
MAj
,
MAi
)
for
i
in
range
(
nAs
):
for
j
in
range
(
i
+
1
,
nAs
):
resAA
[:,
i
,
:,
j
]
=
resAA
[:,
j
,
:,
i
]
.
T
.
conj
()
else
:
Sold
=
self
.
trainedModel
.
data
.
resAA
.
shape
[
0
]
if
Sold
==
self
.
S
:
return
if
Sold
>
self
.
S
:
if
basic
:
resAA
=
self
.
trainedModel
.
data
.
resAA
[:
self
.
S
,
:
self
.
S
]
else
:
resAA
=
self
.
trainedModel
.
data
.
resAA
[:
self
.
S
,
:,
:
self
.
S
,
:]
else
:
if
basic
:
resAA
=
np
.
empty
((
self
.
S
,
self
.
S
),
dtype
=
np
.
complex
)
resAA
[:
Sold
,
:
Sold
]
=
self
.
trainedModel
.
data
.
resAA
MAi
=
scaling
**
nAs
*
As
[
-
1
]
.
dot
(
pMat
)
resAA
[:
Sold
,
Sold
:]
=
(
self
.
estNormer
.
innerProduct
(
MAi
[:,
Sold
:],
MAi
[:,
:
Sold
]))
resAA
[
Sold
:,
:
Sold
]
=
resAA
[:
Sold
,
Sold
:]
.
T
.
conj
()
resAA
[
Sold
:,
Sold
:]
=
(
self
.
estNormer
.
innerProduct
(
MAi
[:,
Sold
:],
MAi
[:,
Sold
:]))
else
:
resAA
=
np
.
empty
((
self
.
S
,
nAs
,
self
.
S
,
nAs
),
dtype
=
np
.
complex
)
resAA
[:
Sold
,
:,
:
Sold
,
:]
=
self
.
trainedModel
.
data
.
resAA
for
i
in
range
(
nAs
):
MAi
=
scaling
**
(
i
+
1
)
*
As
[
i
]
.
dot
(
pMat
)
resAA
[:
Sold
,
i
,
Sold
:,
i
]
=
(
self
.
estNormer
.
innerProduct
(
MAi
[:,
Sold
:],
MAi
[:,
:
Sold
]))
resAA
[
Sold
:,
i
,
:
Sold
,
i
]
=
resAA
[:
Sold
,
i
,
Sold
:,
i
]
.
T
.
conj
()
resAA
[
Sold
:,
i
,
Sold
:,
i
]
=
(
self
.
estNormer
.
innerProduct
(
MAi
[:,
Sold
:],
MAi
[:,
Sold
:]))
for
j
in
range
(
i
):
MAj
=
scaling
**
(
j
+
1
)
*
As
[
j
]
.
dot
(
pMat
)
resAA
[:
Sold
,
i
,
Sold
:,
j
]
=
(
self
.
estNormer
.
innerProduct
(
MAj
[:,
Sold
:],
MAi
[:,
:
Sold
]))
resAA
[
Sold
:,
i
,
:
Sold
,
j
]
=
(
self
.
estNormer
.
innerProduct
(
MAj
[:,
:
Sold
],
MAi
[:,
Sold
:]))
resAA
[
Sold
:,
i
,
Sold
:,
j
]
=
(
self
.
estNormer
.
innerProduct
(
MAj
[:,
Sold
:],
MAi
[:,
Sold
:]))
for
i
in
range
(
nAs
):
for
j
in
range
(
i
+
1
,
nAs
):
resAA
[:
Sold
,
i
,
Sold
:,
j
]
=
(
resAA
[
Sold
:,
j
,
:
Sold
,
i
]
.
T
.
conj
())
resAA
[
Sold
:,
i
,
:
Sold
,
j
]
=
(
resAA
[:
Sold
,
j
,
Sold
:,
i
]
.
T
.
conj
())
resAA
[
Sold
:,
i
,
Sold
:,
j
]
=
(
resAA
[
Sold
:,
j
,
Sold
:,
i
]
.
T
.
conj
())
self
.
trainedModel
.
data
.
resAA
=
resAA
Event Timeline
Log In to Comment