Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F85107611
rational_interpolant_greedy.py
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Thu, Sep 26, 20:11
Size
26 KB
Mime Type
text/x-python
Expires
Sat, Sep 28, 20:11 (2 d)
Engine
blob
Format
Raw Data
Handle
21129807
Attached To
R6746 RationalROMPy
rational_interpolant_greedy.py
View Options
# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#
from
copy
import
deepcopy
as
copy
import
numpy
as
np
from
rrompy.hfengines.base.linear_affine_engine
import
checkIfAffine
from
.generic_greedy_approximant
import
GenericGreedyApproximant
from
rrompy.utilities.poly_fitting.polynomial
import
(
polybases
,
polyfitname
,
PolynomialInterpolator
as
PI
,
polyvander
)
from
rrompy.utilities.numerical
import
dot
from
rrompy.utilities.numerical.degree
import
totalDegreeN
from
rrompy.utilities.expression
import
expressionEvaluator
from
rrompy.reduction_methods.standard
import
RationalInterpolant
from
rrompy.utilities.base.types
import
Np1D
,
Tuple
,
paramVal
,
List
from
rrompy.utilities.base.verbosity_depth
import
(
verbosityManager
as
vbMng
,
getVerbosityDepth
,
setVerbosityDepth
)
from
rrompy.utilities.poly_fitting
import
customFit
from
rrompy.utilities.exception_manager
import
(
RROMPyWarning
,
RROMPyException
,
RROMPyAssert
,
RROMPy_FRAGILE
)
from
rrompy.sampling
import
sampleList
,
emptySampleList
# Public API of this module: only the greedy rational interpolant class.
__all__ = ['RationalInterpolantGreedy']
class RationalInterpolantGreedy(GenericGreedyApproximant, RationalInterpolant):
    """
    ROM greedy rational interpolant computation for parametric problems.

    Args:
        HFEngine: HF problem solver.
        mu0(optional): Default parameter. Defaults to 0.
        approxParameters(optional): Dictionary containing values for main
            parameters of approximant. Recognized keys are:
            - 'POD': whether to compute POD of snapshots; defaults to True;
            - 'scaleFactorDer': scaling factors for derivative computation;
                defaults to 'AUTO';
            - 'S': number of starting training points;
            - 'sampler': sample point generator;
            - 'greedyTol': uniform error tolerance for greedy algorithm;
                defaults to 1e-2;
            - 'collinearityTol': collinearity tolerance for greedy algorithm;
                defaults to 0.;
            - 'maxIter': maximum number of greedy steps; defaults to 1e2;
            - 'nTestPoints': number of test points; defaults to 5e2;
            - 'trainSetGenerator': training sample points generator; defaults
                to sampler;
            - 'polybasis': type of basis for interpolation; defaults to
                'MONOMIAL';
            - 'errorEstimatorKind': kind of error estimator; available values
                include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
                'LOOK_AHEAD_RES', 'LOOK_AHEAD_OUTPUT', and 'NONE'; defaults to
                'DISCREPANCY' (see __init__; unrecognized values are
                overridden to 'NONE' by the property setter);
            - 'functionalSolve': strategy for minimization of denominator
                functional; allowed values include 'NORM', 'DOMINANT', 'NODAL',
                'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM';
            - 'interpRcond': tolerance for interpolation; defaults to None;
            - 'robustTol': tolerance for robust rational denominator
                management; defaults to 0.
            Defaults to empty dict.
        approx_state(optional): Whether to approximate state. Defaults and must
            be True.
        verbosity(optional): Verbosity level. Defaults to 10.

    Attributes:
        HFEngine: HF problem solver.
        mu0: Default parameter.
        mus: Array of snapshot parameters.
        approxParameters: Dictionary containing values for main parameters of
            approximant. Recognized keys are in parameterList.
        parameterListSoft: Recognized keys of soft approximant parameters:
            - 'POD': whether to compute POD of snapshots;
            - 'scaleFactorDer': scaling factors for derivative computation;
            - 'greedyTol': uniform error tolerance for greedy algorithm;
            - 'collinearityTol': collinearity tolerance for greedy algorithm;
            - 'maxIter': maximum number of greedy steps;
            - 'nTestPoints': number of test points;
            - 'trainSetGenerator': training sample points generator;
            - 'errorEstimatorKind': kind of error estimator;
            - 'functionalSolve': strategy for minimization of denominator
                functional;
            - 'interpRcond': tolerance for interpolation;
            - 'robustTol': tolerance for robust rational denominator
                management.
        parameterListCritical: Recognized keys of critical approximant
            parameters:
            - 'S': total number of samples current approximant relies upon;
            - 'sampler': sample point generator.
        approx_state: Whether to approximate state.
        verbosity: Verbosity level.
        POD: whether to compute POD of snapshots.
        scaleFactorDer: Scaling factors for derivative computation.
        S: number of starting training points.
        sampler: Sample point generator.
        greedyTol: uniform error tolerance for greedy algorithm.
        collinearityTol: Collinearity tolerance for greedy algorithm.
        maxIter: maximum number of greedy steps.
        nTestPoints: number of test points.
        trainSetGenerator: training sample points generator.
        errorEstimatorKind: kind of error estimator.
        functionalSolve: Strategy for minimization of denominator functional.
        interpRcond: tolerance for interpolation.
        robustTol: tolerance for robust rational denominator management.
        muBounds: list of bounds for parameter values.
        samplingEngine: Sampling engine.
        estimatorNormEngine: Engine for estimator norm computation.
        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
            sampleList.
        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
            solution(s) as parameterList.
        uApproxReduced: Reduced approximate solution(s) with parameter(s)
            lastSolvedApprox as sampleList.
        lastSolvedApproxReduced: Parameter(s) corresponding to last computed
            reduced approximate solution(s) as parameterList.
        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
            sampleList.
        lastSolvedApprox: Parameter(s) corresponding to last computed
            approximate solution(s) as parameterList.
    """

    # Whitelist enforced by the errorEstimatorKind setter; anything else is
    # downgraded to "NONE" with a warning.
    _allowedEstimatorKinds = ["AFFINE", "DISCREPANCY", "LOOK_AHEAD",
                              "LOOK_AHEAD_RES", "LOOK_AHEAD_OUTPUT", "NONE"]
    def __init__(self, *args, **kwargs):
        """Initialize approximant, registering the extra greedy parameter."""
        self._preInit()
        # Register 'errorEstimatorKind' (default "DISCREPANCY") and exclude
        # degree-related parameters, which are managed automatically here
        # (M and N track E, polydegreetype is fixed to 'TOTAL').
        self._addParametersToList(["errorEstimatorKind"], ["DISCREPANCY"],
                                  toBeExcluded = ["M", "N", "polydegreetype",
                                                  "radialDirectionalWeights"])
        super().__init__(*args, **kwargs)
        # Only the look-ahead and trivial estimators can work without the
        # state approximation; all others require approx_state == True.
        if not self.approx_state and self.errorEstimatorKind not in [
                                  "LOOK_AHEAD", "LOOK_AHEAD_OUTPUT", "NONE"]:
            raise RROMPyException(("Must compute greedy approximation of "
                                   "state, unless error estimator allows "
                                   "otherwise."))
        self._postInit()
    @property
    def approx_state(self):
        """Value of approx_state."""
        return self._approx_state

    @approx_state.setter
    def approx_state(self, approx_state):
        # Delegate validation/storage to the parent property setter.
        RationalInterpolant.approx_state.fset(self, approx_state)
        # Re-check compatibility with the chosen error estimator; the
        # hasattr guard handles calls before errorEstimatorKind is set.
        if (not self.approx_state and hasattr(self, "_errorEstimatorKind")
        and self.errorEstimatorKind not in ["LOOK_AHEAD", "LOOK_AHEAD_OUTPUT",
                                            "NONE"]):
            raise RROMPyException(("Must compute greedy approximation of "
                                   "state, unless error estimator allows "
                                   "otherwise."))
    @property
    def E(self):
        """Value of E (derived: always sampleBatchIdx - 1)."""
        self._E = self.sampleBatchIdx - 1
        return self._E

    @E.setter
    def E(self, E):
        # E is read-only by design; assignment only emits a warning.
        RROMPyWarning(("E is used just to simplify inheritance, and its value "
                       "cannot be changed from that of sampleBatchIdx - 1."))
    def _setMAuto(self):
        """Automatically set numerator degree M to the current E."""
        self.M = self.E
    def _setNAuto(self):
        """Automatically set denominator degree N to the current E."""
        self.N = self.E
    @property
    def polydegreetype(self):
        """Value of polydegreetype (fixed to 'TOTAL' for this class)."""
        return "TOTAL"

    @polydegreetype.setter
    def polydegreetype(self, polydegreetype):
        # polydegreetype is read-only by design; assignment only warns.
        RROMPyWarning(("polydegreetype is used just to simplify inheritance, "
                       "and its value cannot be changed from 'TOTAL'."))
@property
def
polybasis
(
self
):
"""Value of polybasis."""
return
self
.
_polybasis
@polybasis.setter
def
polybasis
(
self
,
polybasis
):
try
:
polybasis
=
polybasis
.
upper
()
.
strip
()
.
replace
(
" "
,
""
)
if
polybasis
not
in
polybases
:
raise
RROMPyException
(
"Sample type not recognized."
)
self
.
_polybasis
=
polybasis
except
:
RROMPyWarning
((
"Prescribed polybasis not recognized. Overriding "
"to 'MONOMIAL'."
))
self
.
_polybasis
=
"MONOMIAL"
self
.
_approxParameters
[
"polybasis"
]
=
self
.
polybasis
    @property
    def errorEstimatorKind(self):
        """Value of errorEstimatorKind."""
        return self._errorEstimatorKind

    @errorEstimatorKind.setter
    def errorEstimatorKind(self, errorEstimatorKind):
        # Normalize to uppercase and validate against the class whitelist;
        # unknown kinds are downgraded to "NONE" with a warning.
        errorEstimatorKind = errorEstimatorKind.upper()
        if errorEstimatorKind not in self._allowedEstimatorKinds:
            RROMPyWarning(("Error estimator kind not recognized. Overriding "
                           "to 'NONE'."))
            errorEstimatorKind = "NONE"
        self._errorEstimatorKind = errorEstimatorKind
        self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind
        # Estimators other than LOOK_AHEAD*/NONE need the state approximation;
        # the hasattr guard handles calls before approx_state is set.
        if (self.errorEstimatorKind not in ["LOOK_AHEAD", "LOOK_AHEAD_OUTPUT",
                                            "NONE"]
        and hasattr(self, "_approx_state") and not self.approx_state):
            raise RROMPyException(("Must compute greedy approximation of "
                                   "state, unless error estimator allows "
                                   "otherwise."))
    def _polyvanderAuxiliary(self, mus, deg, *args):
        """Thin wrapper around polyvander; hook point for subclasses."""
        return polyvander(mus, deg, *args)
def
getErrorEstimatorDiscrepancy
(
self
,
mus
:
Np1D
)
->
Np1D
:
"""Discrepancy-based residual estimator."""
checkIfAffine
(
self
.
HFEngine
,
"apply discrepancy-based error estimator"
)
mus
=
self
.
checkParameterList
(
mus
)
muCTest
=
self
.
trainedModel
.
centerNormalize
(
mus
)
tMverb
,
self
.
trainedModel
.
verbosity
=
self
.
trainedModel
.
verbosity
,
0
QTest
=
self
.
trainedModel
.
getQVal
(
mus
)
QTzero
=
np
.
where
(
QTest
==
0.
)[
0
]
if
len
(
QTzero
)
>
0
:
RROMPyWarning
((
"Adjusting estimator to avoid division by "
"numerically zero denominator."
))
QTest
[
QTzero
]
=
np
.
finfo
(
np
.
complex
)
.
eps
/
(
1.
+
self
.
N
)
self
.
HFEngine
.
buildA
()
self
.
HFEngine
.
buildb
()
nAs
,
nbs
=
self
.
HFEngine
.
nAs
,
self
.
HFEngine
.
nbs
muTrainEff
=
self
.
HFEngine
.
mapParameterList
(
self
.
mus
)
muTestEff
=
self
.
HFEngine
.
mapParameterList
(
mus
)
PTrain
=
self
.
trainedModel
.
getPVal
(
self
.
mus
)
.
data
.
T
QTrain
=
self
.
trainedModel
.
getQVal
(
self
.
mus
)
QTzero
=
np
.
where
(
QTrain
==
0.
)[
0
]
if
len
(
QTzero
)
>
0
:
RROMPyWarning
((
"Adjusting estimator to avoid division by "
"numerically zero denominator."
))
QTrain
[
QTzero
]
=
np
.
finfo
(
np
.
complex
)
.
eps
/
(
1.
+
self
.
N
)
PTest
=
self
.
trainedModel
.
getPVal
(
mus
)
.
data
self
.
trainedModel
.
verbosity
=
tMverb
radiusAbTrain
=
np
.
empty
((
self
.
S
,
nAs
*
self
.
S
+
nbs
),
dtype
=
np
.
complex
)
radiusA
=
np
.
empty
((
self
.
S
,
nAs
,
len
(
mus
)),
dtype
=
np
.
complex
)
radiusb
=
np
.
empty
((
nbs
,
len
(
mus
)),
dtype
=
np
.
complex
)
for
j
,
thA
in
enumerate
(
self
.
HFEngine
.
thAs
):
idxs
=
j
*
self
.
S
+
np
.
arange
(
self
.
S
)
radiusAbTrain
[:,
idxs
]
=
expressionEvaluator
(
thA
[
0
],
muTrainEff
,
(
self
.
S
,
1
))
*
PTrain
radiusA
[:,
j
]
=
PTest
*
expressionEvaluator
(
thA
[
0
],
muTestEff
,
(
len
(
mus
),))
for
j
,
thb
in
enumerate
(
self
.
HFEngine
.
thbs
):
idx
=
nAs
*
self
.
S
+
j
radiusAbTrain
[:,
idx
]
=
QTrain
*
expressionEvaluator
(
thb
[
0
],
muTrainEff
,
(
self
.
S
,))
radiusb
[
j
]
=
QTest
*
expressionEvaluator
(
thb
[
0
],
muTestEff
,
(
len
(
mus
),))
QRHSNorm2
=
self
.
_affineResidualMatricesContraction
(
radiusb
)
vanTrain
=
self
.
_polyvanderAuxiliary
(
self
.
_musUniqueCN
,
self
.
E
,
self
.
polybasis0
,
self
.
_derIdxs
,
self
.
_reorder
)
interpPQ
=
customFit
(
vanTrain
,
radiusAbTrain
,
rcond
=
self
.
interpRcond
)
vanTest
=
self
.
_polyvanderAuxiliary
(
muCTest
,
self
.
E
,
self
.
polybasis0
)
DradiusAb
=
vanTest
.
dot
(
interpPQ
)
radiusA
=
(
radiusA
-
DradiusAb
[:,
:
-
nbs
]
.
reshape
(
len
(
mus
),
-
1
,
self
.
S
)
.
T
)
radiusb
=
radiusb
-
DradiusAb
[:,
-
nbs
:]
.
T
ff
,
Lf
,
LL
=
self
.
_affineResidualMatricesContraction
(
radiusb
,
radiusA
)
err
=
np
.
abs
((
LL
-
2.
*
np
.
real
(
Lf
)
+
ff
)
/
QRHSNorm2
)
**
.
5
return
err
def
getErrorEstimatorLookAhead
(
self
,
mus
:
Np1D
,
what
:
str
=
""
)
->
Tuple
[
Np1D
,
List
[
int
]]:
"""Residual estimator based on look-ahead idea."""
errTest
,
QTest
,
idxMaxEst
=
self
.
_EIMStep
(
mus
)
_approx_state_old
=
self
.
approx_state
if
what
==
"OUTPUT"
and
_approx_state_old
:
self
.
_approx_state
=
False
self
.
initEstimatorNormEngine
()
self
.
_approx_state
=
_approx_state_old
mu_muTestSample
=
mus
[
idxMaxEst
]
app_muTestSample
=
self
.
getApproxReduced
(
mu_muTestSample
)
if
self
.
_mode
==
RROMPy_FRAGILE
:
if
what
==
"RES"
and
not
self
.
HFEngine
.
isCEye
:
raise
RROMPyException
((
"Cannot compute LOOK_AHEAD_RES "
"estimator in fragile mode for "
"non-scalar C."
))
app_muTestSample
=
dot
(
self
.
trainedModel
.
data
.
projMat
[:,
:
app_muTestSample
.
shape
[
0
]],
app_muTestSample
)
else
:
app_muTestSample
=
dot
(
self
.
samplingEngine
.
projectionMatrix
,
app_muTestSample
)
if
what
==
"RES"
:
errmu
=
self
.
HFEngine
.
residual
(
mu_muTestSample
,
app_muTestSample
,
post_c
=
False
)
solmu
=
self
.
HFEngine
.
residual
(
mu_muTestSample
,
None
,
post_c
=
False
)
else
:
for
j
,
mu
in
enumerate
(
mu_muTestSample
):
uEx
=
self
.
samplingEngine
.
nextSample
(
mu
)
if
j
==
0
:
solmu
=
emptySampleList
()
solmu
.
reset
((
len
(
uEx
),
len
(
mu_muTestSample
)),
dtype
=
uEx
.
dtype
)
solmu
[
j
]
=
uEx
if
what
==
"OUTPUT"
and
self
.
approx_state
:
solmu
=
sampleList
(
self
.
HFEngine
.
applyC
(
solmu
))
app_muTestSample
=
sampleList
(
self
.
HFEngine
.
applyC
(
app_muTestSample
))
errmu
=
solmu
-
app_muTestSample
errsamples
=
(
self
.
estimatorNormEngine
.
norm
(
errmu
)
/
self
.
estimatorNormEngine
.
norm
(
solmu
))
musT
=
copy
(
self
.
mus
)
musT
.
append
(
mu_muTestSample
)
musT
=
self
.
trainedModel
.
centerNormalize
(
musT
)
musC
=
self
.
trainedModel
.
centerNormalize
(
mus
)
errT
=
np
.
zeros
((
len
(
musT
),
len
(
mu_muTestSample
)),
dtype
=
np
.
complex
)
errT
[
np
.
arange
(
len
(
self
.
mus
),
len
(
musT
)),
np
.
arange
(
len
(
mu_muTestSample
))]
=
errsamples
*
QTest
[
idxMaxEst
]
vanT
=
self
.
_polyvanderAuxiliary
(
musT
,
self
.
E
+
1
,
self
.
polybasis
)
fitOut
=
customFit
(
vanT
,
errT
,
full
=
True
,
rcond
=
self
.
interpRcond
)
vbMng
(
self
,
"MAIN"
,
(
"Fitting {} samples with degree {} through {}... Conditioning "
"of LS system: {:.4e}."
)
.
format
(
len
(
vanT
),
self
.
E
+
1
,
polyfitname
(
self
.
polybasis
),
fitOut
[
1
][
2
][
0
]
/
fitOut
[
1
][
2
][
-
1
]),
15
)
vanC
=
self
.
_polyvanderAuxiliary
(
musC
,
self
.
E
+
1
,
self
.
polybasis
)
err
=
np
.
sum
(
np
.
abs
(
vanC
.
dot
(
fitOut
[
0
])),
axis
=
-
1
)
/
QTest
return
err
,
idxMaxEst
def
getErrorEstimatorNone
(
self
,
mus
:
Np1D
)
->
Np1D
:
"""EIM-based residual estimator."""
err
=
np
.
max
(
self
.
_EIMStep
(
mus
,
True
),
axis
=
1
)
err
*=
self
.
greedyTol
/
np
.
mean
(
err
)
return
err
def
_EIMStep
(
self
,
mus
:
Np1D
,
only_one
:
bool
=
False
)
->
Tuple
[
Np1D
,
Np1D
,
List
[
int
]]:
"""Residual estimator based on look-ahead idea."""
mus
=
self
.
checkParameterList
(
mus
)
tMverb
,
self
.
trainedModel
.
verbosity
=
self
.
trainedModel
.
verbosity
,
0
QTest
=
self
.
trainedModel
.
getQVal
(
mus
)
QTzero
=
np
.
where
(
QTest
==
0.
)[
0
]
if
len
(
QTzero
)
>
0
:
RROMPyWarning
((
"Adjusting estimator to avoid division by "
"numerically zero denominator."
))
QTest
[
QTzero
]
=
np
.
finfo
(
np
.
complex
)
.
eps
/
(
1.
+
self
.
N
)
QTest
=
np
.
abs
(
QTest
)
muCTest
=
self
.
trainedModel
.
centerNormalize
(
mus
)
muCTrain
=
self
.
trainedModel
.
centerNormalize
(
self
.
mus
)
self
.
trainedModel
.
verbosity
=
tMverb
vanTest
=
self
.
_polyvanderAuxiliary
(
muCTest
,
self
.
E
,
self
.
polybasis
)
vanTestNext
=
self
.
_polyvanderAuxiliary
(
muCTest
,
self
.
E
+
1
,
self
.
polybasis
)[:,
vanTest
.
shape
[
1
]
:]
idxsTest
=
np
.
arange
(
vanTestNext
.
shape
[
1
])
basis
=
np
.
zeros
((
len
(
idxsTest
),
0
),
dtype
=
float
)
idxMaxEst
=
[]
while
len
(
idxsTest
)
>
0
:
vanTrial
=
self
.
_polyvanderAuxiliary
(
muCTrain
,
self
.
E
,
self
.
polybasis
)
vanTrialNext
=
self
.
_polyvanderAuxiliary
(
muCTrain
,
self
.
E
+
1
,
self
.
polybasis
)[:,
vanTrial
.
shape
[
1
]
:]
vanTrial
=
np
.
hstack
((
vanTrial
,
vanTrialNext
.
dot
(
basis
)
.
reshape
(
len
(
vanTrialNext
),
basis
.
shape
[
1
])))
valuesTrial
=
vanTrialNext
[:,
idxsTest
]
vanTestEff
=
np
.
hstack
((
vanTest
,
vanTestNext
.
dot
(
basis
)
.
reshape
(
len
(
vanTestNext
),
basis
.
shape
[
1
])))
vanTestNextEff
=
vanTestNext
[:,
idxsTest
]
coeffTest
=
np
.
linalg
.
solve
(
vanTrial
,
valuesTrial
)
errTest
=
(
np
.
abs
(
vanTestNextEff
-
vanTestEff
.
dot
(
coeffTest
))
/
np
.
expand_dims
(
QTest
,
1
))
if
only_one
:
return
errTest
idxMaxErr
=
np
.
unravel_index
(
np
.
argmax
(
errTest
),
errTest
.
shape
)
idxMaxEst
+=
[
idxMaxErr
[
0
]]
muCTrain
.
append
(
muCTest
[
idxMaxErr
[
0
]])
basis
=
np
.
pad
(
basis
,
[(
0
,
0
),
(
0
,
1
)],
"constant"
)
basis
[
idxsTest
[
idxMaxErr
[
1
]],
-
1
]
=
1.
idxsTest
=
np
.
delete
(
idxsTest
,
idxMaxErr
[
1
])
return
errTest
,
QTest
,
idxMaxEst
    def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D:
        """
        Standard residual-based error estimator.

        Args:
            mus: parameter values where the estimator is evaluated.
            return_max: if True, also return the indices of the next greedy
                samples and the corresponding estimator values.

        Returns:
            err, or (err, idxMaxEst, err[idxMaxEst]) if return_max.
        """
        setupOK = self.setupApproxLocal()
        # A positive return flags an unstable local setup: report NaN
        # estimates (and a negative index carrying the failure code).
        if setupOK > 0:
            err = np.empty(len(mus))
            err[:] = np.nan
            if not return_max: return err
            return err, [- setupOK], np.nan
        mus = self.checkParameterList(mus)
        vbMng(self.trainedModel, "INIT",
              "Evaluating error estimator at mu = {}.".format(mus), 10)
        # Dispatch on the configured estimator kind.
        if self.errorEstimatorKind == "AFFINE":
            err = self.getErrorEstimatorAffine(mus)
        else:
            self._setupInterpolationIndices()
            if self.errorEstimatorKind == "DISCREPANCY":
                err = self.getErrorEstimatorDiscrepancy(mus)
            elif self.errorEstimatorKind[: 10] == "LOOK_AHEAD":
                # [11 :] strips the "LOOK_AHEAD_" prefix, leaving "", "RES",
                # or "OUTPUT".
                err, idxMaxEst = self.getErrorEstimatorLookAhead(mus,
                                               self.errorEstimatorKind[11 :])
            else: #if self.errorEstimatorKind == "NONE":
                err = self.getErrorEstimatorNone(mus)
        vbMng(self.trainedModel, "DEL", "Done evaluating error estimator", 10)
        if not return_max: return err
        # LOOK_AHEAD estimators already return their candidate indices; the
        # others pick a batch greedily, damping the estimate near each
        # previously selected point to spread the batch out.
        if self.errorEstimatorKind[: 10] != "LOOK_AHEAD":
            idxMaxEst = np.empty(self.sampleBatchSize, dtype = int)
            errCP = copy(err)
            for j in range(self.sampleBatchSize):
                k = np.argmax(errCP)
                idxMaxEst[j] = k
                if j + 1 < self.sampleBatchSize:
                    musZero = self.trainedModel.centerNormalize(mus, mus[k])
                    errCP *= np.linalg.norm(musZero.data, axis = 1)
        return err, idxMaxEst, err[idxMaxEst]
def
plotEstimator
(
self
,
*
args
,
**
kwargs
):
super
()
.
plotEstimator
(
*
args
,
**
kwargs
)
if
self
.
errorEstimatorKind
==
"NONE"
:
vbMng
(
self
,
"MAIN"
,
(
"Warning! Error estimator has been arbitrarily normalized "
"before plotting."
),
15
)
    def greedyNextSample(self, *args,
                         **kwargs) -> Tuple[Np1D, int, float, paramVal]:
        """Compute next greedy snapshot of solution map."""
        RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
        # Advance to the next total-degree batch before sampling.
        self.sampleBatchIdx += 1
        self.sampleBatchSize = totalDegreeN(self.npar - 1,
                                            self.sampleBatchIdx)
        err, muidx, maxErr, muNext = super().greedyNextSample(*args, **kwargs)
        # Roll the batch counters back if the step failed (NaN/inf estimate).
        if maxErr is not None and (np.any(np.isnan(maxErr))
                                or np.any(np.isinf(maxErr))):
            self.sampleBatchIdx -= 1
            self.sampleBatchSize = totalDegreeN(self.npar - 1,
                                                self.sampleBatchIdx)
        # For the "NONE" estimator the estimate magnitude is meaningless, so
        # a finite maxErr is blanked out to keep the greedy loop running.
        # NOTE(review): np.isnan(maxErr) would raise if maxErr were None
        # here — presumably super() returns a numeric maxErr in the NONE
        # case; confirm against GenericGreedyApproximant.
        if (self.errorEstimatorKind == "NONE" and not np.isnan(maxErr)
                                              and not np.isinf(maxErr)):
            maxErr = None
        return err, muidx, maxErr, muNext
def
_setSampleBatch
(
self
,
maxS
:
int
):
self
.
sampleBatchIdx
,
self
.
sampleBatchSize
,
S
=
-
1
,
0
,
0
nextBatchSize
=
1
while
S
+
nextBatchSize
<=
maxS
:
self
.
sampleBatchIdx
+=
1
self
.
sampleBatchSize
=
nextBatchSize
S
+=
self
.
sampleBatchSize
nextBatchSize
=
totalDegreeN
(
self
.
npar
-
1
,
self
.
sampleBatchIdx
+
1
)
return
S
    def _preliminaryTraining(self):
        """Initialize starting snapshots of solution map."""
        RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
        # Nothing to do if snapshots are already available.
        if self.samplingEngine.nsamples > 0: return
        # Round S down to a whole number of total-degree batches.
        self._S = self._setSampleBatch(self.S)
        super()._preliminaryTraining()
        # Degrees are managed automatically (tracking E) in the greedy loop.
        self.M, self.N = ("AUTO",) * 2
def
setupApproxLocal
(
self
)
->
int
:
"""Compute rational interpolant."""
if
self
.
checkComputedApprox
():
return
-
1
RROMPyAssert
(
self
.
_mode
,
message
=
"Cannot setup approximant."
)
self
.
verbosity
-=
10
vbMng
(
self
,
"INIT"
,
"Setting up local approximant."
,
5
)
pMat
=
self
.
samplingEngine
.
projectionMatrix
if
self
.
trainedModel
is
not
None
:
pMat
=
pMat
[:,
len
(
self
.
trainedModel
.
data
.
mus
)
:]
self
.
_setupTrainedModel
(
pMat
,
self
.
trainedModel
is
not
None
)
self
.
catchInstability
=
2
vbDepth
=
getVerbosityDepth
()
unstable
=
0
if
self
.
E
>
0
:
try
:
Q
=
self
.
_setupDenominator
()
except
RROMPyException
as
RE
:
if
RE
.
critical
:
raise
RE
from
None
setVerbosityDepth
(
vbDepth
)
RROMPyWarning
(
"Downgraded {}: {}"
.
format
(
RE
.
__class__
.
__name__
,
RE
))
unstable
=
1
else
:
Q
=
PI
()
Q
.
coeffs
=
np
.
ones
((
1
,)
*
self
.
npar
,
dtype
=
np
.
complex
)
Q
.
npar
=
self
.
npar
Q
.
polybasis
=
self
.
polybasis
if
not
unstable
:
self
.
trainedModel
.
data
.
Q
=
copy
(
Q
)
try
:
P
=
copy
(
self
.
_setupNumerator
())
except
RROMPyException
as
RE
:
if
RE
.
critical
:
raise
RE
from
None
setVerbosityDepth
(
vbDepth
)
RROMPyWarning
(
"Downgraded {}: {}"
.
format
(
RE
.
__class__
.
__name__
,
RE
))
unstable
=
1
if
not
unstable
:
self
.
trainedModel
.
data
.
P
=
copy
(
P
)
self
.
trainedModel
.
data
.
approxParameters
=
copy
(
self
.
approxParameters
)
vbMng
(
self
,
"DEL"
,
"Done setting up local approximant."
,
5
)
self
.
catchInstability
=
0
self
.
verbosity
+=
10
return
unstable
    def setupApprox(self, plotEst : str = "NONE") -> int:
        """
        Compute rational interpolant via the greedy loop.

        Args:
            plotEst: estimator plotting directive forwarded to the parent.

        Returns:
            The parent's return value (0 on success).
        """
        val = super().setupApprox(plotEst)
        if val == 0:
            # Finalize the rational model from the trained Q and P.
            self._setupRational(self.trainedModel.data.Q,
                                self.trainedModel.data.P)
            self.trainedModel.data.approxParameters = copy(
                                                        self.approxParameters)
        return val
    def loadTrainedModel(self, filename:str):
        """Load trained reduced model from file."""
        super().loadTrainedModel(filename)
        # Resynchronize batch counters with the loaded sample count.
        self._setSampleBatch(self.S + 1)
Event Timeline
Log In to Comment