diff --git a/examples/5_anisotropic_square/anisotropic_square.py b/examples/5_anisotropic_square/anisotropic_square.py
index ad8dabe..2738c6c 100644
--- a/examples/5_anisotropic_square/anisotropic_square.py
+++ b/examples/5_anisotropic_square/anisotropic_square.py
@@ -1,77 +1,77 @@
 import numpy as np
 import matplotlib.pyplot as plt
 from itertools import product
 from anisotropic_square_engine import (AnisotropicSquareEngine as engine,
                                        AnisotropicSquareEnginePoles as plsEx)
 from rrompy.reduction_methods import (
                               RationalInterpolantGreedyPivotedGreedy as RIGPG)
 from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS,
                                                  SparseGridSampler as SGS)
 zs, Ls = [10., 50.], [.2, 1.2]
 z0, L0, n = np.mean(zs), np.mean(Ls), 50
 murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]]
 np.random.seed(4020)
 mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]),
       Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])]
 solver = engine(z0, L0, n)
 fighandles = []
 params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3,
           "polybasisMarginal": "MONOMIAL_WENDLAND",
           "polybasis": "LEGENDRE", 'samplerPivot':QS(zs, "UNIFORM"),
           'trainSetGenerator':QS(zs, "UNIFORM"),
           'errorEstimatorKind':"LOOK_AHEAD_RES",
           'errorEstimatorKindMarginal':"LOOK_AHEAD_RECOVER",
           "SMarginal": 3, "paramsMarginal": {"MMarginal": 2,
                        "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]},
           "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls),
           "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1.}
 for shared, tol in product([1., 0.], [1., 3.]):
     print("Testing cutoff tolerance {} with shared ratio {}.".format(tol,
                                                                      shared))
     params['cutOffTolerance'] = tol
-    params['cutOffSharedRatio'] = shared
+    params['sharedRatio'] = shared
     approx = RIGPG([0], solver, mu0 = [z0, L0], approx_state = True,
                    approxParameters = params, verbosity = 5)
     approx.setupApprox("ALL")
     verb = approx.verbosity
     approx.verbosity = 0
     tspace = np.linspace(Ls[0], Ls[-1], 100)
     for j, t in enumerate(tspace):
         plsE = plsEx(t, 0., zs[-1])
         pls = approx.getPoles([None, t])
         pls[np.abs(np.imag(pls)) > 1e-5] = np.nan
         if j == 0:
             polesE = np.empty((len(tspace), len(plsE)))
             poles = np.empty((len(tspace), len(pls)))
             polesE[:] = np.nan
         if len(plsE) > polesE.shape[1]:
             nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1]))
             nanR[:] = np.nan
             polesE = np.hstack((polesE, nanR))
         polesE[j, : len(plsE)] = np.real(plsE)
         poles[j] = np.real(pls)
     approx.verbosity = verb
     fighandles += [plt.figure(figsize = (17, 5))]
     ax1 = fighandles[-1].add_subplot(1, 2, 1)
     ax2 = fighandles[-1].add_subplot(1, 2, 2)
     ax1.plot(poles, tspace)
     ax1.set_ylim(Ls)
     ax1.set_xlabel('mu_1')
     ax1.set_ylabel('mu_2')
     ax1.grid()
     ax2.plot(polesE, tspace, 'k-.', linewidth = 1)
     ax2.plot(poles, tspace)
     for mm in approx.musMarginal:
         ax2.plot(zs, [mm[0, 0]] * 2, 'k--', linewidth = 1)
     ax2.set_xlim(zs)
     ax2.set_ylim(Ls)
     ax2.set_xlabel('mu_1')
     ax2.set_ylabel('mu_2')
     ax2.grid()
     plt.show()
     print("\n")
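Both example scripts change in the same way: the approximant parameter 'cutOffSharedRatio' is renamed to 'sharedRatio'. A minimal usage sketch of the renamed key, reusing solver, z0, L0, zs, Ls, QS, SGS and RIGPG from the script above (the numeric values are illustrative, not taken from the example):

    params = {"POD": True, "S": 3, "SMarginal": 3, "polybasis": "LEGENDRE",
              "samplerPivot": QS(zs, "UNIFORM"), "samplerMarginal": SGS(Ls),
              "cutOffTolerance": 1.,  # tolerance for discarding parasitic poles
              "sharedRatio": .5}      # formerly 'cutOffSharedRatio'
    approx = RIGPG([0], solver, mu0 = [z0, L0], approx_state = True,
                   approxParameters = params, verbosity = 5)
    approx.setupApprox("ALL")
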
diff --git a/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py b/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py
index 8fd2899..1786c06 100755
--- a/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py
+++ b/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py
@@ -1,78 +1,78 @@
 import numpy as np
 import matplotlib.pyplot as plt
 from itertools import product
 from anisotropic_square_engine import (AnisotropicSquareEngine as engine,
                                        AnisotropicSquareEnginePoles as plsEx)
 from rrompy.reduction_methods import (
                               RationalInterpolantGreedyPivotedGreedy as RIGPG)
 from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS,
                                                  SparseGridSampler as SGS)
 zs, Ls = [10., 50.], [.2, 1.2]
 z0, L0, n = np.mean(zs), np.mean(Ls), 50
 murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]]
 np.random.seed(4020)
 mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]),
       Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])]
 solver = engine(z0, L0, n)
 fighandles = []
 params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3,
           "polybasis": "LEGENDRE", 'samplerPivot':QS(zs, "UNIFORM"),
-          'cutOffSharedRatio': 0., "maxIterMarginal":20,
+          'sharedRatio': 0., "maxIterMarginal":20,
           'cutOffToleranceError': 1., 'trainSetGenerator':QS(zs, "UNIFORM"),
           'errorEstimatorKind':"LOOK_AHEAD_RES",
           'errorEstimatorKindMarginal':"LOOK_AHEAD_RECOVER",
           "SMarginal": 3, "paramsMarginal": {"MMarginal": 2,
                        "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]},
           "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls),
           "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1.}
 for cutOffTolerance, polybasisMarginal in product([1., np.inf],
                          ["MONOMIAL_WENDLAND", "PIECEWISE_LINEAR_UNIFORM"]):
     print("Testing cutoff tolerance {} with marginal basis {}.".format(
                                        cutOffTolerance, polybasisMarginal))
     params['cutOffTolerance'] = cutOffTolerance
     params["polybasisMarginal"] = polybasisMarginal
     approx = RIGPG([0], solver, mu0 = [z0, L0], approx_state = True,
                    approxParameters = params, verbosity = 5)
     approx.setupApprox("ALL")
     verb = approx.verbosity
     approx.verbosity = 0
     tspace = np.linspace(Ls[0], Ls[-1], 100)
     for j, t in enumerate(tspace):
         plsE = plsEx(t, 0., zs[-1])
         pls = approx.getPoles([None, t])
         pls[np.abs(np.imag(pls)) > 1e-5] = np.nan
         if j == 0:
             polesE = np.empty((len(tspace), len(plsE)))
             poles = np.empty((len(tspace), len(pls)))
             polesE[:] = np.nan
         if len(plsE) > polesE.shape[1]:
             nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1]))
             nanR[:] = np.nan
             polesE = np.hstack((polesE, nanR))
         polesE[j, : len(plsE)] = np.real(plsE)
         poles[j] = np.real(pls)
     approx.verbosity = verb
     fighandles += [plt.figure(figsize = (17, 5))]
     ax1 = fighandles[-1].add_subplot(1, 2, 1)
     ax2 = fighandles[-1].add_subplot(1, 2, 2)
     ax1.plot(poles, tspace)
     ax1.set_ylim(Ls)
     ax1.set_xlabel('mu_1')
     ax1.set_ylabel('mu_2')
     ax1.grid()
     ax2.plot(polesE, tspace, 'k-.', linewidth = 1)
     ax2.plot(poles, tspace)
     for mm in approx.musMarginal:
         ax2.plot(zs, [mm[0, 0]] * 2, 'k--', linewidth = 1)
     ax2.set_xlim(zs)
     ax2.set_ylim(Ls)
     ax2.set_xlabel('mu_1')
     ax2.set_ylabel('mu_2')
     ax2.grid()
     plt.show()
     print("\n")
diff --git a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py
index 663fa35..6f1960b 100644
--- a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py
+++ b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py
@@ -1,828 +1,828 @@
 # Copyright (C) 2018 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
# from abc import abstractmethod from copy import deepcopy as copy import numpy as np from matplotlib import pyplot as plt from rrompy.reduction_methods.pivoted.generic_pivoted_approximant import ( GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximant) from rrompy.reduction_methods.pivoted.gather_pivoted_approximant import ( gatherPivotedApproximant) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal, paramList, ListAny) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.point_matching import (pointMatching, chordalMetricAdjusted, potential) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import (masterCore, indicesScatter, arrayGatherv, isend) __all__ = ['GenericPivotedGreedyApproximantNoMatch', 'GenericPivotedGreedyApproximant'] class GenericPivotedGreedyApproximantBase(GenericPivotedApproximantBase): _allowedEstimatorKindsMarginal = ["LEAVE_ONE_OUT", "LOOK_AHEAD", "LOOK_AHEAD_RECOVER", "NONE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchingWeightError", "cutOffToleranceError", "errorEstimatorKindMarginal", "greedyTolMarginal", "maxIterMarginal"], [0., "AUTO", "NONE", 1e-1, 1e2]) super().__init__(*args, **kwargs) self._postInit() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. if self._scaleFactorDer == "AUTO": return self._scaleFactorOldPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif hasattr(scaleFactorDer, "__len__"): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'refine' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") GenericPivotedApproximantBase.samplerMarginal.fset(self, samplerMarginal) @property def errorEstimatorKindMarginal(self): """Value of errorEstimatorKindMarginal.""" return self._errorEstimatorKindMarginal @errorEstimatorKindMarginal.setter def errorEstimatorKindMarginal(self, errorEstimatorKindMarginal): errorEstimatorKindMarginal = errorEstimatorKindMarginal.upper() if errorEstimatorKindMarginal not in ( self._allowedEstimatorKindsMarginal): RROMPyWarning(("Marginal error estimator kind not recognized. 
" "Overriding to 'NONE'.")) errorEstimatorKindMarginal = "NONE" self._errorEstimatorKindMarginal = errorEstimatorKindMarginal self._approxParameters["errorEstimatorKindMarginal"] = ( self.errorEstimatorKindMarginal) @property def matchingWeightError(self): """Value of matchingWeightError.""" return self._matchingWeightError @matchingWeightError.setter def matchingWeightError(self, matchingWeightError): self._matchingWeightError = matchingWeightError self._approxParameters["matchingWeightError"] = ( self.matchingWeightError) @property def cutOffToleranceError(self): """Value of cutOffToleranceError.""" return self._cutOffToleranceError @cutOffToleranceError.setter def cutOffToleranceError(self, cutOffToleranceError): if isinstance(cutOffToleranceError, (str,)): cutOffToleranceError = cutOffToleranceError.upper()\ .strip().replace(" ","") if cutOffToleranceError != "AUTO": RROMPyWarning(("String value of cutOffToleranceError not " "recognized. Overriding to 'AUTO'.")) cutOffToleranceError == "AUTO" self._cutOffToleranceError = cutOffToleranceError self._approxParameters["cutOffToleranceError"] = ( self.cutOffToleranceError) @property def greedyTolMarginal(self): """Value of greedyTolMarginal.""" return self._greedyTolMarginal @greedyTolMarginal.setter def greedyTolMarginal(self, greedyTolMarginal): if greedyTolMarginal < 0: raise RROMPyException("greedyTolMarginal must be non-negative.") if (hasattr(self, "_greedyTolMarginal") and self.greedyTolMarginal is not None): greedyTolMarginalold = self.greedyTolMarginal else: greedyTolMarginalold = -1 self._greedyTolMarginal = greedyTolMarginal self._approxParameters["greedyTolMarginal"] = self.greedyTolMarginal if greedyTolMarginalold != self.greedyTolMarginal: self.resetSamples() @property def maxIterMarginal(self): """Value of maxIterMarginal.""" return self._maxIterMarginal @maxIterMarginal.setter def maxIterMarginal(self, maxIterMarginal): if maxIterMarginal <= 0: raise RROMPyException("maxIterMarginal must be positive.") if (hasattr(self, "_maxIterMarginal") and self.maxIterMarginal is not None): maxIterMarginalold = self.maxIterMarginal else: maxIterMarginalold = -1 self._maxIterMarginal = maxIterMarginal self._approxParameters["maxIterMarginal"] = self.maxIterMarginal if maxIterMarginalold != self.maxIterMarginal: self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() if not hasattr(self, "_temporaryPivot"): self._mus = emptyParameterList() self._musMarginal = emptyParameterList() if hasattr(self, "samplerMarginal"): self.samplerMarginal.reset() if hasattr(self, "samplingEngine") and self.samplingEngine is not None: self.samplingEngine.resetHistory() def _getPolesResExact(self, HITest, foci:Tuple[float, float], ground:float) -> Tuple[Np1D, Np2D]: if self.cutOffToleranceError == "AUTO": cutOffTolErr = self.cutOffTolerance else: cutOffTolErr = self.cutOffToleranceError polesEx = copy(HITest.poles) idxExEff = np.where(potential(polesEx, foci) - ground <= cutOffTolErr * ground)[0] if self.matchingWeightError != 0: resEx = HITest.coeffs[idxExEff] else: resEx = None return polesEx[idxExEff], resEx def _getDistanceApp(self, polesEx:Np1D, resEx:Np2D, muTest:paramVal, foci:Tuple[float, float], ground:float) -> float: if self.cutOffToleranceError == "AUTO": cutOffTolErr = self.cutOffTolerance else: cutOffTolErr = self.cutOffToleranceError polesAp = self.trainedModel.interpolateMarginalPoles(muTest)[0] idxApEff = np.where(potential(polesAp, foci) - ground <= cutOffTolErr * ground)[0] polesAp = polesAp[idxApEff] if 
self.matchingWeightError != 0: resAp = self.trainedModel.interpolateMarginalCoeffs(muTest)[0][ idxApEff, :] resEx = self.trainedModel.data.projMat[:, : resEx.shape[1]].dot(resEx.T) resAp = self.trainedModel.data.projMat[:, : resAp.shape[1]].dot(resAp.T) else: resAp = None dist = chordalMetricAdjusted(polesEx, polesAp, self.matchingWeightError, resEx, resAp, self.HFEngine, False) pmR, pmC = pointMatching(dist) return np.mean(dist[pmR, pmC]) def getErrorEstimatorMarginalLeaveOneOut(self) -> Np1D: nTest = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nTest) if nTest <= 1: err = np.empty(nTest) err[:] = np.inf return err idx, sizes = indicesScatter(nTest, return_sizes = True) err = [] if len(idx) > 0: _tMdataFull = copy(self.trainedModel.data) _musMExcl = None self.verbosity -= 35 self.trainedModel.verbosity -= 35 foci = self.samplerPivot.normalFoci() ground = self.samplerPivot.groundPotential() for i, j in enumerate(idx): jEff = j - (i > 0) muTest = self.trainedModel.data.musMarginal[jEff] polesEx, resEx = self._getPolesResExact( self.trainedModel.data.HIs[jEff], foci, ground) if i > 0: self.musMarginal.insert(_musMExcl, j - 1) _musMExcl = self.musMarginal[j] self.musMarginal.pop(j) if len(polesEx) == 0: err += [0.] continue self._updateTrainedModelMarginalSamples([j]) self._finalizeMarginalization() err += [self._getDistanceApp(polesEx, resEx, muTest, foci, ground)] self._updateTrainedModelMarginalSamples() self.musMarginal.insert(_musMExcl, idx[-1]) self.verbosity += 35 self.trainedModel.verbosity += 35 self.trainedModel.data = _tMdataFull return arrayGatherv(np.array(err), sizes) def getErrorEstimatorMarginalLookAhead(self) -> Np1D: if not hasattr(self.trainedModel, "_musMExcl"): err = np.zeros(0) err[:] = np.inf self._musMarginalTestIdxs = np.zeros(0, dtype = int) return err self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl, dtype = int) idx, sizes = indicesScatter(len(self.trainedModel._musMExcl), return_sizes = True) err = [] if len(idx) > 0: self.verbosity -= 35 self.trainedModel.verbosity -= 35 foci = self.samplerPivot.normalFoci() ground = self.samplerPivot.groundPotential() for j in idx: muTest = self.trainedModel._musMExcl[j] HITest = self.trainedModel._HIsExcl[j] polesEx, resEx = self._getPolesResExact(HITest, foci, ground) if len(polesEx) == 0: err += [0.] continue err += [self._getDistanceApp(polesEx, resEx, muTest, foci, ground)] self.verbosity += 35 self.trainedModel.verbosity += 35 return arrayGatherv(np.array(err), sizes) def getErrorEstimatorMarginalNone(self) -> Np1D: nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) return (1. 
+ self.greedyTolMarginal) * np.ones(nErr) def errorEstimatorMarginal(self, return_max : bool = False) -> Np1D: vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format( self.trainedModel.data.musMarginal), 10) if self.errorEstimatorKindMarginal == "LEAVE_ONE_OUT": err = self.getErrorEstimatorMarginalLeaveOneOut() elif self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": err = self.getErrorEstimatorMarginalLookAhead() else:#if self.errorEstimatorKindMarginal == "NONE": err = self.getErrorEstimatorMarginalNone() vbMng(self.trainedModel, "DEL", "Done evaluating error estimator", 10) if not return_max: return err idxMaxEst = np.where(err > self.greedyTolMarginal)[0] maxErr = err[idxMaxEst] if self.errorEstimatorKindMarginal == "NONE": maxErr = None return err, idxMaxEst, maxErr def plotEstimatorMarginal(self, est:Np1D, idxMax:List[int], estMax:List[float]): if self.errorEstimatorKindMarginal == "NONE": return if (not (np.any(np.isnan(est)) or np.any(np.isinf(est))) and masterCore()): fig = plt.figure(figsize = plt.figaspect(1. / self.nparMarginal)) for jpar in range(self.nparMarginal): ax = fig.add_subplot(1, self.nparMarginal, 1 + jpar) if self.errorEstimatorKindMarginal == "LEAVE_ONE_OUT": musre = copy(self.trainedModel.data.musMarginal.re.data) else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": if not hasattr(self.trainedModel, "_musMExcl"): return musre = np.real(self.trainedModel._musMExcl) if len(idxMax) > 0 and estMax is not None: maxrej = musre[idxMax, jpar] errCP = copy(est) idx = np.delete(np.arange(self.nparMarginal), jpar) while len(musre) > 0: if self.nparMarginal == 1: currIdx = np.arange(len(musre)) else: currIdx = np.where(np.isclose(np.sum( np.abs(musre[:, idx] - musre[0, idx]), 1), 0.))[0] currIdxSorted = currIdx[np.argsort(musre[currIdx, jpar])] ax.semilogy(musre[currIdxSorted, jpar], errCP[currIdxSorted], 'k.-', linewidth = 1) musre = np.delete(musre, currIdx, 0) errCP = np.delete(errCP, currIdx) ax.semilogy(self.musMarginal.re(jpar), (self.greedyTolMarginal,) * len(self.musMarginal), '*m') if len(idxMax) > 0 and estMax is not None: ax.semilogy(maxrej, estMax, 'xr') ax.set_xlim(*list(self.samplerMarginal.lims.re(jpar))) ax.grid() plt.tight_layout() plt.show() def _addMarginalSample(self, mus:paramList): mus = self.checkParameterListMarginal(mus) if len(mus) == 0: return self._nmusOld, nmus = len(self.musMarginal), len(mus) if (hasattr(self, "trainedModel") and self.trainedModel is not None and hasattr(self.trainedModel, "_musMExcl")): self._nmusOld += len(self.trainedModel._musMExcl) vbMng(self, "MAIN", ("Adding marginal sample point{} no. 
{}{} at {} to training " "set.").format("s" * (nmus > 1), self._nmusOld + 1, "--{}".format(self._nmusOld + nmus) * (nmus > 1), mus), 3) self.musMarginal.append(mus) self.setupApproxPivoted(mus) self._poleMatching() del self._nmusOld if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): ubRange = len(self.trainedModel.data.musMarginal) if hasattr(self.trainedModel, "_idxExcl"): shRange = len(self.trainedModel._musMExcl) else: shRange = 0 testIdxs = list(range(ubRange + shRange - len(mus), ubRange + shRange)) for j in testIdxs[::-1]: self.musMarginal.pop(j - shRange) if hasattr(self.trainedModel, "_idxExcl"): testIdxs = self.trainedModel._idxExcl + testIdxs self._updateTrainedModelMarginalSamples(testIdxs) self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = self.SMarginal def greedyNextSampleMarginal(self, muidx:List[int], plotEst : str = "NONE") \ -> Tuple[Np1D, List[int], float, paramVal]: RROMPyAssert(self._mode, message = "Cannot add greedy sample.") muidx = self._musMarginalTestIdxs[muidx] if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): if not hasattr(self.trainedModel, "_idxExcl"): raise RROMPyException(("Sample index to be added not present " "in trained model.")) testIdxs = copy(self.trainedModel._idxExcl) skippedIdx = 0 for cj, j in enumerate(self.trainedModel._idxExcl): if j in muidx: testIdxs.pop(skippedIdx) self.musMarginal.insert(self.trainedModel._musMExcl[cj], j - skippedIdx) else: skippedIdx += 1 if len(self.trainedModel._idxExcl) < (len(muidx) + len(testIdxs)): raise RROMPyException(("Sample index to be added not present " "in trained model.")) self._updateTrainedModelMarginalSamples(testIdxs) self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) self.firstGreedyIterM = False idxAdded = self.samplerMarginal.refine(muidx) self._addMarginalSample(self.samplerMarginal.points[idxAdded]) errorEstTest, muidx, maxErrorEst = self.errorEstimatorMarginal(True) if plotEst == "ALL": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) return (errorEstTest, muidx, maxErrorEst, self.samplerMarginal.points[muidx]) def _preliminaryTrainingMarginal(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if np.sum(self.samplingEngine.nsamples) > 0: return self.resetSamples() self._addMarginalSample(self.samplerMarginal.generatePoints( self.SMarginal)) def _preSetupApproxPivoted(self, mus:paramList) \ -> Tuple[ListAny, ListAny, ListAny]: self.computeScaleFactor() if self.trainedModel is None: self._setupTrainedModel(np.zeros((0, 0))) self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], [] self.trainedModel.data.Psupp = [] self._trainedModelOld = copy(self.trainedModel) self._scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 self._musLoc = copy(self.mus) idx, sizes = indicesScatter(len(mus), return_sizes = True) emptyCores = np.where(np.logical_not(sizes))[0] self.verbosity -= 15 return idx, sizes, emptyCores def _postSetupApproxPivoted(self, mus:Np2D, pMat:Np2D, Ps:ListAny, Qs:ListAny, sizes:ListAny): self.scaleFactor = self._scaleFactorOldPivot del self._scaleFactorOldPivot, self._temporaryPivot pMat, Ps, Qs, mus, nsamples = 
gatherPivotedApproximant(pMat, Ps, Qs, mus, sizes, self.polybasis) if len(self._musLoc) > 0: self._mus = self.checkParameterList(self._musLoc) self._mus.append(mus) else: self._mus = self.checkParameterList(mus) self.trainedModel = self._trainedModelOld del self._trainedModelOld padLeft = self.trainedModel.data.projMat.shape[1] suppNew = np.append(0, np.cumsum(nsamples)) self._setupTrainedModel(pMat, padLeft > 0) self.trainedModel.data.Qs += Qs self.trainedModel.data.Ps += Ps self.trainedModel.data.Psupp += list(padLeft + suppNew[: -1]) self.trainedModel.data.approxParameters = copy(self.approxParameters) self.verbosity += 15 def _localPivotedResult(self, pMat:Np2D, req:ListAny, emptyCores:ListAny, mus:Np2D) -> Tuple[Np2D, ListAny, Np2D]: if pMat is None: mus = copy(self.samplingEngine.mus.data) pMat = copy(self.samplingEngine.projectionMatrix) if masterCore(): for dest in emptyCores: req += [isend((len(pMat), pMat.dtype, mus.dtype), dest = dest, tag = dest)] else: mus = np.vstack((mus, self.samplingEngine.mus.data)) pMat = np.hstack((pMat, self.samplingEngine.projectionMatrix)) return pMat, req, mus @abstractmethod def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) self._preSetupApproxPivoted() data = [] pass self._postSetupApproxPivoted(mus, data) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 def setupApprox(self, plotEst : str = "NONE") -> int: """Compute greedy snapshots of solution map.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") vbMng(self, "INIT", "Starting computation of snapshots.", 3) max2ErrorEst, self.firstGreedyIterM = np.inf, True self._preliminaryTrainingMarginal() if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": muidx = np.arange(len(self.trainedModel.data.musMarginal)) else:#if self.errorEstimatorKindMarginal in ["LEAVE_ONE_OUT", "NONE"]: muidx = [] self._musMarginalTestIdxs = np.array(muidx) while self.firstGreedyIterM or (max2ErrorEst > self.greedyTolMarginal and self.samplerMarginal.npoints < self.maxIterMarginal): errorEstTest, muidx, maxErrorEst, mu = \ self.greedyNextSampleMarginal(muidx, plotEst) if maxErrorEst is None: max2ErrorEst = 1. + self.greedyTolMarginal else: if len(maxErrorEst) > 0: max2ErrorEst = np.max(maxErrorEst) - vbMng(self, "MAIN", ("Uniform testing error estimate " - "{:.4e}.").format(max2ErrorEst), 3) else: - max2ErrorEst = 0. 
+ max2ErrorEst = np.max(errorEstTest) + vbMng(self, "MAIN", ("Uniform testing error estimate " + "{:.4e}.").format(max2ErrorEst), 3) if plotEst == "LAST": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) vbMng(self, "DEL", ("Done computing snapshots (final snapshot count: " "{}).").format(len(self.mus)), 3) if (self.errorEstimatorKindMarginal == "LOOK_AHEAD_RECOVER" and hasattr(self.trainedModel, "_idxExcl") and len(self.trainedModel._idxExcl) > 0): vbMng(self, "INIT", "Recovering {} test models.".format( len(self.trainedModel._idxExcl)), 7) for j, mu in zip(self.trainedModel._idxExcl, self.trainedModel._musMExcl): self.musMarginal.insert(mu, j) self._updateTrainedModelMarginalSamples() self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) vbMng(self, "DEL", "Done recovering test models.", 7) return 0 def checkComputedApproxPivoted(self) -> bool: return (super().checkComputedApprox() and len(self.musMarginal) == len(self.trainedModel.data.musMarginal)) class GenericPivotedGreedyApproximantNoMatch( GenericPivotedGreedyApproximantBase, GenericPivotedApproximantNoMatch): """ ROM pivoted greedy interpolant computation for parametric problems (without pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'cutOffToleranceError': tolerance for ignoring parasitic poles in error estimation; defaults to 'AUTO', i.e. cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'cutOffToleranceError': tolerance for ignoring parasitic poles in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeightError: Weight for pole matching optimization in error estimation. cutOffToleranceError: Tolerance for ignoring parasitic poles in error estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def _poleMatching(self): vbMng(self, "INIT", "Compressing poles.", 10) self.trainedModel.initializeFromRational() vbMng(self, "DEL", "Done compressing poles.", 10) def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx) class GenericPivotedGreedyApproximant(GenericPivotedGreedyApproximantBase, GenericPivotedApproximant): """ ROM pivoted greedy interpolant computation for parametric problems (with pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. 
Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingMode': mode for pole matching optimization; allowed values include 'NONE' and 'SHIFT'; defaults to 'NONE'; - 'sharedRatio': required ratio of marginal points to share resonance; defaults to 1.; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'cutOffToleranceError': tolerance for ignoring parasitic poles in error estimation; defaults to 'AUTO', i.e. cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeight': weight for pole matching optimization; - 'matchingMode': mode for pole matching optimization; - 'sharedRatio': required ratio of marginal points to share resonance; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'cutOffToleranceError': tolerance for ignoring parasitic poles in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 
'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeight: Weight for pole matching optimization. matchingMode: Mode for pole matching optimization. sharedRatio: Required ratio of marginal points to share resonance. matchingWeightError: Weight for pole matching optimization in error estimation. cutOffToleranceError: Tolerance for ignoring parasitic poles in error estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. 
""" def _poleMatching(self): vbMng(self, "INIT", "Compressing and matching poles.", 10) self.trainedModel.initializeFromRational(self.matchingWeight, self.matchingMode, self.HFEngine, False) vbMng(self, "DEL", "Done compressing and matching poles.", 10) def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight, self.matchingMode, self.HFEngine, False) def getErrorEstimatorMarginalLeaveOneOut(self) -> Np1D: if self.polybasisMarginal != "NEARESTNEIGHBOR": if not hasattr(self, "_MMarginal_isauto"): if not hasattr(self, "_MMarginalOriginal"): self._MMarginalOriginal = self.paramsMarginal["MMarginal"] self.paramsMarginal["MMarginal"] = self._MMarginalOriginal self._reduceDegreeNNoWarn = 1 err = super().getErrorEstimatorMarginalLeaveOneOut() if self.polybasisMarginal != "NEARESTNEIGHBOR": del self._reduceDegreeNNoWarn return err def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() return super().setupApprox(*args, **kwargs) diff --git a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational.py b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational.py index dc6c33a..a8414b5 100644 --- a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational.py +++ b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational.py @@ -1,314 +1,295 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import warnings import numpy as np from scipy.sparse import csr_matrix, hstack, SparseEfficiencyWarning from copy import deepcopy as copy from .trained_model_pivoted_rational_nomatch import ( TrainedModelPivotedRationalNoMatch) from rrompy.utilities.base.types import (Np2D, ListAny, paramVal, paramList, HFEng) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.point_matching import rationalFunctionMatching from rrompy.utilities.numerical.degree import reduceDegreeN from rrompy.utilities.poly_fitting.polynomial import (polybases as ppb, PolynomialInterpolator as PI) from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.poly_fitting.heaviside import (heavisideUniformShape, HeavisideInterpolator as HI) from rrompy.utilities.poly_fitting.nearest_neighbor import ( NearestNeighborInterpolator as NNI) from rrompy.utilities.poly_fitting.piecewise_linear import (sparsekinds, PiecewiseLinearInterpolator as PLI) from rrompy.utilities.exception_manager import RROMPyException __all__ = ['TrainedModelPivotedRational'] class TrainedModelPivotedRational(TrainedModelPivotedRationalNoMatch): """ ROM approximant evaluation for pivoted approximants based on interpolation of rational approximants (with pole matching). Attributes: Data: dictionary with all that can be pickled. 
""" def centerNormalizeMarginal(self, mu : paramList = [], mu0 : paramVal = None) -> paramList: """ Compute normalized parameter to be plugged into approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.data.mu0Marginal. Returns: Normalized parameter. """ mu = self.checkParameterListMarginal(mu) if mu0 is None: mu0 = self.checkParameterListMarginal( self.data.mu0(0, self.data.directionMarginal)) return (self.mapParameterList(mu, idx = self.data.directionMarginal) - self.mapParameterList(mu0, idx = self.data.directionMarginal) ) / [self.data.scaleFactor[x] for x in self.data.directionMarginal] def setupMarginalInterp(self, approx, interpPars:ListAny, extraPar = None): vbMng(self, "INIT", "Starting computation of marginal interpolator.", 12) musMCN = self.centerNormalizeMarginal(self.data.musMarginal) - nM, nP = len(musMCN), len(self.data.HIs[0].poles) - pbM = approx.polybasisMarginal + nM, pbM = len(musMCN), approx.polybasisMarginal if pbM in ppb + rbpb: if extraPar: approx._setMMarginalAuto() _MMarginalEff = approx.paramsMarginal["MMarginal"] if pbM in ppb: p = PI() elif pbM in rbpb: p = RBI() else: # if pbM in sparsekinds + ["NEARESTNEIGHBOR"]: if pbM == "NEARESTNEIGHBOR": p = NNI() else: # if pbM in sparsekinds: pllims = [[-1.] * self.data.nparMarginal, [1.] * self.data.nparMarginal] p = PLI() for ipts, pts in enumerate(self.data.suppEffPts): if len(pts) == 0: raise RROMPyException("Empty list of support points.") musMCNEff, valsEff = musMCN[pts], np.eye(len(pts)) if pbM in ppb + rbpb: if extraPar: if ipts > 0: verb = approx.verbosity approx.verbosity = 0 _musM = approx.musMarginal approx.musMarginal = musMCNEff approx._setMMarginalAuto() approx.musMarginal = _musM approx.verbosity = verb else: approx.paramsMarginal["MMarginal"] = reduceDegreeN( _MMarginalEff, len(musMCNEff), self.data.nparMarginal, approx.paramsMarginal["polydegreetypeMarginal"]) MMEff = approx.paramsMarginal["MMarginal"] while MMEff >= 0: wellCond, msg = p.setupByInterpolation(musMCNEff, valsEff, MMEff, *interpPars) vbMng(self, "MAIN", msg, 30) if wellCond: break vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing " "MMarginal by 1."), 35) MMEff -= 1 if MMEff < 0: raise RROMPyException(("Instability in computation of " "interpolant. Aborting.")) if (pbM in rbpb and len(interpPars) > 4 and "optimizeScalingBounds" in interpPars[4].keys()): interpPars[4]["optimizeScalingBounds"] = [-1., -1.] 
elif pbM == "NEARESTNEIGHBOR": if ipts > 0: interpPars[0] = 1 p.setupByInterpolation(musMCNEff, valsEff, *interpPars) elif ipts == 0: # and pbM in sparsekinds: wellCond, msg = p.setupByInterpolation(musMCNEff, valsEff, pllims, extraPar[pts], *interpPars) vbMng(self, "MAIN", msg, 30) if not wellCond: vbMng(self, "MAIN", "Warning: polyfit is poorly conditioned.", 35) if ipts == 0: self.data.marginalInterp = copy(p) self.data.coeffsEff, self.data.polesEff = [], [] for hi, sup in zip(self.data.HIs, self.data.Psupp): cEff = hi.coeffs if (self.data._collapsed or self.data.projMat.shape[1] == cEff.shape[1]): cEff = copy(cEff) else: supC = self.data.projMat.shape[1] - sup - cEff.shape[1] cEff = hstack((csr_matrix((len(cEff), sup)), csr_matrix(cEff), csr_matrix((len(cEff), supC))), "csr") self.data.coeffsEff += [cEff] self.data.polesEff += [copy(hi.poles)] else: ptsBad = [i for i in range(nM) if i not in pts] idxBad = np.where(self.data.suppEffIdx == ipts)[0] + warnings.simplefilter('ignore', SparseEfficiencyWarning) if pbM in sparsekinds: for ij, j in enumerate(ptsBad): - for jb in idxBad: - poleBad = self.data.polesEff[j][jb] - if not np.isinf(poleBad): - self.data.coeffsEff[j][nP] -= ( - self.data.coeffsEff[j][jb] / poleBad) - bdsL = np.append([0], - self.data.coeffsEff[j].indptr[idxBad + 1]) - bdsR = np.append(self.data.coeffsEff[j].indptr[idxBad], - self.data.coeffsEff[j].indptr[-1]) - self.data.coeffsEff[j].data = np.concatenate([ - self.data.coeffsEff[j].data[l : r] - for l, r in zip(bdsL, bdsR)]) - self.data.coeffsEff[j].indices = np.concatenate([ - self.data.coeffsEff[j].indices[l : r] - for l, r in zip(bdsL, bdsR)]) - for ijb, jb in enumerate(idxBad): - self.data.coeffsEff[j].indptr[jb + 1 :] -= ( - bdsL[ijb + 1] - bdsR[ijb]) - self.data.polesEff[j][idxBad] = np.inf + nearest = pts[np.argmin(np.sum(np.abs(musMCNEff.data + - np.tile(musMCN[j], [len(pts), 1]) + ), axis = 1).flatten())] + self.data.coeffsEff[j][idxBad] = copy( + self.data.coeffsEff[nearest][idxBad]) + self.data.polesEff[j][idxBad] = copy( + self.data.polesEff[nearest][idxBad]) else: - warnings.simplefilter('ignore', SparseEfficiencyWarning) if (self.data._collapsed or self.data.projMat.shape[1] == cEff.shape[1]): cfBase = np.zeros((len(idxBad), cEff.shape[1]), dtype = cEff.dtype) else: cfBase = csr_matrix((len(idxBad), self.data.projMat.shape[1]), dtype = cEff.dtype) valMuMBad = p(musMCN[ptsBad]) for ijb, jb in enumerate(ptsBad): self.data.coeffsEff[jb][idxBad] = copy(cfBase) self.data.polesEff[jb][idxBad] = 0. 
for ij, j in enumerate(pts): val = valMuMBad[ij][ijb] if not np.isclose(val, 0.): self.data.coeffsEff[jb][idxBad] += (val * self.data.coeffsEff[j][idxBad]) self.data.polesEff[jb][idxBad] += (val * self.data.polesEff[j][idxBad]) - warnings.filters.pop(0) + warnings.filters.pop(0) if pbM in ppb + rbpb: approx.paramsMarginal["MMarginal"] = _MMarginalEff vbMng(self, "DEL", "Done computing marginal interpolator.", 12) def initializeFromLists(self, poles:ListAny, coeffs:ListAny, supps:ListAny, basis:str, matchingWeight:float, matchingMode:str, HFEngine:HFEng, is_state:bool): """Initialize Heaviside representation.""" poles, coeffs = rationalFunctionMatching( *heavisideUniformShape(poles, coeffs), self.data.musMarginal.data, matchingWeight, matchingMode, supps, self.data.projMat, HFEngine, is_state) super().initializeFromLists(poles, coeffs, supps, basis) self.data.suppEffPts = [np.arange(len(self.data.HIs))] self.data.suppEffIdx = np.zeros(len(poles[0]), dtype = int) def checkSharedRatio(self, shared:float) -> str: N = len(self.data.HIs[0].poles) M = len(self.data.HIs) goodLocPoles = np.array([np.logical_not(np.isinf(hi.poles) ) for hi in self.data.HIs]) self.data.suppEffPts = [np.arange(len(self.data.HIs))] self.data.suppEffIdx = np.zeros(N, dtype = int) if np.all(np.all(goodLocPoles)): return " No poles erased." goodGlobPoles = np.sum(goodLocPoles, axis = 0) goodEnoughPoles = goodGlobPoles >= max(1., 1. * shared * M) keepPole = np.where(goodEnoughPoles)[0] halfPole = np.where(np.logical_and(goodEnoughPoles, goodGlobPoles < M))[0] removePole = np.where(np.logical_not(goodEnoughPoles))[0] if len(removePole) > 0: keepCoeff = np.append(keepPole, np.arange(N, len(self.data.HIs[0].coeffs))) for hi in self.data.HIs: for j in removePole: if not np.isinf(hi.poles[j]): hi.coeffs[N, :] -= hi.coeffs[j, :] / hi.poles[j] hi.poles = hi.poles[keepPole] hi.coeffs = hi.coeffs[keepCoeff, :] for idxR in halfPole: pts = np.where(goodLocPoles[:, idxR])[0] idxEff = len(self.data.suppEffPts) for idEff, prevPts in enumerate(self.data.suppEffPts): if len(prevPts) == len(pts): if np.allclose(prevPts, pts): idxEff = idEff break if idxEff == len(self.data.suppEffPts): self.data.suppEffPts += [pts] self.data.suppEffIdx[idxR] = idxEff self.data.suppEffIdx = self.data.suppEffIdx[keepPole] return (" Hard-erased {} pole".format(len(removePole)) + "s" * (len(removePole) != 1) + " and soft-erased {} pole".format(len(halfPole)) + "s" * (len(halfPole) != 1) + ".") def interpolateMarginalInterpolator(self, mu : paramList = []) -> ListAny: """Obtain interpolated approximant interpolator.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal models at mu = {}.".format(mu), 95) his = [] muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) verb, self.verbosity = self.verbosity, 0 poless = self.interpolateMarginalPoles(mu, mIvals) coeffss = self.interpolateMarginalCoeffs(mu, mIvals) self.verbosity = verb for j in range(len(mu)): his += [HI()] his[-1].poles = poless[j] his[-1].coeffs = coeffss[j] his[-1].npar = 1 his[-1].polybasis = self.data.HIs[0].polybasis vbMng(self, "DEL", "Done interpolating marginal models.", 95) return his def interpolateMarginalPoles(self, mu : paramList = [], mIvals : Np2D = None) -> ListAny: """Obtain interpolated approximant poles.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal poles at mu = {}.".format(mu), 95) intMPoles = np.zeros((len(mu),) + self.data.polesEff[0].shape, dtype = self.data.polesEff[0].dtype) 
if mIvals is None: muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) for pEff, mI in zip(self.data.polesEff, mIvals): - mIBad, pEffBad = np.logical_not(np.isclose(mI, 0.)), np.isinf(pEff) - pEffGood = np.logical_not(pEffBad) - if np.sum(pEffGood) > 0: - intMPoles[:, pEffGood] += ( - np.expand_dims(mI, - 1) * pEff[pEffGood]) - if np.sum(mIBad) * np.sum(pEffBad) > 0: - intMPoles[np.ix_(mIBad, pEffBad)] = np.inf + intMPoles += np.expand_dims(mI, - 1) * pEff vbMng(self, "DEL", "Done interpolating marginal poles.", 95) return intMPoles def interpolateMarginalCoeffs(self, mu : paramList = [], mIvals : Np2D = None) -> ListAny: """Obtain interpolated approximant coefficients.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal coefficients at mu = {}.".format(mu), 95) intMCoeffs = np.zeros((len(mu),) + self.data.coeffsEff[0].shape, dtype = self.data.coeffsEff[0].dtype) if mIvals is None: muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) for cEff, mI in zip(self.data.coeffsEff, mIvals): for j, m in enumerate(mI): intMCoeffs[j] += m * cEff vbMng(self, "DEL", "Done interpolating marginal coefficients.", 95) return intMCoeffs
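
With sparse (piecewise linear) marginal bases, poles that are soft-erased at a given marginal sample are now copied from the nearest marginal sample that still supports them, instead of being set to infinity; as a consequence, interpolateMarginalPoles can combine the per-sample effective poles by a plain weighted sum, with no masking of infinite values. A self-contained numpy sketch of that combination, with made-up pole values and interpolant weights (the real code works on self.data.polesEff and the marginal interpolant evaluations mIvals):

    import numpy as np

    # Two marginal samples with three effective poles each (illustrative values).
    polesEff = [np.array([1.0, 2.0, 5.0]), np.array([1.1, 2.2, 4.8])]
    # Marginal interpolant evaluated at two test points: one row of weights per
    # marginal sample (illustrative values, summing to 1 at each test point).
    mIvals = np.array([[0.7, 0.2],
                       [0.3, 0.8]])
    intMPoles = np.zeros((mIvals.shape[1], len(polesEff[0])))
    for pEff, mI in zip(polesEff, mIvals):
        # weight this sample's poles by its interpolant value at every test point
        intMPoles += np.expand_dims(mI, -1) * pEff
    print(intMPoles)  # row j holds the interpolated poles at test point j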
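
Separately, in the marginal greedy loop of setupApprox, when no test point exceeds greedyTolMarginal the termination value now falls back to the largest available estimate instead of 0, and the "Uniform testing error estimate" message is printed in both branches. A condensed restatement of the updated logic, with errorEstTest and maxErrorEst as returned by greedyNextSampleMarginal above (the exact indentation of the logging call is inferred from the hunk):

    if maxErrorEst is None:
        max2ErrorEst = 1. + self.greedyTolMarginal
    else:
        if len(maxErrorEst) > 0:
            max2ErrorEst = np.max(maxErrorEst)
        else:
            max2ErrorEst = np.max(errorEstTest)  # previously hard-coded to 0.
        vbMng(self, "MAIN", ("Uniform testing error estimate "
                             "{:.4e}.").format(max2ErrorEst), 3)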