diff --git a/examples/5_anisotropic_square/anisotropic_square.py b/examples/5_anisotropic_square/anisotropic_square.py index ec95bcd..ad8dabe 100644 --- a/examples/5_anisotropic_square/anisotropic_square.py +++ b/examples/5_anisotropic_square/anisotropic_square.py @@ -1,76 +1,77 @@ import numpy as np import matplotlib.pyplot as plt from itertools import product from anisotropic_square_engine import (AnisotropicSquareEngine as engine, AnisotropicSquareEnginePoles as plsEx) from rrompy.reduction_methods import ( RationalInterpolantGreedyPivotedGreedy as RIGPG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, SparseGridSampler as SGS) zs, Ls = [10., 50.], [.2, 1.2] z0, L0, n = np.mean(zs), np.mean(Ls), 50 murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]] np.random.seed(4020) mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]), Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])] solver = engine(z0, L0, n) fighandles = [] params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3, "polybasisMarginal": "MONOMIAL_WENDLAND", "polybasis": "LEGENDRE", 'samplerPivot':QS(zs, "UNIFORM"), 'trainSetGenerator':QS(zs, "UNIFORM"), 'errorEstimatorKind':"LOOK_AHEAD_RES", 'errorEstimatorKindMarginal':"LOOK_AHEAD_RECOVER", - "paramsMarginal":{"MMarginal": 2}, "SMarginal": 3, + "SMarginal": 3, "paramsMarginal": {"MMarginal": 2, + "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]}, "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls), "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1.} for shared, tol in product([1., 0.], [1., 3.]): print("Testing cutoff tolerance {} with shared ratio {}.".format(tol, shared)) params['cutOffTolerance'] = tol params['cutOffSharedRatio'] = shared approx = RIGPG([0], solver, mu0 = [z0, L0], approx_state = True, approxParameters = params, verbosity = 5) approx.setupApprox("ALL") verb = approx.verbosity approx.verbosity = 0 tspace = np.linspace(Ls[0], Ls[-1], 100) for j, t in enumerate(tspace): plsE = plsEx(t, 0., zs[-1]) pls = approx.getPoles([None, t]) pls[np.abs(np.imag(pls)) > 1e-5] = np.nan if j == 0: polesE = np.empty((len(tspace), len(plsE))) poles = np.empty((len(tspace), len(pls))) polesE[:] = np.nan if len(plsE) > polesE.shape[1]: nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1])) nanR[:] = np.nan polesE = np.hstack((polesE, nanR)) polesE[j, : len(plsE)] = np.real(plsE) poles[j] = np.real(pls) approx.verbosity = verb fighandles += [plt.figure(figsize = (17, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) ax1.plot(poles, tspace) ax1.set_ylim(Ls) ax1.set_xlabel('mu_1') ax1.set_ylabel('mu_2') ax1.grid() ax2.plot(polesE, tspace, 'k-.', linewidth = 1) ax2.plot(poles, tspace) for mm in approx.musMarginal: ax2.plot(zs, [mm[0, 0]] * 2, 'k--', linewidth = 1) ax2.set_xlim(zs) ax2.set_ylim(Ls) ax2.set_xlabel('mu_1') ax2.set_ylabel('mu_2') ax2.grid() plt.show() print("\n") diff --git a/examples/5_anisotropic_square/anisotropic_square.py b/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py old mode 100644 new mode 100755 similarity index 79% copy from examples/5_anisotropic_square/anisotropic_square.py copy to examples/5_anisotropic_square/anisotropic_square_test_cutoff.py index ec95bcd..8fd2899 --- a/examples/5_anisotropic_square/anisotropic_square.py +++ b/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py @@ -1,76 +1,78 @@ import numpy as np import matplotlib.pyplot as plt from itertools import product from anisotropic_square_engine import (AnisotropicSquareEngine as engine, 
AnisotropicSquareEnginePoles as plsEx) from rrompy.reduction_methods import ( RationalInterpolantGreedyPivotedGreedy as RIGPG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, SparseGridSampler as SGS) zs, Ls = [10., 50.], [.2, 1.2] z0, L0, n = np.mean(zs), np.mean(Ls), 50 murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]] np.random.seed(4020) mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]), Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])] solver = engine(z0, L0, n) fighandles = [] params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3, - "polybasisMarginal": "MONOMIAL_WENDLAND", "polybasis": "LEGENDRE", 'samplerPivot':QS(zs, "UNIFORM"), - 'trainSetGenerator':QS(zs, "UNIFORM"), + 'cutOffSharedRatio': 0., "maxIterMarginal":20, + 'cutOffToleranceError': 1., 'trainSetGenerator':QS(zs, "UNIFORM"), 'errorEstimatorKind':"LOOK_AHEAD_RES", 'errorEstimatorKindMarginal':"LOOK_AHEAD_RECOVER", - "paramsMarginal":{"MMarginal": 2}, "SMarginal": 3, + "SMarginal": 3, "paramsMarginal": {"MMarginal": 2, + "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]}, "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls), "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1.} -for shared, tol in product([1., 0.], [1., 3.]): - print("Testing cutoff tolerance {} with shared ratio {}.".format(tol, - shared)) - params['cutOffTolerance'] = tol - params['cutOffSharedRatio'] = shared +for cutOffTolerance, polybasisMarginal in product([1., np.inf], + ["MONOMIAL_WENDLAND", "PIECEWISE_LINEAR_UNIFORM"]): + print("Testing cutoff tolerance {} with marginal basis {}.".format( + cutOffTolerance, polybasisMarginal)) + params['cutOffTolerance'] = cutOffTolerance + params["polybasisMarginal"] = polybasisMarginal approx = RIGPG([0], solver, mu0 = [z0, L0], approx_state = True, approxParameters = params, verbosity = 5) approx.setupApprox("ALL") verb = approx.verbosity approx.verbosity = 0 tspace = np.linspace(Ls[0], Ls[-1], 100) for j, t in enumerate(tspace): plsE = plsEx(t, 0., zs[-1]) pls = approx.getPoles([None, t]) pls[np.abs(np.imag(pls)) > 1e-5] = np.nan if j == 0: polesE = np.empty((len(tspace), len(plsE))) poles = np.empty((len(tspace), len(pls))) polesE[:] = np.nan if len(plsE) > polesE.shape[1]: nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1])) nanR[:] = np.nan polesE = np.hstack((polesE, nanR)) polesE[j, : len(plsE)] = np.real(plsE) poles[j] = np.real(pls) approx.verbosity = verb fighandles += [plt.figure(figsize = (17, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) ax1.plot(poles, tspace) ax1.set_ylim(Ls) ax1.set_xlabel('mu_1') ax1.set_ylabel('mu_2') ax1.grid() ax2.plot(polesE, tspace, 'k-.', linewidth = 1) ax2.plot(poles, tspace) for mm in approx.musMarginal: ax2.plot(zs, [mm[0, 0]] * 2, 'k--', linewidth = 1) ax2.set_xlim(zs) ax2.set_ylim(Ls) ax2.set_xlabel('mu_1') ax2.set_ylabel('mu_2') ax2.grid() plt.show() print("\n") diff --git a/examples/8_damped_mass_chain/damped_mass_chain.py b/examples/8_damped_mass_chain/damped_mass_chain.py index a2cda76..30f1f30 100644 --- a/examples/8_damped_mass_chain/damped_mass_chain.py +++ b/examples/8_damped_mass_chain/damped_mass_chain.py @@ -1,180 +1,186 @@ ### example from Lohmann, Eid. Efficient Order Reduction of Parametric and ### Nonlinear Models by Superposition of Locally Reduced Models. 
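# --- Editorial sketch (illustrative, not part of the original example) ------
# The chain assembled below obeys the second-order system
#     M x''(t) + D x'(t) + K x(t) = B u(t),    y(t) = C x(t),
# whose transfer function is H(s) = C (s^2 M + s D + K)^{-1} B; the
# "augmented" engines presumably realize the same map through a first-order
# companion form. A direct dense-algebra evaluation of the Bode magnitude,
# assuming M, D, K are assembled as (N, N) numpy arrays, B is (N, 1), and C
# is (p, N), could read:
def directBodeMagnitude(M, D, K, B, C, freqs):
    import numpy as np
    mags = []
    for f in freqs:
        s = 1j * f                                  # evaluate on the imaginary axis
        H = C.dot(np.linalg.solve(s ** 2 * M + s * D + K, B))
        mags += [20. * np.log10(np.abs(H))]         # magnitude in dB
    return np.array(mags)
# The surrogates built in this script approximate the same map s -> H(s)
# from a handful of sampled solves instead of a solve at every frequency.
# -----------------------------------------------------------------------------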
from copy import deepcopy as copy import numpy as np import matplotlib.pyplot as plt from rrompy.reduction_methods import (NearestNeighbor as NN, RationalInterpolant as RI, RationalInterpolantGreedy as RIG, RationalInterpolantPivoted as RIP, RationalInterpolantGreedyPivoted as RIGP) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, EmptySampler as ES) from damped_mass_chain_engine import (bode as bode0, bodeLog, MassChainEngine, MassChainEngineLog, AugmentedMassChainEngine, AugmentedMassChainEngineLog) from rrompy.utilities.base.decorators import addWhiteNoise ########################## fullModelOrder = 1 #+ 1 SMarginal = 1 #* 2 state = 0 #+ 1 noise_level = 0 #+ 1e-5 +LS = 0 #+ 6 ########################## modelSign = "Surrogate modeling for frequency response of " if fullModelOrder == 1: modelSign += "augmented " modelSign += "damper-mass-spring model" if SMarginal > 1: modelSign += " with 1 design parameter" modelSign += ".\nOutput is " if state: modelSign += "vector of mass displacements. " else: modelSign += "displacement of last mass. " +if LS: + modelSign += "Least-squares: S - N - 1 = {}. ".format(LS) +else: + modelSign += "Interpolatory: S = N + 1. " modelSign += "Noise level: {}.\n".format(noise_level) print(modelSign) M = [np.array([1., 5., 25., 125.])] N = len(M[0]) D = [np.zeros((N, N))] D[0][0, 1], D[0][1, 2], D[0][2, 3], D[0][3, 3] = .1, .4, 1.6, 0. D[0] = D[0] + D[0].T K = [np.zeros((N, N))] K[0][0, 1], K[0][1, 2], K[0][2, 3], K[0][0, 3], K[0][3, 3] = 9., 3., 1., 1., 2. K[0] = K[0] + K[0].T B = np.append(27., np.zeros(N - 1)).reshape(-1, 1) if SMarginal > 1: M += [np.zeros(N)] D += [np.zeros((N, N))] D[1][3, 3] = 1. D[1] = D[1] + D[1].T K += [np.zeros((N, N))] K[1][0, 3], K[1][3, 3] = 2., 2. K[1] = K[1] + K[1].T if state: C = np.eye(4) else: C = np.append(np.zeros(N - 1), 1.).reshape(1, -1) for logspace in range(2): print("Approximation in l{}space".format("og" * logspace + "in" * (not logspace))) if logspace: bode = bodeLog if fullModelOrder == 1: engine = AugmentedMassChainEngineLog else: engine = MassChainEngineLog else: bode = bode0 if fullModelOrder == 1: engine = AugmentedMassChainEngine else: engine = MassChainEngine solver = addWhiteNoise(noise_level)(engine)(M, D, K, B, C) ss, mu = [1e-2, 1e1], [] s0 = 10. ** np.mean(np.log10(ss)) freq = np.logspace(np.log10(ss[0]), np.log10(ss[1]), 100) if logspace: ss, freq = [np.log10(ss[0]), np.log10(ss[1])], np.log10(freq) s0, parameterMap = np.log10(s0), 1. else: parameterMap = {"F": [("log10", "x")], "B": [(10., "**", "x")]} krange = [[ss[0]], [ss[-1]]] k0, srange = [s0], copy(krange) if SMarginal > 1: ms = [0., 1.] 
m0, mrange = np.mean(ms), [[ms[0]], [ms[-1]]] krange[0] += mrange[0] krange[1] += mrange[1] k0 += [m0] mu = [.5 * (ms[1] - ms[0]) / (SMarginal - 1)] if not logspace: parameterMap["F"] += [("x")] parameterMap["B"] += [("x")] for method in ["RI", "RI_GREEDY"]: print("Testing {} method".format(method)) if method == "RI": - params = {'S':15, 'N': 8, 'POD':True, 'polybasis':"CHEBYSHEV"} + params = {'S':15, 'POD':True, 'polybasis':"CHEBYSHEV"} + if LS: params["N"] = params["S"] - 1 - LS if SMarginal > 1: algo = RIP else: params['sampler'] = QS(srange, "CHEBYSHEV", parameterMap) algo = RI if method == "RI_GREEDY": params = {'S':5, 'POD':True, 'polybasis':"LEGENDRE", 'greedyTol':1e-2, 'errorEstimatorKind':"DISCREPANCY", 'trainSetGenerator':QS(srange, "CHEBYSHEV", parameterMap)} if SMarginal > 1: algo = RIGP else: params['sampler'] = QS(srange, "UNIFORM", parameterMap) algo = RIG if SMarginal > 1: params["paramsMarginal"] = {"MMarginal": SMarginal - 1} params['SMarginal'] = SMarginal params['polybasisMarginal'] = "MONOMIAL" params['radialDirectionalWeightsMarginal'] = [2. / (ms[1] - ms[0])] params['matchingWeight'] = 1. #params['cutOffTolerance'] = 2. params['samplerPivot'] = QS(srange, "UNIFORM", parameterMap) params['samplerMarginal'] = QS(mrange, "UNIFORM") approx = algo([0], solver, mu0 = k0, approx_state = True, approxParameters = params, verbosity = 5, storeAllSamples = True) else: approx = algo(solver, mu0 = k0, approx_state = True, approxParameters = params, verbosity = 5) if "GREEDY" in method: approx.setupApprox("LAST") else: approx.setupApprox() approxNN = NN(solver, mu0 = k0, approx_state = True, verbosity = 5, approxParameters = {'S':len(approx.mus), 'POD':params['POD'], 'sampler':ES()}) if SMarginal > 1: approxNN.setSamples(approx.storedSamplesFilenames) approx.purgeStoredSamples() for m in approx.musMarginal: bode(freq, m[0], [approx.getHF, approx.getApprox, approxNN.getApprox]) else: approxNN.setSamples(approx.samplingEngine) bode(freq, mu, [approx.getHF, approx.getApprox, approxNN.getApprox]) if SMarginal > 1: bode(freq, [1.5 * ms[1]], [approx.getHF, approx.getApprox, approxNN.getApprox]) bode(freq, [2. * ms[1]], [approx.getHF, approx.getApprox, approxNN.getApprox]) verb = approx.verbosity approx.verbosity = 0 mspace = np.linspace(ms[0], ms[-1], 10) for j, t in enumerate(mspace): pls = approx.getPoles([None, t]) if j == 0: poles = np.empty((len(mspace), len(pls)), dtype = np.complex) poles[j] = pls for j, t in enumerate(approx.musMarginal): pls = approx.getPoles([None, t[0][0]]) if j == 0: polesE = np.empty((SMarginal, len(pls)), dtype = np.complex) polesE[j] = pls approx.verbosity = verb fig = plt.figure(figsize = (10, 6)) ax = fig.add_subplot(1, 1, 1) ax.plot(np.real(poles), np.imag(poles), '--') ax.plot(np.real(polesE), np.imag(polesE), 'ko', markersize = 4) ax.set_xlabel('Real') ax.set_ylabel('Imag') ax.grid() plt.show() else: poles = approx.getPoles() print("Poles:\n{}".format(poles)) print("\n") diff --git a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py index fd5bab1..efaa9c8 100644 --- a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py +++ b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py @@ -1,751 +1,751 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from os import mkdir, remove, rmdir import numpy as np from copy import deepcopy as copy from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.utilities.base.data_structures import purgeDict, getNewFilename from rrompy.sampling import SamplingEngineStandard, SamplingEngineStandardPOD from rrompy.utilities.poly_fitting.polynomial import polybases as ppb from rrompy.utilities.poly_fitting.radial_basis import polybases as rbpb from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk from rrompy.utilities.base.types import Np2D, paramList, List, ListAny from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.degree import reduceDegreeN from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import checkParameterList from rrompy.utilities.parallel import poolRank, bcast __all__ = ['GenericPivotedApproximantNoMatch', 'GenericPivotedApproximant'] class GenericPivotedApproximantBase(GenericApproximant): def __init__(self, directionPivot:ListAny, *args, storeAllSamples : bool = False, **kwargs): self._preInit() if len(directionPivot) > 1: raise RROMPyException(("Exactly 1 pivot parameter allowed in pole " "matching.")) from rrompy.parameter.parameter_sampling import (EmptySampler as ES, SparseGridSampler as SG) self._addParametersToList(["cutOffTolerance", "radialDirectionalWeightsMarginal"], - [np.inf, [1.]], ["samplerPivot", + [np.inf, 1.], ["samplerPivot", "SMarginal", "samplerMarginal"], [ES(), 1, SG([[-1.], [1.]])]) self._directionPivot = directionPivot self.storeAllSamples = storeAllSamples super().__init__(*args, **kwargs) self._postInit() def setupSampling(self): """Setup sampling engine.""" RROMPyAssert(self._mode, message = "Cannot setup sampling engine.") if not hasattr(self, "_POD") or self._POD is None: return if self.POD: SamplingEngine = SamplingEngineStandardPOD else: SamplingEngine = SamplingEngineStandard self.samplingEngine = SamplingEngine(self.HFEngine, sample_state = self.approx_state, verbosity = self.verbosity) def initializeModelData(self, datadict): if "directionPivot" in datadict.keys(): from .trained_model.trained_model_pivoted_data import ( TrainedModelPivotedData) return (TrainedModelPivotedData(datadict["mu0"], datadict["mus"], datadict.pop("projMat"), datadict["scaleFactor"], datadict.pop("parameterMap"), datadict["directionPivot"]), ["mu0", "scaleFactor", "directionPivot", "mus"]) else: return super().initializeModelData(datadict) @property def npar(self): """Number of parameters.""" if hasattr(self, "_temporaryPivot"): return self.nparPivot return super().npar def checkParameterListPivot(self, mu:paramList, check_if_single : bool = False) -> paramList: return checkParameterList(mu, self.nparPivot, check_if_single) def checkParameterListMarginal(self, mu:paramList, check_if_single : bool = False) -> paramList: 
return checkParameterList(mu, self.nparMarginal, check_if_single) @property def mus(self): """Value of mus. Its assignment may reset snapshots.""" return self._mus @mus.setter def mus(self, mus): mus = self.checkParameterList(mus) musOld = copy(self.mus) if hasattr(self, '_mus') else None if (musOld is None or len(mus) != len(musOld) or not mus == musOld): self.resetSamples() self._mus = mus @property def musMarginal(self): """Value of musMarginal. Its assignment may reset snapshots.""" return self._musMarginal @musMarginal.setter def musMarginal(self, musMarginal): musMarginal = self.checkParameterListMarginal(musMarginal) if hasattr(self, '_musMarginal'): musMOld = copy(self.musMarginal) else: musMOld = None if (musMOld is None or len(musMarginal) != len(musMOld) or not musMarginal == musMOld): self.resetSamples() self._musMarginal = musMarginal @property def cutOffTolerance(self): """Value of cutOffTolerance.""" return self._cutOffTolerance @cutOffTolerance.setter def cutOffTolerance(self, cutOffTolerance): self._cutOffTolerance = cutOffTolerance self._approxParameters["cutOffTolerance"] = self.cutOffTolerance @property def SMarginal(self): """Value of SMarginal.""" return self._SMarginal @SMarginal.setter def SMarginal(self, SMarginal): if SMarginal <= 0: raise RROMPyException("SMarginal must be positive.") if hasattr(self, "_SMarginal") and self._SMarginal is not None: Sold = self.SMarginal else: Sold = -1 self._SMarginal = SMarginal self._approxParameters["SMarginal"] = self.SMarginal if Sold != self.SMarginal: self.resetSamples() @property def radialDirectionalWeightsMarginal(self): """Value of radialDirectionalWeightsMarginal.""" return self._radialDirectionalWeightsMarginal @radialDirectionalWeightsMarginal.setter def radialDirectionalWeightsMarginal(self, radialDirWeightsMarg): if hasattr(radialDirWeightsMarg, "__len__"): radialDirWeightsMarg = list(radialDirWeightsMarg) else: radialDirWeightsMarg = [radialDirWeightsMarg] - self._radialDirectionalWeightsMarginal = np.array(radialDirWeightsMarg) + self._radialDirectionalWeightsMarginal = radialDirWeightsMarg self._approxParameters["radialDirectionalWeightsMarginal"] = ( self.radialDirectionalWeightsMarginal) @property def directionPivot(self): """Value of directionPivot. 
Its assignment may reset snapshots.""" return self._directionPivot @directionPivot.setter def directionPivot(self, directionPivot): if hasattr(self, '_directionPivot'): directionPivotOld = copy(self.directionPivot) else: directionPivotOld = None if (directionPivotOld is None or len(directionPivot) != len(directionPivotOld) or not directionPivot == directionPivotOld): self.resetSamples() self._directionPivot = directionPivot @property def directionMarginal(self): return [x for x in range(self.HFEngine.npar) \ if x not in self.directionPivot] @property def nparPivot(self): return len(self.directionPivot) @property def nparMarginal(self): return self.npar - self.nparPivot @property def muBounds(self): """Value of muBounds.""" return self.samplerPivot.lims @property def muBoundsMarginal(self): """Value of muBoundsMarginal.""" return self.samplerMarginal.lims @property def sampler(self): """Proxy of samplerPivot.""" return self._samplerPivot @property def samplerPivot(self): """Value of samplerPivot.""" return self._samplerPivot @samplerPivot.setter def samplerPivot(self, samplerPivot): if 'generatePoints' not in dir(samplerPivot): raise RROMPyException("Pivot sampler type not recognized.") if hasattr(self, '_samplerPivot') and self._samplerPivot is not None: samplerOld = self.samplerPivot self._samplerPivot = samplerPivot self._approxParameters["samplerPivot"] = self.samplerPivot if not 'samplerOld' in locals() or samplerOld != self.samplerPivot: self.resetSamples() @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'generatePoints' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") if (hasattr(self, '_samplerMarginal') and self._samplerMarginal is not None): samplerOld = self.samplerMarginal self._samplerMarginal = samplerMarginal self._approxParameters["samplerMarginal"] = self.samplerMarginal if not 'samplerOld' in locals() or samplerOld != self.samplerMarginal: self.resetSamples() def computeScaleFactor(self): """Compute parameter rescaling factor.""" self.scaleFactorPivot = .5 * np.abs(( self.HFEngine.mapParameterList(self.muBounds[0], idx = self.directionPivot) - self.HFEngine.mapParameterList(self.muBounds[1], idx = self.directionPivot) )[0]) self.scaleFactorMarginal = .5 * np.abs(( self.HFEngine.mapParameterList(self.muBoundsMarginal[0], idx = self.directionMarginal) - self.HFEngine.mapParameterList(self.muBoundsMarginal[1], idx = self.directionMarginal) )[0]) self.scaleFactor = np.empty(self.npar) self.scaleFactor[self.directionPivot] = self.scaleFactorPivot self.scaleFactor[self.directionMarginal] = self.scaleFactorMarginal def _setupTrainedModel(self, pMat:Np2D, pMatUpdate : bool = False, forceNew : bool = False): pMatEff = self.HFEngine.applyC(pMat) if self.approx_state else pMat if forceNew or self.trainedModel is None: self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp datadict = {"mu0": self.mu0, "mus": copy(self.mus), "projMat": pMatEff, "scaleFactor": self.scaleFactor, "parameterMap": self.HFEngine.parameterMap, "directionPivot": self.directionPivot} self.trainedModel.data = self.initializeModelData(datadict)[0] else: self.trainedModel = self.trainedModel if pMatUpdate: self.trainedModel.data.projMat = np.hstack( (self.trainedModel.data.projMat, pMatEff)) else: self.trainedModel.data.projMat = copy(pMatEff) self.trainedModel.data.mus = 
copy(self.mus) self.trainedModel.data.musMarginal = copy(self.musMarginal) def normApprox(self, mu:paramList) -> float: _PODOld = self.POD self._POD = False result = super().normApprox(mu) self._POD = _PODOld return result @property def storedSamplesFilenames(self) -> List[str]: if not hasattr(self, "_sampleBaseFilename"): return [] return [self._sampleBaseFilename + "{}_{}.pkl" .format(idx + 1, self.name()) for idx in range(len(self.musMarginal))] def purgeStoredSamples(self): if not hasattr(self, "_sampleBaseFilename"): return try: for file in self.storedSamplesFilenames: remove(file) except: RROMPyWarning(("Could not delete file {}. Aborting purge of " "stored samples.").format(file)) return try: rmdir(self._sampleBaseFilename[: -8]) except: RROMPyWarning(("Could not delete base folder containing stored " "samples.")) return def storeSamples(self, idx : int = None): """Store samples to file.""" if not hasattr(self, "_sampleBaseFilename"): filenameBase = None if poolRank() == 0: foldername = getNewFilename(self.name(), "samples") mkdir(foldername) filenameBase = foldername + "/sample_" self._sampleBaseFilename = bcast(filenameBase, force = True) if idx is not None: super().storeSamples(self._sampleBaseFilename + str(idx + 1), False) def loadTrainedModel(self, filename:str): """Load trained reduced model from file.""" super().loadTrainedModel(filename) self._musMarginal = self.trainedModel.data.musMarginal class GenericPivotedApproximantNoMatch(GenericPivotedApproximantBase): """ ROM pivoted approximant (without pole matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'cutOffTolerance': tolerance for ignoring parasitic poles; defaults to np.inf; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'cutOffTolerance': tolerance for ignoring parasitic poles; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. 
approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. cutOffTolerance: Tolerance for ignoring parasitic poles. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ @property def tModelType(self): from .trained_model.trained_model_pivoted_rational_nomatch import ( TrainedModelPivotedRationalNoMatch) return TrainedModelPivotedRationalNoMatch def _finalizeMarginalization(self): vbMng(self, "INIT", "Recompressing by cut off.", 10) msg = self.trainedModel.recompressByCutOff(self.cutOffTolerance, self.samplerPivot.normalFoci(), self.samplerPivot.groundPotential()) vbMng(self, "DEL", "Done recompressing." + msg, 10) self.trainedModel.setupMarginalInterp( [self.radialDirectionalWeightsMarginal]) self.trainedModel.data.approxParameters = copy(self.approxParameters) class GenericPivotedApproximant(GenericPivotedApproximantBase): """ ROM pivoted approximant (with pole matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingMode': mode for pole matching optimization; allowed values include 'NONE' and 'SHIFT'; defaults to 'NONE'; - 'cutOffTolerance': tolerance for ignoring parasitic poles; defaults to np.inf; - 'cutOffSharedRatio': required ratio of marginal points to share resonance in cut off strategy; defaults to 1.; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 
'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeight': weight for pole matching optimization; - 'matchingMode': mode for pole matching optimization; - 'cutOffTolerance': tolerance for ignoring parasitic poles; - 'cutOffSharedRatio': required ratio of marginal points to share resonance in cut off strategy; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeight: Weight for pole matching optimization. matchingMode: Mode for pole matching optimization. cutOffTolerance: Tolerance for ignoring parasitic poles. cutOffSharedRatio: Required ratio of marginal points to share resonance in cut off strategy. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. 
lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchingWeight", "matchingMode", "cutOffSharedRatio", "polybasisMarginal", "paramsMarginal"], [1., "NONE", 1., "MONOMIAL", {}]) self.parameterMarginalList = ["MMarginal", "nNeighborsMarginal", "polydegreetypeMarginal", "interpRcondMarginal", "radialDirectionalWeightsMarginalAdapt"] super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_pivoted_rational import ( TrainedModelPivotedRational) return TrainedModelPivotedRational @property def matchingWeight(self): """Value of matchingWeight.""" return self._matchingWeight @matchingWeight.setter def matchingWeight(self, matchingWeight): self._matchingWeight = matchingWeight self._approxParameters["matchingWeight"] = self.matchingWeight @property def matchingMode(self): """Value of matchingMode.""" return self._matchingMode @matchingMode.setter def matchingMode(self, matchingMode): matchingMode = matchingMode.upper().strip().replace(" ", "") if matchingMode != "NONE" and matchingMode[: 5] != "SHIFT": raise RROMPyException("Prescribed matching mode not recognized.") self._matchingMode = matchingMode self._approxParameters["matchingMode"] = self.matchingMode @property def cutOffSharedRatio(self): """Value of cutOffSharedRatio.""" return self._cutOffSharedRatio @cutOffSharedRatio.setter def cutOffSharedRatio(self, cutOffSharedRatio): if cutOffSharedRatio > 1.: RROMPyWarning("Cut off shared ratio too large. Clipping to 1.") cutOffSharedRatio = 1. elif cutOffSharedRatio < 0.: RROMPyWarning("Cut off shared ratio too small. Clipping to 0.") cutOffSharedRatio = 0. self._cutOffSharedRatio = cutOffSharedRatio self._approxParameters["cutOffSharedRatio"] = self.cutOffSharedRatio @property def polybasisMarginal(self): """Value of polybasisMarginal.""" return self._polybasisMarginal @polybasisMarginal.setter def polybasisMarginal(self, polybasisMarginal): try: polybasisMarginal = polybasisMarginal.upper().strip().replace(" ", "") if polybasisMarginal not in ppb + rbpb + ["NEARESTNEIGHBOR"] + sk: raise RROMPyException( "Prescribed marginal polybasis not recognized.") self._polybasisMarginal = polybasisMarginal except: RROMPyWarning(("Prescribed marginal polybasis not recognized. 
" "Overriding to 'MONOMIAL'.")) self._polybasisMarginal = "MONOMIAL" self._approxParameters["polybasisMarginal"] = self.polybasisMarginal @property def paramsMarginal(self): """Value of paramsMarginal.""" return self._paramsMarginal @paramsMarginal.setter def paramsMarginal(self, paramsMarginal): paramsMarginal = purgeDict(paramsMarginal, self.parameterMarginalList, dictname = self.name() + ".paramsMarginal", baselevel = 1) keyList = list(paramsMarginal.keys()) if not hasattr(self, "_paramsMarginal"): self._paramsMarginal = {} if "MMarginal" in keyList: MMarg = paramsMarginal["MMarginal"] elif ("MMarginal" in self.paramsMarginal and not hasattr(self, "_MMarginal_isauto")): MMarg = self.paramsMarginal["MMarginal"] else: MMarg = "AUTO" if isinstance(MMarg, str): MMarg = MMarg.strip().replace(" ","") if "-" not in MMarg: MMarg = MMarg + "-0" self._MMarginal_isauto = True self._MMarginal_shift = int(MMarg.split("-")[-1]) MMarg = 0 if MMarg < 0: raise RROMPyException("MMarginal must be non-negative.") self._paramsMarginal["MMarginal"] = MMarg if "nNeighborsMarginal" in keyList: self._paramsMarginal["nNeighborsMarginal"] = max(1, paramsMarginal["nNeighborsMarginal"]) elif "nNeighborsMarginal" not in self.paramsMarginal: self._paramsMarginal["nNeighborsMarginal"] = 1 if "polydegreetypeMarginal" in keyList: try: polydegtypeM = paramsMarginal["polydegreetypeMarginal"]\ .upper().strip().replace(" ","") if polydegtypeM not in ["TOTAL", "FULL"]: raise RROMPyException(("Prescribed polydegreetypeMarginal " "not recognized.")) self._paramsMarginal["polydegreetypeMarginal"] = polydegtypeM except: RROMPyWarning(("Prescribed polydegreetypeMarginal not " "recognized. Overriding to 'TOTAL'.")) self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL" elif "polydegreetypeMarginal" not in self.paramsMarginal: self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL" if "interpRcondMarginal" in keyList: self._paramsMarginal["interpRcondMarginal"] = ( paramsMarginal["interpRcondMarginal"]) elif "interpRcondMarginal" not in self.paramsMarginal: self._paramsMarginal["interpRcondMarginal"] = -1 if "radialDirectionalWeightsMarginalAdapt" in keyList: self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = ( paramsMarginal["radialDirectionalWeightsMarginalAdapt"]) elif "radialDirectionalWeightsMarginalAdapt" not in self.paramsMarginal: self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = [ -1., -1.] 
self._approxParameters["paramsMarginal"] = self.paramsMarginal def _setMMarginalAuto(self): if (self.polybasisMarginal not in ppb + rbpb or "MMarginal" not in self.paramsMarginal or "polydegreetypeMarginal" not in self.paramsMarginal): raise RROMPyException(("Cannot set MMarginal if " "polybasisMarginal does not allow it.")) self.paramsMarginal["MMarginal"] = max(0, reduceDegreeN( len(self.musMarginal), len(self.musMarginal), self.nparMarginal, self.paramsMarginal["polydegreetypeMarginal"]) - self._MMarginal_shift) vbMng(self, "MAIN", ("Automatically setting MMarginal to {}.").format( self.paramsMarginal["MMarginal"]), 25) def purgeparamsMarginal(self): self.paramsMarginal = {} paramsMbadkeys = [] if self.polybasisMarginal in ppb + rbpb + sk: paramsMbadkeys += ["nNeighborsMarginal"] if self.polybasisMarginal not in rbpb: paramsMbadkeys += ["radialDirectionalWeightsMarginalAdapt"] if self.polybasisMarginal in ["NEARESTNEIGHBOR"] + sk: paramsMbadkeys += ["MMarginal", "polydegreetypeMarginal"] if hasattr(self, "_MMarginal_isauto"): del self._MMarginal_isauto if hasattr(self, "_MMarginal_shift"): del self._MMarginal_shift if self.polybasisMarginal == "NEARESTNEIGHBOR": paramsMbadkeys += ["interpRcondMarginal"] for key in paramsMbadkeys: if key in self._paramsMarginal: del self._paramsMarginal[key] self._approxParameters["paramsMarginal"] = self.paramsMarginal def _finalizeMarginalization(self): vbMng(self, "INIT", "Recompressing by cut off.", 10) msg = self.trainedModel.recompressByCutOff(self.cutOffTolerance, self.cutOffSharedRatio, self.samplerPivot.normalFoci(), self.samplerPivot.groundPotential()) vbMng(self, "DEL", "Done recompressing." + msg, 10) if self.polybasisMarginal in rbpb + ["NEARESTNEIGHBOR"]: self.computeScaleFactor() rDWMEff = np.array([w * f for w, f in zip( self.radialDirectionalWeightsMarginal, self.scaleFactorMarginal)]) if self.polybasisMarginal in ppb + rbpb + sk: addPars = [] if self.polybasisMarginal in ppb + rbpb: if self.polybasisMarginal in rbpb: addPars += [rDWMEff] addPars += [self.verbosity >= 5, self.paramsMarginal["polydegreetypeMarginal"] == "TOTAL"] if self.polybasisMarginal in ppb: addPars += [{}] else: # if self.polybasisMarginal in rbpb: addPars += [{"optimizeScalingBounds":self.paramsMarginal[ "radialDirectionalWeightsMarginalAdapt"]}] extraPar = hasattr(self, "_MMarginal_isauto") else: # if self.polybasisMarginal in sk: idxEff = [x for x in range(self.samplerMarginal.npoints) if not hasattr(self.trainedModel, "_idxExcl") or x not in self.trainedModel._idxExcl] extraPar = self.samplerMarginal.depth[idxEff] interpPars = [self.polybasisMarginal] + addPars + [ {"rcond":self.paramsMarginal["interpRcondMarginal"]}] else: # if self.polybasisMarginal == "NEARESTNEIGHBOR": interpPars = [self.paramsMarginal["nNeighborsMarginal"], rDWMEff] extraPar = None self.trainedModel.setupMarginalInterp(self, interpPars, extraPar) self.trainedModel.data.approxParameters = copy(self.approxParameters) diff --git a/rrompy/reduction_methods/standard/nearest_neighbor.py b/rrompy/reduction_methods/standard/nearest_neighbor.py index 4af1bd8..932856c 100644 --- a/rrompy/reduction_methods/standard/nearest_neighbor.py +++ b/rrompy/reduction_methods/standard/nearest_neighbor.py @@ -1,165 +1,165 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from .generic_standard_approximant import GenericStandardApproximant from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.poly_fitting.nearest_neighbor import ( NearestNeighborInterpolator as NNI) from rrompy.utilities.exception_manager import RROMPyAssert __all__ = ['NearestNeighbor'] class NearestNeighbor(GenericStandardApproximant): """ ROM nearest neighbor approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'nNeighbors': number of nearest neighbors; defaults to 1; - 'radialDirectionalWeights': directional weights for computation of parameter distance; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults and must be True. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'nNeighbors': number of nearest neighbors; - 'radialDirectionalWeights': directional weights for computation of parameter distance. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. nNeighbors: Number of nearest neighbors. radialDirectionalWeights: Directional weights for computation of parameter distance. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. 
lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["nNeighbors", "radialDirectionalWeights"], - [1, [1.]]) + [1, 1.]) super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_nearest_neighbor import ( TrainedModelNearestNeighbor) return TrainedModelNearestNeighbor @property def nNeighbors(self): """Value of nNeighbors.""" return self._nNeighbors @nNeighbors.setter def nNeighbors(self, nNeighbors): self._nNeighbors = max(1, nNeighbors) self._approxParameters["nNeighbors"] = self.nNeighbors @property def radialDirectionalWeights(self): """Value of radialDirectionalWeights.""" return self._radialDirectionalWeights @radialDirectionalWeights.setter def radialDirectionalWeights(self, radialDirectionalWeights): if hasattr(radialDirectionalWeights, "__len__"): radialDirectionalWeights = list(radialDirectionalWeights) else: radialDirectionalWeights = [radialDirectionalWeights] self._radialDirectionalWeights = radialDirectionalWeights self._approxParameters["radialDirectionalWeights"] = ( self.radialDirectionalWeights) def setupApprox(self) -> int: """Compute RB projection matrix.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) self.computeSnapshots() setData = self.trainedModel is None self._setupTrainedModel(self.samplingEngine.projectionMatrix) if setData: self.trainedModel.data.NN = NNI() if self.POD: R = self.samplingEngine.RPOD if isinstance(R, (np.ndarray,)): vals, supp = list(R.T), [0] * R.shape[1] else: vals, supp = [], [] for j in range(R.shape[1]): idx = R.indices[R.indptr[j] : R.indptr[j + 1]] if len(idx) == 0: supp += [0] val = np.empty(0, dtype = R.dtype) else: supp += [idx[0]] idx = idx - idx[0] val = np.zeros(idx[-1] + 1, dtype = R.dtype) val[idx] = R.data[R.indptr[j] : R.indptr[j + 1]] vals += [val] else: vals = [np.ones(1)] * len(self.mus) supp = list(range(len(self.mus))) self.trainedModel.data.NN.setupByInterpolation(self.mus, np.arange(len(self.mus)), self.nNeighbors, self.radialDirectionalWeights) self.trainedModel.data.vals, self.trainedModel.data.supp = vals, supp self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 diff --git a/rrompy/reduction_methods/standard/rational_interpolant.py b/rrompy/reduction_methods/standard/rational_interpolant.py index 29d18c9..89100ed 100644 --- a/rrompy/reduction_methods/standard/rational_interpolant.py +++ b/rrompy/reduction_methods/standard/rational_interpolant.py @@ -1,690 +1,690 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
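# --- Editorial sketch (illustrative, not from the RROMPy sources) -----------
# The NearestNeighbor approximant above evaluates, at a new parameter mu, a
# combination of the snapshots whose parameters lie closest to mu, with
# radialDirectionalWeights rescaling each parameter direction before the
# distance is measured. A minimal stand-alone version of that idea, using
# inverse-distance weighting (the actual NearestNeighborInterpolator may
# weight its neighbors differently):
import numpy as np

def knnEval(mus, vals, mu, nNeighbors = 1, weights = 1.):
    # mus: (n, d) training parameters; vals: (n, m) snapshot coefficients.
    diff = (np.asarray(mus, dtype = float)
            - np.asarray(mu, dtype = float)) * weights
    dist = np.linalg.norm(diff, axis = 1)
    idx = np.argsort(dist)[: nNeighbors]            # indices of closest snapshots
    w = 1. / (dist[idx] + 1e-14)                    # inverse-distance weights
    return (w / w.sum()) @ np.asarray(vals)[idx]    # weighted snapshot combination
# -----------------------------------------------------------------------------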
# from copy import deepcopy as copy import numpy as np from rrompy.reduction_methods.base import checkRobustTolerance from .generic_standard_approximant import GenericStandardApproximant from rrompy.utilities.poly_fitting.polynomial import ( polybases as ppb, polyfitname, polyvander as pvP, polyTimes, polyTimesTable, vanderInvTable, PolynomialInterpolator as PI) from rrompy.utilities.poly_fitting.heaviside import rational2heaviside from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.base.types import Np1D, Np2D, Tuple, List, sampList from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import customPInv, dot, potential from rrompy.utilities.numerical.hash_derivative import nextDerivativeIndices from rrompy.utilities.numerical.degree import (reduceDegreeN, degreeTotalToFull, fullDegreeMaxMask, totalDegreeMaxMask) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) __all__ = ['RationalInterpolant'] class RationalInterpolant(GenericStandardApproximant): """ ROM rational interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'polydegreetype': type of polynomial degree; defaults to 'TOTAL'; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'interpRcond': tolerance for interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator management; defaults to 0; - 'correctorForce': whether corrector should forcefully delete bad poles; defaults to False; - 'correctorTol': tolerance for corrector step; defaults to 0., i.e. no bad poles; - 'correctorMaxIter': maximum number of corrector iterations; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'polydegreetype': type of polynomial degree; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust rational denominator management; - 'correctorForce': whether corrector should forcefully delete bad poles; - 'correctorTol': tolerance for corrector step; - 'correctorMaxIter': maximum number of corrector iterations. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. polybasis: type of polynomial basis for interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. polydegreetype: Type of polynomial degree. radialDirectionalWeights: Radial basis weights for interpolant numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust rational denominator management. correctorForce: Whether corrector should forcefully delete bad poles. correctorTol: Tolerance for corrector step. correctorMaxIter: Maximum number of corrector iterations. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. 
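A typical construction, patterned on the damped-mass-chain example earlier
in this patch (the engine solver, the parameter range, and mu0 below are
placeholders):

>>> from rrompy.reduction_methods import RationalInterpolant as RI
>>> from rrompy.parameter.parameter_sampling import QuadratureSampler as QS
>>> params = {"S": 15, "POD": True, "polybasis": "CHEBYSHEV",
...           "sampler": QS([1e-2, 1e1], "CHEBYSHEV")}
>>> approx = RI(solver, mu0 = [1.], approx_state = True,
...             approxParameters = params, verbosity = 5)
>>> approx.setupApprox()
>>> poles = approx.getPoles()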
""" def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["polybasis", "M", "N", "polydegreetype", "radialDirectionalWeights", "radialDirectionalWeightsAdapt", "interpRcond", "robustTol", "correctorForce", "correctorTol", "correctorMaxIter"], - ["MONOMIAL", "AUTO", "AUTO", "TOTAL", [1.], + ["MONOMIAL", "AUTO", "AUTO", "TOTAL", 1., [-1., -1.], -1, 0., False, 0., 1]) super().__init__(*args, **kwargs) self.catchInstability = 0 self._postInit() @property def tModelType(self): from .trained_model.trained_model_rational import TrainedModelRational return TrainedModelRational @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in ppb + rbpb: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def polybasis0(self): if "_" in self.polybasis: return self.polybasis.split("_")[0] return self.polybasis @property def interpRcond(self): """Value of interpRcond.""" return self._interpRcond @interpRcond.setter def interpRcond(self, interpRcond): self._interpRcond = interpRcond self._approxParameters["interpRcond"] = self.interpRcond @property def radialDirectionalWeights(self): """Value of radialDirectionalWeights.""" return self._radialDirectionalWeights @radialDirectionalWeights.setter def radialDirectionalWeights(self, radialDirectionalWeights): if hasattr(radialDirectionalWeights, "__len__"): radialDirectionalWeights = list(radialDirectionalWeights) else: radialDirectionalWeights = [radialDirectionalWeights] - self._radialDirectionalWeights = np.array(radialDirectionalWeights) + self._radialDirectionalWeights = radialDirectionalWeights self._approxParameters["radialDirectionalWeights"] = ( self.radialDirectionalWeights) @property def radialDirectionalWeightsAdapt(self): """Value of radialDirectionalWeightsAdapt.""" return self._radialDirectionalWeightsAdapt @radialDirectionalWeightsAdapt.setter def radialDirectionalWeightsAdapt(self, radialDirectionalWeightsAdapt): self._radialDirectionalWeightsAdapt = radialDirectionalWeightsAdapt self._approxParameters["radialDirectionalWeightsAdapt"] = ( self.radialDirectionalWeightsAdapt) @property def M(self): """Value of M.""" return self._M @M.setter def M(self, M): if isinstance(M, str): M = M.strip().replace(" ","") if "-" not in M: M = M + "-0" self._M_isauto, self._M_shift = True, int(M.split("-")[-1]) M = 0 if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M def _setMAuto(self): self.M = max(0, reduceDegreeN(self.S, self.S, self.npar, self.polydegreetype) - self._M_shift) vbMng(self, "MAIN", "Automatically setting M to {}.".format(self.M), 25) @property def N(self): """Value of N.""" return self._N @N.setter def N(self, N): if isinstance(N, str): N = N.strip().replace(" ","") if "-" not in N: N = N + "-0" self._N_isauto, self._N_shift = True, int(N.split("-")[-1]) N = 0 if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N def _setNAuto(self): self.N = max(0, reduceDegreeN(self.S, self.S, self.npar, self.polydegreetype) - self._N_shift) vbMng(self, "MAIN", "Automatically setting N to {}.".format(self.N), 25) @property def polydegreetype(self): """Value 
of polydegreetype.""" return self._polydegreetype @polydegreetype.setter def polydegreetype(self, polydegreetype): try: polydegreetype = polydegreetype.upper().strip().replace(" ","") if polydegreetype not in ["TOTAL", "FULL"]: raise RROMPyException(("Prescribed polydegreetype not " "recognized.")) self._polydegreetype = polydegreetype except: RROMPyWarning(("Prescribed polydegreetype not recognized. " "Overriding to 'TOTAL'.")) self._polydegreetype = "TOTAL" self._approxParameters["polydegreetype"] = self.polydegreetype @property def robustTol(self): """Value of tolerance for robust rational denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol @property def correctorForce(self): """Value of correctorForce.""" return self._correctorForce @correctorForce.setter def correctorForce(self, correctorForce): self._correctorForce = correctorForce self._approxParameters["correctorForce"] = self.correctorForce @property def correctorTol(self): """Value of correctorTol.""" return self._correctorTol @correctorTol.setter def correctorTol(self, correctorTol): if correctorTol < 0. or (correctorTol > 0. and self.npar > 1): RROMPyWarning(("Overriding prescribed corrector tolerance " "to 0.")) correctorTol = 0. self._correctorTol = correctorTol self._approxParameters["correctorTol"] = self.correctorTol @property def correctorMaxIter(self): """Value of correctorMaxIter.""" return self._correctorMaxIter @correctorMaxIter.setter def correctorMaxIter(self, correctorMaxIter): if correctorMaxIter < 1 or (correctorMaxIter > 1 and self.npar > 1): RROMPyWarning(("Overriding prescribed max number of corrector " "iterations to 1.")) correctorMaxIter = 1 self._correctorMaxIter = correctorMaxIter self._approxParameters["correctorMaxIter"] = self.correctorMaxIter def resetSamples(self): """Reset samples.""" super().resetSamples() self._musUniqueCN = None self._derIdxs = None self._reorder = None def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" if self._musUniqueCN is None or len(self._reorder) != len(self.mus): self._musUniqueCN, musIdxsTo, musIdxs, musCount = ( self.trainedModel.centerNormalize(self.mus).unique( return_index = True, return_inverse = True, return_counts = True)) self._musUnique = self.mus[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.mus.shape[1], cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt def _setupDenominator(self): """Compute rational denominator.""" RROMPyAssert(self._mode, message = "Cannot setup denominator.") vbMng(self, "INIT", "Starting computation of denominator.", 7) if hasattr(self, "_N_isauto"): self._setNAuto() else: N = reduceDegreeN(self.N, self.S, self.npar, self.polydegreetype) if N < self.N: RROMPyWarning(("N too large compared to S. 
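# Illustrative sketch (not part of the diff): what "polydegreetype" selects.
# With npar parameters and degree E, a "FULL" (tensor) basis has
# (E + 1) ** npar coefficients, while "TOTAL" keeps only multi-indices whose
# entries sum to at most E, i.e. binomial(E + npar, npar) of them. The helper
# name below is ad hoc for this example.
from math import comb

def nCoeffs(E, npar, polydegreetype = "TOTAL"):
    # number of coefficients of an npar-variate polynomial of degree E
    if polydegreetype == "TOTAL": return comb(E + npar, npar)
    return (E + 1) ** npar                       # "FULL"

print(nCoeffs(3, 2, "TOTAL"), nCoeffs(3, 2, "FULL"))   # 10 16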
Reducing N by " "{}").format(self.N - N)) self.N = N while self.N > 0: invD, fitinv = self._computeInterpolantInverseBlocks() idxSamplesEff = list(range(self.S)) if self.POD: ev, eV = self.findeveVGQR( self.samplingEngine.RPOD[:, idxSamplesEff], invD) else: ev, eV = self.findeveVGExplicit( self.samplingEngine.samples(idxSamplesEff), invD) nevBad = checkRobustTolerance(ev, self.robustTol) if nevBad <= 1: break if self.catchInstability > 0: raise RROMPyException(("Instability in denominator " "computation: eigenproblem is poorly " "conditioned."), self.catchInstability == 1) vbMng(self, "MAIN", ("Smallest {} eigenvalues below tolerance. " "Reducing N by 1.").format(nevBad), 10) self.N = self.N - 1 if self.N <= 0: self.N = 0 eV = np.ones((1, 1)) q = PI() q.npar = self.npar q.polybasis = self.polybasis0 if self.polydegreetype == "TOTAL": q.coeffs = degreeTotalToFull(tuple([self.N + 1] * self.npar), self.npar, eV[:, 0]) else: q.coeffs = eV[:, 0].reshape([self.N + 1] * self.npar) vbMng(self, "DEL", "Done computing denominator.", 7) return q, fitinv def _setupNumerator(self): """Compute rational numerator.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of numerator.", 7) self._setupInterpolationIndices() Qevaldiag = polyTimesTable(self.trainedModel.data.Q, self._musUniqueCN, self._reorder, self._derIdxs, self.scaleFactorRel) if self.POD: Qevaldiag = Qevaldiag.dot(self.samplingEngine.RPOD.T) if hasattr(self, "_M_isauto"): self._setMAuto() M = self.M else: M = reduceDegreeN(self.M, self.S, self.npar, self.polydegreetype) if M < self.M: RROMPyWarning(("M too large compared to S. Reducing M by " "{}").format(self.M - M)) self.M = M while self.M >= 0: pParRest = [self.M, self.polybasis, self.verbosity >= 5, self.polydegreetype == "TOTAL", {"derIdxs": self._derIdxs, "reorder": self._reorder, "scl": self.scaleFactorRel}] if self.polybasis in ppb: p = PI() else: self.computeScaleFactor() rDWEff = np.array([w * f for w, f in zip( self.radialDirectionalWeights, self.scaleFactor)]) pParRest = pParRest[: 2] + [rDWEff] + pParRest[2 :] pParRest[-1]["optimizeScalingBounds"] = ( self.radialDirectionalWeightsAdapt) p = RBI() if self.polybasis in ppb + rbpb: pParRest += [{"rcond": self.interpRcond}] wellCond, msg = p.setupByInterpolation(self._musUniqueCN, Qevaldiag, *pParRest) vbMng(self, "MAIN", msg, 5) if wellCond: break if self.catchInstability > 0: raise RROMPyException(("Instability in numerator computation: " "polyfit is poorly conditioned."), self.catchInstability == 1) vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing M " "by 1."), 10) self.M = self.M - 1 if self.M < 0: raise RROMPyException(("Instability in computation of numerator. " "Aborting.")) self.M = M vbMng(self, "DEL", "Done computing numerator.", 7) return p def setupApprox(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) self.computeSnapshots() self._setupTrainedModel(self.samplingEngine.projectionMatrix) self._iterCorrector() self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def _iterCorrector(self): noCorrector = self.correctorTol <= 0. 
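# Illustrative sketch (not part of the diff): the linear-algebra idea behind
# the denominator/numerator split. The class above works on snapshot data via
# a gramian eigenproblem; this scalar 1D analogue fits f by p/q through the
# linearized conditions f(x_i) q(x_i) - p(x_i) ~ 0, taking the right singular
# vector of the smallest singular value (the quantity robustTol guards
# against being spurious). Sample set, degrees, and test point are made up.
import numpy as np

x = np.linspace(0., 1., 20)
f = 1. / (x - 2.)                              # exactly rational test data
M, N = 1, 1                                    # numerator / denominator degrees
VM = np.vander(x, M + 1, increasing = True)
VN = np.vander(x, N + 1, increasing = True)
A = np.hstack([f[:, None] * VN, -VM])          # rows: f(x_i) q(x_i) - p(x_i)
_, s, Vh = np.linalg.svd(A, full_matrices = False)
q, p = Vh[-1, : N + 1], Vh[-1, N + 1 :]        # smallest singular vector
def r(t): return np.polyval(p[::-1], t) / np.polyval(q[::-1], t)
print(s[-1], abs(r(0.5) - 1. / (0.5 - 2.)))    # both close to 0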
or (self.correctorMaxIter <= 1 and not self.correctorForce) if noCorrector: vbMng(self, "INIT", "Starting approximant finalization.", 5) else: vbMng(self, "INIT", "Starting correction iterations.", 5) self._Qhat = PI() self._Qhat.npar = self.npar self._Qhat.polybasis = "MONOMIAL" self._Qhat.coeffs = np.ones(1, dtype = np.complex) if self.POD: self._samplesOld = copy(self.samplingEngine.RPOD) else: self._samplesOld = copy(self.samplingEngine.samples) Nauto = hasattr(self, "_N_isauto") for j in range(self.correctorMaxIter): if self.N > 0 or (Nauto and self.S > self.npar): Q = self._setupDenominator()[0] if hasattr(self, "_N_isauto"): del self._N_isauto else: Q = PI() Q.coeffs = np.ones((1,) * self.npar, dtype = np.complex) Q.npar = self.npar Q.polybasis = self.polybasis self.N = 0 if j == 0: _N0 = self.N self.trainedModel.data.Q = Q self.trainedModel.data.P = self._setupNumerator() if noCorrector or (j >= self.correctorMaxIter - 1 and not self.correctorForce): self.N = 0 else: self._applyCorrector(j) if self.N <= 0: break if Nauto: self._N_isauto = True self.N = _N0 if noCorrector: vbMng(self, "DEL", "Terminated approximant finalization.", 5) return if self.POD: self.samplingEngine.RPOD = self._samplesOld else: self.samplingEngine.samples = self._samplesOld del self._samplesOld if self.correctorForce: QOld, QOldBasis = [1.], "MONOMIAL" else: QOld, QOldBasis = Q.coeffs, self.polybasis Q = polyTimes(self._Qhat.coeffs, QOld, Pbasis = self._Qhat.polybasis, Qbasis = QOldBasis, Rbasis = self.polybasis) del self._Qhat gamma = np.linalg.norm(Q) self.trainedModel.data.Q.coeffs = np.pad(Q, (0, self.N - len(Q) + 1), "constant") / gamma if self.correctorForce: self.trainedModel.data.P = self._setupNumerator() else: self.trainedModel.data.P.coeffs /= gamma vbMng(self, "DEL", "Terminated correction iterations.", 5) def _applyCorrector(self, j:int): cfs, pls, _ = rational2heaviside(self.trainedModel.data.P, self.trainedModel.data.Q) cfs = cfs[: self.N] if self.POD: resEff = np.linalg.norm(cfs, axis = 1) else: resEff = self.HFEngine.norm( self.samplingEngine.projectionMatrix.dot(cfs.T), is_state = self.approx_state) potentialEff = (potential(pls, self.sampler.normalFoci()) / self.sampler.groundPotential()) potentialEff[np.logical_or(potentialEff < 1., np.isinf(pls))] = 1. resEff[np.isinf(pls)] = 0. resEff /= potentialEff idxKeep = np.where(resEff >= self.correctorTol * np.max(resEff))[0] vbMng(self, "MAIN", ("Correction iteration no. {}: {} out of {} residuals satisfy " "tolerance.").format(j + 1, len(idxKeep), self.N), 10) self.N -= len(idxKeep) if ((self.N <= 0 or j < self.correctorMaxIter - 1) and not self.correctorForce): return for i in idxKeep: self._Qhat.coeffs = polyTimes(self._Qhat.coeffs, [- pls[i], 1.], Pbasis = self._Qhat.polybasis, Rbasis = self._Qhat.polybasis) self._Qhat.coeffs /= np.linalg.norm(self._Qhat.coeffs) if self.N <= 0: return vbMng(self, "MAIN", ("Removing poles at (normalized positions): " "{}.").format(pls[resEff < self.correctorTol * np.max(resEff)]), 65) if j < self.correctorMaxIter - 1: That = polyTimesTable(self._Qhat, self._musUniqueCN, self._reorder, self._derIdxs, self.scaleFactorRel).T if self.POD: self.samplingEngine.RPOD = self._samplesOld.dot(That) else: self.samplingEngine.samples = self._samplesOld.dot(That) def _computeInterpolantInverseBlocks(self) -> Tuple[List[Np2D], Np2D]: """ Compute inverse factors for minimal interpolant target functional. 
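# Illustrative sketch (not part of the diff): the corrector's basic filter.
# Starting from a pole/residue (Heaviside) representation, poles whose scaled
# residues fall below correctorTol times the largest one are treated as
# spurious; the kept poles are multiplied back into a normalized denominator,
# much like the polyTimes/_Qhat bookkeeping above. All numbers are made up.
import numpy as np

poles    = np.array([0.9, 1.7, 3.0 + 0.2j])
residues = np.array([2.0, 1.5, 1e-12])         # last pole is spurious
correctorTol = 1e-6

keep = np.abs(residues) >= correctorTol * np.max(np.abs(residues))
print("removing poles at:", poles[~keep])
Q = np.poly(poles[keep])                       # denominator from kept poles
Q /= np.linalg.norm(Q)                         # same normalization as above
print(Q)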
""" RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") self._setupInterpolationIndices() pvPPar = [self.polybasis0, self._derIdxs, self._reorder, self.scaleFactorRel] if hasattr(self, "_M_isauto"): self._setMAuto() E = max(self.M, self.N) while E >= 0: if self.polydegreetype == "TOTAL": Eeff = E idxsB = totalDegreeMaxMask(E, self.npar) else: #if self.polydegreetype == "FULL": Eeff = [E] * self.npar idxsB = fullDegreeMaxMask(E, self.npar) TE = pvP(self._musUniqueCN, Eeff, *pvPPar) fitOut = customPInv(TE, rcond = self.interpRcond, full = True) vbMng(self, "MAIN", ("Fitting {} samples with degree {} through {}... " "Conditioning of pseudoinverse system: {:.4e}.").format( TE.shape[0], E, polyfitname(self.polybasis0), fitOut[1][1][0] / fitOut[1][1][-1]), 5) if fitOut[1][0] == TE.shape[1]: fitinv = fitOut[0][idxsB, :] break if self.catchInstability > 0: raise RROMPyException(("Instability in denominator " "computation: polyfit is poorly " "conditioned."), self.catchInstability == 1) EeqN = E == self.N vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing E {}" "by 1.").format("and N " * EeqN), 10) if EeqN: self.N = self.N - 1 E -= 1 if self.N < 0: raise RROMPyException(("Instability in computation of " "denominator. Aborting.")) invD = vanderInvTable(fitinv, idxsB, self._reorder, self._derIdxs) if self.N == E: TN = TE else: if self.polydegreetype == "TOTAL": Neff = self.N idxsB = totalDegreeMaxMask(self.N, self.npar) else: #if self.polydegreetype == "FULL": Neff = [self.N] * self.npar idxsB = fullDegreeMaxMask(self.N, self.npar) TN = pvP(self._musUniqueCN, Neff, *pvPPar) for k in range(len(invD)): invD[k] = dot(invD[k], TN) return invD, fitinv def findeveVGExplicit(self, sampleE:sampList, invD:List[Np2D]) -> Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of rational denominator matrix. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") nEnd = invD[0].shape[1] eWidth = len(invD) vbMng(self, "INIT", "Building gramian matrix.", 10) gramian = self.HFEngine.innerProduct(sampleE, sampleE, is_state = self.approx_state) G = np.zeros((nEnd, nEnd), dtype = np.complex) for k in range(eWidth): G += dot(dot(gramian, invD[k]).T, invD[k].conj()).T vbMng(self, "DEL", "Done building gramian.", 10) vbMng(self, "INIT", "Solving eigenvalue problem for gramian matrix.", 7) try: ev, eV = np.linalg.eigh(G) except np.linalg.LinAlgError as e: raise RROMPyException(e) vbMng(self, "MAIN", ("Solved eigenvalue problem of size {} with condition number " "{:.4e}.").format(nEnd, ev[-1] / ev[0]), 5) vbMng(self, "DEL", "Done solving eigenvalue problem.", 7) return ev, eV def findeveVGQR(self, RPODE:Np2D, invD:List[Np2D]) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of rational denominator matrix through SVD of R factor. 
""" RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") nEnd = invD[0].shape[1] S = RPODE.shape[0] eWidth = len(invD) vbMng(self, "INIT", "Building half-gramian matrix stack.", 10) Rstack = np.zeros((S * eWidth, nEnd), dtype = np.complex) for k in range(eWidth): Rstack[k * S : (k + 1) * S, :] = dot(RPODE, invD[k]) vbMng(self, "DEL", "Done building half-gramian.", 10) vbMng(self, "INIT", "Solving svd for square root of gramian matrix.", 7) try: _, s, eV = np.linalg.svd(Rstack, full_matrices = False) except np.linalg.LinAlgError as e: raise RROMPyException(e) ev = s[::-1] eV = eV[::-1, :].T.conj() vbMng(self, "MAIN", ("Solved svd problem of size {} x {} with condition number " "{:.4e}.").format(*Rstack.shape, s[0] / s[-1]), 5) vbMng(self, "DEL", "Done solving svd.", 7) return ev, eV def getResidues(self, *args, **kwargs) -> Np2D: """ Obtain approximant residues. Returns: Matrix with residues as columns. """ return self.trainedModel.getResidues(*args, **kwargs) diff --git a/rrompy/utilities/poly_fitting/nearest_neighbor/nearest_neighbor_interpolator.py b/rrompy/utilities/poly_fitting/nearest_neighbor/nearest_neighbor_interpolator.py index c0938de..984450e 100644 --- a/rrompy/utilities/poly_fitting/nearest_neighbor/nearest_neighbor_interpolator.py +++ b/rrompy/utilities/poly_fitting/nearest_neighbor/nearest_neighbor_interpolator.py @@ -1,91 +1,90 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import numpy as np from copy import deepcopy as copy from rrompy.utilities.base.types import List, ListAny, Np1D, Np2D, paramList from rrompy.utilities.poly_fitting.interpolator import GenericInterpolator from .val import polyval from rrompy.utilities.numerical import dot from rrompy.utilities.exception_manager import RROMPyAssert from rrompy.parameter import checkParameterList __all__ = ['NearestNeighborInterpolator'] class NearestNeighborInterpolator(GenericInterpolator): def __init__(self, other = None): if other is None: return self.support = other.support self.coeffsLocal = other.coeffsLocal self.nNeighbors = other.nNeighbors self.directionalWeights = other.directionalWeights self.npar = other.npar @property def shape(self): sh = self.coeffsLocal.shape[1 :] if self.coeffsLocal.ndim > 1 else 1 return sh def __call__(self, mu:paramList, der : List[int] = None, scl : Np1D = None): if der is not None and np.sum(der) > 0: return np.zeros(self.coeffsLocal.shape[1 :] + (len(mu),)) return polyval(mu, self.coeffsLocal, self.support, self.nNeighbors, self.directionalWeights) def __copy__(self): return NearestNeighborInterpolator(self) def __deepcopy__(self, memo): other = NearestNeighborInterpolator() (other.support, other.coeffsLocal, other.nNeighbors, other.directionalWeights, other.npar) = copy((self.support, self.coeffsLocal, self.nNeighbors, self.directionalWeights, self.npar), memo) return other def postmultiplyTensorize(self, A:Np2D): RROMPyAssert(A.shape[0], self.shape[-1], "Shape of output") self.coeffsLocal = dot(self.coeffsLocal, A) def pad(self, nleft : List[int] = None, nright : List[int] = None): if nleft is None: nleft = [0] * len(self.shape) if nright is None: nright = [0] * len(self.shape) if not hasattr(nleft, "__len__"): nleft = [nleft] if not hasattr(nright, "__len__"): nright = [nright] RROMPyAssert(len(self.shape), len(nleft), "Shape of output") RROMPyAssert(len(self.shape), len(nright), "Shape of output") padwidth = [(0, 0)] + [(l, r) for l, r in zip(nleft, nright)] self.coeffsLocal = np.pad(self.coeffsLocal, padwidth, "constant", constant_values = (0., 0.)) def setupByInterpolation(self, support:paramList, values:ListAny, nNeighbors : int = 1, directionalWeights : Np1D = None): support = checkParameterList(support) RROMPyAssert(len(support), len(values), "Number of support values") self.support = copy(support) self.npar = support.shape[1] self.coeffsLocal = values self.nNeighbors = max(1, nNeighbors) - if directionalWeights is None: - directionalWeights = np.ones(self.npar) - self.directionalWeights = directionalWeights + if directionalWeights is None: directionalWeights = [1.] * self.npar + self.directionalWeights = np.array(directionalWeights) RROMPyAssert(len(support), len(values), "Number of support points") return True, None diff --git a/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py b/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py index cc32ec1..1fce87c 100644 --- a/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py +++ b/rrompy/utilities/poly_fitting/radial_basis/radial_basis_interpolator.py @@ -1,133 +1,133 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
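# Illustrative sketch (not part of the diff): the kind of evaluation a
# nearest-neighbor interpolator performs. The actual kernel lives in .val's
# polyval, which this diff does not show; inverse-distance weighting over the
# nNeighbors closest support points, with per-direction scaling by
# directionalWeights, stands in for it here. All data are made up.
import numpy as np

support = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])   # parameter points
values  = np.array([0., 1., 2., 3.])                           # samples there
directionalWeights = np.array([1., 10.])                       # anisotropic metric
nNeighbors = 2

def nnEval(mu):
    d = np.linalg.norm((support - mu) * directionalWeights, axis = 1)
    idx = np.argsort(d)[: nNeighbors]
    w = 1. / np.maximum(d[idx], 1e-15)                         # inverse distances
    return np.dot(w, values[idx]) / np.sum(w)

print(nnEval(np.array([0.2, 0.1])))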
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from copy import deepcopy as copy from rrompy.utilities.base.types import (List, ListAny, DictAny, Np1D, Np2D, paramList) from rrompy.utilities.poly_fitting.interpolator import GenericInterpolator from rrompy.utilities.poly_fitting.custom_fit import customFit from .base import polyfitname from .val import polyval from .vander import polyvander as pv from rrompy.utilities.numerical import dot from rrompy.utilities.numerical.degree import degreeTotalToFull from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert from rrompy.parameter import checkParameterList __all__ = ['RadialBasisInterpolator'] class RadialBasisInterpolator(GenericInterpolator): def __init__(self, other = None): if other is None: return self.support = other.support self.coeffsGlobal = other.coeffsGlobal self.coeffsLocal = other.coeffsLocal self.directionalWeights = other.directionalWeights self.npar = other.npar self.polybasis = other.polybasis @property def shape(self): sh = self.coeffsLocal.shape[1 :] if self.coeffsLocal.ndim > 1 else 1 return sh @property def deg(self): return [x - 1 for x in self.coeffsGlobal.shape[: self.npar]] def __call__(self, mu:paramList, der : List[int] = None, scl : Np1D = None): if der is not None and np.sum(der) > 0: raise RROMPyException(("Cannot take derivatives of radial basis " "function.")) return polyval(mu, self.coeffsGlobal, self.coeffsLocal, self.support, self.directionalWeights, self.polybasis) def __copy__(self): return RadialBasisInterpolator(self) def __deepcopy__(self, memo): other = RadialBasisInterpolator() (other.support, other.coeffsGlobal, other.coeffsLocal, other.directionalWeights, other.npar, other.polybasis) = copy( (self.support, self.coeffsGlobal, self.coeffsLocal, self.directionalWeights, self.npar, self.polybasis), memo) return other def postmultiplyTensorize(self, A:Np2D): RROMPyAssert(A.shape[0], self.shape[-1], "Shape of output") self.coeffsLocal = dot(self.coeffsLocal, A) self.coeffsGlobal = dot(self.coeffsGlobal, A) def pad(self, nleft : List[int] = None, nright : List[int] = None): if nleft is None: nleft = [0] * len(self.shape) if nright is None: nright = [0] * len(self.shape) if not hasattr(nleft, "__len__"): nleft = [nleft] if not hasattr(nright, "__len__"): nright = [nright] RROMPyAssert(len(self.shape), len(nleft), "Shape of output") RROMPyAssert(len(self.shape), len(nright), "Shape of output") padwidth = [(0, 0)] + [(l, r) for l, r in zip(nleft, nright)] self.coeffsLocal = np.pad(self.coeffsLocal, padwidth, "constant", constant_values = (0., 0.)) padwidth = [(0, 0)] * (self.npar - 1) + padwidth self.coeffsGlobal = np.pad(self.coeffsGlobal, padwidth, "constant", constant_values = (0., 0.)) def setupByInterpolation(self, support:paramList, values:ListAny, deg:int, polybasis : str = "MONOMIAL_GAUSSIAN", directionalWeights : Np1D = None, verbose : bool = True, totalDegree : bool = True, vanderCoeffs : DictAny = {}, fitCoeffs : DictAny = {}): support = checkParameterList(support) RROMPyAssert(len(support), len(values), "Number of support values") self.support = copy(support) if "reorder" in vanderCoeffs.keys(): self.support = self.support[vanderCoeffs["reorder"]] 
        self.npar = support.shape[1]
-        if directionalWeights is None:
-            directionalWeights = np.ones(self.npar)
+        if directionalWeights is None: directionalWeights = [1.] * self.npar
+        directionalWeights = np.array(directionalWeights)
        self.polybasis = polybasis
        if not totalDegree and not hasattr(deg, "__len__"):
            deg = [deg] * self.npar
        vander, self.directionalWeights = pv(support, deg, basis = polybasis,
                                  directionalWeights = directionalWeights,
                                  **vanderCoeffs)
        outDim = values.shape[1:]
        values = values.reshape(values.shape[0], -1)
        values = np.pad(values, ((0, len(vander) - len(values)), (0, 0)),
                        "constant")
        fitOut = customFit(vander, values, full = True, **fitCoeffs)
        P = fitOut[0][len(support) :]
        if verbose:
            msg = ("Fitting {}+{} samples with degree {} through {}... "
                   "Conditioning of LS system: {:.4e}.").format(
                              len(support), len(vander) - len(support), deg,
                              polyfitname(self.polybasis),
                              fitOut[1][2][0] / fitOut[1][2][-1])
        else:
            msg = None
        self.coeffsLocal = fitOut[0][: len(support)].reshape((len(support),)
                                                             + outDim)
        if totalDegree:
            self.coeffsGlobal = degreeTotalToFull(tuple([deg + 1] * self.npar)
                                                  + outDim, self.npar, P)
        else:
            self.coeffsGlobal = P.reshape(tuple([d + 1 for d in deg]) + outDim)
        return fitOut[1][1] == vander.shape[1], msg
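# Illustrative sketch (not part of the diff): the structure that
# setupByInterpolation assembles above, namely a kernel block on the support
# points plus a polynomial tail, fitted in the least-squares sense after
# zero-padding the data and then split into "local" (kernel) and "global"
# (polynomial) coefficients. A Gaussian kernel, a single parameter, and a
# linear tail are assumed here; the library's kernels and bases may differ.
import numpy as np

x = np.linspace(0., 1., 7)[:, None]            # support points (S x 1)
y = np.sin(2. * np.pi * x[:, 0])               # values to interpolate
w = 3.                                         # directional weight
deg = 1                                        # degree of polynomial tail

K = np.exp(- (w * (x - x.T)) ** 2)             # Gaussian kernel block (S x S)
P = np.vander(x[:, 0], deg + 1, increasing = True)      # polynomial tail
vander = np.block([[K, P], [P.T, np.zeros((deg + 1, deg + 1))]])
rhs = np.pad(y, (0, deg + 1), "constant")      # zero-padded data
coeffs = np.linalg.lstsq(vander, rhs, rcond = None)[0]
coeffsLocal, coeffsGlobal = coeffs[: len(x)], coeffs[len(x) :]

def rbfEval(t):
    return (np.exp(- (w * (t - x[:, 0])) ** 2) @ coeffsLocal
            + np.polyval(coeffsGlobal[::-1], t))

print(abs(rbfEval(.5) - y[3]))                 # ~0: data reproduced at a node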