diff --git a/examples/3_sector_angle/sector_angle.py b/examples/3_sector_angle/sector_angle.py index 7d8f2c4..3df9737 100644 --- a/examples/3_sector_angle/sector_angle.py +++ b/examples/3_sector_angle/sector_angle.py @@ -1,110 +1,108 @@ import numpy as np import matplotlib.pyplot as plt from sector_angle_engine import SectorAngleEngine as engine from rrompy.reduction_methods import (NearestNeighbor as NN, RationalInterpolantPivoted as RIP, RationalInterpolantGreedyPivoted as RIGP) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, EmptySampler as ES) ks, ts = [10., 15.], [.4, .6] k0, t0, n = np.mean(np.power(ks, 2.)) ** .5, np.mean(ts), 50 solver = engine(k0, t0, n) murange = [[ks[0], ts[0]], [ks[-1], ts[-1]]] mu = [12., .535] fighandles = [] for method in ["RI", "RI_GREEDY"]: print("Testing {} method".format(method)) if method == "RI": params = {'S':20, "paramsMarginal":{"MMarginal": 3}, 'SMarginal':11, 'POD':True, 'polybasis':"CHEBYSHEV", 'polybasisMarginal':"MONOMIAL_GAUSSIAN", 'radialDirectionalWeightsMarginal': 100., - 'matchingWeight':1., 'cutOffTolerance': 2., - 'samplerPivot':QS(ks, "CHEBYSHEV", 2.), + 'matchingWeight':1., 'samplerPivot':QS(ks, "CHEBYSHEV", 2.), 'samplerMarginal':QS(ts, "UNIFORM")} algo = RIP if method == "RI_GREEDY": params = {'S':10, "paramsMarginal":{"MMarginal": 3}, 'SMarginal':11, 'POD':True, 'polybasis':"LEGENDRE", 'polybasisMarginal':"MONOMIAL_GAUSSIAN", 'radialDirectionalWeightsMarginal': 100., - 'matchingWeight':1., 'cutOffTolerance': 2., - 'samplerPivot':QS(ks, "UNIFORM", 2.), + 'matchingWeight':1., 'samplerPivot':QS(ks, "UNIFORM", 2.), 'greedyTol':1e-3, 'errorEstimatorKind':"LOOK_AHEAD_RES", 'trainSetGenerator':QS(ks, "CHEBYSHEV", 2.), 'samplerMarginal':QS(ts, "UNIFORM")} algo = RIGP approx = algo([0], solver, mu0 = [k0, t0], approx_state = True, approxParameters = params, verbosity = 10, storeAllSamples = True) if len(method) == 2: approx.setupApprox() else: approx.setupApprox("LAST") print("--- Approximant ---") approx.plotApprox(mu, name = 'u_app') approx.plotHF(mu, name = 'u_HF') approx.plotErr(mu, name = 'err_app') approx.plotRes(mu, name = 'res_app') normErr = approx.normErr(mu)[0] normSol = approx.normHF(mu)[0] normRes = approx.normRes(mu)[0] normRHS = approx.normRHS(mu)[0] print("SolNorm:\t{:.5e}\nErr_app: \t{:.5e}\nErrRel_app:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_app: \t{:.5e}\nResRel_app:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) print("--- Closest snapshot ---") paramsNN = {'S':len(approx.mus), 'POD':True, 'sampler':ES()} approxNN = NN(solver, mu0 = [k0, t0], approx_state = True, approxParameters = paramsNN, verbosity = 0) approxNN.setSamples(approx.storedSamplesFilenames) approx.purgeStoredSamples() approxNN.plotApprox(mu, name = 'u_close') approxNN.plotHF(mu, name = 'u_HF') approxNN.plotErr(mu, name = 'err_close') approxNN.plotRes(mu, name = 'res_close') normErr = approxNN.normErr(mu)[0] normSol = approxNN.normHF(mu)[0] normRes = approxNN.normRes(mu)[0] normRHS = approxNN.normRHS(mu)[0] print("SolNorm:\t{:.5e}\nErr_close:\t{:.5e}\nErrRel_close:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_close:\t{:.5e}\nResRel_close:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) verb = approx.verbosity approx.verbosity = 0 tspace = np.linspace(ts[0], ts[-1], 100) for j, t in enumerate(tspace): pls = approx.getPoles([None, t]) pls[np.abs(np.imag(pls ** 2.)) > 1e-5] = np.nan if j == 0: poles = np.empty((len(tspace), len(pls))) 
poles[j] = np.real(pls) approx.verbosity = verb fighandles += [plt.figure(figsize = (12, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) ax1.plot(poles, tspace) ax1.set_ylim(ts) ax1.set_xlabel('mu_1') ax1.set_ylabel('mu_2') ax1.grid() ax2.plot(poles, tspace) for mm in approx.musMarginal: ax2.plot(ks, [mm[0, 0]] * 2, 'k--', linewidth = 1) ax2.set_xlim(ks) ax2.set_ylim(ts) ax2.set_xlabel('mu_1') ax2.set_ylabel('mu_2') ax2.grid() plt.show() print("\n") diff --git a/examples/3_sector_angle/sector_angle_engine.py b/examples/3_sector_angle/sector_angle_engine.py index cd767dc..69aa480 100644 --- a/examples/3_sector_angle/sector_angle_engine.py +++ b/examples/3_sector_angle/sector_angle_engine.py @@ -1,44 +1,46 @@ import numpy as np import fenics as fen import mshr from rrompy.utilities.base.decorators import nonaffine_construct from rrompy.hfengines.fenics_engines import HelmholtzProblemEngine from rrompy.parameter import parameterMap as pMap class SectorAngleEngine(HelmholtzProblemEngine): def __init__(self, k0:float, t0:float, n:int): super().__init__(mu0 = [k0, t0]) self._affinePoly = False self.npar = 2 self.parameterMap = pMap([2., 1.]) mesh = mshr.generate_mesh( mshr.Circle(fen.Point(0., 0.), 1.) - mshr.Rectangle(fen.Point(-1., -1.), fen.Point(0., 1.)) - mshr.Rectangle(fen.Point(-1., -1.), fen.Point(1., 0.)), n) self.V = fen.FunctionSpace(mesh, "P", 1) x, y = fen.SpatialCoordinate(self.V.mesh())[:] self.forcingTerm = [fen.exp(x + y) * (1. - x ** 2. - y ** 2.), fen.exp(x - y) * (1. - x ** 2. - y ** 2.)] self._tBoundary = np.nan + self.cutOffPolesRMin, self.cutOffPolesRMax = -2., 2. + self.cutOffPolesIMin, self.cutOffPolesIMax = -1., 1. def setBoundary(self, t:float): while hasattr(t, "__len__"): t = t[0] if not np.isclose(t, self._tBoundary): eps = 1e-2 self._tBoundary = t self.DirichletBoundary = lambda x, on_boundary: ( on_boundary and x[0] >= eps and x[1] <= eps + np.sin(t * np.pi / 2.)) self.NeumannBoundary = "REST" @nonaffine_construct def A(self, mu = [], der = 0): mu = self.checkParameter(mu) self.setBoundary(mu(1)) return HelmholtzProblemEngine.A(self, mu, der) @nonaffine_construct def b(self, mu = [], der = 0): mu = self.checkParameter(mu) self.setBoundary(mu(1)) return HelmholtzProblemEngine.b(self, mu, der) diff --git a/examples/5_anisotropic_square/anisotropic_square.py b/examples/5_anisotropic_square/anisotropic_square.py index 2738c6c..078a1e3 100644 --- a/examples/5_anisotropic_square/anisotropic_square.py +++ b/examples/5_anisotropic_square/anisotropic_square.py @@ -1,77 +1,80 @@ import numpy as np import matplotlib.pyplot as plt from itertools import product from anisotropic_square_engine import (AnisotropicSquareEngine as engine, AnisotropicSquareEnginePoles as plsEx) from rrompy.reduction_methods import ( RationalInterpolantGreedyPivotedGreedy as RIGPG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, SparseGridSampler as SGS) zs, Ls = [10., 50.], [.2, 1.2] z0, L0, n = np.mean(zs), np.mean(Ls), 50 murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]] np.random.seed(4020) mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]), Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])] solver = engine(z0, L0, n) fighandles = [] params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3, "polybasisMarginal": "MONOMIAL_WENDLAND", "polybasis": "LEGENDRE", 'samplerPivot':QS(zs, "UNIFORM"), 'trainSetGenerator':QS(zs, "UNIFORM"), 'errorEstimatorKind':"LOOK_AHEAD_RES", 'errorEstimatorKindMarginal':"LOOK_AHEAD_RECOVER", "SMarginal": 
3, "paramsMarginal": {"MMarginal": 2, "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]}, "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls), "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1.} for shared, tol in product([1., 0.], [1., 3.]): print("Testing cutoff tolerance {} with shared ratio {}.".format(tol, shared)) - params['cutOffTolerance'] = tol + solver.cutOffPolesRMin = - 1. - tol + solver.cutOffPolesRMax = 1. + tol + solver.cutOffPolesIMin = - tol + solver.cutOffPolesIMax = tol params['sharedRatio'] = shared approx = RIGPG([0], solver, mu0 = [z0, L0], approx_state = True, approxParameters = params, verbosity = 5) approx.setupApprox("ALL") verb = approx.verbosity approx.verbosity = 0 tspace = np.linspace(Ls[0], Ls[-1], 100) for j, t in enumerate(tspace): plsE = plsEx(t, 0., zs[-1]) pls = approx.getPoles([None, t]) pls[np.abs(np.imag(pls)) > 1e-5] = np.nan if j == 0: polesE = np.empty((len(tspace), len(plsE))) poles = np.empty((len(tspace), len(pls))) polesE[:] = np.nan if len(plsE) > polesE.shape[1]: nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1])) nanR[:] = np.nan polesE = np.hstack((polesE, nanR)) polesE[j, : len(plsE)] = np.real(plsE) poles[j] = np.real(pls) approx.verbosity = verb fighandles += [plt.figure(figsize = (17, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) ax1.plot(poles, tspace) ax1.set_ylim(Ls) ax1.set_xlabel('mu_1') ax1.set_ylabel('mu_2') ax1.grid() ax2.plot(polesE, tspace, 'k-.', linewidth = 1) ax2.plot(poles, tspace) for mm in approx.musMarginal: ax2.plot(zs, [mm[0, 0]] * 2, 'k--', linewidth = 1) ax2.set_xlim(zs) ax2.set_ylim(Ls) ax2.set_xlabel('mu_1') ax2.set_ylabel('mu_2') ax2.grid() plt.show() print("\n") diff --git a/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py b/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py deleted file mode 100755 index 1786c06..0000000 --- a/examples/5_anisotropic_square/anisotropic_square_test_cutoff.py +++ /dev/null @@ -1,78 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt -from itertools import product -from anisotropic_square_engine import (AnisotropicSquareEngine as engine, - AnisotropicSquareEnginePoles as plsEx) -from rrompy.reduction_methods import ( - RationalInterpolantGreedyPivotedGreedy as RIGPG) -from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, - SparseGridSampler as SGS) - -zs, Ls = [10., 50.], [.2, 1.2] -z0, L0, n = np.mean(zs), np.mean(Ls), 50 -murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]] -np.random.seed(4020) -mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]), - Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])] -solver = engine(z0, L0, n) - -fighandles = [] -params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3, - "polybasis": "LEGENDRE", 'samplerPivot':QS(zs, "UNIFORM"), - 'sharedRatio': 0., "maxIterMarginal":20, - 'cutOffToleranceError': 1., 'trainSetGenerator':QS(zs, "UNIFORM"), - 'errorEstimatorKind':"LOOK_AHEAD_RES", - 'errorEstimatorKindMarginal':"LOOK_AHEAD_RECOVER", - "SMarginal": 3, "paramsMarginal": {"MMarginal": 2, - "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]}, - "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls), - "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1.} - -for cutOffTolerance, polybasisMarginal in product([1., np.inf], - ["MONOMIAL_WENDLAND", "PIECEWISE_LINEAR_UNIFORM"]): - print("Testing cutoff tolerance {} with marginal basis {}.".format( - cutOffTolerance, polybasisMarginal)) - params['cutOffTolerance'] = 
cutOffTolerance - params["polybasisMarginal"] = polybasisMarginal - - approx = RIGPG([0], solver, mu0 = [z0, L0], approx_state = True, - approxParameters = params, verbosity = 5) - approx.setupApprox("ALL") - verb = approx.verbosity - approx.verbosity = 0 - tspace = np.linspace(Ls[0], Ls[-1], 100) - for j, t in enumerate(tspace): - plsE = plsEx(t, 0., zs[-1]) - pls = approx.getPoles([None, t]) - pls[np.abs(np.imag(pls)) > 1e-5] = np.nan - if j == 0: - polesE = np.empty((len(tspace), len(plsE))) - poles = np.empty((len(tspace), len(pls))) - polesE[:] = np.nan - if len(plsE) > polesE.shape[1]: - nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1])) - nanR[:] = np.nan - polesE = np.hstack((polesE, nanR)) - polesE[j, : len(plsE)] = np.real(plsE) - poles[j] = np.real(pls) - approx.verbosity = verb - fighandles += [plt.figure(figsize = (17, 5))] - ax1 = fighandles[-1].add_subplot(1, 2, 1) - ax2 = fighandles[-1].add_subplot(1, 2, 2) - ax1.plot(poles, tspace) - ax1.set_ylim(Ls) - ax1.set_xlabel('mu_1') - ax1.set_ylabel('mu_2') - ax1.grid() - ax2.plot(polesE, tspace, 'k-.', linewidth = 1) - ax2.plot(poles, tspace) - for mm in approx.musMarginal: - ax2.plot(zs, [mm[0, 0]] * 2, 'k--', linewidth = 1) - ax2.set_xlim(zs) - ax2.set_ylim(Ls) - ax2.set_xlabel('mu_1') - ax2.set_ylabel('mu_2') - ax2.grid() - plt.show() - - print("\n") diff --git a/examples/7_MHD/mhd.py b/examples/7_MHD/mhd.py index bb8482c..a750e41 100644 --- a/examples/7_MHD/mhd.py +++ b/examples/7_MHD/mhd.py @@ -1,73 +1,73 @@ import numpy as np import matplotlib.pyplot as plt from mhd_engine import MHDEngine as engine from rrompy.reduction_methods import (RationalInterpolant as RI, RationalInterpolantGreedy as RIG) from rrompy.parameter.parameter_sampling import (FFTSampler as FFTS, QuadratureCircleSampler as QCS, QuadratureBoxSampler as QBS) ks = [-.35 + .5j, 0. 
+ .5j] k0 = np.mean(ks) solver = engine(5) kEffDelta = .1 * (ks[1] - ks[0]) kEff = np.real([ks[0] - kEffDelta, ks[1] + kEffDelta]) iEff = kEff - .5 * np.sum(np.real(ks)) + np.imag(ks[0]) nPoles = 50 polesEx = solver.getPolesExact(nPoles, k0) for corrector in [False, True]: for method in ["FFT", "BOX", "GREEDY"]: print("Testing {} method with{} corrector".format(method, "out" * (not corrector))) if method == "FFT": params = {'S':64, 'POD':True, 'polybasis':"MONOMIAL", - 'sampler':FFTS(ks), 'residueTol':1e-5} + 'sampler':FFTS(ks)} algo = RI if method == "BOX": params = {'S':64, 'POD':True, 'polybasis':"MONOMIAL", - 'sampler':QBS(ks), 'residueTol':1e-5} + 'sampler':QBS(ks)} algo = RI if method == "GREEDY": params = {'S':30, 'POD':True, 'greedyTol':1e-2, 'polybasis':"MONOMIAL", 'sampler':QCS(ks), 'errorEstimatorKind':"LOOK_AHEAD", 'nTestPoints':10000, - 'trainSetGenerator':FFTS(ks), 'residueTol':1e-5} + 'trainSetGenerator':FFTS(ks)} algo = RIG params['correctorForce'] = corrector approx = algo(solver, mu0 = k0, approx_state = True, approxParameters = params, verbosity = 10) approx.setupApprox() poles, residues = approx.getResidues() inRange = np.logical_and( np.logical_and(np.real(poles) >= kEff[0], np.real(poles) <= kEff[1]), np.logical_and(np.imag(poles) >= iEff[0], np.imag(poles) <= iEff[1])) polesEff = poles[inRange] resNormEff = np.linalg.norm(residues, axis = 1)[inRange] rLm = np.min(np.log(resNormEff)) rLmM = np.max(np.log(resNormEff)) - rLm fig = plt.figure(figsize = (10, 10)) ax = fig.add_subplot(1, 1, 1) if method == "GREEDY": ax.plot(approx.muTest.re.data.flatten(), approx.muTest.im.data.flatten(), 'k,', alpha = 0.25) for pl, rN in zip(polesEff, resNormEff): if corrector: alpha = .35 + .4 * (np.log(rN) - rLm) / rLmM else: alpha = .1 + .65 * (np.log(rN) - rLm) / rLmM ax.annotate("{0:.0e}".format(rN), (np.real(pl), np.imag(pl)), alpha = alpha) ax.plot(np.real(pl), np.imag(pl), 'r+', alpha = alpha + .25) ax.plot(approx.mus.re.data.flatten(), approx.mus.im.data.flatten(), 'k.') ax.plot(np.real(polesEx), np.imag(polesEx), 'bx') ax.set_xlim(kEff) ax.set_ylim(iEff) ax.grid() plt.tight_layout() plt.show() print("\n") diff --git a/examples/7_MHD/mhd_engine.py b/examples/7_MHD/mhd_engine.py index aa8988f..36cf664 100644 --- a/examples/7_MHD/mhd_engine.py +++ b/examples/7_MHD/mhd_engine.py @@ -1,17 +1,18 @@ import numpy as np import scipy.io as scio import scipy.sparse as sp from rrompy.hfengines.scipy_engines import TensorizedEigenproblemEngine class MHDEngine(TensorizedEigenproblemEngine): """ From Matrix Market: //math.nist.gov/MatrixMarket/data/NEP/mhd/mhd.html """ def __init__(self, ncol : int = 1, seed : int = 31415): A = sp.csr_matrix(scio.mmread("mhd4800a.mtx"), dtype = np.complex) B = - scio.mmread("mhd4800b.mtx").tocsr() super().__init__([A, B], seed, ncol) + self.cutOffResNormMin = 1e-5 def getPolesExact(self, k:int, sigma:np.complex): return sp.linalg.eigs(self.As[0], k, - self.As[1], sigma, return_eigenvectors = False) diff --git a/examples/8_damped_mass_chain/damped_mass_chain.py b/examples/8_damped_mass_chain/damped_mass_chain.py index 30f1f30..69785c4 100644 --- a/examples/8_damped_mass_chain/damped_mass_chain.py +++ b/examples/8_damped_mass_chain/damped_mass_chain.py @@ -1,186 +1,185 @@ ### example from Lohmann, Eid. Efficient Order Reduction of Parametric and ### Nonlinear Models by Superposition of Locally Reduced Models. 
from copy import deepcopy as copy import numpy as np import matplotlib.pyplot as plt from rrompy.reduction_methods import (NearestNeighbor as NN, RationalInterpolant as RI, RationalInterpolantGreedy as RIG, RationalInterpolantPivoted as RIP, RationalInterpolantGreedyPivoted as RIGP) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, EmptySampler as ES) from damped_mass_chain_engine import (bode as bode0, bodeLog, MassChainEngine, MassChainEngineLog, AugmentedMassChainEngine, AugmentedMassChainEngineLog) from rrompy.utilities.base.decorators import addWhiteNoise ########################## fullModelOrder = 1 #+ 1 SMarginal = 1 #* 2 state = 0 #+ 1 noise_level = 0 #+ 1e-5 LS = 0 #+ 6 ########################## modelSign = "Surrogate modeling for frequency response of " if fullModelOrder == 1: modelSign += "augmented " modelSign += "damper-mass-spring model" if SMarginal > 1: modelSign += " with 1 design parameter" modelSign += ".\nOutput is " if state: modelSign += "vector of mass displacements. " else: modelSign += "displacement of last mass. " if LS: modelSign += "Least-squares: S - N - 1 = {}. ".format(LS) else: modelSign += "Interpolatory: S = N + 1. " modelSign += "Noise level: {}.\n".format(noise_level) print(modelSign) M = [np.array([1., 5., 25., 125.])] N = len(M[0]) D = [np.zeros((N, N))] D[0][0, 1], D[0][1, 2], D[0][2, 3], D[0][3, 3] = .1, .4, 1.6, 0. D[0] = D[0] + D[0].T K = [np.zeros((N, N))] K[0][0, 1], K[0][1, 2], K[0][2, 3], K[0][0, 3], K[0][3, 3] = 9., 3., 1., 1., 2. K[0] = K[0] + K[0].T B = np.append(27., np.zeros(N - 1)).reshape(-1, 1) if SMarginal > 1: M += [np.zeros(N)] D += [np.zeros((N, N))] D[1][3, 3] = 1. D[1] = D[1] + D[1].T K += [np.zeros((N, N))] K[1][0, 3], K[1][3, 3] = 2., 2. K[1] = K[1] + K[1].T if state: C = np.eye(4) else: C = np.append(np.zeros(N - 1), 1.).reshape(1, -1) for logspace in range(2): print("Approximation in l{}space".format("og" * logspace + "in" * (not logspace))) if logspace: bode = bodeLog if fullModelOrder == 1: engine = AugmentedMassChainEngineLog else: engine = MassChainEngineLog else: bode = bode0 if fullModelOrder == 1: engine = AugmentedMassChainEngine else: engine = MassChainEngine solver = addWhiteNoise(noise_level)(engine)(M, D, K, B, C) ss, mu = [1e-2, 1e1], [] s0 = 10. ** np.mean(np.log10(ss)) freq = np.logspace(np.log10(ss[0]), np.log10(ss[1]), 100) if logspace: ss, freq = [np.log10(ss[0]), np.log10(ss[1])], np.log10(freq) s0, parameterMap = np.log10(s0), 1. else: parameterMap = {"F": [("log10", "x")], "B": [(10., "**", "x")]} krange = [[ss[0]], [ss[-1]]] k0, srange = [s0], copy(krange) if SMarginal > 1: ms = [0., 1.] 
m0, mrange = np.mean(ms), [[ms[0]], [ms[-1]]] krange[0] += mrange[0] krange[1] += mrange[1] k0 += [m0] mu = [.5 * (ms[1] - ms[0]) / (SMarginal - 1)] if not logspace: parameterMap["F"] += [("x")] parameterMap["B"] += [("x")] for method in ["RI", "RI_GREEDY"]: print("Testing {} method".format(method)) if method == "RI": params = {'S':15, 'POD':True, 'polybasis':"CHEBYSHEV"} if LS: params["N"] = params["S"] - 1 - LS if SMarginal > 1: algo = RIP else: params['sampler'] = QS(srange, "CHEBYSHEV", parameterMap) algo = RI if method == "RI_GREEDY": params = {'S':5, 'POD':True, 'polybasis':"LEGENDRE", 'greedyTol':1e-2, 'errorEstimatorKind':"DISCREPANCY", 'trainSetGenerator':QS(srange, "CHEBYSHEV", parameterMap)} if SMarginal > 1: algo = RIGP else: params['sampler'] = QS(srange, "UNIFORM", parameterMap) algo = RIG if SMarginal > 1: params["paramsMarginal"] = {"MMarginal": SMarginal - 1} params['SMarginal'] = SMarginal params['polybasisMarginal'] = "MONOMIAL" params['radialDirectionalWeightsMarginal'] = [2. / (ms[1] - ms[0])] params['matchingWeight'] = 1. - #params['cutOffTolerance'] = 2. params['samplerPivot'] = QS(srange, "UNIFORM", parameterMap) params['samplerMarginal'] = QS(mrange, "UNIFORM") approx = algo([0], solver, mu0 = k0, approx_state = True, approxParameters = params, verbosity = 5, storeAllSamples = True) else: approx = algo(solver, mu0 = k0, approx_state = True, approxParameters = params, verbosity = 5) if "GREEDY" in method: approx.setupApprox("LAST") else: approx.setupApprox() approxNN = NN(solver, mu0 = k0, approx_state = True, verbosity = 5, approxParameters = {'S':len(approx.mus), 'POD':params['POD'], 'sampler':ES()}) if SMarginal > 1: approxNN.setSamples(approx.storedSamplesFilenames) approx.purgeStoredSamples() for m in approx.musMarginal: bode(freq, m[0], [approx.getHF, approx.getApprox, approxNN.getApprox]) else: approxNN.setSamples(approx.samplingEngine) bode(freq, mu, [approx.getHF, approx.getApprox, approxNN.getApprox]) if SMarginal > 1: bode(freq, [1.5 * ms[1]], [approx.getHF, approx.getApprox, approxNN.getApprox]) bode(freq, [2. * ms[1]], [approx.getHF, approx.getApprox, approxNN.getApprox]) verb = approx.verbosity approx.verbosity = 0 mspace = np.linspace(ms[0], ms[-1], 10) for j, t in enumerate(mspace): pls = approx.getPoles([None, t]) if j == 0: poles = np.empty((len(mspace), len(pls)), dtype = np.complex) poles[j] = pls for j, t in enumerate(approx.musMarginal): pls = approx.getPoles([None, t[0][0]]) if j == 0: polesE = np.empty((SMarginal, len(pls)), dtype = np.complex) polesE[j] = pls approx.verbosity = verb fig = plt.figure(figsize = (10, 6)) ax = fig.add_subplot(1, 1, 1) ax.plot(np.real(poles), np.imag(poles), '--') ax.plot(np.real(polesE), np.imag(polesE), 'ko', markersize = 4) ax.set_xlabel('Real') ax.set_ylabel('Imag') ax.grid() plt.show() else: poles = approx.getPoles() print("Poles:\n{}".format(poles)) print("\n") diff --git a/rrompy/hfengines/base/hfengine_base.py b/rrompy/hfengines/base/hfengine_base.py index 5f6916f..030eeef 100644 --- a/rrompy/hfengines/base/hfengine_base.py +++ b/rrompy/hfengines/base/hfengine_base.py @@ -1,272 +1,305 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod import numpy as np import scipy.sparse as scsp from numbers import Number from copy import copy as softcopy from rrompy.utilities.base.decorators import nonaffine_construct from rrompy.utilities.base.types import (Np1D, Np2D, List, DictAny, paramVal, paramList, sampList) from rrompy.utilities.numerical import solve as tsolve, dot, pseudoInverse from rrompy.utilities.expression import expressionEvaluator from rrompy.utilities.exception_manager import RROMPyAssert from rrompy.sampling.sample_list import sampleList from rrompy.parameter import (checkParameter, checkParameterList, parameterList, parameterMap as pMap) from rrompy.solver.linear_solver import setupSolver from rrompy.utilities.parallel import (poolRank, masterCore, listScatter, matrixGatherv, isend, recv) __all__ = ['HFEngineBase'] class HFEngineBase: """Generic solver for parametric problems.""" def __init__(self, verbosity : int = 10, timestamp : bool = True): self.verbosity = verbosity self.timestamp = timestamp self.setSolver("SPSOLVE", {"use_umfpack" : False}) self.npar = 0 self._C = None self.outputNormMatrix = 1. def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return self.name() def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) def __dir_base__(self): return [x for x in self.__dir__() if x[:2] != "__"] def __deepcopy__(self, memo): return softcopy(self) @property def npar(self): """Value of npar.""" return self._npar @npar.setter def npar(self, npar): nparOld = self._npar if hasattr(self, "_npar") else -1 if npar != nparOld: self.parameterMap = pMap(1., npar) self._npar = npar @property def spacedim(self): return 1 def checkParameter(self, mu:paramVal) -> paramVal: muP = checkParameter(mu, self.npar) if self.npar == 0: muP.reset((1, 0), muP.dtype) return muP def checkParameterList(self, mu:paramList, check_if_single : bool = False) -> paramList: muL = checkParameterList(mu, self.npar, check_if_single) return muL def mapParameterList(self, mu:paramList, direct : str = "F", idx : List[int] = None) -> paramList: if idx is None: idx = np.arange(self.npar) muMapped = checkParameterList(mu, len(idx)) for j, d in enumerate(idx): muMapped.data[:, j] = expressionEvaluator( self.parameterMap[direct][d], muMapped(j)).flatten() return muMapped def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ self.energyNormMatrix = 1. def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ self.energyNormDualMatrix = 1. 
def innerProduct(self, u:Np2D, v:Np2D, onlyDiag : bool = False, dual : bool = False, is_state : bool = True) -> Np2D: """Scalar product.""" if is_state or self.isCEye: if dual: if not hasattr(self, "energyNormDualMatrix"): self.buildEnergyNormDualForm() energyMat = self.energyNormDualMatrix else: if not hasattr(self, "energyNormMatrix"): self.buildEnergyNormForm() energyMat = self.energyNormMatrix else: energyMat = self.outputNormMatrix if isinstance(u, (parameterList, sampleList)): u = u.data if isinstance(v, (parameterList, sampleList)): v = v.data if onlyDiag: return np.sum(dot(energyMat, u) * v.conj(), axis = 0) return dot(dot(energyMat, u).T, v.conj()).T def norm(self, u:Np2D, dual : bool = False, is_state : bool = True) -> Np1D: return np.abs(self.innerProduct(u, u, onlyDiag = True, dual = dual, is_state = is_state)) ** .5 def baselineA(self): """Return 0 of shape consistent with operator of linear system.""" if (hasattr(self, "As") and hasattr(self.As, "__len__") and self.As[0] is not None): d = self.As[0].shape[0] else: d = self.spacedim return scsp.csr_matrix((np.zeros(0), np.zeros(0), np.zeros(d + 1)), shape = (d, d), dtype = np.complex) def baselineb(self): """Return 0 of shape consistent with RHS of linear system.""" return np.zeros(self.spacedim, dtype = np.complex) @nonaffine_construct @abstractmethod def A(self, mu : paramVal = [], der : List[int] = 0) -> Np2D: """ Assemble terms of operator of linear system and return it (or its derivative) at a given parameter. """ return @nonaffine_construct @abstractmethod def b(self, mu : paramVal = [], der : List[int] = 0) -> Np1D: """ Assemble terms of RHS of linear system and return it (or its derivative) at a given parameter. """ return @property def C(self): """Value of C.""" if self._C is None: self._C = 1. return self._C @property def isCEye(self): return isinstance(self.C, Number) def applyC(self, u:sampList): """Apply LHS of linear system.""" return dot(self.C, u) def applyCpInv(self, u:sampList): """Apply pseudoinverse of LHS of linear system.""" return dot(pseudoInverse(self.C), u) def setSolver(self, solverType:str, solverArgs : DictAny = {}): """Choose solver type and parameters.""" self._solver, self._solverArgs = setupSolver(solverType, solverArgs) def solve(self, mu : paramList = [], RHS : sampList = None, return_state : bool = False) -> sampList: """ Find solution of linear system. Args: mu: parameter value. RHS: RHS of linear system. If None, defaults to that of parametric system. Defaults to None. return_state: whether to return state before multiplication by c. Defaults to False. 
""" from rrompy.sampling import sampleList, emptySampleList if mu == []: mu = self.mu0 mu = self.checkParameterList(mu) if len(mu) == 0: return emptySampleList() mu, idx, sizes = listScatter(mu, return_sizes = True) mu = self.checkParameterList(mu) req, emptyCores = [], np.where(np.logical_not(sizes))[0] if len(mu) == 0: uL, uT = recv(source = 0, tag = poolRank()) sol = np.empty((uL, 0), dtype = uT) else: if RHS is None: RHS = sampleList([self.b(m) for m in mu]) else: RHS = sampleList(RHS) if len(RHS) > 1: RHS = sampleList([RHS[i] for i in idx]) mult = 0 if len(RHS) == 1 else 1 RROMPyAssert(mult * (len(mu) - 1) + 1, len(RHS), "Sample size") for j, mj in enumerate(mu): u = tsolve(self.A(mj), RHS[mult * j], self._solver, self._solverArgs) if j == 0: sol = np.empty((len(u), len(mu)), dtype = u.dtype) if masterCore(): for dest in emptyCores: req += [isend((len(u), u.dtype), dest = dest, tag = dest)] sol[:, j] = u if not return_state: sol = self.applyC(sol) for r in req: r.wait() return sampleList(matrixGatherv(sol, sizes)) def residual(self, mu : paramList = [], u : sampList = None, post_c : bool = True) -> sampList: """ Find residual of linear system for given approximate solution. Args: mu: parameter value. u: numpy complex array with function dofs. If None, set to 0. post_c: whether to post-process using c. Defaults to True. """ from rrompy.sampling import sampleList, emptySampleList if mu == []: mu = self.mu0 mu = self.checkParameterList(mu) if len(mu) == 0: return emptySampleList() mu, idx, sizes = listScatter(mu, return_sizes = True) mu = self.checkParameterList(mu) req, emptyCores = [], np.where(np.logical_not(sizes))[0] if len(mu) == 0: uL, uT = recv(source = 0, tag = poolRank()) res = np.empty((uL, 0), dtype = uT) else: v = sampleList(np.zeros((self.spacedim, len(mu)))) if u is not None: u = sampleList(u) v = v + sampleList([u[i] for i in idx]) for j, (mj, vj) in enumerate(zip(mu, v)): r = self.b(mj) - dot(self.A(mj), vj) if j == 0: res = np.empty((len(r), len(mu)), dtype = r.dtype) if masterCore(): for dest in emptyCores: req += [isend((len(r), r.dtype), dest = dest, tag = dest)] res[:, j] = r if post_c: res = self.applyC(res) for r in req: r.wait() return sampleList(matrixGatherv(res, sizes)) + + cutOffPolesRMax = np.inf + cutOffPolesRMin = - np.inf + cutOffPolesIMax = np.inf + cutOffPolesIMin = - np.inf + cutOffResNormMin = 0. + def flagBadPolesResidues(self, poles:Np1D, residues : Np1D = None) -> Np1D: + """ + Flag (numerical) poles/residues which are impossible. + + Args: + poles: poles to be judged. 
+ """ + poles = np.array(poles).flatten() + if residues is not None: + residues = np.array(residues).reshape(len(poles), -1) + flag = np.zeros(len(poles), dtype = bool) + if not np.isinf(self.cutOffPolesRMax): + flag = np.logical_or(flag, np.real(poles) > self.cutOffPolesRMax) + if not np.isinf(self.cutOffPolesRMin): + flag = np.logical_or(flag, np.real(poles) < self.cutOffPolesRMin) + if not np.isinf(self.cutOffPolesIMax): + flag = np.logical_or(flag, np.imag(poles) > self.cutOffPolesIMax) + if not np.isinf(self.cutOffPolesIMin): + flag = np.logical_or(flag, np.imag(poles) < self.cutOffPolesIMin) + if residues is not None and self.cutOffResNormMin != 0.: + if residues.shape[1] == self.spacedim: + resEff = self.norm(residues.T) + else: + resEff = np.linalg.norm(residues, axis = 1) + resEff /= np.max(resEff) + flag = np.logical_or(flag, resEff < self.cutOffResNormMin) + return flag diff --git a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py index cbd0005..63dbc8e 100644 --- a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py +++ b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py @@ -1,829 +1,788 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod from copy import deepcopy as copy import numpy as np from matplotlib import pyplot as plt from rrompy.reduction_methods.pivoted.generic_pivoted_approximant import ( GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximant) from rrompy.reduction_methods.pivoted.gather_pivoted_approximant import ( gatherPivotedApproximant) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal, paramList, ListAny) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.point_matching import (pointMatching, - chordalMetricAdjusted, potential) + chordalMetricAdjusted) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import (masterCore, indicesScatter, arrayGatherv, isend) __all__ = ['GenericPivotedGreedyApproximantNoMatch', 'GenericPivotedGreedyApproximant'] class GenericPivotedGreedyApproximantBase(GenericPivotedApproximantBase): _allowedEstimatorKindsMarginal = ["LEAVE_ONE_OUT", "LOOK_AHEAD", "LOOK_AHEAD_RECOVER", "NONE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchingWeightError", - "cutOffToleranceError", "errorEstimatorKindMarginal", "greedyTolMarginal", "maxIterMarginal"], - [0., "AUTO", "NONE", 1e-1, 1e2]) + [0., "NONE", 1e-1, 1e2]) super().__init__(*args, **kwargs) self._postInit() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. 
if self._scaleFactorDer == "AUTO": return self._scaleFactorOldPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif hasattr(scaleFactorDer, "__len__"): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'refine' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") GenericPivotedApproximantBase.samplerMarginal.fset(self, samplerMarginal) @property def errorEstimatorKindMarginal(self): """Value of errorEstimatorKindMarginal.""" return self._errorEstimatorKindMarginal @errorEstimatorKindMarginal.setter def errorEstimatorKindMarginal(self, errorEstimatorKindMarginal): errorEstimatorKindMarginal = errorEstimatorKindMarginal.upper() if errorEstimatorKindMarginal not in ( self._allowedEstimatorKindsMarginal): RROMPyWarning(("Marginal error estimator kind not recognized. " "Overriding to 'NONE'.")) errorEstimatorKindMarginal = "NONE" self._errorEstimatorKindMarginal = errorEstimatorKindMarginal self._approxParameters["errorEstimatorKindMarginal"] = ( self.errorEstimatorKindMarginal) @property def matchingWeightError(self): """Value of matchingWeightError.""" return self._matchingWeightError @matchingWeightError.setter def matchingWeightError(self, matchingWeightError): self._matchingWeightError = matchingWeightError self._approxParameters["matchingWeightError"] = ( self.matchingWeightError) - @property - def cutOffToleranceError(self): - """Value of cutOffToleranceError.""" - return self._cutOffToleranceError - @cutOffToleranceError.setter - def cutOffToleranceError(self, cutOffToleranceError): - if isinstance(cutOffToleranceError, (str,)): - cutOffToleranceError = cutOffToleranceError.upper()\ - .strip().replace(" ","") - if cutOffToleranceError != "AUTO": - RROMPyWarning(("String value of cutOffToleranceError not " - "recognized. 
Overriding to 'AUTO'.")) - cutOffToleranceError == "AUTO" - self._cutOffToleranceError = cutOffToleranceError - self._approxParameters["cutOffToleranceError"] = ( - self.cutOffToleranceError) - @property def greedyTolMarginal(self): """Value of greedyTolMarginal.""" return self._greedyTolMarginal @greedyTolMarginal.setter def greedyTolMarginal(self, greedyTolMarginal): if greedyTolMarginal < 0: raise RROMPyException("greedyTolMarginal must be non-negative.") if (hasattr(self, "_greedyTolMarginal") and self.greedyTolMarginal is not None): greedyTolMarginalold = self.greedyTolMarginal else: greedyTolMarginalold = -1 self._greedyTolMarginal = greedyTolMarginal self._approxParameters["greedyTolMarginal"] = self.greedyTolMarginal if greedyTolMarginalold != self.greedyTolMarginal: self.resetSamples() @property def maxIterMarginal(self): """Value of maxIterMarginal.""" return self._maxIterMarginal @maxIterMarginal.setter def maxIterMarginal(self, maxIterMarginal): if maxIterMarginal <= 0: raise RROMPyException("maxIterMarginal must be positive.") if (hasattr(self, "_maxIterMarginal") and self.maxIterMarginal is not None): maxIterMarginalold = self.maxIterMarginal else: maxIterMarginalold = -1 self._maxIterMarginal = maxIterMarginal self._approxParameters["maxIterMarginal"] = self.maxIterMarginal if maxIterMarginalold != self.maxIterMarginal: self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() if not hasattr(self, "_temporaryPivot"): self._mus = emptyParameterList() self._musMarginal = emptyParameterList() if hasattr(self, "samplerMarginal"): self.samplerMarginal.reset() if hasattr(self, "samplingEngine") and self.samplingEngine is not None: self.samplingEngine.resetHistory() - def _getPolesResExact(self, HITest, foci:Tuple[float, float], - ground:float) -> Tuple[Np1D, Np2D]: - if self.cutOffToleranceError == "AUTO": - cutOffTolErr = self.cutOffTolerance - else: - cutOffTolErr = self.cutOffToleranceError - polesEx = copy(HITest.poles) - idxExEff = np.where(potential(polesEx, foci) - ground - <= cutOffTolErr * ground)[0] - if self.matchingWeightError != 0: - resEx = HITest.coeffs[idxExEff] - else: - resEx = None - return polesEx[idxExEff], resEx - def _getDistanceApp(self, polesEx:Np1D, resEx:Np2D, muTest:paramVal, foci:Tuple[float, float], ground:float) -> float: - if self.cutOffToleranceError == "AUTO": - cutOffTolErr = self.cutOffTolerance - else: - cutOffTolErr = self.cutOffToleranceError polesAp = self.trainedModel.interpolateMarginalPoles(muTest)[0] - idxApEff = np.where(potential(polesAp, foci) - ground - <= cutOffTolErr * ground)[0] - polesAp = polesAp[idxApEff] if self.matchingWeightError != 0: resAp = self.trainedModel.interpolateMarginalCoeffs(muTest)[0][ - idxApEff, :] + : len(polesAp), :] resEx = self.trainedModel.data.projMat[:, : resEx.shape[1]].dot(resEx.T) resAp = self.trainedModel.data.projMat[:, : resAp.shape[1]].dot(resAp.T) else: resAp = None dist = chordalMetricAdjusted(polesEx, polesAp, self.matchingWeightError, resEx, resAp, self.HFEngine, False) pmR, pmC = pointMatching(dist) return np.mean(dist[pmR, pmC]) def getErrorEstimatorMarginalLeaveOneOut(self) -> Np1D: nTest = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nTest) if nTest <= 1: err = np.empty(nTest) err[:] = np.inf return err idx, sizes = indicesScatter(nTest, return_sizes = True) err = [] if len(idx) > 0: _tMdataFull = copy(self.trainedModel.data) _musMExcl = None self.verbosity -= 35 self.trainedModel.verbosity -= 35 foci = 
self.samplerPivot.normalFoci() ground = self.samplerPivot.groundPotential() for i, j in enumerate(idx): jEff = j - (i > 0) muTest = self.trainedModel.data.musMarginal[jEff] - polesEx, resEx = self._getPolesResExact( - self.trainedModel.data.HIs[jEff], - foci, ground) + polesEx = copy(self.trainedModel.data.HIs[jEff].poles) + idxBad = self.HFEngine.flagBadPolesResidues(polesEx) + polesEx = polesEx[np.logical_not(idxBad)] + if self.matchingWeightError != 0: + resEx = self.trainedModel.data.HIs[jEff].coeffs[ + np.where(np.logical_not(idxBad))[0]] + else: + resEx = None if i > 0: self.musMarginal.insert(_musMExcl, j - 1) _musMExcl = self.musMarginal[j] self.musMarginal.pop(j) if len(polesEx) == 0: err += [0.] continue self._updateTrainedModelMarginalSamples([j]) self._finalizeMarginalization() err += [self._getDistanceApp(polesEx, resEx, muTest, foci, ground)] self._updateTrainedModelMarginalSamples() self.musMarginal.insert(_musMExcl, idx[-1]) self.verbosity += 35 self.trainedModel.verbosity += 35 self.trainedModel.data = _tMdataFull return arrayGatherv(np.array(err), sizes) def getErrorEstimatorMarginalLookAhead(self) -> Np1D: if not hasattr(self.trainedModel, "_musMExcl"): err = np.zeros(0) err[:] = np.inf self._musMarginalTestIdxs = np.zeros(0, dtype = int) return err self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl, dtype = int) idx, sizes = indicesScatter(len(self.trainedModel._musMExcl), return_sizes = True) err = [] if len(idx) > 0: self.verbosity -= 35 self.trainedModel.verbosity -= 35 foci = self.samplerPivot.normalFoci() ground = self.samplerPivot.groundPotential() for j in idx: muTest = self.trainedModel._musMExcl[j] HITest = self.trainedModel._HIsExcl[j] - polesEx, resEx = self._getPolesResExact(HITest, foci, ground) + polesEx = HITest.poles + idxBad = self.HFEngine.flagBadPolesResidues(polesEx) + polesEx = polesEx[np.logical_not(idxBad)] + if self.matchingWeightError != 0: + resEx = HITest.coeffs[np.where(np.logical_not(idxBad))[0]] + else: + resEx = None if len(polesEx) == 0: err += [0.] continue err += [self._getDistanceApp(polesEx, resEx, muTest, foci, ground)] self.verbosity += 35 self.trainedModel.verbosity += 35 return arrayGatherv(np.array(err), sizes) def getErrorEstimatorMarginalNone(self) -> Np1D: nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) return (1. + self.greedyTolMarginal) * np.ones(nErr) def errorEstimatorMarginal(self, return_max : bool = False) -> Np1D: vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format( self.trainedModel.data.musMarginal), 10) if self.errorEstimatorKindMarginal == "LEAVE_ONE_OUT": err = self.getErrorEstimatorMarginalLeaveOneOut() elif self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": err = self.getErrorEstimatorMarginalLookAhead() else:#if self.errorEstimatorKindMarginal == "NONE": err = self.getErrorEstimatorMarginalNone() vbMng(self.trainedModel, "DEL", "Done evaluating error estimator", 10) if not return_max: return err idxMaxEst = np.where(err > self.greedyTolMarginal)[0] maxErr = err[idxMaxEst] if self.errorEstimatorKindMarginal == "NONE": maxErr = None return err, idxMaxEst, maxErr def plotEstimatorMarginal(self, est:Np1D, idxMax:List[int], estMax:List[float]): if self.errorEstimatorKindMarginal == "NONE": return if (not (np.any(np.isnan(est)) or np.any(np.isinf(est))) and masterCore()): fig = plt.figure(figsize = plt.figaspect(1. 
/ self.nparMarginal)) for jpar in range(self.nparMarginal): ax = fig.add_subplot(1, self.nparMarginal, 1 + jpar) if self.errorEstimatorKindMarginal == "LEAVE_ONE_OUT": musre = copy(self.trainedModel.data.musMarginal.re.data) else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": if not hasattr(self.trainedModel, "_musMExcl"): return musre = np.real(self.trainedModel._musMExcl) if len(idxMax) > 0 and estMax is not None: maxrej = musre[idxMax, jpar] errCP = copy(est) idx = np.delete(np.arange(self.nparMarginal), jpar) while len(musre) > 0: if self.nparMarginal == 1: currIdx = np.arange(len(musre)) else: currIdx = np.where(np.isclose(np.sum( np.abs(musre[:, idx] - musre[0, idx]), 1), 0.))[0] currIdxSorted = currIdx[np.argsort(musre[currIdx, jpar])] ax.semilogy(musre[currIdxSorted, jpar], errCP[currIdxSorted], 'k.-', linewidth = 1) musre = np.delete(musre, currIdx, 0) errCP = np.delete(errCP, currIdx) ax.semilogy(self.musMarginal.re(jpar), (self.greedyTolMarginal,) * len(self.musMarginal), '*m') if len(idxMax) > 0 and estMax is not None: ax.semilogy(maxrej, estMax, 'xr') ax.set_xlim(*list(self.samplerMarginal.lims.re(jpar))) ax.grid() plt.tight_layout() plt.show() def _addMarginalSample(self, mus:paramList): mus = self.checkParameterListMarginal(mus) if len(mus) == 0: return self._nmusOld, nmus = len(self.musMarginal), len(mus) if (hasattr(self, "trainedModel") and self.trainedModel is not None and hasattr(self.trainedModel, "_musMExcl")): self._nmusOld += len(self.trainedModel._musMExcl) vbMng(self, "MAIN", ("Adding marginal sample point{} no. {}{} at {} to training " "set.").format("s" * (nmus > 1), self._nmusOld + 1, "--{}".format(self._nmusOld + nmus) * (nmus > 1), mus), 3) self.musMarginal.append(mus) self.setupApproxPivoted(mus) self._poleMatching() del self._nmusOld if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): ubRange = len(self.trainedModel.data.musMarginal) if hasattr(self.trainedModel, "_idxExcl"): shRange = len(self.trainedModel._musMExcl) else: shRange = 0 testIdxs = list(range(ubRange + shRange - len(mus), ubRange + shRange)) for j in testIdxs[::-1]: self.musMarginal.pop(j - shRange) if hasattr(self.trainedModel, "_idxExcl"): testIdxs = self.trainedModel._idxExcl + testIdxs self._updateTrainedModelMarginalSamples(testIdxs) self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = self.SMarginal def greedyNextSampleMarginal(self, muidx:List[int], plotEst : str = "NONE") \ -> Tuple[Np1D, List[int], float, paramVal]: RROMPyAssert(self._mode, message = "Cannot add greedy sample.") muidx = self._musMarginalTestIdxs[muidx] if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): if not hasattr(self.trainedModel, "_idxExcl"): raise RROMPyException(("Sample index to be added not present " "in trained model.")) testIdxs = copy(self.trainedModel._idxExcl) skippedIdx = 0 for cj, j in enumerate(self.trainedModel._idxExcl): if j in muidx: testIdxs.pop(skippedIdx) self.musMarginal.insert(self.trainedModel._musMExcl[cj], j - skippedIdx) else: skippedIdx += 1 if len(self.trainedModel._idxExcl) < (len(muidx) + len(testIdxs)): raise RROMPyException(("Sample index to be added not present " "in trained model.")) self._updateTrainedModelMarginalSamples(testIdxs) self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal 
self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) self.firstGreedyIterM = False idxAdded = self.samplerMarginal.refine(muidx) self._addMarginalSample(self.samplerMarginal.points[idxAdded]) errorEstTest, muidx, maxErrorEst = self.errorEstimatorMarginal(True) if plotEst == "ALL": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) return (errorEstTest, muidx, maxErrorEst, self.samplerMarginal.points[muidx]) def _preliminaryTrainingMarginal(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if np.sum(self.samplingEngine.nsamples) > 0: return self.resetSamples() self._addMarginalSample(self.samplerMarginal.generatePoints( self.SMarginal)) def _preSetupApproxPivoted(self, mus:paramList) \ -> Tuple[ListAny, ListAny, ListAny]: self.computeScaleFactor() if self.trainedModel is None: self._setupTrainedModel(np.zeros((0, 0))) self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], [] self.trainedModel.data.Psupp = [] self._trainedModelOld = copy(self.trainedModel) self._scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 self._musLoc = copy(self.mus) idx, sizes = indicesScatter(len(mus), return_sizes = True) emptyCores = np.where(np.logical_not(sizes))[0] self.verbosity -= 15 return idx, sizes, emptyCores def _postSetupApproxPivoted(self, mus:Np2D, pMat:Np2D, Ps:ListAny, Qs:ListAny, sizes:ListAny): self.scaleFactor = self._scaleFactorOldPivot del self._scaleFactorOldPivot, self._temporaryPivot pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs, mus, sizes, self.polybasis) if len(self._musLoc) > 0: self._mus = self.checkParameterList(self._musLoc) self._mus.append(mus) else: self._mus = self.checkParameterList(mus) self.trainedModel = self._trainedModelOld del self._trainedModelOld padLeft = self.trainedModel.data.projMat.shape[1] suppNew = np.append(0, np.cumsum(nsamples)) self._setupTrainedModel(pMat, padLeft > 0) self.trainedModel.data.Qs += Qs self.trainedModel.data.Ps += Ps self.trainedModel.data.Psupp += list(padLeft + suppNew[: -1]) self.trainedModel.data.approxParameters = copy(self.approxParameters) self.verbosity += 15 def _localPivotedResult(self, pMat:Np2D, req:ListAny, emptyCores:ListAny, mus:Np2D) -> Tuple[Np2D, ListAny, Np2D]: if pMat is None: mus = copy(self.samplingEngine.mus.data) pMat = copy(self.samplingEngine.projectionMatrix) if masterCore(): for dest in emptyCores: req += [isend((len(pMat), pMat.dtype, mus.dtype), dest = dest, tag = dest)] else: mus = np.vstack((mus, self.samplingEngine.mus.data)) pMat = np.hstack((pMat, self.samplingEngine.projectionMatrix)) return pMat, req, mus @abstractmethod def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) self._preSetupApproxPivoted() data = [] pass self._postSetupApproxPivoted(mus, data) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 def setupApprox(self, plotEst : str = "NONE") -> int: """Compute greedy snapshots of solution map.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") vbMng(self, "INIT", "Starting computation of snapshots.", 3) max2ErrorEst, self.firstGreedyIterM = np.inf, True self._preliminaryTrainingMarginal() if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": muidx = 
np.arange(len(self.trainedModel.data.musMarginal)) else:#if self.errorEstimatorKindMarginal in ["LEAVE_ONE_OUT", "NONE"]: muidx = [] self._musMarginalTestIdxs = np.array(muidx) while self.firstGreedyIterM or (max2ErrorEst > self.greedyTolMarginal and self.samplerMarginal.npoints < self.maxIterMarginal): errorEstTest, muidx, maxErrorEst, mu = \ self.greedyNextSampleMarginal(muidx, plotEst) if maxErrorEst is None: max2ErrorEst = 1. + self.greedyTolMarginal else: if len(maxErrorEst) > 0: max2ErrorEst = np.max(maxErrorEst) else: max2ErrorEst = np.max(errorEstTest) vbMng(self, "MAIN", ("Uniform testing error estimate " "{:.4e}.").format(max2ErrorEst), 3) if plotEst == "LAST": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) vbMng(self, "DEL", ("Done computing snapshots (final snapshot count: " "{}).").format(len(self.mus)), 3) if (self.errorEstimatorKindMarginal == "LOOK_AHEAD_RECOVER" and hasattr(self.trainedModel, "_idxExcl") and len(self.trainedModel._idxExcl) > 0): vbMng(self, "INIT", "Recovering {} test models.".format( len(self.trainedModel._idxExcl)), 7) for j, mu in zip(self.trainedModel._idxExcl, self.trainedModel._musMExcl): self.musMarginal.insert(mu, j) self._updateTrainedModelMarginalSamples() self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) vbMng(self, "DEL", "Done recovering test models.", 7) return 0 def checkComputedApproxPivoted(self) -> bool: return (super().checkComputedApprox() and len(self.musMarginal) == len(self.trainedModel.data.musMarginal)) class GenericPivotedGreedyApproximantNoMatch( GenericPivotedGreedyApproximantBase, GenericPivotedApproximantNoMatch): """ ROM pivoted greedy interpolant computation for parametric problems (without pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; defaults to 'AUTO', i.e. cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. 
approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeightError': weight for pole matching optimization in error estimation; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeightError: Weight for pole matching optimization in error estimation. - cutOffToleranceError: Tolerance for ignoring parasitic poles in error - estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def _poleMatching(self): vbMng(self, "INIT", "Compressing poles.", 10) self.trainedModel.initializeFromRational() vbMng(self, "DEL", "Done compressing poles.", 10) def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx) class GenericPivotedGreedyApproximant(GenericPivotedGreedyApproximantBase, GenericPivotedApproximant): """ ROM pivoted greedy interpolant computation for parametric problems (with pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. 
Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingMode': mode for pole matching optimization; allowed values include 'NONE' and 'SHIFT'; defaults to 'NONE'; - 'sharedRatio': required ratio of marginal points to share resonance; defaults to 1.; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; defaults to 'AUTO', i.e. cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeight': weight for pole matching optimization; - 'matchingMode': mode for pole matching optimization; - 'sharedRatio': required ratio of marginal points to share resonance; - 'matchingWeightError': weight for pole matching optimization in error estimation; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 
'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeight: Weight for pole matching optimization. matchingMode: Mode for pole matching optimization. sharedRatio: Required ratio of marginal points to share resonance. matchingWeightError: Weight for pole matching optimization in error estimation. - cutOffToleranceError: Tolerance for ignoring parasitic poles in error - estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. 
""" def _poleMatching(self): vbMng(self, "INIT", "Compressing and matching poles.", 10) self.trainedModel.initializeFromRational(self.matchingWeight, self.matchingMode, self.HFEngine, False) vbMng(self, "DEL", "Done compressing and matching poles.", 10) def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight, self.matchingMode, self.HFEngine, False) def getErrorEstimatorMarginalLeaveOneOut(self) -> Np1D: if self.polybasisMarginal != "NEARESTNEIGHBOR": if not hasattr(self, "_MMarginal_isauto"): if not hasattr(self, "_MMarginalOriginal"): self._MMarginalOriginal = self.paramsMarginal["MMarginal"] self.paramsMarginal["MMarginal"] = self._MMarginalOriginal self._reduceDegreeNNoWarn = 1 err = super().getErrorEstimatorMarginalLeaveOneOut() if self.polybasisMarginal != "NEARESTNEIGHBOR": del self._reduceDegreeNNoWarn return err def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() return super().setupApprox(*args, **kwargs) diff --git a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py index 4131d51..e440e9a 100644 --- a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py +++ b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py @@ -1,532 +1,504 @@ #Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from copy import deepcopy as copy import numpy as np from .generic_pivoted_greedy_approximant import ( GenericPivotedGreedyApproximantBase, GenericPivotedGreedyApproximantNoMatch, GenericPivotedGreedyApproximant) from rrompy.reduction_methods.standard.greedy import RationalInterpolantGreedy from rrompy.reduction_methods.standard.greedy.generic_greedy_approximant \ import pruneSamples from rrompy.reduction_methods.pivoted import ( RationalInterpolantGreedyPivotedNoMatch, RationalInterpolantGreedyPivoted) from rrompy.utilities.base.types import Np1D, Tuple, paramVal, paramList from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import RROMPyAssert from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, recv __all__ = ['RationalInterpolantGreedyPivotedGreedyNoMatch', 'RationalInterpolantGreedyPivotedGreedy'] class RationalInterpolantGreedyPivotedGreedyBase( GenericPivotedGreedyApproximantBase): @property def sampleBatchSize(self): """Value of sampleBatchSize.""" return 1 @property def sampleBatchIdx(self): """Value of sampleBatchIdx.""" return self.S def greedyNextSample(self, muidx:int, plotEst : str = "NONE")\ -> Tuple[Np1D, int, float, paramVal]: """Compute next greedy snapshot of solution map.""" RROMPyAssert(self._mode, message = "Cannot add greedy sample.") mus = copy(self.muTest[muidx]) self.muTest.pop(muidx) for j, mu in enumerate(mus): vbMng(self, "MAIN", ("Adding sample point no. {} at {} to training " "set.").format(len(self.mus) + 1, mu), 3) self.mus.append(mu) self._S = len(self.mus) self._approxParameters["S"] = self.S if (self.samplingEngine.nsamples <= len(mus) - j - 1 or not np.allclose(mu, self.samplingEngine.mus[j - len(mus)])): self.samplingEngine.nextSample(mu) if self._isLastSampleCollinear(): vbMng(self, "MAIN", ("Collinearity above tolerance detected. 
Starting " "preemptive greedy loop termination."), 3) self._collinearityFlag = 1 errorEstTest = np.empty(len(self.muTest)) errorEstTest[:] = np.nan return errorEstTest, [-1], np.nan, np.nan errorEstTest, muidx, maxErrorEst = self.errorEstimator(self.muTest, True) if plotEst == "ALL": self.plotEstimator(errorEstTest, muidx, maxErrorEst) return errorEstTest, muidx, maxErrorEst, self.muTest[muidx] def _setSampleBatch(self, maxS:int): return self.S def _preliminaryTraining(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if self.samplingEngine.nsamples > 0: return self.resetSamples() self.samplingEngine.scaleFactor = self.scaleFactorDer musPivot = self.trainSetGenerator.generatePoints(self.S) while len(musPivot) > self.S: musPivot.pop() muTestBasePivot = self.samplerPivot.generatePoints(self.nTestPoints, False) idxPop = pruneSamples(self.HFEngine.mapParameterList(muTestBasePivot, idx = self.directionPivot), self.HFEngine.mapParameterList(musPivot, idx = self.directionPivot), 1e-10 * self.scaleFactorPivot[0]) muTestBasePivot.pop(idxPop) self._mus = emptyParameterList() self.mus.reset((self.S - 1, self.HFEngine.npar)) self.muTest = emptyParameterList() self.muTest.reset((len(muTestBasePivot) + 1, self.HFEngine.npar)) for k in range(self.S - 1): muk = np.empty_like(self.mus[0]) muk[self.directionPivot] = musPivot[k] muk[self.directionMarginal] = self.muMargLoc self.mus[k] = muk for k in range(len(muTestBasePivot)): muk = np.empty_like(self.muTest[0]) muk[self.directionPivot] = muTestBasePivot[k] muk[self.directionMarginal] = self.muMargLoc self.muTest[k] = muk muk = np.empty_like(self.mus[0]) muk[self.directionPivot] = musPivot[-1] muk[self.directionMarginal] = self.muMargLoc self.muTest[-1] = muk if len(self.mus) > 0: vbMng(self, "MAIN", ("Adding first {} sample point{} at {} to training " "set.").format(self.S - 1, "" + "s" * (self.S > 2), self.mus), 3) self.samplingEngine.iterSample(self.mus) self._S = len(self.mus) self._approxParameters["S"] = self.S self.M, self.N = ("AUTO",) * 2 def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) if not hasattr(self, "_plotEstPivot"): self._plotEstPivot = "NONE" idx, sizes, emptyCores = self._preSetupApproxPivoted(mus) S0 = copy(self.S) pMat, Ps, Qs, req, musA = None, [], [], [], None if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 45) if self.storeAllSamples: self.storeSamples() pL, pT, mT = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = pT) musA = np.empty((0, self.mu0.shape[1]), dtype = mT) else: for i in idx: self.muMargLoc = mus[i] vbMng(self, "MAIN", "Building marginal model no. 
{} at " "{}.".format(i + 1, self.muMargLoc), 25) self.samplingEngine.resetHistory() self.trainedModel = None self.verbosity -= 5 self.samplingEngine.verbosity -= 5 RationalInterpolantGreedy.setupApprox(self, self._plotEstPivot) self.verbosity += 5 self.samplingEngine.verbosity += 5 if self.storeAllSamples: self.storeSamples(i + self._nmusOld) pMat, req, musA = self._localPivotedResult(pMat, req, emptyCores, musA) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] self._S = S0 del self.muMargLoc for r in req: r.wait() self._postSetupApproxPivoted(musA, pMat, Ps, Qs, sizes) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 def setupApprox(self, plotEst : str = "NONE") -> int: if self.checkComputedApprox(): return -1 if '_' not in plotEst: plotEst = plotEst + "_NONE" plotEstM, self._plotEstPivot = plotEst.split("_") val = super().setupApprox(plotEstM) return val class RationalInterpolantGreedyPivotedGreedyNoMatch( RationalInterpolantGreedyPivotedGreedyBase, GenericPivotedGreedyApproximantNoMatch, RationalInterpolantGreedyPivotedNoMatch): """ ROM greedy pivoted greedy rational interpolant computation for parametric problems (without pole matching). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; defaults to 'AUTO', i.e. 
cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', and 'LOOK_AHEAD_RECOVER'; defaults to 'LEAVE_ONE_OUT'; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'trainSetGenerator': training sample points generator; defaults to sampler; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE'; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeightError': weight for pole matching optimization in error estimation; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasis': type of polynomial basis for pivot interpolation; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'trainSetGenerator': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeightError: Weight for pole matching optimization in error estimation. - cutOffToleranceError: Tolerance for ignoring parasitic poles in error - estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasis: Type of polynomial basis for pivot interpolation. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. trainSetGenerator: training sample points generator. errorEstimatorKind: kind of error estimator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. 
uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ class RationalInterpolantGreedyPivotedGreedy( RationalInterpolantGreedyPivotedGreedyBase, GenericPivotedGreedyApproximant, RationalInterpolantGreedyPivoted): """ ROM greedy pivoted greedy rational interpolant computation for parametric problems (with pole matching). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingMode': mode for pole matching optimization; allowed values include 'NONE' and 'SHIFT'; defaults to 'NONE'; - 'sharedRatio': required ratio of marginal points to share resonance; defaults to 1.; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; defaults to 'AUTO', i.e. cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', and 'LOOK_AHEAD_RECOVER'; defaults to 'LEAVE_ONE_OUT'; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. 
- 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'trainSetGenerator': training sample points generator; defaults to sampler; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE'; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeight': weight for pole matching optimization; - 'matchingMode': mode for pole matching optimization; - 'sharedRatio': required ratio of marginal points to share resonance; - 'matchingWeightError': weight for pole matching optimization in error estimation; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. 
- 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'trainSetGenerator': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeight: Weight for pole matching optimization. matchingMode: Mode for pole matching optimization. sharedRatio: Required ratio of marginal points to share resonance. matchingWeightError: Weight for pole matching optimization in error estimation. - cutOffToleranceError: Tolerance for ignoring parasitic poles in error - estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. trainSetGenerator: training sample points generator. errorEstimatorKind: kind of error estimator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. 
lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ diff --git a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py index 01f203b..1305ea7 100644 --- a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py +++ b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py @@ -1,459 +1,431 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy from numpy import empty, empty_like from .generic_pivoted_greedy_approximant import ( GenericPivotedGreedyApproximantBase, GenericPivotedGreedyApproximantNoMatch, GenericPivotedGreedyApproximant) from rrompy.reduction_methods.standard import RationalInterpolant from rrompy.reduction_methods.pivoted import ( RationalInterpolantPivotedNoMatch, RationalInterpolantPivoted) from rrompy.utilities.base.types import paramList from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import RROMPyAssert from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, recv __all__ = ['RationalInterpolantPivotedGreedyNoMatch', 'RationalInterpolantPivotedGreedy'] class RationalInterpolantPivotedGreedyBase( GenericPivotedGreedyApproximantBase): def computeSnapshots(self): """Compute snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start snapshot computation.") vbMng(self, "INIT", "Starting computation of snapshots.", 5) self.samplingEngine.scaleFactor = self.scaleFactorDer if not hasattr(self, "musPivot") or len(self.musPivot) != self.S: self.musPivot = self.samplerPivot.generatePoints(self.S) while len(self.musPivot) > self.S: self.musPivot.pop() musLoc = emptyParameterList() musLoc.reset((self.S, self.HFEngine.npar)) self.samplingEngine.resetHistory() for k in range(self.S): muk = empty_like(musLoc[0]) muk[self.directionPivot] = self.musPivot[k] muk[self.directionMarginal] = self.muMargLoc musLoc[k] = muk self.samplingEngine.iterSample(musLoc) vbMng(self, "DEL", "Done computing snapshots.", 5) self._m_selfmus = copy(musLoc) self._mus = self.musPivot self._m_mu0 = copy(self.mu0) self._m_HFEparameterMap = copy(self.HFEngine.parameterMap) self._mu0 = self.checkParameterListPivot(self.mu0(self.directionPivot)) self.HFEngine.parameterMap = { "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]], "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]} def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot setup 
approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) idx, sizes, emptyCores = self._preSetupApproxPivoted(mus) pMat, Ps, Qs, req, musA = None, [], [], [], None if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 45) if self.storeAllSamples: self.storeSamples() pL, pT, mT = recv(source = 0, tag = poolRank()) pMat = empty((pL, 0), dtype = pT) musA = empty((0, self.mu0.shape[1]), dtype = mT) else: for i in idx: self.muMargLoc = mus[i] vbMng(self, "MAIN", "Building marginal model no. {} at " "{}.".format(i + 1, self.muMargLoc), 25) self.samplingEngine.resetHistory() self.trainedModel = None self.verbosity -= 5 self.samplingEngine.verbosity -= 5 RationalInterpolant.setupApprox(self) self.verbosity += 5 self.samplingEngine.verbosity += 5 self._mu0 = self._m_mu0 self._mus = self._m_selfmus self.HFEngine.parameterMap = self._m_HFEparameterMap del self._m_mu0, self._m_selfmus, self._m_HFEparameterMap if self.storeAllSamples: self.storeSamples(i + self._nmusOld) pMat, req, musA = self._localPivotedResult(pMat, req, emptyCores, musA) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] del self.muMargLoc for r in req: r.wait() self._postSetupApproxPivoted(musA, pMat, Ps, Qs, sizes) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 class RationalInterpolantPivotedGreedyNoMatch( RationalInterpolantPivotedGreedyBase, GenericPivotedGreedyApproximantNoMatch, RationalInterpolantPivotedNoMatch): """ ROM pivoted greedy rational interpolant computation for parametric problems (without pole matching). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; defaults to 'AUTO', i.e. cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', and 'LOOK_AHEAD_RECOVER'; defaults to 'LEAVE_ONE_OUT'; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. 
maximum allowed; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeightError': weight for pole matching optimization in error estimation; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasis': type of polynomial basis for pivot interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeightError: Weight for pole matching optimization in error estimation. - cutOffToleranceError: Tolerance for ignoring parasitic poles in error - estimation. 
S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasis: Type of polynomial basis for pivot interpolation. M: Degree of rational interpolant numerator. N: Degree of rational interpolant denominator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ class RationalInterpolantPivotedGreedy(RationalInterpolantPivotedGreedyBase, GenericPivotedGreedyApproximant, RationalInterpolantPivoted): """ ROM pivoted greedy rational interpolant computation for parametric problems (with pole matching). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingMode': mode for pole matching optimization; allowed values include 'NONE' and 'SHIFT'; defaults to 'NONE'; - 'sharedRatio': required ratio of marginal points to share resonance; defaults to 1.; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; defaults to 'AUTO', i.e. 
cutOffTolerance; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LEAVE_ONE_OUT', 'LOOK_AHEAD', and 'LOOK_AHEAD_RECOVER'; defaults to 'LEAVE_ONE_OUT'; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeight': weight for pole matching optimization; - 'matchingMode': mode for pole matching optimization; - 'sharedRatio': required ratio of marginal points to share resonance; - 'matchingWeightError': weight for pole matching optimization in error estimation; - - 'cutOffToleranceError': tolerance for ignoring parasitic poles - in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeight: Weight for pole matching optimization. matchingMode: Mode for pole matching optimization. sharedRatio: Required ratio of marginal points to share resonance. matchingWeightError: Weight for pole matching optimization in error estimation. - cutOffToleranceError: Tolerance for ignoring parasitic poles in error - estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. M: Degree of rational interpolant numerator. N: Degree of rational interpolant denominator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. 
maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py index 68534a5..cdb1381 100644 --- a/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py +++ b/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py @@ -1,550 +1,528 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
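Both in the greedy-marginal classes above and in the pivoted interpolants of this file, full parameter points are assembled by writing pivot coordinates into the directionPivot components and the current marginal location into the directionMarginal components (compare the muk assignments in computeSnapshots above and in _preliminaryTraining further down). The following is a minimal standalone numpy illustration of that indexing convention; the sizes and values are made up and the names are not RROMPy API.

import numpy as np

# Illustrative only: combine pivot samples with a fixed marginal value into
# full two-parameter points, mirroring muk[directionPivot] / 
# muk[directionMarginal].
directionPivot, directionMarginal = [0], [1]
musPivot = np.linspace(10., 15., 5)    # pivot (e.g. frequency) samples
muMargLoc = .5                         # current marginal sample point

mus = np.empty((len(musPivot), 2))
for k, muPk in enumerate(musPivot):
    muk = np.empty(2)
    muk[directionPivot] = muPk
    muk[directionMarginal] = muMargLoc
    mus[k] = muk
print(mus)

In the library itself this is done on parameterList objects rather than raw arrays; the loop above only illustrates how the pivot and marginal directions are interleaved.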
# from copy import deepcopy as copy import numpy as np from .generic_pivoted_approximant import (GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximant) from .gather_pivoted_approximant import gatherPivotedApproximant from rrompy.reduction_methods.standard.greedy.rational_interpolant_greedy \ import RationalInterpolantGreedy from rrompy.reduction_methods.standard.greedy.generic_greedy_approximant \ import pruneSamples from rrompy.utilities.base.types import Np1D from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.poly_fitting.polynomial import polyvander as pv from rrompy.utilities.exception_manager import RROMPyAssert, RROMPyWarning from rrompy.parameter import emptyParameterList, parameterList from rrompy.utilities.parallel import poolRank, indicesScatter, isend, recv __all__ = ['RationalInterpolantGreedyPivotedNoMatch', 'RationalInterpolantGreedyPivoted'] class RationalInterpolantGreedyPivotedBase(GenericPivotedApproximantBase, RationalInterpolantGreedy): + def __init__(self, *args, **kwargs): + self._preInit() + super().__init__(*args, **kwargs) + self._ignoreResidues = self.nparPivot > 1 + self._postInit() + @property def tModelType(self): if hasattr(self, "_temporaryPivot"): return RationalInterpolantGreedy.tModelType.fget(self) return super().tModelType - - @property - def residueTol(self): - """Value of residueTol.""" - return self._residueTol - @residueTol.setter - def residueTol(self, residueTol): - if residueTol < 0. or (residueTol > 0. and self.nparPivot > 1): - RROMPyWarning("Overriding prescribed residue tolerance to 0.") - residueTol = 0. - self._residueTol = residueTol - self._approxParameters["residueTol"] = self.residueTol def _polyvanderAuxiliary(self, mus, deg, *args): degEff = [0] * self.npar degEff[self.directionPivot[0]] = deg return pv(mus, degEff, *args) def _marginalizeMiscellanea(self, forward:bool): if forward: self._m_mu0 = copy(self.mu0) self._m_selfmus = copy(self.mus) self._m_HFEparameterMap = copy(self.HFEngine.parameterMap) self._mu0 = self.checkParameterListPivot( self.mu0(self.directionPivot)) self._mus = self.checkParameterListPivot( self.mus(self.directionPivot)) self.HFEngine.parameterMap = { "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]], "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]} else: self._mu0 = self._m_mu0 self._mus = self._m_selfmus self.HFEngine.parameterMap = self._m_HFEparameterMap del self._m_mu0, self._m_selfmus, self._m_HFEparameterMap def _marginalizeTrainedModel(self, forward:bool): if forward: del self._temporaryPivot self.trainedModel.data.mu0 = self.mu0 self.trainedModel.data.scaleFactor = [1.] 
* self.npar self.trainedModel.data.scaleFactor[self.directionPivot[0]] = ( self.scaleFactor[0]) self.trainedModel.data.parameterMap = self.HFEngine.parameterMap self._m_musUniqueCN = copy(self._musUniqueCN) musUniqueCNAux = np.zeros((self.S, self.npar), dtype = self._musUniqueCN.dtype) musUniqueCNAux[:, self.directionPivot[0]] = self._musUniqueCN(0) self._musUniqueCN = self.checkParameterList(musUniqueCNAux) self._m_derIdxs = copy(self._derIdxs) for j in range(len(self._derIdxs)): for l in range(len(self._derIdxs[j])): derjl = self._derIdxs[j][l][0] self._derIdxs[j][l] = [0] * self.npar self._derIdxs[j][l][self.directionPivot[0]] = derjl self.trainedModel.data.Q._dirPivot = self.directionPivot[0] self.trainedModel.data.P._dirPivot = self.directionPivot[0] else: self._temporaryPivot = 1 self.trainedModel.data.mu0 = self.checkParameterListPivot( self.mu0(self.directionPivot)) self.trainedModel.data.scaleFactor = self.scaleFactor self.trainedModel.data.parameterMap = { "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]], "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]} self._musUniqueCN = copy(self._m_musUniqueCN) self._derIdxs = copy(self._m_derIdxs) del self._m_musUniqueCN, self._m_derIdxs del self.trainedModel.data.Q._dirPivot del self.trainedModel.data.P._dirPivot self.trainedModel.data.npar = self.npar def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D: """Standard residual-based error estimator.""" self._marginalizeMiscellanea(True) setupOK = self.setupApproxLocal() self._marginalizeMiscellanea(False) if setupOK > 0: err = np.empty(len(mus)) err[:] = np.nan if not return_max: return err return err, [- setupOK], np.nan self._marginalizeTrainedModel(True) errRes = super().errorEstimator(mus, return_max) self._marginalizeTrainedModel(False) return errRes def _preliminaryTraining(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") self._S = self._setSampleBatch(self.S) self.resetSamples() self.samplingEngine.scaleFactor = self.scaleFactorDer musPivot = self.trainSetGenerator.generatePoints(self.S) while len(musPivot) > self.S: musPivot.pop() muTestPivot = self.samplerPivot.generatePoints(self.nTestPoints, False) idxPop = pruneSamples(self.HFEngine.mapParameterList(muTestPivot, idx = self.directionPivot), self.HFEngine.mapParameterList(musPivot, idx = self.directionPivot), 1e-10 * self.scaleFactorPivot[0]) self._mus = emptyParameterList() self.mus.reset((self.S, self.npar + len(self.musMargLoc))) muTestBase = emptyParameterList() muTestBase.reset((len(muTestPivot), self.npar + len(self.musMargLoc))) for k in range(self.S): muk = np.empty_like(self.mus[0]) muk[self.directionPivot] = musPivot[k] muk[self.directionMarginal] = self.musMargLoc self.mus[k] = muk for k in range(len(muTestPivot)): muk = np.empty_like(muTestBase[0]) muk[self.directionPivot] = muTestPivot[k] muk[self.directionMarginal] = self.musMargLoc muTestBase[k] = muk muTestBase.pop(idxPop) muLast = copy(self.mus[-1]) self.mus.pop() if len(self.mus) > 0: vbMng(self, "MAIN", ("Adding first {} sample point{} at {} to training " "set.").format(self.S - 1, "" + "s" * (self.S > 2), self.mus), 3) self.samplingEngine.iterSample(self.mus) self._S = len(self.mus) self._approxParameters["S"] = self.S self.muTest = parameterList(muTestBase) self.muTest.append(muLast) self.M, self.N = ("AUTO",) * 2 def setupApprox(self, *args, **kwargs) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 
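        # The remainder of this method: compute the scale factors, draw
        # musMarginal from samplerMarginal (trimmed to SMarginal), and scatter
        # the marginal indices over the available cores via indicesScatter;
        # cores with no indices idle and only receive the projection-matrix
        # size and dtypes. Each active core temporarily switches to the pivot
        # scale factor, loops over its marginal points, runs the univariate
        # greedy rational interpolant at that fixed marginal location, and
        # accumulates the local projection matrix together with the
        # numerators Ps and denominators Qs. Finally, gatherPivotedApproximant
        # merges the per-marginal pieces, Psupp stores the column offset of
        # each snapshot block, and _poleMatching followed by
        # _finalizeMarginalization completes the trained model.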
RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) self.computeScaleFactor() self._musMarginal = self.samplerMarginal.generatePoints(self.SMarginal) while len(self.musMarginal) > self.SMarginal: self.musMarginal.pop() S0 = copy(self.S) idx, sizes = indicesScatter(len(self.musMarginal), return_sizes = True) pMat, Ps, Qs, mus = None, [], [], None req, emptyCores = [], np.where(np.logical_not(sizes))[0] if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 25) if self.storeAllSamples: self.storeSamples() pL, pT, mT = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = pT) mus = np.empty((0, self.mu0.shape[1]), dtype = mT) else: _scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 for i in idx: self.musMargLoc = self.musMarginal[i] vbMng(self, "MAIN", "Building marginal model no. {} at {}.".format(i + 1, self.musMargLoc), 5) self.samplingEngine.resetHistory() self.trainedModel = None self.verbosity -= 5 self.samplingEngine.verbosity -= 5 super().setupApprox(*args, **kwargs) self.verbosity += 5 self.samplingEngine.verbosity += 5 if self.storeAllSamples: self.storeSamples(i) if pMat is None: mus = copy(self.samplingEngine.mus.data) pMat = copy(self.samplingEngine.projectionMatrix) if i == 0: for dest in emptyCores: req += [isend((len(pMat), pMat.dtype, mus.dtype), dest = dest, tag = dest)] else: mus = np.vstack((mus, self.samplingEngine.mus.data)) pMat = np.hstack((pMat, self.samplingEngine.projectionMatrix)) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] self._S = S0 del self._temporaryPivot, self.musMargLoc self.scaleFactor = _scaleFactorOldPivot for r in req: r.wait() pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs, mus, sizes, self.polybasis) self._mus = self.checkParameterList(mus) Psupp = np.append(0, np.cumsum(nsamples)) self._setupTrainedModel(pMat, forceNew = True) self.trainedModel.data.Qs, self.trainedModel.data.Ps = Qs, Ps self.trainedModel.data.Psupp = list(Psupp[: -1]) self._poleMatching() self._finalizeMarginalization() vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 class RationalInterpolantGreedyPivotedNoMatch( RationalInterpolantGreedyPivotedBase, GenericPivotedApproximantNoMatch): """ ROM pivoted rational interpolant (without pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. 
Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'trainSetGenerator': training sample points generator; defaults to sampler; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE'; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for pivot interpolation; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'trainSetGenerator': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. 
scaleFactorDer: Scaling factors for derivative computation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. trainSetGenerator: training sample points generator. errorEstimatorKind: kind of error estimator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ class RationalInterpolantGreedyPivoted(RationalInterpolantGreedyPivotedBase, GenericPivotedApproximant): """ ROM pivoted rational interpolant (with pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingMode': mode for pole matching optimization; allowed values include 'NONE' and 'SHIFT'; defaults to 'NONE'; - 'sharedRatio': required ratio of marginal points to share resonance; defaults to 1.; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. 
maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'trainSetGenerator': training sample points generator; defaults to sampler; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE'; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeight': weight for pole matching optimization; - 'matchingMode': mode for pole matching optimization; - 'sharedRatio': required ratio of marginal points to share resonance; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. 
- 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'trainSetGenerator': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeight: Weight for pole matching optimization. matchingMode: Mode for pole matching optimization. sharedRatio: Required ratio of marginal points to share resonance. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. trainSetGenerator: training sample points generator. errorEstimatorKind: kind of error estimator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. 
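    Example:
        Minimal configuration sketch. The HF engine `solver`, the parameter
        ranges, and mu0 below are placeholders, and 'cutOffTolerance' and
        'residueTol' are no longer recognized approxParameters keys.

            from rrompy.parameter.parameter_sampling import (
                                                      QuadratureSampler as QS)

            ks, ts = [10., 15.], [.4, .6]  # assumed pivot/marginal ranges
            params = {'S':10, 'SMarginal':7, 'POD':True,
                      'polybasis':"LEGENDRE", 'matchingWeight':1.,
                      'greedyTol':1e-3, 'errorEstimatorKind':"LOOK_AHEAD_RES",
                      'samplerPivot':QS(ks, "UNIFORM"),
                      'trainSetGenerator':QS(ks, "CHEBYSHEV"),
                      'samplerMarginal':QS(ts, "UNIFORM")}
            # solver is a placeholder two-parameter HF engine instance
            approx = RationalInterpolantGreedyPivoted([0], solver,
                                        mu0 = [12.5, .5], approx_state = True,
                                        approxParameters = params)
            approx.setupApprox()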
""" diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py index 0490d3e..2e55405 100644 --- a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py +++ b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py @@ -1,482 +1,455 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from .generic_pivoted_approximant import (GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximant) from .gather_pivoted_approximant import gatherPivotedApproximant from rrompy.reduction_methods.standard.rational_interpolant import ( RationalInterpolant) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.hash_derivative import nextDerivativeIndices from rrompy.utilities.exception_manager import RROMPyAssert, RROMPyWarning from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, indicesScatter, isend, recv __all__ = ['RationalInterpolantPivotedNoMatch', 'RationalInterpolantPivoted'] class RationalInterpolantPivotedBase(GenericPivotedApproximantBase, RationalInterpolant): def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(toBeExcluded = ["polydegreetype"]) super().__init__(*args, **kwargs) + self._ignoreResidues = self.nparPivot > 1 self._postInit() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. if self._scaleFactorDer == "AUTO": return self.scaleFactorPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif hasattr(scaleFactorDer, "__len__"): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def polydegreetype(self): """Value of polydegreetype.""" return "TOTAL" @polydegreetype.setter def polydegreetype(self, polydegreetype): RROMPyWarning(("polydegreetype is used just to simplify inheritance, " "and its value cannot be changed from 'TOTAL'.")) - - @property - def residueTol(self): - """Value of residueTol.""" - return self._residueTol - @residueTol.setter - def residueTol(self, residueTol): - if residueTol < 0. or (residueTol > 0. and self.nparPivot > 1): - RROMPyWarning("Overriding prescribed residue tolerance to 0.") - residueTol = 0. 
- self._residueTol = residueTol - self._approxParameters["residueTol"] = self.residueTol - + def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" RROMPyAssert(self._mode, message = "Cannot setup interpolation indices.") if (self._musUniqueCN is None or len(self._reorder) != len(self.musPivot)): try: muPC = self.trainedModel.centerNormalizePivot(self.musPivot) except: muPC = self.trainedModel.centerNormalize(self.musPivot) self._musUniqueCN, musIdxsTo, musIdxs, musCount = (muPC.unique( return_index = True, return_inverse = True, return_counts = True)) self._musUnique = self.musPivot[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.nparPivot, cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt def setupApprox(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) self.computeScaleFactor() self.resetSamples() self.samplingEngine.scaleFactor = self.scaleFactorDer self.musPivot = self.samplerPivot.generatePoints(self.S) while len(self.musPivot) > self.S: self.musPivot.pop() self._musMarginal = self.samplerMarginal.generatePoints(self.SMarginal) while len(self.musMarginal) > self.SMarginal: self.musMarginal.pop() self._mus = emptyParameterList() self.mus.reset((self.S * self.SMarginal, self.HFEngine.npar)) for j, muMarg in enumerate(self.musMarginal): for k in range(j * self.S, (j + 1) * self.S): muk = np.empty_like(self.mus[0]) muk[self.directionPivot] = self.musPivot[k - j * self.S] muk[self.directionMarginal] = muMarg self.mus[k] = muk N0 = copy(self.N) self._setupTrainedModel(np.zeros((0, 0)), forceNew = True) idx, sizes = indicesScatter(len(self.musMarginal), return_sizes = True) pMat, Ps, Qs = None, [], [] req, emptyCores = [], np.where(np.logical_not(sizes))[0] if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 30) if self.storeAllSamples: self.storeSamples() pL, pT = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = pT) else: _scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 for i in idx: vbMng(self, "MAIN", "Building marginal model no. 
{} at {}.".format(i + 1, self.musMarginal[i]), 5) vbMng(self, "INIT", "Starting computation of snapshots.", 10) self.samplingEngine.resetHistory() self.samplingEngine.iterSample( self.mus[self.S * i : self.S * (i + 1)]) vbMng(self, "DEL", "Done computing snapshots.", 10) self.verbosity -= 5 self.samplingEngine.verbosity -= 5 self._setupRational(self._setupDenominator()) self.verbosity += 5 self.samplingEngine.verbosity += 5 if self.storeAllSamples: self.storeSamples(i) if pMat is None: pMat = copy(self.samplingEngine.projectionMatrix) if i == 0: for dest in emptyCores: req += [isend((len(pMat), pMat.dtype), dest = dest, tag = dest)] else: pMat = np.hstack((pMat, self.samplingEngine.projectionMatrix)) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] del self.trainedModel.data.Q, self.trainedModel.data.P self.N = N0 del self._temporaryPivot self.scaleFactor = _scaleFactorOldPivot for r in req: r.wait() pMat, Ps, Qs, _, _ = gatherPivotedApproximant(pMat, Ps, Qs, self.mus.data, sizes, self.polybasis, False) self._setupTrainedModel(pMat) self.trainedModel.data.Qs, self.trainedModel.data.Ps = Qs, Ps Psupp = np.arange(0, len(self.musMarginal) * self.S, self.S) self.trainedModel.data.Psupp = list(Psupp) self._poleMatching() self._finalizeMarginalization() vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 class RationalInterpolantPivotedNoMatch(RationalInterpolantPivotedBase, GenericPivotedApproximantNoMatch): """ ROM pivoted rational interpolant (without pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. 
directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for pivot interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. 
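    Example:
        Minimal configuration sketch. The HF engine `solver`, the parameter
        ranges, and mu0 are placeholders; 'M' and 'N' are left at their
        'AUTO' defaults, and 'cutOffTolerance' and 'residueTol' are no
        longer recognized approxParameters keys.

            from rrompy.parameter.parameter_sampling import (
                                                      QuadratureSampler as QS)

            ks, ts = [10., 15.], [.4, .6]  # assumed pivot/marginal ranges
            params = {'S':15, 'SMarginal':9, 'POD':True,
                      'polybasis':"CHEBYSHEV",
                      'radialDirectionalWeightsMarginal': 100.,
                      'samplerPivot':QS(ks, "CHEBYSHEV"),
                      'samplerMarginal':QS(ts, "UNIFORM")}
            # solver is a placeholder two-parameter HF engine instance
            approx = RationalInterpolantPivotedNoMatch([0], solver,
                                        mu0 = [12.5, .5], approx_state = True,
                                        approxParameters = params)
            approx.setupApprox()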
""" class RationalInterpolantPivoted(RationalInterpolantPivotedBase, GenericPivotedApproximant): """ ROM pivoted rational interpolant (with pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - 'matchingMode': mode for pole matching optimization; allowed values include 'NONE' and 'SHIFT'; defaults to 'NONE'; - 'sharedRatio': required ratio of marginal points to share resonance; defaults to 1.; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpRcondMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for pivot interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. 
musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchingWeight': weight for pole matching optimization; - 'matchingMode': mode for pole matching optimization; - 'sharedRatio': required ratio of marginal points to share resonance; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpRcondMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for pivot interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. matchingWeight: Weight for pole matching optimization. matchingMode: Mode for pole matching optimization. sharedRatio: Required ratio of marginal points to share resonance. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for pivot interpolation. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for pivot parameter values. 
muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ diff --git a/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py b/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py index a8d3a54..a27e805 100644 --- a/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py +++ b/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py @@ -1,543 +1,535 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from rrompy.hfengines.base.linear_affine_engine import checkIfAffine from .generic_greedy_approximant import GenericGreedyApproximant from rrompy.utilities.poly_fitting.polynomial import (polybases, polyfitname, PolynomialInterpolator as PI, polyvander) from rrompy.utilities.numerical import dot from rrompy.utilities.numerical.degree import totalDegreeN from rrompy.utilities.expression import expressionEvaluator from rrompy.reduction_methods.standard import RationalInterpolant from rrompy.utilities.base.types import Np1D, Tuple, paramVal, List from rrompy.utilities.base.verbosity_depth import (verbosityManager as vbMng, getVerbosityDepth, setVerbosityDepth) from rrompy.utilities.poly_fitting import customFit from rrompy.utilities.exception_manager import (RROMPyWarning, RROMPyException, RROMPyAssert, RROMPy_FRAGILE) from rrompy.sampling import sampleList, emptySampleList __all__ = ['RationalInterpolantGreedy'] class RationalInterpolantGreedy(GenericGreedyApproximant, RationalInterpolant): """ ROM greedy rational interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. 
Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': number of starting training points; - 'sampler': sample point generator; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'trainSetGenerator': training sample points generator; defaults to sampler; - 'polybasis': type of basis for interpolation; defaults to 'MONOMIAL'; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', 'LOOK_AHEAD_OUTPUT', and 'NONE'; defaults to 'NONE'; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults and must be True. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'trainSetGenerator': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for interpolation; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. S: number of test points. sampler: Sample point generator. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. trainSetGenerator: training sample points generator. robustTol: tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. errorEstimatorKind: kind of error estimator. functionalSolve: Strategy for minimization of denominator functional. 
interpRcond: tolerance for interpolation. robustTol: tolerance for robust rational denominator management. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. estimatorNormEngine: Engine for estimator norm computation. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ _allowedEstimatorKinds = ["AFFINE", "DISCREPANCY", "LOOK_AHEAD", "LOOK_AHEAD_RES", "LOOK_AHEAD_OUTPUT", "NONE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["errorEstimatorKind"], ["DISCREPANCY"], toBeExcluded = ["M", "N", "polydegreetype", "radialDirectionalWeights"]) super().__init__(*args, **kwargs) if not self.approx_state and self.errorEstimatorKind not in [ "LOOK_AHEAD", "LOOK_AHEAD_OUTPUT", "NONE"]: raise RROMPyException(("Must compute greedy approximation of " "state, unless error estimator allows " "otherwise.")) self._postInit() @property def approx_state(self): """Value of approx_state.""" return self._approx_state @approx_state.setter def approx_state(self, approx_state): RationalInterpolant.approx_state.fset(self, approx_state) if (not self.approx_state and hasattr(self, "_errorEstimatorKind") and self.errorEstimatorKind not in [ "LOOK_AHEAD", "LOOK_AHEAD_OUTPUT", "NONE"]): raise RROMPyException(("Must compute greedy approximation of " "state, unless error estimator allows " "otherwise.")) @property def E(self): """Value of E.""" self._E = self.sampleBatchIdx - 1 return self._E @E.setter def E(self, E): RROMPyWarning(("E is used just to simplify inheritance, and its value " "cannot be changed from that of sampleBatchIdx - 1.")) def _setMAuto(self): self.M = self.E def _setNAuto(self): self.N = self.E @property def polydegreetype(self): """Value of polydegreetype.""" return "TOTAL" @polydegreetype.setter def polydegreetype(self, polydegreetype): RROMPyWarning(("polydegreetype is used just to simplify inheritance, " "and its value cannot be changed from 'TOTAL'.")) @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in polybases: raise RROMPyException("Sample type not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def errorEstimatorKind(self): """Value of errorEstimatorKind.""" return self._errorEstimatorKind @errorEstimatorKind.setter def errorEstimatorKind(self, errorEstimatorKind): errorEstimatorKind = errorEstimatorKind.upper() if errorEstimatorKind not in self._allowedEstimatorKinds: RROMPyWarning(("Error estimator kind not recognized. 
Overriding " "to 'NONE'.")) errorEstimatorKind = "NONE" self._errorEstimatorKind = errorEstimatorKind self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind if (self.errorEstimatorKind not in [ "LOOK_AHEAD", "LOOK_AHEAD_OUTPUT", "NONE"] and hasattr(self, "_approx_state") and not self.approx_state): raise RROMPyException(("Must compute greedy approximation of " "state, unless error estimator allows " "otherwise.")) def _polyvanderAuxiliary(self, mus, deg, *args): return polyvander(mus, deg, *args) def getErrorEstimatorDiscrepancy(self, mus:Np1D) -> Np1D: """Discrepancy-based residual estimator.""" checkIfAffine(self.HFEngine, "apply discrepancy-based error estimator") mus = self.checkParameterList(mus) muCTest = self.trainedModel.centerNormalize(mus) tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0 QTest = self.trainedModel.getQVal(mus) QTzero = np.where(QTest == 0.)[0] if len(QTzero) > 0: RROMPyWarning(("Adjusting estimator to avoid division by " "numerically zero denominator.")) QTest[QTzero] = np.finfo(np.complex).eps / (1. + self.N) self.HFEngine.buildA() self.HFEngine.buildb() nAs, nbs = self.HFEngine.nAs, self.HFEngine.nbs muTrainEff = self.HFEngine.mapParameterList(self.mus) muTestEff = self.HFEngine.mapParameterList(mus) PTrain = self.trainedModel.getPVal(self.mus).data.T QTrain = self.trainedModel.getQVal(self.mus) QTzero = np.where(QTrain == 0.)[0] if len(QTzero) > 0: RROMPyWarning(("Adjusting estimator to avoid division by " "numerically zero denominator.")) QTrain[QTzero] = np.finfo(np.complex).eps / (1. + self.N) PTest = self.trainedModel.getPVal(mus).data self.trainedModel.verbosity = tMverb radiusAbTrain = np.empty((self.S, nAs * self.S + nbs), dtype = np.complex) radiusA = np.empty((self.S, nAs, len(mus)), dtype = np.complex) radiusb = np.empty((nbs, len(mus)), dtype = np.complex) for j, thA in enumerate(self.HFEngine.thAs): idxs = j * self.S + np.arange(self.S) radiusAbTrain[:, idxs] = expressionEvaluator(thA[0], muTrainEff, (self.S, 1)) * PTrain radiusA[:, j] = PTest * expressionEvaluator(thA[0], muTestEff, (len(mus),)) for j, thb in enumerate(self.HFEngine.thbs): idx = nAs * self.S + j radiusAbTrain[:, idx] = QTrain * expressionEvaluator(thb[0], muTrainEff, (self.S,)) radiusb[j] = QTest * expressionEvaluator(thb[0], muTestEff, (len(mus),)) QRHSNorm2 = self._affineResidualMatricesContraction(radiusb) vanTrain = self._polyvanderAuxiliary(self._musUniqueCN, self.E, self.polybasis0, self._derIdxs, self._reorder) interpPQ = customFit(vanTrain, radiusAbTrain, rcond = self.interpRcond) vanTest = self._polyvanderAuxiliary(muCTest, self.E, self.polybasis0) DradiusAb = vanTest.dot(interpPQ) radiusA = (radiusA - DradiusAb[:, : - nbs].reshape(len(mus), -1, self.S).T) radiusb = radiusb - DradiusAb[:, - nbs :].T ff, Lf, LL = self._affineResidualMatricesContraction(radiusb, radiusA) err = np.abs((LL - 2. 
* np.real(Lf) + ff) / QRHSNorm2) ** .5 return err def getErrorEstimatorLookAhead(self, mus:Np1D, what : str = "") -> Tuple[Np1D, List[int]]: """Residual estimator based on look-ahead idea.""" errTest, QTest, idxMaxEst = self._EIMStep(mus) _approx_state_old = self.approx_state if what == "OUTPUT" and _approx_state_old: self._approx_state = False self.initEstimatorNormEngine() self._approx_state = _approx_state_old mu_muTestSample = mus[idxMaxEst] app_muTestSample = self.getApproxReduced(mu_muTestSample) if self._mode == RROMPy_FRAGILE: if what == "RES" and not self.HFEngine.isCEye: raise RROMPyException(("Cannot compute LOOK_AHEAD_RES " "estimator in fragile mode for " "non-scalar C.")) app_muTestSample = dot(self.trainedModel.data.projMat[:, : app_muTestSample.shape[0]], app_muTestSample) else: app_muTestSample = dot(self.samplingEngine.projectionMatrix, app_muTestSample) if what == "RES": errmu = self.HFEngine.residual(mu_muTestSample, app_muTestSample, post_c = False) solmu = self.HFEngine.residual(mu_muTestSample, None, post_c = False) else: for j, mu in enumerate(mu_muTestSample): uEx = self.samplingEngine.nextSample(mu) if j == 0: solmu = emptySampleList() solmu.reset((len(uEx), len(mu_muTestSample)), dtype = uEx.dtype) solmu[j] = uEx if what == "OUTPUT" and self.approx_state: solmu = sampleList(self.HFEngine.applyC(solmu)) app_muTestSample = sampleList(self.HFEngine.applyC( app_muTestSample)) errmu = solmu - app_muTestSample errsamples = (self.estimatorNormEngine.norm(errmu) / self.estimatorNormEngine.norm(solmu)) musT = copy(self.mus) musT.append(mu_muTestSample) musT = self.trainedModel.centerNormalize(musT) musC = self.trainedModel.centerNormalize(mus) errT = np.zeros((len(musT), len(mu_muTestSample)), dtype = np.complex) errT[np.arange(len(self.mus), len(musT)), np.arange(len(mu_muTestSample))] = errsamples * QTest[idxMaxEst] vanT = self._polyvanderAuxiliary(musT, self.E + 1, self.polybasis) fitOut = customFit(vanT, errT, full = True, rcond = self.interpRcond) vbMng(self, "MAIN", ("Fitting {} samples with degree {} through {}... Conditioning " "of LS system: {:.4e}.").format(len(vanT), self.E + 1, polyfitname(self.polybasis), fitOut[1][2][0] / fitOut[1][2][-1]), 15) vanC = self._polyvanderAuxiliary(musC, self.E + 1, self.polybasis) err = np.sum(np.abs(vanC.dot(fitOut[0])), axis = -1) / QTest return err, idxMaxEst def getErrorEstimatorNone(self, mus:Np1D) -> Np1D: """EIM-based residual estimator.""" err = np.max(self._EIMStep(mus, True), axis = 1) err *= self.greedyTol / np.mean(err) return err def _EIMStep(self, mus:Np1D, only_one : bool = False) -> Tuple[Np1D, Np1D, List[int]]: """Residual estimator based on look-ahead idea.""" mus = self.checkParameterList(mus) tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0 QTest = self.trainedModel.getQVal(mus) QTzero = np.where(QTest == 0.)[0] if len(QTzero) > 0: RROMPyWarning(("Adjusting estimator to avoid division by " "numerically zero denominator.")) QTest[QTzero] = np.finfo(np.complex).eps / (1. 
+ self.N) QTest = np.abs(QTest) muCTest = self.trainedModel.centerNormalize(mus) muCTrain = self.trainedModel.centerNormalize(self.mus) self.trainedModel.verbosity = tMverb vanTest = self._polyvanderAuxiliary(muCTest, self.E, self.polybasis) vanTestNext = self._polyvanderAuxiliary(muCTest, self.E + 1, self.polybasis)[:, vanTest.shape[1] :] idxsTest = np.arange(vanTestNext.shape[1]) basis = np.zeros((len(idxsTest), 0), dtype = float) idxMaxEst = [] while len(idxsTest) > 0: vanTrial = self._polyvanderAuxiliary(muCTrain, self.E, self.polybasis) vanTrialNext = self._polyvanderAuxiliary(muCTrain, self.E + 1, self.polybasis)[:, vanTrial.shape[1] :] vanTrial = np.hstack((vanTrial, vanTrialNext.dot(basis).reshape( len(vanTrialNext), basis.shape[1]))) valuesTrial = vanTrialNext[:, idxsTest] vanTestEff = np.hstack((vanTest, vanTestNext.dot(basis).reshape( len(vanTestNext), basis.shape[1]))) vanTestNextEff = vanTestNext[:, idxsTest] coeffTest = np.linalg.solve(vanTrial, valuesTrial) errTest = (np.abs(vanTestNextEff - vanTestEff.dot(coeffTest)) / np.expand_dims(QTest, 1)) if only_one: return errTest idxMaxErr = np.unravel_index(np.argmax(errTest), errTest.shape) idxMaxEst += [idxMaxErr[0]] muCTrain.append(muCTest[idxMaxErr[0]]) basis = np.pad(basis, [(0, 0), (0, 1)], "constant") basis[idxsTest[idxMaxErr[1]], -1] = 1. idxsTest = np.delete(idxsTest, idxMaxErr[1]) return errTest, QTest, idxMaxEst def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D: """Standard residual-based error estimator.""" setupOK = self.setupApproxLocal() if setupOK > 0: err = np.empty(len(mus)) err[:] = np.nan if not return_max: return err return err, [- setupOK], np.nan mus = self.checkParameterList(mus) vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format(mus), 10) if self.errorEstimatorKind == "AFFINE": err = self.getErrorEstimatorAffine(mus) else: self._setupInterpolationIndices() if self.errorEstimatorKind == "DISCREPANCY": err = self.getErrorEstimatorDiscrepancy(mus) elif self.errorEstimatorKind[: 10] == "LOOK_AHEAD": err, idxMaxEst = self.getErrorEstimatorLookAhead(mus, self.errorEstimatorKind[11 :]) else: #if self.errorEstimatorKind == "NONE": err = self.getErrorEstimatorNone(mus) vbMng(self.trainedModel, "DEL", "Done evaluating error estimator", 10) if not return_max: return err if self.errorEstimatorKind[: 10] != "LOOK_AHEAD": idxMaxEst = np.empty(self.sampleBatchSize, dtype = int) errCP = copy(err) for j in range(self.sampleBatchSize): k = np.argmax(errCP) idxMaxEst[j] = k if j + 1 < self.sampleBatchSize: musZero = self.trainedModel.centerNormalize(mus, mus[k]) errCP *= np.linalg.norm(musZero.data, axis = 1) return err, idxMaxEst, err[idxMaxEst] def plotEstimator(self, *args, **kwargs): super().plotEstimator(*args, **kwargs) if self.errorEstimatorKind == "NONE": vbMng(self, "MAIN", ("Warning! 
Error estimator has been arbitrarily normalized " "before plotting."), 15) def greedyNextSample(self, *args, **kwargs) -> Tuple[Np1D, int, float, paramVal]: """Compute next greedy snapshot of solution map.""" RROMPyAssert(self._mode, message = "Cannot add greedy sample.") self.sampleBatchIdx += 1 self.sampleBatchSize = totalDegreeN(self.npar - 1, self.sampleBatchIdx) err, muidx, maxErr, muNext = super().greedyNextSample(*args, **kwargs) if maxErr is not None and (np.any(np.isnan(maxErr)) or np.any(np.isinf(maxErr))): self.sampleBatchIdx -= 1 self.sampleBatchSize = totalDegreeN(self.npar - 1, self.sampleBatchIdx) if (self.errorEstimatorKind == "NONE" and not np.isnan(maxErr) and not np.isinf(maxErr)): maxErr = None return err, muidx, maxErr, muNext def _setSampleBatch(self, maxS:int): self.sampleBatchIdx, self.sampleBatchSize, S = -1, 0, 0 nextBatchSize = 1 while S + nextBatchSize <= maxS: self.sampleBatchIdx += 1 self.sampleBatchSize = nextBatchSize S += self.sampleBatchSize nextBatchSize = totalDegreeN(self.npar - 1, self.sampleBatchIdx + 1) return S def _preliminaryTraining(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if self.samplingEngine.nsamples > 0: return self._S = self._setSampleBatch(self.S) super()._preliminaryTraining() self.M, self.N = ("AUTO",) * 2 def setupApproxLocal(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") self.verbosity -= 10 vbMng(self, "INIT", "Setting up local approximant.", 5) pMat = self.samplingEngine.projectionMatrix if self.trainedModel is not None: pMat = pMat[:, len(self.trainedModel.data.mus) :] self._setupTrainedModel(pMat, self.trainedModel is not None) self.catchInstability = 2 vbDepth = getVerbosityDepth() unstable = 0 if self.E > 0: try: Q = self._setupDenominator() except RROMPyException as RE: if RE.critical: raise RE from None setVerbosityDepth(vbDepth) RROMPyWarning("Downgraded {}: {}".format(RE.__class__.__name__, RE)) unstable = 1 else: Q = PI() Q.coeffs = np.ones((1,) * self.npar, dtype = np.complex) Q.npar = self.npar Q.polybasis = self.polybasis if not unstable: self.trainedModel.data.Q = copy(Q) try: P = copy(self._setupNumerator()) except RROMPyException as RE: if RE.critical: raise RE from None setVerbosityDepth(vbDepth) RROMPyWarning("Downgraded {}: {}".format(RE.__class__.__name__, RE)) unstable = 1 if not unstable: self.trainedModel.data.P = copy(P) self.trainedModel.data.approxParameters = copy( self.approxParameters) vbMng(self, "DEL", "Done setting up local approximant.", 5) self.catchInstability = 0 self.verbosity += 10 return unstable def setupApprox(self, plotEst : str = "NONE") -> int: val = super().setupApprox(plotEst) if val == 0: self._setupRational(self.trainedModel.data.Q, self.trainedModel.data.P) self.trainedModel.data.approxParameters = copy( self.approxParameters) return val def loadTrainedModel(self, filename:str): """Load trained reduced model from file.""" super().loadTrainedModel(filename) self._setSampleBatch(self.S + 1) diff --git a/rrompy/reduction_methods/standard/rational_interpolant.py b/rrompy/reduction_methods/standard/rational_interpolant.py index b2c9377..82a254e 100644 --- a/rrompy/reduction_methods/standard/rational_interpolant.py +++ b/rrompy/reduction_methods/standard/rational_interpolant.py @@ -1,827 +1,769 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from scipy.linalg import eigvals from rrompy.reduction_methods.base import checkRobustTolerance from .generic_standard_approximant import GenericStandardApproximant from rrompy.utilities.poly_fitting.polynomial import ( polybases as ppb, polyfitname, polyvander as pvP, polyTimes, polyTimesTable, vanderInvTable, PolynomialInterpolator as PI, PolynomialInterpolatorNodal as PIN) from rrompy.utilities.poly_fitting.heaviside import rational2heaviside from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, sampList, interpEng) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import pseudoInverse, dot, potential from rrompy.utilities.numerical.hash_derivative import nextDerivativeIndices from rrompy.utilities.numerical.degree import (reduceDegreeN, degreeTotalToFull, fullDegreeMaxMask, totalDegreeMaxMask) from rrompy.solver import Np2DLikeGramian from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) __all__ = ['RationalInterpolant'] class RationalInterpolant(GenericStandardApproximant): """ ROM rational interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'polydegreetype': type of polynomial degree; defaults to 'TOTAL'; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'NODAL', 'LOEWNER', and 'BARYCENTRIC'; defaults to 'NORM'; - 'interpRcond': tolerance for interpolation; defaults to None; - 'robustTol': tolerance for robust rational denominator - management; defaults to 0; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - defaults to np.inf; - - 'residueTol': tolerance for residue elimination; defaults to 0., - i.e. no bad residues. + management; defaults to 0. Defaults to empty dict. approx_state(optional): Whether to approximate state. Defaults to False. 
verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': whether to compute POD of snapshots; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'polydegreetype': type of polynomial degree; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust rational denominator - management; - - 'cutOffTolerance': tolerance for ignoring parasitic poles; - - 'residueTol': tolerance for residue elimination. + management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. approx_state: Whether to approximate state. verbosity: Verbosity level. POD: Whether to compute POD of snapshots. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. polybasis: type of polynomial basis for interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. polydegreetype: Type of polynomial degree. radialDirectionalWeights: Radial basis weights for interpolant numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. functionalSolve: Strategy for minimization of denominator functional. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust rational denominator management. - cutOffTolerance: Tolerance for ignoring parasitic poles. - residueTol: Tolerance for residue elimination. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. 
""" def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["polybasis", "M", "N", "polydegreetype", "radialDirectionalWeights", "radialDirectionalWeightsAdapt", "functionalSolve", "interpRcond", - "robustTol", "cutOffTolerance", - "residueTol"], - ["MONOMIAL", "AUTO", "AUTO", "TOTAL", 1., - [-1., -1.], "NORM", -1, 0., np.inf, 0.]) + "robustTol"], ["MONOMIAL", "AUTO", "AUTO", + "TOTAL", 1., [-1., -1.], + "NORM", -1, 0.]) super().__init__(*args, **kwargs) self.catchInstability = 0 self._postInit() @property def tModelType(self): from .trained_model.trained_model_rational import TrainedModelRational return TrainedModelRational @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in ppb + rbpb: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def polybasis0(self): if "_" in self.polybasis: return self.polybasis.split("_")[0] return self.polybasis @property def functionalSolve(self): """Value of functionalSolve.""" return self._functionalSolve @functionalSolve.setter def functionalSolve(self, functionalSolve): try: functionalSolve = functionalSolve.upper().strip().replace(" ","") if functionalSolve not in ["NORM", "DOMINANT", "NODAL", "LOEWNER", "BARYCENTRIC"]: raise RROMPyException(("Prescribed functionalSolve not " "recognized.")) self._functionalSolve = functionalSolve except: RROMPyWarning(("Prescribed functionalSolve not recognized. " "Overriding to 'NORM'.")) self._functionalSolve = "NORM" self._approxParameters["functionalSolve"] = self.functionalSolve @property def interpRcond(self): """Value of interpRcond.""" return self._interpRcond @interpRcond.setter def interpRcond(self, interpRcond): self._interpRcond = interpRcond self._approxParameters["interpRcond"] = self.interpRcond @property def radialDirectionalWeights(self): """Value of radialDirectionalWeights.""" return self._radialDirectionalWeights @radialDirectionalWeights.setter def radialDirectionalWeights(self, radialDirectionalWeights): if hasattr(radialDirectionalWeights, "__len__"): radialDirectionalWeights = list(radialDirectionalWeights) else: radialDirectionalWeights = [radialDirectionalWeights] self._radialDirectionalWeights = radialDirectionalWeights self._approxParameters["radialDirectionalWeights"] = ( self.radialDirectionalWeights) @property def radialDirectionalWeightsAdapt(self): """Value of radialDirectionalWeightsAdapt.""" return self._radialDirectionalWeightsAdapt @radialDirectionalWeightsAdapt.setter def radialDirectionalWeightsAdapt(self, radialDirectionalWeightsAdapt): self._radialDirectionalWeightsAdapt = radialDirectionalWeightsAdapt self._approxParameters["radialDirectionalWeightsAdapt"] = ( self.radialDirectionalWeightsAdapt) @property def M(self): """Value of M.""" return self._M @M.setter def M(self, M): if isinstance(M, str): M = M.strip().replace(" ","") if "-" not in M: M = M + "-0" self._M_isauto, self._M_shift = True, int(M.split("-")[-1]) M = 0 if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M def _setMAuto(self): self.M = max(0, reduceDegreeN(self.S, self.S, self.npar, self.polydegreetype) - self._M_shift) vbMng(self, "MAIN", "Automatically 
setting M to {}.".format(self.M), 25) @property def N(self): """Value of N.""" return self._N @N.setter def N(self, N): if isinstance(N, str): N = N.strip().replace(" ","") if "-" not in N: N = N + "-0" self._N_isauto, self._N_shift = True, int(N.split("-")[-1]) N = 0 if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N def _setNAuto(self): self.N = max(0, reduceDegreeN(self.S, self.S, self.npar, self.polydegreetype) - self._N_shift) vbMng(self, "MAIN", "Automatically setting N to {}.".format(self.N), 25) @property def polydegreetype(self): """Value of polydegreetype.""" return self._polydegreetype @polydegreetype.setter def polydegreetype(self, polydegreetype): try: polydegreetype = polydegreetype.upper().strip().replace(" ","") if polydegreetype not in ["TOTAL", "FULL"]: raise RROMPyException(("Prescribed polydegreetype not " "recognized.")) self._polydegreetype = polydegreetype except: RROMPyWarning(("Prescribed polydegreetype not recognized. " "Overriding to 'TOTAL'.")) self._polydegreetype = "TOTAL" self._approxParameters["polydegreetype"] = self.polydegreetype @property def robustTol(self): """Value of tolerance for robust rational denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol - - @property - def cutOffTolerance(self): - """Value of cutOffTolerance.""" - return self._cutOffTolerance - @cutOffTolerance.setter - def cutOffTolerance(self, cutOffTolerance): - self._cutOffTolerance = cutOffTolerance - self._approxParameters["cutOffTolerance"] = self.cutOffTolerance - - @property - def residueTol(self): - """Value of residueTol.""" - return self._residueTol - @residueTol.setter - def residueTol(self, residueTol): - if residueTol < 0. or (residueTol > 0. and self.npar > 1): - RROMPyWarning("Overriding prescribed residue tolerance to 0.") - residueTol = 0. - self._residueTol = residueTol - self._approxParameters["residueTol"] = self.residueTol - def resetSamples(self): """Reset samples.""" super().resetSamples() self._musUniqueCN = None self._derIdxs = None self._reorder = None def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" if self._musUniqueCN is None or len(self._reorder) != len(self.mus): self._musUniqueCN, musIdxsTo, musIdxs, musCount = ( self.trainedModel.centerNormalize(self.mus).unique( return_index = True, return_inverse = True, return_counts = True)) self._musUnique = self.mus[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.mus.shape[1], cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt def _setupDenominator(self): """Compute rational denominator.""" RROMPyAssert(self._mode, message = "Cannot setup denominator.") vbMng(self, "INIT", "Starting computation of denominator.", 7) if hasattr(self, "_N_isauto"): self._setNAuto() else: N = reduceDegreeN(self.N, self.S, self.npar, self.polydegreetype) if N < self.N: RROMPyWarning(("N too large compared to S. 
Reducing N by " "{}").format(self.N - N)) self.N = N while self.N > 0: if self.functionalSolve != "NORM" and self.npar > 1: RROMPyWarning(("Strategy for functional optimization must be " "'NORM' for more than one parameter. " "Overriding to 'NORM'.")) self.functionalSolve = "NORM" if self.functionalSolve == "BARYCENTRIC" and self.N + 1 < self.S: RROMPyWarning(("Barycentric strategy cannot be applied with " "Least Squares. Overriding to 'NORM'.")) self.functionalSolve = "NORM" if self.functionalSolve == "BARYCENTRIC": invD, TN = None, None self._setupInterpolationIndices() else: invD, TN = self._computeInterpolantInverseBlocks() if (self.functionalSolve in ["NODAL", "LOEWNER", "BARYCENTRIC"] and len(self._musUnique) != len(self.mus)): if self.functionalSolve == "BARYCENTRIC": warnflag = "Barycentric" else: warnflag = "Iterative" RROMPyWarning(("{} functional optimization cannot be applied " "to repeated samples. Overriding to " "'NORM'.").format(warnflag)) self.functionalSolve = "NORM" idxSamplesEff = list(range(self.S)) if self.POD: ev, eV = self.findeveVGQR( self.samplingEngine.RPOD[:, idxSamplesEff], invD, TN) else: ev, eV = self.findeveVGExplicit( self.samplingEngine.samples(idxSamplesEff), invD, TN) if self.functionalSolve in ["NODAL", "LOEWNER"]: break nevBad = checkRobustTolerance(ev, self.robustTol) if nevBad <= (self.functionalSolve == "NORM"): break if self.catchInstability: raise RROMPyException(("Instability in denominator " "computation: eigenproblem is poorly " "conditioned."), self.catchInstability == 1) vbMng(self, "MAIN", ("Smallest {} eigenvalues below tolerance. " "Reducing N by 1.").format(nevBad), 10) self.N = self.N - 1 if self.N <= 0: self.N = 0 eV = np.ones((1, 1)) if self.N > 0 and self.functionalSolve in ["NODAL", "LOEWNER", "BARYCENTRIC"]: q = PIN() q.polybasis, q.nodes = self.polybasis0, eV else: q = PI() q.npar = self.npar q.polybasis = self.polybasis0 if self.polydegreetype == "TOTAL": q.coeffs = degreeTotalToFull(tuple([self.N + 1] * self.npar), self.npar, eV) else: q.coeffs = eV.reshape([self.N + 1] * self.npar) vbMng(self, "DEL", "Done computing denominator.", 7) return q def _setupNumerator(self): """Compute rational numerator.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of numerator.", 7) self._setupInterpolationIndices() Qevaldiag = polyTimesTable(self.trainedModel.data.Q, self._musUniqueCN, self._reorder, self._derIdxs, self.scaleFactorRel) if self.POD: Qevaldiag = Qevaldiag.dot(self.samplingEngine.RPOD.T) if hasattr(self, "_M_isauto"): self._setMAuto() M = self.M else: M = reduceDegreeN(self.M, self.S, self.npar, self.polydegreetype) if M < self.M: RROMPyWarning(("M too large compared to S. 
Reducing M by " "{}").format(self.M - M)) self.M = M while self.M >= 0: pParRest = [self.M, self.polybasis, self.verbosity >= 5, self.polydegreetype == "TOTAL", {"derIdxs": self._derIdxs, "reorder": self._reorder, "scl": self.scaleFactorRel}] if self.polybasis in ppb: p = PI() else: self.computeScaleFactor() rDWEff = np.array([w * f for w, f in zip( self.radialDirectionalWeights, self.scaleFactor)]) pParRest = pParRest[: 2] + [rDWEff] + pParRest[2 :] pParRest[-1]["optimizeScalingBounds"] = ( self.radialDirectionalWeightsAdapt) p = RBI() if self.polybasis in ppb + rbpb: pParRest += [{"rcond": self.interpRcond}] wellCond, msg = p.setupByInterpolation(self._musUniqueCN, Qevaldiag, *pParRest) vbMng(self, "MAIN", msg, 5) if wellCond: break if self.catchInstability: raise RROMPyException(("Instability in numerator computation: " "polyfit is poorly conditioned."), self.catchInstability == 1) vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing M " "by 1."), 10) self.M = self.M - 1 if self.M < 0: raise RROMPyException(("Instability in computation of numerator. " "Aborting.")) self.M = M vbMng(self, "DEL", "Done computing numerator.", 7) return p def setupApprox(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) self.computeSnapshots() self._setupTrainedModel(self.samplingEngine.projectionMatrix) self._setupRational(self._setupDenominator()) self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def _setupRational(self, Q:interpEng, P : interpEng = None): vbMng(self, "INIT", "Starting approximant finalization.", 5) - computeNum = P is None + self.trainedModel.data.Q = Q + if P is None: P = self._setupNumerator() if self.N > 0 and self.npar == 1: - foci = self.sampler.normalFoci() - ground = self.sampler.groundPotential() - if not np.isinf(self.cutOffTolerance): - pls = Q.roots() - ground = self.sampler.groundPotential() - idKeep = np.logical_and(np.logical_not(np.isinf(pls)), - potential(pls, foci) / ground - 1. - <= self.cutOffTolerance) - if np.sum(idKeep) < self.N: - vbMng(self, "MAIN", - ("Removing {} poles out of {} due to cut " - "off.").format(np.sum(idKeep), self.N), 10) - if isinstance(Q, PIN): - Q.nodes = Q.nodes[idKeep] - else: - Q = PI() - Q.npar = self.npar - Q.polybasis = self.polybasis0 - Q.coeffs = np.ones(1, dtype = np.complex) - for pl in pls[idKeep]: - Q.coeffs = polyTimes(Q.coeffs, [- pl, 1.], - Pbasis = Q.polybasis, Rbasis = Q.polybasis) - Q.coeffs /= np.linalg.norm(Q.coeffs) - self.N = np.sum(idKeep) - computeNum = True - if self.residueTol > 0: - if computeNum: P = self._setupNumerator() + if not hasattr(self, "_ignoreResidues"): cfs, pls, _ = rational2heaviside(P, Q) - cfs = cfs[: self.N] - if self.POD: - resEff = np.linalg.norm(cfs, axis = 1) - else: - resEff = self.HFEngine.norm( - self.samplingEngine.projectionMatrix.dot(cfs.T), - is_state = self.approx_state) + cfs = cfs[: self.N].T + if not self.POD: + cfs = self.samplingEngine.projectionMatrix.dot(cfs) + foci = self.sampler.normalFoci() + ground = self.sampler.groundPotential() potEff = potential(pls, foci) / ground potEff[np.logical_or(potEff < 1., np.isinf(pls))] = 1. - resEff[np.isinf(pls)] = 0. 
- resEff /= potEff - idKeep = resEff >= self.residueTol * np.max(resEff) - if np.sum(idKeep) < self.N: - vbMng(self, "MAIN", - ("Removing {} poles out of {} due to residue " - "magnitude.").format(np.sum(idKeep), self.N), 10) - if isinstance(Q, PIN): - Q.nodes = Q.nodes[idKeep] - else: - Q = PI() - Q.npar = self.npar - Q.polybasis = self.polybasis0 - Q.coeffs = np.ones(1, dtype = np.complex) - for pl in pls[idKeep]: - Q.coeffs = polyTimes(Q.coeffs, [- pl, 1.], - Pbasis = Q.polybasis, Rbasis = Q.polybasis) - Q.coeffs /= np.linalg.norm(Q.coeffs) - self.N = np.sum(idKeep) + cfs[:, np.isinf(pls)] = 0. + cfs /= potEff + else: + cfs, pls = None, Q.roots() + idxBad = self.HFEngine.flagBadPolesResidues(pls, cfs) + if np.any(idxBad): + vbMng(self, "MAIN", + "Removing {} spurious poles out of {}.".format( + np.sum(idxBad), self.N), 10) + if isinstance(Q, PIN): + Q.nodes = Q.nodes[np.logical_not(idxBad)] else: - computeNum = False - self.trainedModel.data.Q = Q - if computeNum: P = self._setupNumerator() + Q = PI() + Q.npar = self.npar + Q.polybasis = self.polybasis0 + Q.coeffs = np.ones(1, dtype = np.complex) + for pl in pls[np.logical_not(idxBad)]: + Q.coeffs = polyTimes(Q.coeffs, [- pl, 1.], + Pbasis = Q.polybasis, Rbasis = Q.polybasis) + Q.coeffs /= np.linalg.norm(Q.coeffs) + self.trainedModel.data.Q = Q + self.N = Q.deg[0] + P = self._setupNumerator() self.trainedModel.data.P = P vbMng(self, "DEL", "Terminated approximant finalization.", 5) def _computeInterpolantInverseBlocks(self) -> Tuple[List[Np2D], Np2D]: """ Compute inverse factors for minimal interpolant target functional. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") self._setupInterpolationIndices() pvPPar = [self.polybasis0, self._derIdxs, self._reorder, self.scaleFactorRel] if hasattr(self, "_M_isauto"): self._setMAuto() E = max(self.M, self.N) while E >= 0: if self.polydegreetype == "TOTAL": Eeff = E idxsB = totalDegreeMaxMask(E, self.npar) else: #if self.polydegreetype == "FULL": Eeff = [E] * self.npar idxsB = fullDegreeMaxMask(E, self.npar) TE = pvP(self._musUniqueCN, Eeff, *pvPPar) fitOut = pseudoInverse(TE, rcond = self.interpRcond, full = True) vbMng(self, "MAIN", ("Fitting {} samples with degree {} through {}... " "Conditioning of pseudoinverse system: {:.4e}.").format( TE.shape[0], E, polyfitname(self.polybasis0), fitOut[1][1][0] / fitOut[1][1][-1]), 5) if fitOut[1][0] == TE.shape[1]: fitinv = fitOut[0][idxsB, :] break if self.catchInstability: raise RROMPyException(("Instability in denominator " "computation: polyfit is poorly " "conditioned."), self.catchInstability == 1) EeqN = E == self.N vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing E {}" "by 1.").format("and N " * EeqN), 10) if EeqN: self.N = self.N - 1 E -= 1 if self.N < 0: raise RROMPyException(("Instability in computation of " "denominator. Aborting.")) invD = vanderInvTable(fitinv, idxsB, self._reorder, self._derIdxs) if self.N == E: TN = TE else: if self.polydegreetype == "TOTAL": Neff = self.N idxsB = totalDegreeMaxMask(self.N, self.npar) else: #if self.polydegreetype == "FULL": Neff = [self.N] * self.npar idxsB = fullDegreeMaxMask(self.N, self.npar) TN = pvP(self._musUniqueCN, Neff, *pvPPar) return invD, TN def findeveVGExplicit(self, sampleE:sampList, invD:List[Np2D], TN:Np2D) -> Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of rational denominator matrix. 
""" RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") vbMng(self, "INIT", "Building gramian matrix.", 10) gramian = self.HFEngine.innerProduct(sampleE, sampleE, is_state = self.approx_state) if self.functionalSolve == "NODAL": SEnd = invD[0].shape[1] G0 = np.zeros((SEnd,) * 2, dtype = np.complex) elif self.functionalSolve == "LOEWNER": G0 = gramian if self.functionalSolve == "BARYCENTRIC": nEnd = len(gramian) - 1 else: nEnd = TN.shape[1] G = np.zeros((nEnd, nEnd), dtype = np.complex) for k in range(len(invD)): iDkN = dot(invD[k], TN) G += dot(dot(gramian, iDkN).T, iDkN.conj()).T if self.functionalSolve == "NODAL": G0 += dot(dot(gramian, invD[k]).T, invD[k].conj()).T vbMng(self, "DEL", "Done building gramian.", 10) if self.functionalSolve == "NORM": ev, eV = np.linalg.eigh(G) eV = eV[:, 0] problem = "eigenproblem" else: if self.functionalSolve == "BARYCENTRIC": fitOut = pseudoInverse(gramian, rcond = self.interpRcond, full = True) barWeigths = fitOut[0].dot(np.ones(nEnd + 1)) eV = self.findeVBarycentric(barWeigths / np.sum(barWeigths)) else: fitOut = pseudoInverse(G[:-1, :-1], rcond = self.interpRcond, full = True) eV = np.append(fitOut[0].dot(G[:-1, -1]), -1.) ev = fitOut[1][1][::-1] problem = "linear problem" vbMng(self, "MAIN", ("Solved {} of size {} with condition number " "{:.4e}.").format(problem, nEnd, ev[-1] / ev[0]), 5) if self.functionalSolve in ["NODAL", "LOEWNER"]: q = PI() q.npar, q.polybasis, q.coeffs = self.npar, self.polybasis0, eV eV, tol, niter, passed = self.findeVNewton(q.roots(), G0) if passed: vbMng(self, "MAIN", ("Newton algorithm for problem of size {} converged " "(tol = {:.4e}) in {} iterations.").format(nEnd, tol, niter), 5) else: RROMPyWarning(("Newton algorithm for problem of size {} did " "not converge (tol = {:.4e}) after {} " "iterations.").format(nEnd, tol, niter)) return ev, eV def findeveVGQR(self, RPODE:Np2D, invD:List[Np2D], TN:Np2D) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of rational denominator matrix through SVD of R factor. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") vbMng(self, "INIT", "Building half-gramian matrix stack.", 10) if self.functionalSolve == "NODAL": gramian = Np2DLikeGramian(None, dot(RPODE, invD[0])) elif self.functionalSolve == "LOEWNER": gramian = Np2DLikeGramian(None, RPODE) if self.functionalSolve == "BARYCENTRIC": nEnd = RPODE.shape[1] - 1 else: S, nEnd, eWidth = RPODE.shape[0], TN.shape[1], len(invD) Rstack = np.zeros((S * eWidth, nEnd), dtype = np.complex) for k in range(eWidth): Rstack[k * S : (k + 1) * S, :] = dot(RPODE, dot(invD[k], TN)) vbMng(self, "DEL", "Done building half-gramian.", 10) if self.functionalSolve in ["NORM", "BARYCENTRIC"]: if self.functionalSolve == "NORM": _, s, Vh = np.linalg.svd(Rstack, full_matrices = False) eV = Vh[-1, :].conj() else: #if self.functionalSolve == "BARYCENTRIC": _, s, Vh = np.linalg.svd(RPODE, full_matrices = False) s[np.logical_not(np.isclose(s, 0.))] **= -2. barWeigths = (Vh.T.conj() * s).dot(Vh.dot(np.ones(nEnd + 1))) eV = self.findeVBarycentric(barWeigths / np.sum(barWeigths)) ev = s[::-1] problem = "svd problem" else: fitOut = pseudoInverse(Rstack[:, :-1], rcond = self.interpRcond, full = True) ev = fitOut[1][1][::-1] eV = np.append(fitOut[0].dot(Rstack[:, -1]), -1.) 
problem = "linear problem" vbMng(self, "MAIN", ("Solved {} of size {} with condition number " "{:.4e}.").format(problem, nEnd, ev[-1] / ev[0]), 5) if self.functionalSolve in ["NODAL", "LOEWNER"]: q = PI() q.npar, q.polybasis, q.coeffs = self.npar, self.polybasis0, eV eV, tol, niter, passed = self.findeVNewton(q.roots(), gramian) if passed: vbMng(self, "MAIN", ("Newton algorithm for problem of size {} converged " "(tol = {:.4e}) in {} iterations.").format(nEnd, tol, niter), 5) else: RROMPyWarning(("Newton algorithm for problem of size {} did " "not converge (tol = {:.2e}) after {} " "iterations.").format(nEnd, tol, niter)) return ev, eV def findeVBarycentric(self, baryWeights:Np1D) -> Np1D: RROMPyAssert(self._mode, message = "Cannot solve optimization problem.") arrow = np.pad(np.diag(self._musUniqueCN.data[ self._reorder].flatten()), (1, 0), "constant", constant_values = 1.) + 0.j arrow[0, 0] = 0. arrow[0, 1:] = baryWeights active = np.pad(np.eye(len(baryWeights)), (1, 0), "constant") eV = eigvals(arrow, active) return eV[np.logical_not(np.isinf(eV))] def findeVNewton(self, nodes0:Np1D, gram:Np2D, maxiter : int = 25, tolNewton : float = 1e-10) \ -> Tuple[Np1D, float, int, bool]: RROMPyAssert(self._mode, message = "Cannot solve optimization problem.") algo = self.functionalSolve N = len(nodes0) nodes = nodes0 grad = np.zeros(N, dtype = np.complex) hess = np.zeros((N, N), dtype = np.complex) mu = np.repeat(self._musUniqueCN.data[self._reorder], N, axis = 1) for niter in range(maxiter): if algo == "NODAL": plDist = mu - nodes.reshape(1, -1) q0, q = np.prod(plDist, axis = 1), [] elif algo == "LOEWNER": loew = np.pad(np.power(mu - nodes.reshape(1, -1), -1.), [(0, 0), (1, 0)], 'constant', constant_values = 1.) loewI = pseudoInverse(loew) Ids = [] for jS in range(N): if algo == "NODAL": mask = np.arange(N) != jS q += [np.prod(plDist[:, mask], axis = 1)] grad[jS] = q[-1].conj().dot(gram.dot(q0)) elif algo == "LOEWNER": Ids += [loewI.dot(np.power(loew[:, 1 + jS], 2.))] zIj, jI = Ids[-1][0], loewI[1 + jS] grad[jS] = (zIj * jI).conj().dot(gram.dot(loewI[0])) for iS in range(jS + 1): if algo == "NODAL": if iS == jS: hij = 0. sij = q[-1].conj().dot(gram.dot(q[-1])) else: mask = np.logical_and(np.arange(N) != iS, np.arange(N) != jS) qij = np.prod(plDist[:, mask], axis = 1) hij = qij.conj().dot(gram.dot(q0)) sij = q[-1].conj().dot(gram.dot(q[iS])) elif algo == "LOEWNER": zIi, iIj = Ids[iS][0], Ids[-1][1 + iS] hij = (zIi * iIj * jI).conj().dot(gram.dot(loewI[0])) if iS == jS: iI = jI zIdd = loewI[0].dot(np.power(loew[:, 1 + jS], 3.)) hij += (zIdd * jI).conj().dot(gram.dot(loewI[0])) hij *= 2. else: jIi, iI = Ids[iS][1 + jS], loewI[1 + iS] hij += (zIj * jIi * iI).conj().dot( gram.dot(loewI[0])) sij = (zIj * jI).conj().dot(gram.dot(zIi * iI)) hess[jS, iS] = hij + sij if iS < jS: hess[iS, jS] = hij + sij.conj() dnodes = np.linalg.solve(hess, grad) nodes += dnodes tol = np.linalg.norm(dnodes) / np.linalg.norm(nodes) if tol < tolNewton: break return nodes, tol, niter, niter < maxiter def getResidues(self, *args, **kwargs) -> Np2D: """ Obtain approximant residues. Returns: Matrix with residues as columns. 
""" return self.trainedModel.getResidues(*args, **kwargs) diff --git a/tests/4_reduction_methods_multiD/pivoted_rational_2d.py b/tests/4_reduction_methods_multiD/pivoted_rational_2d.py index d73c223..dcbca0f 100644 --- a/tests/4_reduction_methods_multiD/pivoted_rational_2d.py +++ b/tests/4_reduction_methods_multiD/pivoted_rational_2d.py @@ -1,112 +1,114 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from matrix_random import matrixRandom from rrompy.reduction_methods import (RationalInterpolantPivoted as RIP, RationalInterpolantGreedyPivoted as RIGP) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, ManualSampler as MS) def test_pivoted_uniform(): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": True, "S": 5, "polybasis": "CHEBYSHEV", "samplerPivot": QS([4.75, 5.25], "CHEBYSHEV"), "SMarginal": 5, "polybasisMarginal": "MONOMIAL", "matchingWeight": 1., "samplerMarginal": QS([6.75, 7.25], "UNIFORM")} approx = RIP([0], solver, mu0, approx_state = True, approxParameters = params, verbosity = 0) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) assert np.isclose(errNP / solver.norm(uh), 6.0631706e-04, rtol = 1) def test_pivoted_manual_grid(capsys): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": False, "S": 5, "polybasis": "MONOMIAL", "samplerPivot": MS([4.75, 5.25], np.array([5.]), normalFoci = [0., 0.]), "SMarginal": 5, "polybasisMarginal": "MONOMIAL", "matchingWeight": 1., "samplerMarginal": MS([6.75, 7.25], np.linspace(6.75, 7.25, 5)), - "robustTol": 1e-6, "interpRcond": 1e-3, "cutOffTolerance": 1.} + "robustTol": 1e-6, "interpRcond": 1e-3} approx = RIP([0], solver, mu0, approx_state = True, approxParameters = params, verbosity = 0) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) assert np.isclose(errNP / solver.norm(uh), .4763489, rtol = 1) out, err = capsys.readouterr() assert ("poorly conditioned" not in out) assert len(err) == 0 def test_pivoted_greedy(): mu = [5.05, 7.1] mu0 = [5., 7.] 
    solver = matrixRandom()
    uh = solver.solve(mu)[0]
    params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4,
              "collinearityTol": 1e8, "errorEstimatorKind": "DISCREPANCY",
              "S": 5, "polybasis": "CHEBYSHEV",
              "samplerPivot": QS([4.75, 5.25], "UNIFORM"),
              "trainSetGenerator": QS([4.75, 5.25], "CHEBYSHEV"),
              "SMarginal": 5, "polybasisMarginal": "MONOMIAL",
              "samplerMarginal": QS([6.75, 7.25], "UNIFORM"),
-              "matchingWeight": 1., "cutOffTolerance": 1.5}
+              "matchingWeight": 1.}
+    solver.cutOffPolesRMin, solver.cutOffPolesRMax = -3., 3.
+    solver.cutOffPolesIMin, solver.cutOffPolesIMax = -1.5, 1.5
    approx = RIGP([0], solver, mu0, approx_state = True,
                  approxParameters = params, verbosity = 0)
    approx.setupApprox()
    uhP1 = approx.getApprox(mu)[0]
    errP = approx.getErr(mu)[0]
    errNP = approx.normErr(mu)[0]
    myerrP = uhP1 - uh
    assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3)
    assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3)
    resP = approx.getRes(mu)[0]
    resNP = approx.normRes(mu)
    assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3)
    assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))),
                       0., rtol = 1e-3)
    assert np.isclose(errNP / solver.norm(uh), 1.181958e-02, rtol = 1)
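
# Two minimal sketches follow, assuming plain numpy; the names and signatures
# below are illustrative stand-ins, not the library's actual API.
import numpy as np

# Sketch 1 -- engine-level pole cutoff. One plausible reading of what a
# flagBadPolesResidues hook can do once cutOffPolesRMin/RMax/IMin/IMax bounds
# are available (cf. test_pivoted_greedy above): flag every pole whose real or
# imaginary part leaves the prescribed rectangle. Residues, and whether poles
# are checked in the original or in a centered/normalized coordinate frame, are
# left out of this simplified version.
def flag_poles_outside_box(poles, r_min, r_max, i_min, i_max):
    poles = np.asarray(poles, dtype = complex)
    bad = np.real(poles) < r_min
    bad |= np.real(poles) > r_max
    bad |= np.imag(poles) < i_min
    bad |= np.imag(poles) > i_max
    return bad

# Example: only the first pole survives a [-3, 3] x [-1.5, 1.5] box.
flag_poles_outside_box([.5 + .1j, 4. + 0.j, .2 + 2.j], -3., 3., -1.5, 1.5)
# -> array([False,  True,  True])

# Sketch 2 -- look-ahead error indicator. A one-parameter, monomial-basis
# caricature of getErrorEstimatorLookAhead: express the next-degree basis term
# at the training points through the current basis, then score each test point
# by how badly that expression extrapolates there (the library additionally
# normalizes by |Q| and iteratively augments the trial basis with the selected
# directions).
def look_ahead_indicator(mu_train, mu_test, degree):
    van_train = np.vander(mu_train, degree + 1, increasing = True)
    van_test = np.vander(mu_test, degree + 1, increasing = True)
    next_train = mu_train ** (degree + 1)
    next_test = mu_test ** (degree + 1)
    coeff = np.linalg.lstsq(van_train, next_train, rcond = None)[0]
    return np.abs(next_test - van_test.dot(coeff))

# Example: the indicator peaks where adding a sample would help the most.
mu_train, mu_test = np.linspace(-1., 1., 5), np.linspace(-1., 1., 101)
mu_next = mu_test[np.argmax(look_ahead_indicator(mu_train, mu_test, 4))]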