diff --git a/README.md b/README.md
index a5d5f20..dd23c3f 100644
--- a/README.md
+++ b/README.md
@@ -1,51 +1,51 @@
# RROMPy -- Rational Reduced Order Modeling in Python
=====================================================
Module for the solution and rational model order reduction of parametric PDE-based problems. Coded in Python 3.

## Prerequisites
**RROMPy** requires
* **numpy** and **scipy**;
* **fenics** and **mshr**;
* **matplotlib**;
-* and other standard Python3 modules (**os**, **typing**, **time**, **datetime**, **abc**, **pickle**, **traceback**, and **itertools**).
+* and other standard Python3 modules (**os**, **typing**, **time**, **datetime**, **abc**, **pickle**, **traceback**, **itertools**, ...).

Testing requires
* **pytest**.

### FEniCS
Most of the high-fidelity problem engines already provided rely on [FEniCS](http://fenicsproject.org/). If you do not have FEniCS installed, you may want to create an [Anaconda3/Miniconda3](http://anaconda.org/) environment using the command
```
-conda create -n fenicsenv -c conda-forge pytest scipy matplotlib fenics=2019.1.0=py38_9 mshr=2019.1.0=py38hf9f41d3_3
+conda create -n fenicsenv -c conda-forge pytest pytest-runner scipy matplotlib fenics=2019.1.0=py38_9 mshr=2019.1.0=py38hf9f41d3_3
```
This will create an environment where FEniCS (and all other required modules) can be used. In order to use FEniCS, the environment must be activated through
```
conda activate fenicsenv
```
See the [Anaconda documentation](http://docs.conda.io/) for more information.

### FEniCS and mshr versions
More recent versions of FEniCS and mshr may be preferred, but one should be wary of [inconsistent dependencies](http://fenicsproject.discourse.group/t/anaconda-installation-of-fenics-and-mshr/2062/5). If the following code snippet runs successfully, then your environment *should* have been created correctly:
```
from mshr import *
```

## Installing
Clone the repository
```
git clone http://c4science.ch/source/RROMPy.git
```
then enter the main folder and install the package by typing
```
-python3 setup.py install
+python setup.py install
```
The installation can be tested with
```
-python3 setup.py test
+python setup.py test
```

## License
This project is licensed under the GNU General Public License; see the !!LICENSE!! file for details.

## Acknowledgments
Part of the funding that made this module possible has been provided by the Swiss National Science Foundation through the FNS Research Project 182236.
diff --git a/examples/1_symmetric_disk/symmetric_disk.py b/examples/1_symmetric_disk/symmetric_disk.py
index 8abdb59..d77c6bd 100644
--- a/examples/1_symmetric_disk/symmetric_disk.py
+++ b/examples/1_symmetric_disk/symmetric_disk.py
@@ -1,88 +1,88 @@
import numpy as np
from symmetric_disk_engine import SymmetricDiskEngine as engine
from rrompy.reduction_methods import (NearestNeighbor as NN,
                                      RationalInterpolant as RI,
                                      ReducedBasis as RB,
                                      RationalInterpolantGreedy as RIG,
                                      ReducedBasisGreedy as RBG)
from rrompy.parameter import parameterMap as pMap
from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS,
                                                 EmptySampler as ES)

ks = [10., 20.]
k0, n = np.mean(np.power(ks, 2.)) ** .5, 150
solver = engine(k0, n)
k = 12.
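The script sets `k0 = np.mean(np.power(ks, 2.)) ** .5`, i.e. the square root of the midpoint of `[ks[0]**2, ks[1]**2]`; this pairs with the `parameterMap = pMap(2.)` samplers used below, which, as read here, place quadrature nodes in the rescaled variable z = k**2 and map them back as k = sqrt(z). A minimal numpy sketch of that reading follows; the explicit node formula is an illustrative assumption, not RROMPy's internals:
```python
import numpy as np

# Hypothetical illustration of QS(ks, "CHEBYSHEV", parameterMap = pMap(2.)):
# Chebyshev nodes are placed in z = k**2, then mapped back to k.
ks, S = [10., 20.], 5
zlims = np.power(ks, 2.)
z = (np.mean(zlims) + .5 * (zlims[1] - zlims[0])
                        * np.cos(np.pi * (2 * np.arange(S) + 1) / (2 * S)))
k_nodes = z ** .5            # sample points in the physical variable k
k0 = np.mean(zlims) ** .5    # center in the rescaled variable, as in the script
```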
for method in ["RI", "RB", "RI_GREEDY", "RB_GREEDY"]: print("Testing {} method".format(method)) if method == "RI": params = {'S':40, 'POD':True, 'polybasis':"CHEBYSHEV", 'sampler':QS(ks, "CHEBYSHEV", parameterMap = pMap(2.))} algo = RI if method == "RB": params = {'S':40, 'POD':True, 'sampler':QS(ks, "CHEBYSHEV", parameterMap = pMap(2.))} algo = RB if method == "RI_GREEDY": params = {'S':10, 'POD':True, 'polybasis':"LEGENDRE", 'greedyTol':1e-2, 'sampler':QS(ks, "UNIFORM", parameterMap = pMap(2.)), 'errorEstimatorKind':"DISCREPANCY", 'samplerTrainSet':QS(ks, "CHEBYSHEV", parameterMap = pMap(2.))} algo = RIG if method == "RB_GREEDY": params = {'S':10, 'POD':True, 'greedyTol':1e-2, 'sampler':QS(ks, "UNIFORM", parameterMap = pMap(2.)), 'samplerTrainSet':QS(ks, "CHEBYSHEV", parameterMap = pMap(2.))} algo = RBG approx = algo(solver, mu0 = k0, approxParameters = params, verbosity = 20) if len(method) == 2: approx.setupApprox() else: approx.setupApprox("LAST") print("--- Approximant ---") - approx.plotApprox(k, name = 'u_app') - approx.plotHF(k, name = 'u_HF') - approx.plotErr(k, name = 'err_app') - approx.plotRes(k, name = 'res_app') + approx.plotApprox(k, plotargs = {"name": 'u_app'}) + approx.plotHF(k, plotargs = {"name": 'u_HF'}) + approx.plotErr(k, plotargs = {"name": 'err_app'}) + approx.plotRes(k, plotargs = {"name": 'res_app'}) normErr = approx.normErr(k)[0] normSol = approx.normHF(k)[0] normRes = approx.normRes(k)[0] normRHS = approx.normRHS(k)[0] print("SolNorm:\t{:.5e}\nErr_app: \t{:.5e}\nErrRel_app:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_app: \t{:.5e}\nResRel_app:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) print("--- Closest snapshot ---") approxNN = NN(solver, mu0 = k0, verbosity = 0, approxParameters = {'S':approx.S, 'POD':True, 'sampler':ES()}) approxNN.setSamples(approx.samplingEngine) - approxNN.plotApprox(k, name = 'u_close') - approxNN.plotHF(k, name = 'u_HF') - approxNN.plotErr(k, name = 'err_close') - approxNN.plotRes(k, name = 'res_close') + approxNN.plotApprox(k, plotargs = {"name": 'u_close'}) + approxNN.plotHF(k, plotargs = {"name": 'u_HF'}) + approxNN.plotErr(k, plotargs = {"name": 'err_close'}) + approxNN.plotRes(k, plotargs = {"name": 'res_close'}) normErr = approxNN.normErr(k)[0] normSol = approxNN.normHF(k)[0] normRes = approxNN.normRes(k)[0] normRHS = approxNN.normRHS(k)[0] print("SolNorm:\t{:.5e}\nErr_close:\t{:.5e}\nErrRel_close:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_close:\t{:.5e}\nResRel_close:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) if method[:2] == "RI": poles, residues = approx.getResidues() if method[:2] == "RB": poles = approx.getPoles() print("Poles:\n{}".format(poles)) if method[:2] == "RI": for pol, res in zip(poles, residues): solver.plot(res) print("pole = {:.5e}".format(pol)) print("\n") diff --git a/examples/2_double_slit/double_slit.py b/examples/2_double_slit/double_slit.py index 0273d9c..1e9858f 100644 --- a/examples/2_double_slit/double_slit.py +++ b/examples/2_double_slit/double_slit.py @@ -1,82 +1,82 @@ import numpy as np from double_slit_engine import DoubleSlitEngine as engine from rrompy.reduction_methods import (NearestNeighbor as NN, RationalInterpolant as RI, RationalInterpolantGreedy as RIG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, EmptySampler as ES) from rrompy.solver.fenics import interp_project ks = [10., 15.] k0, n = np.mean(ks), 150 solver = engine(k0, n) k = 11. 
for method in ["RI", "RI_GREEDY"]: print("Testing {} method".format(method)) if method == "RI": params = {'S':20, 'POD':True, 'polybasis':"CHEBYSHEV", 'sampler':QS(ks, "CHEBYSHEV")} algo = RI if method == "RI_GREEDY": params = {'S':10, 'POD':True, 'polybasis':"LEGENDRE", 'greedyTol':1e-2, 'sampler':QS(ks, "UNIFORM"), 'errorEstimatorKind':"LOOK_AHEAD", 'samplerTrainSet':QS(ks, "CHEBYSHEV")} algo = RIG approx = algo(solver, mu0 = k0, approxParameters = params, verbosity = 20) if len(method) == 2: approx.setupApprox() else: approx.setupApprox("LAST") print("--- Approximant ---") - approx.plotApprox(k, name = 'u_app') - approx.plotHF(k, name = 'u_HF') - approx.plotErr(k, name = 'err_app') - approx.plotRes(k, name = 'res_app') + approx.plotApprox(k, plotargs = {"name": 'u_app'}) + approx.plotHF(k, plotargs = {"name": 'u_HF'}) + approx.plotErr(k, plotargs = {"name": 'err_app'}) + approx.plotRes(k, plotargs = {"name": 'res_app'}) normErr = approx.normErr(k)[0] normSol = approx.normHF(k)[0] normRes = approx.normRes(k)[0] normRHS = approx.normRHS(k)[0] print("SolNorm:\t{:.5e}\nErr_app: \t{:.5e}\nErrRel_app:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_app: \t{:.5e}\nResRel_app:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) print("--- Closest snapshot ---") approxNN = NN(solver, mu0 = k0, verbosity = 0, approxParameters = {'S':approx.S, 'POD':True, 'sampler':ES()}) approxNN.setSamples(approx.storeSamples()) - approxNN.plotApprox(k, name = 'u_close') - approxNN.plotHF(k, name = 'u_HF') - approxNN.plotErr(k, name = 'err_close') - approxNN.plotRes(k, name = 'res_close') + approxNN.plotApprox(k, plotargs = {"name": 'u_close'}) + approxNN.plotHF(k, plotargs = {"name": 'u_HF'}) + approxNN.plotErr(k, plotargs = {"name": 'err_close'}) + approxNN.plotRes(k, plotargs = {"name": 'res_close'}) normErr = approxNN.normErr(k)[0] normSol = approxNN.normHF(k)[0] normRes = approxNN.normRes(k)[0] normRHS = approxNN.normRHS(k)[0] print("SolNorm:\t{:.5e}\nErr_close:\t{:.5e}\nErrRel_close:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_close:\t{:.5e}\nResRel_close:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) uIncR, uIncI = solver.getDirichletValues(k) uIncR = interp_project(uIncR, solver.V) uIncI = interp_project(uIncI, solver.V) uInc = np.array(uIncR.vector()) + 1.j * np.array(uIncI.vector()) uEx = approx.getHF(k)[0] - uInc uApp = approx.getApprox(k)[0] - uInc solver.plot(uEx, name = 'uex_tot') solver.plot(uApp, name = 'u_app_tot') poles, residues = approx.getResidues() print("Poles:\n{}".format(poles)) for pol, res in zip(poles, residues): solver.plot(res) print("pole = {:.5e}".format(pol)) print("\n") diff --git a/examples/3_sector_angle/sector_angle.py b/examples/3_sector_angle/sector_angle.py index 94cd17f..80002d2 100644 --- a/examples/3_sector_angle/sector_angle.py +++ b/examples/3_sector_angle/sector_angle.py @@ -1,107 +1,107 @@ import numpy as np import matplotlib.pyplot as plt from sector_angle_engine import SectorAngleEngine as engine from rrompy.reduction_methods import (NearestNeighbor as NN, RationalInterpolantPivotedPoleMatch as RIP, RationalInterpolantGreedyPivotedPoleMatch as RIGP) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, EmptySampler as ES) ks, ts = [10., 15.], [.4, .6] k0, t0, n = np.mean(np.power(ks, 2.)) ** .5, np.mean(ts), 50 solver = engine(k0, t0, n) murange = [[ks[0], ts[0]], [ks[-1], ts[-1]]] mu = [12., .535] fighandles = [] for method in ["RI", "RI_GREEDY"]: 
print("Testing {} method".format(method)) if method == "RI": params = {'S':20, "paramsMarginal":{"MMarginal": 3}, 'SMarginal':11, 'POD':True, 'polybasis':"CHEBYSHEV", 'polybasisMarginal':"MONOMIAL_GAUSSIAN", 'radialDirectionalWeightsMarginal': 100., 'matchingWeight':1., 'samplerPivot':QS(ks, "CHEBYSHEV", 2.), 'samplerMarginal':QS(ts, "UNIFORM")} algo = RIP if method == "RI_GREEDY": params = {'S':10, "paramsMarginal":{"MMarginal": 3}, 'SMarginal':11, 'POD':True, 'polybasis':"LEGENDRE", 'polybasisMarginal':"MONOMIAL_GAUSSIAN", 'radialDirectionalWeightsMarginal': 100., 'matchingWeight':1., 'samplerPivot':QS(ks, "UNIFORM", 2.), 'greedyTol':1e-3, 'errorEstimatorKind':"LOOK_AHEAD_RES", 'samplerTrainSet':QS(ks, "CHEBYSHEV", 2.), 'samplerMarginal':QS(ts, "UNIFORM")} algo = RIGP approx = algo([0], solver, mu0 = [k0, t0], approxParameters = params, verbosity = 10, storeAllSamples = True) if len(method) == 2: approx.setupApprox() else: approx.setupApprox("LAST") print("--- Approximant ---") - approx.plotApprox(mu, name = 'u_app') - approx.plotHF(mu, name = 'u_HF') - approx.plotErr(mu, name = 'err_app') - approx.plotRes(mu, name = 'res_app') + approx.plotApprox(mu, plotargs = {"name": 'u_app'}) + approx.plotHF(mu, plotargs = {"name": 'u_HF'}) + approx.plotErr(mu, plotargs = {"name": 'err_app'}) + approx.plotRes(mu, plotargs = {"name": 'res_app'}) normErr = approx.normErr(mu)[0] normSol = approx.normHF(mu)[0] normRes = approx.normRes(mu)[0] normRHS = approx.normRHS(mu)[0] print("SolNorm:\t{:.5e}\nErr_app: \t{:.5e}\nErrRel_app:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_app: \t{:.5e}\nResRel_app:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) print("--- Closest snapshot ---") paramsNN = {'S':len(approx.mus), 'POD':True, 'sampler':ES()} approxNN = NN(solver, mu0 = [k0, t0], approxParameters = paramsNN, verbosity = 0) approxNN.setSamples(approx.storedSamplesFilenames) approx.purgeStoredSamples() - approxNN.plotApprox(mu, name = 'u_close') - approxNN.plotHF(mu, name = 'u_HF') - approxNN.plotErr(mu, name = 'err_close') - approxNN.plotRes(mu, name = 'res_close') + approxNN.plotApprox(mu, plotargs = {"name": 'u_close'}) + approxNN.plotHF(mu, plotargs = {"name": 'u_HF'}) + approxNN.plotErr(mu, plotargs = {"name": 'err_close'}) + approxNN.plotRes(mu, plotargs = {"name": 'res_close'}) normErr = approxNN.normErr(mu)[0] normSol = approxNN.normHF(mu)[0] normRes = approxNN.normRes(mu)[0] normRHS = approxNN.normRHS(mu)[0] print("SolNorm:\t{:.5e}\nErr_close:\t{:.5e}\nErrRel_close:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("RHSNorm:\t{:.5e}\nRes_close:\t{:.5e}\nResRel_close:\t{:.5e}".format( normRHS, normRes, normRes / normRHS)) verb = approx.verbosity approx.verbosity = 0 tspace = np.linspace(ts[0], ts[-1], 100) for j, t in enumerate(tspace): pls = approx.getPoles([None, t]) pls[np.abs(np.imag(pls ** 2.)) > 1e-5] = np.nan if j == 0: poles = np.empty((len(tspace), len(pls))) poles[j] = np.real(pls) approx.verbosity = verb fighandles += [plt.figure(figsize = (12, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) ax1.plot(poles, tspace) ax1.set_ylim(ts) ax1.set_xlabel('mu_1') ax1.set_ylabel('mu_2') ax1.grid() ax2.plot(poles, tspace) for mm in approx.musMarginal: ax2.plot(ks, [mm[0, 0]] * 2, 'k--', linewidth = 1) ax2.set_xlim(ks) ax2.set_ylim(ts) ax2.set_xlabel('mu_1') ax2.set_ylabel('mu_2') ax2.grid() plt.show() print("\n") diff --git a/examples/4_funnel_output/funnel_output.py 
b/examples/4_funnel_output/funnel_output.py index 57a1eff..edb19d2 100644 --- a/examples/4_funnel_output/funnel_output.py +++ b/examples/4_funnel_output/funnel_output.py @@ -1,61 +1,61 @@ import numpy as np from funnel_output_engine import FunnelOutputEngine as engine from rrompy.reduction_methods import (NearestNeighbor as NN, RationalInterpolant as RI, RationalInterpolantGreedy as RIG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, EmptySampler as ES) ks = [5., 10.] k0, n = np.mean(ks), 50 solver = engine(k0, n) k = 6.5 for method in ["RI", "RI_GREEDY"]: print("Testing {} method".format(method)) if "GREEDY" not in method: params = {'S':20, 'POD':True, 'polybasis':"CHEBYSHEV", 'sampler':QS(ks, "CHEBYSHEV")} algo = RI if "GREEDY" in method: params = {'S':2, 'POD':True, 'polybasis':"LEGENDRE", 'greedyTol':1e-1, 'maxIter':25, 'sampler':QS(ks, "UNIFORM"), 'errorEstimatorKind':"LOOK_AHEAD_OUTPUT"} algo = RIG approx = algo(solver, mu0 = k0, approxParameters = params, verbosity = 5) if "GREEDY" not in method: approx.setupApprox() else: approx.setupApprox("LAST") print("--- Approximant ---") - approx.plotApprox(k, name = 'u_app') - approx.plotHF(k, name = 'u_HF') - approx.plotErr(k, name = 'err_app') + approx.plotApprox(k, plotargs = {"name": 'u_app'}) + approx.plotHF(k, plotargs = {"name": 'u_HF'}) + approx.plotErr(k, plotargs = {"name": 'err_app'}) err = approx.getErr(k)[0] sol = approx.getHF(k)[0] normErr = np.abs(solver.L2NormMatrix.dot(err).dot(err.conj())) ** .5 normSol = np.abs(solver.L2NormMatrix.dot(sol).dot(sol.conj())) ** .5 print("SolNorm:\t{:.5e}\nErr_app: \t{:.5e}\nErrRel_app:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("--- Closest snapshot ---") approxNN = NN(solver, mu0 = k0, verbosity = 0, approxParameters = {'S':approx.samplingEngine.nsamples, 'POD':True, 'sampler':ES()}) approxNN.setSamples(approx.samplingEngine) - approxNN.plotApprox(k, name = 'u_close') - approxNN.plotHF(k, name = 'u_HF') - approxNN.plotErr(k, name = 'err_close') + approxNN.plotApprox(k, plotargs = {"name": 'u_close'}) + approxNN.plotHF(k, plotargs = {"name": 'u_HF'}) + approxNN.plotErr(k, plotargs = {"name": 'err_close'}) err = approxNN.getErr(k)[0] sol = approxNN.getHF(k)[0] normErr = np.abs(solver.L2NormMatrix.dot(err).dot(err.conj())) ** .5 normSol = np.abs(solver.L2NormMatrix.dot(sol).dot(sol.conj())) ** .5 print("SolNorm:\t{:.5e}\nErr_close:\t{:.5e}\nErrRel_close:\t{:.5e}".format( normSol, normErr, normErr / normSol)) print("Poles:\n{}".format(approx.getPoles())) print("\n") diff --git a/examples/5_anisotropic_square/anisotropic_square.py b/examples/5_anisotropic_square/anisotropic_square.py index 949d536..1c7e05a 100644 --- a/examples/5_anisotropic_square/anisotropic_square.py +++ b/examples/5_anisotropic_square/anisotropic_square.py @@ -1,82 +1,81 @@ ### example from Smetana, Zahm, Patera. Randomized residual-based error ### estimators for parametrized equations. 
import numpy as np import matplotlib.pyplot as plt from itertools import product from anisotropic_square_engine import (AnisotropicSquareEngine as engine, AnisotropicSquareEnginePoles as plsEx) from rrompy.reduction_methods import ( RationalInterpolantGreedyPivotedGreedyPoleMatch as RIGPG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, SparseGridSampler as SGS) zs, Ls = [10., 50.], [.2, 1.2] z0, L0, n = np.mean(zs), np.mean(Ls), 50 murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]] np.random.seed(4020) mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]), Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])] solver = engine(z0, L0, n) fighandles = [] params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3, "polybasisMarginal": "PIECEWISE_LINEAR_UNIFORM", "polybasis": "LEGENDRE", "samplerPivot":QS(zs, "UNIFORM"), "samplerTrainSet":QS(zs, "UNIFORM"), "errorEstimatorKind":"LOOK_AHEAD_RES", "errorEstimatorKindMarginal":"LOOK_AHEAD_RECOVER", - "matchingChordalRadius": [1., "AUTO"], "SMarginal": 3, "paramsMarginal": {"MMarginal": 2, "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]}, "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls), "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1., - "badPoleCorrection": "POLYNOMIAL"} + "badPoleCorrection": "POLYNOMIAL", "autoCollapse": 1} for shared, tol in product([1., 0.], [1., 3.]): print("Testing cutoff tolerance {} with shared ratio {}.".format(tol, shared)) solver.cutOffPolesRMinRel = - 1. - tol solver.cutOffPolesRMaxRel = 1. + tol params["matchingShared"] = shared approx = RIGPG([0], solver, mu0 = [z0, L0], approxParameters = params, verbosity = 5) approx.setupApprox("ALL") verb = approx.verbosity approx.verbosity = 0 tspace = np.linspace(Ls[0], Ls[-1], 100) for j, t in enumerate(tspace): plsE = plsEx(t, 0., zs[-1]) pls = approx.getPoles([None, t]) pls[np.abs(np.imag(pls)) > 1e-5] = np.nan if j == 0: polesE = np.empty((len(tspace), len(plsE))) poles = np.empty((len(tspace), len(pls))) polesE[:] = np.nan if len(plsE) > polesE.shape[1]: nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1])) nanR[:] = np.nan polesE = np.hstack((polesE, nanR)) polesE[j, : len(plsE)] = np.real(plsE) poles[j] = np.real(pls) approx.verbosity = verb fighandles += [plt.figure(figsize = (17, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) ax1.plot(poles, tspace) ax1.set_ylim(Ls) ax1.set_xlabel("mu_1") ax1.set_ylabel("mu_2") ax1.grid() ax2.plot(polesE, tspace, "k-.", linewidth = 1) ax2.plot(poles, tspace) for mm in approx.musMarginal: ax2.plot(zs, [mm[0, 0]] * 2, "k--", linewidth = 1) ax2.set_xlim(zs) ax2.set_ylim(Ls) ax2.set_xlabel("mu_1") ax2.set_ylabel("mu_2") ax2.grid() plt.show() print("\n") diff --git a/examples/9_active_remeshing/active_remeshing.py b/examples/9_active_remeshing/active_remeshing.py index 9b89f2e..1ce5bf0 100755 --- a/examples/9_active_remeshing/active_remeshing.py +++ b/examples/9_active_remeshing/active_remeshing.py @@ -1,145 +1,141 @@ import numpy as np from pickle import load from matplotlib import pyplot as plt from active_remeshing_engine import ActiveRemeshingEngine from rrompy.reduction_methods import ( RationalInterpolantGreedyPivotedNoMatch as RIGPNM, RationalInterpolantGreedyPivotedPoleMatch as RIGPG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, SparseGridSampler as SGS) zs, ts = [0., 100.], [0., .5] z0, t0, n = np.mean(zs), np.mean(ts), 150 solver = ActiveRemeshingEngine(z0, t0, n) solver.cutOffPolesRMinRel, 
solver.cutOffPolesRMaxRel = -2.5, 2.5 solver.cutOffPolesIMin, solver.cutOffPolesIMax = -.01, .01 mus = [[z0, ts[0]], [z0, ts[1]]] for mu in mus: u = solver.solve(mu, return_state = True)[0] Y = solver.applyC(u, mu)[0] _ = solver.plot(u, what = "REAL", name = "u(z={}, t={})".format(*mu), is_state = True, figsize = (12, 4)) print("Y(z={}, t={}) = {} (solution average)".format(*mu, np.real(Y))) fighandles = [] with open("./active_remeshing_hf_samples.pkl", "rb") as f: zspace, tspace, Yex = load(f) # zspace, tspace = np.linspace(*zs, 200), np.linspace(*ts, 50) # Yex = [[solver.solve([z, t]) for t in tspace] for z in zspace] # (from a ~2h45m simulation on one node of the EPFL Helvetios cluster) radius2Err = np.mean(np.abs(Yex) ** 2.) YCmin, YCmax = np.quantile(Yex, .05), np.quantile(Yex, .95) YexC = np.clip(Yex, YCmin, YCmax) approx = [] -for match in range(3): +for match in range(2): params = {"POD": True, "S": 5, "greedyTol": 1e-4, "nTestPoints": 500, "polybasis": "LEGENDRE", "samplerTrainSet": QS(zs, "UNIFORM"), "samplerPivot": QS(zs, "CHEBYSHEV"), "SMarginal": 5, "samplerMarginal": SGS(ts), "errorEstimatorKind": "LOOK_AHEAD_OUTPUT"} if match: - if match == 1: - print("\nTesting output-based matching.") - else: #if match == 2: - print("\nTesting output-based matching in chordal metric.") - params["matchingChordalRadius"] = [1., "AUTO"] + print("\nTesting output-based matching.") params["matchingWeight"] = 1. params["matchingShared"] = .75 params["polybasisMarginal"] = "PIECEWISE_LINEAR_UNIFORM" algo = RIGPG else: print("\nTesting matching-free approach.") algo = RIGPNM approx += [algo([0], solver, mu0 = [z0, t0], approxParameters = params, verbosity = 5)] if match: approx[match].setTrainedModel(approx[0]) else: approx[match].setupApprox() verb = approx[match].verbosity verbTM = approx[match].trainedModel.verbosity approx[match].verbosity, approx[match].trainedModel.verbosity = 0, 0 for j, t in enumerate(tspace): out = approx[match].getApprox(np.pad(zspace.reshape(-1, 1), [(0, 0), (0, 1)], "constant", constant_values = t)) pls = approx[match].getPoles([None, t]) pls[np.abs(np.imag(pls)) > 1e-5] = np.nan if j == 0: Ys = np.empty((len(zspace), len(tspace))) poles = np.empty((len(tspace), len(pls))) Ys[:, j] = out.re.data if len(pls) > poles.shape[1]: poles = np.pad(poles, [(0, 0), (0, len(pls) - poles.shape[1])], "constant", constant_values = np.nan) poles[j, : len(pls)] = np.real(pls) approx[match].verbosity = verb approx[match].trainedModel.verbosity = verbTM YsC = np.clip(Ys, YCmin, YCmax) err = (np.abs(Yex - YsC) / (np.abs(Yex) ** 2. + radius2Err) ** .5 / (np.abs(Ys) ** 2. 
+ radius2Err) ** .5) fighandles += [plt.figure(figsize = (15, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) if match: ax1.plot(poles, tspace) else: ax1.plot(poles, tspace, "k.") ax1.set_ylim(ts) ax1.set_xlabel("z") ax1.set_ylabel("t") ax1.grid() if match: ax2.plot(poles, tspace) else: ax2.plot(poles, tspace, "k.") for mm in approx[match].musMarginal: ax2.plot(zs, [mm[0, 0]] * 2, "k--", linewidth = 1) ax2.set_xlim(zs) ax2.set_ylim(ts) ax2.set_xlabel("z") ax2.set_ylabel("t") ax2.grid() plt.show() print("Approximate poles") fighandles += [plt.figure(figsize = (15, 5))] ax1 = fighandles[-1].add_subplot(1, 2, 1) ax2 = fighandles[-1].add_subplot(1, 2, 2) p = ax1.contourf(np.repeat(zspace.reshape(-1, 1), len(tspace), axis = 1), np.repeat(tspace.reshape(1, -1), len(zspace), axis = 0), YsC, vmin = YCmin, vmax = YCmax, levels = np.linspace(YCmin, YCmax, 50)) plt.colorbar(p, ax = ax1) ax1.set_xlabel("z") ax1.set_ylabel("t") ax1.grid() p = ax2.contourf(np.repeat(zspace.reshape(-1, 1), len(tspace), axis = 1), np.repeat(tspace.reshape(1, -1), len(zspace), axis = 0), YexC, vmin = YCmin, vmax = YCmax, levels = np.linspace(YCmin, YCmax, 50)) ax2.set_xlabel("z") ax2.set_ylabel("t") ax2.grid() plt.colorbar(p, ax = ax2) plt.show() print("Approximate and exact output\n") fighandles += [plt.figure(figsize = (9, 6))] ax1 = fighandles[-1].add_subplot(1, 1, 1) p = ax1.contourf(np.repeat(zspace.reshape(-1, 1), len(tspace), axis = 1), np.repeat(tspace.reshape(1, -1), len(zspace), axis = 0), np.log10(err), vmin = -10, vmax = 0, levels = np.linspace(-10, 0, 50)) plt.colorbar(p, ax = ax1) ax1.set_xlabel("z") ax1.set_ylabel("t") ax1.grid() plt.show() print("Output error (log-chordal)\n") diff --git a/examples/9_active_remeshing/active_remeshing_engine.py b/examples/9_active_remeshing/active_remeshing_engine.py index d2a5e88..6275d5b 100644 --- a/examples/9_active_remeshing/active_remeshing_engine.py +++ b/examples/9_active_remeshing/active_remeshing_engine.py @@ -1,89 +1,89 @@ import numpy as np import ufl import fenics as fen import mshr from rrompy.utilities.base.decorators import (pivot_affine_construct, mupivot_independent) from rrompy.hfengines.fenics_engines import HelmholtzProblemEngine from rrompy.utilities.numerical.hash_derivative import ( hashDerivativeToIdx as hashD) from rrompy.solver.fenics import fenZERO, fenONE, fenics2Vector from rrompy.parameter import parameterMap as pMap from rrompy.utilities.exception_manager import RROMPyException class ActiveRemeshingEngine(HelmholtzProblemEngine): def __init__(self, z0:float, t0:float, n:int): super().__init__(mu0 = [z0, t0]) self._affinePoly = False self._nMesh = n self.meshGen(t0) self.parameterMap = pMap(1., 2) self.DirichletBoundary = lambda x, on_boundary: (on_boundary and np.abs(x[0]) >= .75 - 1e-5 or np.abs(x[1]) >= .5 - 1e-5) self.NeumannBoundary = "REST" self.cutOffPolesIMin, self.cutOffPolesIMax = -1e-2, 1e-2 def meshGen(self, t:float): t = np.real(t) if (not hasattr(self, "_tMesh") or not np.isclose(self._tMesh, t)): e, self._tMesh = .01, t tipx, tipy = .5 * np.sin(t), .5 - .5 * np.cos(t) tiplx, tiply = tipx + .5 * e * np.cos(t), tipy + .5 * e * np.sin(t) tiprx, tipry = tipx - .5 * e * np.cos(t), tipy - .5 * e * np.sin(t) basx, basy = - e * np.sin(t), .5 + e * np.cos(t) baslx, basly = basx + .5 * e * np.cos(t), basy + .5 * e * np.sin(t) basrx, basry = basx - .5 * e * np.cos(t), basy - .5 * e * np.sin(t) mesh = mshr.generate_mesh( mshr.Rectangle(fen.Point(-.75, -.5), fen.Point(.75, .5)) - 
mshr.Polygon([fen.Point(tiprx, tipry), fen.Point(tiplx, tiply), fen.Point(baslx, basly), fen.Point(basrx, basry)]) - mshr.Circle(fen.Point(tipx, tipy), .5 * e), self._nMesh) self.V = fen.FunctionSpace(mesh, "P", 1) self.As, self._C = [None] * 2, None self.autoSetDS() - if hasattr(self, "energyNormMatrix"): - del self.energyNormMatrix - if hasattr(self, "energyNormDualMatrix"): - del self.energyNormDualMatrix + if hasattr(self, "_energyNormMatrix"): + del self._energyNormMatrix + if hasattr(self, "_energyNormDualMatrix"): + del self._energyNormDualMatrix def getForcingTerm(self, mu = []): mu = self.checkParameter(mu) self.meshGen(mu(0, 1)) x, y = fen.SpatialCoordinate(self.V.mesh())[:] rightZone = .1875**-2 * ufl.conditional(ufl.And( ufl.And(ufl.ge(x, -.5625), ufl.le(x, -.375)), ufl.And(ufl.ge(y, .125), ufl.le(y, .3125))), fenONE, fenZERO) return rightZone, fenZERO @pivot_affine_construct def A(self, mu = [], der = 0): derI = hashD(der) if hasattr(der, "__len__") else der if derI > 0: raise Exception("Derivatives not implemented.") mu = self.checkParameter(mu) self.meshGen(mu(0, 1)) return HelmholtzProblemEngine.A(self, mu, der) @pivot_affine_construct def b(self, mu = [], der = 0): derI = hashD(der) if hasattr(der, "__len__") else der if derI > 0: raise Exception("Derivatives not implemented.") if self.thbs[0] is None: self.thbs = self.getMonomialWeights(self.nbs) fen0 = self.getForcingTerm(mu)[0] * self.v * fen.dx DBC = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) self.bs = [fenics2Vector(fen0, {}, DBC, 1)] return HelmholtzProblemEngine.b(self, mu, der) @mupivot_independent def C(self, mu): mu = self.checkParameterList(mu) if not np.all(np.isclose(mu(1), mu(0, 1))): raise RROMPyException(("Simultaneous evaluation of C on multiple " "meshes not supported.")) self.meshGen(mu(0, 1)) if self._C is None: self._C = fenics2Vector(self.v * fen.dx, {}).reshape(1, -1) / 1.5 return self._C diff --git a/rrompy/hfengines/base/fenics_engine_base.py b/rrompy/hfengines/base/fenics_engine_base.py index 2a2c303..613aad7 100644 --- a/rrompy/hfengines/base/fenics_engine_base.py +++ b/rrompy/hfengines/base/fenics_engine_base.py @@ -1,514 +1,512 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from os import path, mkdir import fenics as fen import numpy as np from matplotlib import pyplot as plt from .scipy_engine_base import ScipyEngineBase, checknports from rrompy.utilities.base.types import (Np1D, strLst, FenFunc, Tuple, List, FigHandle) from rrompy.utilities.base.data_structures import purgeList, getNewFilename from rrompy.utilities.base import verbosityManager as vbMng from rrompy.solver.fenics import (L2NormMatrix, fenplot, interp_project, serializeFunctionSpace) from .boundary_conditions import BoundaryConditions from rrompy.utilities.exception_manager import RROMPyException from rrompy.utilities.parallel import (SELF, masterCore, bcast, indicesScatter, listGather) __all__ = ['FenicsEngineBase', 'FenicsEngineBaseTensorized'] def plottingBaseFen(u, fig, V, what, nRows, subplotidx, warping, name, colorbar, fenplotArgs): if 'ABS' in what: uAb = fen.Function(V) uAb.vector().set_local(np.abs(u)) subplotidx = subplotidx + 1 ax = fig.add_subplot(nRows, len(what), subplotidx) p = fenplot(uAb, warping = warping, title = "|{}|".format(name), **fenplotArgs) if colorbar: fig.colorbar(p, ax = ax) if 'PHASE' in what: uPh = fen.Function(V) uPh.vector().set_local(np.angle(u)) subplotidx = subplotidx + 1 ax = fig.add_subplot(nRows, len(what), subplotidx) p = fenplot(uPh, warping = warping, title = "phase({})".format(name), **fenplotArgs) if colorbar: fig.colorbar(p, ax = ax) if 'REAL' in what: uRe = fen.Function(V) uRe.vector().set_local(np.real(u)) subplotidx = subplotidx + 1 ax = fig.add_subplot(nRows, len(what), subplotidx) p = fenplot(uRe, warping = warping, title = "Re({})".format(name), **fenplotArgs) if colorbar: fig.colorbar(p, ax = ax) if 'IMAG' in what: uIm = fen.Function(V) uIm.vector().set_local(np.imag(u)) subplotidx = subplotidx + 1 ax = fig.add_subplot(nRows, len(what), subplotidx) p = fenplot(uIm, warping = warping, title = "Im({})".format(name), **fenplotArgs) if colorbar: fig.colorbar(p, ax = ax) class FenicsEngineBase(ScipyEngineBase): """Generic solver for parametric fenics problems.""" def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10, timestamp : bool = True): super().__init__(verbosity = verbosity, timestamp = timestamp) self.BCManager = BoundaryConditions("Dirichlet") self.V = fen.FunctionSpace(fen.UnitSquareMesh(SELF, 1, 1), "P", 1) self.degree_threshold = degree_threshold @property def V(self): """Value of V.""" return self._V @V.setter def V(self, V): if not type(V).__name__ == 'FunctionSpace': raise RROMPyException("V type not recognized.") self.dsToBeSet = True self._V = serializeFunctionSpace(V) self.u = fen.TrialFunction(self._V) self.v = fen.TestFunction(self._V) @property def spacedim(self): if hasattr(self, "_V"): return self.V.dim() return super().spacedim def autoSetDS(self): """Set FEniCS boundary measure based on boundary function handles.""" if self.dsToBeSet: vbMng(self, "INIT", "Initializing boundary measures.", 20) mesh = self.V.mesh() NB = self.NeumannBoundary RB = self.RobinBoundary boundary_markers = fen.MeshFunction("size_t", mesh, mesh.topology().dim() - 1) NB.mark(boundary_markers, 0) RB.mark(boundary_markers, 1) self.ds = fen.Measure("ds", domain = mesh, subdomain_data = boundary_markers) self.dsToBeSet = False vbMng(self, "DEL", "Done assembling boundary measures.", 20) def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. 
""" vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = L2NormMatrix(self.V) + self._energyNormMatrix = L2NormMatrix(self.V) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ - if not hasattr(self, "energyNormMatrix"): - self.buildEnergyNormForm() - self.energyNormDualMatrix = self.energyNormMatrix + self._energyNormDualMatrix = self.energyNormMatrix def liftDirichletData(self) -> Np1D: """Lift Dirichlet datum.""" if not hasattr(self, "_liftedDirichletDatum"): liftRe = interp_project(self.DirichletDatum[0], self.V) liftIm = interp_project(self.DirichletDatum[1], self.V) self._liftedDirichletDatum = (np.array(liftRe.vector()) + 1.j * np.array(liftIm.vector())) return self._liftedDirichletDatum def reduceQuadratureDegree(self, fun:FenFunc, name:str): """Check whether to reduce compiler parameters to degree threshold.""" if not np.isinf(self.degree_threshold): from ufl.algorithms.estimate_degrees import ( estimate_total_polynomial_degree as ETPD) try: deg = ETPD(fun) except: return False if deg > self.degree_threshold: vbMng(self, "MAIN", ("Reducing quadrature degree from {} to {} for " "{}.").format(deg, self.degree_threshold, name), 15) return True return False def iterReduceQuadratureDegree(self, funsNames:List[Tuple[FenFunc, str]]): """ Iterate reduceQuadratureDegree over list and define reduce compiler parameters. """ if funsNames is not None: for fun, name in funsNames: if self.reduceQuadratureDegree(fun, name): return {"quadrature_degree" : self.degree_threshold} return {} def plot(self, u:Np1D, warping : List[callable] = None, is_state : bool = False, name : str = "u", save : str = None, what : strLst = 'all', forceNewFile : bool = True, saveFormat : str = "eps", saveDPI : int = 100, show : bool = True, colorMap : str = "jet", fenplotArgs : dict = {}, **figspecs) -> Tuple[FigHandle, str]: """ Do some nice plots of the complex-valued function with given dofs. Args: u: numpy complex array with function dofs. warping(optional): Domain warping functions. is_state(optional): whether given u is value before multiplication by c. Defaults to False. name(optional): Name to be shown as title of the plots. Defaults to 'u'. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. what(optional): Which plots to do. If list, can contain 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. forceNewFile(optional): Whether to create new output file. saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. show(optional): Whether to show figure. Defaults to True. colorMap(optional): Pyplot colormap. Defaults to 'jet'. fenplotArgs(optional): Optional arguments for fenplot. figspecs(optional key args): Optional arguments for matplotlib figure creation. Returns: Output filename and figure handle. """ if not is_state and not self.isCEye: return super().plot(u, warping, False, name, save, what, forceNewFile, saveFormat, saveDPI, show, colorMap, fenplotArgs, **figspecs) if isinstance(what, (str,)): if what.upper() == 'ALL': what = ['ABS', 'PHASE', 'REAL', 'IMAG'] else: what = [what] what = purgeList(what, ['ABS', 'PHASE', 'REAL', 'IMAG'], listname = self.name() + ".what", baselevel = 1) if len(what) == 0: return out = None if masterCore(): if 'figsize' not in figspecs.keys(): figspecs['figsize'] = plt.figaspect(1. 
/ len(what)) fig = plt.figure(**figspecs) plt.set_cmap(colorMap) plottingBaseFen(u, fig, self.V, what, 1, 0, warping, name, self.V.mesh().geometric_dimension() > 1, fenplotArgs) plt.tight_layout() if save is not None: save = save.strip() if forceNewFile: fileOut = getNewFilename("{}_fig_".format(save), saveFormat) else: fileOut = "{}_fig.{}".format(save, saveFormat) fig.savefig(fileOut, format = saveFormat, dpi = saveDPI) else: fileOut = None if show: plt.show() out = fig if fileOut is None else (fig, fileOut) return bcast(out) def plotmesh(self, warping : List[callable] = None, name : str = "Mesh", save : str = None, forceNewFile : bool = True, saveFormat : str = "eps", saveDPI : int = 100, show : bool = True, fenplotArgs : dict = {}, **figspecs) -> Tuple[FigHandle, str]: """ Do a nice plot of the mesh. Args: u: numpy complex array with function dofs. warping(optional): Domain warping functions. name(optional): Name to be shown as title of the plots. Defaults to 'u'. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. forceNewFile(optional): Whether to create new output file. saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. show(optional): Whether to show figure. Defaults to True. fenplotArgs(optional): Optional arguments for fenplot. figspecs(optional key args): Optional arguments for matplotlib figure creation. Returns: Output filename and figure handle. """ out = None if masterCore(): fig = plt.figure(**figspecs) fenplot(self.V.mesh(), warping = warping, **fenplotArgs) plt.tight_layout() if save is not None: save = save.strip() if forceNewFile: fileOut = getNewFilename("{}_msh_".format(save), saveFormat) else: fileOut = "{}_msh.{}".format(save, saveFormat) fig.savefig(fileOut, format = saveFormat, dpi = saveDPI) else: fileOut = None if show: plt.show() out = fig if fileOut is None else (fig, fileOut) return bcast(out) def outParaview(self, u:Np1D, warping : List[callable] = None, is_state : bool = False, name : str = "u", filename : str = "out", time : float = 0., what : strLst = 'all', forceNewFile : bool = True, folder : bool = False, filePW = None) -> str: """ Output complex-valued function with given dofs to ParaView file. Args: u: numpy complex array with function dofs. warping(optional): Domain warping functions. is_state(optional): whether given u is value before multiplication by c. Defaults to False. name(optional): Base name to be used for data output. filename(optional): Name of output file. time(optional): Timestamp. what(optional): Which plots to do. If list, can contain 'MESH', 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. forceNewFile(optional): Whether to create new output file. folder(optional): Whether to create an additional folder layer. filePW(optional): Fenics File entity (for time series). Returns: Output filename. 
""" if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) if isinstance(what, (str,)): if what.upper() == 'ALL': what = ['MESH', 'ABS', 'PHASE', 'REAL', 'IMAG'] else: what = [what] what = purgeList(what, ['MESH', 'ABS', 'PHASE', 'REAL', 'IMAG'], listname = self.name() + ".what", baselevel = 1) if len(what) == 0: return filePW = None if masterCore(): if filePW is None: if folder: if not path.exists(filename + "/"): mkdir(filename) idxpath = filename.rfind("/") filename += "/" + filename[idxpath + 1 :] if forceNewFile: filePW = fen.File(getNewFilename(filename, "pvd")) else: filePW = fen.File("{}.pvd".format(filename)) if warping is not None: fen.ALE.move(self.V.mesh(), interp_project(warping[0], self.V.mesh())) if what == ['MESH']: filePW << (self.V.mesh(), time) if 'ABS' in what: uAb = fen.Function(self.V, name = "{}_ABS".format(name)) uAb.vector().set_local(np.abs(u)) filePW << (uAb, time) if 'PHASE' in what: uPh = fen.Function(self.V, name = "{}_PHASE".format(name)) uPh.vector().set_local(np.angle(u)) filePW << (uPh, time) if 'REAL' in what: uRe = fen.Function(self.V, name = "{}_REAL".format(name)) uRe.vector().set_local(np.real(u)) filePW << (uRe, time) if 'IMAG' in what: uIm = fen.Function(self.V, name = "{}_IMAG".format(name)) uIm.vector().set_local(np.imag(u)) filePW << (uIm, time) if warping is not None: fen.ALE.move(self.V.mesh(), interp_project(warping[1], self.V.mesh())) return bcast(filePW) def outParaviewTimeDomain(self, u:Np1D, omega:float, warping : List[callable] = None, is_state : bool = False, timeFinal : float = None, periodResolution : int = 20, name : str = "u", filename : str = "out", forceNewFile : bool = True, folder : bool = False) -> str: """ Output complex-valued function with given dofs to ParaView file, converted to time domain. Args: u: numpy complex array with function dofs. omega: frequency. warping(optional): Domain warping functions. is_state(optional): whether given u is value before multiplication by c. Defaults to False. timeFinal(optional): final time of simulation. periodResolution(optional): number of time steps per period. name(optional): Base name to be used for data output. filename(optional): Name of output file. forceNewFile(optional): Whether to create new output file. folder(optional): Whether to create an additional folder layer. Returns: Output filename. """ if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) filePW = None if masterCore(): if folder: if not path.exists(filename + "/"): mkdir(filename) idxpath = filename.rfind("/") filename += "/" + filename[idxpath + 1 :] if forceNewFile: filePW = fen.File(getNewFilename(filename, "pvd")) else: filePW = fen.File("{}.pvd".format(filename)) t = 0. dt = 2. * np.pi / np.abs(omega) / periodResolution if timeFinal is None: timeFinal = 2. 
* np.pi / np.abs(omega) - dt if warping is not None: fen.ALE.move(self.V.mesh(), interp_project(warping[0], self.V.mesh())) for j in range(int(np.ceil(timeFinal / dt)) + 1): ut = fen.Function(self.V, name = name) ut.vector().set_local(np.real(u) * np.cos(omega * t) + np.imag(u) * np.sin(omega * t)) filePW << (ut, t) t += dt if warping is not None: fen.ALE.move(self.V.mesh(), interp_project(warping[1], self.V.mesh())) return bcast(filePW) class FenicsEngineBaseTensorized(FenicsEngineBase): """The number of tensorized dimensions should be assigned to nports.""" def plot(self, u:Np1D, warping : List[callable] = None, is_state : bool = False, name : str = "u", save : str = None, what : strLst = 'all', forceNewFile : bool = True, saveFormat : str = "eps", saveDPI : int = 100, show : bool = True, colorMap : str = "jet", fenplotArgs : dict = {}, **figspecs) -> Tuple[FigHandle, str]: """ Do some nice plots of the complex-valued function with given dofs. Args: u: numpy complex array with function dofs. warping(optional): Domain warping functions. is_state(optional): whether given u is value before multiplication by c. Defaults to False. name(optional): Name to be shown as title of the plots. Defaults to 'u'. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. what(optional): Which plots to do. If list, can contain 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. forceNewFile(optional): Whether to create new output file. saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. show(optional): Whether to show figure. Defaults to True. colorMap(optional): Pyplot colormap. Defaults to 'jet'. fenplotArgs(optional): Optional arguments for fenplot. figspecs(optional key args): Optional arguments for matplotlib figure creation. Returns: Output filename and figure handle. """ nP = checknports(self) if not is_state and not self.isCEye: return super().plot(u.reshape(-1, nP), warping, False, name, save, what, forceNewFile, saveFormat, saveDPI, show, colorMap, fenplotArgs, **figspecs) if isinstance(what, (str,)): if what.upper() == 'ALL': what = ['ABS', 'PHASE', 'REAL', 'IMAG'] else: what = [what] what = purgeList(what, ['ABS', 'PHASE', 'REAL', 'IMAG'], listname = self.name() + ".what", baselevel = 1) if len(what) == 0: return out = None if masterCore(): if 'figsize' not in figspecs.keys(): figspecs['figsize'] = plt.figaspect(1. 
/ len(what)) figspecs['figsize'][1] *= nP fig = plt.figure(**figspecs) plt.set_cmap(colorMap) for i in range(nP): plottingBaseFen(u[i :: nP], fig, self.V, what, nP, i * len(what), warping, "{}_port{}".format(name, i + 1), self.V.mesh().geometric_dimension() > 1, fenplotArgs) plt.tight_layout() if save is not None: save = save.strip() if forceNewFile: fileOut = getNewFilename("{}_fig_".format(save), saveFormat) else: fileOut = "{}_fig.{}".format(save, saveFormat) fig.savefig(fileOut, format = saveFormat, dpi = saveDPI) else: fileOut = None if show: plt.show() out = fig if fileOut is None else (fig, fileOut) return bcast(out) def outParaview(self, u:Np1D, *args, **kwargs) -> List[str]: nP = checknports(self) idx = indicesScatter(nP)[0] filesOut = [] if len(idx) > 0: for j in idx: filesOut += super().outParaview(u[j :: nP], *args, **kwargs) filesOut = listGather(filesOut) if filesOut[0] is None: return None return filesOut def outParaviewTimeDomain(self, u:Np1D, *args, **kwargs) -> List[str]: nP = checknports(self) idx = indicesScatter(nP)[0] filesOut = [] if len(idx) > 0: for j in idx: filesOut += super().outParaviewTimeDomain(u[j :: nP], *args, **kwargs) filesOut = listGather(filesOut) if filesOut[0] is None: return None return filesOut diff --git a/rrompy/hfengines/base/hfengine_base.py b/rrompy/hfengines/base/hfengine_base.py index e444519..244724e 100644 --- a/rrompy/hfengines/base/hfengine_base.py +++ b/rrompy/hfengines/base/hfengine_base.py @@ -1,386 +1,395 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from abc import abstractmethod import numpy as np import scipy.sparse as scsp from numbers import Number from collections.abc import Iterable from copy import copy as softcopy from rrompy.utilities.base.decorators import (nonaffine_construct, mu_independent) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, DictAny, paramVal, paramList, sampList) from rrompy.utilities.numerical import solve as tsolve, dot, potential from rrompy.utilities.expression import expressionEvaluator from rrompy.utilities.exception_manager import RROMPyAssert from rrompy.sampling.sample_list import sampleList from rrompy.parameter import (checkParameter, checkParameterList, parameterList, parameterMap as pMap) from rrompy.solver.linear_solver import setupSolver from rrompy.utilities.parallel import (poolRank, masterCore, listScatter, matrixGatherv, isend, recv) __all__ = ['HFEngineBase'] class HFEngineBase: """Generic solver for parametric problems.""" def __init__(self, verbosity : int = 10, timestamp : bool = True): self.verbosity = verbosity self.timestamp = timestamp self.setSolver("SPSOLVE", {"use_umfpack" : False}) self.npar = 0 self._C = None def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return self.name() def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) def __dir_base__(self): return [x for x in self.__dir__() if x[:2] != "__"] def __deepcopy__(self, memo): return softcopy(self) @property def npar(self): """Value of npar.""" return self._npar @npar.setter def npar(self, npar): nparOld = self._npar if hasattr(self, "_npar") else -1 if npar != nparOld: self.parameterMap = pMap(1., npar) self._npar = npar @property def spacedim(self): return 1 def checkParameter(self, mu:paramVal) -> paramVal: muP = checkParameter(mu, self.npar) if self.npar == 0: muP.reset((1, 0), muP.dtype) return muP def checkParameterList(self, mu:paramList, check_if_single : bool = False) -> paramList: muL = checkParameterList(mu, self.npar, check_if_single) return muL def mapParameterList(self, mu:paramList, direct : str = "F", idx : List[int] = None) -> paramList: if idx is None: idx = np.arange(self.npar) muMapped = checkParameterList(mu, len(idx)) for j, d in enumerate(idx): muMapped.data[:, j] = expressionEvaluator( self.parameterMap[direct][d], muMapped(j)).flatten() return muMapped + @property + def energyNormMatrix(self): + if not hasattr(self, "_energyNormMatrix"): + self.buildEnergyNormForm() + return self._energyNormMatrix def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ - self.energyNormMatrix = 1. + self._energyNormMatrix = 1. + @property + def energyNormDualMatrix(self): + if not hasattr(self, "_energyNormDualMatrix"): + self.buildEnergyNormDualForm() + return self._energyNormDualMatrix def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ - self.energyNormDualMatrix = 1. + self._energyNormDualMatrix = 1. + @property + def energyNormOutputMatrix(self): + if not hasattr(self, "_energyNormOutputMatrix"): + self.buildEnergyNormOutput() + return self._energyNormOutputMatrix def buildEnergyNormOutput(self): """ Build sparse matrix (in CSR format) representative of scalar product over output space. """ - self.energyNormOutputMatrix = 1. + self._energyNormOutputMatrix = 1. 
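The edits above turn the eagerly assigned `energyNormMatrix`, `energyNormDualMatrix`, and `energyNormOutputMatrix` attributes into private fields with build-on-first-access properties; this is what lets `ActiveRemeshingEngine.meshGen` invalidate the norms after remeshing by simply deleting the private attributes. A standalone sketch of the pattern; the class and placeholder value are illustrative only:
```python
class LazyNormSketch:
    """Mirrors the property/build pair introduced in the diff."""
    @property
    def energyNormMatrix(self):
        # build on first access, then cache in the private attribute
        if not hasattr(self, "_energyNormMatrix"):
            self.buildEnergyNormForm()
        return self._energyNormMatrix

    def buildEnergyNormForm(self):
        self._energyNormMatrix = 1.  # subclasses assemble the real Gram matrix

obj = LazyNormSketch()
assert obj.energyNormMatrix == 1.    # triggers the lazy build
del obj._energyNormMatrix            # invalidation, as in meshGen above
```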
def innerProduct(self, u:Np2D, v:Np2D, onlyDiag : bool = False, dual : bool = False, is_state : bool = True) -> Np2D: """Scalar product.""" if is_state or self.isCEye: if dual: - if not hasattr(self, "energyNormDualMatrix"): - self.buildEnergyNormDualForm() energyMat = self.energyNormDualMatrix else: - if not hasattr(self, "energyNormMatrix"): - self.buildEnergyNormForm() energyMat = self.energyNormMatrix else: - if not hasattr(self, "energyNormOutputMatrix"): - self.buildEnergyNormOutput() energyMat = self.energyNormOutputMatrix if isinstance(u, (parameterList, sampleList)): u = u.data if isinstance(v, (parameterList, sampleList)): v = v.data if onlyDiag: return np.sum(dot(energyMat, u) * v.conj(), axis = 0) return dot(dot(energyMat, u).T, v.conj()).T def norm(self, u:Np2D, dual : bool = False, is_state : bool = True) -> Np1D: return np.abs(self.innerProduct(u, u, onlyDiag = True, dual = dual, is_state = is_state)) ** .5 def baselineA(self): """Return 0 of shape consistent with operator of linear system.""" if (hasattr(self, "As") and isinstance(self.As, Iterable) and self.As[0] is not None): d = self.As[0].shape[0] else: d = self.spacedim return scsp.csr_matrix((np.zeros(0), np.zeros(0), np.zeros(d + 1)), shape = (d, d), dtype = np.complex) def baselineb(self): """Return 0 of shape consistent with RHS of linear system.""" return np.zeros(self.spacedim, dtype = np.complex) @nonaffine_construct @abstractmethod def A(self, mu : paramVal = [], der : List[int] = 0) -> Np2D: """ Assemble terms of operator of linear system and return it (or its derivative) at a given parameter. """ return @nonaffine_construct @abstractmethod def b(self, mu : paramVal = [], der : List[int] = 0) -> Np1D: """ Assemble terms of RHS of linear system and return it (or its derivative) at a given parameter. """ return @mu_independent def C(self, mu:paramVal): """ Value of C. Should be overridden (with something like return self._C(mu) ) if a mu-dependent C is needed. """ if self._C is None: self._C = 1. return self._C @property def isCEye(self): """ Whether the action of C can be seen as a scalar multiplication. Should be overridden (with return True ) if a mu-dependent scalar C is used. """ return isinstance(self._C, Number) def applyC(self, u:sampList, mu:paramVal): """Apply LHS of linear system.""" return dot(self.C(mu), u) def setSolver(self, solverType:str, solverArgs : DictAny = {}): """Choose solver type and parameters.""" self._solver, self._solverArgs = setupSolver(solverType, solverArgs) def solve(self, mu : paramList = [], RHS : sampList = None, return_state : bool = False) -> sampList: """ Find solution of linear system. Args: mu: parameter value. RHS: RHS of linear system. If None, defaults to that of parametric system. Defaults to None. return_state: whether to return state before multiplication by c. Defaults to False. 
""" from rrompy.sampling import sampleList, emptySampleList if mu == []: mu = self.mu0 mu = self.checkParameterList(mu) if len(mu) == 0: return emptySampleList() mu = self.checkParameterList(mu) mu_loc, idx, sizes = listScatter(mu, return_sizes = True) mu_loc = self.checkParameterList(mu_loc) req, emptyCores = [], np.where(sizes == 0)[0] if len(mu_loc) == 0: uL, uT = recv(source = 0, tag = poolRank()) sol = np.empty((uL, 0), dtype = uT) else: if RHS is None: # build RHSs RHS = sampleList([self.b(m) for m in mu_loc]) else: RHS = sampleList(RHS) if len(RHS) > 1: RHS = sampleList([RHS[i] for i in idx]) mult = 0 if len(RHS) == 1 else 1 RROMPyAssert(mult * (len(mu_loc) - 1) + 1, len(RHS), "Sample size") for j, mj in enumerate(mu_loc): u = tsolve(self.A(mj), RHS[mult * j], self._solver, self._solverArgs) if not return_state: u = self.applyC(u, mj) if j == 0: sol = np.empty((len(u), len(mu_loc)), dtype = u.dtype) if masterCore(): for dest in emptyCores: req += [isend((len(u), u.dtype), dest = dest, tag = dest)] sol[:, j] = u for r in req: r.wait() sol = matrixGatherv(sol, sizes) return sampleList(sol) def residual(self, mu : paramList = [], u : sampList = None, post_c : bool = True) -> sampList: """ Find residual of linear system for given approximate solution. Args: mu: parameter value. u: numpy complex array with function dofs. If None, set to 0. post_c: whether to post-process using c. Defaults to True. """ from rrompy.sampling import sampleList, emptySampleList if mu == []: mu = self.mu0 mu = self.checkParameterList(mu) if len(mu) == 0: return emptySampleList() mu_loc, idx, sizes = listScatter(mu, return_sizes = True) mu_loc = self.checkParameterList(mu_loc) req, emptyCores = [], np.where(sizes == 0)[0] if len(mu_loc) == 0: uL, uT = recv(source = 0, tag = poolRank()) res = np.empty((uL, 0), dtype = uT) else: v = sampleList(np.zeros((self.spacedim, len(mu_loc)))) if u is not None: u = sampleList(u) v = v + sampleList([u[i] for i in idx]) for j, (mj, vj) in enumerate(zip(mu_loc, v)): r = self.b(mj) - dot(self.A(mj), vj) if post_c: r = self.applyC(r, mj) if j == 0: res = np.empty((len(r), len(mu_loc)), dtype = r.dtype) if masterCore(): for dest in emptyCores: req += [isend((len(r), r.dtype), dest = dest, tag = dest)] res[:, j] = r for r in req: r.wait() res = matrixGatherv(res, sizes) return sampleList(res) cutOffPolesRMax,cutOffPolesRMin = np.inf, - np.inf cutOffPolesIMax, cutOffPolesIMin = np.inf, - np.inf def flagBadPolesResiduesAbsolute(self, poles:Np1D, residues : Np1D = None, projMat : Np2D = None) -> Np1D: """ Flag (numerical) poles/residues which are impossible. Args: poles: poles to be judged. residues: residues norms to be judged. projMat: matrix for projection of residues. 
""" poles = np.array(poles).flatten() flag = np.zeros(len(poles), dtype = bool) RMax, RMin = self.cutOffPolesRMax, self.cutOffPolesRMin IMax, IMin = self.cutOffPolesIMax, self.cutOffPolesIMin if not np.isinf(RMax): flag = flag + (np.real(poles) > RMax) if not np.isinf(RMin): flag = flag + (np.real(poles) < RMin) if not np.isinf(IMax): flag = flag + (np.imag(poles) > IMax) if not np.isinf(IMin): flag = flag + (np.imag(poles) < IMin) return flag cutOffPolesPotentialMax = np.inf cutOffPolesRMaxRel, cutOffPolesRMinRel = np.inf, - np.inf cutOffPolesIMaxRel, cutOffPolesIMinRel = np.inf, - np.inf cutOffResNormMin = -1 cutOffResAngleMin, cutOffResAngleMax = -1, np.pi + 1 def flagBadPolesResiduesRelative(self, poles:Np1D, residues : Np1D = None, projMat : Np2D = None, foci : Tuple[float, float] = [-1., 1.]) \ -> Np1D: """ Flag (numerical) poles/residues which are impossible. Args: poles: poles to be judged. residues: residues norms to be judged. projMat: matrix for projection of residues. foci: foci for potential evaluation. """ poles = np.array(poles).flatten() flag = np.zeros(len(poles), dtype = bool) potMax = self.cutOffPolesPotentialMax RMax, RMin = self.cutOffPolesRMaxRel, self.cutOffPolesRMinRel IMax, IMin = self.cutOffPolesIMaxRel, self.cutOffPolesIMinRel if not np.isinf(potMax) or (residues is not None and not self._ignoreResidues): plsInf = np.isinf(poles) pot = potential(poles, foci) if not np.isinf(potMax): flag = flag + (pot > potMax) if not np.isinf(RMax): flag = flag + (np.real(poles) > RMax) if not np.isinf(RMin): flag = flag + (np.real(poles) < RMin) if not np.isinf(IMax): flag = flag + (np.imag(poles) > IMax) if not np.isinf(IMin): flag = flag + (np.imag(poles) < IMin) if residues is not None and not self._ignoreResidues: residues = np.array(residues).reshape(-1, len(poles)) resGood = np.where(flag + plsInf == False)[0] if len(resGood) > 0: residues = residues[:, resGood] / pot[resGood] if projMat is None: resNorm = np.linalg.norm(residues, axis = 0) else: residues = projMat.dot(residues) resNorm = self.norm(residues) if self.cutOffResNormMin > 0.: flag[resGood[resNorm < self.cutOffResNormMin * np.max(resNorm)]] = 1 resGood = np.where(flag + plsInf == False)[0] if len(resGood) > 0 and (self.cutOffResAngleMin > 0. or self.cutOffResAngleMax < np.pi): if projMat is None: angles = np.real(residues.T.conj().dot(residues)) else: angles = np.real(self.innerProduct(residues, residues)) resNormEff = resNorm resNormEff[np.isclose(resNormEff, 0., atol = 1e-15)] = 1. angles = np.clip((angles / resNormEff).T / resNormEff, -1., 1.) angles = np.arccos(angles) badangles = ((angles < self.cutOffResAngleMin) + (angles > self.cutOffResAngleMax)) badangles[np.arange(len(angles)), np.arange(len(angles))] = 0 idx = np.zeros(len(angles), dtype = bool) while np.sum(badangles) > 0: idxn = np.argmax(np.sum(badangles, axis = 1)) badangles[idxn], badangles[:, idxn] = 0, 0 idx[idxn] = True flag[resGood[idx]] = 1 return flag > 0 @property def _ignoreResidues(self): return (self.cutOffResNormMin <= 0. and self.cutOffResAngleMin <= 0. and self.cutOffResAngleMax >= np.pi) diff --git a/rrompy/hfengines/base/linear_affine_engine.py b/rrompy/hfengines/base/linear_affine_engine.py index b3747f6..f0bef1c 100644 --- a/rrompy/hfengines/base/linear_affine_engine.py +++ b/rrompy/hfengines/base/linear_affine_engine.py @@ -1,201 +1,202 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod import numpy as np import scipy.sparse as scsp from collections.abc import Iterable from copy import deepcopy as copy from .hfengine_base import HFEngineBase from rrompy.utilities.base.decorators import affine_construct from rrompy.utilities.base.types import (Np1D, Np2D, List, ListAny, TupleAny, paramVal) from rrompy.utilities.expression import (expressionEvaluator, createMonomial, createMonomialList) from rrompy.utilities.numerical.hash_derivative import ( hashDerivativeToIdx as hashD) -from rrompy.utilities.exception_manager import RROMPyException +from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert __all__ = ['LinearAffineEngine', 'checkIfAffine'] class LinearAffineEngine(HFEngineBase): """Generic solver for affine parametric problems.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._affinePoly = True self.nAs, self.nbs = 1, 1 @property def affinePoly(self): return self._affinePoly @property def nAs(self): """Value of nAs.""" return self._nAs @nAs.setter def nAs(self, nAs): nAsOld = self._nAs if hasattr(self, "_nAs") else -1 if nAs != nAsOld: self._nAs = nAs self.resetAs() @property def nbs(self): """Value of nbs.""" return self._nbs @nbs.setter def nbs(self, nbs): nbsOld = self._nbs if hasattr(self, "_nbs") else -1 if nbs != nbsOld: self._nbs = nbs self.resetbs() @property def spacedim(self): if (hasattr(self, "bs") and isinstance(self.bs, Iterable) and self.bs[0] is not None): return len(self.bs[0]) return super().spacedim def getMonomialSingleWeight(self, deg:List[int]): + RROMPyAssert(len(deg), self.npar, "Monomial degree") return createMonomial(deg, True) def getMonomialWeights(self, n:int): return createMonomialList(n, self.npar, True) def setAs(self, As:List[Np2D]): """Assign terms of operator of linear system.""" if len(As) != self.nAs: raise RROMPyException(("Expected number {} of terms of As not " "matching given list length {}.").format(self.nAs, len(As))) self.As = [copy(A) for A in As] def setthAs(self, thAs:List[List[TupleAny]]): """Assign terms of operator of linear system.""" if len(thAs) != self.nAs: raise RROMPyException(("Expected number {} of terms of thAs not " "matching given list length {}.").format(self.nAs, len(thAs))) self.thAs = copy(thAs) def setbs(self, bs:List[Np1D]): """Assign terms of RHS of linear system.""" if len(bs) != self.nbs: raise RROMPyException(("Expected number {} of terms of bs not " "matching given list length {}.").format(self.nbs, len(bs))) self.bs = [copy(b) for b in bs] def setthbs(self, thbs:List[List[TupleAny]]): """Assign terms of RHS of linear system.""" if len(thbs) != self.nbs: raise RROMPyException(("Expected number {} of terms of thbs not " "matching given list length {}.").format(self.nbs, len(thbs))) self.thbs = copy(thbs) def resetAs(self): """Reset (derivatives of) operator of linear system.""" if hasattr(self, "_nAs"): self.setAs([None] * 
self.nAs) self.setthAs([None] * self.nAs) def resetbs(self): """Reset (derivatives of) RHS of linear system.""" if hasattr(self, "_nbs"): self.setbs([None] * self.nbs) self.setthbs([None] * self.nbs) def _assembleObject(self, mu:paramVal, objs:ListAny, th:ListAny, derI:int) -> Np2D: """Assemble (derivative of) affine object from list of affine terms.""" muE = self.mapParameterList(mu) obj = None for j in range(len(objs)): if len(th[j]) <= derI and th[j][-1] is not None: raise RROMPyException(("Cannot assemble operator. Not enough " "derivatives of theta provided.")) if len(th[j]) > derI and th[j][derI] is not None: expr = expressionEvaluator(th[j][derI], muE) if isinstance(expr, Iterable): if len(expr) > 1: raise RROMPyException(("Size mismatch in value of " "theta function. Only scalars " "allowed.")) expr = expr[0] if obj is None: obj = expr * objs[j] else: obj = obj + expr * objs[j] return obj @abstractmethod def buildA(self): """Build terms of operator of linear system.""" if self.thAs[0] is None: self.thAs = self.getMonomialWeights(self.nAs) if self.As[0] is None: self.As[0] = scsp.eye(self.spacedim, dtype = complex, format = "csr") for j in range(1, self.nAs): if self.As[j] is None: self.As[j] = self.baselineA() @affine_construct def A(self, mu : paramVal = [], der : List[int] = 0) -> Np2D: """ Assemble terms of operator of linear system and return it (or its derivative) at a given parameter. """ derI = hashD(der) if isinstance(der, Iterable) else der if derI < 0 or derI > self.nAs - 1: return self.baselineA() self.buildA() assembledA = self._assembleObject(mu, self.As, self.thAs, derI) if assembledA is None: return self.baselineA() return assembledA @abstractmethod def buildb(self): """Build terms of RHS of linear system.""" if self.thbs[0] is None: self.thbs = self.getMonomialWeights(self.nbs) for j in range(self.nbs): if self.bs[j] is None: self.bs[j] = self.baselineb() @affine_construct def b(self, mu : paramVal = [], der : List[int] = 0) -> Np1D: """ Assemble terms of RHS of linear system and return it (or its derivative) at a given parameter. """ derI = hashD(der) if isinstance(der, Iterable) else der if derI < 0 or derI > self.nbs - 1: return self.baselineb() self.buildb() assembledb = self._assembleObject(mu, self.bs, self.thbs, derI) if assembledb is None: return self.baselineb() return assembledb def checkIfAffine(engine, msg : str = "apply method", noA : bool = False, lvl : List[int] = [1]): msg = ("Cannot {} because of non-affine parametric dependence{}. Consider " "using EIM to define a new engine.").format(msg, " of RHS" * noA) if hasattr(engine.b, "is_affine") and engine.b.is_affine in lvl: if noA or (hasattr(engine.A, "is_affine") and engine.A.is_affine in lvl): return raise RROMPyException(msg) diff --git a/rrompy/hfengines/fenics_engines/helmholtz_problem_engine_augmented.py b/rrompy/hfengines/fenics_engines/helmholtz_problem_engine_augmented.py index 6212686..8b0fae3 100755 --- a/rrompy/hfengines/fenics_engines/helmholtz_problem_engine_augmented.py +++ b/rrompy/hfengines/fenics_engines/helmholtz_problem_engine_augmented.py @@ -1,268 +1,268 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version.
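The affine machinery above reduces every evaluation of A(mu) to a weighted sum of fixed matrices, with _assembleObject combining the terms and getMonomialWeights supplying the default weights theta_j(mu) = mu**j. A toy rendition of that idea with made-up matrices (this is not the RROMPy API, just the underlying computation):
```
import scipy.sparse as scsp

# Two fixed affine terms and their monomial weights: A(mu) = A0 + mu * A1.
As = [scsp.eye(4, format = "csr"),
      scsp.random(4, 4, density = 0.5, format = "csr", random_state = 0)]
thetas = [lambda mu: 1., lambda mu: mu]

def assemble(mu):
    obj = None
    for theta, Aj in zip(thetas, As):
        term = theta(mu) * Aj
        obj = term if obj is None else obj + term
    return obj

print(assemble(2.5).toarray())
```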
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from numpy import pad from scipy.sparse import eye, bmat, block_diag from collections.abc import Iterable from .helmholtz_problem_engine import (HelmholtzProblemEngine, ScatteringProblemEngine) from rrompy.solver.fenics import (augmentedH1NormMatrix, augmentedHminus1NormMatrix) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import RROMPyException from rrompy.parameter import parameterMap as pMap __all__ = ['HelmholtzProblemEngineAugmented', 'ScatteringProblemEngineAugmented'] class HelmholtzProblemEngineAugmented(HelmholtzProblemEngine): """ Solver for generic Helmholtz problems with parametric wavenumber. - \nabla \cdot (a \nabla u) - omega * n**2 * v = f in \Omega omega * u = v in \overline{\Omega} u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real FE space. u: Generic trial functions for variational form evaluation. v: Generic test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. diffusivity: Value of a. refractionIndex: Value of n. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.parameterMap = pMap(1., self.npar) @property def spacedim(self): if (hasattr(self, "bs") and isinstance(self.bs, Iterable) and self.bs[0] is not None): return len(self.bs[0]) return 2 * super().spacedim def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = augmentedH1NormMatrix(self.V) + self._energyNormMatrix = augmentedH1NormMatrix(self.V) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. 
""" vbMng(self, "INIT", "Assembling energy dual matrix.", 20) - self.energyNormDualMatrix = augmentedHminus1NormMatrix(self.V, + self._energyNormDualMatrix = augmentedHminus1NormMatrix(self.V, compressRank = self._energyDualNormCompress) vbMng(self, "DEL", "Done assembling energy dual matrix.", 20) def buildA(self): """Build terms of operator of linear system.""" ANone = any([A is None for A in self.As]) if not ANone: return self.nAs = 2 super().buildA() I = eye(self.spacedim // 2) self.As[0] = block_diag((self.As[0], I), format = "csr") self.As[1] = bmat([[None, self.As[1]], [- I, None]], format = "csr") def buildb(self): """Build terms of operator of linear system.""" bNone = any([b is None for b in self.bs]) if not bNone: return self.nbs = 1 dim = self.spacedim // 2 super().buildb() self.bs[0] = pad(self.bs[0], (0, dim), "constant") def plot(self, u, warping = None, is_state = False, name = "u", save = None, what = 'all', forceNewFile = True, saveFormat = "eps", saveDPI = 100, show = True, colorMap = "jet", fenplotArgs = {}, **figspecs): uh = u[: self.spacedim // 2] if is_state or self.isCEye else u return super().plot(uh, warping, is_state, name, save, what, forceNewFile, saveFormat, saveDPI, show, colorMap, fenplotArgs, **figspecs) def outParaview(self, u, warping = None, is_state = False, name = "u", filename = "out", time = 0., what = 'all', forceNewFile = True, folder = False, filePW = None): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaview(u[: self.spacedim // 2], warping, is_state, name, filename, time, what, forceNewFile, folder, filePW) def outParaviewTimeDomain(self, u, omega, warping = None, is_state = False, timeFinal = None, periodResolution = 20, name = "u", filename = "out", forceNewFile = True, folder = False): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaviewTimeDomain(u[: self.spacedim // 2], omega, warping, is_state, timeFinal, periodResolution, name, filename, forceNewFile, folder) class ScatteringProblemEngineAugmented(ScatteringProblemEngine): """ Solver for scattering problems with parametric wavenumber. - \nabla \cdot (a \nabla u) - omega * n**2 * v = f in \Omega omega * u = v in \overline{\Omega} u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu +- i v = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real FE space. u: Generic trial functions for variational form evaluation. v: Generic test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. signR: Sign in ABC. omega: Value of omega. diffusivity: Value of a. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.nAs = 2 self._weight0 = 1. 
@property def spacedim(self): if (hasattr(self, "bs") and isinstance(self.bs, Iterable) and self.bs[0] is not None): return len(self.bs[0]) return 2 * super().spacedim def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = augmentedH1NormMatrix(self.V) + self._energyNormMatrix = augmentedH1NormMatrix(self.V) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ vbMng(self, "INIT", "Assembling energy dual matrix.", 20) - self.energyNormDualMatrix = augmentedHminus1NormMatrix(self.V, + self._energyNormDualMatrix = augmentedHminus1NormMatrix(self.V, compressRank = self._energyDualNormCompress) vbMng(self, "DEL", "Done assembling energy dual matrix.", 20) def buildA(self): """Build terms of operator of linear system.""" ANone = any([A is None for A in self.As]) if not ANone: return self.nAs = 3 super().buildA() self._nAs = 2 I = eye(self.spacedim // 2) self.As[0] = bmat([[self.As[0], self._weight0 * self.As[1]], [None, I]], format = "csr") self.As[1] = bmat([[(1. - self._weight0) * self.As[1], self.As[2]], [- I, None]], format = "csr") self.thAs.pop() self.As.pop() def buildb(self): """Build terms of operator of linear system.""" bNone = any([b is None for b in self.bs]) if not bNone: return self.nbs = 1 dim = self.spacedim // 2 super().buildb() self.bs[0] = pad(self.bs[0], (0, dim), "constant") def plot(self, u, warping = None, is_state = False, name = "u", save = None, what = 'all', forceNewFile = True, saveFormat = "eps", saveDPI = 100, show = True, colorMap = "jet", fenplotArgs = {}, **figspecs): uh = u[: self.spacedim // 2] if is_state or self.isCEye else u return super().plot(uh, warping, is_state, name, save, what, forceNewFile, saveFormat, saveDPI, show, colorMap, fenplotArgs, **figspecs) def outParaview(self, u, warping = None, is_state = False, name = "u", filename = "out", time = 0., what = 'all', forceNewFile = True, folder = False, filePW = None): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaview(u[: self.spacedim // 2], warping, is_state, name, filename, time, what, forceNewFile, folder, filePW) def outParaviewTimeDomain(self, u, omega, warping = None, is_state = False, timeFinal = None, periodResolution = 20, name = "u", filename = "out", forceNewFile = True, folder = False): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaviewTimeDomain(u[: self.spacedim // 2], omega, warping, is_state, timeFinal, periodResolution, name, filename, forceNewFile, folder) diff --git a/rrompy/hfengines/fenics_engines/laplace_base_problem_engine.py b/rrompy/hfengines/fenics_engines/laplace_base_problem_engine.py index 472005d..b84ce31 100644 --- a/rrompy/hfengines/fenics_engines/laplace_base_problem_engine.py +++ b/rrompy/hfengines/fenics_engines/laplace_base_problem_engine.py @@ -1,252 +1,252 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
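The buildA override of ScatteringProblemEngineAugmented above goes one step further: the three-term pencil A0 + omega * A1 + omega**2 * A2 is folded into just two augmented terms, with _weight0 deciding how A1 is split between them. The identity this relies on can be checked with stand-in matrices:
```
import numpy as np
from scipy.sparse import eye, bmat
from scipy.sparse.linalg import spsolve

n, omega, w = 4, 3.0, 1.0  # w plays the role of self._weight0
A0, A1, A2, I = 2. * eye(n), 0.5 * eye(n), -eye(n), eye(n)

# Two augmented terms, mirroring the bmat calls in buildA above.
Aug0 = bmat([[A0, w * A1], [None, I]], format = "csr")
Aug1 = bmat([[(1. - w) * A1, A2], [-I, None]], format = "csr")

f = np.ones(n)
uv = spsolve((Aug0 + omega * Aug1).tocsr(),
             np.concatenate([f, np.zeros(n)]))
u_ref = spsolve((A0 + omega * A1 + omega ** 2 * A2).tocsr(), f)
print(np.allclose(uv[:n], u_ref))  # True: first block is the physical state
```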
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np import fenics as fen from rrompy.hfengines.base.linear_affine_engine import LinearAffineEngine from rrompy.hfengines.base.fenics_engine_base import FenicsEngineBase from rrompy.utilities.base.types import paramVal from rrompy.solver.fenics import (fenZERO, fenONE, H1NormMatrix, Hminus1NormMatrix) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.parameter import checkParameter from rrompy.solver.fenics import fenics2Sparse, fenics2Vector __all__ = ['LaplaceBaseProblemEngine'] class LaplaceBaseProblemEngine(LinearAffineEngine, FenicsEngineBase): """ Solver for generic Laplace problems. - \nabla \cdot (a \nabla u) = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real FE space. u: Generic trial functions for variational form evaluation. v: Generic test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. diffusivity: Value of a. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ _energyDualNormCompress = None def __init__(self, mu0 : paramVal = [], degree_threshold : int = np.inf, verbosity : int = 10, timestamp : bool = True): super().__init__(degree_threshold = degree_threshold, verbosity = verbosity, timestamp = timestamp) self._affinePoly = True self.mu0 = checkParameter(mu0) self.npar = self.mu0.shape[1] self.omega = np.abs(self.mu0(0, 0)) if self.npar > 0 else 0. 
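A convention worth spelling out before the property definitions below: every coefficient setter normalizes its argument to a [real, imag] pair, since FEniCS assembles real-valued forms only. A short illustration (the engine instance named here is hypothetical, and running this assumes a working FEniCS installation):
```
import fenics as fen

# A complex diffusivity a = 1 + 2j is stored as a pair of UFL expressions;
# passing a single expression makes the setter append fenZERO as the
# imaginary part, e.g. (hypothetical instance):
#     engine.diffusivity = [fen.Constant(1.), fen.Constant(2.)]  # a = 1 + 2j
#     engine.diffusivity = fen.Constant(1.)                      # a = 1
pair = [fen.Constant(1.), fen.Constant(2.)]
print(float(pair[0]), float(pair[1]))  # 1.0 2.0
```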
self.diffusivity = fenONE self.forcingTerm = fenZERO self.DirichletDatum = fenZERO self.NeumannDatum = fenZERO self.RobinDatumG = fenZERO self.RobinDatumH = fenZERO @property def diffusivity(self): """Value of a.""" return self._diffusivity @diffusivity.setter def diffusivity(self, diffusivity): self.resetAs() if not isinstance(diffusivity, (list, tuple,)): diffusivity = [diffusivity, fenZERO] self._diffusivity = diffusivity @property def forcingTerm(self): """Value of f.""" return self._forcingTerm @forcingTerm.setter def forcingTerm(self, forcingTerm): self.resetbs() if not isinstance(forcingTerm, (list, tuple,)): forcingTerm = [forcingTerm, fenZERO] self._forcingTerm = forcingTerm @property def DirichletDatum(self): """Value of u0.""" return self._DirichletDatum @DirichletDatum.setter def DirichletDatum(self, DirichletDatum): self.resetbs() if not isinstance(DirichletDatum, (list, tuple,)): DirichletDatum = [DirichletDatum, fenZERO] self._DirichletDatum = DirichletDatum @property def NeumannDatum(self): """Value of g1.""" return self._NeumannDatum @NeumannDatum.setter def NeumannDatum(self, NeumannDatum): self.resetbs() if not isinstance(NeumannDatum, (list, tuple,)): NeumannDatum = [NeumannDatum, fenZERO] self._NeumannDatum = NeumannDatum @property def RobinDatumG(self): """Value of g2.""" return self._RobinDatumG @RobinDatumG.setter def RobinDatumG(self, RobinDatumG): self.resetbs() if not isinstance(RobinDatumG, (list, tuple,)): RobinDatumG = [RobinDatumG, fenZERO] self._RobinDatumG = RobinDatumG @property def RobinDatumH(self): """Value of h.""" return self._RobinDatumH @RobinDatumH.setter def RobinDatumH(self, RobinDatumH): self.resetAs() if not isinstance(RobinDatumH, (list, tuple,)): RobinDatumH = [RobinDatumH, fenZERO] self._RobinDatumH = RobinDatumH @property def DirichletBoundary(self): """Function handle to DirichletBoundary.""" return self.BCManager.DirichletBoundary @DirichletBoundary.setter def DirichletBoundary(self, DirichletBoundary): self.resetAs() self.resetbs() self.BCManager.DirichletBoundary = DirichletBoundary @property def NeumannBoundary(self): """Function handle to NeumannBoundary.""" return self.BCManager.NeumannBoundary @NeumannBoundary.setter def NeumannBoundary(self, NeumannBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.NeumannBoundary = NeumannBoundary @property def RobinBoundary(self): """Function handle to RobinBoundary.""" return self.BCManager.RobinBoundary @RobinBoundary.setter def RobinBoundary(self, RobinBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.RobinBoundary = RobinBoundary def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = H1NormMatrix(self.V, np.abs(self.omega)**2) + self._energyNormMatrix = H1NormMatrix(self.V, np.abs(self.omega)**2) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. 
""" vbMng(self, "INIT", "Assembling energy dual matrix.", 20) - self.energyNormDualMatrix = Hminus1NormMatrix( + self._energyNormDualMatrix = Hminus1NormMatrix( self.V, np.abs(self.omega)**2, compressRank = self._energyDualNormCompress) vbMng(self, "DEL", "Done assembling energy dual matrix.", 20) def buildA(self): """Build terms of operator of linear system.""" if self.thAs[0] is None: self.thAs = self.getMonomialWeights(self.nAs) if self.As[0] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling operator term A0.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZERO, self.DirichletBoundary) aRe, aIm = self.diffusivity hRe, hIm = self.RobinDatumH termNames = ["diffusivity", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip([aRe, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip([aIm, hIm], [x + "Imag" for x in termNames])) a0Re = (aRe * fen.inner(fen.grad(self.u), fen.grad(self.v)) * fen.dx + hRe * self.u * self.v * self.ds(1)) a0Im = (aIm * fen.inner(fen.grad(self.u), fen.grad(self.v)) * fen.dx + hIm * self.u * self.v * self.ds(1)) self.As[0] = (fenics2Sparse(a0Re, parsRe, DirichletBC0, 1) + 1.j * fenics2Sparse(a0Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) def buildb(self): """Build terms of operator of linear system.""" if self.thbs[0] is None: self.thbs = self.getMonomialWeights(self.nbs) if self.bs[0] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling forcing term b0.", 20) u0Re, u0Im = self.DirichletDatum fRe, fIm = self.forcingTerm g1Re, g1Im = self.NeumannDatum g2Re, g2Im = self.RobinDatumG termNames = ["forcingTerm", "NeumannDatum", "RobinDatumG"] parsRe = self.iterReduceQuadratureDegree(zip([fRe, g1Re, g2Re], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip([fIm, g1Im, g2Im], [x + "Imag" for x in termNames])) L0Re = (fRe * self.v * fen.dx + g1Re * self.v * self.ds(0) + g2Re * self.v * self.ds(1)) L0Im = (fIm * self.v * fen.dx + g1Im * self.v * self.ds(0) + g2Im * self.v * self.ds(1)) DBCR = fen.DirichletBC(self.V, u0Re, self.DirichletBoundary) DBCI = fen.DirichletBC(self.V, u0Im, self.DirichletBoundary) self.bs[0] = (fenics2Vector(L0Re, parsRe, DBCR, 1) + 1.j * fenics2Vector(L0Im, parsIm, DBCI, 1)) vbMng(self, "DEL", "Done assembling forcing term.", 20) diff --git a/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine.py b/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine.py index b7af518..b8872a9 100644 --- a/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine.py +++ b/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine.py @@ -1,285 +1,285 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import numpy as np import fenics as fen from .linear_elasticity_problem_engine import LinearElasticityProblemEngine from rrompy.utilities.base.types import paramVal from rrompy.solver.fenics import (fenZERO, fenZEROS, fenONE, fenics2Sparse, elasticNormMatrix, elasticDualNormMatrix) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.parameter import parameterMap as pMap __all__ = ['LinearElasticityHelmholtzProblemEngine', 'LinearElasticityHelmholtzProblemEngineDamped'] class LinearElasticityHelmholtzProblemEngine(LinearElasticityProblemEngine): """ Solver for generic linear elasticity Helmholtz problems with parametric wavenumber. - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) - rho_ * mu^2 * u = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real vector FE space. u: Generic vector trial functions for variational form evaluation. v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. lambda_: Value of lambda_. mu_: Value of mu_. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ def __init__(self, mu0 : paramVal = [0.], degree_threshold : int = np.inf, verbosity : int = 10, timestamp : bool = True): super().__init__(mu0 = mu0, degree_threshold = degree_threshold, verbosity = verbosity, timestamp = timestamp) self._affinePoly = True self.nAs = 2 self.omega = np.abs(self.mu0(0, 0)) self.rho_ = fenONE self.parameterMap = pMap([2.] + [1.] * (self.npar - 1)) @property def rho_(self): """Value of rho_.""" return self._rho_ @rho_.setter def rho_(self, rho_): self.resetAs() if not isinstance(rho_, (list, tuple,)): rho_ = [rho_, fenZERO] self._rho_ = rho_ def buildEnergyNormForm(self): # energy + omega norm """ Build sparse matrix (in CSR format) representative of scalar product. """ vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = elasticNormMatrix( + self._energyNormMatrix = elasticNormMatrix( self.V, self.lambda_[0], self.mu_[0], np.abs(self.omega)**2 * self.rho_[0]) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. 
""" vbMng(self, "INIT", "Assembling energy dual matrix.", 20) - self.energyNormDualMatrix = elasticDualNormMatrix( + self._energyNormDualMatrix = elasticDualNormMatrix( self.V, self.lambda_[0], self.mu_[0], np.abs(self.omega)**2 * self.rho_[0], compressRank = self._energyDualNormCompress) vbMng(self, "DEL", "Done assembling energy dual matrix.", 20) def buildA(self): """Build terms of operator of linear system.""" if self.thAs[0] is None: self.thAs = self.getMonomialWeights(self.nAs) if self.As[0] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling operator term A0.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) lambda_Re, lambda_Im = self.lambda_ mu_Re, mu_Im = self.mu_ hRe, hIm = self.RobinDatumH termNames = ["lambda_", "mu_", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( [lambda_Re, mu_Re, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [lambda_Im, mu_Re, hIm], [x + "Imag" for x in termNames])) epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) sigma = lambda u, l_, m_: ( l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + 2. * m_ * epsilon(u)) a0Re = (fen.inner(sigma(self.u, lambda_Re, mu_Re), epsilon(self.v)) * fen.dx + hRe * fen.inner(self.u, self.v) * self.ds(1)) a0Im = (fen.inner(sigma(self.u, lambda_Im, mu_Im), epsilon(self.v)) * fen.dx + hIm * fen.inner(self.u, self.v) * self.ds(1)) self.As[0] = (fenics2Sparse(a0Re, parsRe, DirichletBC0, 1) + 1.j * fenics2Sparse(a0Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) if self.As[1] is None: vbMng(self, "INIT", "Assembling operator term A1.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) rho_Re, rho_Im = self.rho_ parsRe = self.iterReduceQuadratureDegree(zip([rho_Re], ["rho_Real"])) parsIm = self.iterReduceQuadratureDegree(zip([rho_Im], ["rho_Imag"])) a1Re = - rho_Re * fen.inner(self.u, self.v) * fen.dx a1Im = - rho_Im * fen.inner(self.u, self.v) * fen.dx self.As[1] = (fenics2Sparse(a1Re, parsRe, DirichletBC0, 0) + 1.j * fenics2Sparse(a1Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) class LinearElasticityHelmholtzProblemEngineDamped( LinearElasticityHelmholtzProblemEngine): """ Solver for generic linear elasticity Helmholtz problems with parametric wavenumber. - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) - rho_ * (mu^2 - i * eta * mu) * u = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real vector FE space. u: Generic vector trial functions for variational form evaluation. v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. lambda_: Value of lambda_. mu_: Value of mu_. eta: Value of eta. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. 
RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._affinePoly = True self.nAs = 3 self.eta = fenZERO self.parameterMap = pMap(1., self.npar) @property def eta(self): """Value of eta.""" return self._eta @eta.setter def eta(self, eta): self.resetAs() if not isinstance(eta, (list, tuple,)): eta = [eta, fenZERO] self._eta = eta def buildA(self): """Build terms of operator of linear system.""" if self.thAs[0] is None: self.thAs = self.getMonomialWeights(self.nAs) if self.As[0] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling operator term A0.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) lambda_Re, lambda_Im = self.lambda_ mu_Re, mu_Im = self.mu_ hRe, hIm = self.RobinDatumH termNames = ["lambda_", "mu_", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( [lambda_Re, mu_Re, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [lambda_Im, mu_Re, hIm], [x + "Imag" for x in termNames])) epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) sigma = lambda u, l_, m_: ( l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + 2. * m_ * epsilon(u)) a0Re = (fen.inner(sigma(self.u, lambda_Re, mu_Re), epsilon(self.v)) * fen.dx + hRe * fen.inner(self.u, self.v) * self.ds(1)) a0Im = (fen.inner(sigma(self.u, lambda_Im, mu_Im), epsilon(self.v)) * fen.dx + hIm * fen.inner(self.u, self.v) * self.ds(1)) self.As[0] = (fenics2Sparse(a0Re, parsRe, DirichletBC0, 1) + 1.j * fenics2Sparse(a0Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) if self.As[1] is None: vbMng(self, "INIT", "Assembling operator term A1.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) rho_Re, rho_Im = self.rho_ eta_Re, eta_Im = self.eta termNames = ["rho_", "eta"] parsRe = self.iterReduceQuadratureDegree(zip([rho_Re, eta_Re], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip([rho_Im, eta_Im], [x + "Imag" for x in termNames])) a1Re = - ((eta_Re * rho_Im + eta_Im * rho_Re) * fen.inner(self.u, self.v)) * fen.dx a1Im = ((eta_Re * rho_Re - eta_Im * rho_Im) * fen.inner(self.u, self.v)) * fen.dx self.As[1] = (fenics2Sparse(a1Re, parsRe, DirichletBC0, 0) + 1.j * fenics2Sparse(a1Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) if self.As[2] is None: vbMng(self, "INIT", "Assembling operator term A2.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) rho_Re, rho_Im = self.rho_ parsRe = self.iterReduceQuadratureDegree(zip([rho_Re], ["rho_Real"])) parsIm = self.iterReduceQuadratureDegree(zip([rho_Im], ["rho_Imag"])) a2Re = - rho_Re * fen.inner(self.u, self.v) * fen.dx a2Im = - rho_Im * fen.inner(self.u, self.v) * fen.dx self.As[2] = (fenics2Sparse(a2Re, parsRe, DirichletBC0, 0) + 1.j * fenics2Sparse(a2Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) diff --git a/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine_augmented.py b/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine_augmented.py index 21f9c7b..b198b61 100755 --- a/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine_augmented.py +++ 
b/rrompy/hfengines/fenics_engines/linear_elasticity_helmholtz_problem_engine_augmented.py @@ -1,281 +1,281 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from scipy.sparse import eye, bmat, block_diag from collections.abc import Iterable from .linear_elasticity_helmholtz_problem_engine import ( LinearElasticityHelmholtzProblemEngine, LinearElasticityHelmholtzProblemEngineDamped) from rrompy.solver.fenics import (augmentedElasticNormMatrix, augmentedElasticDualNormMatrix) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import RROMPyException from rrompy.parameter import parameterMap as pMap __all__ = ['LinearElasticityHelmholtzProblemEngineAugmented', 'LinearElasticityHelmholtzProblemEngineDampedAugmented'] class LinearElasticityHelmholtzProblemEngineAugmented( LinearElasticityHelmholtzProblemEngine): """ Solver for generic linear elasticity Helmholtz problems with parametric wavenumber. - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) - rho_ * mu * v = f in \Omega mu * u = v in \overline{\Omega} u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real vector FE space. u: Generic vector trial functions for variational form evaluation. v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. lambda_: Value of lambda_. mu_: Value of mu_. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.parameterMap = pMap(1., self.npar) @property def spacedim(self): if (hasattr(self, "bs") and isinstance(self.bs, Iterable) and self.bs[0] is not None): return len(self.bs[0]) return 2 * super().spacedim def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. 
""" vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = augmentedElasticNormMatrix(self.V, + self._energyNormMatrix = augmentedElasticNormMatrix(self.V, self.lambda_[0], self.mu_[0]) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ vbMng(self, "INIT", "Assembling energy dual matrix.", 20) - self.energyNormDualMatrix = augmentedElasticDualNormMatrix( + self._energyNormDualMatrix = augmentedElasticDualNormMatrix( self.V, self.lambda_[0], self.mu_[0], compressRank = self._energyDualNormCompress) vbMng(self, "DEL", "Done assembling energy dual matrix.", 20) def buildA(self): """Build terms of operator of linear system.""" ANone = any([A is None for A in self.As]) if not ANone: return self.nAs = 2 super().buildA() I = eye(self.spacedim // 2) self.As[0] = block_diag((self.As[0], I), format = "csr") self.As[1] = bmat([[None, self.As[1]], [- I, None]], format = "csr") def buildb(self): """Build terms of operator of linear system.""" bNone = any([b is None for b in self.bs]) if not bNone: return self.nbs = 1 dim = self.spacedim // 2 super().buildb() self.bs[0] = np.pad(self.bs[0], (0, dim), "constant") def plot(self, u, warping = None, is_state = False, name = "u", save = None, what = 'all', forceNewFile = True, saveFormat = "eps", saveDPI = 100, show = True, colorMap = "jet", fenplotArgs = {}, **figspecs): uh = u[: self.spacedim // 2] if is_state or self.isCEye else u return super().plot(uh, warping, is_state, name, save, what, forceNewFile, saveFormat, saveDPI, show, colorMap, fenplotArgs, **figspecs) def outParaview(self, u, warping = None, is_state = False, name = "u", filename = "out", time = 0., what = 'all', forceNewFile = True, folder = False, filePW = None): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaview(u[: self.spacedim // 2], warping, is_state, name, filename, time, what, forceNewFile, folder, filePW) def outParaviewTimeDomain(self, u, omega, warping = None, is_state = False, timeFinal = None, periodResolution = 20, name = "u", filename = "out", forceNewFile = True, folder = False): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaviewTimeDomain(u[: self.spacedim // 2], omega, warping, is_state, timeFinal, periodResolution, name, filename, forceNewFile, folder) class LinearElasticityHelmholtzProblemEngineDampedAugmented( LinearElasticityHelmholtzProblemEngineDamped): """ Solver for generic linear elasticity Helmholtz problems with parametric wavenumber. - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) - rho_ * (mu - i * eta) * v = f in \Omega mu * u = v in \overline{\Omega} u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real vector FE space. u: Generic vector trial functions for variational form evaluation. v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. 
degree_threshold: Threshold for ufl expression interpolation degree. omega: Value of omega. lambda_: Value of lambda_. mu_: Value of mu_. eta: Value of eta. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.nAs = 2 self._weight0 = 1. @property def spacedim(self): if (hasattr(self, "bs") and isinstance(self.bs, Iterable) and self.bs[0] is not None): return len(self.bs[0]) return 2 * super().spacedim def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = augmentedElasticNormMatrix(self.V, + self._energyNormMatrix = augmentedElasticNormMatrix(self.V, self.lambda_[0], self.mu_[0]) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ vbMng(self, "INIT", "Assembling energy dual matrix.", 20) - self.energyNormDualMatrix = augmentedElasticDualNormMatrix( + self._energyNormDualMatrix = augmentedElasticDualNormMatrix( self.V, self.lambda_[0], self.mu_[0], compressRank = self._energyDualNormCompress) vbMng(self, "DEL", "Done assembling energy dual matrix.", 20) def buildA(self): """Build terms of operator of linear system.""" ANone = any([A is None for A in self.As]) if not ANone: return self.nAs = 3 super().buildA() self._nAs = 2 I = eye(self.spacedim // 2) self.As[0] = bmat([[self.As[0], self._weight0 * self.As[1]], [None, I]], format = "csr") self.As[1] = bmat([[(1. 
- self._weight0) * self.As[1], self.As[2]], [- I, None]], format = "csr") self.thAs.pop() self.As.pop() def buildb(self): """Build terms of operator of linear system.""" bNone = any([b is None for b in self.bs]) if not bNone: return self.nbs = 1 dim = self.spacedim // 2 super().buildb() self.bs[0] = np.pad(self.bs[0], (0, dim), "constant") def plot(self, u, warping = None, is_state = False, name = "u", save = None, what = 'all', forceNewFile = True, saveFormat = "eps", saveDPI = 100, show = True, colorMap = "jet", fenplotArgs = {}, **figspecs): uh = u[: self.spacedim // 2] if is_state or self.isCEye else u return super().plot(uh, warping, is_state, name, save, what, forceNewFile, saveFormat, saveDPI, show, colorMap, fenplotArgs, **figspecs) def outParaview(self, u, warping = None, is_state = False, name = "u", filename = "out", time = 0., what = 'all', forceNewFile = True, folder = False, filePW = None): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaview(u[: self.spacedim // 2], warping, is_state, name, filename, time, what, forceNewFile, folder, filePW) def outParaviewTimeDomain(self, u, omega, warping = None, is_state = False, timeFinal = None, periodResolution = 20, name = "u", filename = "out", forceNewFile = True, folder = False): if not is_state and not self.isCEye: raise RROMPyException(("Cannot output to Paraview non-state " "object.")) return super().outParaviewTimeDomain(u[: self.spacedim // 2], omega, warping, is_state, timeFinal, periodResolution, name, filename, forceNewFile, folder) diff --git a/rrompy/hfengines/fenics_engines/linear_elasticity_problem_engine.py b/rrompy/hfengines/fenics_engines/linear_elasticity_problem_engine.py index e3ad8fb..6832acc 100644 --- a/rrompy/hfengines/fenics_engines/linear_elasticity_problem_engine.py +++ b/rrompy/hfengines/fenics_engines/linear_elasticity_problem_engine.py @@ -1,289 +1,289 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np import fenics as fen from rrompy.hfengines.base.linear_affine_engine import LinearAffineEngine from rrompy.hfengines.base.vector_fenics_engine_base import \ VectorFenicsEngineBase from rrompy.utilities.base.types import paramVal from rrompy.solver.fenics import (fenZERO, fenZEROS, fenONE, elasticNormMatrix, elasticDualNormMatrix) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.parameter import checkParameter from rrompy.solver.fenics import fenics2Sparse, fenics2Vector __all__ = ['LinearElasticityProblemEngine'] class LinearElasticityProblemEngine(LinearAffineEngine, VectorFenicsEngineBase): """ Solver for generic linear elasticity problems. - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. 
BCManager: Boundary condition manager. V: Real vector FE space. u: Generic vector trial functions for variational form evaluation. v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. cs: Numpy array representation of cs. energyNormMatrix: Scipy sparse matrix representing inner product over V. energyNormDualMatrix: Scipy sparse matrix representing dual inner product between Riesz representers V-V. degree_threshold: Threshold for ufl expression interpolation degree. lambda_: Value of lambda_. mu_: Value of mu_. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). dsToBeSet: Whether ds needs to be set. """ _energyDualNormCompress = None def __init__(self, mu0 : paramVal = [], degree_threshold : int = np.inf, verbosity : int = 10, timestamp : bool = True): super().__init__(degree_threshold = degree_threshold, verbosity = verbosity, timestamp = timestamp) self._affinePoly = True self.lambda_ = fenONE self.mu_ = fenONE self.mu0 = checkParameter(mu0) self.npar = self.mu0.shape[1] self.RobinDatumH = fenZERO @property def V(self): """Value of V.""" return self._V @V.setter def V(self, V): VectorFenicsEngineBase.V.fset(self, V) self.forcingTerm = fenZEROS(self.V.mesh().topology().dim()) self.DirichletDatum = fenZEROS(self.V.mesh().topology().dim()) self.NeumannDatum = fenZEROS(self.V.mesh().topology().dim()) self.RobinDatumG = fenZEROS(self.V.mesh().topology().dim()) self.dsToBeSet = True @property def lambda_(self): """Value of lambda_.""" return self._lambda_ @lambda_.setter def lambda_(self, lambda_): self.resetAs() if not isinstance(lambda_, (list, tuple,)): lambda_ = [lambda_, fenZERO] self._lambda_ = lambda_ @property def mu_(self): """Value of mu_.""" return self._mu_ @mu_.setter def mu_(self, mu_): self.resetAs() if not isinstance(mu_, (list, tuple,)): mu_ = [mu_, fenZERO] self._mu_ = mu_ @property def forcingTerm(self): """Value of f.""" return self._forcingTerm @forcingTerm.setter def forcingTerm(self, forcingTerm): self.resetbs() if not isinstance(forcingTerm, (list, tuple,)): forcingTerm = [forcingTerm, fenZEROS(self.V.mesh().topology().dim())] self._forcingTerm = forcingTerm @property def DirichletDatum(self): """Value of u0.""" return self._DirichletDatum @DirichletDatum.setter def DirichletDatum(self, DirichletDatum): self.resetbs() if not isinstance(DirichletDatum, (list, tuple,)): DirichletDatum = [DirichletDatum, fenZEROS(self.V.mesh().topology().dim())] self._DirichletDatum = DirichletDatum @property def NeumannDatum(self): """Value of g1.""" return self._NeumannDatum @NeumannDatum.setter def NeumannDatum(self, NeumannDatum): self.resetbs() if not isinstance(NeumannDatum, (list, tuple,)): NeumannDatum = [NeumannDatum, fenZEROS(self.V.mesh().topology().dim())] self._NeumannDatum = NeumannDatum @property def RobinDatumG(self): """Value of g2.""" return self._RobinDatumG @RobinDatumG.setter def RobinDatumG(self, RobinDatumG): self.resetbs() if not isinstance(RobinDatumG, (list, tuple,)): RobinDatumG = [RobinDatumG, fenZEROS(self.V.mesh().topology().dim())] self._RobinDatumG = RobinDatumG @property def RobinDatumH(self): """Value of h.""" return self._RobinDatumH @RobinDatumH.setter def 
RobinDatumH(self, RobinDatumH): self.resetAs() if not isinstance(RobinDatumH, (list, tuple,)): RobinDatumH = [RobinDatumH, fenZERO] self._RobinDatumH = RobinDatumH @property def DirichletBoundary(self): """Function handle to DirichletBoundary.""" return self.BCManager.DirichletBoundary @DirichletBoundary.setter def DirichletBoundary(self, DirichletBoundary): self.resetAs() self.resetbs() self.BCManager.DirichletBoundary = DirichletBoundary @property def NeumannBoundary(self): """Function handle to NeumannBoundary.""" return self.BCManager.NeumannBoundary @NeumannBoundary.setter def NeumannBoundary(self, NeumannBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.NeumannBoundary = NeumannBoundary @property def RobinBoundary(self): """Function handle to RobinBoundary.""" return self.BCManager.RobinBoundary @RobinBoundary.setter def RobinBoundary(self, RobinBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.RobinBoundary = RobinBoundary def buildEnergyNormForm(self): """ Build sparse matrix (in CSR format) representative of scalar product. """ vbMng(self, "INIT", "Assembling energy matrix.", 20) - self.energyNormMatrix = elasticNormMatrix(self.V, self.lambda_[0], - self.mu_[0]) + self._energyNormMatrix = elasticNormMatrix(self.V, self.lambda_[0], + self.mu_[0]) vbMng(self, "DEL", "Done assembling energy matrix.", 20) def buildEnergyNormDualForm(self): """ Build sparse matrix (in CSR format) representative of dual scalar product without duality. """ vbMng(self, "INIT", "Assembling energy dual matrix.", 20) - self.energyNormDualMatrix = elasticDualNormMatrix( + self._energyNormDualMatrix = elasticDualNormMatrix( self.V, self.lambda_[0], self.mu_[0], compressRank = self._energyDualNormCompress) vbMng(self, "DEL", "Done assembling energy dual matrix.", 20) def buildA(self): """Build terms of operator of linear system.""" if self.thAs[0] is None: self.thAs = self.getMonomialWeights(self.nAs) if self.As[0] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling operator term A0.", 20) DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) lambda_Re, lambda_Im = self.lambda_ mu_Re, mu_Im = self.mu_ hRe, hIm = self.RobinDatumH termNames = ["lambda_", "mu_", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( [lambda_Re, mu_Re, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [lambda_Im, mu_Re, hIm], [x + "Imag" for x in termNames])) epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) sigma = lambda u, l_, m_: ( l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + 2. 
* m_ * epsilon(u)) a0Re = (fen.inner(sigma(self.u, lambda_Re, mu_Re), epsilon(self.v)) * fen.dx + hRe * fen.inner(self.u, self.v) * self.ds(1)) a0Im = (fen.inner(sigma(self.u, lambda_Im, mu_Im), epsilon(self.v)) * fen.dx + hIm * fen.inner(self.u, self.v) * self.ds(1)) self.As[0] = (fenics2Sparse(a0Re, parsRe, DirichletBC0, 1) + 1.j * fenics2Sparse(a0Im, parsIm, DirichletBC0, 0)) vbMng(self, "DEL", "Done assembling operator term.", 20) def buildb(self): """Build terms of operator of linear system.""" if self.thbs[0] is None: self.thbs = self.getMonomialWeights(self.nbs) if self.bs[0] is None: self.autoSetDS() vbMng(self, "INIT", "Assembling forcing term b0.", 20) u0Re, u0Im = self.DirichletDatum fRe, fIm = self.forcingTerm g1Re, g1Im = self.NeumannDatum g2Re, g2Im = self.RobinDatumG termNames = ["forcingTerm", "NeumannDatum", "RobinDatumG"] parsRe = self.iterReduceQuadratureDegree(zip([fRe, g1Re, g2Re], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip([fIm, g1Im, g2Im], [x + "Imag" for x in termNames])) L0Re = (fen.inner(fRe, self.v) * fen.dx + fen.inner(g1Re, self.v) * self.ds(0) + fen.inner(g2Re, self.v) * self.ds(1)) L0Im = (fen.inner(fIm, self.v) * fen.dx + fen.inner(g1Im, self.v) * self.ds(0) + fen.inner(g2Im, self.v) * self.ds(1)) DBCR = fen.DirichletBC(self.V, u0Re, self.DirichletBoundary) DBCI = fen.DirichletBC(self.V, u0Im, self.DirichletBoundary) self.bs[0] = (fenics2Vector(L0Re, parsRe, DBCR, 1) + 1.j * fenics2Vector(L0Im, parsIm, DBCI, 1)) vbMng(self, "DEL", "Done assembling forcing term.", 20) diff --git a/rrompy/parameter/parameter_sampling/generic_sampler.py b/rrompy/parameter/parameter_sampling/generic_sampler.py index 8ba2b07..cc7ced1 100644 --- a/rrompy/parameter/parameter_sampling/generic_sampler.py +++ b/rrompy/parameter/parameter_sampling/generic_sampler.py @@ -1,92 +1,93 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from abc import abstractmethod from rrompy.utilities.base.types import List, DictAny, paramList from rrompy.utilities.expression import expressionEvaluator from rrompy.utilities.exception_manager import RROMPyException from rrompy.parameter import checkParameterList, parameterMap as pMap from rrompy.parameter.parameter_list import emptyParameterList __all__ = ['GenericSampler'] class GenericSampler: """ABSTRACT. 
Generic generator of sample points.""" def __init__(self, lims:paramList, parameterMap : DictAny = 1.): self.lims = lims self.parameterMap = pMap(parameterMap, self.npar) def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return "{}[{}_{}]".format(self.name(), self.lims[0], self.lims[1]) def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) def __eq__(self, other) -> bool: if (not hasattr(other, "__dict__") or self.__dict__.keys() != other.__dict__.keys()): return False for key in self.__dict__: val = self.__dict__[key] if isinstance(val, (np.ndarray,)): if not np.allclose(val, other.__dict__[key]): return False else: if val != other.__dict__[key]: return False return True @property def npar(self): """Number of parameters.""" return self._lims.shape[1] def normalFoci(self, d : int = 0): return [-1., 1.] @property def lims(self): """Value of lims.""" return self._lims @lims.setter def lims(self, lims): lims = checkParameterList(lims) if len(lims) != 2: raise RROMPyException("2 limits must be specified.") + lims.data = lims.data + 0. self._lims = lims def mapParameterList(self, mu:paramList, direct : str = "F", idx : List[int] = None) -> paramList: if idx is None: idx = np.arange(self.npar) muMapped = checkParameterList(mu, len(idx)) for j, d in enumerate(idx): muMapped.data[:, j] = expressionEvaluator( self.parameterMap[direct][d], muMapped(j)).flatten() return muMapped def reset(self): self.points = emptyParameterList() @abstractmethod def generatePoints(self, n:int, reorder : bool = True) -> paramList: """Array of points.""" pass diff --git a/rrompy/reduction_methods/base/generic_approximant.py b/rrompy/reduction_methods/base/generic_approximant.py index db43c3d..0fca368 100644 --- a/rrompy/reduction_methods/base/generic_approximant.py +++ b/rrompy/reduction_methods/base/generic_approximant.py @@ -1,872 +1,878 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
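To make the sampler contract above concrete (a two-row `lims`, an `npar` derived from it, and an abstract `generatePoints` hook), here is a minimal standalone sketch using plain numpy. `UniformSampler` is a hypothetical stand-in for illustration, not a class shipped by RROMPy:

```
import numpy as np

# Hypothetical standalone analogue of a GenericSampler subclass: it keeps the
# two-point `lims` convention and fills in the abstract generatePoints hook.
class UniformSampler:
    def __init__(self, lims):
        lims = np.atleast_2d(np.asarray(lims, dtype = float))
        if lims.shape[0] != 2:
            raise ValueError("2 limits must be specified.")
        self.lims = lims

    @property
    def npar(self):
        # number of parameters = number of columns of lims
        return self.lims.shape[1]

    def generatePoints(self, n):
        # equispaced points between the two limits, one column per parameter
        return np.linspace(self.lims[0], self.lims[1], n)

sampler = UniformSampler([[10.], [20.]])
print(sampler.generatePoints(5))   # five points in [10, 20]
```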
# from abc import abstractmethod import numpy as np from collections.abc import Iterable from itertools import product as iterprod from copy import deepcopy as copy from os import remove as osrm from rrompy.sampling import (SamplingEngine, SamplingEngineNormalize, SamplingEnginePOD) from rrompy.utilities.base.types import (Np1D, DictAny, HFEng, List, Tuple, ListAny, strLst, paramVal, paramList, sampList) from rrompy.utilities.base.data_structures import purgeDict, getNewFilename from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPy_READY, RROMPy_FRAGILE) from rrompy.utilities.base.pickle_utilities import pickleDump, pickleLoad from rrompy.parameter import (emptyParameterList, checkParameter, checkParameterList) from rrompy.sampling import sampleList, emptySampleList from rrompy.utilities.parallel import (bcast, masterCore, listGather, listScatter) __all__ = ['GenericApproximant'] def addNormFieldToClass(self, fieldName): - def objFunc(self, mu:paramList, *args, **kwargs) -> Np1D: - uV = getattr(self.__class__, "get" + fieldName)(self, mu) - kwargs["is_state"] = False - val = self.HFEngine.norm(uV, *args, **kwargs) + def objFunc(self, mu:paramList, getargs = {}, normargs = {}) -> Np1D: + uV = getattr(self.__class__, "get" + fieldName)(self, mu, **getargs) + normargs["is_state"] = False + val = self.HFEngine.norm(uV, **normargs) return val setattr(self.__class__, "norm" + fieldName, objFunc) def addNormDualFieldToClass(self, fieldName): - def objFunc(self, mu:paramList, *args, **kwargs) -> Np1D: - uV = getattr(self.__class__, "get" + fieldName)(self, mu) - kwargs["is_state"] = True - if "dual" not in kwargs.keys(): kwargs["dual"] = True - val = self.HFEngine.norm(uV, *args, **kwargs) + def objFunc(self, mu:paramList, getargs = {}, normargs = {}) -> Np1D: + uV = getattr(self.__class__, "get" + fieldName)(self, mu, **getargs) + normargs["is_state"] = True + if "dual" not in normargs.keys(): normargs["dual"] = True + val = self.HFEngine.norm(uV, **normargs) return val setattr(self.__class__, "norm" + fieldName, objFunc) def addPlotFieldToClass(self, fieldName): - def objFunc(self, mu:paramList, *args, **kwargs): - uV = getattr(self.__class__, "get" + fieldName)(self, mu) + def objFunc(self, mu:paramList, getargs = {}, plotargs = {}): + uV = getattr(self.__class__, "get" + fieldName)(self, mu, **getargs) uV = listScatter(uV)[0].T filesOut = [] if len(uV) > 0: - if "name" in kwargs.keys(): nameBase = copy(kwargs["name"]) + if "name" in plotargs.keys(): nameBase = copy(plotargs["name"]) for j, u in enumerate(uV): - if "name" in kwargs.keys(): kwargs["name"] = nameBase + str(j) - filesOut += [self.HFEngine.plot(u, *args, **kwargs)] - if "name" in kwargs.keys(): kwargs["name"] = nameBase + if "name" in plotargs.keys() and len(uV) > 1: + plotargs["name"] = nameBase + str(j) + filesOut += [self.HFEngine.plot(u, **plotargs)] + if "name" in plotargs.keys(): plotargs["name"] = nameBase filesOut = listGather(filesOut) if filesOut[0] is None: return None return filesOut setattr(self.__class__, "plot" + fieldName, objFunc) def addPlotDualFieldToClass(self, fieldName): - def objFunc(self, mu:paramList, *args, **kwargs): - uV = getattr(self.__class__, "get" + fieldName)(self, mu) + def objFunc(self, mu:paramList, getargs = {}, plotargs = {}): + uV = getattr(self.__class__, "get" + fieldName)(self, mu, **getargs) uV = listScatter(uV)[0].T filesOut = [] if len(uV) > 0: - if "name" in kwargs.keys(): nameBase = 
copy(kwargs["name"]) + if "name" in plotargs.keys(): nameBase = copy(plotargs["name"]) for j, u in enumerate(uV): - if "name" in kwargs.keys(): kwargs["name"] = nameBase + str(j) - filesOut += [self.HFEngine.plot(u, *args, **kwargs)] - if "name" in kwargs.keys(): kwargs["name"] = nameBase + if "name" in plotargs.keys() and len(uV) > 1: + plotargs["name"] = nameBase + str(j) + filesOut += [self.HFEngine.plot(u, **plotargs)] + if "name" in plotargs.keys(): plotargs["name"] = nameBase filesOut = listGather(filesOut) if filesOut[0] is None: return None return filesOut setattr(self.__class__, "plot" + fieldName, objFunc) def addOutParaviewFieldToClass(self, fieldName): - def objFunc(self, mu:paramVal, *args, **kwargs): + def objFunc(self, mu:paramVal, getargs = {}, outargs = {}): if not hasattr(self.HFEngine, "outParaview"): raise RROMPyException(("High fidelity engine cannot output to " "Paraview.")) - uV = getattr(self.__class__, "get" + fieldName)(self, mu) + uV = getattr(self.__class__, "get" + fieldName)(self, mu, **getargs) uV = listScatter(uV)[0].T filesOut = [] if len(uV) > 0: - if "name" in kwargs.keys(): nameBase = copy(kwargs["name"]) + if "name" in outargs.keys(): nameBase = copy(outargs["name"]) for j, u in enumerate(uV): - if "name" in kwargs.keys(): kwargs["name"] = nameBase + str(j) - filesOut += [self.HFEngine.outParaview(u, *args, **kwargs)] - if "name" in kwargs.keys(): kwargs["name"] = nameBase + if "name" in outargs.keys() and len(uV) > 1: + outargs["name"] = nameBase + str(j) + filesOut += [self.HFEngine.outParaview(u, **outargs)] + if "name" in outargs.keys(): outargs["name"] = nameBase filesOut = listGather(filesOut) if filesOut[0] is None: return None return filesOut setattr(self.__class__, "outParaview" + fieldName, objFunc) def addOutParaviewTimeDomainFieldToClass(self, fieldName): - def objFunc(self, mu:paramVal, *args, **kwargs): + def objFunc(self, mu:paramVal, getargs = {}, outargs = {}): if not hasattr(self.HFEngine, "outParaviewTimeDomain"): raise RROMPyException(("High fidelity engine cannot output to " "Paraview.")) - uV = getattr(self.__class__, "get" + fieldName)(self, mu) + uV = getattr(self.__class__, "get" + fieldName)(self, mu, **getargs) uV = listScatter(uV)[0].T filesOut = [] if len(uV) > 0: - omega = args.pop(0) if len(args) > 0 else np.real(mu) - if "name" in kwargs.keys(): nameBase = copy(kwargs["name"]) + if "omega" not in outargs.keys(): outargs["omega"] = np.real(mu) + if "name" in outargs.keys(): nameBase = copy(outargs["name"]) filesOut = [] for j, u in enumerate(uV): - if "name" in kwargs.keys(): kwargs["name"] = nameBase + str(j) - filesOut += [self.HFEngine.outParaviewTimeDomain(u, omega, - *args, - **kwargs)] - if "name" in kwargs.keys(): kwargs["name"] = nameBase + if "name" in outargs.keys() and len(uV) > 1: + outargs["name"] = nameBase + str(j) + filesOut += [self.HFEngine.outParaviewTimeDomain(u, **outargs)] + if "name" in outargs.keys(): outargs["name"] = nameBase filesOut = listGather(filesOut) if filesOut[0] is None: return None return filesOut setattr(self.__class__, "outParaviewTimeDomain" + fieldName, objFunc) class GenericApproximant: """ ABSTRACT ROM approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
full POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. trainedModel: Trained model evaluator. mu0: Default parameter. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList{Soft,Critical}. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ __all__ += [ftype + dtype for ftype, dtype in iterprod( ["norm", "plot", "outParaview", "outParaviewTimeDomain"], ["HF", "RHS", "Approx", "Res", "Err"])] def __init__(self, HFEngine:HFEng, mu0 : paramVal = None, approxParameters : DictAny = {}, verbosity : int = 10, timestamp : bool = True): self._preInit() self._mode = RROMPy_READY self.verbosity = verbosity self.timestamp = timestamp if not hasattr(self, "_output_lvl"): self._output_lvl = [] self._output_lvl += [1] vbMng(self, "INIT", "Initializing engine of type {}.".format(self.name()), 10) self._HFEngine = HFEngine self.trainedModel = None self.lastSolvedHF = emptyParameterList() self.uHF = emptySampleList() self._addParametersToList(["POD", "scaleFactorDer"], [1, "AUTO"], ["S"], [1.]) if mu0 is None: if hasattr(self.HFEngine, "mu0"): self.mu0 = checkParameter(self.HFEngine.mu0) else: raise RROMPyException(("Center of approximation cannot be " "inferred from HF engine. Parameter " "required")) else: self.mu0 = checkParameter(mu0, self.HFEngine.npar) self.resetSamples() self.approxParameters = approxParameters self._postInit() ### add norm{HF,Approx,Err} methods """ Compute norm of * at arbitrary parameter. Args: mu: Target parameter. Returns: Target norm of *. """ for objName in ["HF", "Approx", "Err"]: addNormFieldToClass(self, objName) ### add norm{RHS,Res} methods """ Compute norm of * at arbitrary parameter. Args: mu: Target parameter. Returns: Target norm of *. """ for objName in ["RHS", "Res"]: addNormDualFieldToClass(self, objName) ### add plot{HF,Approx,Err} methods """ Do some nice plots of * at arbitrary parameter. Args: mu: Target parameter. name(optional): Name to be shown as title of the plots. Defaults to 'u'. what(optional): Which plots to do. If list, can contain 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. 
saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. show(optional): Whether to show figure. Defaults to True. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ for objName in ["HF", "Approx", "Err"]: addPlotFieldToClass(self, objName) ### add plot{RHS,Res} methods """ Do some nice plots of * at arbitrary parameter. Args: mu: Target parameter. name(optional): Name to be shown as title of the plots. Defaults to 'u'. what(optional): Which plots to do. If list, can contain 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. show(optional): Whether to show figure. Defaults to True. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ for objName in ["RHS", "Res"]: addPlotDualFieldToClass(self, objName) ### add outParaview{HF,RHS,Approx,Res,Err} methods """ Output * to ParaView file. Args: mu: Target parameter. name(optional): Base name to be used for data output. filename(optional): Name of output file. time(optional): Timestamp. what(optional): Which plots to do. If list, can contain 'MESH', 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. forceNewFile(optional): Whether to create new output file. filePW(optional): Fenics File entity (for time series). """ for objName in ["HF", "RHS", "Approx", "Res", "Err"]: addOutParaviewFieldToClass(self, objName) ### add outParaviewTimeDomain{HF,RHS,Approx,Res,Err} methods """ Output * to ParaView file, converted to time domain. Args: mu: Target parameter. omega(optional): frequency. timeFinal(optional): final time of simulation. periodResolution(optional): number of time steps per period. name(optional): Base name to be used for data output. filename(optional): Name of output file. forceNewFile(optional): Whether to create new output file. 
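The refactor above replaces loose `*args`/`**kwargs` forwarding with explicit option dictionaries (`getargs`, `plotargs`, `normargs`, `outargs`); in particular, `omega` is no longer a positional argument but an `outargs` entry that defaults to `Re(mu)`. The following self-contained toy mirrors that pattern; `outParaviewTimeDomain` here is a fake stand-in for the HF engine's exporter, not the real method:

```
import numpy as np

def outParaviewTimeDomain(u, omega, timeFinal = None, name = "u"):
    # stand-in for the HF engine's time-domain exporter
    print("exporting", name, "at omega =", omega)

# All per-call options ride in one dict, with defaults filled in afterwards.
def objFunc(u, mu, outargs = {}):
    outargs = dict(outargs)                   # copy (the diff mutates in place)
    outargs.setdefault("omega", np.real(mu))  # mirror the diff's omega default
    return outParaviewTimeDomain(u, **outargs)

objFunc(None, 10. + 1.j)                                  # omega = Re(mu) = 10
objFunc(None, 10. + 1.j, {"omega": 62.8, "name": "u_app"})  # explicit omega
```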
""" for objName in ["HF", "RHS", "Approx", "Res", "Err"]: addOutParaviewTimeDomainFieldToClass(self, objName) def _preInit(self): if not hasattr(self, "depth"): self.depth = 0 else: self.depth += 1 @property def tModelType(self): raise RROMPyException("No trainedModel type assigned.") def initializeModelData(self, datadict): from .trained_model.trained_model_data import TrainedModelData data = TrainedModelData(datadict["mu0"], datadict["mus"], datadict.pop("projMat"), datadict["scaleFactor"], datadict.pop("parameterMap")) return (data, ["mu0", "scaleFactor", "mus"]) @property def parameterList(self): """Value of parameterListSoft + parameterListCritical.""" return self.parameterListSoft + self.parameterListCritical def _addParametersToList(self, whatSoft : strLst = [], defaultSoft : ListAny = [], whatCritical : strLst = [], defaultCritical : ListAny = [], toBeExcluded : strLst = []): if not hasattr(self, "parameterToBeExcluded"): self.parameterToBeExcluded = [] self.parameterToBeExcluded = toBeExcluded + self.parameterToBeExcluded if not hasattr(self, "parameterListSoft"): self.parameterListSoft = [] if not hasattr(self, "parameterDefaultSoft"): self.parameterDefaultSoft = {} if not hasattr(self, "parameterListCritical"): self.parameterListCritical = [] if not hasattr(self, "parameterDefaultCritical"): self.parameterDefaultCritical = {} for j, what in enumerate(whatSoft): if what not in self.parameterToBeExcluded: self.parameterListSoft = [what] + self.parameterListSoft self.parameterDefaultSoft[what] = defaultSoft[j] for j, what in enumerate(whatCritical): if what not in self.parameterToBeExcluded: self.parameterListCritical = ([what] + self.parameterListCritical) self.parameterDefaultCritical[what] = defaultCritical[j] def _postInit(self): if self.depth == 0: vbMng(self, "DEL", "Done initializing.", 10) del self.depth else: self.depth -= 1 def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return self.name() def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) def setupSampling(self, reset_samples : bool = True): """Setup sampling engine.""" RROMPyAssert(self._mode, message = "Cannot setup sampling engine.") if not hasattr(self, "_POD") or self._POD is None: return if self.POD == 1: sEng = SamplingEnginePOD elif self.POD == 1/2: sEng = SamplingEngineNormalize else: sEng = SamplingEngine self.samplingEngine = sEng(self.HFEngine, verbosity = self.verbosity) if reset_samples: self.resetSamples() @property def HFEngine(self): """Value of HFEngine.""" return self._HFEngine @HFEngine.setter def HFEngine(self, HFEngine): raise RROMPyException("Cannot change HFEngine.") @property def mu0(self): """Value of mu0.""" return self._mu0 @mu0.setter def mu0(self, mu0): mu0 = checkParameter(mu0) if not hasattr(self, "_mu0") or mu0 != self.mu0: self.resetSamples() self._mu0 = mu0 @property def npar(self): """Number of parameters.""" return self.mu0.shape[1] def checkParameterList(self, mu:paramList, check_if_single : bool = False) -> paramList: return checkParameterList(mu, self.npar, check_if_single) def mapParameterList(self, *args, **kwargs): return self.HFEngine.mapParameterList(*args, **kwargs) @property def approxParameters(self): """Value of approximant parameters.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): if not hasattr(self, "approxParameters"): self._approxParameters = {} approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel 
= 1) keyList = list(approxParameters.keys()) for key in self.parameterListCritical: if key in keyList: setattr(self, "_" + key, self.parameterDefaultCritical[key]) for key in self.parameterListSoft: if key in keyList: setattr(self, "_" + key, self.parameterDefaultSoft[key]) fragile = False for key in self.parameterListCritical: if key in keyList: val = approxParameters[key] else: val = getattr(self, "_" + key, None) if val is None: fragile = True val = self.parameterDefaultCritical[key] if self._mode == RROMPy_FRAGILE: setattr(self, "_" + key, val) self.approxParameters[key] = val else: getattr(self.__class__, key, None).fset(self, val) for key in self.parameterListSoft: if key in keyList: val = approxParameters[key] else: val = getattr(self, "_" + key, None) if val is None: val = self.parameterDefaultSoft[key] if self._mode == RROMPy_FRAGILE: setattr(self, "_" + key, val) self.approxParameters[key] = val else: getattr(self.__class__, key, None).fset(self, val) if fragile: self._mode = RROMPy_FRAGILE @property def POD(self): """Value of POD.""" return self._POD @POD.setter def POD(self, POD): if hasattr(self, "_POD"): PODold = self.POD else: PODold = -1 if POD not in [0, 1/2, 1]: raise RROMPyException("POD must be either 0, 1/2, or 1.") self._POD = POD self._approxParameters["POD"] = self.POD if PODold != self.POD: self.samplingEngine = None self.resetSamples() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. if self._scaleFactorDer == "AUTO": return self.scaleFactor return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif isinstance(scaleFactorDer, Iterable): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def scaleFactorRel(self): """Value of scaleFactorDer / scaleFactor.""" if self._scaleFactorDer == "AUTO": return None try: return np.divide(self.scaleFactorDer, self.scaleFactor) except: raise RROMPyException(("Error in computation of relative scaling " "factor. Make sure that scaleFactor is " "properly initialized.")) from None @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): if S <= 0: raise RROMPyException("S must be positive.") if hasattr(self, "_S") and self._S is not None: Sold = self.S else: Sold = -1 self._S = S self._approxParameters["S"] = self.S if Sold != self.S: self.resetSamples() @property def trainedModel(self): """Value of trainedModel.""" return self._trainedModel @trainedModel.setter def trainedModel(self, trainedModel): self._trainedModel = trainedModel if self._trainedModel is not None: self._trainedModel.reset() self.lastSolvedApproxReduced = emptyParameterList() self.lastSolvedApprox = emptyParameterList() self.uApproxReduced = emptySampleList() self.uApprox = emptySampleList() def resetSamples(self): if hasattr(self, "samplingEngine") and self.samplingEngine is not None: self.samplingEngine.resetHistory() else: self.setupSampling() self._mode = RROMPy_READY def plotSamples(self, *args, **kwargs) -> List[str]: """ Do some nice plots of the samples. Returns: Output filenames. """ RROMPyAssert(self._mode, message = "Cannot plot samples.") return self.samplingEngine.plotSamples(*args, **kwargs) def outParaviewSamples(self, *args, **kwargs) -> List[str]: """ Output samples to ParaView file. Returns: Output filenames. 
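The `approxParameters` setter above resolves incoming keys against two tiers of defaults: critical keys fall back to their defaults only as a last resort (flagging the approximant as fragile), while soft keys fall back silently. A toy version of that resolution logic, with made-up defaults and without the property machinery:

```
# Simplified sketch of the soft/critical parameter resolution; the real code
# routes values through property setters and a FRAGILE mode flag.
defaultsCritical = {"S": 1}
defaultsSoft = {"POD": 1, "scaleFactorDer": "AUTO"}

def resolve(current, incoming):
    resolved, fragile = dict(current), False
    for key, dflt in defaultsCritical.items():
        if key in incoming: resolved[key] = incoming[key]
        elif key not in resolved: resolved[key], fragile = dflt, True
    for key, dflt in defaultsSoft.items():
        resolved[key] = incoming.get(key, resolved.get(key, dflt))
    return resolved, fragile

# S was never set, so it falls back to its critical default and flags fragile:
print(resolve({}, {"POD": 1/2}))
# ({'S': 1, 'POD': 0.5, 'scaleFactorDer': 'AUTO'}, True)
```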
""" RROMPyAssert(self._mode, message = "Cannot output samples.") return self.samplingEngine.outParaviewSamples(*args, **kwargs) def outParaviewTimeDomainSamples(self, *args, **kwargs) -> List[str]: """ Output samples to ParaView file, converted to time domain. Returns: Output filenames. """ RROMPyAssert(self._mode, message = "Cannot output samples.") return self.samplingEngine.outParaviewTimeDomainSamples(*args, **kwargs) def setTrainedModel(self, model): """Deepcopy approximation from trained model.""" if hasattr(model, "storeTrainedModel"): verb = model.verbosity model.verbosity = 0 fileOut = model.storeTrainedModel() model.verbosity = verb else: try: fileOut = getNewFilename("trained_model", "pkl") pickleDump(model.data.__dict__, fileOut) except: raise RROMPyException(("Failed to store model data. Parameter " "model must have either " "storeTrainedModel or " "data.__dict__ properties.")) from None self.loadTrainedModel(fileOut) osrm(fileOut) @abstractmethod def setupApprox(self) -> int: """ Setup approximant. (ABSTRACT) Any specialization should include something like self.trainedModel = ... self.trainedModel.data = ... self.trainedModel.data.approxParameters = copy( self.approxParameters) Returns > 0 if error was encountered, < 0 if no computation was necessary. """ if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) pass vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return self._mode == RROMPy_FRAGILE or (self.trainedModel is not None and self.trainedModel.data.approxParameters == self.approxParameters and len(self.mus) == len(self.trainedModel.data.mus)) def _pruneBeforeEval(self, mu:paramList, field:str, append:bool, prune:bool) -> Tuple[paramList, Np1D, Np1D, bool]: mu = self.checkParameterList(mu) idx = np.empty(len(mu), dtype = np.int) if prune: jExtra = np.zeros(len(mu), dtype = bool) muExtra = emptyParameterList() lastSolvedMus = getattr(self, "lastSolved" + field) if (len(mu) > 0 and len(mu) == len(lastSolvedMus) and mu == lastSolvedMus): idx = np.arange(len(mu), dtype = np.int) return muExtra, jExtra, idx, True muKeep = copy(muExtra) for j in range(len(mu)): jPos = lastSolvedMus.find(mu[j]) if jPos is not None: idx[j] = jPos muKeep.append(mu[j]) else: jExtra[j] = True muExtra.append(mu[j]) if len(muKeep) > 0 and not append: lastSolvedu = getattr(self, "u" + field) idx[~jExtra] = getattr(self.__class__, "set" + field)(self, muKeep, lastSolvedu[idx[~jExtra]], append) append = True else: jExtra = np.ones(len(mu), dtype = bool) muExtra = mu return muExtra, jExtra, idx, append def _setObject(self, mu:paramList, field:str, object:sampList, append:bool) -> List[int]: newMus = self.checkParameterList(mu) newObj = sampleList(object) if append: getattr(self, "lastSolved" + field).append(newMus) getattr(self, "u" + field).append(newObj) Ltot = len(getattr(self, "u" + field)) return list(range(Ltot - len(newObj), Ltot)) setattr(self, "lastSolved" + field, copy(newMus)) setattr(self, "u" + field, copy(newObj)) return list(range(len(getattr(self, "u" + field)))) def setHF(self, muHF:paramList, uHF:sampleList, append : bool = False) -> List[int]: """Assign high fidelity solution.""" return self._setObject(muHF, "HF", uHF, append) def evalHF(self, mu:paramList, append : bool = False, prune : bool = True) -> 
List[int]:
        """
        Find high fidelity solution at arbitrary parameter.

        Args:
            mu: Target parameter.
            append(optional): Whether to append new HF solutions to old ones.
            prune(optional): Whether to remove duplicates of already appearing
                HF solutions.
        """
        muExtra, jExtra, idx, append = self._pruneBeforeEval(mu, "HF", append,
                                                             prune)
        if len(muExtra) > 0:
            muExtra = self.checkParameterList(muExtra)
            vbMng(self, "INIT",
                  "Solving HF model for mu = {}.".format(muExtra), 15)
            newuHFs = self.HFEngine.solve(muExtra)
            vbMng(self, "DEL", "Done solving HF model.", 15)
            idx[jExtra] = self.setHF(muExtra, newuHFs, append)
        return list(idx)

    def setApproxReduced(self, muApproxR:paramList, uApproxR:sampleList,
                         append : bool = False) -> List[int]:
        """Assign reduced approximate solution."""
        return self._setObject(muApproxR, "ApproxReduced", uApproxR, append)

    def evalApproxReduced(self, mu:paramList, append : bool = False,
                          prune : bool = False) -> List[int]:
        """
        Evaluate reduced representation of approximant at arbitrary parameter.

        Args:
            mu: Target parameter.
            append(optional): Whether to append new reduced approximate
                solutions to old ones.
            prune(optional): Whether to remove duplicates of already appearing
                reduced approximate solutions.
        """
        self.setupApprox()
        muExtra, jExtra, idx, append = self._pruneBeforeEval(mu,
                                                             "ApproxReduced",
                                                             append, prune)
        if len(muExtra) > 0:
            newuApproxs = self.trainedModel.getApproxReduced(muExtra)
            idx[jExtra] = self.setApproxReduced(muExtra, newuApproxs, append)
        return list(idx)

    def setApprox(self, muApprox:paramList, uApprox:sampleList,
                  append : bool = False) -> List[int]:
        """Assign approximate solution."""
        return self._setObject(muApprox, "Approx", uApprox, append)

    def evalApprox(self, mu:paramList, append : bool = False,
                   prune : bool = False) -> List[int]:
        """
        Evaluate approximant at arbitrary parameter.

        Args:
            mu: Target parameter.
            append(optional): Whether to append new approximate solutions to
                old ones.
            prune(optional): Whether to remove duplicates of already appearing
                approximate solutions.
        """
        self.setupApprox()
        muExtra, jExtra, idx, append = self._pruneBeforeEval(mu, "Approx",
                                                             append, prune)
        if len(muExtra) > 0:
            newuApproxs = self.trainedModel.getApprox(muExtra)
            idx[jExtra] = self.setApprox(muExtra, newuApproxs, append)
        return list(idx)

    def getHF(self, *args, **kwargs) -> sampList:
        """
        Get HF solution at arbitrary parameter.

        Returns:
            HF solution.
        """
        idx = self.evalHF(*args, **kwargs)
        return self.uHF(idx)

    def getRHS(self, mu:paramList) -> sampList:
        """
        Get linear system RHS at arbitrary parameter.

        Args:
            mu: Target parameter.

        Returns:
            Linear system RHS.
        """
        return self.HFEngine.residual(mu, None)

    def getApproxReduced(self, *args, **kwargs) -> sampList:
        """
        Get reduced representation of approximant at arbitrary parameter.

        Returns:
            Reduced approximant.
        """
        idx = self.evalApproxReduced(*args, **kwargs)
        return self.uApproxReduced(idx)

    def getApprox(self, *args, **kwargs) -> sampList:
        """
        Get approximant at arbitrary parameter.

        Returns:
            Approximant.
        """
        idx = self.evalApprox(*args, **kwargs)
        return self.uApprox(idx)

    def getRes(self, mu:paramList, *args, **kwargs) -> sampList:
        """
        Get residual at arbitrary parameter.

        Args:
            mu: Target parameter.

        Returns:
            Approximant residual.
        """
        if not self.HFEngine.isCEye:
            raise RROMPyException(("Residual of solution with non-scalar C "
                                   "not computable."))
        return self.HFEngine.residual(mu, self.getApprox(mu, *args, **kwargs)
                                        / self.HFEngine.C(mu))

    def getErr(self, *args, **kwargs) -> sampList:
        """
        Get error at arbitrary parameter.

        Returns:
            Approximant error.
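The `eval*`/`get*` pairs above all share one caching idiom: `_pruneBeforeEval` looks up which parameters were already solved for, only the genuinely new ones are solved, and indices into the stored solution list are returned. A minimal sketch of that pattern (the real code prunes via `parameterList.find` and supports append semantics):

```
import numpy as np

# Minimal sketch of the solve-cache pattern behind evalHF/evalApprox.
class CachedSolver:
    def __init__(self, solve):
        self.solve, self.mus, self.us = solve, [], []

    def eval(self, mus):
        idx = []
        for mu in mus:
            if mu not in self.mus:          # "prune" already-known parameters
                self.mus.append(mu)
                self.us.append(self.solve(mu))
            idx.append(self.mus.index(mu))  # index into stored solutions
        return idx

solver = CachedSolver(lambda mu: np.exp(1.j * mu))
print(solver.eval([10., 12., 10.]))   # [0, 1, 0] -- mu = 10. solved only once
```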
""" return self.getApprox(*args, **kwargs) - self.getHF(*args, **kwargs) def getPoles(self, *args, **kwargs) -> paramList: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ self.setupApprox() vbMng(self, "INIT", "Computing poles of model.", 20) poles = self.trainedModel.getPoles(*args, **kwargs) vbMng(self, "DEL", "Done computing poles.", 20) return poles + def compress(self, *args, **kwargs): + """Compress trained reduced model.""" + return self.trainedModel.compress(*args, **kwargs) + def storeSamples(self, filenameBase : str = "samples", forceNewFile : bool = True) -> str: """Store samples to file.""" filename = filenameBase + "_" + self.name() if forceNewFile: filename = getNewFilename(filename, "pkl")[: - 4] return self.samplingEngine.store(filename, False) def storeTrainedModel(self, filenameBase : str = "trained_model", forceNewFile : bool = True) -> str: """Store trained reduced model to file.""" self.setupApprox() filename = None if masterCore(): vbMng(self, "INIT", "Storing trained model to file.", 20) if forceNewFile: filename = getNewFilename(filenameBase, "pkl") else: filename = "{}.pkl".format(filenameBase) pickleDump(self.trainedModel.data.__dict__, filename) vbMng(self, "DEL", "Done storing trained model.", 20) filename = bcast(filename) return filename def loadTrainedModel(self, filename:str): """Load trained reduced model from file.""" vbMng(self, "INIT", "Loading pre-trained model from file.", 20) datadict = pickleLoad(filename) self.mu0 = datadict["mu0"] self.scaleFactor = datadict["scaleFactor"] self.mus = datadict["mus"] self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data, selfkeys = self.initializeModelData(datadict) for key in selfkeys: setattr(self, key, datadict.pop(key)) approxParameters = datadict.pop("approxParameters") data.approxParameters = copy(approxParameters) for apkey in data.approxParameters.keys(): self._approxParameters[apkey] = approxParameters.pop(apkey) setattr(self, "_" + apkey, self._approxParameters[apkey]) for key in datadict: setattr(data, key, datadict[key]) self.trainedModel.data = data self._mode = RROMPy_FRAGILE vbMng(self, "DEL", "Done loading pre-trained model.", 20) diff --git a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py index e5c7b8f..29a319a 100644 --- a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py +++ b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py @@ -1,863 +1,812 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# from abc import abstractmethod from os import mkdir, remove, rmdir -from numbers import Number import numpy as np from collections.abc import Iterable from copy import deepcopy as copy from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from .trained_model.convert_trained_model_pivoted import ( convertTrainedModelPivoted) from rrompy.utilities.base.data_structures import purgeDict, getNewFilename from rrompy.utilities.poly_fitting.polynomial import polybases as ppb from rrompy.utilities.poly_fitting.radial_basis import polybases as rbpb from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk from rrompy.utilities.base.types import Np2D, paramList, List, ListAny from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical.degree import reduceDegreeN from rrompy.utilities.exception_manager import RROMPyException, RROMPyWarning from rrompy.parameter import checkParameterList from rrompy.utilities.parallel import poolRank, bcast __all__ = ['GenericPivotedApproximantNoMatch', 'GenericPivotedApproximantPoleMatch'] class GenericPivotedApproximantBase(GenericApproximant): def __init__(self, directionPivot:ListAny, *args, storeAllSamples : bool = False, **kwargs): self._preInit() if len(directionPivot) > 1: raise RROMPyException(("Exactly 1 pivot parameter allowed in pole " "matching.")) from rrompy.parameter.parameter_sampling import (EmptySampler as ES, SparseGridSampler as SG) - self._addParametersToList(["radialDirectionalWeightsMarginal"], [1.], + self._addParametersToList(["radialDirectionalWeightsMarginal"], [-1], ["samplerPivot", "SMarginal", "samplerMarginal"], [ES(), 1, SG([[-1.], [1.]])], toBeExcluded = ["sampler"]) self._directionPivot = directionPivot self.storeAllSamples = storeAllSamples if not hasattr(self, "_output_lvl"): self._output_lvl = [] self._output_lvl += [1 / 2] super().__init__(*args, **kwargs) self._postInit() def setupSampling(self): super().setupSampling(False) def initializeModelData(self, datadict): if "directionPivot" in datadict.keys(): from .trained_model.trained_model_pivoted_data import ( TrainedModelPivotedData) data = TrainedModelPivotedData(datadict["mu0"], datadict["mus"], datadict.pop("projMat"), datadict["scaleFactor"], datadict.pop("parameterMap"), datadict["directionPivot"]) return (data, ["mu0", "scaleFactor", "directionPivot", "mus"]) else: return super().initializeModelData(datadict) @property def npar(self): """Number of parameters.""" if hasattr(self, "_temporaryPivot"): return self.nparPivot return super().npar def checkParameterListPivot(self, mu:paramList, check_if_single : bool = False) -> paramList: return checkParameterList(mu, self.nparPivot, check_if_single) def checkParameterListMarginal(self, mu:paramList, check_if_single : bool = False) -> paramList: return checkParameterList(mu, self.nparMarginal, check_if_single) def mapParameterList(self, *args, **kwargs): if hasattr(self, "_temporaryPivot"): return self.mapParameterListPivot(*args, **kwargs) return super().mapParameterList(*args, **kwargs) def mapParameterListPivot(self, mu:paramList, direct : str = "F", idx : List[int] = None): if idx is None: idx = self.directionPivot else: idx = [self.directionPivot[j] for j in idx] return super().mapParameterList(mu, direct, idx) def mapParameterListMarginal(self, mu:paramList, direct : str = "F", idx : List[int] = None): if idx is None: idx = self.directionMarginal else: idx = [self.directionMarginal[j] for j in idx] return super().mapParameterList(mu, direct, 
idx) @property def mu0(self): """Value of mu0.""" if hasattr(self, "_temporaryPivot"): return self.checkParameterListPivot(self._mu0(self.directionPivot)) return self._mu0 @mu0.setter def mu0(self, mu0): GenericApproximant.mu0.fset(self, mu0) @property def mus(self): """Value of mus. Its assignment may reset snapshots.""" return self._mus @mus.setter def mus(self, mus): mus = self.checkParameterList(mus) musOld = copy(self.mus) if hasattr(self, '_mus') else None if (musOld is None or len(mus) != len(musOld) or not mus == musOld): self.resetSamples() self._mus = mus @property def musMarginal(self): """Value of musMarginal. Its assignment may reset snapshots.""" return self._musMarginal @musMarginal.setter def musMarginal(self, musMarginal): musMarginal = self.checkParameterListMarginal(musMarginal) if hasattr(self, '_musMarginal'): musMOld = copy(self.musMarginal) else: musMOld = None if (musMOld is None or len(musMarginal) != len(musMOld) or not musMarginal == musMOld): self.resetSamples() self._musMarginal = musMarginal @property def SMarginal(self): """Value of SMarginal.""" return self._SMarginal @SMarginal.setter def SMarginal(self, SMarginal): if SMarginal <= 0: raise RROMPyException("SMarginal must be positive.") if hasattr(self, "_SMarginal") and self._SMarginal is not None: Sold = self.SMarginal else: Sold = -1 self._SMarginal = SMarginal self._approxParameters["SMarginal"] = self.SMarginal if Sold != self.SMarginal: self.resetSamples() @property def radialDirectionalWeightsMarginal(self): """Value of radialDirectionalWeightsMarginal.""" return self._radialDirectionalWeightsMarginal @radialDirectionalWeightsMarginal.setter def radialDirectionalWeightsMarginal(self, radialDirWeightsMarg): + if radialDirWeightsMarg == -1: + radialDirWeightsMarg = [1.] * self.nparMarginal if isinstance(radialDirWeightsMarg, Iterable): radialDirWeightsMarg = list(radialDirWeightsMarg) else: radialDirWeightsMarg = [radialDirWeightsMarg] self._radialDirectionalWeightsMarginal = radialDirWeightsMarg self._approxParameters["radialDirectionalWeightsMarginal"] = ( self.radialDirectionalWeightsMarginal) @property def directionPivot(self): """Value of directionPivot. 
Its assignment may reset snapshots.""" return self._directionPivot @directionPivot.setter def directionPivot(self, directionPivot): if hasattr(self, '_directionPivot'): directionPivotOld = copy(self.directionPivot) else: directionPivotOld = None if (directionPivotOld is None or len(directionPivot) != len(directionPivotOld) or not directionPivot == directionPivotOld): self.resetSamples() self._directionPivot = directionPivot @property def directionMarginal(self): return [x for x in range(self.HFEngine.npar) \ if x not in self.directionPivot] @property def nparPivot(self): return len(self.directionPivot) @property def nparMarginal(self): return self.npar - self.nparPivot @property def muBounds(self): """Value of muBounds.""" return self.samplerPivot.lims @property def muBoundsMarginal(self): """Value of muBoundsMarginal.""" return self.samplerMarginal.lims @property def sampler(self): """Proxy of samplerPivot.""" return self._samplerPivot @property def samplerPivot(self): """Value of samplerPivot.""" return self._samplerPivot @samplerPivot.setter def samplerPivot(self, samplerPivot): if 'generatePoints' not in dir(samplerPivot): raise RROMPyException("Pivot sampler type not recognized.") if hasattr(self, '_samplerPivot') and self._samplerPivot is not None: samplerOld = self.samplerPivot self._samplerPivot = samplerPivot self._approxParameters["samplerPivot"] = self.samplerPivot if not 'samplerOld' in locals() or samplerOld != self.samplerPivot: self.resetSamples() @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'generatePoints' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") if (hasattr(self, '_samplerMarginal') and self._samplerMarginal is not None): samplerOld = self.samplerMarginal self._samplerMarginal = samplerMarginal self._approxParameters["samplerMarginal"] = self.samplerMarginal if not 'samplerOld' in locals() or samplerOld != self.samplerMarginal: self.resetSamples() + @property + def matchState(self): + """Utility value of matchState.""" + return False + def computeScaleFactor(self): """Compute parameter rescaling factor.""" self.scaleFactorPivot = .5 * np.abs(( self.mapParameterListPivot(self.muBounds[0]) - self.mapParameterListPivot(self.muBounds[1]))[0]) self.scaleFactorMarginal = .5 * np.abs(( self.mapParameterListMarginal(self.muBoundsMarginal[0]) - self.mapParameterListMarginal(self.muBoundsMarginal[1]))[0]) self.scaleFactor = np.empty(self.npar) self.scaleFactor[self.directionPivot] = self.scaleFactorPivot self.scaleFactor[self.directionMarginal] = self.scaleFactorMarginal def _setupTrainedModel(self, pMat:Np2D, pMatUpdate : bool = False, pMatOld : Np2D = None, forceNew : bool = False): if forceNew or self.trainedModel is None: self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp datadict = {"mu0": self.mu0, "mus": copy(self.mus), "projMat": pMat, "scaleFactor": self.scaleFactor, "parameterMap": self.HFEngine.parameterMap, "directionPivot": self.directionPivot} self.trainedModel.data = self.initializeModelData(datadict)[0] else: self.trainedModel = self.trainedModel if pMatUpdate: self.trainedModel.data.projMat = np.hstack( (self.trainedModel.data.projMat, pMat)) else: self.trainedModel.data.projMat = copy(pMat) self.trainedModel.data.mus = copy(self.mus) self.trainedModel.data.musMarginal = copy(self.musMarginal) + def 
addSamplePoints(self, mus:paramList): + """Add global sample points to reduced model.""" + raise RROMPyException(("Cannot add global samples to pivoted reduced " + "model.")) + def normApprox(self, mu:paramList) -> float: _PODOld, self._POD = self.POD, 0 result = super().normApprox(mu) self._POD = _PODOld return result @property def storedSamplesFilenames(self) -> List[str]: if not hasattr(self, "_sampleBaseFilename"): return [] return [self._sampleBaseFilename + "{}_{}.pkl" .format(idx + 1, self.name()) for idx in range(len(self.musMarginal))] def purgeStoredSamples(self): if not hasattr(self, "_sampleBaseFilename"): return for file in self.storedSamplesFilenames: remove(file) rmdir(self._sampleBaseFilename[: -8]) def storeSamples(self, idx : int = None): """Store samples to file.""" if not hasattr(self, "_sampleBaseFilename"): filenameBase = None if poolRank() == 0: foldername = getNewFilename(self.name(), "samples") mkdir(foldername) filenameBase = foldername + "/sample_" self._sampleBaseFilename = bcast(filenameBase, force = True) if idx is not None: super().storeSamples(self._sampleBaseFilename + str(idx + 1), False) def loadTrainedModel(self, filename:str): """Load trained reduced model from file.""" super().loadTrainedModel(filename) self._musMarginal = self.trainedModel.data.musMarginal def setTrainedModel(self, model): """Deepcopy approximation from trained model.""" super().setTrainedModel(model) self.trainedModel = convertTrainedModelPivoted(self.trainedModel, self.tModelType, self, True) self._preliminaryMarginalFinalization() self._finalizeMarginalization() self.trainedModel.data.approxParameters = self.approxParameters class GenericPivotedApproximantNoMatch(GenericPivotedApproximantBase): """ ROM pivoted approximant (without pole matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. 
parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ @property def tModelType(self): from .trained_model.trained_model_pivoted_rational_nomatch import ( TrainedModelPivotedRationalNoMatch) return TrainedModelPivotedRationalNoMatch def _finalizeMarginalization(self): self.trainedModel.setupMarginalInterp( [self.radialDirectionalWeightsMarginal]) self.trainedModel.data.approxParameters = copy(self.approxParameters) def _preliminaryMarginalFinalization(self): pass class GenericPivotedApproximantPoleMatch(GenericPivotedApproximantBase): """ ROM pivoted approximant (with pole matching) computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; if <= 0, Euclidean metric is used; if - 'AUTO', automatically selected; defaults to -1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant. 
parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. - matchingChordalRadius: Radius to be used in chordal metric for poles - and residues. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. 
""" _allowedBadPoleCorrectionKinds = ["ERASE", "RATIONAL", "POLYNOMIAL"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchState", "matchingWeight", - "matchingChordalRadius", "matchingShared", - "badPoleCorrection", "polybasisMarginal", - "paramsMarginal"], - [False, 1., [-1, -1], 1., "ERASE", - "MONOMIAL", {}]) + "matchingShared", "badPoleCorrection", + "polybasisMarginal", "paramsMarginal"], + [False, 1., 1., "ERASE", "MONOMIAL", {}]) self.parameterMarginalList = ["MMarginal", "nNeighborsMarginal", "polydegreetypeMarginal", "interpTolMarginal", "radialDirectionalWeightsMarginalAdapt"] super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_pivoted_rational_polematch import ( TrainedModelPivotedRationalPoleMatch) return TrainedModelPivotedRationalPoleMatch @property def matchState(self): """Value of matchState.""" return self._matchState @matchState.setter def matchState(self, matchState): self._matchState = matchState self._approxParameters["matchState"] = self.matchState @property def matchingWeight(self): """Value of matchingWeight.""" return self._matchingWeight @matchingWeight.setter def matchingWeight(self, matchingWeight): self._matchingWeight = matchingWeight self._approxParameters["matchingWeight"] = self.matchingWeight - @property - def matchingChordalRadius(self): - """Value of matchingChordalRadius.""" - return self._matchingChordalRadius - @matchingChordalRadius.setter - def matchingChordalRadius(self, matchingChordalRadius): - if not hasattr(matchingChordalRadius, "__len__"): - matchingChordalRadius = [matchingChordalRadius] * 2 - if len(matchingChordalRadius) > 2: - matchingChordalRadius = matchingChordalRadius[: 2] - for j in range(2): - if isinstance(matchingChordalRadius[j], (str,)): - matchingChordalRadius[j] = ( - matchingChordalRadius[j].upper().strip().replace(" ","")) - if self.POD != 1 and (matchingChordalRadius[1] == "AUTO" - or (isinstance(matchingChordalRadius[1], (Number,)) - and matchingChordalRadius[1] > 0)): - RROMPyWarning(("Riemann interpolation of residues without POD " - "may lead to unreliable results due to metric " - "differences.")) - self._matchingChordalRadius = matchingChordalRadius - self._approxParameters["matchingChordalRadius"] = ( - self.matchingChordalRadius) - @property def matchingShared(self): """Value of matchingShared.""" return self._matchingShared @matchingShared.setter def matchingShared(self, matchingShared): if matchingShared > 1.: RROMPyWarning("Shared ratio too large. Clipping to 1.") matchingShared = 1. elif matchingShared < 0.: RROMPyWarning("Shared ratio too small. Clipping to 0.") matchingShared = 0. self._matchingShared = matchingShared self._approxParameters["matchingShared"] = self.matchingShared @property def badPoleCorrection(self): """Value of badPoleCorrection.""" return self._badPoleCorrection @badPoleCorrection.setter def badPoleCorrection(self, badPoleC): try: badPoleC = badPoleC.upper().strip().replace(" ","") if badPoleC not in self._allowedBadPoleCorrectionKinds: raise RROMPyException(("Prescribed badPoleCorrection not " "recognized.")) self._badPoleCorrection = badPoleC except: RROMPyWarning(("Prescribed badPoleCorrection not recognized. 
" "Overriding to 'ERASE'.")) self._badPoleCorrection = "ERASE" self._approxParameters["badPoleCorrection"] = self.badPoleCorrection @property def polybasisMarginal(self): """Value of polybasisMarginal.""" return self._polybasisMarginal @polybasisMarginal.setter def polybasisMarginal(self, polybasisMarginal): try: polybasisMarginal = polybasisMarginal.upper().strip().replace(" ", "") if polybasisMarginal not in ppb + rbpb + ["NEARESTNEIGHBOR"] + sk: raise RROMPyException( "Prescribed marginal polybasis not recognized.") self._polybasisMarginal = polybasisMarginal except: RROMPyWarning(("Prescribed marginal polybasis not recognized. " "Overriding to 'MONOMIAL'.")) self._polybasisMarginal = "MONOMIAL" self._approxParameters["polybasisMarginal"] = self.polybasisMarginal @property def paramsMarginal(self): """Value of paramsMarginal.""" return self._paramsMarginal @paramsMarginal.setter def paramsMarginal(self, paramsMarginal): paramsMarginal = purgeDict(paramsMarginal, self.parameterMarginalList, dictname = self.name() + ".paramsMarginal", baselevel = 1) keyList = list(paramsMarginal.keys()) if not hasattr(self, "_paramsMarginal"): self._paramsMarginal = {} if "MMarginal" in keyList: MMarg = paramsMarginal["MMarginal"] elif ("MMarginal" in self.paramsMarginal and not hasattr(self, "_MMarginal_isauto")): MMarg = self.paramsMarginal["MMarginal"] else: MMarg = "AUTO" if isinstance(MMarg, str): MMarg = MMarg.strip().replace(" ","") if "-" not in MMarg: MMarg = MMarg + "-0" self._MMarginal_isauto = True self._MMarginal_shift = int(MMarg.split("-")[-1]) MMarg = 0 if MMarg < 0: raise RROMPyException("MMarginal must be non-negative.") self._paramsMarginal["MMarginal"] = MMarg if "nNeighborsMarginal" in keyList: self._paramsMarginal["nNeighborsMarginal"] = max(1, paramsMarginal["nNeighborsMarginal"]) elif "nNeighborsMarginal" not in self.paramsMarginal: self._paramsMarginal["nNeighborsMarginal"] = 1 if "polydegreetypeMarginal" in keyList: try: polydegtypeM = paramsMarginal["polydegreetypeMarginal"]\ .upper().strip().replace(" ","") if polydegtypeM not in ["TOTAL", "FULL"]: raise RROMPyException(("Prescribed polydegreetypeMarginal " "not recognized.")) self._paramsMarginal["polydegreetypeMarginal"] = polydegtypeM except: RROMPyWarning(("Prescribed polydegreetypeMarginal not " "recognized. Overriding to 'TOTAL'.")) self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL" elif "polydegreetypeMarginal" not in self.paramsMarginal: self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL" if "interpTolMarginal" in keyList: self._paramsMarginal["interpTolMarginal"] = ( paramsMarginal["interpTolMarginal"]) elif "interpTolMarginal" not in self.paramsMarginal: self._paramsMarginal["interpTolMarginal"] = -1 if "radialDirectionalWeightsMarginalAdapt" in keyList: self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = ( paramsMarginal["radialDirectionalWeightsMarginalAdapt"]) elif "radialDirectionalWeightsMarginalAdapt" not in self.paramsMarginal: self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = [ -1., -1.] 
self._approxParameters["paramsMarginal"] = self.paramsMarginal def _setMMarginalAuto(self): if (self.polybasisMarginal not in ppb + rbpb or "MMarginal" not in self.paramsMarginal or "polydegreetypeMarginal" not in self.paramsMarginal): raise RROMPyException(("Cannot set MMarginal if " "polybasisMarginal does not allow it.")) self.paramsMarginal["MMarginal"] = max(0, reduceDegreeN( len(self.musMarginal), len(self.musMarginal), self.nparMarginal, self.paramsMarginal["polydegreetypeMarginal"]) - self._MMarginal_shift) vbMng(self, "MAIN", ("Automatically setting MMarginal to {}.").format( self.paramsMarginal["MMarginal"]), 25) def purgeparamsMarginal(self): self.paramsMarginal = {} paramsMbadkeys = [] if self.polybasisMarginal in ppb + rbpb + sk: paramsMbadkeys += ["nNeighborsMarginal"] if self.polybasisMarginal not in rbpb: paramsMbadkeys += ["radialDirectionalWeightsMarginalAdapt"] if self.polybasisMarginal in ["NEARESTNEIGHBOR"] + sk: paramsMbadkeys += ["MMarginal", "polydegreetypeMarginal", "interpTolMarginal"] if hasattr(self, "_MMarginal_isauto"): del self._MMarginal_isauto if hasattr(self, "_MMarginal_shift"): del self._MMarginal_shift for key in paramsMbadkeys: if key in self._paramsMarginal: del self._paramsMarginal[key] self._approxParameters["paramsMarginal"] = self.paramsMarginal def _finalizeMarginalization(self): vbMng(self, "INIT", "Checking shared ratio.", 10) msg = self.trainedModel.checkShared(self.matchingShared, self.badPoleCorrection) vbMng(self, "DEL", "Done checking. " + msg, 10) if self.polybasisMarginal in rbpb + ["NEARESTNEIGHBOR"]: self.computeScaleFactor() rDWMEff = np.array([w * f for w, f in zip( self.radialDirectionalWeightsMarginal, self.scaleFactorMarginal)]) if self.polybasisMarginal in ppb + rbpb + sk: interpPars = [self.polybasisMarginal] if self.polybasisMarginal in ppb + rbpb: if self.polybasisMarginal in rbpb: interpPars += [rDWMEff] interpPars += [self.verbosity >= 5, self.paramsMarginal["polydegreetypeMarginal"] == "TOTAL"] if self.polybasisMarginal in ppb: interpPars += [{}] else: # if self.polybasisMarginal in rbpb: interpPars += [{"optimizeScalingBounds":self.paramsMarginal[ "radialDirectionalWeightsMarginalAdapt"]}] interpPars += [ {"rcond":self.paramsMarginal["interpTolMarginal"]}] extraPar = hasattr(self, "_MMarginal_isauto") else: # if self.polybasisMarginal in sk: idxEff = [x for x in range(self.samplerMarginal.npoints) if not hasattr(self.trainedModel, "_idxExcl") or x not in self.trainedModel._idxExcl] extraPar = self.samplerMarginal.depth[idxEff] else: # if self.polybasisMarginal == "NEARESTNEIGHBOR": interpPars = [self.paramsMarginal["nNeighborsMarginal"], rDWMEff] extraPar = None self.trainedModel.setupMarginalInterp(self, interpPars, extraPar) self.trainedModel.data.approxParameters = copy(self.approxParameters) def _preliminaryMarginalFinalization(self): vbMng(self, "INIT", "Compressing and matching poles.", 10) - if (self.matchingChordalRadius[1] == "AUTO" - or self.matchingChordalRadius[1] > 0): - if self.HFEngine.isCEye: - if not hasattr(self.trainedModel.data, "projGramian"): - projG = self.HFEngine.innerProduct( - self.trainedModel.data.projMat, - self.trainedModel.data.projMat, - is_state = False) - else: - Sold = self.trainedModel.data.projGramian.shape[0] - S = self.trainedModel.data.projMat.shape[1] - if Sold > S: - projG = self.trainedModel.data.projGramian[: S, : S] - else: - projG = np.pad(self.trainedModel.data.projGramian, - (0, S - Sold), "constant") - projG[: Sold, Sold :] = self.HFEngine.innerProduct( - 
self.trainedModel.data.projMat[:, Sold :], - self.trainedModel.data.projMat[:, : Sold], - is_state = False) - projG[Sold :, : Sold] = projG[: Sold, Sold :].T.conj() - projG[Sold :, Sold :] = self.HFEngine.innerProduct( - self.trainedModel.data.projMat[:, Sold :], - self.trainedModel.data.projMat[:, Sold :], - is_state = False) - else: - projG = None - self.trainedModel.data.projGramian = projG self.trainedModel.initializeFromRational(self.matchingWeight, self.HFEngine, - self.matchState, - self.matchingChordalRadius) + self.matchState) vbMng(self, "DEL", "Done compressing and matching poles.", 10) def _postApplyC(self): if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C to " "orthonormalized samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) pMat = None for j, mu in enumerate(self.trainedModel.data.mus): pMatj = self.trainedModel.data.projMat[:, j] pMatj = np.expand_dims(self.HFEngine.applyC(pMatj, mu), -1) if pMat is None: pMat = np.array(pMatj) else: pMat = np.append(pMat, pMatj, axis = 1) vbMng(self, "DEL", "Done extracting system output.", 35) self.trainedModel.data.projMat = pMat @abstractmethod def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() setupOK = super().setupApprox(*args, **kwargs) if self.matchState: self._postApplyC() return setupOK diff --git a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py index 1659aa5..a82405f 100644 --- a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py +++ b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py @@ -1,651 +1,666 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
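With matchingChordalRadius gone, the pole-matching distance used below in _getDistanceApp (via doubleDistanceMatrix and pointMatching) reduces to a plain Euclidean metric on the complex plane followed by an optimal one-to-one assignment. A rough stand-in sketch, with scipy's linear_sum_assignment substituting for RROMPy's pointMatching:
```
import numpy as np
from scipy.optimize import linear_sum_assignment

def mean_matched_distance(polesEx, polesAp):
    # pairwise Euclidean distances |p_i - q_j| between exact and
    # approximate poles in the complex plane
    dist = np.abs(polesEx[:, None] - polesAp[None, :])
    # optimal one-to-one matching minimizing the total distance
    rows, cols = linear_sum_assignment(dist)
    return np.mean(dist[rows, cols])

polesEx = np.array([1. + 1.j, 2. - .5j, 3. + 0.j])
polesAp = np.array([2.9 + 0.j, 1.1 + .9j, 2.1 - .4j])
print(mean_matched_distance(polesEx, polesAp))  # small mean: poles nearly match
```
When matchingWeightError is nonzero, the same machinery also weighs distances between projected residues; that part is omitted here.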
# from abc import abstractmethod from copy import deepcopy as copy import numpy as np from collections.abc import Iterable from matplotlib import pyplot as plt from rrompy.reduction_methods.pivoted.generic_pivoted_approximant import ( GenericPivotedApproximantBase, GenericPivotedApproximantPoleMatch) from rrompy.reduction_methods.pivoted.gather_pivoted_approximant import ( gatherPivotedApproximant) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal, paramList, ListAny) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import dot from rrompy.utilities.numerical.point_matching import pointMatching from rrompy.utilities.numerical.point_distances import doubleDistanceMatrix from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import (masterCore, indicesScatter, arrayGatherv, isend) __all__ = ['GenericPivotedGreedyApproximantPoleMatch'] class GenericPivotedGreedyApproximantBase(GenericPivotedApproximantBase): _allowedEstimatorKindsMarginal = ["LOOK_AHEAD", "LOOK_AHEAD_RECOVER", "NONE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["matchingWeightError", "errorEstimatorKindMarginal", - "greedyTolMarginal", "maxIterMarginal"], - [0., "NONE", 1e-1, 1e2]) + "greedyTolMarginal", "maxIterMarginal", + "autoCollapse"], + [0., "NONE", 1e-1, 1e2, False]) super().__init__(*args, **kwargs) self._postInit() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1. if self._scaleFactorDer == "AUTO": return self._scaleFactorOldPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif isinstance(scaleFactorDer, Iterable): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def samplerMarginal(self): """Value of samplerMarginal.""" return self._samplerMarginal @samplerMarginal.setter def samplerMarginal(self, samplerMarginal): if 'refine' not in dir(samplerMarginal): raise RROMPyException("Marginal sampler type not recognized.") GenericPivotedApproximantBase.samplerMarginal.fset(self, samplerMarginal) @property def errorEstimatorKindMarginal(self): """Value of errorEstimatorKindMarginal.""" return self._errorEstimatorKindMarginal @errorEstimatorKindMarginal.setter def errorEstimatorKindMarginal(self, errorEstimatorKindMarginal): errorEstimatorKindMarginal = errorEstimatorKindMarginal.upper() if errorEstimatorKindMarginal not in ( self._allowedEstimatorKindsMarginal): RROMPyWarning(("Marginal error estimator kind not recognized. 
" "Overriding to 'NONE'.")) errorEstimatorKindMarginal = "NONE" self._errorEstimatorKindMarginal = errorEstimatorKindMarginal self._approxParameters["errorEstimatorKindMarginal"] = ( self.errorEstimatorKindMarginal) @property def matchingWeightError(self): """Value of matchingWeightError.""" return self._matchingWeightError @matchingWeightError.setter def matchingWeightError(self, matchingWeightError): self._matchingWeightError = matchingWeightError self._approxParameters["matchingWeightError"] = ( self.matchingWeightError) @property def greedyTolMarginal(self): """Value of greedyTolMarginal.""" return self._greedyTolMarginal @greedyTolMarginal.setter def greedyTolMarginal(self, greedyTolMarginal): if greedyTolMarginal < 0: raise RROMPyException("greedyTolMarginal must be non-negative.") if (hasattr(self, "_greedyTolMarginal") and self.greedyTolMarginal is not None): greedyTolMarginalold = self.greedyTolMarginal else: greedyTolMarginalold = -1 self._greedyTolMarginal = greedyTolMarginal self._approxParameters["greedyTolMarginal"] = self.greedyTolMarginal if greedyTolMarginalold != self.greedyTolMarginal: self.resetSamples() @property def maxIterMarginal(self): """Value of maxIterMarginal.""" return self._maxIterMarginal @maxIterMarginal.setter def maxIterMarginal(self, maxIterMarginal): if maxIterMarginal <= 0: raise RROMPyException("maxIterMarginal must be positive.") if (hasattr(self, "_maxIterMarginal") and self.maxIterMarginal is not None): maxIterMarginalold = self.maxIterMarginal else: maxIterMarginalold = -1 self._maxIterMarginal = maxIterMarginal self._approxParameters["maxIterMarginal"] = self.maxIterMarginal if maxIterMarginalold != self.maxIterMarginal: self.resetSamples() + @property + def autoCollapse(self): + """Value of autoCollapse.""" + return self._autoCollapse + @autoCollapse.setter + def autoCollapse(self, autoCollapse): + self._autoCollapse = autoCollapse + self._approxParameters["autoCollapse"] = self.autoCollapse + def resetSamples(self): """Reset samples.""" super().resetSamples() if not hasattr(self, "_temporaryPivot"): self._mus = emptyParameterList() self._musMarginal = emptyParameterList() if hasattr(self, "samplerMarginal"): self.samplerMarginal.reset() if hasattr(self, "samplingEngine") and self.samplingEngine is not None: self.samplingEngine.resetHistory() def _getDistanceApp(self, polesEx:Np1D, resEx:Np2D, muTest:paramVal) -> float: polesAp = self.trainedModel.interpolateMarginalPoles(muTest)[0] if self.matchingWeightError != 0: resAp = self.trainedModel.interpolateMarginalCoeffs(muTest)[0][ : len(polesAp), :] resEx = dot(self.trainedModel.data.projMat, resEx) resAp = dot(self.trainedModel.data.projMat, resAp) else: resAp = None dist = doubleDistanceMatrix(polesEx, polesAp, self.matchingWeightError, - resEx, resAp, self.HFEngine, False, - self.trainedModel.data.chordalRadius) + resEx, resAp, self.HFEngine, False) pmR, pmC = pointMatching(dist) return np.mean(dist[pmR, pmC]) def getErrorEstimatorMarginalLookAhead(self) -> Np1D: if not hasattr(self.trainedModel, "_musMExcl"): err = np.zeros(0) err[:] = np.inf self._musMarginalTestIdxs = np.zeros(0, dtype = int) return err self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl, dtype = int) idx, sizes = indicesScatter(len(self.trainedModel._musMExcl), return_sizes = True) err = [] if len(idx) > 0: self.verbosity -= 25 self.trainedModel.verbosity -= 25 for j in idx: muTest = self.trainedModel._musMExcl[j] HITest = self.trainedModel._HIsExcl[j] polesEx = HITest.poles idxGood = np.isinf(polesEx) + 
np.isnan(polesEx) == False polesEx = polesEx[idxGood] if self.matchingWeightError != 0: resEx = HITest.coeffs[np.where(idxGood)[0]] else: resEx = None if len(polesEx) == 0: err += [0.] continue err += [self._getDistanceApp(polesEx, resEx, muTest)] self.verbosity += 25 self.trainedModel.verbosity += 25 return arrayGatherv(np.array(err), sizes) def getErrorEstimatorMarginalNone(self) -> Np1D: nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) return (1. + self.greedyTolMarginal) * np.ones(nErr) def errorEstimatorMarginal(self, return_max : bool = False) -> Np1D: vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format( self.trainedModel.data.musMarginal), 10) if self.errorEstimatorKindMarginal == "NONE": nErr = len(self.trainedModel.data.musMarginal) self._musMarginalTestIdxs = np.arange(nErr) err = (1. + self.greedyTolMarginal) * np.ones(nErr) else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": err = self.getErrorEstimatorMarginalLookAhead() vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10) if not return_max: return err idxMaxEst = np.where(err > self.greedyTolMarginal)[0] maxErr = err[idxMaxEst] if self.errorEstimatorKindMarginal == "NONE": maxErr = None return err, idxMaxEst, maxErr def plotEstimatorMarginal(self, est:Np1D, idxMax:List[int], estMax:List[float]): if self.errorEstimatorKindMarginal == "NONE": return if (not (np.any(np.isnan(est)) or np.any(np.isinf(est))) and masterCore() and hasattr(self.trainedModel, "_musMExcl")): fig = plt.figure(figsize = plt.figaspect(1. / self.nparMarginal)) for jpar in range(self.nparMarginal): ax = fig.add_subplot(1, self.nparMarginal, 1 + jpar) musre = np.real(self.trainedModel._musMExcl) if len(idxMax) > 0 and estMax is not None: maxrej = musre[idxMax, jpar] errCP = copy(est) idx = np.delete(np.arange(self.nparMarginal), jpar) while len(musre) > 0: if self.nparMarginal == 1: currIdx = np.arange(len(musre)) else: currIdx = np.where(np.isclose(np.sum( np.abs(musre[:, idx] - musre[0, idx]), 1), 0., atol = 1e-15))[0] currIdxSorted = currIdx[np.argsort(musre[currIdx, jpar])] ax.semilogy(musre[currIdxSorted, jpar], errCP[currIdxSorted], 'k.-', linewidth = 1) musre = np.delete(musre, currIdx, 0) errCP = np.delete(errCP, currIdx) ax.semilogy(self.musMarginal.re(jpar), (self.greedyTolMarginal,) * len(self.musMarginal), '*m') if len(idxMax) > 0 and estMax is not None: ax.semilogy(maxrej, estMax, 'xr') ax.set_xlim(*list(self.samplerMarginal.lims.re(jpar))) ax.grid() plt.tight_layout() plt.show() def _addMarginalSample(self, mus:paramList): mus = self.checkParameterListMarginal(mus) if len(mus) == 0: return self._nmusOld, nmus = len(self.musMarginal), len(mus) if (hasattr(self, "trainedModel") and self.trainedModel is not None and hasattr(self.trainedModel, "_musMExcl")): self._nmusOld += len(self.trainedModel._musMExcl) vbMng(self, "MAIN", ("Adding marginal sample point{} no. 
{}{} at {} to training " "set.").format("s" * (nmus > 1), self._nmusOld + 1, "--{}".format(self._nmusOld + nmus) * (nmus > 1), mus), 3) self.musMarginal.append(mus) self.setupApproxPivoted(mus) self._preliminaryMarginalFinalization() del self._nmusOld if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): ubRange = len(self.trainedModel.data.musMarginal) if hasattr(self.trainedModel, "_idxExcl"): shRange = len(self.trainedModel._musMExcl) else: shRange = 0 testIdxs = list(range(ubRange + shRange - len(mus), ubRange + shRange)) for j in testIdxs[::-1]: self.musMarginal.pop(j - shRange) if hasattr(self.trainedModel, "_idxExcl"): testIdxs = self.trainedModel._idxExcl + testIdxs self._updateTrainedModelMarginalSamples(testIdxs) self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = self.SMarginal def greedyNextSampleMarginal(self, muidx:List[int], plotEst : str = "NONE") \ -> Tuple[Np1D, List[int], float, paramVal]: RROMPyAssert(self._mode, message = "Cannot add greedy sample.") muidx = self._musMarginalTestIdxs[muidx] if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD" and not self.firstGreedyIterM): if not hasattr(self.trainedModel, "_idxExcl"): raise RROMPyException(("Sample index to be added not present " "in trained model.")) testIdxs = copy(self.trainedModel._idxExcl) skippedIdx = 0 for cj, j in enumerate(self.trainedModel._idxExcl): if j in muidx: testIdxs.pop(skippedIdx) self.musMarginal.insert(self.trainedModel._musMExcl[cj], j - skippedIdx) else: skippedIdx += 1 if len(self.trainedModel._idxExcl) < (len(muidx) + len(testIdxs)): raise RROMPyException(("Sample index to be added not present " "in trained model.")) self._updateTrainedModelMarginalSamples(testIdxs) self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) self.firstGreedyIterM = False idxAdded = self.samplerMarginal.refine(muidx)[0] self._addMarginalSample(self.samplerMarginal.points[idxAdded]) errorEstTest, muidx, maxErrorEst = self.errorEstimatorMarginal(True) if plotEst == "ALL": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) return (errorEstTest, muidx, maxErrorEst, self.samplerMarginal.points[muidx]) def _preliminaryTrainingMarginal(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if np.sum(self.samplingEngine.nsamples) > 0: return self.resetSamples() self._addMarginalSample(self.samplerMarginal.generatePoints( self.SMarginal)) def _preSetupApproxPivoted(self, mus:paramList) \ -> Tuple[ListAny, ListAny, ListAny]: self.computeScaleFactor() if self.trainedModel is None: self._setupTrainedModel(np.zeros((0, 0))) self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], [] self.trainedModel.data.Psupp = [] self._trainedModelOld = copy(self.trainedModel) self._scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 self._musLoc = copy(self.mus) idx, sizes = indicesScatter(len(mus), return_sizes = True) emptyCores = np.where(sizes == 0)[0] self.verbosity -= 10 self.samplingEngine.verbosity -= 10 return idx, sizes, emptyCores def _postSetupApproxPivoted(self, mus:Np2D, pMat:Np2D, Ps:ListAny, Qs:ListAny, sizes:ListAny): self.scaleFactor = self._scaleFactorOldPivot del self._scaleFactorOldPivot, 
self._temporaryPivot pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs, mus, sizes, self.polybasis) if len(self._musLoc) > 0: self._mus = self.checkParameterList(self._musLoc) self._mus.append(mus) else: self._mus = self.checkParameterList(mus) self.trainedModel = self._trainedModelOld del self._trainedModelOld - padLeft = self.trainedModel.data.projMat.shape[1] - suppNew = np.append(0, np.cumsum(nsamples)) + if not self.matchState and self.autoCollapse: + pMat, padLeft, suppNew = 1., 0, [0] * len(nsamples) + else: + padLeft = self.trainedModel.data.projMat.shape[1] + suppNew = list(padLeft + np.append(0, np.cumsum(nsamples[: -1]))) self._setupTrainedModel(pMat, padLeft > 0) + if not self.matchState and self.autoCollapse: + self.trainedModel.data._collapsed = True self.trainedModel.data.Qs += Qs self.trainedModel.data.Ps += Ps - self.trainedModel.data.Psupp += list(padLeft + suppNew[: -1]) + self.trainedModel.data.Psupp += suppNew self.trainedModel.data.approxParameters = copy(self.approxParameters) self.verbosity += 10 self.samplingEngine.verbosity += 10 def _localPivotedResult(self, pMat:Np2D, req:ListAny, emptyCores:ListAny, mus:Np2D) -> Tuple[Np2D, ListAny, Np2D]: pMati = self.samplingEngine.projectionMatrix musi = self.samplingEngine.mus - if not hasattr(self, "matchState") or not self.matchState: + if not self.matchState: if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C " "to orthonormalized samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) pMatiEff = None for j, mu in enumerate(musi): pMij = np.expand_dims(self.HFEngine.applyC(pMati[:, j], mu), -1) if pMatiEff is None: pMatiEff = np.array(pMij) else: pMatiEff = np.append(pMatiEff, pMij, axis = 1) pMati = pMatiEff vbMng(self, "DEL", "Done extracting system output.", 35) if pMat is None: mus = copy(musi.data) pMat = copy(pMati) if masterCore(): for dest in emptyCores: req += [isend((len(pMat), pMat.dtype, mus.dtype), dest = dest, tag = dest)] else: mus = np.vstack((mus, musi.data)) - pMat = np.hstack((pMat, pMati)) + if not self.matchState and self.autoCollapse: + pMat = copy(pMati) + else: + pMat = np.hstack((pMat, pMati)) return pMat, req, mus @abstractmethod def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) self._preSetupApproxPivoted() data = [] pass self._postSetupApproxPivoted(mus, data) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 def setupApprox(self, plotEst : str = "NONE") -> int: """Compute greedy snapshots of solution map.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") vbMng(self, "INIT", "Setting up {}.". 
format(self.name()), 5) vbMng(self, "INIT", "Starting computation of snapshots.", 5) max2ErrorEst, self.firstGreedyIterM = np.inf, True self._preliminaryTrainingMarginal() if self.errorEstimatorKindMarginal == "NONE": muidx = [] else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD": muidx = np.arange(len(self.trainedModel.data.musMarginal)) self._musMarginalTestIdxs = np.array(muidx) while self.firstGreedyIterM or (max2ErrorEst > self.greedyTolMarginal and self.samplerMarginal.npoints < self.maxIterMarginal): errorEstTest, muidx, maxErrorEst, mu = \ self.greedyNextSampleMarginal(muidx, plotEst) if maxErrorEst is None: max2ErrorEst = 1. + self.greedyTolMarginal else: if len(maxErrorEst) > 0: max2ErrorEst = np.max(maxErrorEst) else: max2ErrorEst = np.max(errorEstTest) vbMng(self, "MAIN", ("Uniform testing error estimate " "{:.4e}.").format(max2ErrorEst), 5) if plotEst == "LAST": self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst) vbMng(self, "DEL", ("Done computing snapshots (final snapshot count: " "{}).").format(len(self.mus)), 5) if (self.errorEstimatorKindMarginal == "LOOK_AHEAD_RECOVER" and hasattr(self.trainedModel, "_idxExcl") and len(self.trainedModel._idxExcl) > 0): vbMng(self, "INIT", "Recovering {} test models.".format( len(self.trainedModel._idxExcl)), 7) for j, mu in zip(self.trainedModel._idxExcl, self.trainedModel._musMExcl): self.musMarginal.insert(mu, j) self._preliminaryMarginalFinalization() self._updateTrainedModelMarginalSamples() self._finalizeMarginalization() self._SMarginal = len(self.musMarginal) self._approxParameters["SMarginal"] = self.SMarginal self.trainedModel.data.approxParameters["SMarginal"] = ( self.SMarginal) vbMng(self, "DEL", "Done recovering test models.", 7) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def checkComputedApproxPivoted(self) -> bool: return (super().checkComputedApprox() and len(self.musMarginal) == len(self.trainedModel.data.musMarginal)) class GenericPivotedGreedyApproximantPoleMatch( GenericPivotedGreedyApproximantBase, GenericPivotedApproximantPoleMatch): """ ROM pivoted greedy interpolant computation for parametric problems (with pole matching) (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; if <= 0, Euclidean metric is used; if - 'AUTO', automatically selected; defaults to -1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER', and 'NONE'; defaults to 'NONE'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for - marginal interpolant; defaults to 1. + marginal interpolant; defaults to 1; + - 'autoCollapse': whether to collapse trained reduced model as soon + as it is built; defaults to False. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
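As a concrete illustration of the keys documented above, an approxParameters dictionary for the pole-matching greedy class could be assembled roughly as follows; every value here is an assumption for illustration, and the sampler entries are left as None placeholders since their construction is problem-dependent:
```
# illustrative only: keys mirror the docstring above, values are assumptions
params = {
    'POD': 1,                                   # orthonormalized snapshots
    'matchState': False,                        # match outputs, not states
    'matchingWeight': 1.,
    'matchingShared': 1.,
    'badPoleCorrection': "ERASE",
    'matchingWeightError': 0.,
    'errorEstimatorKindMarginal': "LOOK_AHEAD_RECOVER",
    'greedyTolMarginal': 1e-1,
    'maxIterMarginal': 100,
    'S': 10,                                    # pivot samples
    'SMarginal': 3,                             # starting marginal samples
    'samplerPivot': None,                       # placeholder: pivot generator
    'samplerMarginal': None,                    # placeholder: sparse-grid generator
}
```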
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for - marginal interpolant. + marginal interpolant; + - 'autoCollapse': whether to collapse trained reduced model as soon + as it is built. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. - matchingChordalRadius: Radius to be used in chordal metric for poles - and residues. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. matchingWeightError: Weight for pole matching optimization in error estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. + autoCollapse: Whether to collapse trained reduced model as soon as it + is built. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. 
uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def _updateTrainedModelMarginalSamples(self, idx : ListAny = []): self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight, - self.HFEngine, False, - self.matchingChordalRadius) + self.HFEngine, False) def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() _polybasisMarginal = self.polybasisMarginal self._polybasisMarginal = ("PIECEWISE_LINEAR_" + self.samplerMarginal.kind) setupOK = super().setupApprox(*args, **kwargs) self._polybasisMarginal = _polybasisMarginal if self.matchState: self._postApplyC() return setupOK diff --git a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py index 59d16ad..9509dec 100644 --- a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py +++ b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py @@ -1,361 +1,363 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>. # from copy import deepcopy as copy import numpy as np from .generic_pivoted_greedy_approximant import ( GenericPivotedGreedyApproximantBase, GenericPivotedGreedyApproximantPoleMatch) from rrompy.reduction_methods.standard.greedy import RationalInterpolantGreedy from rrompy.reduction_methods.standard.greedy.generic_greedy_approximant \ import pruneSamples from rrompy.reduction_methods.pivoted import ( RationalInterpolantGreedyPivotedPoleMatch) from rrompy.utilities.base.types import Np1D, Tuple, paramVal, paramList from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import RROMPyAssert from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, recv __all__ = ['RationalInterpolantGreedyPivotedGreedyPoleMatch'] class RationalInterpolantGreedyPivotedGreedyBase( GenericPivotedGreedyApproximantBase): @property def sampleBatchSize(self): """Value of sampleBatchSize.""" return 1 @property def sampleBatchIdx(self): """Value of sampleBatchIdx.""" return self.S def greedyNextSample(self, muidx:int, plotEst : str = "NONE")\ -> Tuple[Np1D, int, float, paramVal]: """Compute next greedy snapshot of solution map.""" RROMPyAssert(self._mode, message = "Cannot add greedy sample.") mus = copy(self.muTest[muidx]) self.muTest.pop(muidx) for j, mu in enumerate(mus): vbMng(self, "MAIN", ("Adding sample point no.
{} at {} to training " "set.").format(len(self.mus) + 1, mu), 3) self.mus.append(mu) self._S = len(self.mus) self._approxParameters["S"] = self.S if (self.samplingEngine.nsamples <= len(mus) - j - 1 or not np.allclose(mu, self.samplingEngine.mus[j - len(mus)])): self.samplingEngine.nextSample(mu) if self._isLastSampleCollinear(): vbMng(self, "MAIN", ("Collinearity above tolerance detected. Starting " "preemptive greedy loop termination."), 3) self._collinearityFlag = 1 errorEstTest = np.empty(len(self.muTest)) errorEstTest[:] = np.nan return errorEstTest, [-1], np.nan, np.nan errorEstTest, muidx, maxErrorEst = self.errorEstimator(self.muTest, True) if plotEst == "ALL": self.plotEstimator(errorEstTest, muidx, maxErrorEst) return errorEstTest, muidx, maxErrorEst, self.muTest[muidx] def _setSampleBatch(self, maxS:int): return self.S def _preliminaryTraining(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if self.samplingEngine.nsamples > 0: return self.resetSamples() self.samplingEngine.scaleFactor = self.scaleFactorDer musPivot = self.samplerTrainSet.generatePoints(self.S) while len(musPivot) > self.S: musPivot.pop() muTestBasePivot = self.samplerPivot.generatePoints(self.nTestPoints, False) idxPop = pruneSamples(self.mapParameterListPivot(muTestBasePivot), self.mapParameterListPivot(musPivot), 1e-10 * self.scaleFactorPivot[0]) muTestBasePivot.pop(idxPop) self._mus = emptyParameterList() self.mus.reset((self.S - 1, self.HFEngine.npar)) self.muTest = emptyParameterList() self.muTest.reset((len(muTestBasePivot) + 1, self.HFEngine.npar)) self.mus.data[:, self.directionPivot] = musPivot[: -1] self.mus.data[:, self.directionMarginal] = np.repeat(self.muMargLoc, self.S - 1, axis = 0) self.muTest.data[: -1, self.directionPivot] = muTestBasePivot.data self.muTest.data[-1, self.directionPivot] = musPivot[-1] self.muTest.data[:, self.directionMarginal] = np.repeat(self.muMargLoc, len(muTestBasePivot) + 1, axis = 0) if len(self.mus) > 0: vbMng(self, "MAIN", ("Adding first {} sample point{} at {} to training " "set.").format(self.S - 1, "" + "s" * (self.S > 2), self.mus), 3) self.samplingEngine.iterSample(self.mus) self._S = len(self.mus) self._approxParameters["S"] = self.S self.M, self.N = ("AUTO",) * 2 def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) if not hasattr(self, "_plotEstPivot"): self._plotEstPivot = "NONE" idx, sizes, emptyCores = self._preSetupApproxPivoted(mus) S0 = copy(self.S) pMat, Ps, Qs, req, musA = None, [], [], [], None if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 45) if self.storeAllSamples: self.storeSamples() pL, pT, mT = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = pT) musA = np.empty((0, self.mu0.shape[1]), dtype = mT) else: for i in idx: self.muMargLoc = mus[[i]] vbMng(self, "MAIN", "Building marginal model no. 
{} at " "{}.".format(i + 1, self.muMargLoc[0]), 25) self.samplingEngine.resetHistory() self.trainedModel = None self.verbosity -= 5 self.samplingEngine.verbosity -= 5 RationalInterpolantGreedy.setupApprox(self, self._plotEstPivot) self.verbosity += 5 self.samplingEngine.verbosity += 5 if self.storeAllSamples: self.storeSamples(i + self._nmusOld) pMat, req, musA = self._localPivotedResult(pMat, req, emptyCores, musA) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] + if not self.matchState and self.autoCollapse: + Ps[-1].postmultiplyTensorize(pMat.T) self._S = S0 del self.muMargLoc for r in req: r.wait() + if not self.matchState and self.autoCollapse: pMat = pMat[:, : 0] self._postSetupApproxPivoted(musA, pMat, Ps, Qs, sizes) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 def setupApprox(self, plotEst : str = "NONE") -> int: if self.checkComputedApprox(): return -1 if '_' not in plotEst: plotEst = plotEst + "_NONE" plotEstM, self._plotEstPivot = plotEst.split("_") val = super().setupApprox(plotEstM) return val class RationalInterpolantGreedyPivotedGreedyPoleMatch( RationalInterpolantGreedyPivotedGreedyBase, GenericPivotedGreedyApproximantPoleMatch, RationalInterpolantGreedyPivotedPoleMatch): """ ROM greedy pivoted greedy rational interpolant computation for parametric problems (with pole matching). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; if <= 0, Euclidean metric is used; if - 'AUTO', automatically selected; defaults to -1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LOOK_AHEAD' and 'LOOK_AHEAD_RECOVER'; defaults to 'NONE'; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 
'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'samplerTrainSet': training sample points generator; defaults to samplerPivot; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE'; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; + - 'autoCollapse': whether to collapse trained reduced model as soon + as it is built; defaults to False; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. 
- 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'samplerTrainSet': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; + - 'autoCollapse': whether to collapse trained reduced model as soon + as it is built; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. - matchingChordalRadius: Radius to be used in chordal metric for poles - and residues. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. matchingWeightError: Weight for pole matching optimization in error estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. samplerTrainSet: training sample points generator. errorEstimatorKind: kind of error estimator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. + autoCollapse: Whether to collapse trained reduced model as soon as it + is built. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. 
lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ diff --git a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py index 6160230..601a663 100644 --- a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py +++ b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py @@ -1,295 +1,302 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>. # from copy import deepcopy as copy import numpy as np from .generic_pivoted_greedy_approximant import ( GenericPivotedGreedyApproximantBase, GenericPivotedGreedyApproximantPoleMatch) from rrompy.reduction_methods.standard import RationalInterpolant from rrompy.reduction_methods.pivoted import ( RationalInterpolantPivotedPoleMatch) from rrompy.utilities.base.types import paramList from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, recv __all__ = ['RationalInterpolantPivotedGreedyPoleMatch'] class RationalInterpolantPivotedGreedyBase( GenericPivotedGreedyApproximantBase): def computeSnapshots(self): """Compute snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start snapshot computation.") vbMng(self, "INIT", "Starting computation of snapshots.", 5) self.samplingEngine.scaleFactor = self.scaleFactorDer if not hasattr(self, "musPivot") or len(self.musPivot) != self.S: self.musPivot = self.samplerPivot.generatePoints(self.S) while len(self.musPivot) > self.S: self.musPivot.pop() musLoc = emptyParameterList() musLoc.reset((self.S, self.HFEngine.npar)) self.samplingEngine.resetHistory() musLoc.data[:, self.directionPivot] = self.musPivot.data musLoc.data[:, self.directionMarginal] = np.repeat(self.muMargLoc, self.S, axis = 0) self.samplingEngine.iterSample(musLoc) vbMng(self, "DEL", "Done computing snapshots.", 5) self._m_selfmus = copy(musLoc) self._mus = self.musPivot self._m_HFEparameterMap = copy(self.HFEngine.parameterMap) self.HFEngine.parameterMap = { "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]], "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]} + def addMarginalSamplePoints(self, musMarginal:paramList, *args, **kwargs): + """Add marginal sample points to reduced model.""" + raise RROMPyException(("Cannot add marginal samples to marginal " + "greedy reduced model.")) + def setupApproxPivoted(self, mus:paramList) -> int: if self.checkComputedApproxPivoted(): return -1 RROMPyAssert(self._mode, message = "Cannot
setup approximant.") vbMng(self, "INIT", "Setting up pivoted approximant.", 10) idx, sizes, emptyCores = self._preSetupApproxPivoted(mus) pMat, Ps, Qs, req, musA = None, [], [], [], None if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 45) if self.storeAllSamples: self.storeSamples() pL, pT, mT = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = pT) musA = np.empty((0, self.mu0.shape[1]), dtype = mT) else: for i in idx: self.muMargLoc = mus[[i]] vbMng(self, "MAIN", "Building marginal model no. {} at " "{}.".format(i + 1, self.muMargLoc[0]), 25) self.samplingEngine.resetHistory() self.trainedModel = None self.verbosity -= 5 self.samplingEngine.verbosity -= 5 RationalInterpolant.setupApprox(self) self.verbosity += 5 self.samplingEngine.verbosity += 5 self._mus = self._m_selfmus self.HFEngine.parameterMap = self._m_HFEparameterMap del self._m_selfmus, self._m_HFEparameterMap if self.storeAllSamples: self.storeSamples(i + self._nmusOld) pMat, req, musA = self._localPivotedResult(pMat, req, emptyCores, musA) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] + if not self.matchState and self.autoCollapse: + Ps[-1].postmultiplyTensorize(pMat.T) del self.muMargLoc for r in req: r.wait() + if not self.matchState and self.autoCollapse: pMat = pMat[:, : 0] self._postSetupApproxPivoted(musA, pMat, Ps, Qs, sizes) vbMng(self, "DEL", "Done setting up pivoted approximant.", 10) return 0 class RationalInterpolantPivotedGreedyPoleMatch( RationalInterpolantPivotedGreedyBase, GenericPivotedGreedyApproximantPoleMatch, RationalInterpolantPivotedPoleMatch): """ ROM pivoted greedy rational interpolant computation for parametric problems (with pole matching). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. 
POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; if <= 0, Euclidean metric is used; if - 'AUTO', automatically selected; defaults to -1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'matchingWeightError': weight for pole matching optimization in error estimation; defaults to 0; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': number of starting marginal samples; - 'samplerMarginal': marginal sample point generator via sparse grid; - 'errorEstimatorKindMarginal': kind of marginal error estimator; available values include 'LOOK_AHEAD' and 'LOOK_AHEAD_RECOVER'; defaults to 'NONE'; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; defaults to 1e-1; - 'maxIterMarginal': maximum number of marginal greedy steps; defaults to 1e2; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; + - 'autoCollapse': whether to collapse trained reduced model as soon + as it is built; defaults to False; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. 
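The autoCollapse flag documented in this class (and handled in _postSetupApproxPivoted and the setupApproxPivoted overrides through Ps[-1].postmultiplyTensorize(pMat.T)) effectively folds the tall projection matrix into the rational numerator coefficients, so the snapshot basis need not be kept. A toy numpy sketch of that algebra, with semantics inferred from the diff rather than taken from the library:
```
import numpy as np

rng = np.random.default_rng(0)
nFE, S, deg = 1000, 8, 5                 # FE dofs, snapshots, numerator degree
projMat = rng.standard_normal((nFE, S))  # tall snapshot basis
P = rng.standard_normal((deg + 1, S))    # numerator coeffs w.r.t. the basis

# "collapse": express the numerator directly in FE space ...
P_collapsed = P @ projMat.T              # shape (deg + 1, nFE)
# ... after which projMat itself no longer needs to be stored
print(P_collapsed.shape)
```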
musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'matchingWeightError': weight for pole matching optimization in error estimation; - 'errorEstimatorKindMarginal': kind of marginal error estimator; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'greedyTolMarginal': uniform error tolerance for marginal greedy algorithm; - 'maxIterMarginal': maximum number of marginal greedy steps; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; + - 'autoCollapse': whether to collapse trained reduced model as soon + as it is built; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator via sparse grid. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. - matchingChordalRadius: Radius to be used in chordal metric for poles - and residues. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. matchingWeightError: Weight for pole matching optimization in error estimation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator via sparse grid. errorEstimatorKindMarginal: Kind of marginal error estimator. polybasis: Type of polynomial basis for pivot interpolation. 
polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. M: Degree of rational interpolant numerator. N: Degree of rational interpolant denominator. greedyTolMarginal: Uniform error tolerance for marginal greedy algorithm. maxIterMarginal: Maximum number of marginal greedy steps. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. + autoCollapse: Whether to collapse trained reduced model as soon as it + is built. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py index a05143c..865173d 100644 --- a/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py +++ b/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py @@ -1,571 +1,613 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
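The collapse bookkeeping above (the new `autoCollapse` parameter and the `postmultiplyTensorize(pMat.T)` calls) boils down to one linear-algebra identity: postmultiplying the numerator coefficients by the transposed projection matrix lets the model return full-order outputs without keeping the projection matrix around. A minimal NumPy sketch of that identity; shapes and names here are illustrative placeholders, not RROMPy classes:
```
import numpy as np

deg, nRB, nDOF = 5, 8, 200             # polynomial degree, basis size, FE dofs
P = np.random.randn(deg, nRB)          # numerator coefficients in reduced basis
pMat = np.random.randn(nDOF, nRB)      # projection matrix (basis as columns)
P_collapsed = P @ pMat.T               # effect of postmultiplyTensorize(pMat.T)

# Evaluating the collapsed numerator needs no further lifting by pMat:
c = np.random.randn(deg)               # polynomial basis values at some parameter
assert np.allclose(P_collapsed.T @ c, pMat @ (P.T @ c))
```
This is why, once collapsed, `pMat` can degenerate to a scalar `1.` or a zero-column matrix (`pMat[:, : 0]`) in the code above: each `Ps[-1]` has already absorbed its own snapshot block.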
# from copy import deepcopy as copy import numpy as np from .generic_pivoted_approximant import (GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximantPoleMatch) from .gather_pivoted_approximant import gatherPivotedApproximant from rrompy.reduction_methods.standard.greedy.rational_interpolant_greedy \ import RationalInterpolantGreedy from rrompy.reduction_methods.standard.greedy.generic_greedy_approximant \ import pruneSamples -from rrompy.utilities.base.types import Np1D +from rrompy.utilities.base.types import Np1D, paramList from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.poly_fitting.polynomial import polyvander as pv -from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert +from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk +from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, + RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, indicesScatter, isend, recv __all__ = ['RationalInterpolantGreedyPivotedNoMatch', 'RationalInterpolantGreedyPivotedPoleMatch'] class RationalInterpolantGreedyPivotedBase(GenericPivotedApproximantBase, RationalInterpolantGreedy): def __init__(self, *args, **kwargs): self._preInit() super().__init__(*args, **kwargs) if self.nparPivot > 1: self.HFEngine._ignoreResidues = 1 self._postInit() @property def tModelType(self): if hasattr(self, "_temporaryPivot"): return RationalInterpolantGreedy.tModelType.fget(self) return super().tModelType def _polyvanderAuxiliary(self, mus, deg, *args): degEff = [0] * self.npar degEff[self.directionPivot[0]] = deg return pv(mus, degEff, *args) def _marginalizeMiscellanea(self, forward:bool): if forward: self._m_selfmus = copy(self.mus) self._m_HFEparameterMap = copy(self.HFEngine.parameterMap) self._mus = self.checkParameterListPivot( self.mus(self.directionPivot)) self.HFEngine.parameterMap = { "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]], "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]} else: self._mus = self._m_selfmus self.HFEngine.parameterMap = self._m_HFEparameterMap del self._m_selfmus, self._m_HFEparameterMap def _marginalizeTrainedModel(self, forward:bool): if forward: del self._temporaryPivot self.trainedModel.data.mu0 = self.mu0 self.trainedModel.data.scaleFactor = [1.] 
* self.npar self.trainedModel.data.scaleFactor[self.directionPivot[0]] = ( self.scaleFactor[0]) self.trainedModel.data.parameterMap = self.HFEngine.parameterMap self._m_musUniqueCN = copy(self._musUniqueCN) musUniqueCNAux = np.zeros((self.S, self.npar), dtype = self._musUniqueCN.dtype) musUniqueCNAux[:, self.directionPivot[0]] = self._musUniqueCN(0) self._musUniqueCN = self.checkParameterList(musUniqueCNAux) self._m_derIdxs = copy(self._derIdxs) for j in range(len(self._derIdxs)): for l in range(len(self._derIdxs[j])): derjl = self._derIdxs[j][l][0] self._derIdxs[j][l] = [0] * self.npar self._derIdxs[j][l][self.directionPivot[0]] = derjl self.trainedModel.data.Q._dirPivot = self.directionPivot[0] self.trainedModel.data.P._dirPivot = self.directionPivot[0] # tell greedy error estimator that operator / RHS is pivot-affine if hasattr(self.HFEngine.A, "is_affine"): self._A_is_affine = self.HFEngine.A.is_affine else: self._A_is_affine = 0 if hasattr(self.HFEngine.b, "is_affine"): self._b_is_affine = self.HFEngine.b.is_affine else: self._b_is_affine = 0 if self._A_is_affine >= 1 / 2 and self._b_is_affine >= 1 / 2: self._affine_lvl += [1 / 2] else: self._temporaryPivot = 1 self.trainedModel.data.mu0 = self.checkParameterListPivot( self.mu0(self.directionPivot)) self.trainedModel.data.scaleFactor = self.scaleFactor self.trainedModel.data.parameterMap = { "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]], "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]} self._musUniqueCN = copy(self._m_musUniqueCN) self._derIdxs = copy(self._m_derIdxs) del self._m_musUniqueCN, self._m_derIdxs del self.trainedModel.data.Q._dirPivot del self.trainedModel.data.P._dirPivot if self._A_is_affine >= 1 / 2 and self._b_is_affine >= 1 / 2: self._affine_lvl.pop() del self._A_is_affine, self._b_is_affine self.trainedModel.data.npar = self.npar def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D: """Standard residual-based error estimator.""" setupOK = self.setupApproxLocal() if setupOK > 0: err = np.empty(len(mus)) err[:] = np.nan if not return_max: return err return err, [- setupOK], np.nan self._marginalizeTrainedModel(True) errRes = super().errorEstimator(mus, return_max) self._marginalizeTrainedModel(False) return errRes def _preliminaryTraining(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") self._S = self._setSampleBatch(self.S) self.resetSamples() self.samplingEngine.scaleFactor = self.scaleFactorDer musPivot = self.samplerTrainSet.generatePoints(self.S) while len(musPivot) > self.S: musPivot.pop() muTestPivot = self.samplerPivot.generatePoints(self.nTestPoints, False) idxPop = pruneSamples(self.mapParameterListPivot(muTestPivot), self.mapParameterListPivot(musPivot), 1e-10 * self.scaleFactorPivot[0]) muTestPivot.pop(idxPop) self._mus = emptyParameterList() self.mus.reset((self.S - 1, self.HFEngine.npar)) self.muTest = emptyParameterList() self.muTest.reset((len(muTestPivot) + 1, self.HFEngine.npar)) self.mus.data[:, self.directionPivot] = musPivot[: -1] self.mus.data[:, self.directionMarginal] = np.repeat(self.muMargLoc, self.S - 1, axis = 0) self.muTest.data[: -1, self.directionPivot] = muTestPivot.data self.muTest.data[-1, self.directionPivot] = musPivot[-1] self.muTest.data[:, self.directionMarginal] = np.repeat(self.muMargLoc, len(muTestPivot) + 1, axis = 0) if len(self.mus) > 0: vbMng(self, "MAIN", ("Adding first {} sample point{} at {} to training " "set.").format(self.S - 1, "" + "s" * 
(self.S > 2), self.mus), 3) self.samplingEngine.iterSample(self.mus) self._S = len(self.mus) self._approxParameters["S"] = self.S self.M, self.N = ("AUTO",) * 2 def setupApproxLocal(self) -> int: """Compute rational interpolant.""" self._marginalizeMiscellanea(True) setupOK = super().setupApproxLocal() self._marginalizeMiscellanea(False) return setupOK - def setupApprox(self, *args, **kwargs) -> int: - """Compute rational interpolant.""" - if self.checkComputedApprox(): return -1 - RROMPyAssert(self._mode, message = "Cannot setup approximant.") - vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) - self.computeScaleFactor() - self._musMarginal = self.samplerMarginal.generatePoints(self.SMarginal) - while len(self.musMarginal) > self.SMarginal: self.musMarginal.pop() + def addMarginalSamplePoints(self, musMarginal:paramList, *args, + **kwargs) -> int: + """Add marginal sample points to reduced model.""" + RROMPyAssert(self._mode, message = "Cannot add sample points.") + musMarginal = self.checkParameterListMarginal(musMarginal) + vbMng(self, "INIT", + "Adding marginal sample point{} at {}.".format( + "s" * (len(musMarginal) > 1), musMarginal), 5) + if (self.SMarginal > 0 and hasattr(self, "polybasisMarginal") + and self.polybasisMarginal in sk): + RROMPyWarning(("Manually adding new samples with piecewise linear " + "marginal interpolation is dangerous. Sample depth " + "in samplerMarginal must be managed correctly.")) + _musOld = self.mus + self._musMarginal.append(musMarginal) S0 = copy(self.S) - idx, sizes = indicesScatter(len(self.musMarginal), return_sizes = True) + idx, sizes = indicesScatter(len(musMarginal), return_sizes = True) + _trainedModelOld = copy(self.trainedModel) + _collapsed = (_trainedModelOld is not None + and _trainedModelOld.data._collapsed) pMat, Ps, Qs, mus = None, [], [], None req, emptyCores = [], np.where(sizes == 0)[0] if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 25) if self.storeAllSamples: self.storeSamples() pL, pT, mT = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = pT) mus = np.empty((0, self.mu0.shape[1]), dtype = mT) else: _scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 for i in idx: - self.muMargLoc = self.musMarginal[[i]] + self.muMargLoc = self.musMarginal[[i + self.SMarginal]] vbMng(self, "MAIN", - "Building marginal model no. {} at {}.".format(i + 1, - self.musMarginal[i]), 5) + "Building marginal model no. 
{} at {}.".format( + i + self.SMarginal + 1, + self.musMarginal[i + self.SMarginal]), 5) self.samplingEngine.resetHistory() self.trainedModel = None self.verbosity -= 5 self.samplingEngine.verbosity -= 5 RationalInterpolantGreedy.setupApprox(self, *args, **kwargs) self.verbosity += 5 self.samplingEngine.verbosity += 5 - if self.storeAllSamples: self.storeSamples(i) + if self.storeAllSamples: self.storeSamples(i + self.SMarginal) musi = self.samplingEngine.mus pMati = self.samplingEngine.projectionMatrix - if not hasattr(self, "matchState") or not self.matchState: + if not self.matchState: if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C " "to orthonormalized samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) pMatiEff = None for j, mu in enumerate(musi): pMij = np.expand_dims(self.HFEngine.applyC( pMati[:, j], mu), -1) if pMatiEff is None: pMatiEff = np.array(pMij) else: pMatiEff = np.append(pMatiEff, pMij, axis = 1) pMati = pMatiEff vbMng(self, "DEL", "Done extracting system output.", 35) + if pMat is None: mus = copy(musi.data) - pMat = copy(pMati) if i == 0: for dest in emptyCores: - req += [isend((len(pMat), pMat.dtype, mus.dtype), + req += [isend((len(pMati), pMati.dtype, mus.dtype), dest = dest, tag = dest)] else: mus = np.vstack((mus, musi.data)) - pMat = np.hstack((pMat, pMati)) + if _collapsed: + pMat = 1. + else: + if pMat is None: + pMat = copy(pMati) + else: + pMat = np.hstack((pMat, pMati)) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] + if _collapsed: Ps[-1].postmultiplyTensorize(pMati.T) self._S = S0 del self._temporaryPivot, self.muMargLoc self.scaleFactor = _scaleFactorOldPivot for r in req: r.wait() + if _collapsed: pMat = pMati[:, : 0] pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs, - mus, sizes, - self.polybasis) - self._mus = self.checkParameterList(mus) - Psupp = np.append(0, np.cumsum(nsamples)) - self._setupTrainedModel(pMat, forceNew = True) - self.trainedModel.data.Qs, self.trainedModel.data.Ps = Qs, Ps - self.trainedModel.data.Psupp = list(Psupp[: -1]) + mus, sizes, self.polybasis) + self._mus = _musOld + self.mus.append(mus) + Psupp = np.append(0, np.cumsum(nsamples[: -1])) + if _trainedModelOld is None: + self._setupTrainedModel(pMat, forceNew = True) + self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], [] + self.trainedModel.data.Psupp = [] + else: + self._trainedModel = _trainedModelOld + if _collapsed: + self._setupTrainedModel(1.) + Psupp = [0] * len(musMarginal) + else: + Psupp = Psupp + self.trainedModel.data.projMat.shape[1] + self._setupTrainedModel(pMat, 1) + self._SMarginal += len(musMarginal) + self.trainedModel.data.Qs += Qs + self.trainedModel.data.Ps += Ps + self.trainedModel.data.Psupp += list(Psupp) self._preliminaryMarginalFinalization() self._finalizeMarginalization() vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 + def setupApprox(self, *args, **kwargs) -> int: + """Compute rational interpolant.""" + if self.checkComputedApprox(): return -1 + RROMPyAssert(self._mode, message = "Cannot setup approximant.") + vbMng(self, "INIT", "Setting up {}.". 
format(self.name()), 5) + self.computeScaleFactor() + self._mus = emptyParameterList() + self._musMarginal = emptyParameterList() + musMarginal = self.samplerMarginal.generatePoints(self.SMarginal) + while len(musMarginal) > self.SMarginal: musMarginal.pop() + self._SMarginal = 0 + val = self.addMarginalSamplePoints(musMarginal, *args, **kwargs) + vbMng(self, "DEL", "Done setting up approximant.", 5) + return val + class RationalInterpolantGreedyPivotedNoMatch( RationalInterpolantGreedyPivotedBase, GenericPivotedApproximantNoMatch): """ ROM pivoted rational interpolant (without pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'samplerTrainSet': training sample points generator; defaults to samplerPivot; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE'; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for pivot interpolation; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'samplerTrainSet': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. samplerTrainSet: training sample points generator. errorEstimatorKind: kind of error estimator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ class RationalInterpolantGreedyPivotedPoleMatch( RationalInterpolantGreedyPivotedBase, GenericPivotedApproximantPoleMatch): """ ROM pivoted rational interpolant (with pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. 
Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; if <= 0, Euclidean metric is used; if - 'AUTO', automatically selected; defaults to -1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'samplerTrainSet': training sample points generator; defaults to samplerPivot; - 'errorEstimatorKind': kind of error estimator; available values include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD', 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE'; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'samplerTrainSet': training sample points generator; - 'errorEstimatorKind': kind of error estimator; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. - matchingChordalRadius: Radius to be used in chordal metric for poles - and residues. matchingShared: Required ratio of marginal points to share resonance. badPoleCorrection: Strategy for correction of bad poles. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. greedyTol: uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: maximum number of greedy steps. nTestPoints: number of starting training points. samplerTrainSet: training sample points generator. errorEstimatorKind: kind of error estimator. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. 
muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() setupOK = super().setupApprox(*args, **kwargs) if self.matchState: self._postApplyC() return setupOK diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py index dc66a48..449ebd9 100644 --- a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py +++ b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py @@ -1,491 +1,523 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>. # import numpy as np from collections.abc import Iterable from copy import deepcopy as copy from .generic_pivoted_approximant import (GenericPivotedApproximantBase, GenericPivotedApproximantNoMatch, GenericPivotedApproximantPoleMatch) from .gather_pivoted_approximant import gatherPivotedApproximant from rrompy.reduction_methods.standard.rational_interpolant import ( RationalInterpolant) from rrompy.utilities.base import verbosityManager as vbMng +from rrompy.utilities.base.types import paramList from rrompy.utilities.numerical.hash_derivative import nextDerivativeIndices +from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.parameter import emptyParameterList from rrompy.utilities.parallel import poolRank, indicesScatter, isend, recv __all__ = ['RationalInterpolantPivotedNoMatch', 'RationalInterpolantPivotedPoleMatch'] class RationalInterpolantPivotedBase(GenericPivotedApproximantBase, RationalInterpolant): def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(toBeExcluded = ["polydegreetype"]) super().__init__(*args, **kwargs) if self.nparPivot > 1: self.HFEngine._ignoreResidues = 1 self._postInit() @property def scaleFactorDer(self): """Value of scaleFactorDer.""" if self._scaleFactorDer == "NONE": return 1.
if self._scaleFactorDer == "AUTO": return self.scaleFactorPivot return self._scaleFactorDer @scaleFactorDer.setter def scaleFactorDer(self, scaleFactorDer): if isinstance(scaleFactorDer, (str,)): scaleFactorDer = scaleFactorDer.upper() elif isinstance(scaleFactorDer, Iterable): scaleFactorDer = list(scaleFactorDer) self._scaleFactorDer = scaleFactorDer self._approxParameters["scaleFactorDer"] = self._scaleFactorDer @property def polydegreetype(self): """Value of polydegreetype.""" return "TOTAL" @polydegreetype.setter def polydegreetype(self, polydegreetype): RROMPyWarning(("polydegreetype is used just to simplify inheritance, " "and its value cannot be changed from 'TOTAL'.")) def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" RROMPyAssert(self._mode, message = "Cannot setup interpolation indices.") if (self._musUniqueCN is None or len(self._reorder) != len(self.musPivot)): try: muPC = self.trainedModel.centerNormalizePivot(self.musPivot) except: muPC = self.trainedModel.centerNormalize(self.musPivot) self._musUniqueCN, musIdxsTo, musIdxs, musCount = (muPC.unique( return_index = True, return_inverse = True, return_counts = True)) self._musUnique = self.musPivot[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.nparPivot, cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt - def setupApprox(self) -> int: - """Compute rational interpolant.""" - if self.checkComputedApprox(): return -1 - RROMPyAssert(self._mode, message = "Cannot setup approximant.") - vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) - self.computeScaleFactor() - self.resetSamples() - self.samplingEngine.scaleFactor = self.scaleFactorDer - self.musPivot = self.samplerPivot.generatePoints(self.S) - while len(self.musPivot) > self.S: self.musPivot.pop() - self._musMarginal = self.samplerMarginal.generatePoints(self.SMarginal) - while len(self.musMarginal) > self.SMarginal: self.musMarginal.pop() - self._mus = emptyParameterList() - self.mus.reset((self.S * self.SMarginal, self.HFEngine.npar)) - self.mus.data[:, self.directionPivot] = np.tile(self.musPivot.data, - (self.SMarginal, 1)) - self.mus.data[:, self.directionMarginal] = np.repeat( - self.musMarginal.data, - self.S, axis = 0) + def addMarginalSamplePoints(self, musMarginal:paramList) -> int: + """Add marginal sample points to reduced model.""" + RROMPyAssert(self._mode, message = "Cannot add sample points.") + musMarginal = self.checkParameterListMarginal(musMarginal) + vbMng(self, "INIT", + "Adding marginal sample point{} at {}.".format( + "s" * (len(musMarginal) > 1), musMarginal), 5) + if (self.SMarginal > 0 and hasattr(self, "polybasisMarginal") + and self.polybasisMarginal in sk): + RROMPyWarning(("Manually adding new samples with piecewise linear " + "marginal interpolation is dangerous. 
Sample depth " + "in samplerMarginal must be managed correctly.")) + mus = np.empty((self.S * len(musMarginal), self.HFEngine.npar), + dtype = complex) + mus[:, self.directionPivot] = np.tile(self.musPivot.data, + (len(musMarginal), 1)) + mus[:, self.directionMarginal] = np.repeat(musMarginal.data, self.S, + axis = 0) + self._mus.append(mus) + self._musMarginal.append(musMarginal) N0 = copy(self.N) - self._setupTrainedModel(np.zeros((0, 0)), forceNew = True) - idx, sizes = indicesScatter(len(self.musMarginal), return_sizes = True) + idx, sizes = indicesScatter(len(musMarginal), return_sizes = True) pMat, Ps, Qs = None, [], [] req, emptyCores = [], np.where(sizes == 0)[0] if len(idx) == 0: vbMng(self, "MAIN", "Idling.", 30) if self.storeAllSamples: self.storeSamples() pL, pT = recv(source = 0, tag = poolRank()) pMat = np.empty((pL, 0), dtype = pT) else: _scaleFactorOldPivot = copy(self.scaleFactor) self.scaleFactor = self.scaleFactorPivot self._temporaryPivot = 1 for i in idx: - musi = self.mus[self.S * i : self.S * (i + 1)] + musi = self.mus[self.S * (i + self.SMarginal) + : self.S * (i + self.SMarginal + 1)] vbMng(self, "MAIN", - "Building marginal model no. {} at {}.".format(i + 1, - self.musMarginal[i]), 5) + "Building marginal model no. {} at {}.".format( + i + self.SMarginal + 1, + self.musMarginal[i + self.SMarginal]), 5) vbMng(self, "INIT", "Starting computation of snapshots.", 10) self.samplingEngine.resetHistory() self.samplingEngine.iterSample(musi) vbMng(self, "DEL", "Done computing snapshots.", 10) self.verbosity -= 5 self.samplingEngine.verbosity -= 5 self._setupRational(self._setupDenominator()) self.verbosity += 5 self.samplingEngine.verbosity += 5 - if self.storeAllSamples: self.storeSamples(i) + if self.storeAllSamples: self.storeSamples(i + self.SMarginal) pMati = self.samplingEngine.projectionMatrix - if not hasattr(self, "matchState") or not self.matchState: + if not self.matchState: if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C " "to orthonormalized samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) pMatiEff = None for j, mu in enumerate(musi): pMij = np.expand_dims(self.HFEngine.applyC( pMati[:, j], mu), -1) if pMatiEff is None: pMatiEff = np.array(pMij) else: pMatiEff = np.append(pMatiEff, pMij, axis = 1) pMati = pMatiEff vbMng(self, "DEL", "Done extracting system output.", 35) - if pMat is None: - pMat = copy(pMati) - if i == 0: - for dest in emptyCores: - req += [isend((len(pMat), pMat.dtype), dest = dest, - tag = dest)] + if pMat is None and i == 0: + for dest in emptyCores: + req += [isend((len(pMati), pMati.dtype), dest = dest, + tag = dest)] + if self.trainedModel.data._collapsed: + pMat = 1.
else: - pMat = np.hstack((pMat, pMati)) + if pMat is None: + pMat = copy(pMati) + else: + pMat = np.hstack((pMat, pMati)) Ps += [copy(self.trainedModel.data.P)] Qs += [copy(self.trainedModel.data.Q)] + if self.trainedModel.data._collapsed: + Ps[-1].postmultiplyTensorize(pMati.T) del self.trainedModel.data.Q, self.trainedModel.data.P self.N = N0 del self._temporaryPivot self.scaleFactor = _scaleFactorOldPivot for r in req: r.wait() + if self.trainedModel.data._collapsed: pMat = pMati[:, : 0] pMat, Ps, Qs, _, _ = gatherPivotedApproximant(pMat, Ps, Qs, self.mus.data, sizes, self.polybasis, False) - self._setupTrainedModel(pMat) - self.trainedModel.data.Qs, self.trainedModel.data.Ps = Qs, Ps - Psupp = np.arange(0, len(self.musMarginal) * self.S, self.S) - self.trainedModel.data.Psupp = list(Psupp) + if self.trainedModel.data._collapsed: + self._setupTrainedModel(1.) + Psupp = [0] * len(musMarginal) + else: + self._setupTrainedModel(pMat, + len(self.trainedModel.data.projMat) > 0) + Psupp = (self.SMarginal + np.arange(0, len(musMarginal))) * self.S + self._SMarginal += len(musMarginal) + self.trainedModel.data.Qs += Qs + self.trainedModel.data.Ps += Ps + self.trainedModel.data.Psupp += list(Psupp) self._preliminaryMarginalFinalization() self._finalizeMarginalization() - vbMng(self, "DEL", "Done setting up approximant.", 5) + vbMng(self, "DEL", "Done adding marginal sample points.", 5) return 0 + def setupApprox(self) -> int: + """Compute rational interpolant.""" + if self.checkComputedApprox(): return -1 + RROMPyAssert(self._mode, message = "Cannot setup approximant.") + vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) + self.computeScaleFactor() + self.resetSamples() + self.samplingEngine.scaleFactor = self.scaleFactorDer + self._mus = emptyParameterList() + self._musMarginal = emptyParameterList() + self.musPivot = self.samplerPivot.generatePoints(self.S) + while len(self.musPivot) > self.S: self.musPivot.pop() + musMarginal = self.samplerMarginal.generatePoints(self.SMarginal) + while len(musMarginal) > self.SMarginal: musMarginal.pop() + self._setupTrainedModel(np.zeros((0, 0)), forceNew = True) + self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], [] + self.trainedModel.data.Psupp = [] + self._SMarginal = 0 + val = self.addMarginalSamplePoints(musMarginal) + vbMng(self, "DEL", "Done setting up approximant.", 5) + return val + class RationalInterpolantPivotedNoMatch(RationalInterpolantPivotedBase, GenericPivotedApproximantNoMatch): """ ROM pivoted rational interpolant (without pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. 
maximum allowed; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for pivot interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. 
uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ class RationalInterpolantPivotedPoleMatch(RationalInterpolantPivotedBase, GenericPivotedApproximantPoleMatch): """ ROM pivoted rational interpolant (with pole matching) computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. directionPivot(optional): Pivot components. Defaults to [0]. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'matchState': whether to match the system state rather than the system output; defaults to False; - 'matchingWeight': weight for pole matching optimization; defaults to 1; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; if <= 0, Euclidean metric is used; if - 'AUTO', automatically selected; defaults to -1; - 'matchingShared': required ratio of marginal points to share resonance; defaults to 1.; - 'badPoleCorrection': strategy for correction of bad poles; available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL'; defaults to 'ERASE'; - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator; - 'polybasis': type of polynomial basis for pivot interpolation; defaults to 'MONOMIAL'; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; allowed values include 'MONOMIAL_*', 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL'; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; defaults to 'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'nNeighborsMarginal': number of marginal nearest neighbors; defaults to 1; only for 'NEARESTNEIGHBOR'; . 'polydegreetypeMarginal': type of polynomial degree for marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'interpTolMarginal': tolerance for marginal interpolation; defaults to None; not for 'NEARESTNEIGHBOR' or 'PIECEWISE_LINEAR_*'; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights; only for radial basis. - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. 
maximum allowed; - 'radialDirectionalWeights': radial basis weights for pivot numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; defaults to 1; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for pivot interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. directionPivot: Pivot components. mus: Array of snapshot parameters. musPivot: Array of pivot snapshot parameters. musMarginal: Array of marginal snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'matchState': whether to match the system state rather than the system output; - 'matchingWeight': weight for pole matching optimization; - - 'matchingChordalRadius': radius to be used in chordal metric for - poles and residues; - 'matchingShared': required ratio of marginal points to share resonance; - 'badPoleCorrection': strategy for correction of bad poles; - 'polybasis': type of polynomial basis for pivot interpolation; - 'polybasisMarginal': type of polynomial basis for marginal interpolation; - 'paramsMarginal': dictionary of parameters for marginal interpolation; include: . 'MMarginal': degree of marginal interpolant; . 'nNeighborsMarginal': number of marginal nearest neighbors; . 'polydegreetypeMarginal': type of polynomial degree for marginal; . 'interpTolMarginal': tolerance for marginal interpolation; . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive rescaling of marginal radial basis weights. - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'radialDirectionalWeights': radial basis weights for pivot numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'radialDirectionalWeightsMarginal': radial basis weights for marginal interpolant; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for pivot interpolation; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of pivot samples current approximant relies upon; - 'samplerPivot': pivot sample point generator; - 'SMarginal': total number of marginal samples current approximant relies upon; - 'samplerMarginal': marginal sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. matchState: Whether to match the system state rather than the system output. matchingWeight: Weight for pole matching optimization. - matchingChordalRadius: Radius to be used in chordal metric for poles - and residues. matchingShared: Required ratio of marginal points to share resonance. 
badPoleCorrection: Strategy for correction of bad poles. S: Total number of pivot samples current approximant relies upon. samplerPivot: Pivot sample point generator. SMarginal: Total number of marginal samples current approximant relies upon. samplerMarginal: Marginal sample point generator. polybasis: Type of polynomial basis for pivot interpolation. polybasisMarginal: Type of polynomial basis for marginal interpolation. paramsMarginal: Dictionary of parameters for marginal interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. radialDirectionalWeights: Radial basis weights for pivot numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. radialDirectionalWeightsMarginal: Radial basis weights for marginal interpolant. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for pivot interpolation. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for pivot parameter values. muBoundsMarginal: list of bounds for marginal parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ def setupApprox(self, *args, **kwargs) -> int: if self.checkComputedApprox(): return -1 self.purgeparamsMarginal() setupOK = super().setupApprox(*args, **kwargs) if self.matchState: self._postApplyC() return setupOK diff --git a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py index 91dd9d0..214a359 100644 --- a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py +++ b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py @@ -1,658 +1,563 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
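With this refactoring, both pivoted interpolant classes split the old monolithic `setupApprox` into `setupApprox` plus `addMarginalSamplePoints`, so further marginal models can be appended to an already trained approximant. A usage sketch under explicit assumptions: the import path for the class, the engine `solver`, and all parameter values are placeholders, and only keys documented in the docstrings above are used:
```
from rrompy.reduction_methods import RationalInterpolantPivotedPoleMatch as RIP
from rrompy.parameter.parameter_sampling import QuadratureSampler as QS

solver = ...  # a two-parameter HF engine, e.g. built like the example engines
params = {'S':20, 'SMarginal':3, 'POD':1, 'polybasis':"CHEBYSHEV",
          'samplerPivot':QS([10., 14.], "CHEBYSHEV"),
          'samplerMarginal':QS([.2, .8], "UNIFORM")}
approx = RIP(solver, mu0 = [12., .5], directionPivot = [0],
             approxParameters = params, verbosity = 10)
approx.setupApprox()                        # trains SMarginal marginal models
approx.addMarginalSamplePoints([.35, .65])  # appends two more marginal models
```
Note the warning raised in both implementations: with piecewise linear (sparse-kind) marginal bases, manually added points must respect the sample depth expected by `samplerMarginal`.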
# import warnings import numpy as np from scipy.special import factorial as fact from scipy.sparse import csr_matrix, hstack, SparseEfficiencyWarning from collections.abc import Iterable from copy import deepcopy as copy from itertools import combinations from .trained_model_pivoted_rational_nomatch import ( TrainedModelPivotedRationalNoMatch) from rrompy.utilities.base.types import (Tuple, Np1D, Np2D, List, ListAny, paramVal, paramList, sampList, HFEng) from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp from rrompy.utilities.numerical import dot from rrompy.utilities.numerical.point_matching import rationalFunctionMatching from rrompy.utilities.numerical.degree import reduceDegreeN from rrompy.utilities.poly_fitting.polynomial import (polybases as ppb, PolynomialInterpolator as PI) from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.poly_fitting.heaviside import (rational2heaviside, polyval as heavival, heavisideUniformShape, HeavisideInterpolator as HI) from rrompy.utilities.poly_fitting.nearest_neighbor import ( NearestNeighborInterpolator as NNI) from rrompy.utilities.poly_fitting.piecewise_linear import (sparsekinds, PiecewiseLinearInterpolator as PLI) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.sampling import sampleList, emptySampleList __all__ = ['TrainedModelPivotedRationalPoleMatch'] -def getChordalScaling(x:Np2D, r:float, projGramian : Np2D = 1., - projHalfGramian : Np2D = None) -> Tuple[Np2D, Np2D]: - goodX = np.where(np.isinf(x[:, 0]) == False)[0] - normX = np.empty(len(x)) - if projGramian is None: - normX[goodX] = np.sum(np.abs(dot(projHalfGramian, x[goodX].T)) ** 2., - axis = 0) ** .5 - else: - normX[goodX] = np.abs(np.sum(dot(projGramian, x[goodX].T) - * x[goodX].T.conj(), axis = 0)) ** .5 - scale = np.ones((len(normX), 1)) - scale[goodX, 0] = 1. / ((normX[goodX] / r) ** 2. + 1.) - xscaled = np.zeros_like(x) - for j in goodX: xscaled[j] = x[j] * scale[j] - return xscaled, r * (1 - scale) - -def normalizeChordal(x:Np2D, r:float, projGramian : Np2D = 1., - projHalfGramian : Np2D = None) -> Np2D: - for j in range(x.shape[0]): - x[j, -1] -= .5 * r - if projGramian is None: - norm2xj = np.sum(np.abs(dot(projHalfGramian, x[j, : -1])) ** 2.) - else: - norm2xj = np.abs(np.sum(dot(projGramian, x[j, : -1]) - * x[j, : -1].conj())) - normxj = (norm2xj + np.abs(x[j, -1]) ** 2.) ** .5 - if normxj < 1e-15: normxj += np.finfo(float).eps - x[j] *= .5 * r / normxj - x[j, -1] += .5 * r - return x - -def pullbackChordal(x:Np2D, r:float) -> Np2D: - y = copy(x[:, : -1]) - for j, p in enumerate(x[:, -1]): - scalexj = 1. - p / r - y[j] = np.inf if scalexj < 1e-15 else y[j] / scalexj - return y - class TrainedModelPivotedRationalPoleMatch(TrainedModelPivotedRationalNoMatch): """ ROM approximant evaluation for pivoted approximants based on interpolation of rational approximants (with pole matching). Attributes: Data: dictionary with all that can be pickled. 
""" def compress(self, collapse : bool = False, tol : float = 0., returnRMat : bool = False, **compressMatrixkwargs): Psupp = copy(self.data.Psupp) RMat = super().compress(collapse, tol, True, **compressMatrixkwargs) if RMat is None: return for j in range(len(self.data.coeffsEff)): self.data.coeffsEff[j] = dot(self.data.coeffsEff[j], RMat.T) for obj, suppj in zip(self.data.HIs, Psupp): obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]]) if hasattr(self, "_HIsExcl"): for obj, suppj in zip(self._HIsExcl, Psupp): obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]]) if not hasattr(self, "_PsExcl"): self._PsuppExcl = [0] * len(self._PsuppExcl) if returnRMat: return RMat def centerNormalizeMarginal(self, mu : paramList = [], mu0 : paramVal = None) -> paramList: """ Compute normalized parameter to be plugged into approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.data.mu0Marginal. Returns: Normalized parameter. """ mu = self.checkParameterListMarginal(mu) if mu0 is None: mu0 = self.checkParameterListMarginal( self.data.mu0(0, self.data.directionMarginal)) return (self.mapParameterList(mu, idx = self.data.directionMarginal) - self.mapParameterList(mu0, idx = self.data.directionMarginal) ) / [self.data.scaleFactor[x] for x in self.data.directionMarginal] def setupMarginalInterp(self, approx, interpPars:ListAny, extraPar = None): vbMng(self, "INIT", "Starting computation of marginal interpolator.", 12) musMCN = self.centerNormalizeMarginal(self.data.musMarginal) nM, pbM = len(musMCN), approx.polybasisMarginal if pbM in ppb + rbpb: if extraPar: approx._setMMarginalAuto() _MMarginalEff = approx.paramsMarginal["MMarginal"] if pbM in ppb: p = PI() elif pbM in rbpb: p = RBI() else: # if pbM in sparsekinds + ["NEARESTNEIGHBOR"]: if pbM == "NEARESTNEIGHBOR": p = NNI() else: # if pbM in sparsekinds: pllims = [[-1.] * self.data.nparMarginal, [1.] * self.data.nparMarginal] p = PLI() for ipts, pts in enumerate(self.data.suppEffPts): if len(pts) == 0: raise RROMPyException("Empty list of support points.") musMCNEff, valsEff = musMCN[pts], np.eye(len(pts)) if pbM in ppb + rbpb: if extraPar: if ipts > 0: verb = approx.verbosity approx.verbosity = 0 _musM = approx.musMarginal approx.musMarginal = musMCNEff approx._setMMarginalAuto() approx.musMarginal = _musM approx.verbosity = verb else: approx.paramsMarginal["MMarginal"] = reduceDegreeN( _MMarginalEff, len(musMCNEff), self.data.nparMarginal, approx.paramsMarginal["polydegreetypeMarginal"]) MMEff = approx.paramsMarginal["MMarginal"] while MMEff >= 0: wellCond, msg = p.setupByInterpolation(musMCNEff, valsEff, MMEff, *interpPars) vbMng(self, "MAIN", msg, 30) if wellCond: break vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing " "MMarginal by 1."), 35) MMEff -= 1 if MMEff < 0: raise RROMPyException(("Instability in computation of " "interpolant. Aborting.")) if (pbM in rbpb and len(interpPars) > 4 and "optimizeScalingBounds" in interpPars[4].keys()): interpPars[4]["optimizeScalingBounds"] = [-1., -1.] 
elif pbM == "NEARESTNEIGHBOR": if ipts > 0: interpPars[0] = 1 p.setupByInterpolation(musMCNEff, valsEff, *interpPars) elif ipts == 0: # and pbM in sparsekinds: p.setupByInterpolation(musMCNEff, valsEff, pllims, extraPar[pts], *interpPars) if ipts == 0: self.data.marginalInterp = copy(p) self.data.coeffsEff, self.data.polesEff = [], [] N = len(self.data.suppEffIdx) goodIdx = np.where(self.data.suppEffIdx != -1)[0] for hi, sup in zip(self.data.HIs, self.data.Psupp): pEff, cEff = hi.poles.reshape(-1, 1), hi.coeffs - if self.data.chordalRadius[0] > 0.: - pEff = np.hstack(getChordalScaling(pEff, - self.data.chordalRadius[0])) - if self.data.chordalRadius[1] > 0.: - if self.data.projGramian is None: - projGramian = None - projHalfGramian = self.data.projMat[:, - sup : sup + cEff.shape[1]] - else: - projGramian = self.data.projGramian[ - sup : sup + cEff.shape[1]][:, - sup : sup + cEff.shape[1]] - projHalfGramian = None - cEff, cEffH = getChordalScaling(cEff, - self.data.chordalRadius[1], - projGramian, projHalfGramian) - else: - cEffH = np.empty((cEff.shape[0], 0)) + cEffH = np.empty((cEff.shape[0], 0)) if (self.data._collapsed or self.data.projMat.shape[1] == cEff.shape[1]): cEff = np.hstack([cEff, cEffH]) else: supC = self.data.projMat.shape[1] - sup - cEff.shape[1] cEff = hstack((csr_matrix((len(cEff), sup)), csr_matrix(cEff), csr_matrix((len(cEff), supC)), cEffH), "csr") goodIdxC = np.append(goodIdx, np.arange(N, cEff.shape[0])) self.data.coeffsEff += [cEff[goodIdxC, :]] self.data.polesEff += [pEff[goodIdx]] else: ptsBad = [i for i in range(nM) if i not in pts] idxBad = np.where(self.data.suppEffIdx[goodIdx] == ipts)[0] warnings.simplefilter('ignore', SparseEfficiencyWarning) if pbM in sparsekinds: for ij, j in enumerate(ptsBad): nearest = pts[np.argmin(np.sum(np.abs(musMCNEff.data - np.tile(musMCN[j], [len(pts), 1]) ), axis = 1).flatten())] self.data.coeffsEff[j][idxBad] = copy( self.data.coeffsEff[nearest][idxBad]) self.data.polesEff[j][idxBad] = copy( self.data.polesEff[nearest][idxBad]) else: if (self.data._collapsed or self.data.projMat.shape[1] == cEff.shape[1]): cfBase = np.zeros((len(idxBad), cEff.shape[1]), dtype = cEff.dtype) else: cfBase = csr_matrix((len(idxBad), self.data.coeffsEff[0].shape[1]), dtype = cEff.dtype) valMuMBad = p(musMCN[ptsBad]) for ijb, jb in enumerate(ptsBad): self.data.coeffsEff[jb][idxBad] = copy(cfBase) self.data.polesEff[jb][idxBad] = 0. 
for ij, j in enumerate(pts): val = valMuMBad[ij][ijb] if not np.isclose(val, 0., atol = 1e-15): self.data.coeffsEff[jb][idxBad] += (val * self.data.coeffsEff[j][idxBad]) self.data.polesEff[jb][idxBad] += (val * self.data.polesEff[j][idxBad]) - if self.data.chordalRadius[0] > 0: - self.data.polesEff[jb][idxBad] = normalizeChordal( - self.data.polesEff[jb][idxBad], - self.data.chordalRadius[0]) - if self.data.chordalRadius[1] > 0: - self.data.coeffsEff[jb][idxBad] = normalizeChordal( - self.data.coeffsEff[jb][idxBad], - self.data.chordalRadius[1], - self.data.projGramian, - self.data.projMat) warnings.filters.pop(0) if pbM in ppb + rbpb: approx.paramsMarginal["MMarginal"] = _MMarginalEff vbMng(self, "DEL", "Done computing marginal interpolator.", 12) def updateEffectiveSamples(self, exclude:List[int], *args, **kwargs): if hasattr(self, "_idxExcl"): for j, excl in enumerate(self._idxExcl): self.data.HIs.insert(excl, self._HIsExcl[j]) super().updateEffectiveSamples(exclude) self._HIsExcl = [] for excl in self._idxExcl[::-1]: self._HIsExcl = [self.data.HIs.pop(excl)] + self._HIsExcl poles = [hi.poles for hi in self.data.HIs] coeffs = [hi.coeffs for hi in self.data.HIs] self.initializeFromLists(poles, coeffs, self.data.Psupp, self.data.HIs[0].polybasis, *args, **kwargs) def initializeFromRational(self, *args, **kwargs): """Initialize Heaviside representation.""" RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") poles, coeffs = [], [] for Q, P in zip(self.data.Qs, self.data.Ps): cfs, pls, basis = rational2heaviside(P, Q) poles += [pls] coeffs += [cfs] self.initializeFromLists(poles, coeffs, self.data.Psupp, basis, *args, **kwargs) def initializeFromLists(self, poles:ListAny, coeffs:ListAny, supps:ListAny, basis:str, matchingWeight:float, HFEngine:HFEng, - is_state:bool, chordalRadius:Tuple[float, float]): + is_state:bool): """Initialize Heaviside representation.""" poles, coeffs = heavisideUniformShape(poles, coeffs) N = len(poles[0]) - if chordalRadius[0] == "AUTO": chordalRadius[0] = 1. - if chordalRadius[1] == "AUTO": - norm2s = 0. - for c, sup in zip(coeffs, self.data.Psupp): - if self.data.projGramian is None: - gramEff = self.data.projMat[:, sup : sup + c.shape[1]] - norm2s += np.sum(np.abs(dot(gramEff, c[: N].T)) ** 2.) - else: - gramEff = self.data.projGramian[sup : sup + c.shape[1]][:, - sup : sup + c.shape[1]] - norm2s += np.sum(np.abs(dot(gramEff, c[: N].T) - * c[: N].T.conj())) - chordalRadius[1] = (norm2s / N / len(coeffs)) ** .5 - self.data.chordalRadius = copy(chordalRadius) - if is_state and chordalRadius[1] > 0: chordalRadius[1] = "AUTO" poles, coeffs = rationalFunctionMatching(poles, coeffs, self.data.musMarginal.data, matchingWeight, supps, self.data.projMat, HFEngine, - is_state, None, chordalRadius) + is_state, None) self.data.HIs = [] for pls, cfs in zip(poles, coeffs): hsi = HI() hsi.poles = pls if len(cfs) == len(pls): cfs = np.pad(cfs, ((0, 1), (0, 0)), "constant") hsi.coeffs = cfs hsi.npar = 1 hsi.polybasis = basis self.data.HIs += [hsi] self.data.suppEffPts = [np.arange(len(self.data.HIs))] self.data.suppEffIdx = np.zeros(len(poles[0]), dtype = int) def checkShared(self, shared:float, correction : str = "ERASE") -> str: N = len(self.data.HIs[0].poles) M = len(self.data.HIs) correction = correction.upper().strip().replace(" ","") if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]: RROMPyWarning(("Correction kind not recognized. 
Overriding to " "'ERASE'.")) correction = "ERASE" goodLocPoles = np.array([np.isinf(hi.poles) == False for hi in self.data.HIs]) self.data.suppEffPts = [np.arange(len(self.data.HIs))] self.data.suppEffIdx = - np.ones(N, dtype = int) goodGlobPoles = np.sum(goodLocPoles, axis = 0) goodEnoughPoles = goodGlobPoles >= max(1., 1. * shared * M) keepPole = np.where(goodEnoughPoles)[0] halfPole = np.where(goodEnoughPoles * (goodGlobPoles < M))[0] self.data.suppEffIdx[keepPole] = 0 for idxR in halfPole: pts = np.where(goodLocPoles[:, idxR])[0] idxEff = len(self.data.suppEffPts) for idEff, prevPts in enumerate(self.data.suppEffPts): if len(prevPts) == len(pts): if np.allclose(prevPts, pts): idxEff = idEff break if idxEff == len(self.data.suppEffPts): self.data.suppEffPts += [pts] self.data.suppEffIdx[idxR] = idxEff degBad = len(self.data.HIs[0].coeffs) - N - 1 for pt in range(len(self.data.HIs)): idxR = np.where(goodLocPoles[pt] * (goodEnoughPoles == False))[0] self.removePoleResLocal(idxR, pt, degBad, correction, True) return ("Hard-erased {} pole".format(N - len(keepPole)) + "s" * (N - len(keepPole) != 1) + " and soft-erased {} pole".format(len(halfPole)) + "s" * (len(halfPole) != 1) + ".") def removePoleResLocal(self, badidx:List[int], margidx:int, degcorr : int = None, correction : str = "ERASE", hidden : bool = False): if not hasattr(badidx, "__len__"): badidx = [badidx] badidx = np.array(badidx) if len(badidx) == 0: return correction = correction.upper().strip().replace(" ","") if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]: RROMPyWarning(("Correction kind not recognized. Overriding to " "'ERASE'.")) correction = "ERASE" if hidden: N = len(self.data.HIs[margidx].poles) else: N = len(self.data.polesEff[margidx]) goodidx = [j for j in range(N) if j not in badidx] if correction != "ERASE": if degcorr is None: if hidden: degcorr = len(self.data.HIs[margidx].coeffs) - N - 1 else: degcorr = self.data.coeffsEff[margidx].shape[0] - N - 1 muM, musEff = self.data.musMarginal[margidx], [] polybasis = self.data.HIs[margidx].polybasis for mu in self.data.mus: if np.allclose(mu(self.data.directionMarginal), muM): musEff += [mu(self.data.directionPivot[0])] musEff = self.centerNormalizePivot(musEff) if hidden: plsBad = self.data.HIs[margidx].poles[badidx] else: plsBad = self.data.polesEff[margidx][badidx, 0] plsBadEff = np.isinf(plsBad) == False plsBad, badidx = plsBad[plsBadEff], badidx[plsBadEff] if hidden: plsGood = self.data.HIs[margidx].poles[goodidx] corrVals = heavival(musEff, self.data.HIs[margidx].coeffs[badidx], plsBad, polybasis).T else: plsGood = self.data.polesEff[margidx][goodidx] corrVals = heavival(musEff, self.data.coeffsEff[margidx].toarray()[badidx], plsBad, polybasis).T if correction == "RATIONAL": hi = HI() hi.setupByInterpolation(musEff, plsGood, corrVals, degcorr, polybasis) if hidden: self.data.HIs[margidx].coeffs[goodidx] += ( hi.coeffs[: len(goodidx)]) else: self.data.coeffsEff[margidx][goodidx, :] += ( hi.coeffs[: len(goodidx)]) polyCorr = hi.coeffs[len(goodidx) :] elif correction == "POLYNOMIAL": pi = PI() pi.setupByInterpolation(musEff, corrVals, degcorr, polybasis.split("_")[0]) polyCorr = pi.coeffs if hidden: self.data.HIs[margidx].coeffs[N : N + degcorr + 1] += polyCorr else: self.data.coeffsEff[margidx][N : N + degcorr + 1, :] += ( polyCorr) if hidden: self.data.HIs[margidx].poles[badidx] = np.inf self.data.HIs[margidx].coeffs[badidx] = 0. 
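# In the hidden (Heaviside) representation, bad poles are parked at infinity
# with zeroed coefficients, so array shapes are preserved; the else branch
# below instead drops the corresponding rows from polesEff and coeffsEff.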
else: self.data.polesEff[margidx] = self.data.polesEff[margidx][goodidx] goodidx += list(range(N, self.data.coeffsEff[margidx].shape[0])) self.data.coeffsEff[margidx] = ( self.data.coeffsEff[margidx][goodidx, :]) def removePoleResGlobal(self, badidx:List[int], degcorr : int = None, correction : str = "ERASE", hidden : bool = False): if not hasattr(badidx, "__len__"): badidx = [badidx] if len(badidx) == 0: return correction = correction.upper().strip().replace(" ","") if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]: RROMPyWarning(("Correction kind not recognized. Overriding to " "'ERASE'.")) correction = "ERASE" for margidx in range(len(self.data.HIs)): self.removePoleResLocal(badidx, margidx, degcorr, correction, hidden) def getApproxReduced(self, mu : paramList = []) -> sampList: """ Evaluate reduced representation of approximant at arbitrary parameter. Args: mu: Target parameter. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = self.checkParameterList(mu) if (not hasattr(self, "lastSolvedApproxReduced") or self.lastSolvedApproxReduced != mu): vbMng(self, "INIT", "Evaluating approximant at mu = {}.".format(mu), 12) muP = self.centerNormalizePivot(mu(self.data.directionPivot)) muM = mu(self.data.directionMarginal) his = self.interpolateMarginalInterpolator(muM) for i, (mP, hi) in enumerate(zip(muP, his)): uAppR = hi(mP)[:, 0] if i == 0: uApproxR = np.empty((len(uAppR), len(mu)), dtype = uAppR.dtype) uApproxR[:, i] = uAppR self.uApproxReduced = sampleList(uApproxR) vbMng(self, "DEL", "Done evaluating approximant.", 12) self.lastSolvedApproxReduced = mu return self.uApproxReduced def interpolateMarginalInterpolator(self, mu : paramList = []) -> ListAny: """Obtain interpolated approximant interpolator.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal models at mu = {}.".format(mu), 95) his = [] muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) verb, self.verbosity = self.verbosity, 0 poless = self.interpolateMarginalPoles(mu, mIvals) coeffss = self.interpolateMarginalCoeffs(mu, mIvals) self.verbosity = verb for j in range(len(mu)): his += [HI()] his[-1].poles = poless[j] his[-1].coeffs = coeffss[j] his[-1].npar = 1 his[-1].polybasis = self.data.HIs[0].polybasis vbMng(self, "DEL", "Done interpolating marginal models.", 95) return his def interpolateMarginalPoles(self, mu : paramList = [], mIvals : Np2D = None) -> ListAny: """Obtain interpolated approximant poles.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal poles at mu = {}.".format(mu), 95) intMPoles = np.zeros((len(mu),) + self.data.polesEff[0].shape, dtype = self.data.polesEff[0].dtype) if mIvals is None: muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) for pEff, mI in zip(self.data.polesEff, mIvals): for j, m in enumerate(mI): intMPoles[j] += m * pEff - rCP = self.data.chordalRadius[0] - if rCP > 0: - for j in range(len(mu)): - intMPoles[j, ..., 0] = pullbackChordal( - normalizeChordal(intMPoles[j], rCP), - rCP)[..., 0] vbMng(self, "DEL", "Done interpolating marginal poles.", 95) return intMPoles[..., 0] def interpolateMarginalCoeffs(self, mu : paramList = [], mIvals : Np2D = None) -> ListAny: """Obtain interpolated approximant coefficients.""" mu = self.checkParameterListMarginal(mu) vbMng(self, "INIT", "Interpolating marginal coefficients at mu = {}.".format(mu), 95) intMCoeffs = np.zeros((len(mu),) + self.data.coeffsEff[0].shape, dtype = 
self.data.coeffsEff[0].dtype) if mIvals is None: muC = self.centerNormalizeMarginal(mu) mIvals = self.data.marginalInterp(muC) for cEff, mI in zip(self.data.coeffsEff, mIvals): for j, m in enumerate(mI): intMCoeffs[j] += m * cEff - rCC = self.data.chordalRadius[1] - if rCC > 0: - for j in range(len(mu)): - intMCoeffs[j, ..., : -1] = pullbackChordal( - normalizeChordal(intMCoeffs[j], rCC, - self.data.projGramian, - self.data.projMat), - rCC) - intMCoeffs = intMCoeffs[..., : -1] vbMng(self, "DEL", "Done interpolating marginal coefficients.", 95) return intMCoeffs def getPVal(self, mu : paramList = []) -> sampList: """ Evaluate rational numerator at arbitrary parameter. Args: mu: Target parameter. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = self.checkParameterList(mu) p = emptySampleList() muP = self.centerNormalizePivot(mu(self.data.directionPivot)) muM = mu(self.data.directionMarginal) his = self.interpolateMarginalInterpolator(muM) for i, (mP, hi) in enumerate(zip(muP, his)): Pval = hi(mP) * np.prod(mP[0] - hi.poles) if i == 0: p.reset((len(Pval), len(mu)), dtype = Pval.dtype) p[i] = Pval return p def getQVal(self, mu:Np1D, der : List[int] = None, scl : Np1D = None) -> Np1D: """ Evaluate rational denominator at arbitrary parameter. Args: mu: Target parameter. der(optional): Derivatives to take before evaluation. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mu = self.checkParameterList(mu) muP = self.centerNormalizePivot(mu(self.data.directionPivot)) muM = mu(self.data.directionMarginal) if der is None: derP, derM = 0, [0] else: derP = der[self.data.directionPivot[0]] derM = [der[x] for x in self.data.directionMarginal] if np.any(np.array(derM) != 0): raise RROMPyException(("Derivatives of Q with respect to marginal " "parameters not allowed.")) sclP = 1 if scl is None else scl[self.data.directionPivot[0]] derVal = np.zeros(len(mu), dtype = np.complex) pls = self.interpolateMarginalPoles(muM) for i, (mP, pl) in enumerate(zip(muP, pls)): N = len(pl) if derP == N: derVal[i] = 1. elif derP >= 0 and derP < N: plDist = mP[0] - pl for terms in combinations(np.arange(N), N - derP): derVal[i] += np.prod(plDist[list(terms)]) return sclP ** derP * fact(derP) * derVal def getPoles(self, marginalVals : ListAny = [fp]) -> paramList: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters") mVals = list(marginalVals) rDim = mVals.index(fp) if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]: raise RROMPyException(("Exactly 1 'freepar' entry in " "marginalVals must be provided.")) if rDim != self.data.directionPivot[0]: raise RROMPyException(("'freepar' entry in marginalVals must " "coincide with pivot direction.")) mVals[rDim] = self.data.mu0(rDim)[0] mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim] roots = (self.data.scaleFactor[rDim] * self.interpolateMarginalPoles(mMarg)[0]) return self.mapParameterList(self.mapParameterList(self.data.mu0(rDim), idx = [rDim])(0, 0) + roots, "B", [rDim])(0) def getResidues(self, *args, **kwargs) -> Tuple[paramList, Np2D]: """ Obtain approximant residues. Returns: Numpy matrix with residues as columns. 
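For instance (illustrative call, assuming the pivot is the first parameter component): pls, res = model.getResidues(marginalVals = [fp, 0.5]) freezes the marginal parameter at 0.5 and returns the matched poles pls along the pivot direction, with the corresponding residues as the columns of res.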
""" pls = self.getPoles(*args, **kwargs) if len(args) == 1: mVals = args[0] elif len(args) == 0: mVals = [None] else: mVals = kwargs["marginalVals"] if not isinstance(mVals, Iterable): mVals = [mVals] mVals = list(mVals) rDim = mVals.index(fp) mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim] res = self.interpolateMarginalCoeffs(mMarg)[0][: len(pls), :].T if not self.data._collapsed: res = dot(self.data.projMat, res).T return pls, res diff --git a/rrompy/reduction_methods/standard/generic_standard_approximant.py b/rrompy/reduction_methods/standard/generic_standard_approximant.py index daab2b0..2437043 100644 --- a/rrompy/reduction_methods/standard/generic_standard_approximant.py +++ b/rrompy/reduction_methods/standard/generic_standard_approximant.py @@ -1,194 +1,212 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from copy import deepcopy as copy from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.utilities.base import verbosityManager as vbMng -from rrompy.utilities.base.types import Np2D +from rrompy.utilities.base.types import Np2D, paramList from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) __all__ = ['GenericStandardApproximant'] class GenericStandardApproximant(GenericApproximant): """ ROM interpolant computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. 
uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def __init__(self, *args, **kwargs): self._preInit() from rrompy.parameter.parameter_sampling import EmptySampler as ES self._addParametersToList([], [], ["sampler"], [ES()]) super().__init__(*args, **kwargs) self._postInit() @property def mus(self): """Value of mus. Its assignment may reset snapshots.""" return self._mus @mus.setter def mus(self, mus): mus = self.checkParameterList(mus) musOld = copy(self.mus) if hasattr(self, '_mus') else None if (musOld is None or len(mus) != len(musOld) or not mus == musOld): self.resetSamples() self._mus = mus @property def muBounds(self): """Value of muBounds.""" return self.sampler.lims @property def sampler(self): """Value of sampler.""" return self._sampler @sampler.setter def sampler(self, sampler): if 'generatePoints' not in dir(sampler): raise RROMPyException("Sampler type not recognized.") if hasattr(self, '_sampler') and self._sampler is not None: samplerOld = self.sampler self._sampler = sampler self._approxParameters["sampler"] = self.sampler if not 'samplerOld' in locals() or samplerOld != self.sampler: self.resetSamples() def setSamples(self, samplingEngine, merge : bool = False): """Copy samplingEngine and samples.""" vbMng(self, "INIT", "Transferring samples.", 15) if isinstance(samplingEngine, (str, list, tuple,)): self.setupSampling() self.samplingEngine.load(samplingEngine, merge) elif merge: try: selfkeys = self.samplingEngine.feature_keys for key in samplingEngine.feature_keys: if key in selfkeys: self.samplingEngine._mergeFeature(key, samplingEngine.feature_vals[key]) except: RROMPyWarning(("Sample merge failed. Falling back to complete " "sampling engine replacement.")) self.samplingEngine = copy(samplingEngine) else: self.samplingEngine = copy(samplingEngine) if self.POD != 0 and (self.samplingEngine.nsamples != len(self.samplingEngine.samples_normal)): RROMPyWarning(("Assigning non-POD sampling engine to POD " "approximant is unstable.
Declassing local " "POD to 0.")) self._POD = 0 self._mus = copy(self.samplingEngine.mus) self.scaleFactor = self.samplingEngine.scaleFactor vbMng(self, "DEL", "Done transferring samples.", 15) def computeSnapshots(self): """Compute snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start snapshot computation.") if self.samplingEngine.nsamples != self.S: self.computeScaleFactor() self.samplingEngine.scaleFactor = self.scaleFactorDer vbMng(self, "INIT", "Starting computation of snapshots.", 5) self.mus = self.sampler.generatePoints(self.S) while len(self.mus) > self.S: self.mus.pop() self.samplingEngine.iterSample(self.mus) vbMng(self, "DEL", "Done computing snapshots.", 5) def computeScaleFactor(self): """Compute parameter rescaling factor.""" self.scaleFactor = .5 * np.abs(( self.mapParameterList(self.muBounds[0]) - self.mapParameterList(self.muBounds[1]))[0]) def _setupTrainedModel(self, pMat:Np2D, pMatUpdate : bool = False): if self.POD == 1 and not ( hasattr(self.HFEngine.C, "is_mu_independent") and self.HFEngine.C.is_mu_independent in self._output_lvl): raise RROMPyException(("Cannot apply mu-dependent C to " "orthonormalized samples.")) vbMng(self, "INIT", "Extracting system output from state.", 35) pMat = self.HFEngine.applyC(pMat, self.mus) vbMng(self, "DEL", "Done extracting system output.", 35) if self.trainedModel is None: self.trainedModel = self.tModelType() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp datadict = {"mu0": self.mu0, "mus": copy(self.mus), "projMat": pMat, "scaleFactor": self.scaleFactor, "parameterMap": self.HFEngine.parameterMap} self.trainedModel.data = self.initializeModelData(datadict)[0] else: self.trainedModel = self.trainedModel if pMatUpdate: self.trainedModel.data.projMat = np.hstack( (self.trainedModel.data.projMat, pMat)) else: self.trainedModel.data.projMat = copy(pMat) self.trainedModel.data.mus = copy(self.mus) + + def addSamplePoints(self, mus:paramList) -> int: + """Add sample points to reduced model.""" + if not self.checkComputedApprox(): + raise RROMPyException(("Cannot add samples before initializing " + "reduced model through setupApprox.")) + RROMPyAssert(self._mode, message = "Cannot add sample points.") + mus = self.checkParameterList(mus) + vbMng(self, "INIT", + "Adding sample point{} at {}.".format("s" * (len(mus) > 1), mus), + 5) + for mu in mus: + self.mus.append(mu) + self.samplingEngine.nextSample(mu) + self._S = len(self.mus) + val = self.setupApprox() + vbMng(self, "DEL", "Done adding sample points.", 5) + return val diff --git a/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py b/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py index 692d4bd..2c105ad 100644 --- a/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py +++ b/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py @@ -1,630 +1,634 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details.
# # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>. # from abc import abstractmethod from copy import deepcopy as copy import numpy as np from matplotlib import pyplot as plt from rrompy.hfengines.base.linear_affine_engine import checkIfAffine from rrompy.reduction_methods.standard.generic_standard_approximant import ( GenericStandardApproximant) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal, paramList, sampList) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import dot from rrompy.utilities.expression import expressionEvaluator from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) from rrompy.sampling.sample_list import sampleList from rrompy.parameter import emptyParameterList, parameterList from rrompy.utilities.parallel import masterCore __all__ = ['GenericGreedyApproximant'] def localL2Distance(mus:Np2D, badmus:Np2D) -> Np2D: return np.linalg.norm(np.tile(mus[..., np.newaxis], [1, 1, len(badmus)]) - badmus[..., np.newaxis].T, axis = 1) def pruneSamples(mus:paramList, badmus:paramList, tol : float = 1e-8) -> Np1D: """Return indices of the elements of mus which are too close to badmus.""" if isinstance(mus, (parameterList, sampleList)): mus = mus.data if isinstance(badmus, (parameterList, sampleList)): badmus = badmus.data if len(badmus) == 0: return np.arange(len(mus)) proximity = np.min(localL2Distance(mus, badmus), axis = 1) return np.where(proximity <= tol)[0] class GenericGreedyApproximant(GenericStandardApproximant): """ ROM greedy interpolant computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': number of starting training points; - 'sampler': sample point generator; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'collinearityTol': collinearity tolerance for greedy algorithm; defaults to 0.; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'nTestPoints': number of test points; defaults to 5e2; - 'samplerTrainSet': training sample points generator; defaults to sampler. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'collinearityTol': collinearity tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'nTestPoints': number of test points; - 'samplerTrainSet': training sample points generator. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization.
scaleFactorDer: Scaling factors for derivative computation. S: Number of starting training points. sampler: Sample point generator. greedyTol: Uniform error tolerance for greedy algorithm. collinearityTol: Collinearity tolerance for greedy algorithm. maxIter: Maximum number of greedy steps. nTestPoints: Number of test points. samplerTrainSet: Training sample points generator. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def __init__(self, *args, **kwargs): self._preInit() if not hasattr(self, "_affine_lvl"): self._affine_lvl = [] self._affine_lvl += [1] self._addParametersToList(["greedyTol", "collinearityTol", "maxIter", "nTestPoints", "samplerTrainSet"], [1e-2, 0., 1e2, 5e2, "AUTO"]) super().__init__(*args, **kwargs) self._postInit() @property def greedyTol(self): """Value of greedyTol.""" return self._greedyTol @greedyTol.setter def greedyTol(self, greedyTol): if greedyTol < 0: raise RROMPyException("greedyTol must be non-negative.") if hasattr(self, "_greedyTol") and self.greedyTol is not None: greedyTolold = self.greedyTol else: greedyTolold = -1 self._greedyTol = greedyTol self._approxParameters["greedyTol"] = self.greedyTol if greedyTolold != self.greedyTol: self.resetSamples() @property def collinearityTol(self): """Value of collinearityTol.""" return self._collinearityTol @collinearityTol.setter def collinearityTol(self, collinearityTol): if collinearityTol < 0: raise RROMPyException("collinearityTol must be non-negative.") if (hasattr(self, "_collinearityTol") and self.collinearityTol is not None): collinearityTolold = self.collinearityTol else: collinearityTolold = -1 self._collinearityTol = collinearityTol self._approxParameters["collinearityTol"] = self.collinearityTol if collinearityTolold != self.collinearityTol: self.resetSamples() @property def maxIter(self): """Value of maxIter.""" return self._maxIter @maxIter.setter def maxIter(self, maxIter): if maxIter <= 0: raise RROMPyException("maxIter must be positive.") if hasattr(self, "_maxIter") and self.maxIter is not None: maxIterold = self.maxIter else: maxIterold = -1 self._maxIter = maxIter self._approxParameters["maxIter"] = self.maxIter if maxIterold != self.maxIter: self.resetSamples() @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= 0: raise RROMPyException("nTestPoints must be positive.") if not np.isclose(nTestPoints, np.int(nTestPoints)): raise RROMPyException("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "_nTestPoints") and self.nTestPoints is not None: nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() @property def samplerTrainSet(self): """Value of samplerTrainSet.""" return self._samplerTrainSet
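# NOTE: the setter below also accepts the string "AUTO", which falls back to
# the main sampler; any object exposing generatePoints is a valid training-set
# sampler. A hypothetical greedy configuration (values are illustrative
# assumptions, not defaults) could read:
#     params = {'S': 10, 'POD': 1, 'greedyTol': 1e-2, 'maxIter': 100,
#               'nTestPoints': 500, 'samplerTrainSet': "AUTO"}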
@samplerTrainSet.setter def samplerTrainSet(self, samplerTrainSet): if (isinstance(samplerTrainSet, (str,)) and samplerTrainSet.upper() == "AUTO"): samplerTrainSet = self.sampler if 'generatePoints' not in dir(samplerTrainSet): raise RROMPyException("samplerTrainSet type not recognized.") if (hasattr(self, '_samplerTrainSet') and self.samplerTrainSet not in [None, "AUTO"]): samplerTrainSetOld = self.samplerTrainSet self._samplerTrainSet = samplerTrainSet self._approxParameters["samplerTrainSet"] = self.samplerTrainSet if (not 'samplerTrainSetOld' in locals() or samplerTrainSetOld != self.samplerTrainSet): self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() self._mus = emptyParameterList() def _affineResidualMatricesContraction(self, rb:Np2D, rA : Np2D = None) \ -> Tuple[Np1D, Np1D, Np1D]: self.assembleReducedResidualBlocks(full = rA is not None) # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj() ff = np.sum(self.trainedModel.data.resbb.dot(rb) * rb.conj(), axis = 0) if rA is None: return ff # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj() Lf = np.sum(np.tensordot(self.trainedModel.data.resAb, rA, 2) * rb.conj(), axis = 0) # 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj() LL = np.sum(np.tensordot(self.trainedModel.data.resAA, rA, 2) * rA.conj(), axis = (0, 1)) return ff, Lf, LL def getErrorEstimatorAffine(self, mus:Np1D) -> Np1D: """Standard residual estimator.""" checkIfAffine(self.HFEngine, "apply affinity-based error estimator", False, self._affine_lvl) self.HFEngine.buildA() self.HFEngine.buildb() mus = self.checkParameterList(mus) tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0 uApproxRs = self.getApproxReduced(mus).data self.trainedModel.verbosity = tMverb muTestEff = self.mapParameterList(mus) radiusA = np.empty((len(self.HFEngine.thAs), len(mus)), dtype = np.complex) radiusb = np.empty((len(self.HFEngine.thbs), len(mus)), dtype = np.complex) for j, thA in enumerate(self.HFEngine.thAs): radiusA[j] = expressionEvaluator(thA[0], muTestEff) for j, thb in enumerate(self.HFEngine.thbs): radiusb[j] = expressionEvaluator(thb[0], muTestEff) radiusA = np.expand_dims(uApproxRs, 1) * radiusA ff, Lf, LL = self._affineResidualMatricesContraction(radiusb, radiusA) err = np.abs((LL - 2. * np.real(Lf) + ff) / ff) ** .5 return err def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D: setupOK = self.setupApproxLocal() if setupOK > 0: err = np.empty(len(mus)) err[:] = np.nan if not return_max: return err return err, [- setupOK], np.nan mus = self.checkParameterList(mus) vbMng(self.trainedModel, "INIT", "Evaluating error estimator at mu = {}.".format(mus), 10) err = self.getErrorEstimatorAffine(mus) vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10) if not return_max: return err idxMaxEst = [np.argmax(err)] return err, idxMaxEst, err[idxMaxEst] def _isLastSampleCollinear(self) -> bool: """Check collinearity of last sample.""" if self.collinearityTol <= 0.: return False if self.POD == 1: reff = self.samplingEngine.Rscale[:, -1] else: RROMPyWarning(("Repeated orthogonalization of the samples for " "collinearity check. 
Consider setting POD to " "True.")) if not hasattr(self, "_PODEngine"): from rrompy.sampling import PODEngine self._PODEngine = PODEngine(self.HFEngine) reff = self._PODEngine.generalizedQR(self.samplingEngine.samples, only_R = True, is_state = True)[:, -1] cLevel = np.abs(reff[-1]) / np.linalg.norm(reff) cLevel = np.inf if np.isclose(cLevel, 0., atol = 1e-15) else 1 / cLevel vbMng(self, "MAIN", "Collinearity indicator {:.4e}.".format(cLevel), 3) return cLevel > self.collinearityTol def plotEstimator(self, est:Np1D, idxMax:List[int], estMax:List[float]): if (not (np.any(np.isnan(est)) or np.any(np.isinf(est))) and masterCore()): fig = plt.figure(figsize = plt.figaspect(1. / self.npar)) for jpar in range(self.npar): ax = fig.add_subplot(1, self.npar, 1 + jpar) musre = np.array(self.muTest.re.data) errCP = copy(est) idx = np.delete(np.arange(self.npar), jpar) while len(musre) > 0: if self.npar == 1: currIdx = np.arange(len(musre)) else: currIdx = np.where(np.isclose(np.sum( np.abs(musre[:, idx] - musre[0, idx]), 1), 0., atol = 1e-15))[0] ax.semilogy(musre[currIdx, jpar], errCP[currIdx], 'k', linewidth = 1) musre = np.delete(musre, currIdx, 0) errCP = np.delete(errCP, currIdx) ax.semilogy([self.muBounds.re(0, jpar), self.muBounds.re(-1, jpar)], [self.greedyTol] * 2, 'r--') ax.semilogy(self.mus.re(jpar), 2. * self.greedyTol * np.ones(len(self.mus)), '*m') if len(idxMax) > 0 and estMax is not None: ax.semilogy(self.muTest.re(idxMax, jpar), estMax, 'xr') ax.set_xlim(*list(self.sampler.lims.re(jpar))) ax.grid() plt.tight_layout() plt.show() def greedyNextSample(self, muidx:int, plotEst : str = "NONE")\ -> Tuple[Np1D, int, float, paramVal]: """Compute next greedy snapshot of solution map.""" RROMPyAssert(self._mode, message = "Cannot add greedy sample.") mus = copy(self.muTest[muidx]) self.muTest.pop(muidx) for j, mu in enumerate(mus): vbMng(self, "MAIN", ("Adding sample point no. {} at {} to training " "set.").format(len(self.mus) + 1, mu), 3) self.mus.append(mu) self._S = len(self.mus) self._approxParameters["S"] = self.S if (self.samplingEngine.nsamples <= len(mus) - j - 1 or not np.allclose(mu, self.samplingEngine.mus[j - len(mus)])): self.samplingEngine.nextSample(mu) if self._isLastSampleCollinear(): vbMng(self, "MAIN", ("Collinearity above tolerance detected. 
Starting " "preemptive greedy loop termination."), 3) self._collinearityFlag = 1 errorEstTest = np.empty(len(self.muTest)) errorEstTest[:] = np.nan return errorEstTest, [-1], np.nan, np.nan errorEstTest, muidx, maxErrorEst = self.errorEstimator(self.muTest, True) if plotEst == "ALL": self.plotEstimator(errorEstTest, muidx, maxErrorEst) return errorEstTest, muidx, maxErrorEst, self.muTest[muidx] def _preliminaryTraining(self): """Initialize starting snapshots of solution map.""" RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") if self.samplingEngine.nsamples > 0: return self.resetSamples() self.computeScaleFactor() self.samplingEngine.scaleFactor = self.scaleFactorDer self.mus = self.samplerTrainSet.generatePoints(self.S) while len(self.mus) > self.S: self.mus.pop() muTestBase = self.sampler.generatePoints(self.nTestPoints, False) idxPop = pruneSamples(self.mapParameterList(muTestBase), self.mapParameterList(self.mus), 1e-10 * self.scaleFactor[0]) muTestBase.pop(idxPop) muLast = copy(self.mus[-1]) self.mus.pop() if len(self.mus) > 0: vbMng(self, "MAIN", ("Adding first {} sample point{} at {} to training " "set.").format(self.S - 1, "" + "s" * (self.S > 2), self.mus), 3) self.samplingEngine.iterSample(self.mus) self._S = len(self.mus) self._approxParameters["S"] = self.S self.muTest = emptyParameterList() self.muTest.reset((len(muTestBase) + 1, self.mus.shape[1])) self.muTest.data[: -1] = muTestBase.data self.muTest.data[-1] = muLast.data @abstractmethod def setupApproxLocal(self) -> int: if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up local approximant.", 5) pass vbMng(self, "DEL", "Done setting up local approximant.", 5) return 0 + def addSamplePoints(self, mus:paramList): + """Add sample points to reduced model.""" + raise RROMPyException("Cannot add samples to greedy reduced model.") + _postGreedyRecover = 1 def setupApprox(self, plotEst : str = "NONE") -> int: """Compute greedy snapshots of solution map.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) vbMng(self, "INIT", "Starting computation of snapshots.", 5) self._collinearityFlag = 0 self._preliminaryTraining() muidx, self.firstGreedyIter = [len(self.muTest) - 1], True errorEstTest, maxErrorEst = [np.inf], np.inf max2ErrorEst, trainedModelOld = np.inf, None while self.firstGreedyIter or (len(self.muTest) > 0 and (maxErrorEst is None or max2ErrorEst > self.greedyTol) and self.samplingEngine.nsamples < self.maxIter): muTestOld, errorEstTestOld = self.muTest, errorEstTest muidxOld, maxErrorEstOld = muidx, maxErrorEst errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample(muidx, plotEst) if maxErrorEst is not None and (np.any(np.isnan(maxErrorEst)) or np.any(np.isinf(maxErrorEst))): if self._collinearityFlag == 0 and not self.firstGreedyIter: RROMPyWarning(("Instability in a posteriori " "estimator. Starting preemptive greedy " "loop termination.")) self.muTest, errorEstTest = muTestOld, errorEstTestOld if self.firstGreedyIter and muidx[0] < 0: self.trainedModel = None if self._collinearityFlag: raise RROMPyException(("Starting sample points too " "collinear. Aborting greedy " "iterations.")) raise RROMPyException(("Instability in approximant " "computation. 
Aborting greedy " "iterations.")) self._S = trainedModelOld.data.approxParameters["S"] self._approxParameters["S"] = self.S while self.samplingEngine.nsamples > self.S: self.samplingEngine.popSample() while len(self.mus) > self.S: self.mus.pop(-1) muidx, maxErrorEst = muidxOld, maxErrorEstOld break if maxErrorEst is not None: max2ErrorEst = np.max(maxErrorEst) vbMng(self, "MAIN", ("Uniform testing error estimate " "{:.4e}.").format(max2ErrorEst), 5) if self.firstGreedyIter: trainedModelOld = copy(self.trainedModel) else: trainedModelOld.data = copy(self.trainedModel.data) self.firstGreedyIter = False vbMng(self, "DEL", ("Done computing snapshots (final snapshot count: " "{}).").format(self.samplingEngine.nsamples), 5) if (maxErrorEst is None or np.any(np.isnan(maxErrorEst)) or np.any(np.isinf(maxErrorEst))): while self.samplingEngine.nsamples > self.S: self.samplingEngine.popSample() while len(self.mus) > self.S: self.mus.pop(-1) elif self._postGreedyRecover: self._S = self.samplingEngine.nsamples while len(self.mus) < self.S: self.mus.append(self.samplingEngine.mus[len(self.mus)]) self.trainedModel = None self.setupApproxLocal() if plotEst == "LAST": self.plotEstimator(errorEstTest, muidx, maxErrorEst) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def assembleReducedResidualGramian(self, pMat:sampList): """ Build residual gramian of reduced linear system through projections. """ if (not hasattr(self.trainedModel.data, "gramian") or self.trainedModel.data.gramian is None): gramian = self.HFEngine.innerProduct(pMat, pMat, dual = True) else: Sold = self.trainedModel.data.gramian.shape[0] S = len(self.mus) if Sold > S: gramian = self.trainedModel.data.gramian[: S, : S] else: idxOld = list(range(Sold)) idxNew = list(range(Sold, S)) gramian = np.empty((S, S), dtype = np.complex) gramian[: Sold, : Sold] = self.trainedModel.data.gramian gramian[: Sold, Sold :] = self.HFEngine.innerProduct( pMat(idxNew), pMat(idxOld), dual = True) gramian[Sold :, : Sold] = gramian[: Sold, Sold :].T.conj() gramian[Sold :, Sold :] = self.HFEngine.innerProduct( pMat(idxNew), pMat(idxNew), dual = True) self.trainedModel.data.gramian = gramian def assembleReducedResidualBlocksbb(self, bs:List[Np1D]): """ Build blocks (of type bb) of reduced linear system through projections. """ nbs = len(bs) if (not hasattr(self.trainedModel.data, "resbb") or self.trainedModel.data.resbb is None): resbb = np.empty((nbs, nbs), dtype = np.complex) for i in range(nbs): Mbi = bs[i] resbb[i, i] = self.HFEngine.innerProduct(Mbi, Mbi, dual = True) for j in range(i): Mbj = bs[j] resbb[i, j] = self.HFEngine.innerProduct(Mbj, Mbi, dual = True) for i in range(nbs): for j in range(i + 1, nbs): resbb[i, j] = resbb[j, i].conj() self.trainedModel.data.resbb = resbb def assembleReducedResidualBlocksAb(self, As:List[Np2D], bs:List[Np1D], pMat:sampList): """ Build blocks (of type Ab) of reduced linear system through projections. 
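These are the cross terms between the affine terms of A (applied to the projected samples) and those of b, which _affineResidualMatricesContraction later contracts into the middle term of the squared residual norm.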
""" nAs = len(As) nbs = len(bs) S = len(self.mus) if (not hasattr(self.trainedModel.data, "resAb") or self.trainedModel.data.resAb is None): if isinstance(pMat, (parameterList, sampleList)): pMat = pMat.data resAb = np.empty((nbs, S, nAs), dtype = np.complex) for j in range(nAs): MAj = dot(As[j], pMat) for i in range(nbs): Mbi = bs[i] resAb[i, :, j] = self.HFEngine.innerProduct(MAj, Mbi, dual = True) else: Sold = self.trainedModel.data.resAb.shape[1] if Sold == S: return if Sold > S: resAb = self.trainedModel.data.resAb[:, : S, :] else: if isinstance(pMat, (parameterList, sampleList)): pMat = pMat.data resAb = np.empty((nbs, S, nAs), dtype = np.complex) resAb[:, : Sold, :] = self.trainedModel.data.resAb for j in range(nAs): MAj = dot(As[j], pMat[:, Sold :]) for i in range(nbs): Mbi = bs[i] resAb[i, Sold :, j] = self.HFEngine.innerProduct( MAj, Mbi, dual = True) self.trainedModel.data.resAb = resAb def assembleReducedResidualBlocksAA(self, As:List[Np2D], pMat:sampList): """ Build blocks (of type AA) of reduced linear system through projections. """ nAs = len(As) S = len(self.mus) if (not hasattr(self.trainedModel.data, "resAA") or self.trainedModel.data.resAA is None): if isinstance(pMat, (parameterList, sampleList)): pMat = pMat.data resAA = np.empty((S, nAs, S, nAs), dtype = np.complex) for i in range(nAs): MAi = dot(As[i], pMat) resAA[:, i, :, i] = self.HFEngine.innerProduct(MAi, MAi, dual = True) for j in range(i): MAj = dot(As[j], pMat) resAA[:, i, :, j] = self.HFEngine.innerProduct(MAj, MAi, dual = True) for i in range(nAs): for j in range(i + 1, nAs): resAA[:, i, :, j] = resAA[:, j, :, i].T.conj() else: Sold = self.trainedModel.data.resAA.shape[0] if Sold == S: return if Sold > S: resAA = self.trainedModel.data.resAA[: S, :, : S, :] else: if isinstance(pMat, (parameterList, sampleList)): pMat = pMat.data resAA = np.empty((S, nAs, S, nAs), dtype = np.complex) resAA[: Sold, :, : Sold, :] = self.trainedModel.data.resAA for i in range(nAs): MAi = dot(As[i], pMat) resAA[: Sold, i, Sold :, i] = self.HFEngine.innerProduct( MAi[:, Sold :], MAi[:, : Sold], dual = True) resAA[Sold :, i, : Sold, i] = resAA[: Sold, i, Sold :, i].T.conj() resAA[Sold :, i, Sold :, i] = self.HFEngine.innerProduct( MAi[:, Sold :], MAi[:, Sold :], dual = True) for j in range(i): MAj = dot(As[j], pMat) resAA[: Sold, i, Sold :, j] = ( self.HFEngine.innerProduct(MAj[:, Sold :], MAi[:, : Sold], dual = True)) resAA[Sold :, i, : Sold, j] = ( self.HFEngine.innerProduct(MAj[:, : Sold], MAi[:, Sold :], dual = True)) resAA[Sold :, i, Sold :, j] = ( self.HFEngine.innerProduct(MAj[:, Sold :], MAi[:, Sold :], dual = True)) for i in range(nAs): for j in range(i + 1, nAs): resAA[: Sold, i, Sold :, j] = ( resAA[Sold :, j, : Sold, i].T.conj()) resAA[Sold :, i, : Sold, j] = ( resAA[: Sold, j, Sold :, i].T.conj()) resAA[Sold :, i, Sold :, j] = ( resAA[Sold :, j, Sold :, i].T.conj()) self.trainedModel.data.resAA = resAA def assembleReducedResidualBlocks(self, full : bool = False): """Build affine blocks of affine decomposition of residual.""" if full: checkIfAffine(self.HFEngine, "assemble reduced residual blocks", False, self._affine_lvl) else: checkIfAffine(self.HFEngine, "assemble reduced RHS blocks", True, self._affine_lvl) self.HFEngine.buildb() self.assembleReducedResidualBlocksbb(self.HFEngine.bs) if full: pMat = self.samplingEngine.projectionMatrix self.HFEngine.buildA() self.assembleReducedResidualBlocksAb(self.HFEngine.As, self.HFEngine.bs, pMat) self.assembleReducedResidualBlocksAA(self.HFEngine.As, pMat) diff --git 
a/rrompy/reduction_methods/standard/nearest_neighbor.py b/rrompy/reduction_methods/standard/nearest_neighbor.py index d916c69..a3b44bf 100644 --- a/rrompy/reduction_methods/standard/nearest_neighbor.py +++ b/rrompy/reduction_methods/standard/nearest_neighbor.py @@ -1,167 +1,167 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see <http://www.gnu.org/licenses/>. # import numpy as np from collections.abc import Iterable from copy import deepcopy as copy from .generic_standard_approximant import GenericStandardApproximant from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.poly_fitting.nearest_neighbor import ( NearestNeighborInterpolator as NNI) from rrompy.utilities.exception_manager import RROMPyAssert __all__ = ['NearestNeighbor'] class NearestNeighbor(GenericStandardApproximant): """ ROM nearest neighbor approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'nNeighbors': number of nearest neighbors; defaults to 1; - 'radialDirectionalWeights': directional weights for computation of parameter distance; defaults to 1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'nNeighbors': number of nearest neighbors; - 'radialDirectionalWeights': directional weights for computation of parameter distance. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots on which current approximant is based. sampler: Sample point generator. nNeighbors: Number of nearest neighbors. radialDirectionalWeights: Directional weights for computation of parameter distance. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList.
lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["nNeighbors", "radialDirectionalWeights"], [1, 1.]) super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_nearest_neighbor import ( TrainedModelNearestNeighbor) return TrainedModelNearestNeighbor @property def nNeighbors(self): """Value of nNeighbors.""" return self._nNeighbors @nNeighbors.setter def nNeighbors(self, nNeighbors): self._nNeighbors = max(1, nNeighbors) self._approxParameters["nNeighbors"] = self.nNeighbors @property def radialDirectionalWeights(self): """Value of radialDirectionalWeights.""" return self._radialDirectionalWeights @radialDirectionalWeights.setter def radialDirectionalWeights(self, radialDirectionalWeights): if isinstance(radialDirectionalWeights, Iterable): radialDirectionalWeights = list(radialDirectionalWeights) else: radialDirectionalWeights = [radialDirectionalWeights] self._radialDirectionalWeights = radialDirectionalWeights self._approxParameters["radialDirectionalWeights"] = ( self.radialDirectionalWeights) def setupApprox(self) -> int: - """Compute RB projection matrix.""" + """Compute NN approximant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5) self.computeSnapshots() firstRun = self.trainedModel is None self._setupTrainedModel(self.samplingEngine.projectionMatrix) if firstRun: self.trainedModel.data.NN = NNI() if self.POD == 1: R = self.samplingEngine.Rscale if isinstance(R, (np.ndarray,)): vals, supp = list(R.T), [0] * R.shape[1] else: vals, supp = [], [] for j in range(R.shape[1]): idx = R.indices[R.indptr[j] : R.indptr[j + 1]] if len(idx) == 0: supp += [0] val = np.empty(0, dtype = R.dtype) else: supp += [idx[0]] idx = idx - idx[0] val = np.zeros(idx[-1] + 1, dtype = R.dtype) val[idx] = R.data[R.indptr[j] : R.indptr[j + 1]] vals += [val] else: if self.POD == 0: vals = [np.ones(1)] * len(self.mus) else: vals = list(self.samplingEngine.Rscale.reshape(-1, 1)) supp = list(range(len(self.mus))) self.trainedModel.data.NN.setupByInterpolation(self.mus, np.arange(len(self.mus)), self.nNeighbors, self.radialDirectionalWeights) self.trainedModel.data.vals, self.trainedModel.data.supp = vals, supp self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 diff --git a/rrompy/reduction_methods/standard/rational_interpolant.py b/rrompy/reduction_methods/standard/rational_interpolant.py index 9516544..fbf3d3f 100644 --- a/rrompy/reduction_methods/standard/rational_interpolant.py +++ b/rrompy/reduction_methods/standard/rational_interpolant.py @@ -1,721 +1,721 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
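For reference, a minimal usage sketch of the NearestNeighbor approximant defined above; it is not runnable as-is, since the high fidelity engine `solver`, the parameter interval, and the query point are hypothetical placeholders, and only keys from the class docstring are used.
```
from rrompy.reduction_methods import NearestNeighbor as NN
from rrompy.parameter.parameter_sampling import QuadratureSampler as QS

solver = ...  # any RROMPy HF problem engine (placeholder)
params = {'S': 10, 'POD': True,         # 10 snapshots, orthogonalized via POD
          'nNeighbors': 2,              # blend the two closest snapshots
          'radialDirectionalWeights': 1.,
          'sampler': QS([10., 20.], "CHEBYSHEV")}
approx = NN(solver, mu0 = 15., approxParameters = params, verbosity = 10)
approx.setupApprox()      # snapshots + NN interpolator over their indices
u = approx.getApprox(12.) # surrogate evaluation at a new parameter
```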
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from scipy.linalg import eig from collections.abc import Iterable from .generic_standard_approximant import GenericStandardApproximant from rrompy.utilities.poly_fitting.polynomial import ( polybases as ppb, polyfitname, polyvander as pvP, polyTimes, PolynomialInterpolator as PI, PolynomialInterpolatorNodal as PIN) from rrompy.utilities.poly_fitting.heaviside import rational2heaviside from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb, RadialBasisInterpolator as RBI) from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramList, interpEng) from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.numerical import pseudoInverse, dot, baseDistanceMatrix from rrompy.utilities.numerical.factorials import multifactorial from rrompy.utilities.numerical.hash_derivative import (nextDerivativeIndices, hashDerivativeToIdx as hashD, hashIdxToDerivative as hashI) from rrompy.utilities.numerical.degree import (reduceDegreeN, degreeTotalToFull, fullDegreeMaxMask, totalDegreeMaxMask) from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert, RROMPyWarning) __all__ = ['RationalInterpolant'] def polyTimesTable(P:interpEng, mus:Np1D, reorder:List[int], derIdxs:List[List[List[int]]], scl : Np1D = None) -> Np2D: """Table of polynomial products.""" if not isinstance(P, PI): raise RROMPyException(("Polynomial to evaluate must be a polynomial " "interpolator.")) Pvals = [[0.] 
* len(derIdx) for derIdx in derIdxs] for j, derIdx in enumerate(derIdxs): nder = len(derIdx) for der in range(nder): derI = hashI(der, P.npar) Pvals[j][der] = P([mus[j]], derI, scl) / multifactorial(derI) return blockDiagDer(Pvals, reorder, derIdxs) def vanderInvTable(vanInv:Np2D, idxs:List[int], reorder:List[int], derIdxs:List[List[List[int]]]) -> Np2D: """Table of Vandermonde pseudo-inverse.""" S = len(reorder) Ts = [None] * len(idxs) for k in range(len(idxs)): invLocs = [None] * len(derIdxs) idxGlob = 0 for j, derIdx in enumerate(derIdxs): nder = len(derIdx) idxGlob += nder idxLoc = np.arange(S)[(reorder >= idxGlob - nder) * (reorder < idxGlob)] invLocs[j] = vanInv[k, idxLoc] Ts[k] = blockDiagDer(invLocs, reorder, derIdxs, [2, 1, 0]) return Ts def blockDiagDer(vals:List[Np1D], reorder:List[int], derIdxs:List[List[List[int]]], permute : List[int] = None) -> Np2D: """Table of derivative values for point confluence.""" S = len(reorder) T = np.zeros((S, S), dtype = np.complex) if permute is None: permute = [0, 1, 2] idxGlob = 0 for j, derIdx in enumerate(derIdxs): nder = len(derIdx) idxGlob += nder idxLoc = np.arange(S)[(reorder >= idxGlob - nder) * (reorder < idxGlob)] val = vals[j] for derI, derIdxI in enumerate(derIdx): for derJ, derIdxJ in enumerate(derIdx): diffIdx = [x - y for (x, y) in zip(derIdxI, derIdxJ)] if all([x >= 0 for x in diffIdx]): diffj = hashD(diffIdx) i1, i2, i3 = np.array([derI, derJ, diffj])[permute] T[idxLoc[i1], idxLoc[i2]] = val[i3] return T class RationalInterpolant(GenericStandardApproximant): """ ROM rational interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; defaults to 'MONOMIAL'; - 'M': degree of rational interpolant numerator; defaults to 'AUTO', i.e. maximum allowed; - 'N': degree of rational interpolant denominator; defaults to 'AUTO', i.e. maximum allowed; - 'polydegreetype': type of polynomial degree; defaults to 'TOTAL'; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; defaults to 1; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; defaults to [-1, -1]; - 'functionalSolve': strategy for minimization of denominator functional; allowed values include 'NORM', 'DOMINANT', 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in main folder for explanation); defaults to 'NORM'; - 'interpTol': tolerance for interpolation; defaults to None; - 'QTol': tolerance for robust rational denominator management; defaults to 0. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'polybasis': type of polynomial basis for interpolation; - 'M': degree of rational interpolant numerator; - 'N': degree of rational interpolant denominator; - 'polydegreetype': type of polynomial degree; - 'radialDirectionalWeights': radial basis weights for interpolant numerator; - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of radial basis weights; - 'functionalSolve': strategy for minimization of denominator functional; - 'interpTol': tolerance for interpolation via numpy.polyfit; - 'QTol': tolerance for robust rational denominator management. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. polybasis: type of polynomial basis for interpolation. M: Numerator degree of approximant. N: Denominator degree of approximant. polydegreetype: Type of polynomial degree. radialDirectionalWeights: Radial basis weights for interpolant numerator. radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial basis weights. functionalSolve: Strategy for minimization of denominator functional. interpTol: Tolerance for interpolation via numpy.polyfit. QTol: Tolerance for robust rational denominator management. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. """ _allowedFunctionalSolveKinds = ["NORM", "DOMINANT", "BARYCENTRIC_NORM", "BARYCENTRIC_AVERAGE"] def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["polybasis", "M", "N", "polydegreetype", "radialDirectionalWeights", "radialDirectionalWeightsAdapt", "functionalSolve", "interpTol", "QTol"], ["MONOMIAL", "AUTO", "AUTO", "TOTAL", 1., [-1., -1.], "NORM", -1, 0.]) super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_rational import TrainedModelRational return TrainedModelRational @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in ppb + rbpb: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. 
Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def polybasis0(self): if "_" in self.polybasis: return self.polybasis.split("_")[0] return self.polybasis @property def functionalSolve(self): """Value of functionalSolve.""" return self._functionalSolve @functionalSolve.setter def functionalSolve(self, functionalSolve): try: functionalSolve = functionalSolve.upper().strip().replace(" ","") if functionalSolve == "BARYCENTRIC": functionalSolve += "_NORM" if functionalSolve not in self._allowedFunctionalSolveKinds: raise RROMPyException(("Prescribed functionalSolve not " "recognized.")) self._functionalSolve = functionalSolve except: RROMPyWarning(("Prescribed functionalSolve not recognized. " "Overriding to 'NORM'.")) self._functionalSolve = "NORM" self._approxParameters["functionalSolve"] = self.functionalSolve @property def interpTol(self): """Value of interpTol.""" return self._interpTol @interpTol.setter def interpTol(self, interpTol): self._interpTol = interpTol self._approxParameters["interpTol"] = self.interpTol @property def radialDirectionalWeights(self): """Value of radialDirectionalWeights.""" return self._radialDirectionalWeights @radialDirectionalWeights.setter def radialDirectionalWeights(self, radialDirectionalWeights): if isinstance(radialDirectionalWeights, Iterable): radialDirectionalWeights = list(radialDirectionalWeights) else: radialDirectionalWeights = [radialDirectionalWeights] self._radialDirectionalWeights = radialDirectionalWeights self._approxParameters["radialDirectionalWeights"] = ( self.radialDirectionalWeights) @property def radialDirectionalWeightsAdapt(self): """Value of radialDirectionalWeightsAdapt.""" return self._radialDirectionalWeightsAdapt @radialDirectionalWeightsAdapt.setter def radialDirectionalWeightsAdapt(self, radialDirectionalWeightsAdapt): self._radialDirectionalWeightsAdapt = radialDirectionalWeightsAdapt self._approxParameters["radialDirectionalWeightsAdapt"] = ( self.radialDirectionalWeightsAdapt) @property def M(self): """Value of M.""" return self._M @M.setter def M(self, M): if isinstance(M, str): M = M.strip().replace(" ","") if "-" not in M: M = M + "-0" self._M_isauto, self._M_shift = True, int(M.split("-")[-1]) M = 0 if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M def _setMAuto(self): self.M = max(0, reduceDegreeN(self.S, self.S, self.npar, self.polydegreetype) - self._M_shift) vbMng(self, "MAIN", "Automatically setting M to {}.".format(self.M), 25) @property def N(self): """Value of N.""" return self._N @N.setter def N(self, N): if isinstance(N, str): N = N.strip().replace(" ","") if "-" not in N: N = N + "-0" self._N_isauto, self._N_shift = True, int(N.split("-")[-1]) N = 0 if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N def _setNAuto(self): self.N = max(0, reduceDegreeN(self.S, self.S, self.npar, self.polydegreetype) - self._N_shift) vbMng(self, "MAIN", "Automatically setting N to {}.".format(self.N), 25) @property def polydegreetype(self): """Value of polydegreetype.""" return self._polydegreetype @polydegreetype.setter def polydegreetype(self, polydegreetype): try: polydegreetype = polydegreetype.upper().strip().replace(" ","") if polydegreetype not in ["TOTAL", "FULL"]: raise RROMPyException(("Prescribed polydegreetype not " "recognized.")) self._polydegreetype = polydegreetype except: RROMPyWarning(("Prescribed polydegreetype not 
recognized. " "Overriding to 'TOTAL'.")) self._polydegreetype = "TOTAL" self._approxParameters["polydegreetype"] = self.polydegreetype @property def QTol(self): """Value of tolerance for robust rational denominator management.""" return self._QTol @QTol.setter def QTol(self, QTol): if QTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) QTol = 0. self._QTol = QTol self._approxParameters["QTol"] = self.QTol def resetSamples(self): """Reset samples.""" super().resetSamples() self._musUniqueCN = None self._derIdxs = None self._reorder = None def _setupInterpolationIndices(self): """Setup parameters for polyvander.""" if self._musUniqueCN is None or len(self._reorder) != len(self.mus): self._musUniqueCN, musIdxsTo, musIdxs, musCount = ( self.trainedModel.centerNormalize(self.mus).unique( return_index = True, return_inverse = True, return_counts = True)) self._musUnique = self.mus[musIdxsTo] self._derIdxs = [None] * len(self._musUniqueCN) self._reorder = np.empty(len(musIdxs), dtype = int) filled = 0 for j, cnt in enumerate(musCount): self._derIdxs[j] = nextDerivativeIndices([], self.mus.shape[1], cnt) jIdx = np.nonzero(musIdxs == j)[0] self._reorder[jIdx] = np.arange(filled, filled + cnt) filled += cnt def _setupDenominator(self): """Compute rational denominator.""" RROMPyAssert(self._mode, message = "Cannot setup denominator.") vbMng(self, "INIT", "Starting computation of denominator.", 7) if hasattr(self, "_N_isauto"): self._setNAuto() else: N = reduceDegreeN(self.N, self.S, self.npar, self.polydegreetype) if N < self.N: RROMPyWarning(("N too large compared to S. Reducing N by " "{}").format(self.N - N)) self.N = N while self.N > 0: if self.functionalSolve != "NORM" and self.npar > 1: RROMPyWarning(("Strategy for functional optimization must be " "'NORM' for more than one parameter. " "Overriding to 'NORM'.")) self.functionalSolve = "NORM" if (self.functionalSolve[:11] == "BARYCENTRIC" and self.N + 1 < self.S): RROMPyWarning(("Barycentric strategy cannot be applied with " "Least Squares. Overriding to 'NORM'.")) self.functionalSolve = "NORM" if self.functionalSolve[:11] == "BARYCENTRIC": invD, TN = None, None self._setupInterpolationIndices() if len(self._musUnique) != self.S: RROMPyWarning(("Barycentric functional optimization " "cannot be applied to repeated samples. " "Overriding to 'NORM'.")) self.functionalSolve = "NORM" if self.functionalSolve[:11] != "BARYCENTRIC": invD, TN = self._computeInterpolantInverseBlocks() if self.POD == 1: sampleE = self.samplingEngine.Rscale Rscaling = None elif self.POD == 1/2: sampleE = self.samplingEngine.samples_normal Rscaling = self.samplingEngine.Rscale else: sampleE = self.samplingEngine.samples Rscaling = None ev, eV = self.findeveVG(sampleE, invD, TN, Rscaling) if self.functionalSolve[:11] == "BARYCENTRIC": break nevBad = np.sum(np.abs(ev / ev[-1]) < self.QTol) if not nevBad: break if self.npar == 1: dN = nevBad else: #if self.npar > 1 and self.functionalSolve == "NORM": dN = self.N - reduceDegreeN(self.N, len(eV) - nevBad, self.npar, self.polydegreetype) vbMng(self, "MAIN", ("Smallest {} eigenvalue{} below tolerance. 
Reducing N by " "{}.").format(nevBad, "s" * (nevBad > 1), dN), 10) self.N = self.N - dN if hasattr(self, "_gram"): del self._gram if self.N <= 0: self.N, eV = 0, np.ones((1,) * self.npar, dtype = np.complex) if self.N > 0 and self.functionalSolve[:11] == "BARYCENTRIC": q = PIN() q.polybasis, q.nodes = self.polybasis0, eV else: q = PI() q.npar, q.polybasis = self.npar, self.polybasis0 if self.polydegreetype == "TOTAL": q.coeffs = degreeTotalToFull(tuple([self.N + 1] * self.npar), self.npar, eV) else: q.coeffs = eV.reshape([self.N + 1] * self.npar) vbMng(self, "DEL", "Done computing denominator.", 7) return q def _setupNumerator(self): """Compute rational numerator.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of numerator.", 7) self._setupInterpolationIndices() Qevaldiag = polyTimesTable(self.trainedModel.data.Q, self._musUniqueCN, self._reorder, self._derIdxs, self.scaleFactorRel) if self.POD == 1: Qevaldiag = Qevaldiag.dot(self.samplingEngine.Rscale.T) elif self.POD == 1/2: Qevaldiag = Qevaldiag * self.samplingEngine.Rscale if hasattr(self, "_M_isauto"): self._setMAuto() M = self.M else: M = reduceDegreeN(self.M, self.S, self.npar, self.polydegreetype) if M < self.M: RROMPyWarning(("M too large compared to S. Reducing M by " "{}").format(self.M - M)) self.M = M while self.M >= 0: pParRest = [self.M, self.polybasis, self.verbosity >= 5, self.polydegreetype == "TOTAL", {"derIdxs": self._derIdxs, "reorder": self._reorder, "scl": self.scaleFactorRel}] if self.polybasis in ppb: p = PI() else: self.computeScaleFactor() rDWEff = np.array([w * f for w, f in zip( self.radialDirectionalWeights, self.scaleFactor)]) pParRest = pParRest[: 2] + [rDWEff] + pParRest[2 :] pParRest[-1]["optimizeScalingBounds"] = ( self.radialDirectionalWeightsAdapt) p = RBI() if self.polybasis in ppb + rbpb: pParRest += [{"rcond": self.interpTol}] wellCond, msg = p.setupByInterpolation(self._musUniqueCN, Qevaldiag, *pParRest) vbMng(self, "MAIN", msg, 5) if wellCond: break vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing M " "by 1."), 10) self.M = self.M - 1 if self.M < 0: raise RROMPyException(("Instability in computation of numerator. " "Aborting.")) self.M = M vbMng(self, "DEL", "Done computing numerator.", 7) return p def setupApprox(self) -> int: """Compute rational interpolant.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") - vbMng(self, "INIT", "Setting up {}.". 
format(self.name()), 5) + vbMng(self, "INIT", "Setting up {}.".format(self.name()), 5) self.computeSnapshots() self._setupTrainedModel(self.samplingEngine.projectionMatrix) self._setupRational(self._setupDenominator()) self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def _setupRational(self, Q:interpEng, P : interpEng = None): vbMng(self, "INIT", "Starting approximant finalization.", 5) self.trainedModel.data.Q = Q if P is None: P = self._setupNumerator() while self.N > 0 and self.npar == 1: if self.HFEngine._ignoreResidues: pls = Q.roots() cfs, projMat = None, None else: cfs, pls, _ = rational2heaviside(P, Q) cfs = cfs[: self.N].T if self.POD != 1: projMat = self.samplingEngine.projectionMatrix else: projMat = None foci = self.sampler.normalFoci() plsA = self.mapParameterList(self.mapParameterList(self.mu0)(0, 0) + self.scaleFactor * pls, "B")(0) idxBad = self.HFEngine.flagBadPolesResiduesAbsolute(plsA, cfs, projMat) if not self.HFEngine._ignoreResidues: cfs[:, idxBad] = 0. idxBad += self.HFEngine.flagBadPolesResiduesRelative(pls, cfs, projMat, foci) idxBad = idxBad > 0 if not np.any(idxBad): break vbMng(self, "MAIN", "Removing {} spurious pole{} out of {}.".format( np.sum(idxBad), "s" * (np.sum(idxBad) > 1), self.N), 10) if isinstance(Q, PIN): Q.nodes = Q.nodes[idxBad == False] else: Q = PI() Q.npar = self.npar Q.polybasis = self.polybasis0 Q.coeffs = np.ones(1, dtype = np.complex) for pl in pls[idxBad == False]: Q.coeffs = polyTimes(Q.coeffs, [- pl, 1.], Pbasis = Q.polybasis, Rbasis = Q.polybasis) Q.coeffs /= np.linalg.norm(Q.coeffs) self.trainedModel.data.Q = Q self.N = Q.deg[0] P = self._setupNumerator() self.trainedModel.data.P = P vbMng(self, "DEL", "Terminated approximant finalization.", 5) def _computeInterpolantInverseBlocks(self) -> Tuple[List[Np2D], Np2D]: """ Compute inverse factors for minimal interpolant target functional. """ RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.") self._setupInterpolationIndices() pvPPar = [self.polybasis0, self._derIdxs, self._reorder, self.scaleFactorRel] full = self.N + 1 == self.S == len(self._musUniqueCN) if full: mus = self._musUniqueCN[self._reorder] dist = baseDistanceMatrix(mus, magnitude = False)[..., 0] dist[np.arange(self.N + 1), np.arange(self.N + 1)] = multifactorial([self.N]) fitinvE = np.prod(dist, axis = 1) ** -1 vbMng(self, "MAIN", ("Evaluating quasi-Lagrangian basis of degree {} at {} " "sample points.").format(self.N, self.N + 1), 5) invD = [np.diag(fitinvE)] TN = pvP(self._musUniqueCN, self.N, *pvPPar) else: while self.N >= 0: if self.polydegreetype == "TOTAL": Neff = self.N idxsB = totalDegreeMaxMask(self.N, self.npar) else: #if self.polydegreetype == "FULL": Neff = [self.N] * self.npar idxsB = fullDegreeMaxMask(self.N, self.npar) TN = pvP(self._musUniqueCN, Neff, *pvPPar) fitOut = pseudoInverse(TN, rcond = self.interpTol, full = True) vbMng(self, "MAIN", ("Fitting {} samples with degree {} through {}... " "Conditioning of pseudoinverse system: {:.4e}.").format( TN.shape[0], self.N, polyfitname(self.polybasis0), fitOut[1][1][0] / fitOut[1][1][-1]), 5) if fitOut[1][0] == TN.shape[1]: fitinv = fitOut[0][idxsB, :] break vbMng(self, "MAIN", "Polyfit is poorly conditioned. Reducing N by 1.", 10) self.N = self.N - 1 if self.N < 0: raise RROMPyException(("Instability in computation of " "denominator. 
Aborting.")) invD = vanderInvTable(fitinv, idxsB, self._reorder, self._derIdxs) return invD, TN def findeveVG(self, sampleE:Np2D, invD:List[Np2D], TN:Np2D, Rscaling : Np1D = None) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of rational denominator matrix, or of its right chol factor if POD. """ RROMPyAssert(self._mode, message = "Cannot solve spectral problem.") if self.POD == 1: if self.functionalSolve[:11] == "BARYCENTRIC": Rstack = sampleE else: vbMng(self, "INIT", "Building generalized half-gramian.", 10) S, eWidth = sampleE.shape[0], len(invD) Rstack = np.zeros((S * eWidth, TN.shape[1]), dtype = np.complex) for k in range(eWidth): Rstack[k * S : (k + 1) * S, :] = dot(sampleE, dot(invD[k], TN)) vbMng(self, "DEL", "Done building half-gramian.", 10) _, s, Vh = np.linalg.svd(Rstack, full_matrices = False) evG, eVG = s[::-1], Vh[::-1].T.conj() evExp, probKind = -2., "svd " else: if not hasattr(self, "_gram"): vbMng(self, "INIT", "Building gramian matrix.", 10) self._gram = self.HFEngine.innerProduct(sampleE, sampleE, is_state = True) if Rscaling is not None: self._gram = (self._gram.T * Rscaling.conj()).T * Rscaling vbMng(self, "DEL", "Done building gramian.", 10) if self.functionalSolve[:11] == "BARYCENTRIC": G = self._gram else: vbMng(self, "INIT", "Building generalized gramian.", 10) G = np.zeros((TN.shape[1],) * 2, dtype = np.complex) for k in range(len(invD)): iDkN = dot(invD[k], TN) G += dot(dot(self._gram, iDkN).T, iDkN.conj()).T vbMng(self, "DEL", "Done building gramian.", 10) evG, eVG = np.linalg.eigh(G) evExp, probKind = -1., "eigen" if (self.functionalSolve in ["NORM", "BARYCENTRIC_NORM"] or np.sum(np.abs(evG) < np.finfo(float).eps * np.abs(evG[-1]) * len(evG)) == 1): eV = eVG[:, 0] elif self.functionalSolve == "BARYCENTRIC_AVERAGE": eV = eVG.dot(evG ** evExp * np.sum(eVG, axis = 0).conj()) else: eV = eVG.dot(evG ** evExp * eVG[0].conj()) vbMng(self, "MAIN", ("Solved {}problem of size {} with condition number " "{:.4e}.").format(probKind, len(evG) - 1, evG[-1] / evG[1]), 5) if self.functionalSolve[:11] == "BARYCENTRIC": S, mus = len(eV), self._musUniqueCN[self._reorder].flatten() arrow = np.zeros((S + 1,) * 2, dtype = np.complex) arrow[1 :, 0] = 1. arrow[0, 1 :] = eV arrow[np.arange(1, S + 1), np.arange(1, S + 1)] = mus active = np.eye(S + 1) active[0, 0] = 0. poles, qTm1 = eig(arrow, active) eVgood = np.isinf(poles) + np.isnan(poles) == False poles = poles[eVgood] self.N = len(poles) if self.QTol > 0: # compare optimal score with self.N poles to those obtained # by removing one of the poles qTm1 = qTm1[1 :, eVgood].conj() ** -1. dists = mus.reshape(-1, 1) - mus dists[np.arange(S), np.arange(S)] = multifactorial([self.N]) dists = np.prod(dists, axis = 1).conj() ** -1. qComp = np.empty((self.N + 1, S), dtype = np.complex) qComp[0] = dists * np.prod(qTm1, axis = 1) for j in range(self.N): qTmj = np.prod(qTm1[:, np.arange(self.N) != j], axis = 1) qComp[j + 1] = dists * qTmj Lqs = qComp.dot(eVG) scores = np.real(np.sum(Lqs * evG ** -evExp * Lqs.conj(), axis = 1)) evBad = scores[1 :] < self.QTol * scores[0] nevBad = np.sum(evBad) if nevBad: vbMng(self, "MAIN", ("Suboptimal pole{} detected. Reducing N by " "{}.").format("s" * (nevBad > 1), nevBad), 10) self.N = self.N - nevBad poles = poles[evBad == False] eV = poles return evG[1 :], eV def getResidues(self, *args, **kwargs) -> Tuple[paramList, Np2D]: """ Obtain approximant residues. Returns: Matrix with residues as columns. 
""" return self.trainedModel.getResidues(*args, **kwargs) diff --git a/rrompy/reduction_methods/standard/reduced_basis.py b/rrompy/reduction_methods/standard/reduced_basis.py index 8e13c13..a3efcdd 100644 --- a/rrompy/reduction_methods/standard/reduced_basis.py +++ b/rrompy/reduction_methods/standard/reduced_basis.py @@ -1,199 +1,199 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from .generic_standard_approximant import GenericStandardApproximant from rrompy.hfengines.base.linear_affine_engine import checkIfAffine from .reduced_basis_utils import projectAffineDecomposition from rrompy.utilities.base.types import Np1D, Np2D, List, Tuple, sampList from rrompy.utilities.base import verbosityManager as vbMng from rrompy.utilities.exception_manager import (RROMPyWarning, RROMPyException, RROMPyAssert) __all__ = ['ReducedBasis'] class ReducedBasis(GenericStandardApproximant): """ ROM RB approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': kind of snapshots orthogonalization; allowed values include 0, 1/2, and 1; defaults to 1, i.e. POD; - 'scaleFactorDer': scaling factors for derivative computation; defaults to 'AUTO'; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'R': rank for Galerkin projection; defaults to 'AUTO', i.e. maximum allowed; - 'PODTolerance': tolerance for snapshots POD; defaults to -1. Defaults to empty dict. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterListSoft: Recognized keys of soft approximant parameters: - 'POD': kind of snapshots orthogonalization; - 'scaleFactorDer': scaling factors for derivative computation; - 'R': rank for Galerkin projection; - 'PODTolerance': tolerance for snapshots POD. parameterListCritical: Recognized keys of critical approximant parameters: - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator. verbosity: Verbosity level. POD: Kind of snapshots orthogonalization. scaleFactorDer: Scaling factors for derivative computation. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. R: Rank for Galerkin projection. PODTolerance: Tolerance for snapshots POD. muBounds: list of bounds for parameter values. samplingEngine: Sampling engine. uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as sampleList. 
lastSolvedHF: Parameter(s) corresponding to last computed high fidelity solution(s) as parameterList. uApproxReduced: Reduced approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApproxReduced: Parameter(s) corresponding to last computed reduced approximate solution(s) as parameterList. uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as sampleList. lastSolvedApprox: Parameter(s) corresponding to last computed approximate solution(s) as parameterList. """ def __init__(self, *args, **kwargs): self._preInit() self._addParametersToList(["R", "PODTolerance"], ["AUTO", -1]) if not hasattr(self, "_affine_lvl"): self._affine_lvl = [] self._affine_lvl += [1] super().__init__(*args, **kwargs) self._postInit() @property def tModelType(self): from .trained_model.trained_model_reduced_basis import ( TrainedModelReducedBasis) return TrainedModelReducedBasis @property def R(self): """Value of R. Its assignment may change S.""" return self._R @R.setter def R(self, R): if isinstance(R, str): R = R.strip().replace(" ","") if "-" not in R: R = R + "-0" self._R_isauto, self._R_shift = True, int(R.split("-")[-1]) R = 0 if R < 0: raise RROMPyException("R must be non-negative.") self._R = R self._approxParameters["R"] = self.R def _setRAuto(self): self.R = max(0, self.S - self._R_shift) vbMng(self, "MAIN", "Automatically setting R to {}.".format(self.R), 25) @property def PODTolerance(self): """Value of PODTolerance.""" return self._PODTolerance @PODTolerance.setter def PODTolerance(self, PODTolerance): self._PODTolerance = PODTolerance self._approxParameters["PODTolerance"] = self.PODTolerance def _setupProjectionMatrix(self): """Compute projection matrix.""" RROMPyAssert(self._mode, message = "Cannot setup numerator.") vbMng(self, "INIT", "Starting computation of projection matrix.", 7) if hasattr(self, "_R_isauto"): self._setRAuto() else: if self.S < self.R: RROMPyWarning(("R too large compared to S. Reducing R by " "{}").format(self.R - self.S)) self.R = self.S if self.POD == 1: U, s, _ = np.linalg.svd(self.samplingEngine.Rscale) cs = np.cumsum(np.abs(s[::-1]) ** 2.) nTolTrunc = np.argmax(cs > self.PODTolerance * cs[-1]) nPODTrunc = min(self.S - nTolTrunc, self.R) pMat = self.samplingEngine.projectionMatrix.dot(U[:, : nPODTrunc]) else: pMat = self.samplingEngine.projectionMatrix[:, : self.R] vbMng(self, "MAIN", ("Assembled {}x{} projection matrix from {} " "samples.").format(*(pMat.shape), self.S), 5) vbMng(self, "DEL", "Done computing projection matrix.", 7) return pMat def setupApprox(self) -> int: - """Compute RB projection matrix.""" + """Compute RB approximation.""" if self.checkComputedApprox(): return -1 RROMPyAssert(self._mode, message = "Cannot setup approximant.") vbMng(self, "INIT", "Setting up {}.".
format(self.name()), 5) self.computeSnapshots() firstRun = self.trainedModel is None pMat = self._setupProjectionMatrix() self._setupTrainedModel(pMat) if firstRun: self.trainedModel.data.affinePoly = self.HFEngine.affinePoly self.trainedModel.data.thAs = self.HFEngine.thAs self.trainedModel.data.thbs = self.HFEngine.thbs ARBs, bRBs = self.assembleReducedSystem(pMat) self.trainedModel.data.ARBs = ARBs self.trainedModel.data.bRBs = bRBs self.trainedModel.data.approxParameters = copy(self.approxParameters) vbMng(self, "DEL", "Done setting up approximant.", 5) return 0 def assembleReducedSystem(self, pMat : sampList = None, pMatOld : sampList = None)\ -> Tuple[List[Np2D], List[Np1D]]: """Build affine blocks of RB linear system through projections.""" if pMat is None: self.setupApprox() ARBs = self.trainedModel.data.ARBs bRBs = self.trainedModel.data.bRBs else: self.HFEngine.buildA() self.HFEngine.buildb() checkIfAffine(self.HFEngine, "apply RB method", False, self._affine_lvl) vbMng(self, "INIT", "Projecting affine terms of HF model.", 10) ARBsOld = None if pMatOld is None else self.trainedModel.data.ARBs bRBsOld = None if pMatOld is None else self.trainedModel.data.bRBs ARBs, bRBs = projectAffineDecomposition(self.HFEngine.As, self.HFEngine.bs, pMat, ARBsOld, bRBsOld, pMatOld) vbMng(self, "DEL", "Done projecting affine terms.", 10) return ARBs, bRBs diff --git a/rrompy/utilities/numerical/point_distances.py b/rrompy/utilities/numerical/point_distances.py index a6adb03..c21f3f7 100644 --- a/rrompy/utilities/numerical/point_distances.py +++ b/rrompy/utilities/numerical/point_distances.py @@ -1,86 +1,76 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
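The projection carried out by assembleReducedSystem (via projectAffineDecomposition) is plain Galerkin compression of each affine term; a minimal sketch with numpy stand-ins for HFEngine.As, HFEngine.bs, and the projection matrix:
```
import numpy as np

rng = np.random.default_rng(2)
n, r = 200, 5
As = [rng.standard_normal((n, n)) for _ in range(3)]  # affine matrix terms
bs = [rng.standard_normal(n) for _ in range(2)]       # affine RHS terms
pMat = np.linalg.qr(rng.standard_normal((n, r)))[0]   # reduced basis, n x r

# each block shrinks from n x n (resp. n) to r x r (resp. r)
ARBs = [pMat.T.conj().dot(A.dot(pMat)) for A in As]
bRBs = [pMat.T.conj().dot(b) for b in bs]
```
At query time the reduced system is assembled from these small blocks and the affine coefficients at mu, so solves cost nothing in the full-order dimension n.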
# import numpy as np from scipy.sparse import spmatrix -from rrompy.utilities.base.types import Tuple, List, Np1D, Np2D, HFEng +from rrompy.utilities.base.types import List, Np1D, Np2D, HFEng __all__ = ['baseDistanceMatrix', 'vectorDistanceMatrix', 'doubleDistanceMatrix'] def baseDistanceMatrix(x:Np2D, y : Np2D = None, npar : int = None, magnitude : bool = True, weights : Np1D = None) -> Np2D: if npar is None: npar = x.shape[1] if x.ndim > 1 else 1 if y is None: y = x if x.ndim != 3 or x.shape[1] != npar: x = x.reshape(-1, 1, npar) if y.ndim != 2 or y.shape[1] != npar: y = y.reshape(-1, npar) dist = np.repeat(x, len(y), axis = 1) - y if weights is not None: dist *= np.array(weights).flatten() if magnitude: if dist.shape[2] == 1: dist = np.abs(dist)[..., 0] else: dist = np.sum(np.abs(dist) ** 2., axis = 2) ** .5 return dist def vectorDistanceMatrix(X:Np2D, Y:Np2D, HFEngine : HFEng = None, - is_state : bool = True, chordalRadius : float = -1, - Xbad : List[bool] = None, + is_state : bool = True, Xbad : List[bool] = None, Ybad : List[bool] = None) -> Np2D: if HFEngine is None: innerT = np.real(Y.T.conj().dot(X)) if isinstance(X, (spmatrix,)): norm2X = np.sum(np.abs(X.todense()) ** 2., axis = 0) else: norm2X = np.sum(np.abs(X) ** 2., axis = 0) if isinstance(Y, (spmatrix,)): norm2Y = np.sum(np.abs(Y.todense()) ** 2., axis = 0) else: norm2Y = np.sum(np.abs(Y) ** 2., axis = 0) else: innerT = np.real(HFEngine.innerProduct(X, Y, is_state = is_state)) norm2X = HFEngine.norm(X, is_state = is_state) ** 2. norm2Y = HFEngine.norm(Y, is_state = is_state) ** 2. if Xbad is None: Xbad = np.where(np.isinf(norm2X))[0] if Ybad is None: Ybad = np.where(np.isinf(norm2Y))[0] dist2T = (np.tile(norm2Y.reshape(-1, 1), len(norm2X)) + norm2X.reshape(1, -1) - 2 * innerT) - if chordalRadius <= 0: - dist2T[:, Xbad], dist2T[Ybad, :] = np.inf, np.inf - else: - dist2T[:, Xbad], dist2T[Ybad, :] = 1., 1. + dist2T[:, Xbad], dist2T[Ybad, :] = np.inf, np.inf dist2T[np.ix_(Ybad, Xbad)] = 0. dist2T[dist2T < 0.] = 0. - if chordalRadius <= 0: return dist2T.T ** .5 - norm2X[Xbad], norm2Y[Ybad] = 0., 0. - norm2X, norm2Y = norm2X / chordalRadius ** 2., norm2Y / chordalRadius ** 2. - return ((dist2T / (norm2X + 1.)).T / (norm2Y + 1.)) ** .5 + return dist2T.T ** .5 def doubleDistanceMatrix(x:Np1D, y:Np1D, w : float = 0, X : Np2D = None, Y : Np2D = None, HFEngine : HFEng = None, - is_state : bool = True, - chordalRadius : Tuple[float, float] = [-1] * 2) \ - -> Np2D: + is_state : bool = True) -> Np2D: Xbad, Ybad = np.where(np.isinf(x))[0], np.where(np.isinf(y))[0] dist = vectorDistanceMatrix(np.reshape(x, [1, -1]), np.reshape(y, [1, -1]), - chordalRadius = chordalRadius[0], Xbad = Xbad, - Ybad = Ybad) + Xbad = Xbad, Ybad = Ybad) if w == 0: return dist - distAdj = vectorDistanceMatrix(X, Y, HFEngine, is_state, chordalRadius[1], - Xbad = Xbad, Ybad = Ybad) + distAdj = vectorDistanceMatrix(X, Y, HFEngine, is_state, Xbad = Xbad, + Ybad = Ybad) return (dist + w * distAdj) / (1. + w) diff --git a/rrompy/utilities/numerical/point_matching.py b/rrompy/utilities/numerical/point_matching.py index 8c65911..c014228 100644 --- a/rrompy/utilities/numerical/point_matching.py +++ b/rrompy/utilities/numerical/point_matching.py @@ -1,120 +1,112 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. 
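As a quick check of the conventions in baseDistanceMatrix above (rows index the first argument, columns the second, and `weights` rescales each parameter direction before magnitudes are taken), assuming the module is importable:
```
import numpy as np
from rrompy.utilities.numerical.point_distances import baseDistanceMatrix

x = np.array([[0.], [1.], [3.]])
D = baseDistanceMatrix(x)                    # pairwise |x_i - x_j|, shape (3, 3)
Dw = baseDistanceMatrix(x, weights = [2.])   # same points, direction rescaled by 2
print(D[2, 0], Dw[2, 0])                     # 3.0 6.0
```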
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from scipy.optimize import linear_sum_assignment as LSA from .tensor_la import dot from .point_distances import baseDistanceMatrix, doubleDistanceMatrix from rrompy.utilities.base.types import Tuple, List, ListAny, Np1D, Np2D, HFEng from rrompy.utilities.exception_manager import RROMPyAssert __all__ = ['pointMatching', 'rationalFunctionMatching'] def pointMatching(distMatrix:Np2D) -> Tuple[Np1D, Np1D]: return LSA(distMatrix) def rationalFunctionMatching(poles:List[Np1D], coeffs:List[Np2D], featPts:Np2D, matchingWeight:float, supps:ListAny, projMat:Np2D, HFEngine : HFEng = None, - is_state : bool = True, root : int = None, - chordalRadius : Tuple[float, float] = [-1] * 2) \ + is_state : bool = True, root : int = None) \ -> Tuple[List[Np1D], List[Np2D]]: """ Match poles and residues of a set of rational functions. Args: poles: List of (lists of) poles. coeffs: List of (lists of) residues. featPts: Marginal parameters corresponding to rational models. matchingWeight: Matching weight in distance computation. supps: Support indices for projection matrix. projMat: Projection matrix for residues. HFEngine(optional): Engine for distance evaluation. Defaults to None, i.e. Euclidean metric. is_state(optional): Whether residues are of system state. Defaults to True. root(optional): Root of search tree. Defaults to None, i.e. automatically chosen. - chordalRadius(optional): Radius to be used in chordal metric. If <= 0, - Euclidean metric is used. Defaults to [-1, -1]. Returns: Matched list of (lists of) poles and list of (lists of) residues. """ M, N = len(featPts), len(poles[0]) RROMPyAssert(len(poles), M, "Number of rational functions to be matched") RROMPyAssert(len(coeffs), M, "Number of rational functions to be matched") if M <= 1: return poles, coeffs featDist = baseDistanceMatrix(featPts) free = list(range(M)) if root is None: #start from sample point with closest neighbor, #among those with no inf pole notInfPls = np.where([np.any(np.isinf(p)) == False for p in poles])[0] MEff = len(notInfPls) if MEff == 1: root = notInfPls[0] else: featDistEff = featDist[notInfPls][:, notInfPls] root = notInfPls[np.argpartition(featDistEff.flatten(), MEff)[MEff] % MEff] polesC = copy(poles) if matchingWeight != 0: - resC = [dot(projMat[:, supps[j] : supps[j] + coeffs[j].shape[1]], - coeffs[j][: N].T) for j in range(M)] - if chordalRadius[1] == "AUTO": - if HFEngine is None: - norm2S = [np.sum(np.abs(c) ** 2., axis = 0) for c in resC] - else: - norm2S = [HFEngine.norm(c, is_state = is_state) ** 2. 
- for c in resC] - chordalRadius[1] = np.mean(norm2S) + if hasattr(projMat, "shape"): + resC = [dot(projMat[:, supps[j] : supps[j] + coeffs[j].shape[1]], + coeffs[j][: N].T) for j in range(M)] + else: + resC = [dot(projMat, coeffs[j][: N].T) for j in range(M)] fixed = [free.pop(root)] for j in range(M - 1, 0, -1): #find closest point idx = np.argmin(featDist[np.ix_(fixed, free)].flatten()) Ifix = fixed[idx // j] fixed += [free.pop(idx % j)] Ifree = fixed[-1] plsfix, plsfree = polesC[Ifix], polesC[Ifree] freeInf = np.where(np.isinf(plsfree))[0] freeNotInf = np.where(np.isinf(plsfree) == False)[0] plsfree = plsfree[freeNotInf] if matchingWeight == 0: resfix, resfree = None, None else: resfix, resfree = resC[Ifix], resC[Ifree][:, freeNotInf] #build assignment distance matrix distj = doubleDistanceMatrix(plsfree, plsfix, matchingWeight, resfree, - resfix, HFEngine, is_state, - chordalRadius) + resfix, HFEngine, is_state) reordering = pointMatching(distj)[1] reorderingInf = [x for x in range(N) if x not in reordering] #reorder good poles poles[Ifree][reordering], poles[Ifree][reorderingInf] = ( poles[Ifree][freeNotInf], poles[Ifree][freeInf]) coeffs[Ifree][reordering], coeffs[Ifree][reorderingInf] = ( coeffs[Ifree][freeNotInf], coeffs[Ifree][freeInf]) #transfer missing poles over polesC[Ifree][reordering], polesC[Ifree][reorderingInf] = ( polesC[Ifree][freeNotInf], polesC[Ifix][reorderingInf]) if matchingWeight != 0: resC[Ifree][:, reordering], resC[Ifree][:, reorderingInf] = ( resC[Ifree][:, freeNotInf], resC[Ifix][:, reorderingInf]) return poles, coeffs diff --git a/tests/4_reduction_methods_multiD/greedy_pivoted_rational_2d.py b/tests/4_reduction_methods_multiD/greedy_pivoted_rational_2d.py index 3d03068..6972df9 100644 --- a/tests/4_reduction_methods_multiD/greedy_pivoted_rational_2d.py +++ b/tests/4_reduction_methods_multiD/greedy_pivoted_rational_2d.py @@ -1,87 +1,86 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from matrix_random import matrixRandom from rrompy.reduction_methods import ( RationalInterpolantPivotedGreedyPoleMatch as RIPG, RationalInterpolantGreedyPivotedGreedyPoleMatch as RIGPG) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, SparseGridSampler as SGS) def test_pivoted_greedy(): mu = [5.05, 7.1] mu0 = [5., 7.] 
solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": True, "S": 5, "polybasis": "CHEBYSHEV", "samplerPivot": QS([4.75, 5.25], "CHEBYSHEV"), "SMarginal": 3, "greedyTolMarginal": 1e-2, "radialDirectionalWeightsMarginal": 2., "polybasisMarginal": "MONOMIAL_GAUSSIAN", "paramsMarginal":{"MMarginal": 1, "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]}, "errorEstimatorKindMarginal": "LOOK_AHEAD_RECOVER", "matchingWeight": 1., "samplerMarginal":SGS([6.75, 7.25])} approx = RIPG([0], solver, mu0, approxParameters = params, verbosity = 0) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) assert np.isclose(errNP / solver.norm(uh), 6.0631706e-04, rtol = 1) def test_greedy_pivoted_greedy(): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-3, "S": 2, "polybasis": "CHEBYSHEV", "samplerPivot": QS([4.75, 5.25], "CHEBYSHEV"), "samplerTrainSet": QS([4.75, 5.25], "CHEBYSHEV"), "SMarginal": 3, - "greedyTolMarginal": 1e-2, + "maxIterMarginal": 10, "greedyTolMarginal": 1e-2, "radialDirectionalWeightsMarginal": 2., "polybasisMarginal": "MONOMIAL_GAUSSIAN", "paramsMarginal":{"MMarginal": 1}, "errorEstimatorKindMarginal": "LOOK_AHEAD_RECOVER", - "matchingWeight": 1., "matchingChordalRadius": [1., "AUTO"], - "samplerMarginal":SGS([6.75, 7.25])} + "matchingWeight": 1., "samplerMarginal":SGS([6.75, 7.25])} approx = RIGPG([0], solver, mu0, approxParameters = params, verbosity = 0) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) assert np.isclose(errNP / solver.norm(uh), .106066, rtol = 1) diff --git a/tests/4_reduction_methods_multiD/pivoted_rational_2d.py b/tests/4_reduction_methods_multiD/pivoted_rational_2d.py index fb902eb..395aa25 100644 --- a/tests/4_reduction_methods_multiD/pivoted_rational_2d.py +++ b/tests/4_reduction_methods_multiD/pivoted_rational_2d.py @@ -1,113 +1,112 @@ # Copyright (C) 2018-2020 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
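The PoleMatch approximants exercised above pair poles across marginal samples through pointMatching, i.e. scipy's linear_sum_assignment applied to a pole (and, when matchingWeight > 0, residue) distance matrix. A toy illustration of the pairing step, with made-up pole sets:
```
import numpy as np
from scipy.optimize import linear_sum_assignment as LSA

polesA = np.array([1. + 1.j, 3., 5.])      # reference pole set
polesB = np.array([5.1, .9 + 1.j, 2.8])    # same poles, perturbed and shuffled
dist = np.abs(polesB.reshape(-1, 1) - polesA.reshape(1, -1))
rows, cols = LSA(dist)                     # optimal pairing, as in pointMatching
print(cols)                                # [2 0 1]: polesB[i] matches polesA[cols[i]]
```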
# import numpy as np from matrix_random import matrixRandom from rrompy.reduction_methods import ( RationalInterpolantPivotedPoleMatch as RIP, RationalInterpolantGreedyPivotedPoleMatch as RIGP) from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS, ManualSampler as MS) def test_pivoted_uniform(): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": True, "S": 5, "polybasis": "CHEBYSHEV", "samplerPivot": QS([4.75, 5.25], "CHEBYSHEV"), "SMarginal": 5, "polybasisMarginal": "MONOMIAL", "matchingWeight": 1., "samplerMarginal": QS([6.75, 7.25], "UNIFORM")} approx = RIP([0], solver, mu0, approxParameters = params, verbosity = 0) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) assert np.isclose(errNP / solver.norm(uh), 6.0631706e-04, rtol = 1) def test_pivoted_manual_grid(capsys): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": False, "S": 5, "polybasis": "MONOMIAL", "samplerPivot": MS([4.75, 5.25], np.array([5.]), normalFoci = [0., 0.]), "SMarginal": 5, "polybasisMarginal": "MONOMIAL", "matchingWeight": 1., - "matchingChordalRadius": [1., "AUTO"], "samplerMarginal": MS([6.75, 7.25], np.linspace(6.75, 7.25, 5)), "QTol": 1e-6, "interpTol": 1e-3} approx = RIP([0], solver, mu0, approxParameters = params, verbosity = 0) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) assert np.isclose(errNP / solver.norm(uh), .4763489, rtol = 1) out, err = capsys.readouterr() assert ("poorly conditioned" not in out) assert len(err) == 0 def test_pivoted_greedy(): mu = [5.05, 7.1] mu0 = [5., 7.] solver = matrixRandom() uh = solver.solve(mu)[0] params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "collinearityTol": 1e8, "errorEstimatorKind": "DISCREPANCY", "S": 5, "polybasis": "CHEBYSHEV", "samplerPivot": QS([4.75, 5.25], "UNIFORM"), "samplerTrainSet": QS([4.75, 5.25], "CHEBYSHEV"), "SMarginal": 5, "polybasisMarginal": "MONOMIAL", "matchingWeight": 1., "samplerMarginal": QS([6.75, 7.25], "UNIFORM")} solver.cutOffPolesRMinRel, solver.cutOffPolesRMaxRel = -3., 3. solver.cutOffPolesIMinRel, solver.cutOffPolesIMaxRel = -1.5, 1.5 approx = RIGP([0], solver, mu0, approxParameters = params, verbosity = 0) approx.setupApprox() uhP1 = approx.getApprox(mu)[0] errP = approx.getErr(mu)[0] errNP = approx.normErr(mu)[0] myerrP = uhP1 - uh assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3) assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3) resP = approx.getRes(mu)[0] resNP = approx.normRes(mu) assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3) assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))), 0., rtol = 1e-3) assert np.isclose(errNP / solver.norm(uh), 7.8581e-2, rtol = 1)
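All of these tests close with the same consistency checks: the returned error must equal uApp - uHF, and the returned residual must equal b(mu) - A(mu) uApp. A tiny numeric version of the residual identity, with a hypothetical 2x2 system standing in for matrixRandom:
```
import numpy as np

A = np.array([[5., 1.], [0., 7.]])           # stand-in for solver.A(mu)
b = np.array([1., 12.])                      # stand-in for solver.b(mu)
uh = np.linalg.solve(A, b)                   # high fidelity solution
uhP = uh + np.array([1e-3, -1e-3])           # perturbed surrogate solution
res = b - A.dot(uhP)                         # what getRes should return
print(np.linalg.norm(res) / np.linalg.norm(b))   # small relative residual, ~7e-4
```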