diff --git a/examples/fenics/HelmholtzLagrangeApproximantsSweep.py b/examples/fenics/LagrangeSweep.py similarity index 100% rename from examples/fenics/HelmholtzLagrangeApproximantsSweep.py rename to examples/fenics/LagrangeSweep.py diff --git a/examples/fenics/HelmholtzPadeLagrangeApproximant.py b/examples/fenics/PadeLagrange.py similarity index 100% rename from examples/fenics/HelmholtzPadeLagrangeApproximant.py rename to examples/fenics/PadeLagrange.py diff --git a/examples/fenics/HelmholtzPadeTaylorApproximant.py b/examples/fenics/PadeTaylor.py similarity index 100% rename from examples/fenics/HelmholtzPadeTaylorApproximant.py rename to examples/fenics/PadeTaylor.py diff --git a/examples/fenics/HelmholtzRBLagrangeApproximant.py b/examples/fenics/RBLagrange.py similarity index 100% rename from examples/fenics/HelmholtzRBLagrangeApproximant.py rename to examples/fenics/RBLagrange.py diff --git a/examples/fenics/HelmholtzRBTaylorApproximant.py b/examples/fenics/RBTaylor.py similarity index 100% rename from examples/fenics/HelmholtzRBTaylorApproximant.py rename to examples/fenics/RBTaylor.py diff --git a/examples/fenics/HelmholtzTaylorPoleIdentification.py b/examples/fenics/TaylorPoles.py similarity index 100% rename from examples/fenics/HelmholtzTaylorPoleIdentification.py rename to examples/fenics/TaylorPoles.py diff --git a/examples/fenics/HelmholtzTaylorApproximantsSweep.py b/examples/fenics/TaylorSweep.py similarity index 100% rename from examples/fenics/HelmholtzTaylorApproximantsSweep.py rename to examples/fenics/TaylorSweep.py diff --git a/examples/fenics/HelmholtzAirfoil.py b/examples/fenics/airfoil.py similarity index 97% copy from examples/fenics/HelmholtzAirfoil.py copy to examples/fenics/airfoil.py index 12cf64f..4677502 100644 --- a/examples/fenics/HelmholtzAirfoil.py +++ b/examples/fenics/airfoil.py @@ -1,254 +1,254 @@ from copy import copy import fenics as fen import numpy as np import sympy as sp from rrompy.hfengines.fenics import ScatteringProblemEngine as SPE from rrompy.hsengines.fenics import HSEngine as HS from rrompy.reduction_methods.taylor import ApproximantTaylorPade as TP from rrompy.reduction_methods.taylor import ApproximantTaylorRB as TRB from rrompy.reduction_methods.lagrange import ApproximantLagrangePade as LP from rrompy.reduction_methods.lagrange import ApproximantLagrangeRB as LRB from rrompy.reduction_methods.base import ParameterSweeper as Sweeper from rrompy.sampling import QuadratureSampler as QS from matplotlib import pyplot as plt from operator import itemgetter def subdict(d, ks): return dict(zip(ks, itemgetter(*ks)(d))) #################### test = "solve" test = "Taylor" test = "Lagrange" test = "TaylorSweep" test = "LagrangeSweep" ttype = "simple" ###ttype = "augmentedI" ###ttype = "augmentedM" plotSamples = 'ALL' #plotSamples = [] k0 = 10 + 1.j kLeft, kRight = 8 + 1.j, 12 + 1.j ktar = 11 ktars = np.linspace(8, 12, 33) - .5j PI = np.pi R = 2 def Dboundary(x, on_boundary): return on_boundary and (x[0]**2+x[1]**2)**.5 < .95 * R kappa = 10 theta = PI * - 45 / 180. mu = 1.1 epsilon = .1 x, y = sp.symbols('x[0] x[1]', real=True) phiex = kappa * (x * np.cos(theta) + y * np.sin(theta)) u0ex = - sp.exp(1.j * phiex) checkReal = x**2-x+y**2 rhoroot = ((x**2+y**2)/((x-1)**2+y**2))**.25 phiroot1 = sp.atan(-y/(x**2-x+y**2)) / 2 phiroot2 = sp.atan(-y/(x**2-x+y**2)) / 2 - PI * sp.sign(-y/(x**2-x+y**2)) / 2 kappam1 = (((rhoroot*sp.cos(phiroot1)+.5)**2.+(rhoroot*sp.sin(phiroot1))**2.)/ ((rhoroot*sp.cos(phiroot1)-1.)**2.+(rhoroot*sp.sin(phiroot1))**2.) 
)**.5 - mu kappam2 = (((rhoroot*sp.cos(phiroot2)+.5)**2.+(rhoroot*sp.sin(phiroot2))**2.)/ ((rhoroot*sp.cos(phiroot2)-1.)**2.+(rhoroot*sp.sin(phiroot2))**2.) )**.5 - mu Heps1 = .9 * .5 * (1 + kappam1/epsilon + sp.sin(PI*kappam1/epsilon) / PI) + .1 Heps2 = .9 * .5 * (1 + kappam2/epsilon + sp.sin(PI*kappam2/epsilon) / PI) + .1 mesh = fen.Mesh('../data/mesh/airfoil.xml') a = fen.Expression(('{0}>=0 ? ({2}>=-{1} ? ({2}<={1} ? {4} : 1) : .1) : ' '({3}>=-{1} ? ({3}<={1} ? {5} : 1) : .1)')\ .format(sp.printing.ccode(checkReal), str(epsilon), sp.printing.ccode(kappam1), sp.printing.ccode(kappam2), sp.printing.ccode(Heps1), sp.printing.ccode(Heps2)), degree = 3) DirichletTerm = [sp.printing.ccode(sp.simplify(sp.re(u0ex))), sp.printing.ccode(sp.simplify(sp.im(u0ex)))] if ttype == "simple": solver = SPE() solver.omega = k0 solver.V = fen.FunctionSpace(mesh, "P", 3) solver.diffusivity = a solver.DirichletBoundary = Dboundary solver.RobinBoundary = 'rest' solver.DirichletDatum = [fen.Expression(x, degree = 3)\ for x in DirichletTerm] plotter = HS(solver.V) else: print('NOPE') # if ttype[-1] == "I": constraintType = "IDENTITY" # else: constraintType = "MASS" # solver = HFSA(mesh = mesh, wavenumber = k0, diffusivity = a, # forcingTerm = 0, FEDegree = 3, DirichletBoundary = Dboundary, # RobinBoundary = 'rest', DirichletDatum = DirichletTerm, # constraintType = constraintType) # plotter = HSA(solver.V, d = 2) baseRe, baseIm = solver.DirichletDatum baseRe = fen.project(baseRe, solver.V) baseIm = fen.project(baseIm, solver.V) uinc = np.array(baseRe.vector()) + 1.j * np.array(baseIm.vector()) if ttype[: -1] == "augmented": print('NOPE') # uinc = [uinc, kappa * uinc] if test == "solve": aF = fen.interpolate(a, solver.V) av = aF.vector() uh = solver.solve(k0) print(plotter.norm(uh, kappa)) if ttype == "simple": uhtot = uh - uinc else: uhtot = [x - y for x, y in zip(uh, uinc)] print(plotter.norm(uhtot, kappa)) plotter.plot(av, what = 'Real', name = 'a') plotter.plot(uhtot - uh, what = 'Real', name = 'u_inc') plotter.plot(uh, what = 'ABS') plotter.plot(uhtot, what = 'ABS', name = 'u + u_inc') elif test in ["Taylor", "Lagrange"]: if test == "Taylor": params = {'N':8, 'M':7, 'R':8, 'E':8, 'sampleType':'Krylov', 'POD':False} parPade = subdict(params, ['N', 'M', 'E', 'sampleType', 'POD']) parRB = subdict(params, ['R', 'E', 'sampleType', 'POD']) approxPade = TP(solver, plotter, mu0 = k0, plotDer = plotSamples, approxParameters = parPade) approxRB = TRB(solver, plotter, mu0 = k0, approxParameters = parRB) else: params = {'N':8, 'M':7, 'R':9, 'S':9, 'POD':True, 'sampler':QS([kLeft, kRight], "CHEBYSHEV")} parPade = subdict(params, ['N', 'M', 'S', 'POD', 'sampler']) parRB = subdict(params, ['R', 'S', 'POD', 'sampler']) approxPade = LP(solver, plotter, mu0 = np.mean([kLeft, kRight]), plotSnap = plotSamples, approxParameters = parPade) approxRB = LRB(solver, plotter, mu0 = np.mean([kLeft, kRight]), approxParameters = parRB) PadeErr, solNorm = approxPade.approxError(ktar), approxPade.HFNorm(ktar) RBErr = approxRB.approxError(ktar) print(('SolNorm:\t{}\nErrPade:\t{}\nErrRelPade:\t{}\nErrRB:\t\t{}' '\nErrRelRB:\t{}').format(solNorm, PadeErr, np.divide(PadeErr, solNorm), RBErr, np.divide(RBErr, solNorm))) print('\nPoles Pade'':') print(approxPade.getPoles()) print('\nPoles RB:') print(approxRB.getPoles()) uHF = approxPade.getHF(ktar) plotter.plot(uHF, name = 'u_ex') approxPade.plotApp(ktar, name = 'u_Pade''') approxRB.plotApp(ktar, name = 'u_RB') approxPade.plotErr(ktar, name = 'errPade''') approxRB.plotErr(ktar, name = 'errRB') 
elif test in ["TaylorSweep", "LagrangeSweep"]: if test == "TaylorSweep": shift = 10 nsets = 3 stride = 3 Emax = stride * (nsets - 1) + shift + 1 params = {'Emax':Emax, 'sampleType':'Krylov', 'POD':False} paramsSetsPade = [None] * nsets paramsSetsRB = [None] * nsets for i in range(nsets): paramsSetsPade[i] = {'N':stride*i+shift + 1, 'M':stride*i+shift, 'E':stride*i+shift + 1} paramsSetsRB[i] = {'E':stride*i+shift + 1,'R':stride*i+shift + 1} approxPade = TP(solver, plotter, mu0 = kappa,approxParameters = params) approxRB = TRB(solver, plotter, mu0 = kappa, approxParameters = params) else: shift = 10 nsets = 3 stride = 3 Smax = stride * (nsets - 1) + shift + 2 paramsPade = {'S':Smax, 'POD':True, 'sampler':QS([kLeft, kRight], "CHEBYSHEV")} paramsRB = copy(paramsPade) paramsSetsPade = [None] * nsets paramsSetsRB = [None] * nsets for i in range(nsets): paramsSetsPade[i] = {'N': stride*i+shift+1, 'M': stride*i+shift, 'S': stride*i+shift+2} paramsSetsRB[i] = {'R': stride*i+shift+1, 'S': stride*i+shift+2} approxPade = LP(solver, plotter, mu0 = np.mean([kLeft, kRight]), approxParameters = paramsPade) approxRB = LRB(solver, plotter, mu0 = np.mean([kLeft, kRight]), approxParameters = paramsRB) sweeper = Sweeper(mutars = ktars, mostExpensive = 'Approx') sweeper.ROMEngine = approxPade sweeper.params = paramsSetsPade - filenamePade = sweeper.sweep('../data/output/HelmholtzAirfoil'+test[:-5]+'PadeFE.dat') + filenamePade = sweeper.sweep('../data/output/airfoil'+test[:-5]+'Pade.dat') sweeper.ROMEngine = approxRB sweeper.params = paramsSetsRB - filenameRB = sweeper.sweep('../data/output/HelmholtzAirfoil'+test[:-5]+'RBFE.dat') + filenameRB = sweeper.sweep('../data/output/airfoil'+test[:-5]+'RB.dat') for i in range(nsets): if test == "TaylorSweep": val = stride*i+shift+1 - PadeOutput = sweeper.read(filenamePade, {'N':val}, + PadeOutput = sweeper.read(filenamePade, {'E':val}, ['muRe', 'HFNorm', 'AppNorm', 'ErrNorm']) RBOutput = sweeper.read(filenameRB, {'E':val}, ['muRe', 'AppNorm', 'ErrNorm']) let = 'E' else: val = stride*i+shift+2 PadeOutput = sweeper.read(filenamePade, {'S':val}, ['muRe', 'HFNorm', 'AppNorm', 'ErrNorm']) RBOutput = sweeper.read(filenameRB, {'S':val}, ['muRe', 'AppNorm', 'ErrNorm']) let = 'S' ktarsF = PadeOutput['muRe'] solNormF = PadeOutput['HFNorm'] PadektarsF = PadeOutput['muRe'] PadeNormF = PadeOutput['AppNorm'] PadeErrorF = PadeOutput['ErrNorm'] RBktarsF = RBOutput['muRe'] RBNormF = RBOutput['AppNorm'] RBErrorF = RBOutput['ErrNorm'] plt.figure(figsize = (10, 5)) plt.plot(ktarsF, solNormF, 'k-', label='Sol norm') plt.plot(PadektarsF, PadeNormF, 'b--', label='Pade'' norm, {1} = {0}'.format(val, let)) plt.plot(RBktarsF, RBNormF, 'g--', label='RB norm, {1} = {0}'.format(val, let)) plt.legend() plt.grid() p = plt.legend(loc = 'upper left') plt.savefig('./normA' + str(i) + '.eps', format='eps', dpi=1000) plt.show() plt.close() plt.figure(figsize = (10, 5)) plt.semilogy(ktarsF, np.divide(PadeErrorF, solNormF), 'b', label='Pade'' relative error, {1} = {0}'.format(val, let)) plt.semilogy(RBktarsF, np.divide(RBErrorF, solNormF), 'g', label='RB relative error, {1} = {0}'.format(val, let)) plt.legend() plt.grid() p = plt.legend(loc = 'lower right') plt.savefig('./errorAR' + str(i) + '.eps', format='eps', dpi=1000) plt.show() plt.close() diff --git a/examples/fenics/HelmholtzMembraneTaylor.py b/examples/fenics/membraneTaylor.py similarity index 92% rename from examples/fenics/HelmholtzMembraneTaylor.py rename to examples/fenics/membraneTaylor.py index 2e038fa..a0b2527 100644 --- 
a/examples/fenics/HelmholtzMembraneTaylor.py +++ b/examples/fenics/membraneTaylor.py @@ -1,104 +1,105 @@ import fenics as fen import numpy as np import sympy as sp from rrompy.hfengines.fenics import HelmholtzProblemEngine as HPE from rrompy.hsengines.fenics import HSEngine as HS from rrompy.reduction_methods.taylor import ApproximantTaylorPade as TP from rrompy.reduction_methods.base import ParameterSweeper as Sweeper test = "poles" #test = "error" #test = "norm" z0 = 100 + 0.j ztars = np.array([80, 95, 98]) ztarsNorm = np.linspace(78, 122, 250) def boundaryNeumann(x, on_boundary): return on_boundary and x[1] > .25 and x[0] > 0.995 and x[0] < 1.005 x0, y0 = .5, .5 Rr, Ri = .1, .1 x, y = sp.symbols('x[0] x[1]', real=True) fex = sp.exp(- ((x - x0)**2 + (y - y0)**2) / 2 / Rr**2) meshname = '../data/mesh/crack_coarse.xml' #meshname = '../data/mesh/crack_fine.xml' mesh = fen.Mesh(meshname) forcingTerm = fen.Expression(sp.printing.ccode(sp.simplify(fex)), degree = 3) solver = HPE() solver.omega = np.real(z0 ** .5) solver.V = fen.FunctionSpace(mesh, "P", 3) solver.forcingTerm = forcingTerm solver.NeumannBoundary = boundaryNeumann solver.DirichletBoundary = 'rest' plotter = HS(solver.V) if test == "poles": appPoles = {} Emax = 13 params = {'N':6, 'M':0, 'E':6, 'Emax':Emax, 'sampleType':'Arnoldi', 'POD':True} approxPade = TP(solver, plotter, mu0 = z0, approxParameters = params) for E in range(6, Emax + 1): approxPade.E = E appPoles[E] = np.sort(approxPade.getPoles()) a = fen.dot(fen.grad(solver.u), fen.grad(solver.v)) * fen.dx A = fen.assemble(a) fen.DirichletBC(solver.V, fen.Constant(0.), solver.DirichletBoundary).apply(A) AMat = fen.as_backend_type(A).mat() Ar, Ac, Av = AMat.getValuesCSR() import scipy.sparse as scsp A = scsp.csr_matrix((Av, Ac, Ar), shape = AMat.size) m = fen.dot(solver.u, solver.v) * fen.dx M = fen.assemble(m) fen.DirichletBC(solver.V, fen.Constant(0.), solver.DirichletBoundary).apply(M) MMat = fen.as_backend_type(M).mat() Mr, Mc, Mv = MMat.getValuesCSR() import scipy.sparse as scsp M = scsp.csr_matrix((Mv, Mc, Mr), shape = MMat.size) poles = scsp.linalg.eigs(A, k = 7, M = M, sigma = 100., return_eigenvectors = False) II = np.argsort(np.abs(poles - z0)) poles = poles[II] [print('{},{}'.format(np.real(x), np.imag(x)), end = ',') for x in poles] print() for E in range(6, Emax + 1): print(E, end = ',') [print('{},{}'.format(np.real(x), np.imag(x)), end = ',')\ for x in np.sort(appPoles[E])] print() elif test == "error": M0 = 5 Emax = 13 params = {'Emax':Emax, 'sampleType':'Arnoldi', 'POD':True} paramsSetsPade = [None] * (Emax - M0 + 1) for M in range(M0, Emax + 1): paramsSetsPade[M - M0] = {'N':M0 + 1, 'M':M, 'E':max(M, M0 + 1)} approxPade = TP(solver, plotter, mu0 = z0, approxParameters = params) sweeper = Sweeper(mutars = ztars, mostExpensive = 'Approx') sweeper.ROMEngine = approxPade sweeper.params = paramsSetsPade - filenamePade = sweeper.sweep('./error.dat', outputs = 'ALL') + filenamePade = sweeper.sweep('../data/output/membrane_error.dat', + outputs = 'ALL') elif test == "norm": params = [{'N':6, 'M':10, 'E':10, 'sampleType':'Arnoldi', 'POD':True}] approxPade = TP(solver, plotter, mu0 = z0, approxParameters = params[0]) sweeper = Sweeper(mutars = ztarsNorm, mostExpensive = 'Approx') sweeper.ROMEngine = approxPade sweeper.params = params - filenamePade = sweeper.sweep('./norm.dat', outputs = ["HFNorm", "AppNorm", - "ErrNorm"]) + filenamePade = sweeper.sweep('../data/output/membrane_norm.dat', + outputs = ["HFNorm", "AppNorm", "ErrNorm"]) diff --git 
a/examples/fenics/HelmholtzAirfoil.py b/examples/fenics/scatteringSquare.py similarity index 64% rename from examples/fenics/HelmholtzAirfoil.py rename to examples/fenics/scatteringSquare.py index 12cf64f..d61aea5 100644 --- a/examples/fenics/HelmholtzAirfoil.py +++ b/examples/fenics/scatteringSquare.py @@ -1,254 +1,193 @@ from copy import copy -import fenics as fen import numpy as np -import sympy as sp -from rrompy.hfengines.fenics import ScatteringProblemEngine as SPE +from rrompy.hfengines.fenics import HelmholtzCavityScatteringProblemEngine as CSPE from rrompy.hsengines.fenics import HSEngine as HS from rrompy.reduction_methods.taylor import ApproximantTaylorPade as TP -from rrompy.reduction_methods.taylor import ApproximantTaylorRB as TRB from rrompy.reduction_methods.lagrange import ApproximantLagrangePade as LP +from rrompy.reduction_methods.taylor import ApproximantTaylorRB as TRB from rrompy.reduction_methods.lagrange import ApproximantLagrangeRB as LRB from rrompy.reduction_methods.base import ParameterSweeper as Sweeper from rrompy.sampling import QuadratureSampler as QS from matplotlib import pyplot as plt from operator import itemgetter def subdict(d, ks): return dict(zip(ks, itemgetter(*ks)(d))) #################### test = "solve" -test = "Taylor" -test = "Lagrange" -test = "TaylorSweep" +#test = "Taylor" +#test = "Lagrange" +#test = "TaylorSweep" test = "LagrangeSweep" ttype = "simple" ###ttype = "augmentedI" ###ttype = "augmentedM" plotSamples = 'ALL' #plotSamples = [] -k0 = 10 + 1.j -kLeft, kRight = 8 + 1.j, 12 + 1.j -ktar = 11 -ktars = np.linspace(8, 12, 33) - .5j - -PI = np.pi -R = 2 -def Dboundary(x, on_boundary): - return on_boundary and (x[0]**2+x[1]**2)**.5 < .95 * R - -kappa = 10 -theta = PI * - 45 / 180. -mu = 1.1 -epsilon = .1 - -x, y = sp.symbols('x[0] x[1]', real=True) -phiex = kappa * (x * np.cos(theta) + y * np.sin(theta)) -u0ex = - sp.exp(1.j * phiex) - -checkReal = x**2-x+y**2 -rhoroot = ((x**2+y**2)/((x-1)**2+y**2))**.25 -phiroot1 = sp.atan(-y/(x**2-x+y**2)) / 2 -phiroot2 = sp.atan(-y/(x**2-x+y**2)) / 2 - PI * sp.sign(-y/(x**2-x+y**2)) / 2 -kappam1 = (((rhoroot*sp.cos(phiroot1)+.5)**2.+(rhoroot*sp.sin(phiroot1))**2.)/ - ((rhoroot*sp.cos(phiroot1)-1.)**2.+(rhoroot*sp.sin(phiroot1))**2.) - )**.5 - mu -kappam2 = (((rhoroot*sp.cos(phiroot2)+.5)**2.+(rhoroot*sp.sin(phiroot2))**2.)/ - ((rhoroot*sp.cos(phiroot2)-1.)**2.+(rhoroot*sp.sin(phiroot2))**2.) - )**.5 - mu -Heps1 = .9 * .5 * (1 + kappam1/epsilon + sp.sin(PI*kappam1/epsilon) / PI) + .1 -Heps2 = .9 * .5 * (1 + kappam2/epsilon + sp.sin(PI*kappam2/epsilon) / PI) + .1 - -mesh = fen.Mesh('../data/mesh/airfoil.xml') - -a = fen.Expression(('{0}>=0 ? ({2}>=-{1} ? ({2}<={1} ? {4} : 1) : .1) : ' - '({3}>=-{1} ? ({3}<={1} ? 
{5} : 1) : .1)')\ - .format(sp.printing.ccode(checkReal), str(epsilon), - sp.printing.ccode(kappam1), - sp.printing.ccode(kappam2), - sp.printing.ccode(Heps1), - sp.printing.ccode(Heps2)), degree = 3) - -DirichletTerm = [sp.printing.ccode(sp.simplify(sp.re(u0ex))), - sp.printing.ccode(sp.simplify(sp.im(u0ex)))] +k0 = 10 +kLeft, kRight = 9, 11 +ktar = 9.5 +ktars = np.linspace(8.5, 11.5, 125) + +kappa = 5 +n = 50 if ttype == "simple": - solver = SPE() + solver = CSPE(kappa = kappa, n = n) solver.omega = k0 - solver.V = fen.FunctionSpace(mesh, "P", 3) - solver.diffusivity = a - solver.DirichletBoundary = Dboundary - solver.RobinBoundary = 'rest' - solver.DirichletDatum = [fen.Expression(x, degree = 3)\ - for x in DirichletTerm] - plotter = HS(solver.V) else: print('NOPE') # if ttype[-1] == "I": constraintType = "IDENTITY" # else: constraintType = "MASS" # solver = HFSA(mesh = mesh, wavenumber = k0, diffusivity = a, # forcingTerm = 0, FEDegree = 3, DirichletBoundary = Dboundary, # RobinBoundary = 'rest', DirichletDatum = DirichletTerm, # constraintType = constraintType) # plotter = HSA(solver.V, d = 2) -baseRe, baseIm = solver.DirichletDatum -baseRe = fen.project(baseRe, solver.V) -baseIm = fen.project(baseIm, solver.V) -uinc = np.array(baseRe.vector()) + 1.j * np.array(baseIm.vector()) -if ttype[: -1] == "augmented": - print('NOPE') -# uinc = [uinc, kappa * uinc] - if test == "solve": - aF = fen.interpolate(a, solver.V) - av = aF.vector() uh = solver.solve(k0) print(plotter.norm(uh, kappa)) - if ttype == "simple": - uhtot = uh - uinc - else: - uhtot = [x - y for x, y in zip(uh, uinc)] - print(plotter.norm(uhtot, kappa)) - plotter.plot(av, what = 'Real', name = 'a') - plotter.plot(uhtot - uh, what = 'Real', name = 'u_inc') - plotter.plot(uh, what = 'ABS') - plotter.plot(uhtot, what = 'ABS', name = 'u + u_inc') + plotter.plot(uh, what = ['ABS', 'REAL']) elif test in ["Taylor", "Lagrange"]: if test == "Taylor": - params = {'N':8, 'M':7, 'R':8, 'E':8, 'sampleType':'Krylov', 'POD':False} + params = {'N':8, 'M':7, 'R':8, 'E':8, 'sampleType':'Krylov', 'POD':True} parPade = subdict(params, ['N', 'M', 'E', 'sampleType', 'POD']) parRB = subdict(params, ['R', 'E', 'sampleType', 'POD']) approxPade = TP(solver, plotter, mu0 = k0, plotDer = plotSamples, approxParameters = parPade) approxRB = TRB(solver, plotter, mu0 = k0, approxParameters = parRB) else: - params = {'N':8, 'M':7, 'R':9, 'S':9, 'POD':True, + params = {'N':8, 'M':7, 'R':8, 'S':9, 'POD':True, 'sampler':QS([kLeft, kRight], "CHEBYSHEV")} parPade = subdict(params, ['N', 'M', 'S', 'POD', 'sampler']) parRB = subdict(params, ['R', 'S', 'POD', 'sampler']) approxPade = LP(solver, plotter, mu0 = np.mean([kLeft, kRight]), plotSnap = plotSamples, approxParameters = parPade) approxRB = LRB(solver, plotter, mu0 = np.mean([kLeft, kRight]), approxParameters = parRB) PadeErr, solNorm = approxPade.approxError(ktar), approxPade.HFNorm(ktar) RBErr = approxRB.approxError(ktar) print(('SolNorm:\t{}\nErrPade:\t{}\nErrRelPade:\t{}\nErrRB:\t\t{}' '\nErrRelRB:\t{}').format(solNorm, PadeErr, np.divide(PadeErr, solNorm), RBErr, np.divide(RBErr, solNorm))) print('\nPoles Pade'':') print(approxPade.getPoles()) print('\nPoles RB:') print(approxRB.getPoles()) uHF = approxPade.getHF(ktar) plotter.plot(uHF, name = 'u_ex') approxPade.plotApp(ktar, name = 'u_Pade''') approxRB.plotApp(ktar, name = 'u_RB') approxPade.plotErr(ktar, name = 'errPade''') approxRB.plotErr(ktar, name = 'errRB') elif test in ["TaylorSweep", "LagrangeSweep"]: if test == "TaylorSweep": - shift = 10 + 
shift = 6 nsets = 3 stride = 3 Emax = stride * (nsets - 1) + shift + 1 - params = {'Emax':Emax, 'sampleType':'Krylov', 'POD':False} + params = {'Emax':Emax, 'sampleType':'Krylov', 'POD':True} paramsSetsPade = [None] * nsets paramsSetsRB = [None] * nsets for i in range(nsets): paramsSetsPade[i] = {'N':stride*i+shift + 1, 'M':stride*i+shift, 'E':stride*i+shift + 1} paramsSetsRB[i] = {'E':stride*i+shift + 1,'R':stride*i+shift + 1} - approxPade = TP(solver, plotter, mu0 = kappa,approxParameters = params) - approxRB = TRB(solver, plotter, mu0 = kappa, approxParameters = params) + approxPade = TP(solver, plotter, mu0 = k0,approxParameters = params) + approxRB = TRB(solver, plotter, mu0 = k0, approxParameters = params) else: - shift = 10 - nsets = 3 + shift = 7 + nsets = 4 stride = 3 Smax = stride * (nsets - 1) + shift + 2 paramsPade = {'S':Smax, 'POD':True, 'sampler':QS([kLeft, kRight], "CHEBYSHEV")} paramsRB = copy(paramsPade) paramsSetsPade = [None] * nsets paramsSetsRB = [None] * nsets for i in range(nsets): paramsSetsPade[i] = {'N': stride*i+shift+1, 'M': stride*i+shift, 'S': stride*i+shift+2} paramsSetsRB[i] = {'R': stride*i+shift+1, 'S': stride*i+shift+2} approxPade = LP(solver, plotter, mu0 = np.mean([kLeft, kRight]), approxParameters = paramsPade) approxRB = LRB(solver, plotter, mu0 = np.mean([kLeft, kRight]), approxParameters = paramsRB) sweeper = Sweeper(mutars = ktars, mostExpensive = 'Approx') sweeper.ROMEngine = approxPade sweeper.params = paramsSetsPade - filenamePade = sweeper.sweep('../data/output/HelmholtzAirfoil'+test[:-5]+'PadeFE.dat') + filenamePade = sweeper.sweep('../data/output/ssquare'+test[:-5]+'Pade.dat') sweeper.ROMEngine = approxRB sweeper.params = paramsSetsRB - filenameRB = sweeper.sweep('../data/output/HelmholtzAirfoil'+test[:-5]+'RBFE.dat') + filenameRB = sweeper.sweep('../data/output/ssquare'+test[:-5]+'RB.dat') for i in range(nsets): if test == "TaylorSweep": val = stride*i+shift+1 - PadeOutput = sweeper.read(filenamePade, {'N':val}, + PadeOutput = sweeper.read(filenamePade, {'E':val}, ['muRe', 'HFNorm', 'AppNorm', 'ErrNorm']) RBOutput = sweeper.read(filenameRB, {'E':val}, ['muRe', 'AppNorm', 'ErrNorm']) let = 'E' else: val = stride*i+shift+2 PadeOutput = sweeper.read(filenamePade, {'S':val}, ['muRe', 'HFNorm', 'AppNorm', 'ErrNorm']) RBOutput = sweeper.read(filenameRB, {'S':val}, ['muRe', 'AppNorm', 'ErrNorm']) let = 'S' ktarsF = PadeOutput['muRe'] solNormF = PadeOutput['HFNorm'] PadektarsF = PadeOutput['muRe'] PadeNormF = PadeOutput['AppNorm'] PadeErrorF = PadeOutput['ErrNorm'] RBktarsF = RBOutput['muRe'] RBNormF = RBOutput['AppNorm'] RBErrorF = RBOutput['ErrNorm'] + + PadeErrorF[PadeErrorF < 1e-8] = np.nan + RBErrorF[RBErrorF < 1e-8] = np.nan plt.figure(figsize = (10, 5)) plt.plot(ktarsF, solNormF, 'k-', label='Sol norm') plt.plot(PadektarsF, PadeNormF, 'b--', label='Pade'' norm, {1} = {0}'.format(val, let)) plt.plot(RBktarsF, RBNormF, 'g--', label='RB norm, {1} = {0}'.format(val, let)) plt.legend() plt.grid() p = plt.legend(loc = 'upper left') - plt.savefig('./normA' + str(i) + '.eps', + plt.savefig('../data/output/normA' + str(i) + '.eps', format='eps', dpi=1000) plt.show() plt.close() plt.figure(figsize = (10, 5)) plt.semilogy(ktarsF, np.divide(PadeErrorF, solNormF), 'b', label='Pade'' relative error, {1} = {0}'.format(val, let)) plt.semilogy(RBktarsF, np.divide(RBErrorF, solNormF), 'g', label='RB relative error, {1} = {0}'.format(val, let)) plt.legend() plt.grid() p = plt.legend(loc = 'lower right') - plt.savefig('./errorAR' + str(i) + '.eps', + 
plt.savefig('../data/output/errorAR' + str(i) + '.eps', format='eps', dpi=1000) plt.show() plt.close() diff --git a/examples/fenics/HelmholtzSolver.py b/examples/fenics/solver.py similarity index 100% rename from examples/fenics/HelmholtzSolver.py rename to examples/fenics/solver.py diff --git a/rrompy/hfengines/fenics/__init__.py b/rrompy/hfengines/fenics/__init__.py index 7f1bafd..35e2275 100644 --- a/rrompy/hfengines/fenics/__init__.py +++ b/rrompy/hfengines/fenics/__init__.py @@ -1,38 +1,40 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from rrompy.hfengines.fenics.generic_problem_engine import GenericProblemEngine from rrompy.hfengines.fenics.helmholtz_base_problem_engine import HelmholtzBaseProblemEngine from rrompy.hfengines.fenics.helmholtz_box_scattering_problem_engine import HelmholtzBoxScatteringProblemEngine +from rrompy.hfengines.fenics.helmholtz_cavity_scattering_problem_engine import HelmholtzCavityScatteringProblemEngine from rrompy.hfengines.fenics.helmholtz_problem_engine import HelmholtzProblemEngine from rrompy.hfengines.fenics.helmholtz_square_bubble_problem_engine import HelmholtzSquareBubbleProblemEngine from rrompy.hfengines.fenics.helmholtz_square_transmission_problem_engine import HelmholtzSquareTransmissionProblemEngine from rrompy.hfengines.fenics.scattering_problem_engine import ScatteringProblemEngine __all__ = [ 'GenericProblemEngine', 'HelmholtzBaseProblemEngine', 'HelmholtzBoxScatteringProblemEngine', + 'HelmholtzCavityScatteringProblemEngine', 'HelmholtzProblemEngine', 'HelmholtzSquareBubbleProblemEngine', 'HelmholtzSquareTransmissionProblemEngine', 'ScatteringProblemEngine' ] diff --git a/rrompy/hfengines/fenics/generic_problem_engine.py b/rrompy/hfengines/fenics/generic_problem_engine.py index ed9c266..2d01320 100644 --- a/rrompy/hfengines/fenics/generic_problem_engine.py +++ b/rrompy/hfengines/fenics/generic_problem_engine.py @@ -1,80 +1,86 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod import numpy as np import scipy.sparse.linalg as scspla from rrompy.utilities.types import Np1D, Np2D, Tuple, List __all__ = ['GenericProblemEngine'] class GenericProblemEngine: """ Generic solver for parametric problems. 
Attributes: As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. """ Aterms, bterms = 1, 1 functional = lambda self, u: 0. + def name(self) -> str: + return self.__class__.__name__ + + def __str__(self) -> str: + return self.name() + @abstractmethod def energyNormMatrix(self) -> Np2D: """Sparse matrix (in CSR format) representative of scalar product.""" pass @abstractmethod def assembleA(self): """Assemble matrix blocks of linear system.""" self.As = [None] * 1 pass @abstractmethod def assembleb(self): """Assemble matrix blocks of linear system.""" self.bs = [None] * 1 pass def A(self, mu:complex) -> Np2D: """Assemble matrix of linear system.""" self.assembleA() A = self.As[0] for j in range(1, len(self.As)): A = A + np.power(mu, j) * self.As[j] return A def b(self, mu:complex) -> Np1D: """Assemble RHS of linear system.""" self.assembleb() b = self.bs[0] for j in range(1, len(self.bs)): b = b + np.power(mu, j) * self.bs[j] return b def solve(self, mu:complex) -> Np1D: """Find solution of linear system.""" return scspla.spsolve(self.A(mu), self.b(mu)) def getLSBlocks(self) -> Tuple[List[Np2D], List[Np1D]]: """Get blocks of linear system.""" self.assembleA() self.assembleb() return self.As, self.bs diff --git a/rrompy/hfengines/fenics/helmholtz_cavity_scattering_problem_engine.py b/rrompy/hfengines/fenics/helmholtz_cavity_scattering_problem_engine.py new file mode 100644 index 0000000..b4cc563 --- /dev/null +++ b/rrompy/hfengines/fenics/helmholtz_cavity_scattering_problem_engine.py @@ -0,0 +1,52 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import numpy as np +import fenics as fen +from rrompy.hfengines.fenics.scattering_problem_engine import ScatteringProblemEngine + +__all__ = ['HelmholtzCavityScatteringProblemEngine'] + +class HelmholtzCavityScatteringProblemEngine(ScatteringProblemEngine): + """ + Solver for scattering problem inside a cavity with parametric wavenumber. + - \Delta u - omega^2 * n^2 * u = 0 in \Omega + u = 0 on \Gamma_D + \partial_nu - i k u = 0 on \Gamma_R + with exact solution a transmitted plane wave. 
+ """ + def __init__(self, kappa:float, n:int, gamma : float = 0., + signR : int = -1): + super().__init__() + + mesh = fen.RectangleMesh(fen.Point(0,0), fen.Point(np.pi,np.pi), n, n) + self.V = fen.FunctionSpace(mesh, "P", 3) + + self.RobinBoundary = (lambda x, on_boundary: on_boundary + and np.isclose(x[1], np.pi)) + self.DirichletBoundary = "REST" + + import sympy as sp + x, y = sp.symbols('x[0] x[1]', real=True) + wex = 4/np.pi**4 * x * y * (x - np.pi) * (y - 2 * np.pi) + phiex = signR * kappa * (x * gamma + y) + uex = wex * sp.exp(-1.j * phiex) + fex = - uex.diff(x, 2) - uex.diff(y, 2) - kappa**2 * uex + forcingTerm = [sp.printing.ccode(sp.simplify(sp.re(fex))), + sp.printing.ccode(sp.simplify(sp.im(fex)))] + self.forcingTerm = [fen.Expression(x, degree = 3) for x in forcingTerm] diff --git a/rrompy/hsengines/fenics/hsengine.py b/rrompy/hsengines/fenics/hsengine.py index 68f4079..042b950 100644 --- a/rrompy/hsengines/fenics/hsengine.py +++ b/rrompy/hsengines/fenics/hsengine.py @@ -1,151 +1,153 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from matplotlib import pyplot as plt import fenics as fen from rrompy.utilities.types import Np1D, strLst, FenSpace, N2FSExpr from rrompy.utilities import purgeList, getNewFilename __all__ = ['HSEngine'] class HSEngine: """ Fenics-based Hilbert space engine. """ def __init__(self, V:FenSpace): self.V = V def name(self) -> str: - """Class label.""" return self.__class__.__name__ + + def __str__(self) -> str: + return self.name() def norm(self, u:Np1D, normType : N2FSExpr = "H10") -> float: """ Compute general norm of complex-valued function with given dofs. Args: u: numpy complex array with function dofs. normType(optional): Target norm identifier. If matrix, target norm is one induced by normType. If number, target norm is weighted H^1 norm with given weight. If string, must be recognizable by Fenics norm command. Defaults to 'H10'. Returns: Norm of the function (non-negative). """ if type(normType).__name__[-6:] == "matrix": return np.abs(u.dot(normType.dot(u).conj())) ** .5 if isinstance(normType, (int, float)): return ( HSEngine.norm(self, u, "H10")**2 + (normType * HSEngine.norm(self, u, "L2"))**2)**.5 uAbs = fen.Function(self.V) uAbs.vector()[:] = np.array(np.abs(u), dtype = float) return fen.norm(uAbs, normType) def plot(self, u:Np1D, name : str = "u", save : bool = False, what : strLst = 'all', **figspecs): """ Do some nice plots of the complex-valued function with given dofs. Args: u: numpy complex array with function dofs. name(optional): Name to be shown as title of the plots. Defaults to 'u'. what(optional): Which plots to do. If list, can contain 'ABS', 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'. Defaults to 'ALL'. save(optional): Whether to save plot(s). Defaults to False. figspecs(optional key args): Optional arguments for matplotlib figure creation. 
""" if isinstance(what, (str,)): if what.upper() == 'ALL': what = ['ABS', 'PHASE', 'REAL', 'IMAG'] else: what = [what] what = purgeList(what, ['ABS', 'PHASE', 'REAL', 'IMAG'], listname = self.name() + ".what") if len(what) == 0: return if 'figsize' not in figspecs.keys(): figspecs['figsize'] = (13. * len(what) / 4, 3) subplotcode = 100 + len(what) * 10 if 'REAL' in what and 'IMAG' in what: combined_data_ReIm = np.concatenate([np.real(u), np.imag(u)]) _min = np.amin(combined_data_ReIm) _max = np.amax(combined_data_ReIm) pars = {'vmin' : _min, 'vmax' : _max} else: pars = {} plt.figure(**figspecs) plt.jet() if 'ABS' in what: uAb = fen.Function(self.V) uAb.vector()[:] = np.array(np.abs(u), dtype = float) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uAb, title = "|{0}|".format(name)) plt.colorbar(p) if 'PHASE' in what: uPh = fen.Function(self.V) uPh.vector()[:] = np.array(np.angle(u), dtype = float) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uPh, title = "phase({0})".format(name)) plt.colorbar(p) if 'REAL' in what: uRe = fen.Function(self.V) uRe.vector()[:] = np.array(np.real(u), dtype = float) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uRe, title = "Re({0})".format(name), **pars) plt.colorbar(p) if 'IMAG' in what: uIm = fen.Function(self.V) uIm.vector()[:] = np.array(np.imag(u), dtype = float) subplotcode = subplotcode + 1 plt.subplot(subplotcode) p = fen.plot(uIm, title = "Im({0})".format(name), **pars) plt.colorbar(p) if save: plt.savefig(getNewFilename("fig", "eps"), format='eps', dpi=1000) plt.show() plt.close() def plotmesh(self, name : str = "Mesh", save : bool = False, **figspecs): """ Do a nice plot of the mesh. Args: u: numpy complex array with function dofs. name(optional): Name to be shown as title of the plots. Defaults to 'u'. save(optional): Whether to save plot(s). Defaults to False. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ plt.figure(**figspecs) fen.plot(self.V.mesh()) if save: plt.savefig(getNewFilename("msh", "eps"), format='eps', dpi=1000) plt.show() plt.close() diff --git a/rrompy/reduction_methods/base/generic_approximant.py b/rrompy/reduction_methods/base/generic_approximant.py index 63750b5..9fac596 100644 --- a/rrompy/reduction_methods/base/generic_approximant.py +++ b/rrompy/reduction_methods/base/generic_approximant.py @@ -1,335 +1,339 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from abc import abstractmethod import numpy as np from copy import copy import scipy.sparse.linalg as spla from rrompy.utilities.types import Np1D, Np2D, DictAny, N2FSExpr from rrompy.utilities.types import HFEng, HSEng, Tuple, List from rrompy.utilities import purgeDict __all__ = ['GenericApproximant'] class GenericApproximant: """ ABSTRACT ROM approximant computation for parametric problems. Args: HFEngine: HF problem solver. 
Should have members: - energyNormMatrix: assemble sparse matrix (in CSC format) associated to weighted H10 norm; - problemData: list of HF problem data (excluding k); - setProblemData: set HF problem data and k to prescribed values; - getLSBlocks: get algebraic system blocks. HSEngine: Hilbert space general purpose engine. Should have members: - norm: compute Hilbert norm of expression represented as complex numpy vector; - plot: plot Hilbert expression represented as complex numpy vector. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True. Defaults to empty dict. Attributes: HFEngine: HF problem solver. HSEngine: Hilbert space general purpose engine. mu0: Default parameter. w: Weight for norm computation. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots. POD: Whether to compute POD of snapshots. energyNormMatrix: Sparse matrix (in CSC format) assoociated to w-weighted H10 norm. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. solLifting: Lifting of Dirichlet boundary data as numpy vector. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt mu. bs: List of numpy vectors representing coefficients of linear system RHS wrt mu. """ def __init__(self, HFEngine:HFEng, HSEngine:HSEng, mu0 : complex = 0, approxParameters : DictAny = {}): self.HFEngine = HFEngine self.HSEngine = HSEngine self._HFEngine0 = copy(HFEngine) if not hasattr(self, "parameterList"): self.parameterList = [] self.parameterList += ["POD"] self.solLifting = self.HFEngine.liftDirichletData() self.mu0 = mu0 self.approxParameters = approxParameters self.energyNormMatrix = self.HFEngine.energyNormMatrix() self.As, self.bs = self.HFEngine.getLSBlocks() def name(self) -> str: - """Approximant label.""" return self.__class__.__name__ + + def __str__(self) -> str: + return self.name() @property def approxParameters(self): """Value of approximant parameters.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): if not hasattr(self, "approxParameters"): self._approxParameters = {} approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters") keyList = list(approxParameters.keys()) if "POD" in keyList: self.POD = approxParameters["POD"] elif hasattr(self, "POD"): self.POD = self.POD else: self.POD = True @property def POD(self): """Value of POD.""" return self._POD @POD.setter def POD(self, POD): if hasattr(self, "POD"): PODold = self.POD else: PODold = -1 self._POD = POD self._approxParameters["POD"] = self.POD if PODold != self.POD: self.resetSamples() def solveHF(self, mu : complex = None): """ Find high fidelity solution with original parameters and arbitrary parameter. Args: mu: Target parameter. 
""" if mu is None: mu = self.mu0 if (not hasattr(self, "lastSolvedHF") or not np.isclose(self.lastSolvedHF, mu)): A, b = self.buildLS(mu) self.uHF = spla.spsolve(A, b) self.lastSolvedHF = mu - return self.uHF def buildLS(self, mu:complex, As : List[Np2D] = None, bs : List[Np1D] = None) -> Tuple[Np2D, Np1D]: """ Build linear system from polynomial blocks. Args: mu: Parameter value. As: Matrix blocks. If None, set to self.As. Defaults to None. bs: RHS blocks. If None, set to self.bs. Defaults to None. Returns: Matrix and RHS of system. """ if As is None: As = self.As if bs is None: bs = self.bs A = copy(As[0]) b = copy(bs[0]) for j in range(1, len(As)): A = A + np.power(mu, j) * As[j] for j in range(1, len(bs)): b = b + np.power(mu, j) * bs[j] return A, b @abstractmethod def resetSamples(self): """Reset samples. (ABSTRACT)""" pass @abstractmethod def setupApprox(self): """ Setup approximant. (ABSTRACT) Any specialization should include something like self.computeDerivatives() if not self.checkComputedApprox(): ... self.lastApproxParameters = copy(self.approxParameters) """ pass def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return (hasattr(self, "lastApproxParameters") and self.approxParameters == self.lastApproxParameters) @abstractmethod - def evalApprox(self, mu:complex) -> Np1D: + def evalApprox(self, mu:complex): """ Evaluate approximant at arbitrary parameter. (ABSTRACT) Any specialization should include something like self.setupApprox() self.uApp = ... + Args: + mu: Target parameter. + """ + pass + + def getHF(self, mu:complex) -> Np1D: + """ + Get HF solution at arbitrary wavenumber. + + Args: + mu: Target parameter. + + Returns: + HFsolution as numpy complex vector. + """ + self.solveHF(mu) + return self.uHF + + def getApp(self, mu:complex) -> Np1D: + """ + Get approximant at arbitrary wavenumber. + Args: mu: Target parameter. Returns: Approximant as numpy complex vector. """ - pass + self.evalApprox(mu) + return self.uApp @abstractmethod def getPoles(self) -> Np1D: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ pass - def HFNorm(self, mu:complex, normType : N2FSExpr = "H10") -> float: + def HFNorm(self, mu:complex, normType : N2FSExpr = None) -> float: """ Compute norm of HF solution at arbitrary wavenumber. Args: mu: Target parameter. normType(optional): Target norm identifier. Must be recognizable - by HSEngine norm command. If None, set to w. Defaults to None. + by HSEngine norm command. If None, set to + self.energyNormMatrix. Defaults to None. Returns: Target norm of HFsolution. """ - self.solveHF(mu) - return self.HSEngine.norm(self.uHF, normType) + if normType is None: normType = self.energyNormMatrix + uHF = self.getHF(mu) + return self.HSEngine.norm(uHF, normType) - def approxNorm(self, mu:complex, normType : N2FSExpr = "H10") -> float: + def approxNorm(self, mu:complex, normType : N2FSExpr = None) -> float: """ Compute norm of (translated) approximant at arbitrary wavenumber. Args: mu: Target parameter. normType(optional): Target norm identifier. Must be recognizable - by HSEngine norm command. If None, set to w. Defaults to None. + by HSEngine norm command. If None, set to + self.energyNormMatrix. Defaults to None. Returns: Target norm of approximant. 
""" - self.evalApprox(mu) - return self.HSEngine.norm(self.uApp, normType) + if normType is None: normType = self.energyNormMatrix + uApp = self.getApp(mu) + return self.HSEngine.norm(uApp, normType) - def approxError(self, mu:complex, normType : N2FSExpr = "H10") -> float: + def approxError(self, mu:complex, normType : N2FSExpr = None) -> float: """ Compute norm of approximant at arbitrary wavenumber. Args: mu: Target parameter. normType(optional): Target norm identifier. Must be recognizable - by HSEngine norm command. If None, set to w. Defaults to None. + by HSEngine norm command. If None, set to + self.energyNormMatrix. Defaults to None. Returns: Target norm of (approximant - HFsolution). """ - self.solveHF(mu) - self.evalApprox(mu) - return self.HSEngine.norm(self.uApp - self.uHF, normType) - - def getHF(self, mu:complex) -> Np1D: - """ - Get HF solution at arbitrary wavenumber. - - Args: - mu: Target parameter. - - Returns: - HFsolution as numpy complex vector. - """ - self.solveHF(mu) - return self.uHF - - def getApp(self, mu:complex) -> Np1D: - """ - Get approximant at arbitrary wavenumber. - - Args: - mu: Target parameter. - - Returns: - Approximant as numpy complex vector. - """ - self.evalApprox(mu) - return self.uApp + if normType is None: normType = self.energyNormMatrix + uApp = self.getApp(mu) + uHF = self.getHF(mu) + return self.HSEngine.norm(uApp - uHF, normType) def plotHF(self, mu:complex, name : str = "u", **figspecs): """ Do some nice plots of the HF solution at arbitrary wavenumber. Args: mu: Target parameter. name(optional): Name to be shown as title of the plots. Defaults to 'u'. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ - self.solveHF(mu) - self.HSEngine.plot(self.uHF, name = name, **figspecs) + uHF = self.getHF(mu) + self.HSEngine.plot(uHF, name = name, **figspecs) def plotApp(self, mu:complex, name : str = "u", **figspecs): """ Do some nice plots of approximant at arbitrary wavenumber. Args: mu: Target parameter. name(optional): Name to be shown as title of the plots. Defaults to 'u'. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ - self.evalApprox(mu) - self.HSEngine.plot(self.uApp, name = name, **figspecs) + uApp = self.getApp(mu) + self.HSEngine.plot(uApp, name = name, **figspecs) def plotErr(self, mu:complex, name : str = "u", **figspecs): """ Do some nice plots of approximation error at arbitrary wavenumber. Args: mu: Target parameter. name(optional): Name to be shown as title of the plots. Defaults to 'u'. figspecs(optional key args): Optional arguments for matplotlib figure creation. """ - self.evalApprox(mu) - self.solveHF(mu) - self.HSEngine.plot(self.uApp - self.uHF, name = name, **figspecs) + uApp = self.getApp(mu) + uHF = self.getHF(mu) + self.HSEngine.plot(uApp - uHF, name = name, **figspecs) diff --git a/rrompy/reduction_methods/base/parameter_sweeper.py b/rrompy/reduction_methods/base/parameter_sweeper.py index 3324401..18e6f2f 100644 --- a/rrompy/reduction_methods/base/parameter_sweeper.py +++ b/rrompy/reduction_methods/base/parameter_sweeper.py @@ -1,293 +1,309 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import os import csv import warnings import numpy as np -from rrompy.utilities.types import Np1D, DictAny, List, ROMEng, Tuple +from rrompy.utilities.types import Np1D, N2FSExpr, DictAny, List, ROMEng, Tuple from rrompy.utilities import purgeList __all__ = ['ParameterSweeper'] +def C2R2csv(x): + x = np.ravel(x) + y = np.concatenate((np.real(x), np.imag(x))) + z = np.ravel(np.reshape(y, [2, np.size(x)]).T) + return np.array2string(z, separator = '_', suppress_small = False, + max_line_width = np.inf, sign = '+', + formatter = {'all' : lambda x : "{:.15E}".format(x)} + )[1 : -1] + class ParameterSweeper: """ ROM approximant parameter sweeper. Args: ROMEngine(optional): Generic approximant class. Defaults to None. mutars(optional): Array of parameter values to sweep. Defaults to empty array. params(optional): List of parameter settings (each as a dict) to explore. Defaults to single empty set. mostExpensive(optional): String containing label of most expensive step, to be executed fewer times. Allowed options are 'HF' and 'Approx'. Defaults to 'HF'. + normType(optional): Target norm identifier. Must be recognizable by + HSEngine norm command. Defaults to None. Attributes: ROMEngine: Generic approximant class. mutars: Array of parameter values to sweep. params: List of parameter settings (each as a dict) to explore. mostExpensive: String containing label of most expensive step, to be executed fewer times. """ - allowedOutputs = ["HFNorm", "HFFunc", "AppNorm", "AppFunc", - "ErrNorm", "ErrFunc"] + allowedOutputsStandard = ["HFNorm", "AppNorm", "ErrNorm"] + allowedOutputs = allowedOutputsStandard + ["HFFunc", "AppFunc", "ErrFunc"] + allowedOutputsFull = allowedOutputs + ["poles"] def __init__(self, ROMEngine : ROMEng = None, mutars : Np1D = np.array([]), - params : List[DictAny] = [{}], mostExpensive : str = "HF"): + params : List[DictAny] = [{}], mostExpensive : str = "HF", + normType : N2FSExpr = None): self.ROMEngine = ROMEngine self.mutars = mutars self.params = params self.mostExpensive = mostExpensive + self.normType = normType def name(self) -> str: - """Approximant label.""" return self.__class__.__name__ + + def __str__(self) -> str: + return self.name() @property def mostExpensive(self): """Value of mostExpensive.""" return self._mostExpensive @mostExpensive.setter def mostExpensive(self, mostExpensive:str): mostExpensive = mostExpensive.upper() if mostExpensive not in ["HF", "APPROX"]: warnings.warn(("Value of mostExpensive not recognized. Overriding " "to 'HF'."), stacklevel = 2) mostExpensive = "HF" self._mostExpensive = mostExpensive def checkValues(self) -> bool: """Check if sweep can be performed.""" if self.ROMEngine is None: warnings.warn("ROMEngine is missing. Aborting.", stacklevel = 2) return False if len(self.mutars) == 0: warnings.warn("Empty target parameter vector. Aborting.", stacklevel = 2) return False if len(self.params) == 0: warnings.warn("Empty method parameters vector. 
Aborting.", stacklevel = 2) return False return True def sweep(self, filename : str = "./out", outputs : List[str] = [], verbose : int = 1): if not self.checkValues(): return try: if outputs.upper() == "ALL": - outputs = self.allowedOutputs + ["poles"] + outputs = self.allowedOutputsFull except: if len(outputs) == 0: - outputs = self.allowedOutputs - outputs = purgeList(outputs, self.allowedOutputs + ["poles"], + outputs = self.allowedOutputsStandard + outputs = purgeList(outputs, self.allowedOutputsFull, listname = self.name() + ".outputs") poles = ("poles" in outputs) if len(outputs) == 0: warnings.warn("Empty outputs. Aborting.", stacklevel = 2) return outParList = self.ROMEngine.parameterList Nparams = len(self.params) allowedParams = self.ROMEngine.parameterList while os.path.exists(filename): filename = filename + "{}".format(np.random.randint(10)) append_write = "w" initial_row = (outParList + ["muRe", "muIm"] + [x for x in self.allowedOutputs if x in outputs] + ["type"] + ["poles"] * poles) with open(filename, append_write, buffering = 1) as fout: writer = csv.writer(fout, delimiter=",") writer.writerow(initial_row) if self.mostExpensive == "HF": outerSet = self.mutars innerSet = self.params elif self.mostExpensive == "APPROX": outerSet = self.params innerSet = self.mutars for outerIdx, outerPar in enumerate(outerSet): if self.mostExpensive == "HF": i, mutar = outerIdx, outerPar elif self.mostExpensive == "APPROX": j, par = outerIdx, outerPar self.ROMEngine.approxParameters = {k: par[k] for k in\ par.keys() & allowedParams} self.ROMEngine.setupApprox() for innerIdx, innerPar in enumerate(innerSet): if self.mostExpensive == "APPROX": i, mutar = innerIdx, innerPar elif self.mostExpensive == "HF": j, par = innerIdx, innerPar self.ROMEngine.approxParameters = {k: par[k] for k in\ par.keys() & allowedParams} self.ROMEngine.setupApprox() if verbose >= 1: - print("Set {}/{}\tmu_{} = {:.10f}{}"\ - .format(j+1, Nparams, i, mutar, " " * 15), - end="\r") + print("Set {}/{}\tmu_{} = {:.10f}".format(j+1, Nparams, + i, mutar)) outData = [] if "HFNorm" in outputs: - val = self.ROMEngine.HFNorm(mutar) + val = self.ROMEngine.HFNorm(mutar, self.normType) if isinstance(val, (list,)): val = val[0] outData = outData + [val] if "HFFunc" in outputs: outData = outData +[self.ROMEngine.HFEngine.functional( - self.ROMEngine.solveHF(mutar))] + self.ROMEngine.getHF(mutar))] if "AppNorm" in outputs: - val = self.ROMEngine.approxNorm(mutar) + val = self.ROMEngine.approxNorm(mutar, self.normType) if isinstance(val, (list,)): val = val[0] outData = outData + [val] if "AppFunc" in outputs: outData = outData +[self.ROMEngine.HFEngine.functional( - self.ROMEngine.evalApprox(mutar))] + self.ROMEngine.getApp(mutar))] if "ErrNorm" in outputs: - val = self.ROMEngine.approxError(mutar) + val = self.ROMEngine.approxError(mutar, self.normType) if isinstance(val, (list,)): val = val[0] outData = outData + [val] if "ErrFunc" in outputs: outData = outData +[self.ROMEngine.HFEngine.functional( - self.ROMEngine.evalApprox(mutar) - - self.ROMEngine.solveHF(mutar))] + self.ROMEngine.getApp(mutar)) + - self.ROMEngine.HFEngine.functional( + self.ROMEngine.getHF(mutar))] writeData = [] for parn in outParList: writeData = (writeData + [self.ROMEngine.approxParameters[parn]]) writeData = (writeData + [mutar.real, mutar.imag] + outData + [self.ROMEngine.name()]) if poles: - writeData = writeData + list(self.ROMEngine.getPoles()) + writeData = writeData + C2R2csv( + self.ROMEngine.getPoles()) writer.writerow(str(x) for x in writeData) 
if verbose >= 1: if self.mostExpensive == "APPROX": - print("Set {}/{}\tdone{}".format(j+1, Nparams, " "*25)) + print("Set {}/{}\tdone".format(j+1, Nparams)) elif self.mostExpensive == "HF": - print("Point mu_{} = {:.10f}\tdone{}".format(i, mutar, - " " * 25)) + print("Point mu_{} = {:.10f}\tdone".format(i, mutar)) self.filename = filename return self.filename def read(self, filename:str, restrictions : DictAny = {}, outputs : List[str] = []) -> DictAny: """ Execute a query on a custom format CSV. Args: filename: CSV filename. restrictions(optional): Parameter configurations to output. Defaults to empty dictionary, i.e. output all. outputs(optional): Values to output. Defaults to empty list, i.e. no output. Returns: Dictionary of desired results, with a key for each entry of outputs, and a numpy 1D array as corresponding value. """ def pairMuFromCSV(filename:str, muTarget:complex) -> Tuple[str, str]: """ Find complex point in CSV closer to a prescribed value. Args: filename: CSV filename. muTarget: Target complex value. Returns: Strings containing real and imaginary part of desired value, in the same format as in the CSV file. """ mutarsF = np.array([], dtype = complex) muRetarsF = np.array([], dtype = complex) muImtarsF = np.array([], dtype = complex) with open(filename, 'r') as f: reader = csv.reader(f, delimiter=',') header = next(reader) muReindex = header.index('muRe') muImindex = header.index('muIm') for row in reader: try: if row[muReindex] not in [" ", ""]: muRetarsF = np.append(muRetarsF, row[muReindex]) muImtarsF = np.append(muImtarsF, row[muImindex]) mutarsF = np.append(mutarsF, float(row[muReindex]) + 1.j * float(row[muImindex])) except: pass optimalIndex = np.argmin(np.abs(mutarsF - muTarget)) return [muRetarsF[optimalIndex], muImtarsF[optimalIndex]] with open(filename, 'r') as f: reader = csv.reader(f, delimiter=',') header = next(reader) restrIndices, outputIndices, outputData = {}, {}, {} for key in restrictions.keys(): try: restrIndices[key] = header.index(key) if not isinstance(restrictions[key], list): restrictions[key] = [restrictions[key]] restrictions[key] = [str(x) for x in restrictions[key]] except: warnings.warn("Ignoring key {} from restrictions"\ .format(key), stacklevel = 2) if 'muRe' in restrIndices.keys() or 'muIm' in restrIndices.keys(): if 'muRe' not in restrIndices.keys(): restrIndices['muRe'] = header.index('muRe') restrictions['muRe'] = [0.] * len(restrictions['muIm']) elif 'muIm' not in restrIndices.keys(): restrIndices['muIm'] = header.index('muIm') restrictions['muIm'] = [0.] 
* len(restrictions['muRe']) elif len(restrictions['muRe']) != len(restrictions['muIm']): raise Exception(("The lists of values for muRe and muIm " "must have the same length.")) for i in range(len(restrictions['muRe'])): mu = (1.0 * float(restrictions['muRe'][i]) + 1.j * float(restrictions['muIm'][i])) restrictions['muRe'][i], restrictions['muIm'][i] =\ pairMuFromCSV(filename, mu) for key in outputs: try: outputIndices[key] = header.index(key) outputData[key] = np.array([]) except: warnings.warn("Ignoring key {} from outputs".format(key), stacklevel = 2) for row in reader: if all([row[restrIndices[key]] in restrictions[key]\ for key in restrictions.keys()]): for key in outputIndices.keys(): try: val = row[outputIndices[key]] val = int(val) except: try: val = float(val) except: val = np.nan finally: outputData[key] = np.append(outputData[key], val) return outputData diff --git a/rrompy/reduction_methods/base/pod_engine.py b/rrompy/reduction_methods/base/pod_engine.py index 1ba4793..03b7f03 100644 --- a/rrompy/reduction_methods/base/pod_engine.py +++ b/rrompy/reduction_methods/base/pod_engine.py @@ -1,133 +1,139 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from copy import copy from rrompy.utilities.types import Np1D, Np2D, Tuple __all__ = ['PODEngine'] class PODEngine: """ POD engine for general matrix orthogonalization. Args/Attributes: M: Square matrix associated to scalar product. """ def __init__(self, M:Np2D): self.M = M + + def name(self) -> str: + return self.__class__.__name__ + + def __str__(self) -> str: + return self.name() def norm(self, a:Np1D) -> float: """Compute norm of a vector.""" return np.sqrt(np.abs(a.conj().T.dot(self.M.dot(a)))) def GS(self, a:Np1D, Q:Np2D, n : int = None) -> Tuple[Np1D, Np1D]: """ Compute 1 Gram-Schmidt step with given projector. Args: a: vector to be projected; Q: orthogonal projection matrix; n: number of columns of Q to be considered. Returns: Resulting normalized vector, coefficients of a wrt the updated basis. """ if n is None: n = Q.shape[1] Q = Q[:, : n] r = np.zeros((n + 1,), dtype = a.dtype) if n > 0: for j in range(2): # twice is enough! nu = Q.conj().T.dot(self.M.dot(a)) a = a - Q.dot(nu) r[:-1] = r[:-1] + nu r[-1] = self.norm(a) if np.isclose(np.abs(r[-1]), 0.): r[-1] = 1. a = a / r[-1] return a, r def QRGramSchmidt(self, A:Np2D, only_R : bool = False) -> Tuple[Np1D, Np1D]: """ Compute QR decomposition of a matrix through Gram-Schmidt method. Args: A: matrix to be decomposed; only_R(optional): whether to skip reconstruction of Q; defaults to False. Returns: Resulting orthogonal and upper-triangular factors. 
""" N = A.shape[1] Q = np.zeros_like(A, dtype = A.dtype) R = np.zeros((N, N), dtype = A.dtype) for k in range(N): Q[:, k], R[: k + 1, k] = self.GS(A[:, k], Q, k) if only_R: return R return Q, R def QRHouseholder(self, A:Np2D, Q0 : Np2D = None, only_R : bool = False) -> Tuple[Np1D, Np1D]: """ Compute QR decomposition of a matrix through Householder method. Args: A: matrix to be decomposed; Q0(optional): initial orthogonal guess for Q; defaults to random; only_R(optional): whether to skip reconstruction of Q; defaults to False. Returns: Resulting (orthogonal and )upper-triangular factor(s). """ B = copy(A) N = B.shape[1] V = np.zeros_like(B, dtype = B.dtype) R = np.zeros((N, N), dtype = B.dtype) if Q0 is None: Q = np.zeros_like(B, dtype = B.dtype) + np.random.randn(*(B.shape)) else: Q = copy(Q0) for k in range(N): if Q0 is None: Q[:, k], _ = self.GS(Q[:, k], Q, k) a = B[:, k] R[k, k] = self.norm(a) alpha = Q[:, k].conj().T.dot(self.M.dot(a)) if np.isclose(np.abs(alpha), 0.): s = 1. else: s = - alpha / np.abs(alpha) Q[:, k] = s * Q[:, k] V[:, k], _ = self.GS(R[k, k] * Q[:, k] - a, Q, k) J = np.arange(k + 1, N) vtB = V[:, k].conj().dot(self.M.dot(B[:, J])) B[:, J] = B[:, J] - 2 * np.outer(V[:, k], vtB) R[k, J] = Q[:, k].conj().T.dot(self.M.dot(B[:, J])) B[:, J] = B[:, J] - np.outer(Q[:, k], R[k, J]) if only_R: return R for k in range(N - 1, -1, -1): J = np.arange(k, N) vtQ = V[:, k].conj().T.dot(self.M.dot(Q[:, J])) Q[:, J] = Q[:, J] - 2 * np.outer(V[:, k], vtQ) return Q, R diff --git a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py index 0e75180..e662eda 100644 --- a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py +++ b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py @@ -1,277 +1,273 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import warnings import numpy as np from rrompy.reduction_methods.lagrange.generic_approximant_lagrange import GenericApproximantLagrange from rrompy.utilities.types import Np1D, ListAny, DictAny, HSEng, HFEng from rrompy.utilities import purgeDict __all__ = ['ApproximantLagrangePade'] class ApproximantLagrangePade(GenericApproximantLagrange): """ ROM Lagrange Pade' interpolant computation for parametric problems. Args: HFEngine: HF problem solver. Should have members: - energyNormMatrix: assemble sparse matrix (in CSC format) associated to weighted H10 norm; - problemData: list of HF problem data (excluding mu); - setProblemData: set HF problem data and mu to prescribed values; - getLSBlocks: get algebraic system blocks. HSEngine: Hilbert space general purpose engine. Should have members: - norm: compute Hilbert norm of expression represented as complex numpy vector; - plot: plot Hilbert expression represented as complex numpy vector. mu0(optional): Default parameter. Defaults to 0. 
approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'S': total number of samples current approximant relies upon; defaults to 2; - 'sampler': sample point generator; defaults to uniform sampler on [0, 1]; - 'M': degree of Pade' interpolant numerator; defaults to 0; - 'N': degree of Pade' interpolant denominator; defaults to 0. Defaults to empty dict. plotSnap(optional): What to plot of snapshots of the Helmholtz solution map. See plot method in HSEngine. Defaults to []. plotSnapSpecs(optional): How to plot snapshots of the Helmholtz solution map. See plot method in HSEngine. Defaults to {}. Attributes: HFEngine: HF problem solver. HSEngine: Hilbert space general purpose engine. mu0: Default parameter. mus: Array of snapshot parameters. ws: Array of snapshot weigths. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'M': degree of Pade' interpolant numerator; - 'N': degree of Pade' interpolant denominator; extraApproxParameters: List of approxParameters keys in addition to mother class's. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. M: Numerator degree of approximant. N: Denominator degree of approximant. plotSnap: What to plot of snapshots of the Helmholtz solution map. plotSnapSpecs: How to plot snapshots of the Helmholtz solution map. solSnapshots: Array whose columns are FE dofs of snapshots of solution map. RPOD: Right factor of snapshots POD. If no POD, set to None. POD: Whether to compute POD of snapshots. energyNormMatrix: Sparse matrix (in CSC format) assoociated to w-weighted H10 norm. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. solLifting: Lifting of Dirichlet boundary data as numpy vector. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt mu. bs: List of numpy vectors representing coefficients of linear system RHS wrt mu. """ def __init__(self, HFEngine:HFEng, HSEngine:HSEng, mu0 : complex = 0., approxParameters : DictAny = {}, plotSnap : ListAny = [], plotSnapSpecs : DictAny = {}): if not hasattr(self, "parameterList"): self.parameterList = [] self.parameterList += ["M", "N"] super().__init__(HFEngine = HFEngine, HSEngine = HSEngine, mu0 = mu0, approxParameters = approxParameters, plotSnap = plotSnap, plotSnapSpecs = plotSnapSpecs) @property def approxParameters(self): """ Value of approximant parameters. Its assignment may change M, N and S. 
""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters") approxParametersCopy = purgeDict(approxParameters, ["M", "N"], True, True) GenericApproximantLagrange.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "M" in keyList: self.M = approxParameters["M"] elif hasattr(self, "M"): self.M = self.M else: self.M = 0 if "N" in keyList: self.N = approxParameters["N"] elif hasattr(self, "N"): self.N = self.N else: self.N = 0 @property def M(self): """Value of M. Its assignment may change S.""" return self._M @M.setter def M(self, M): if M < 0: raise ArithmeticError("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M if hasattr(self, "S") and self.S < self.M + 1: warnings.warn("Prescribed S is too small. Updating S to M + 1.", stacklevel = 2) self.S = self.M + 1 @property def N(self): """Value of N. Its assignment may change S.""" return self._N @N.setter def N(self, N): if N < 0: raise ArithmeticError("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N if hasattr(self, "S") and self.S < self.N + 1: warnings.warn("Prescribed S is too small. Updating S to N + 1.", stacklevel = 2) self.S = self.N + 1 @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): if S <= 0: raise ArithmeticError("S must be positive.") if hasattr(self, "S"): Sold = self.S else: Sold = -1 vals, label = [0] * 2, {0:"M", 1:"N"} if hasattr(self, "M"): vals[0] = self.M if hasattr(self, "N"): vals[1] = self.N idxmax = np.argmax(vals) if vals[idxmax] + 1 > S: warnings.warn("Prescribed S is too small. Updating S to {} + 1."\ .format(label[idxmax]), stacklevel = 2) self.S = vals[idxmax] + 1 else: self._S = S self._approxParameters["S"] = self.S if Sold != self.S: self.resetSamples() def approxRadius(self) -> float: """Sample radius.""" return np.max(self.mu0 - self.mus) def setupApprox(self): """ Compute Pade' interpolant. SVD-based robust eigenvalue management. """ if not self.checkComputedApprox(): self.computeSnapshots() TN = np.vander(self.radiusPade(self.mus), N = self.N + 1, increasing = True) if self.POD: data = self.RPOD.T else: data = self.solSnapshots.T RHSFull = np.empty((TN.shape[0], data.shape[1] * TN.shape[1]), dtype = np.complex) for j in range(TN.shape[0]): RHSFull[j, :] = np.kron(TN[j, :], data[j, :]) G = np.polyfit(self.radiusPade(self.mus), RHSFull, deg = self.N, w = self.ws)[0, :].reshape((TN.shape[1], data.shape[1])).T if self.POD: _, _, V = np.linalg.svd(G, full_matrices = False) self.Q = V[-1, :].conj() else: G2 = G.conj().T.dot(self.energyNormMatrix.dot(G)) _, eV = np.linalg.eigh(G2) self.Q = eV[:, 0] TNQ = TN.dot(self.Q).reshape((self.S, 1)) if self.POD: data = self.solSnapshots.dot(self.RPOD) else: data = self.solSnapshots RHS = np.multiply(data.T, TNQ) self.P = np.polyfit(self.radiusPade(self.mus), RHS, deg = self.M, w = self.ws)[::-1, :].T self.approxParameters = {"N" : self.N, "M" : self.M, "S" : self.S} self.lastApproxParameters = copy(self.approxParameters) def radiusPade(self, mu:Np1D, mu0 : float = None) -> float: """ Compute translated radius to be plugged into Pade' approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.mu0. Returns: Translated radius to be plugged into Pade' approximant. 
""" if mu0 is None: mu0 = self.mu0 return (mu - mu0) / self.approxRadius() - def evalApprox(self, mu:complex) -> Np1D: + def evalApprox(self, mu:complex): """ Evaluate Pade' approximant at arbitrary wavenumber. Args: mu: Target parameter. - - Returns: - Real and imaginary parts of approximant. """ self.setupApprox() powerlist = np.power(self.radiusPade(mu), range(max(self.M, self.N) + 1)) self.uApp = (self.P.dot(powerlist[:self.M + 1]) / self.Q.dot(powerlist[:self.N + 1])) - return self.uApp def getPoles(self) -> Np1D: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ self.setupApprox() return np.roots(self.Q[::-1]) * self.approxRadius() + self.mu0 diff --git a/rrompy/reduction_methods/lagrange/approximant_lagrange_rb.py b/rrompy/reduction_methods/lagrange/approximant_lagrange_rb.py index d6c1033..34210b4 100644 --- a/rrompy/reduction_methods/lagrange/approximant_lagrange_rb.py +++ b/rrompy/reduction_methods/lagrange/approximant_lagrange_rb.py @@ -1,251 +1,247 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import warnings import numpy as np import scipy as sp from rrompy.reduction_methods.lagrange.generic_approximant_lagrange import GenericApproximantLagrange from rrompy.utilities.types import Np1D, ListAny, DictAny, HFEng, HSEng from rrompy.utilities import purgeDict __all__ = ['ApproximantLagrangeRB'] class ApproximantLagrangeRB(GenericApproximantLagrange): """ ROM RB approximant computation for parametric problems. Args: HFEngine: HF problem solver. Should have members: - energyNormMatrix: assemble sparse matrix (in CSC format) associated to weighted H10 norm; - problemData: list of HF problem data (excluding mu); - setProblemData: set HF problem data and mu to prescribed values; - getLSBlocks: get algebraic system blocks; - liftDirichletData: perform lifting of Dirichlet BC as numpy complex vector. HSEngine: Hilbert space general purpose engine. Should have members: - norm: compute Hilbert norm of expression represented as complex numpy vector; - plot: plot Hilbert expression represented as complex numpy vector. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'S': total number of samples current approximant relies upon; defaults to 2; - 'sampler': sample point generator; defaults to uniform sampler on [0, 1]; - 'R': rank for Galerkin projection; defaults to S. Defaults to empty dict. plotSnap(optional): What to plot of snapshots of the Helmholtz solution map. See plot method in HSEngine. Defaults to []. plotSnapSpecs(optional): How to plot snapshots of the Helmholtz solution map. See plot method in HSEngine. Defaults to {}. Attributes: HFEngine: HF problem solver. HSEngine: Hilbert space general purpose engine. mu0: Default parameter. 
mus: Array of snapshot parameters. ws: Array of snapshot weigths (unused). approxRadius: Dummy radius of approximant (i.e. distance from mu0 to farthest sample point). approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'R': rank for Galerkin projection. extraApproxParameters: List of approxParameters keys in addition to mother class's. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. R: Rank for Galerkin projection. plotSnap: What to plot of snapshots of the Helmholtz solution map. plotSnapSpecs: How to plot snapshots of the Helmholtz solution map. solSnapshots: Array whose columns are FE dofs of snapshots of solution map. RPOD: Right factor of snapshots POD. If no POD, set to None. POD: Whether to compute POD of snapshots. projMat: Projection matrix for RB system assembly. energyNormMatrix: Sparse matrix (in CSC format) assoociated to w-weighted H10 norm. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. solLifting: Lifting of Dirichlet boundary data as numpy vector. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt mu. bs: List of numpy vectors representing coefficients of linear system RHS wrt mu. ARBs: List of sparse matrices (in CSC format) representing coefficients of compressed linear system matrix wrt mu. bRBs: List of numpy vectors representing coefficients of compressed linear system RHS wrt mu. """ def __init__(self, HFEngine:HFEng, HSEngine:HSEng, mu0 : complex = 0., approxParameters : DictAny = {}, plotSnap : ListAny = [], plotSnapSpecs : DictAny = {}): if not hasattr(self, "parameterList"): self.parameterList = [] self.parameterList += ["R"] super().__init__(HFEngine = HFEngine, HSEngine = HSEngine, mu0 = mu0, approxParameters = approxParameters, plotSnap = plotSnap, plotSnapSpecs = plotSnapSpecs) def resetSamples(self): """Reset samples.""" super().resetSamples() self.projMat = None @property def approxParameters(self): """ Value of approximant parameters. Its assignment may change M, N and S. """ return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters") approxParametersCopy = purgeDict(approxParameters, ["R"], True, True) GenericApproximantLagrange.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "R" in keyList: self.R = approxParameters["R"] elif hasattr(self, "R"): self.R = self.R else: self.R = self.S @property def R(self): """Value of R. Its assignment may change S.""" return self._R @R.setter def R(self, R): if R < 0: raise ArithmeticError("R must be non-negative.") self._R = R self._approxParameters["R"] = self.R if hasattr(self, "S") and self.S < self.R: warnings.warn("Prescribed S is too small. Updating S to R.", stacklevel = 2) self.S = self.R def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. 
Returns: True if new setup is not needed. False otherwise. """ return (self.projMat is not None and super().checkComputedApprox()) def setupApprox(self): """Compute RB projection matrix.""" if not self.checkComputedApprox(): self.computeSnapshots() if self.POD: U, _, _ = np.linalg.svd(self.RPOD, full_matrices = False) self.projMat = self.solSnapshots.dot(U[:, : self.R]) else: self.projMat = self.solSnapshots[:, : self.R] self.lastApproxParameters = copy(self.approxParameters) self.assembleReducedSystem() def assembleReducedSystem(self): """Build affine blocks of RB linear system through projections.""" self.setupApprox() projMatH = self.projMat.T.conjugate() self.ARBs = [None] * len(self.As) self.bRBs = [None] * len(self.bs) for j in range(len(self.As)): self.ARBs[j] = projMatH.dot(self.As[j].dot(self.projMat)) for j in range(len(self.bs)): self.bRBs[j] = projMatH.dot(self.bs[j]) def solveReducedSystem(self, mu:complex) -> Np1D: """ Solve RB linear system. Args: mu: Target parameter. Returns: Solution of RB linear system. """ self.setupApprox() ARBmu = self.ARBs[0][:self.R,:self.R] bRBmu = self.bRBs[0][:self.R] for j in range(1, len(self.ARBs)): ARBmu = ARBmu + np.power(mu, j) * self.ARBs[j][:self.R, :self.R] for j in range(1, len(self.bRBs)): bRBmu = bRBmu + np.power(mu, j) * self.bRBs[j][:self.R] return self.projMat[:, :self.R].dot(np.linalg.solve(ARBmu, bRBmu)) - def evalApprox(self, mu:complex) -> Np1D: + def evalApprox(self, mu:complex): """ Evaluate RB approximant at arbitrary wavenumber. Args: mu: Target parameter. - - Returns: - Numpy 1D array with approximant dofs. """ self.setupApprox() self.uApp = self.solveReducedSystem(mu) - return self.uApp def getPoles(self) -> Np1D: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ self.setupApprox() A = np.eye(self.ARBs[0].shape[0] * (len(self.ARBs) - 1), dtype = np.complex) B = np.zeros_like(A) A[: self.ARBs[0].shape[0], : self.ARBs[0].shape[0]] = - self.ARBs[0] for j in range(len(self.ARBs) - 1): Aj = self.ARBs[j + 1] B[: Aj.shape[0], j * Aj.shape[0] : (j + 1) * Aj.shape[0]] = Aj II = np.arange(self.ARBs[0].shape[0], self.ARBs[0].shape[0] * (len(self.ARBs) - 1)) B[II, II - self.ARBs[0].shape[0]] = 1. try: return sp.linalg.eigvals(A, B) except: warnings.warn("Generalized eig algorithm did not converge.", stacklevel = 2) x = np.empty(A.shape[0]) x[:] = np.NaN return x diff --git a/rrompy/reduction_methods/lagrange/generic_approximant_lagrange.py b/rrompy/reduction_methods/lagrange/generic_approximant_lagrange.py index 5eb3dd0..5bcac70 100644 --- a/rrompy/reduction_methods/lagrange/generic_approximant_lagrange.py +++ b/rrompy/reduction_methods/lagrange/generic_approximant_lagrange.py @@ -1,223 +1,223 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import numpy as np from rrompy.reduction_methods.base.generic_approximant import GenericApproximant from rrompy.reduction_methods.base.pod_engine import PODEngine from rrompy.utilities.types import Np1D, ListAny, DictAny, HFEng, HSEng from rrompy.utilities import purgeDict __all__ = ['GenericApproximantLagrange'] class GenericApproximantLagrange(GenericApproximant): """ ROM Lagrange interpolant computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. Should have members: - energyNormMatrix: assemble sparse matrix (in CSC format) associated to weighted H10 norm; - problemData: list of HF problem data (excluding mu); - setProblemData: set HF problem data and k to prescribed values; - getLSBlocks: get algebraic system blocks. HSEngine: Hilbert space general purpose engine. Should have members: - norm: compute Hilbert norm of expression represented as complex numpy vector; - plot: plot Hilbert expression represented as complex numpy vector. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; defaults to uniform sampler on [0, 1]. Defaults to empty dict. plotSnap(optional): What to plot of snapshots of the Helmholtz solution map. See plot method in HSEngine. Defaults to []. plotSnapSpecs(optional): How to plot snapshots of the Helmholtz solution map. See plot method in HSEngine. Defaults to {}. Attributes: HFEngine: HF problem solver. HSEngine: Hilbert space general purpose engine. mu0: Default parameter. mus: Array of snapshot parameters. ws: Array of snapshot weigths. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'S': total number of snapshots current approximant relies upon; - 'sampler': sample point generator. extraApproxParameters: List of approxParameters keys in addition to mother class's. S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. plotSnap: What to plot of snapshots of the Helmholtz solution map. plotSnapSpecs: How to plot snapshots of the Helmholtz solution map. solSnapshots: Array whose columns are FE dofs of snapshots of solution map. RPOD: Right factor of snapshots POD. If no POD, set to None. POD: Whether to compute POD of snapshots. energyNormMatrix: Sparse matrix (in CSC format) assoociated to w-weighted H10 norm. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. solLifting: Lifting of Dirichlet boundary data as numpy vector. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt mu. bs: List of numpy vectors representing coefficients of linear system RHS wrt mu. 
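The sampler attribute documented above is duck-typed: the setter below only checks for a generatePoints member, and computeSnapshots expects it to return a list of one-entry parameter arrays plus an array of weights. A minimal hypothetical sampler honouring that contract (illustration only, not part of this changeset):

    import numpy as np

    class ListSampler:
        """Hypothetical sampler wrapping a fixed list of points (illustration only)."""
        def __init__(self, points):
            self.points = list(points)
        def generatePoints(self, n):
            mus = [np.array([p]) for p in self.points[:n]]   # one 1D array per sample point
            ws = np.ones(len(mus))                           # uniform weights
            return mus, ws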
""" def __init__(self, HFEngine:HFEng, HSEngine:HSEng, mu0 : complex = 0., approxParameters : DictAny = {}, plotSnap : ListAny = [], plotSnapSpecs : DictAny = {}): if not hasattr(self, "parameterList"): self.parameterList = [] self.parameterList += ["S", "sampler"] super().__init__(HFEngine = HFEngine, HSEngine = HSEngine, mu0 = mu0, approxParameters = approxParameters) self.plotSnap = plotSnap self.plotSnapSpecs = plotSnapSpecs self.resetSamples() @property def mus(self): """Value of mus. Its assignment may reset snapshots.""" return self._mus @mus.setter def mus(self, mus): musOld = self.mus if hasattr(self, 'mus') else None self._mus = np.array(mus) _, musCounts = np.unique(self._mus, return_counts = True) if len(np.where(musCounts > 1)[0]) > 0: raise Exception("Repeated sample points not allowed.") if (musOld is None or len(self.mus) != len(musOld) or not np.allclose(self.mus, musOld, 1e-14)): self.resetSamples() self.autoNode = None @property def approxParameters(self): """Value of approximant parameters. Its assignment may change S.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters") approxParametersCopy = purgeDict(approxParameters, ["S", "sampler"], True, True) GenericApproximant.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "S" in keyList: self.S = approxParameters["S"] elif hasattr(self, "S"): self.S = self.S else: self.S = 2 if "sampler" in keyList: self.sampler = approxParameters["sampler"] elif not hasattr(self, "S"): from rrompy.sampling import QuadratureSampler self.sampler = QuadratureSampler([0., 1.], "UNIFORM") del QuadratureSampler @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): if S <= 0: raise ArithmeticError("S must be positive.") if hasattr(self, "S"): Sold = self.S else: Sold = -1 self._S = S self._approxParameters["S"] = self.S if Sold != self.S: self.resetSamples() @property def sampler(self): """Value of sampler.""" return self._sampler @sampler.setter def sampler(self, sampler): if 'generatePoints' not in dir(sampler): raise Exception("Sampler type not recognized.") if hasattr(self, '_sampler'): samplerOld = self.sampler self._sampler = sampler self._approxParameters["sampler"] = self.sampler if not 'samplerOld' in locals() or samplerOld != self.sampler: self.resetSamples() def resetSamples(self): """Reset samples.""" self.solSnapshots = None self.RPOD = None def computeSnapshots(self): """ Compute snapshots of solution map. """ if self.solSnapshots is None: self.mus, self.ws = self.sampler.generatePoints(self.S) self.mus = np.array([x[0] for x in self.mus]) for j, mu in enumerate(self.mus): self.solveHF(mu) self.HSEngine.plot(self.uHF, name = "u({:.4f})".format(mu), what = self.plotSnap, **self.plotSnapSpecs) self.manageSnapshots(self.uHF, j) def manageSnapshots(self, u:Np1D, pos:int): """ Store snapshots of solution map. Args: u: solution derivative as numpy complex vector; pos: Derivative index. 
""" if pos == 0: self.solSnapshots = np.empty((u.shape[0], self.S), dtype = np.complex) self.solSnapshots[:, pos] = u if self.POD: if pos == 0: self.PODEngine = PODEngine(self.energyNormMatrix) self.RPOD = np.eye(self.S, dtype = np.complex) - self.solDerivatives[:, pos], self.RPOD[: pos + 1, pos] = ( - self.PODEngine.GS(self.solDerivatives[:, pos], - self.solDerivatives, pos)) + self.solSnapshots[:, pos], self.RPOD[: pos + 1, pos] = ( + self.PODEngine.GS(self.solSnapshots[:, pos], + self.solSnapshots, pos)) def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return self.solSnapshots is not None and super().checkComputedApprox() diff --git a/rrompy/reduction_methods/taylor/approximant_taylor_pade.py b/rrompy/reduction_methods/taylor/approximant_taylor_pade.py index 831f930..e0f331c 100644 --- a/rrompy/reduction_methods/taylor/approximant_taylor_pade.py +++ b/rrompy/reduction_methods/taylor/approximant_taylor_pade.py @@ -1,433 +1,429 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import warnings import numpy as np from rrompy.reduction_methods.taylor.generic_approximant_taylor import GenericApproximantTaylor from rrompy.reduction_methods.base.pod_engine import PODEngine from rrompy.utilities.types import Np1D, Np2D, ListAny, Tuple, DictAny from rrompy.utilities.types import HFEng, HSEng from rrompy.utilities import purgeDict __all__ = ['ApproximantTaylorPade'] class ApproximantTaylorPade(GenericApproximantTaylor): """ ROM single-point fast Pade' approximant computation for parametric problems. Args: HFEngine: HF problem solver. Should have members: - energyNormMatrix: assemble sparse matrix (in CSC format) associated to weighted H10 norm; - problemData: list of HF problem data (excluding mu); - setProblemData: set HF problem data and mu to prescribed values; - setupDerivativeComputation: setup HF problem data to compute j-th solution derivative at mu0; - solve: get HF solution as complex numpy vector. HSEngine: Hilbert space general purpose engine. Should have members: - norm: compute Hilbert norm of expression represented as complex numpy vector; - plot: plot Hilbert expression represented as complex numpy vector. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'rho': weight for computation of original Pade' approximant; defaults to np.inf, i.e. 
fast approximant; - 'M': degree of Pade' approximant numerator; defaults to 0; - 'N': degree of Pade' approximant denominator; defaults to 0; - 'E': total number of derivatives current approximant relies upon; defaults to Emax; - 'Emax': total number of derivatives of solution map to be computed; defaults to E; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0; - 'sampleType': label of sampling type; available values are: - 'ARNOLDI': orthogonalization of solution derivatives through Arnoldi algorithm; - 'KRYLOV': standard computation of solution derivatives. Defaults to 'KRYLOV'. Defaults to empty dict. plotDer(optional): What to plot of derivatives of the Helmholtz solution map. See plot method in HSEngine. Defaults to []. plotDerSpecs(optional): How to plot derivatives of the Helmholtz solution map. See plot method in HSEngine. Defaults to {}. Attributes: HFEngine: HF problem solver. HSEngine: Hilbert space general purpose engine. mu0: Default parameter. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'rho': weight for computation of original Pade' approximant; - 'M': degree of Pade' approximant numerator; - 'N': degree of Pade' approximant denominator; - 'E': total number of derivatives current approximant relies upon; - 'Emax': total number of derivatives of solution map to be computed; - 'robustTol': tolerance for robust Pade' denominator management; - 'sampleType': label of sampling type. solDerivatives: Array whose columns are FE dofs of solution derivatives. RArnoldi: Right factor of derivatives QR factorization. If no Arnoldi, set to None. HArnoldi: Hessenberg factor of derivatives Arnoldi algorithm. If no Arnoldi, set to None. POD: Whether to compute QR factorization of derivatives. rho: Weight of approximant. M: Numerator degree of approximant. N: Denominator degree of approximant. E: Number of solution derivatives over which current approximant is based upon. Emax: Total number of solution derivatives to be computed. robustTol: tolerance for robust Pade' denominator management. sampleType: Label of sampling type. plotDer: What to plot of derivatives of the Helmholtz solution map. plotDerSpecs: How to plot derivatives of the Helmholtz solution map. initialHFData: HF problem initial data. energyNormMatrix: Sparse matrix (in CSC format) assoociated to w-weighted H10 norm. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. solLifting: Lifting of Dirichlet boundary data as numpy vector. G: Square Numpy 2D vector of size (N+1) corresponding to Pade' denominator matrix (see paper). Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. 
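Of the parameters listed above, rho and robustTol are the ones specific to this Pade' class. A hypothetical configuration exercising them (solver and plotter stand for an HF engine and a Hilbert-space engine set up separately):

    from rrompy.reduction_methods.taylor import ApproximantTaylorPade as TP

    # weighted (finite-rho) Pade' approximant with a robustness tolerance on the denominator
    params = {'M': 4, 'N': 4, 'E': 10, 'Emax': 10, 'rho': 1.5,
              'robustTol': 1e-10, 'sampleType': 'KRYLOV', 'POD': False}
    approx = TP(solver, plotter, mu0 = 10. + 1.j, approxParameters = params)
    approx.setupApprox()       # may shrink N and E if denominator eigenvalues fall below the tolerance
    print(approx.getPoles())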
""" def __init__(self, HFEngine:HFEng, HSEngine:HSEng, mu0 : complex = 0, approxParameters : DictAny = {}, plotDer : ListAny = [], plotDerSpecs : DictAny = {}): if not hasattr(self, "parameterList"): self.parameterList = [] self.parameterList += ["M", "N", "robustTol", "rho"] super().__init__(HFEngine = HFEngine, HSEngine = HSEngine, mu0 = mu0, approxParameters = approxParameters, plotDer = plotDer, plotDerSpecs = plotDerSpecs) @property def approxParameters(self): """Value of approximant parameters.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters") approxParametersCopy = purgeDict(approxParameters, ["M", "N", "robustTol", "rho"], True, True) keyList = list(approxParameters.keys()) if "rho" in keyList: self._rho = approxParameters["rho"] elif hasattr(self, "rho"): self._rho = self.rho else: self._rho = np.inf GenericApproximantTaylor.approxParameters.fset(self, approxParametersCopy) self.rho = self._rho if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif hasattr(self, "robustTol"): self.robustTol = self.robustTol else: self.robustTol = 0 self._ignoreParWarnings = True if "M" in keyList: self.M = approxParameters["M"] elif hasattr(self, "M"): self.M = self.M else: self.M = 0 del self._ignoreParWarnings if "N" in keyList: self.N = approxParameters["N"] elif hasattr(self, "N"): self.N = self.N else: self.N = 0 @property def rho(self): """Value of rho.""" return self._rho @rho.setter def rho(self, rho): self._rho = np.abs(rho) self._approxParameters["rho"] = self.rho @property def M(self): """Value of M. Its assignment may change Emax and E.""" return self._M @M.setter def M(self, M): if M < 0: raise ArithmeticError("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M if not hasattr(self, "_ignoreParWarnings"): self.checkMNEEmax() @property def N(self): """Value of N. Its assignment may change Emax.""" return self._N @N.setter def N(self, N): if N < 0: raise ArithmeticError("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N self.checkMNEEmax() def checkMNEEmax(self): """Check consistency of M, N, E, and Emax.""" M = self.M if hasattr(self, "_M") else 0 N = self.N if hasattr(self, "_N") else 0 E = self.E if hasattr(self, "_E") else M + N Emax = self.Emax if hasattr(self, "_Emax") else M + N if self.rho == np.inf: if Emax < max(N, M): warnings.warn(("Prescribed Emax is too small. Updating Emax " "to max(M, N)."), stacklevel = 3) self.Emax = max(N, M) if E < max(N, M): warnings.warn(("Prescribed E is too small. Updating E to " "max(M, N)."), stacklevel = 3) self.E = max(N, M) else: if Emax < N + M: warnings.warn(("Prescribed Emax is too small. Updating " "Emax to M + N."), stacklevel = 3) self.Emax = self.N + M if E < N + M: warnings.warn(("Prescribed E is too small. Updating E to " "M + N."), stacklevel = 3) self.E = self.N + M @property def robustTol(self): """Value of tolerance for robust Pade' denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: warnings.warn(("Overriding prescribed negative robustness " "tolerance to 0."), stacklevel = 2) robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol @property def E(self): """Value of E. 
Its assignment may change Emax.""" return self._E @E.setter def E(self, E): if E < 0: raise ArithmeticError("E must be non-negative.") self._E = E self.checkMNEEmax() self._approxParameters["E"] = self.E if hasattr(self, "Emax") and self.Emax < self.E: warnings.warn(("Prescribed Emax is too small. Updating Emax " "to E."), stacklevel = 2) self.Emax = self.E @property def Emax(self): """Value of Emax. Its assignment may reset computed derivatives.""" return self._Emax @Emax.setter def Emax(self, Emax): if Emax < 0: raise ArithmeticError("Emax must be non-negative.") if hasattr(self, "Emax"): EmaxOld = self.Emax else: EmaxOld = -1 if hasattr(self, "_N"): N = self.N else: N = 0 if hasattr(self, "_M"): M = self.M else: M = 0 if hasattr(self, "_E"): E = self.E else: E = 0 if self.rho == np.inf: if max(N, M, E) > Emax: warnings.warn(("Prescribed Emax is too small. Updating Emax " "to max(N, M, E)."), stacklevel = 2) Emax = max(N, M, E) else: if max(N + M, E) > Emax: warnings.warn(("Prescribed Emax is too small. Updating Emax " "to max(N + M, E)."), stacklevel = 2) Emax = max(N + M, E) self._Emax = Emax self._approxParameters["Emax"] = Emax if (EmaxOld > self.Emax and self.solDerivatives is not None and hasattr(self, 'HArnoldi') and hasattr(self, 'RArnoldi') and self.HArnoldi is not None and self.RArnoldi is not None): self.solDerivatives = self.solDerivatives[:, : self.Emax + 1] if self.sampleType == "ARNOLDI": self.HArnoldi = self.HArnoldi[: self.Emax + 1, : self.Emax + 1] self.RArnoldi = self.RArnoldi[: self.Emax + 1, : self.Emax + 1] else: self.resetSamples() def setupApprox(self): """ Compute Pade' approximant. SVD-based robust eigenvalue management. """ if not self.checkComputedApprox(): self.computeDerivatives() while True: if self.POD: ev, eV = self.findeveVGQR() else: ev, eV = self.findeveVGExplicit() ts = self.robustTol * np.linalg.norm(ev) Nnew = np.sum(np.abs(ev) >= ts) diff = self.N - Nnew if diff <= 0: break Enew = self.E - diff Mnew = min(self.M, Enew) if Mnew == self.M: strM = "" else: strM = ", M from {0} to {1},".format(self.M, Mnew) print(("Smallest {0} eigenvalues below tolerance.\n" "Reducing N from {1} to {2}{5} and E from {3} to {4}.")\ .format(diff + 1, self.N, Nnew, self.E, Enew, strM)) newParameters = {"N" : Nnew, "M" : Mnew, "E" : Enew} self.approxParameters = newParameters self.Q = eV[::-1, 0] QToeplitz = np.zeros((self.Emax + 1, self.M + 1), dtype = np.complex) for i in range(len(self.Q)): rng = np.arange(self.M + 1 - i) QToeplitz[rng, rng + i] = self.Q[i] if self.sampleType == "ARNOLDI": QToeplitz = self.RArnoldi.dot(QToeplitz) self.P = self.solDerivatives.dot(QToeplitz) self.lastApproxParameters = copy(self.approxParameters) def buildG(self): """Assemble Pade' denominator matrix.""" self.computeDerivatives() if self.rho == np.inf: if self.sampleType == "KRYLOV": DerE = self.solDerivatives[:, self.E - self.N : self.E + 1] self.G = DerE.conj().T.dot(self.energyNormMatrix.dot(DerE)) else: RArnE = self.RArnoldi[: self.E + 1, self.E - self.N : self.E + 1] self.G = RArnE.conj().T.dot(RArnE) else: if self.sampleType == "KRYLOV": DerE = self.solDerivatives[:, self.M - self.N + 1 : self.E + 1] Gbig = DerE.conj().T.dot(self.energyNormMatrix.dot(DerE)) else: RArnE = self.RArnoldi[: self.E + 1, self.M - self.N + 1 : self.E + 1] Gbig = RArnE.conj().T.dot(RArnE) self.G = np.zeros((self.N + 1, self.N + 1), dtype=np.complex) for k in range(self.E - self.M): self.G = (self.G + self.rho ** (2 * k) * Gbig[k : k + self.N + 1, k : k + self.N + 1]) def findeveVGExplicit(self) -> 
Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of Pade' denominator matrix. """ self.buildG() ev, eV = np.linalg.eigh(self.G) return ev, eV def findeveVGQR(self) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of Pade' denominator matrix through SVD of R factor. See ``Householder triangularization of a quasimatrix'', L.Trefethen, 2008 for QR algorithm. Returns: Eigenvalues in ascending order and corresponding eigenvector matrix. """ self.computeDerivatives() if self.sampleType == "KRYLOV": if self.rho == np.inf: A = copy(self.solDerivatives[:, self.E - self.N : self.E + 1]) else: A = copy(self.solDerivatives[:, self.M - self.N + 1 : self.E + 1]) self.PODEngine = PODEngine(self.energyNormMatrix) R = self.PODEngine.QRHouseholder(A, only_R = True) else: if self.rho == np.inf: R = self.RArnoldi[: self.E + 1, self.E - self.N : self.E + 1] else: R = self.RArnoldi[: self.E + 1, self.M - self.N + 1 : self.E + 1] if self.rho == np.inf: _, s, V = np.linalg.svd(R, full_matrices = False) else: Rtower = np.zeros((R.shape[0] * (self.E - self.M), self.N + 1), dtype = np.complex) for k in range(self.E - self.M): Rtower[k * R.shape[0] : (k + 1) * R.shape[0], :] = ( self.rho ** k * R[:, self.M - self.N + 1 + k : self.M + 1 + k]) _, s, V = np.linalg.svd(Rtower, full_matrices = False) return s[::-1], V.conj().T[:, ::-1] - def evalApprox(self, mu:complex) -> Np1D: + def evalApprox(self, mu:complex): """ Evaluate Pade' approximant at arbitrary wavenumber. Args: mu: Target parameter. - - Returns: - Approximant as numpy complex vector. """ self.setupApprox() powerlist = np.power(mu - self.mu0, range(max(self.M, self.N) + 1)) self.uApp = (self.P.dot(powerlist[:self.M + 1]) / self.Q.dot(powerlist[:self.N + 1])) - return self.uApp def getPoles(self) -> Np1D: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ self.setupApprox() return np.roots(self.Q[::-1]) + self.mu0 diff --git a/rrompy/reduction_methods/taylor/approximant_taylor_rb.py b/rrompy/reduction_methods/taylor/approximant_taylor_rb.py index 7db76ce..f3ae5f7 100644 --- a/rrompy/reduction_methods/taylor/approximant_taylor_rb.py +++ b/rrompy/reduction_methods/taylor/approximant_taylor_rb.py @@ -1,319 +1,315 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import warnings import numpy as np import scipy as sp from rrompy.reduction_methods.taylor.generic_approximant_taylor import GenericApproximantTaylor from rrompy.utilities.types import Np1D, ListAny, DictAny, HFEng, HSEng from rrompy.utilities import purgeDict __all__ = ['ApproximantTaylorRB'] class ApproximantTaylorRB(GenericApproximantTaylor): """ ROM single-point fast RB approximant computation for parametric problems with polynomial dependence up to degree 2. Args: HFEngine: HF problem solver. 
Should have members: - energyNormMatrix: sparse matrix (in CSC format) associated to w-weighted H10 norm; - problemData: list of HF problem data (excluding mu); - setProblemData: set HF problem data and mu to prescribed values; - getLSBlocks: get blocks of HF linear system as sparse matrices in CSC format; - liftDirichletData: perform lifting of Dirichlet BC as numpy complex vector; - setupDerivativeComputation: setup HF problem data to compute j-th solution derivative at mu0; - solve: get HF solution as complex numpy vector. HSEngine: Hilbert space general purpose engine. Should have members: - norm: compute Hilbert norm of expression represented as complex numpy vector; - plot: plot Hilbert expression represented as complex numpy vector. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'R': rank for Galerkin projection; defaults to E + 1; - 'E': total number of derivatives current approximant relies upon; defaults to Emax; - 'Emax': total number of derivatives of solution map to be computed; defaults to E; - 'sampleType': label of sampling type; available values are: - 'ARNOLDI': orthogonalization of solution derivatives through Arnoldi algorithm; - 'KRYLOV': standard computation of solution derivatives. Defaults to 'KRYLOV'. Defaults to empty dict. plotDer(optional): What to plot of derivatives of the Helmholtz solution map. See plot method in HSEngine. Defaults to []. plotDerSpecs(optional): How to plot derivatives of the Helmholtz solution map. See plot method in HSEngine. Defaults to {}. Attributes: HFEngine: HF problem solver. HSEngine: Hilbert space general purpose engine. mu0: Default parameter. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'R': rank for Galerkin projection; - 'E': total number of derivatives current approximant relies upon; - 'Emax': total number of derivatives of solution map to be computed; - 'sampleType': label of sampling type. solDerivatives: Array whose columns are FE dofs of solution derivatives. RArnoldi: Right factor of derivatives QR factorization. If no Arnoldi, set to None. HArnoldi: Hessenberg factor of derivatives Arnoldi algorithm. If no Arnoldi, set to None. POD: Whether to compute QR factorization of derivatives. R: Rank for Galerkin projection. E: Number of solution derivatives over which current approximant is based upon. Emax: Total number of solution derivatives to be computed. sampleType: Label of sampling type, i.e. 'KRYLOV'. plotDer: What to plot of derivatives of the Helmholtz solution map. plotDerSpecs: How to plot derivatives of the Helmholtz solution map. energyNormMatrix: Sparse matrix (in CSC format) associated to w-weighted H10 norm. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. lastApproxParameters: List of parameters corresponding to last computed approximant. solLifting: Numpy complex vector with lifting of real part of Dirichlet boundary datum. projMat: Numpy matrix representing projection onto RB space. projMat: Numpy matrix representing projection onto RB space. 
As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt mu. bs: List of numpy vectors representing coefficients of linear system RHS wrt mu. ARBs: List of sparse matrices (in CSC format) representing RB coefficients of linear system matrix wrt mu. bRBs: List of numpy vectors representing RB coefficients of linear system RHS wrt mu. """ def __init__(self, HFEngine:HFEng, HSEngine:HSEng, mu0 : complex = 0, approxParameters : DictAny = {}, plotDer : ListAny = [], plotDerSpecs : DictAny = {}): if not hasattr(self, "parameterList"): self.parameterList = [] self.parameterList += ["R"] super().__init__(HFEngine = HFEngine, HSEngine = HSEngine, mu0 = mu0, approxParameters = approxParameters, plotDer = plotDer, plotDerSpecs = plotDerSpecs) def resetSamples(self): """Reset samples.""" super().resetSamples() self.projMat = None @property def approxParameters(self): """ Value of approximant parameters. Its assignment may change M, N and S. """ return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters") approxParametersCopy = purgeDict(approxParameters, ["R"], True, True) GenericApproximantTaylor.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "R" in keyList: self.R = approxParameters["R"] elif hasattr(self, "R"): self.R = self.R else: self.R = self.E + 1 @property def POD(self): """Value of POD.""" return self._POD @POD.setter def POD(self, POD): GenericApproximantTaylor.POD.fset(self, POD) if (hasattr(self, "sampleType") and self.sampleType == "ARNOLDI" and not self.POD): warnings.warn(("Arnoldi sampling implicitly forces POD-type " "derivative management."), stacklevel = 2) @property def sampleType(self): """Value of sampleType.""" return self._sampleType @sampleType.setter def sampleType(self, sampleType): GenericApproximantTaylor.sampleType.fset(self, sampleType) if (hasattr(self, "POD") and not self.POD and self.sampleType == "ARNOLDI"): warnings.warn(("Arnoldi sampling implicitly forces POD-type " "derivative management."), stacklevel = 2) @property def R(self): """Value of R. Its assignment may change S.""" return self._R @R.setter def R(self, R): if R < 0: raise ArithmeticError("R must be non-negative.") self._R = R self._approxParameters["R"] = self.R if hasattr(self, "E") and self.E + 1 < self.R: warnings.warn("Prescribed E is too small. Updating E to R - 1.", stacklevel = 2) self.E = self.R - 1 def setupApprox(self): """ Setup RB system. For usage of correlation matrix in POD see Section 6.3.1 in 'Reduced Basis Methods for PDEs. An introduction', A. Quarteroni, A. Manzoni, F. Negri, Springer, 2016. """ if not self.checkComputedApprox(): self.computeDerivatives() if self.POD and not self.sampleType == "ARNOLDI": A = copy(self.solDerivatives) M = self.energyNormMatrix V = np.zeros_like(A, dtype = np.complex) Q = np.zeros_like(A, dtype = np.complex) R = np.zeros((A.shape[1], A.shape[1]), dtype = np.complex) for k in range(A.shape[1]): Q[:, k] = (np.random.randn(Q.shape[0]) + 1.j * np.random.randn(Q.shape[0])) if k > 0: for times in range(2): #twice is enough! Q[:, k] = Q[:, k] - Q[:, :k].dot( (Q[:, k].conj().dot(M.dot(Q[:, :k]))).conj()) Q[:, k] = Q[:, k]/(Q[:, k].conj().dot(M.dot(Q[:, k])))**.5 R[k, k] = np.abs(A[:, k].conj().dot(M.dot(A[:, k]))) ** .5 alpha = Q[:, k].conj().dot(M.dot(A[:, k])) if np.isclose(np.abs(alpha), 0.): s = 1. 
else: s = - alpha / np.abs(alpha) Q[:, k] = s * Q[:, k] v = R[k, k] * Q[:, k] - A[:, k] for times in range(2): #twice is enough! v = v - Q[:, :k].dot((v.conj().dot(M.dot(Q[:, :k])) ).conj()) sigma = np.abs(v.conj().dot(M.dot(v))) ** .5 if np.isclose(sigma, 0.): v = Q[:, k] else: v = v / sigma V[:, k] = v J = np.arange(k + 1, A.shape[1]) vtQ = v.conj().dot(M.dot(A[:, J])) A[:, J] = A[:, J] - 2 * np.outer(v, vtQ) R[k, J] = Q[:, k].conj().dot(M.dot(A[:, J])) A[:, J] = A[:, J] - np.outer(Q[:, k], R[k, J]) for k in range(A.shape[1] - 1, -1, -1): v = V[:, k] J = np.arange(k, A.shape[1]) vtQ = v.conj().dot(M.dot(Q[:, J])) Q[:, J] = Q[:, J] - 2 * np.outer(v, vtQ) self.projMatQ = Q self.projMatR = R if self.POD and not self.sampleType == "ARNOLDI": U, _, _ = np.linalg.svd(self.projMatR[: self.R, : self.R]) self.projMat = self.projMatQ[:, : self.R].dot(U) else: self.projMat = self.solDerivatives[:, : self.R] self.assembleReducedSystem() self.lastApproxParameters = copy(self.approxParameters) def assembleReducedSystem(self): """Build affine blocks of RB linear system through projections.""" projMatH = self.projMat.T.conjugate() self.ARBs = [None] * len(self.As) self.bRBs = [None] * len(self.bs) for j in range(len(self.As)): self.ARBs[j] = projMatH.dot(self.As[j].dot(self.projMat)) for j in range(len(self.bs)): self.bRBs[j] = projMatH.dot(self.bs[j]) def solveReducedSystem(self, mu:complex) -> Np1D: """ Solve RB linear system. Args: mu: Target parameter. Returns: Solution of RB linear system. """ self.setupApprox() ARBmu = self.ARBs[0][: self.R, : self.R] bRBmu = self.bRBs[0][: self.R] for j in range(1, len(self.ARBs)): ARBmu = ARBmu + np.power(mu, j) * self.ARBs[j][:self.R, :self.R] for j in range(1, len(self.bRBs)): bRBmu = bRBmu + np.power(mu, j) * self.bRBs[j][:self.R] return self.projMat[:, :self.R].dot(np.linalg.solve(ARBmu, bRBmu)) - def evalApprox(self, mu:complex) -> Np1D: + def evalApprox(self, mu:complex): """ Evaluate RB approximant at arbitrary wavenumber. Args: mu: Target parameter. - - Returns: - Approximant as numpy 1D array. """ self.setupApprox() self.uApp = self.solveReducedSystem(mu) - return self.uApp def getPoles(self) -> Np1D: """ Obtain approximant poles. Returns: Numpy complex vector of poles. """ self.setupApprox() A = np.eye(self.ARBs[0].shape[0] * (len(self.ARBs) - 1), dtype = np.complex) B = np.zeros_like(A) A[: self.ARBs[0].shape[0], : self.ARBs[0].shape[0]] = - self.ARBs[0] for j in range(len(self.ARBs) - 1): Aj = self.ARBs[j + 1] B[: Aj.shape[0], j * Aj.shape[0] : (j + 1) * Aj.shape[0]] = Aj II = np.arange(self.ARBs[0].shape[0], self.ARBs[0].shape[0] * (len(self.ARBs) - 1)) B[II, II - self.ARBs[0].shape[0]] = 1. try: return sp.linalg.eigvals(A, B) except: warnings.warn("Generalized eig algorithm did not converge.", stacklevel = 2) x = np.empty(A.shape[0]) x[:] = np.NaN return x diff --git a/rrompy/sampling/__init__.py b/rrompy/sampling/__init__.py index f38a4ca..5a8ec37 100644 --- a/rrompy/sampling/__init__.py +++ b/rrompy/sampling/__init__.py @@ -1,29 +1,31 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
diff --git a/rrompy/sampling/__init__.py b/rrompy/sampling/__init__.py
index f38a4ca..5a8ec37 100644
--- a/rrompy/sampling/__init__.py
+++ b/rrompy/sampling/__init__.py
@@ -1,29 +1,31 @@
# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#

from rrompy.sampling.generic_sampler import GenericSampler
from rrompy.sampling.quadrature_sampler import QuadratureSampler
from rrompy.sampling.random_sampler import RandomSampler
+from rrompy.sampling.fft_sampler import FFTSampler

__all__ = [
        'GenericSampler',
        'QuadratureSampler',
-        'RandomSampler'
+        'RandomSampler',
+        'FFTSampler'
           ]

diff --git a/rrompy/sampling/fft_sampler.py b/rrompy/sampling/fft_sampler.py
new file mode 100644
index 0000000..9ab0c5a
--- /dev/null
+++ b/rrompy/sampling/fft_sampler.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2018 by the RROMPy authors
+#
+# This file is part of RROMPy.
+#
+# RROMPy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# RROMPy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import numpy as np
+from rrompy.sampling.generic_sampler import GenericSampler
+from rrompy.utilities.types import Np1D, List
+
+__all__ = ['FFTSampler']
+
+class FFTSampler(GenericSampler):
+    """Generator of FFT-type sample points on scaled roots of unity."""
+
+    def generatePoints(self, n:Np1D) -> List[Np1D]:
+        """Array of sample points and array of weights."""
+        d = len(self.lims[0])
+        try:
+            len(n)
+        except:
+            n = np.array([n])
+        if len(n) != d:
+            raise Exception(("Numbers of points must have same dimension as "
+                             "limits."))
+        for j in range(d):
+            a, b = self.lims[0][j], self.lims[1][j]
+            c, r = (a + b) / 2., np.abs(a - b) / 2.
+            xj = c + r * np.exp(1.j *
+                             np.linspace(0, 2 * np.pi, n[j] + 1)[:-1, None])
+            wj = r / n[j] * np.ones(n[j])
+            if j == 0:
+                x = xj
+                w = wj
+                xsize = n[0]
+            else:
+                x = np.concatenate((np.kron(np.ones(n[j])[:, None], x),
+                                    np.kron(xj, np.ones(xsize)[:, None])),
+                                   axis = 1)
+                w = np.multiply(np.kron(np.ones(n[j]), w),
+                                np.kron(wj, np.ones(xsize)))
+                xsize = xsize * n[j]
+        return [y.flatten() for y in np.split(x, xsize)], w
+
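A quick usage sketch of the new sampler (illustrative only; the endpoints and the number of points are made-up values, and the top-level import assumes the __init__.py update above):

from rrompy.sampling import FFTSampler

sampler = FFTSampler(lims = [8. + 1.j, 12. + 1.j])
points, weights = sampler.generatePoints(9)
# points: list of 9 one-entry arrays on the circle of radius 2 centered at
# 10 + 1j, the midpoint of the two limits; weights: 9 copies of r / n = 2 / 9.

The circle through the two limit points is sampled at equispaced (scaled) roots of unity, which is what makes these nodes pair naturally with FFT-based manipulations.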
diff --git a/rrompy/sampling/generic_sampler.py b/rrompy/sampling/generic_sampler.py
index f848192..d5cdaac 100644
--- a/rrompy/sampling/generic_sampler.py
+++ b/rrompy/sampling/generic_sampler.py
@@ -1,58 +1,66 @@
# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#

from abc import abstractmethod
import numpy as np
from rrompy.utilities.types import Np1D, Tuple, GenExpr

__all__ = ['GenericSampler']

class GenericSampler:
    """ABSTRACT. Generic generator of sample points."""

    def __init__(self, lims:Tuple[Np1D, Np1D]):
        self.lims = lims

-    def __eq__(self, other):
+    def name(self) -> str:
+        return self.__class__.__name__
+
+    def __str__(self) -> str:
+        return "{}[{}{}]".format(self.name(),
+                            np.array2string(self.lims[0], separator = '_'),
+                            np.array2string(self.lims[1], separator = '_'))
+
+    def __eq__(self, other) -> bool:
        return self.__dict__ == other.__dict__

    @property
    def lims(self):
        """Value of lims."""
        return self._lims

    @lims.setter
    def lims(self, lims):
        if len(lims) != 2:
            raise Exception("2 limits must be specified.")
        try:
            lims = lims.tolist()
        except:
            lims = list(lims)
        for j in range(2):
            try:
                len(lims[j])
            except:
                lims[j] = np.array([lims[j]])
        if len(lims[0]) != len(lims[1]):
            raise Exception("The limits must have the same length.")
        self._lims = lims

    @abstractmethod
    def generatePoints(self, n:GenExpr) -> Tuple[Np1D, Np1D]:
        """Array of points and array of weights."""
        pass
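To round out the sampler interface: the only method a concrete sampler must supply is generatePoints, which returns the sample points together with their weights. A hypothetical subclass (not part of this patch; UniformSampler and its equispaced rule are made up for illustration) could look like:

import numpy as np
from rrompy.sampling.generic_sampler import GenericSampler

class UniformSampler(GenericSampler):
    """Equispaced points on the segment joining the two limits (1D only)."""

    def generatePoints(self, n):
        a, b = self.lims[0][0], self.lims[1][0]
        x = np.linspace(a, b, n)              # also works for complex endpoints
        w = np.abs(b - a) / n * np.ones(n)    # uniform weights, as in FFTSampler
        return [np.array([p]) for p in x], w  # same (points, weights) layout

Note that the lims setter inherited from GenericSampler already normalizes scalar limits to one-entry arrays, so indexing self.lims[0][0] is safe here.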