diff --git a/examples/diapason/pod.py b/examples/diapason/pod.py index 2eb8572..1cbde0f 100644 --- a/examples/diapason/pod.py +++ b/examples/diapason/pod.py @@ -1,181 +1,150 @@ import numpy as np -import fenics as fen -import ufl -from rrompy.hfengines.vector_linear_problem import \ - LinearElasticityHelmholtzProblemEngine as LEHPE -from rrompy.hfengines.vector_linear_problem import \ - LinearElasticityHelmholtzProblemEngineDamped as LEHPED +from diapason_engine import DiapasonEngine, DiapasonEngineDamped from rrompy.reduction_methods.lagrange import ApproximantLagrangePade as Pade from rrompy.reduction_methods.lagrange import ApproximantLagrangeRB as RB from rrompy.utilities.parameter_sampling import QuadratureSampler as QS verb = 100 sol = "single" -#sol = "sweep" +sol = "sweep" algo = "Pade" #algo = "RB" polyBasis = "LEGENDRE" polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" -dampingEta = 0 * 1e4 / 2. / np.pi +dampingEta = 0. * 1e4 / 2. / np.pi ktar = 1.e4 # [Hz] k0s = np.array([2.5e2, 1.0e4]) -k0s = np.array([2.5e3, 1.5e4]) +#k0s = np.array([2.5e3, 1.5e4]) #k0s = np.array([5.0e4, 1.0e5]) -#k0s = np.array([2.0e5, 3.0e5]) +k0s = np.array([2.0e5, 3.0e5]) k0 = np.mean(np.power(k0s, 2.)) ** .5 -### -if dampingEta > 0: - rescaling = lambda x: x - rescalingInv = lambda x: x -else: - rescaling = lambda x: np.power(x, 2.) - rescalingInv = lambda x: np.power(x, .5) - -params = {'N':15, 'M':14, 'S':25, 'POD':True, 'polybasis':polyBasis} theta = 20. * np.pi / 180. phi = 10. * np.pi / 180. - -mesh = fen.Mesh("../data/mesh/diapason_1.xml") -subdomains = fen.MeshFunction("size_t", mesh, - "../data/mesh/diapason_1_physical_region.xml") - -meshBall = fen.SubMesh(mesh, subdomains, 2) -meshFork = fen.SubMesh(mesh, subdomains, 1) -Hball = np.max(meshBall.coordinates()[:, 1]) #.00257 -Ltot = np.max(mesh.coordinates()[:, 1]) #.1022 -Lhandle = np.max(meshFork.coordinates()[:, 1]) #.026 -Lrod = Ltot - Lhandle #.0762 -L2var = (Lrod / 4.) ** 2. -Ehandle_ratio = 3. -rhohandle_ratio = 1.5 - c = 3.e2 rho = 8e3 * (2. * np.pi) ** 2. E = 1.93e11 nu = .3 T = 1e6 -lambda_ = E * nu / (1. + nu) / (1. - 2. * nu) -mu_ = E / (1. + nu) - -kWave = (np.cos(theta) * np.cos(phi), np.sin(phi), np.sin(theta) * np.cos(phi)) - -x, y, z = fen.SpatialCoordinate(mesh)[:] -yCorr = y - Ltot -compPlane = kWave[0] * x + kWave[1] * y + kWave[2] * z -xPlane, yPlane, zPlane = (xx - compPlane * xx for xx in (x, y, z)) -xOrtho, yOrtho, zOrtho = (compPlane * xx for xx in (x, y, z)) - -forcingBase = (T / (2. * np.pi * L2var)**.5 - * fen.exp(- (xPlane**2. + yPlane**2. + zPlane**2.) / (2.*L2var))) -forcingWeight = np.real(k0) / c * (xOrtho + yOrtho + zOrtho) -neumannDatum = [ufl.as_vector( - tuple(forcingBase * fen.cos(forcingWeight) * kWavedir for kWavedir in kWave)), - ufl.as_vector( - tuple(forcingBase * fen.sin(forcingWeight) * kWavedir for kWavedir in kWave))] -lambda_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(lambda_), - fen.Constant(Ehandle_ratio * lambda_)) -mu_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(mu_), - fen.Constant(Ehandle_ratio * mu_)) -rho_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(rho), - fen.Constant(rhohandle_ratio * rho)) ### -if dampingEta > 0: - solver = LEHPED(degree_threshold = 8, verbosity = 0) - solver.eta = dampingEta +if np.isclose(dampingEta, 0.): + rescaling = lambda x: np.power(x, 2.) 
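+    # (rescalingInv maps the squared wavenumber back to k: the undamped
+    #  engine depends polynomially on k^2, so sampling is done in k^2)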
+ rescalingInv = lambda x: np.power(x, .5) + solver = DiapasonEngine(kappa = k0, c = c, rho = rho, E = E, nu = nu, + T = T, theta = theta, phi = phi, meshNo = 1, + degree_threshold = 8, verbosity = 0) else: - solver = LEHPE(degree_threshold = 8, verbosity = 0) -solver.omega = np.real(k0) -solver.lambda_ = lambda_eff -solver.mu_ = mu_eff -solver.rho_ = rho_eff -solver.V = fen.VectorFunctionSpace(mesh, "P", 1) -solver.DirichletBoundary = lambda x, on_b: on_b and x[1] < Hball -solver.NeumannBoundary = "REST" -solver.forcingTerm = neumannDatum + rescaling = lambda x: x + rescalingInv = lambda x: x + solver = DiapasonEngineDamped(kappa = k0, c = c, rho = rho, E = E, nu = nu, + T = T, theta = theta, phi = phi, + dampingEta = dampingEta, meshNo = 1, + degree_threshold = 8, verbosity = 0) + +params = {'N':39, 'M':39, 'S':40, 'POD':True, 'polybasis':polyBasis}#, +# 'robustTol':1e-16} if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb) else: + params.pop("N") + params.pop("M") + params.pop("polybasis") +# params.pop("robustTol") approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb) approx.sampler = QS(k0s, "CHEBYSHEV", rescaling, rescalingInv) approx.setupApprox() if sol == "single": approx.outParaviewTimeDomainSamples( filename = "out/outSamples{}".format(dampingEta), forceNewFile = False, folders = True) nameBase = "{}_{}".format(ktar, dampingEta) approx.outParaviewTimeDomainApprox(ktar, omega = 2. * np.pi * ktar, filename = "out/outTApp{}".format(nameBase), forceNewFile = False, folder = True) approx.outParaviewTimeDomainHF(ktar, omega = 2. * np.pi * ktar, filename = "out/outTHF{}".format(nameBase), forceNewFile = False, folder = True) approx.outParaviewTimeDomainErr(ktar, omega = 2. * np.pi * ktar, filename = "out/outTErr{}".format(nameBase), forceNewFile = False, folder = True) approx.outParaviewTimeDomainRes(ktar, omega = 2. 
* np.pi * ktar, filename = "out/outTRes{}".format(nameBase), forceNewFile = False, folder = True) appErr, solNorm = approx.normErr(ktar), approx.normHF(ktar) resNorm, RHSNorm = approx.normRes(ktar), approx.normRHS(ktar) print(('SolNorm:\t{}\nErr:\t{}\nErrRel:\t{}').format(solNorm, appErr, np.divide(appErr, solNorm))) print(('RHSNorm:\t{}\nRes:\t{}\nResRel:\t{}').format(RHSNorm, resNorm, np.divide(resNorm, RHSNorm))) -print('\nPoles:') -print(approx.getPoles()) +poles = approx.getPoles() +print('Poles:', poles) if sol == "sweep": k0s = np.linspace(k0s[0], k0s[1], 100) kl, kr = min(k0s), max(k0s) approx.samplingEngine.verbosity = 0 + approx.trainedModel.verbosity = 0 approx.verbosity = 0 kl, kr = np.real(kl), np.real(kr) from matplotlib import pyplot as plt normApp = np.zeros(len(k0s)) norm = np.zeros_like(normApp) err = np.zeros_like(normApp) + res = np.zeros_like(normApp) +# errApp = np.zeros_like(normApp) + fNorm = approx.normRHS(k0s[0]) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) err[j] = approx.normErr(k0s[j]) / norm[j] + res[j] = approx.normRes(k0s[j]) / fNorm +# errApp[j] = res[j] / np.min(np.abs(k0s[j] - poles)) +# errApp *= np.mean(err) / np.mean(errApp) plt.figure() plt.semilogy(k0s, norm) plt.semilogy(k0s, normApp, '--') plt.semilogy(np.real(approx.mus), 1.05*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() + plt.figure() + plt.semilogy(k0s, res) + plt.xlim([kl, kr]) + plt.grid() + plt.show() + plt.close() + plt.figure() plt.semilogy(k0s, err) +# plt.semilogy(k0s, errApp) plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/diapason/solver.py b/examples/diapason/solver.py index 10c0826..f223a16 100644 --- a/examples/diapason/solver.py +++ b/examples/diapason/solver.py @@ -1,81 +1,81 @@ import numpy as np import fenics as fen import ufl from rrompy.hfengines.vector_linear_problem import \ LinearElasticityHelmholtzProblemEngine as LEHPE from rrompy.hfengines.vector_linear_problem import \ LinearElasticityHelmholtzProblemEngineDamped as LEHPED verb = 2 dampingEta = 0 * 1e4 / 2. / np.pi -k = 7773.051993943557 # [Hz] +k = 1.8e3 # [Hz] theta = 20. * np.pi / 180. phi = 10. * np.pi / 180. mesh = fen.Mesh("../data/mesh/diapason_1.xml") subdomains = fen.MeshFunction("size_t", mesh, "../data/mesh/diapason_1_physical_region.xml") meshBall = fen.SubMesh(mesh, subdomains, 2) meshFork = fen.SubMesh(mesh, subdomains, 1) Hball = np.max(meshBall.coordinates()[:, 1]) #.00257 Ltot = np.max(mesh.coordinates()[:, 1]) #.1022 Lhandle = np.max(meshFork.coordinates()[:, 1]) #.026 Lrod = Ltot - Lhandle #.0762 L2var = (Lrod / 4.) ** 2. Ehandle_ratio = 3. rhohandle_ratio = 1.5 c = 3.e2 rho = 8e3 * (2. * np.pi) ** 2. E = 1.93e11 nu = .3 T = 1e6 lambda_ = E * nu / (1. + nu) / (1. - 2. * nu) mu_ = E / (1. + nu) kWave = (np.cos(theta) * np.cos(phi), np.sin(phi), np.sin(theta) * np.cos(phi)) x, y, z = fen.SpatialCoordinate(mesh)[:] yCorr = y - Ltot compPlane = kWave[0] * x + kWave[1] * y + kWave[2] * z xPlane, yPlane, zPlane = (xx - compPlane * xx for xx in (x, y, z)) xOrtho, yOrtho, zOrtho = (compPlane * xx for xx in (x, y, z)) forcingBase = (T / (2. * np.pi * L2var)**.5 * fen.exp(- (xPlane**2. + yPlane**2. 
+ zPlane**2.) / (2.*L2var))) forcingWeight = np.real(k) / c * (xOrtho + yOrtho + zOrtho) neumannDatum = [ufl.as_vector( tuple(forcingBase * fen.cos(forcingWeight) * kWavedir for kWavedir in kWave)), ufl.as_vector( tuple(forcingBase * fen.sin(forcingWeight) * kWavedir for kWavedir in kWave))] lambda_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(lambda_), fen.Constant(Ehandle_ratio * lambda_)) mu_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(mu_), fen.Constant(Ehandle_ratio * mu_)) rho_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(rho), fen.Constant(rhohandle_ratio * rho)) ### if dampingEta > 0: solver = LEHPED(degree_threshold = 8, verbosity = verb) solver.eta = dampingEta else: solver = LEHPE(degree_threshold = 8, verbosity = verb) solver.omega = np.real(k) solver.lambda_ = lambda_eff solver.mu_ = mu_eff solver.rho_ = rho_eff -solver.V = fen.VectorFunctionSpace(mesh, "P", 1) +solver.V = fen.VectorFunctionSpace(mesh, "P", 3) solver.DirichletBoundary = lambda x, on_b: on_b and x[1] < Hball solver.NeumannBoundary = "REST" solver.forcingTerm = neumannDatum uh = solver.solve(k) solver.outParaviewTimeDomain(uh, omega = 2. * np.pi * k, filename = "out/outT{}_{}_".format(k, dampingEta), forceNewFile = False) diff --git a/examples/from_papers/pod_membraneTaylor.py b/examples/from_papers/pod_membraneTaylor.py index d6d03c9..e19950e 100644 --- a/examples/from_papers/pod_membraneTaylor.py +++ b/examples/from_papers/pod_membraneTaylor.py @@ -1,96 +1,96 @@ import fenics as fen import numpy as np from rrompy.hfengines.linear_problem import HelmholtzProblemEngine as HPE from rrompy.reduction_methods.taylor import ApproximantTaylorPade as TP from rrompy.utilities.parameter_sweeper import ParameterSweeper as Sweeper verb = 0 test = "poles" test = "error" k0 = 10 ktars = np.linspace(78**.5, 122**.5, 50) def boundaryNeumann(x, on_boundary): return on_boundary and x[1] > .25 and x[0] > 0.995 and x[0] < 1.005 meshname = '../data/mesh/crack_coarse.xml' #meshname = '../data/mesh/crack_fine.xml' mesh = fen.Mesh(meshname) x, y = fen.SpatialCoordinate(mesh)[:] x0, y0 = .5, .5 Rr, Ri = .1, .1 forcingTerm = fen.exp(- ((x - x0)**2 + (y - y0)**2) / 2 / Rr**2) solver = HPE(verbosity = verb) solver.omega = np.real(k0) solver.V = fen.FunctionSpace(mesh, "P", 3) solver.forcingTerm = forcingTerm solver.NeumannBoundary = boundaryNeumann solver.DirichletBoundary = 'rest' if test == "poles": appPoles = {} Emax = 13 - params = {'N':6, 'M':0, 'E':6, 'Emax':Emax, 'sampleType':'Arnoldi', + params = {'N':6, 'M':0, 'E':6, 'sampleType':'Arnoldi', 'POD':True} approxPade = TP(solver, mu0 = k0, approxParameters = params, verbosity = verb) for E in range(6, Emax + 1): approxPade.E = E appPoles[E] = np.sort(approxPade.getPoles()) a = fen.dot(fen.grad(solver.u), fen.grad(solver.v)) * fen.dx A = fen.assemble(a) fen.DirichletBC(solver.V, fen.Constant(0.), solver.DirichletBoundary).apply(A) AMat = fen.as_backend_type(A).mat() Ar, Ac, Av = AMat.getValuesCSR() import scipy.sparse as scsp A = scsp.csr_matrix((Av, Ac, Ar), shape = AMat.size) m = fen.dot(solver.u, solver.v) * fen.dx M = fen.assemble(m) fen.DirichletBC(solver.V, fen.Constant(0.), solver.DirichletBoundary).apply(M) MMat = fen.as_backend_type(M).mat() Mr, Mc, Mv = MMat.getValuesCSR() import scipy.sparse as scsp M = scsp.csr_matrix((Mv, Mc, Mr), shape = MMat.size) poles = scsp.linalg.eigs(A, k = 7, M = M, sigma = 100., return_eigenvectors = False) II = np.argsort(np.abs(poles - k0)) poles = poles[II] print('Exact', end = ': ') 
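+    # (the bracketed list comprehensions below are used only for their print
+    #  side effects: each pole is emitted as a comma-separated "Re,Im" pair)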
[print('{},{}'.format(np.real(x), np.imag(x)), end = ',') for x in poles] print() for E in range(6, Emax + 1): print(E, end = ': ') [print('{},{}'.format(np.real(x), np.imag(x)), end = ',')\ for x in np.sort(appPoles[E])] print() elif test == "error": M0 = 5 Emax = 8 - params = {'Emax':Emax, 'sampleType':'Arnoldi', 'POD':True} + params = {'E':Emax, 'sampleType':'Arnoldi', 'POD':True} paramsSetsPade = [None] * (Emax - M0 + 1) for M in range(M0, Emax + 1): paramsSetsPade[M - M0] = {'N':M0 + 1, 'M':M, 'E':max(M, M0 + 1)} approxPade = TP(solver, mu0 = k0, approxParameters = params, verbosity = verb) sweeper = Sweeper(mutars = ktars, mostExpensive = 'Approx') sweeper.ROMEngine = approxPade sweeper.params = paramsSetsPade - filenamePade = sweeper.sweep('../data/output/membrane_error.dat', + filenamePade = sweeper.sweep('membrane_error.dat', outputs = 'ALL') - sweeper.plot(filenamePade, ['muRe'], ['normHF', 'normApp'], ['M'], + sweeper.plot(filenamePade, ['muRe'], ['normHF', 'normApprox'], ['M'], onePlot = True) sweeper.plot(filenamePade, ['muRe'], ['normErr'], ['M']) diff --git a/examples/greedy/squareBubbleHomog.py b/examples/greedy/squareBubbleHomog.py index 52e8a6d..4d580a5 100644 --- a/examples/greedy/squareBubbleHomog.py +++ b/examples/greedy/squareBubbleHomog.py @@ -1,110 +1,111 @@ import numpy as np from rrompy.hfengines.linear_problem import \ HelmholtzSquareBubbleProblemEngine as HSBPE from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB from rrompy.utilities.base import squareResonances -verb = 20 +verb = 2 timed = False algo = "Pade" algo = "RB" polyBasis = "LEGENDRE" #polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" errorEstimatorKind = "SIMPLIFIED" errorEstimatorKind = "EXACT" k0s = np.power(np.linspace(95, 149, 250), .5) -#k0s = np.power(np.linspace(95, 129, 100), .5) +k0s = np.power(np.linspace(95, 129, 100), .5) +k0s = np.power(np.linspace(9.5**2., 10.5**2., 100), .5) k0 = np.mean(np.power(k0s, 2.)) ** .5 kl, kr = min(k0s), max(k0s) polesexact = np.unique(np.power(squareResonances(kl**2., kr**2., False), .5)) params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, 'greedyTol':1e-2, 'nTestPoints':2, 'polybasis':polyBasis, 'errorEstimatorKind':errorEstimatorKind} if timed: verb = 0 solver = HSBPE(kappa = 12 ** .5, theta = np.pi / 3, n = 20, verbosity = verb) solver.omega = np.real(k0) if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb) else: approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) approx.samplingEngine.verbosity = 0 approx.trainedModel.verbosity = 0 approx.verbosity = 0 from matplotlib import pyplot as plt normApp = np.zeros_like(k0s) norm = np.zeros_like(k0s) res = np.zeros_like(k0s) err = np.zeros_like(k0s) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j])) / approx.estNormer.norm(approx.getRHS(k0s[j]))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) plt.figure() plt.semilogy(k0s, norm) plt.semilogy(k0s, normApp, '--') plt.semilogy(polesexact, 2.*np.max(norm)*np.ones_like(polesexact, dtype = float), 'm.') plt.semilogy(np.real(approx.mus), 4.*np.max(norm)*np.ones_like(approx.mus, dtype = 
float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(polesexact, 2.*np.max(resApp)*np.ones_like(polesexact, dtype = float), 'm.') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.semilogy(polesexact, 2.*np.max(err)*np.ones_like(polesexact, dtype = float), 'm.') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.plot(np.real(polesexact), np.imag(polesexact), 'm.') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/rrompy/hfengines/vector_linear_problem/__init__.py b/rrompy/hfengines/vector_linear_problem/__init__.py index d09cdf3..80da6f5 100644 --- a/rrompy/hfengines/vector_linear_problem/__init__.py +++ b/rrompy/hfengines/vector_linear_problem/__init__.py @@ -1,33 +1,34 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from .linear_elasticity_problem_engine import LinearElasticityProblemEngine from .linear_elasticity_helmholtz_problem_engine import LinearElasticityHelmholtzProblemEngine from .linear_elasticity_helmholtz_problem_engine_damped import LinearElasticityHelmholtzProblemEngineDamped from .linear_elasticity_beam_poisson_ratio import LinearElasticityBeamPoissonRatio from .linear_elasticity_helmholtz_archway_frequency import LinearElasticityHelmholtzArchwayFrequency __all__ = [ 'LinearElasticityProblemEngine', 'LinearElasticityHelmholtzProblemEngine', + 'LinearElasticityHelmholtzProblemEngineDamped', 'LinearElasticityBeamPoissonRatio', 'LinearElasticityHelmholtzArchwayFrequency' ] diff --git a/rrompy/hfengines/vector_linear_problem/linear_elasticity_problem_engine.py b/rrompy/hfengines/vector_linear_problem/linear_elasticity_problem_engine.py index f141e39..e503b28 100644 --- a/rrompy/hfengines/vector_linear_problem/linear_elasticity_problem_engine.py +++ b/rrompy/hfengines/vector_linear_problem/linear_elasticity_problem_engine.py @@ -1,369 +1,368 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from scipy.sparse import csr_matrix import fenics as fen from rrompy.hfengines.base.vector_problem_engine_base import \ VectorProblemEngineBase from rrompy.utilities.base.types import Np1D, ScOp from rrompy.utilities.base.fenics import fenZERO, fenZEROS, fenONE from rrompy.utilities.base import verbosityDepth __all__ = ['LinearElasticityProblemEngine'] class LinearElasticityProblemEngine(VectorProblemEngineBase): """ Solver for generic linear elasticity problems. - div(lambda_ * div(u) * I + 2 * mu_ * epsilon(u)) = f in \Omega u = u0 on \Gamma_D \partial_nu = g1 on \Gamma_N \partial_nu + h u = g2 on \Gamma_R Attributes: verbosity: Verbosity level. BCManager: Boundary condition manager. V: Real vector FE space. u: Generic vector trial functions for variational form evaluation. v: Generic vector test functions for variational form evaluation. As: Scipy sparse array representation (in CSC format) of As. bs: Numpy array representation of bs. energyNormMatrix: Scipy sparse matrix representing inner product over V. bsmu: Mu value of last bs evaluation. liftDirichletDatamu: Mu value of last Dirichlet datum evaluation. liftedDirichletDatum: Dofs of Dirichlet datum lifting. mu0BC: Mu value of last Dirichlet datum lifting. degree_threshold: Threshold for ufl expression interpolation degree. lambda_: Value of lambda_. mu_: Value of mu_. forcingTerm: Value of f. DirichletDatum: Value of u0. NeumannDatum: Value of g1. RobinDatumG: Value of g2. RobinDatumH: Value of h. DirichletBoundary: Function handle to \Gamma_D. NeumannBoundary: Function handle to \Gamma_N. RobinBoundary: Function handle to \Gamma_R. ds: Boundary measure 2-tuple (resp. for Neumann and Robin boundaries). A0: Scipy sparse array representation (in CSC format) of A0. b0: Numpy array representation of b0. dsToBeSet: Whether ds needs to be set. 
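+        (A() assembles the standard linear elasticity bilinear form
+        \int_\Omega sigma(u) : epsilon(v) dx + \int_{\Gamma_R} h u.v ds,
+        with sigma(u) = lambda_ * div(u) * I + 2 * mu_ * epsilon(u) and
+        epsilon(u) = (grad(u) + grad(u)^T) / 2.)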
""" def __init__(self, degree_threshold : int = np.inf, verbosity : int = 10, timestamp : bool = True): super().__init__(degree_threshold = degree_threshold, verbosity = verbosity, timestamp = timestamp) self.lambda_ = fenONE self.mu_ = fenONE self.forcingTerm = fenZEROS(self.V.mesh().topology().dim()) self.DirichletDatum = fenZEROS(self.V.mesh().topology().dim()) self.NeumannDatum = fenZEROS(self.V.mesh().topology().dim()) self.RobinDatumG = fenZEROS(self.V.mesh().topology().dim()) self.RobinDatumH = fenZERO @property def V(self): """Value of V.""" return self._V @V.setter def V(self, V): VectorProblemEngineBase.V.fset(self, V) self.forcingTerm = fenZEROS(self.V.mesh().topology().dim()) self.DirichletDatum = fenZEROS(self.V.mesh().topology().dim()) self.NeumannDatum = fenZEROS(self.V.mesh().topology().dim()) self.RobinDatumG = fenZEROS(self.V.mesh().topology().dim()) self.dsToBeSet = True @property def lambda_(self): """Value of lambda_.""" return self._lambda_ @lambda_.setter def lambda_(self, lambda_): self.resetAs() if not isinstance(lambda_, (list, tuple,)): lambda_ = [lambda_, fenZERO] self._lambda_ = lambda_ @property def mu_(self): """Value of mu_.""" return self._mu_ @mu_.setter def mu_(self, mu_): self.resetAs() if not isinstance(mu_, (list, tuple,)): mu_ = [mu_, fenZERO] self._mu_ = mu_ @property def forcingTerm(self): """Value of f.""" return self._forcingTerm @forcingTerm.setter def forcingTerm(self, forcingTerm): self.resetbs() if not isinstance(forcingTerm, (list, tuple,)): forcingTerm = [forcingTerm, fenZEROS(self.V.mesh().topology().dim())] self._forcingTerm = forcingTerm @property def DirichletDatum(self): """Value of u0.""" return self._DirichletDatum @DirichletDatum.setter def DirichletDatum(self, DirichletDatum): self.resetbs() if not isinstance(DirichletDatum, (list, tuple,)): DirichletDatum = [DirichletDatum, fenZEROS(self.V.mesh().topology().dim())] self._DirichletDatum = DirichletDatum @property def NeumannDatum(self): """Value of g1.""" return self._NeumannDatum @NeumannDatum.setter def NeumannDatum(self, NeumannDatum): self.resetbs() if not isinstance(NeumannDatum, (list, tuple,)): NeumannDatum = [NeumannDatum, fenZEROS(self.V.mesh().topology().dim())] self._NeumannDatum = NeumannDatum @property def RobinDatumG(self): """Value of g2.""" return self._RobinDatumG @RobinDatumG.setter def RobinDatumG(self, RobinDatumG): self.resetbs() if not isinstance(RobinDatumG, (list, tuple,)): RobinDatumG = [RobinDatumG, fenZEROS(self.V.mesh().topology().dim())] self._RobinDatumG = RobinDatumG @property def RobinDatumH(self): """Value of h.""" return self._RobinDatumH @RobinDatumH.setter def RobinDatumH(self, RobinDatumH): self.resetAs() if not isinstance(RobinDatumH, (list, tuple,)): RobinDatumH = [RobinDatumH, fenZERO] self._RobinDatumH = RobinDatumH @property def DirichletBoundary(self): """Function handle to DirichletBoundary.""" return self.BCManager.DirichletBoundary @DirichletBoundary.setter def DirichletBoundary(self, DirichletBoundary): self.resetAs() self.resetbs() self.BCManager.DirichletBoundary = DirichletBoundary @property def NeumannBoundary(self): """Function handle to NeumannBoundary.""" return self.BCManager.NeumannBoundary @NeumannBoundary.setter def NeumannBoundary(self, NeumannBoundary): self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.NeumannBoundary = NeumannBoundary @property def RobinBoundary(self): """Function handle to RobinBoundary.""" return self.BCManager.RobinBoundary @RobinBoundary.setter def RobinBoundary(self, RobinBoundary): 
self.resetAs() self.resetbs() self.dsToBeSet = True self.BCManager.RobinBoundary = RobinBoundary def autoSetDS(self): """Set FEniCS boundary measure based on boundary function handles.""" if self.dsToBeSet: if self.verbosity >= 20: verbosityDepth("INIT", "Initializing boundary measures.", timestamp = self.timestamp) NB = self.NeumannBoundary RB = self.RobinBoundary boundary_markers = fen.MeshFunction("size_t", self.V.mesh(), self.V.mesh().topology().dim() - 1) NB.mark(boundary_markers, 0) RB.mark(boundary_markers, 1) self.ds = fen.Measure("ds", domain = self.V.mesh(), subdomain_data = boundary_markers) self.dsToBeSet = False if self.verbosity >= 20: verbosityDepth("DEL", "Done initializing boundary measures.", timestamp = self.timestamp) def buildEnergyNormForm(self): # energy norm """ Build sparse matrix (in CSR format) representative of scalar product. """ if self.verbosity >= 20: verbosityDepth("INIT", "Assembling energy matrix.", timestamp = self.timestamp) lambda_Re, _ = self.lambda_ mu_Re, _ = self.mu_ termNames = ["lambda_", "mu_"] pars = self.iterReduceQuadratureDegree(zip( [lambda_Re, mu_Re], [x + "Real" for x in termNames])) epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) sigma = lambda u, l_, m_: ( l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + 2. * m_ * epsilon(u)) normMatFen = fen.inner(sigma(self.u, lambda_Re, mu_Re), epsilon(self.v)) * fen.dx - normMatFen = fen.assemble(fen.dot(self.u, self.v) * fen.dx, - form_compiler_parameters = pars) + normMatFen = fen.assemble(normMatFen, form_compiler_parameters = pars) normMat = fen.as_backend_type(normMatFen).mat() self.energyNormMatrix = csr_matrix(normMat.getValuesCSR()[::-1], shape = normMat.size) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling energy matrix.", timestamp = self.timestamp) def A(self, mu:complex, der : int = 0) -> ScOp: """Assemble (derivative of) operator of linear system.""" Anull = self.checkAInBounds(der) if Anull is not None: return Anull self.autoSetDS() if self.As[0] is None: if self.verbosity >= 20: verbosityDepth("INIT", "Assembling operator term A0.", timestamp = self.timestamp) DirichletBC0 = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) lambda_Re, lambda_Im = self.lambda_ mu_Re, mu_Im = self.mu_ hRe, hIm = self.RobinDatumH termNames = ["lambda_", "mu_", "RobinDatumH"] parsRe = self.iterReduceQuadratureDegree(zip( [lambda_Re, mu_Re, hRe], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [lambda_Im, mu_Re, hIm], [x + "Imag" for x in termNames])) epsilon = lambda u: 0.5 * (fen.grad(u) + fen.nabla_grad(u)) sigma = lambda u, l_, m_: ( l_ * fen.div(u) * fen.Identity(u.geometric_dimension()) + 2. 
* m_ * epsilon(u)) a0Re = (fen.inner(sigma(self.u, lambda_Re, mu_Re), epsilon(self.v)) * fen.dx + hRe * fen.inner(self.u, self.v) * self.ds(1)) a0Im = (fen.inner(sigma(self.u, lambda_Im, mu_Im), epsilon(self.v)) * fen.dx + hIm * fen.inner(self.u, self.v) * self.ds(1)) A0Re = fen.assemble(a0Re, form_compiler_parameters = parsRe) A0Im = fen.assemble(a0Im, form_compiler_parameters = parsIm) DirichletBC0.apply(A0Re) DirichletBC0.zero(A0Im) A0ReMat = fen.as_backend_type(A0Re).mat() A0ImMat = fen.as_backend_type(A0Im).mat() A0Rer, A0Rec, A0Rev = A0ReMat.getValuesCSR() A0Imr, A0Imc, A0Imv = A0ImMat.getValuesCSR() self.As[0] = (csr_matrix((A0Rev, A0Rec, A0Rer), shape = A0ReMat.size) + 1.j * csr_matrix((A0Imv, A0Imc, A0Imr), shape = A0ImMat.size)) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling operator term.", timestamp = self.timestamp) return self.As[0] def b(self, mu:complex, der : int = 0, homogeneized : bool = False) -> Np1D: """Assemble (derivative of) RHS of linear system.""" bnull = self.checkbInBounds(der, homogeneized) if bnull is not None: return bnull if homogeneized and not np.isclose(self.mu0BC, mu): self.u0BC = self.liftDirichletData(mu) if (max(self.nbs, self.nAs * homogeneized) > 1 and not np.isclose(self.bsmu, mu)): self.bsmu = mu self.resetbs() if self.bs[homogeneized][der] is None: self.autoSetDS() if self.verbosity >= 20: verbosityDepth("INIT", ("Assembling forcing term " "b{}.").format(der), timestamp = self.timestamp) if der < self.nbs: fRe, fIm = self.forcingTerm g1Re, g1Im = self.NeumannDatum g2Re, g2Im = self.RobinDatumG else: fRe = fenZEROS(self.V.mesh().topology().dim()) fIm = fenZEROS(self.V.mesh().topology().dim()) g1Re = fenZEROS(self.V.mesh().topology().dim()) g1Im = fenZEROS(self.V.mesh().topology().dim()) g2Re = fenZEROS(self.V.mesh().topology().dim()) g2Im = fenZEROS(self.V.mesh().topology().dim()) termNames = ["forcingTerm", "NeumannDatum", "RobinDatumG"] parsRe = self.iterReduceQuadratureDegree(zip( [fRe, g1Re, g2Re], [x + "Real" for x in termNames])) parsIm = self.iterReduceQuadratureDegree(zip( [fIm, g1Im, g2Im], [x + "Imag" for x in termNames])) L0Re = (fen.inner(fRe, self.v) * fen.dx + fen.inner(g1Re, self.v) * self.ds(0) + fen.inner(g2Re, self.v) * self.ds(1)) L0Im = (fen.inner(fIm, self.v) * fen.dx + fen.inner(g1Im, self.v) * self.ds(0) + fen.inner(g2Im, self.v) * self.ds(1)) b0Re = fen.assemble(L0Re, form_compiler_parameters = parsRe) b0Im = fen.assemble(L0Im, form_compiler_parameters = parsIm) if homogeneized: Ader = self.A(mu, der) b0Re[:] -= np.real(Ader.dot(self.u0BC)) b0Im[:] -= np.imag(Ader.dot(self.u0BC)) DBCR = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) DBCI = fen.DirichletBC(self.V, fenZEROS(self.V.mesh().topology().dim()), self.DirichletBoundary) else: DBCR = fen.DirichletBC(self.V, self.DirichletDatum[0], self.DirichletBoundary) DBCI = fen.DirichletBC(self.V, self.DirichletDatum[1], self.DirichletBoundary) DBCR.apply(b0Re) DBCI.apply(b0Im) self.bs[homogeneized][der] = np.array(b0Re[:] + 1.j * b0Im[:], dtype = np.complex) if self.verbosity >= 20: verbosityDepth("DEL", "Done assembling forcing term.", timestamp = self.timestamp) return self.bs[homogeneized][der] diff --git a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py index 2089479..127e97a 100644 --- a/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py +++ b/rrompy/reduction_methods/lagrange/approximant_lagrange_pade.py @@ -1,486 +1,487 
@@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import numpy as np from rrompy.reduction_methods.base import checkRobustTolerance from .generic_approximant_lagrange import GenericApproximantLagrange from rrompy.reduction_methods.base.fit_utils import (polybases, polyvander, polyvalder, polyfitname) from rrompy.reduction_methods.trained_model import TrainedModelPade as tModel from rrompy.reduction_methods.trained_model import TrainedModelData from rrompy.utilities.base.types import Np1D, Np2D, HFEng, DictAny, Tuple from rrompy.utilities.base import verbosityDepth, purgeDict, customFit from rrompy.utilities.exception_manager import (RROMPyException, modeAssert, RROMPyWarning) __all__ = ['ApproximantLagrangePade'] class ApproximantLagrangePade(GenericApproximantLagrange): """ ROM Lagrange Pade' interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'S': total number of samples current approximant relies upon; defaults to 2; - 'sampler': sample point generator; defaults to uniform sampler on [0, 1]; - 'polybasis': type of polynomial basis for interpolation; allowed values include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to 'MONOMIAL'; - 'E': coefficient of interpolant to be minimized; defaults to min(S, M + 1); - 'M': degree of Pade' interpolant numerator; defaults to 0; - 'N': degree of Pade' interpolant denominator; defaults to 0; - 'interpRcond': tolerance for interpolation via numpy.polyfit; defaults to None; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. ws: Array of snapshot weigths. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'S': total number of samples current approximant relies upon; - 'sampler': sample point generator; - 'polybasis': type of polynomial basis for interpolation; - 'E': coefficient of interpolant to be minimized; - 'M': degree of Pade' interpolant numerator; - 'N': degree of Pade' interpolant denominator; - 'interpRcond': tolerance for interpolation via numpy.polyfit; - 'robustTol': tolerance for robust Pade' denominator management. extraApproxParameters: List of approxParameters keys in addition to mother class's. 
S: Number of solution snapshots over which current approximant is based upon. sampler: Sample point generator. M: Numerator degree of approximant. N: Denominator degree of approximant. POD: Whether to compute POD of snapshots. interpRcond: Tolerance for interpolation via numpy.polyfit. robustTol: Tolerance for robust Pade' denominator management. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. Q: Numpy 1D vector containing complex coefficients of approximant denominator. P: Numpy 2D vector whose columns are FE dofs of coefficients of approximant numerator. uApp: Last evaluated approximant as numpy complex vector. """ def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["polybasis", "E", "M", "N", "interpRcond", "robustTol"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) self._postInit() @property def approxParameters(self): """ Value of approximant parameters. """ return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["polybasis", "E", "M", "N", "interpRcond", "robustTol"], True, True, baselevel = 1) if hasattr(self, "_M") and self.M is not None: Mold = self.M self._M = 0 if hasattr(self, "_N") and self.N is not None: Nold = self.N self._N = 0 if hasattr(self, "_E") and self.E is not None: self._E = 0 GenericApproximantLagrange.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "polybasis" in keyList: self.polybasis = approxParameters["polybasis"] elif not hasattr(self, "_polybasis") or self._polybasis is None: self.polybasis = "MONOMIAL" if "interpRcond" in keyList: self.interpRcond = approxParameters["interpRcond"] elif not hasattr(self, "interpRcond") or self.interpRcond is None: self.interpRcond = None if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif not hasattr(self, "_robustTol") or self._robustTol is None: self.robustTol = 0 if "M" in keyList: self.M = approxParameters["M"] elif hasattr(self, "_M") and self.M is not None: self.M = Mold else: self.M = 0 if "N" in keyList: self.N = approxParameters["N"] elif hasattr(self, "_N") and self.N is not None: self.N = Nold else: self.N = 0 if "E" in keyList: self.E = approxParameters["E"] else: self.E = min(self.S - 1, self.M + 1) @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in polybases: raise RROMPyException("Prescribed polybasis not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. 
Overriding " "to 'MONOMIAL'.")) self._sampleType = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def interpRcond(self): """Value of interpRcond.""" return self._interpRcond @interpRcond.setter def interpRcond(self, interpRcond): self._interpRcond = interpRcond self._approxParameters["interpRcond"] = self.interpRcond @property def M(self): """Value of M. Its assignment may change S.""" return self._M @M.setter def M(self, M): if M < 0: raise RROMPyException("M must be non-negative.") self._M = M self._approxParameters["M"] = self.M if hasattr(self, "_S") and self.S < self.M + 1: RROMPyWarning("Prescribed S is too small. Updating S to M + 1.") self.S = self.M + 1 @property def N(self): """Value of N. Its assignment may change S.""" return self._N @N.setter def N(self, N): if N < 0: raise RROMPyException("N must be non-negative.") self._N = N self._approxParameters["N"] = self.N if hasattr(self, "_S") and self.S < self.N + 1: RROMPyWarning("Prescribed S is too small. Updating S to N + 1.") self.S = self.N + 1 @property def E(self): """Value of E. Its assignment may change S.""" return self._E @E.setter def E(self, E): if E < 0: raise RROMPyException("E must be non-negative.") self._E = E self._approxParameters["E"] = self.E if hasattr(self, "_S") and self.S < self.E + 1: RROMPyWarning("Prescribed S is too small. Updating S to E + 1.") self.S = self.E + 1 @property def robustTol(self): """Value of tolerance for robust Pade' denominator management.""" return self._robustTol @robustTol.setter def robustTol(self, robustTol): if robustTol < 0.: RROMPyWarning(("Overriding prescribed negative robustness " "tolerance to 0.")) robustTol = 0. self._robustTol = robustTol self._approxParameters["robustTol"] = self.robustTol @property def S(self): """Value of S.""" return self._S @S.setter def S(self, S): if S <= 0: raise RROMPyException("S must be positive.") if hasattr(self, "_S"): Sold = self.S else: Sold = -1 vals, label = [0] * 3, {0:"M", 1:"N", 2:"E"} if hasattr(self, "_M") and self._M is not None: vals[0] = self.M if hasattr(self, "_N") and self._N is not None: vals[1] = self.N if hasattr(self, "_E") and self._E is not None: vals[2] = self.E idxmax = np.argmax(vals) if vals[idxmax] + 1 > S: RROMPyWarning(("Prescribed S is too small. Updating S to {} + " "1.").format(label[idxmax])) self.S = vals[idxmax] + 1 else: self._S = S self._approxParameters["S"] = self.S if Sold != self.S: self.resetSamples() def setupApprox(self): """ Compute Pade' interpolant. SVD-based robust eigenvalue management. """ if self.checkComputedApprox(): return modeAssert(self._mode, message = "Cannot setup approximant.") if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". 
format(self.name()), timestamp = self.timestamp) self.computeScaleFactor() self.computeSnapshots() if self.trainedModel is None: self.trainedModel = tModel() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data = TrainedModelData(self.trainedModel.name(), self.mu0, np.copy(self.samplingEngine.samples), self.HFEngine.rescalingExp) data.polytype = self.polybasis data.scaleFactor = self.scaleFactor data.mus = np.copy(self.mus) self.trainedModel.data = data else: self.trainedModel.data.projMat = np.copy( self.samplingEngine.samples) if self.N > 0: if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of denominator.", timestamp = self.timestamp) TE = polyvander[self.polybasis](self.radiusPade(self.mus), self.E) TE = (TE.T * self.ws).T while self.N > 0: RHS = np.zeros(self.E + 1) RHS[-1] = 1. fitOut = customFit(TE.T, RHS, full = True, rcond = self.interpRcond) if self.verbosity >= 5: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree " "{} through {}... Conditioning of " "LS system: {:.4e}.").format( self.S, self.E, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] < self.E + 1: Enew = fitOut[1][1] - 1 Nnew = min(self.N, Enew) Mnew = min(self.M, Enew) if Nnew == self.N: strN = "" else: strN = "N from {} to {} and ".format(self.N, Nnew) if Mnew == self.M: strM = "" else: strM = "M from {} to {} and ".format(self.M, Mnew) RROMPyWarning(("Polyfit is poorly conditioned.\nReducing " "{}{}E from {} to {}.").format(strN, strM, self.E, Enew)) newParams = {"N" : Nnew, "M" : Mnew, "E" : Enew} self.approxParameters = newParams TE = TE[:, : self.E + 1] continue Ghalf = (TE[:, : self.N + 1].T * fitOut[0]).T if self.POD: self.Ghalf = self.samplingEngine.RPOD.dot(Ghalf) ev, eV = self.findeveVGQR() else: self.Ghalf = self.samplingEngine.samples.dot(Ghalf) ev, eV = self.findeveVGExplicit() newParams = checkRobustTolerance(ev, self.E, self.robustTol) if not newParams: break self.approxParameters = newParams + TE = TE[:, : self.E + 1] if self.N <= 0: self._N = 0 eV = np.ones((1, 1)) Q = eV[:, 0] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.", timestamp = self.timestamp) else: Q = np.ones(1, dtype = np.complex) self.trainedModel.data.Q = np.copy(Q) if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.", timestamp = self.timestamp) Qevaldiag = np.diag(self.trainedModel.getQVal(self.mus)) while self.M >= 0: fitVander = polyvander[self.polybasis](self.radiusPade(self.mus), self.M) fitOut = customFit(fitVander, Qevaldiag, w = self.ws, full = True, rcond = self.interpRcond) if self.verbosity >= 5: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree {} " "through {}... Conditioning of LS " "system: {:.4e}.").format( self.S, self.M, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] == self.M + 1: P = fitOut[0].T break RROMPyWarning(("Polyfit is poorly conditioned. Reducing M from {} " "to {}. Exact snapshot interpolation not " "guaranteed.").format(self.M, fitOut[1][1] - 1)) self.M = fitOut[1][1] - 1 if self.M <= 0: raise RROMPyException(("Instability in computation of numerator. 
" "Aborting.")) P = np.atleast_2d(P) if self.POD: P = self.samplingEngine.RPOD.dot(P) self.trainedModel.data.P = np.copy(P) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.", timestamp = self.timestamp) self.trainedModel.data.approxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.", timestamp = self.timestamp) def findeveVGExplicit(self, verbOutput : int = 5) -> Tuple[Np1D, Np2D]: """ Compute explicitly eigenvalues and eigenvectors of Pade' denominator matrix. """ modeAssert(self._mode, message = "Cannot solve eigenvalue problem.") if self.verbosity >= 10: verbosityDepth("INIT", "Building gramian matrix.", timestamp = self.timestamp) self.G = self.HFEngine.innerProduct(self.Ghalf, self.Ghalf) if self.verbosity >= 10: verbosityDepth("DEL", "Done building gramian.", timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("INIT", ("Solving eigenvalue problem for gramian " "matrix."), timestamp = self.timestamp) ev, eV = np.linalg.eigh(self.G) if self.verbosity >= verbOutput: try: condev = ev[-1] / ev[0] except: condev = np.inf verbosityDepth("MAIN", ("Solved eigenvalue problem of size {} " "with condition number {:.4e}.").format( self.N + 1, condev), timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("DEL", "Done solving eigenvalue problem.", timestamp = self.timestamp) return ev, eV def findeveVGQR(self, verbOutput : int = 5) -> Tuple[Np1D, Np2D]: """ Compute eigenvalues and eigenvectors of Pade' denominator matrix through SVD of R factor. """ if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square root of gramian " "matrix."), timestamp = self.timestamp) _, s, eV = np.linalg.svd(self.Ghalf, full_matrices = False) ev = s[::-1] eV = eV[::-1, :].T.conj() if self.verbosity >= verbOutput: try: condev = s[0] / s[-1] except: condev = np.inf verbosityDepth("MAIN", ("Solved svd problem of size {} x {} with " "condition number {:.4e}.").format( self.S, self.N + 1, condev), timestamp = self.timestamp) if self.verbosity >= 7: verbosityDepth("DEL", "Done solving eigenvalue problem.", timestamp = self.timestamp) return ev, eV def radiusPade(self, mu:Np1D, mu0 : float = None) -> float: """ Compute translated radius to be plugged into Pade' approximant. Args: mu: Parameter(s) 1. mu0: Parameter(s) 2. If None, set to self.mu0. Returns: Translated radius to be plugged into Pade' approximant. """ return self.trainedModel.radiusPade(mu, mu0) def getResidues(self) -> Np1D: """ Obtain approximant residues. Returns: Matrix with residues as columns. """ self.setupApprox() if self.verbosity >= 20: verbosityDepth("INIT", "Computing residues of model.", timestamp = self.timestamp) poles = self.getPoles() Pvals = self.trainedModel.data.projMat.dot(self.getPVal(poles)) Qder = polyvalder[self.polybasis](self.radiusPade(poles), self.trainedModel.data.Q) residues = Pvals / Qder if self.verbosity >= 20: verbosityDepth("DEL", "Done computing residues.", timestamp = self.timestamp) return residues diff --git a/rrompy/reduction_methods/taylor/generic_approximant_taylor.py b/rrompy/reduction_methods/taylor/generic_approximant_taylor.py index 7d848d9..33658fa 100644 --- a/rrompy/reduction_methods/taylor/generic_approximant_taylor.py +++ b/rrompy/reduction_methods/taylor/generic_approximant_taylor.py @@ -1,185 +1,185 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. 
# # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.utilities.base.types import DictAny, HFEng from rrompy.utilities.base import purgeDict, verbosityDepth from rrompy.utilities.exception_manager import (RROMPyException, modeAssert, RROMPyWarning) __all__ = ['GenericApproximantTaylor'] class GenericApproximantTaylor(GenericApproximant): """ ROM single-point approximant computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'E': total number of derivatives current approximant relies upon; defaults to 1; - 'sampleType': label of sampling type; available values are: - 'ARNOLDI': orthogonalization of solution derivatives through Arnoldi algorithm; - 'KRYLOV': standard computation of solution derivatives. Defaults to 'KRYLOV'. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'E': total number of derivatives current approximant relies upon; - 'sampleType': label of sampling type. POD: Whether to compute QR factorization of derivatives. E: Number of solution derivatives over which current approximant is based upon. sampleType: Label of sampling type. initialHFData: HF problem initial data. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. 
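+        (The approximant is built from the E + 1 derivatives of the solution
+        map at mu0, computed by computeDerivatives() through plain Krylov
+        iterations or, when sampleType is "ARNOLDI", with on-the-fly
+        orthogonalization.)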
""" def __init__(self, HFEngine:HFEng, mu0 : complex = 0, approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["E", "sampleType"]) super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) self._postInit() def setupSampling(self): """Setup sampling engine.""" modeAssert(self._mode, message = "Cannot setup sampling engine.") if not hasattr(self, "_sampleType"): return if self.sampleType == "ARNOLDI": from rrompy.sampling.linear_problem.sampling_engine_arnoldi \ import SamplingEngineArnoldi super().setupSampling(SamplingEngineArnoldi) elif self.sampleType == "KRYLOV": from rrompy.sampling.linear_problem.sampling_engine_krylov \ import SamplingEngineKrylov super().setupSampling(SamplingEngineKrylov) else: raise RROMPyException("Sample type not recognized.") @property def approxParameters(self): """Value of approximant parameters. Its assignment may change E.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["E", "sampleType"], True, True, baselevel = 1) GenericApproximant.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "E" in keyList: self.E = approxParameters["E"] elif hasattr(self, "_E") and self._E is not None: self.E = self.E else: self.E = 1 if "sampleType" in keyList: self.sampleType = approxParameters["sampleType"] elif not hasattr(self, "_sampleType") or self._sampleType is None: self.sampleType = "KRYLOV" @property def E(self): """Value of E.""" return self._E @E.setter def E(self, E): if E < 0: raise RROMPyException("E must be non-negative.") self._E = E self._approxParameters["E"] = self.E @property def sampleType(self): """Value of sampleType.""" return self._sampleType @sampleType.setter def sampleType(self, sampleType): if hasattr(self, "_sampleType") and self._sampleType is not None: sampleTypeOld = self.sampleType else: sampleTypeOld = -1 try: sampleType = sampleType.upper().strip().replace(" ","") if sampleType not in ["ARNOLDI", "KRYLOV"]: raise RROMPyException("Sample type not recognized.") self._sampleType = sampleType except: RROMPyWarning(("Prescribed sampleType not recognized. Overriding " "to 'KRYLOV'.")) self._sampleType = "KRYLOV" self._approxParameters["sampleType"] = self.sampleType if sampleTypeOld != self.sampleType: self.resetSamples() def computeDerivatives(self): """Compute derivatives of solution map starting from order 0.""" modeAssert(self._mode, message = "Cannot start derivative computation.") - if self.samplingEngine.samples is None: + if self.samplingEngine.nsamples <= self.E: if self.verbosity >= 5: verbosityDepth("INIT", "Starting computation of derivatives.", timestamp = self.timestamp) self.samplingEngine.iterSample(self.mu0, self.E + 1, homogeneized = self.homogeneized) if self.verbosity >= 5: verbosityDepth("DEL", "Done computing derivatives.", timestamp = self.timestamp) def normApprox(self, mu:complex, homogeneized : bool = False) -> float: """ Compute norm of approximant at arbitrary parameter. Args: mu: Target parameter. homogeneized(optional): Whether to remove Dirichlet BC. Defaults to False. Returns: Target norm of approximant. 
""" if self.sampleType != "ARNOLDI" or self.homogeneized != homogeneized: return super().normApprox(mu, homogeneized) return np.linalg.norm(self.getApproxReduced(mu, homogeneized)) diff --git a/rrompy/utilities/base/custom_fit.py b/rrompy/utilities/base/custom_fit.py index a64d896..622702b 100644 --- a/rrompy/utilities/base/custom_fit.py +++ b/rrompy/utilities/base/custom_fit.py @@ -1,146 +1,146 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np import numpy.linalg as la -from warnings import warn +from rrompy.utilities.exception_manager import RROMPyException, RROMPyWarning __all__ = ["customFit"] def customFit(van, y, rcond=None, full=False, w=None): """ Least-squares fit of a polynomial to data. Copied from numpy.polynomial.polynomial. Parameters ---------- va : array_like, shape (`M`,`deg` + 1) Vandermonde-like matrix. y : array_like, shape (`M`,) or (`M`, `K`) y-coordinates of the sample points. Several sets of sample points sharing the same x-coordinates can be (independently) fit with one call to `polyfit` by passing in for `y` a 2-D array that contains one data set per column. rcond : float, optional Relative condition number of the fit. Singular values smaller than `rcond`, relative to the largest singular value, will be ignored. The default value is ``len(van)*eps``, where `eps` is the relative precision of the platform's float type, about 2e-16 in most cases. full : bool, optional Switch determining the nature of the return value. When ``False`` (the default) just the coefficients are returned; when ``True``, diagnostic information from the singular value decomposition (used to solve the fit's matrix equation) is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. Returns ------- coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) Polynomial coefficients ordered from low to high. If `y` was 2-D, the coefficients in column `k` of `coef` represent the polynomial fit to the data in `y`'s `k`-th column. [residuals, rank, singular_values, rcond] : list These values are only returned if `full` = True resid -- sum of squared residuals of the least squares fit rank -- the numerical rank of the scaled Vandermonde matrix sv -- singular values of the scaled Vandermonde matrix rcond -- value of `rcond`. For more details, see `linalg.lstsq`. Raises ------ RankWarning Raised if the matrix in the least-squares fit is rank deficient. The warning is only raised if `full` == False. The warnings can be turned off by: >>> import warnings >>> warnings.simplefilter('ignore', RankWarning) """ van = np.asarray(van) + 0.0 y = np.asarray(y) + 0.0 # check arguments. 
diff --git a/rrompy/utilities/base/custom_fit.py b/rrompy/utilities/base/custom_fit.py
index a64d896..622702b 100644
--- a/rrompy/utilities/base/custom_fit.py
+++ b/rrompy/utilities/base/custom_fit.py
@@ -1,146 +1,146 @@
# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#

import numpy as np
import numpy.linalg as la
-from warnings import warn
+from rrompy.utilities.exception_manager import RROMPyException, RROMPyWarning

__all__ = ["customFit"]

def customFit(van, y, rcond=None, full=False, w=None):
    """
    Least-squares fit of a polynomial to data. Copied from
    numpy.polynomial.polynomial.

    Parameters
    ----------
    van : array_like, shape (`M`, `deg` + 1)
        Vandermonde-like matrix.
    y : array_like, shape (`M`,) or (`M`, `K`)
        y-coordinates of the sample points. Several sets of sample points
        sharing the same x-coordinates can be (independently) fit with one
        call to `polyfit` by passing in for `y` a 2-D array that contains
        one data set per column.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        `rcond`, relative to the largest singular value, will be ignored.
        The default value is ``len(van)*eps``, where `eps` is the relative
        precision of the platform's float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining the nature of the return value. When ``False``
        (the default) just the coefficients are returned; when ``True``,
        diagnostic information from the singular value decomposition (used
        to solve the fit's matrix equation) is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point ``(x[i],y[i])``
        to the fit is weighted by `w[i]`. Ideally the weights are chosen so
        that the errors of the products ``w[i]*y[i]`` all have the same
        variance. The default value is None.

    Returns
    -------
    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
        Polynomial coefficients ordered from low to high. If `y` was 2-D,
        the coefficients in column `k` of `coef` represent the polynomial
        fit to the data in `y`'s `k`-th column.

    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Raises
    ------
    RankWarning
        Raised if the matrix in the least-squares fit is rank deficient.
        The warning is only raised if `full` == False. The warnings can be
        turned off by:

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)
    """
    van = np.asarray(van) + 0.0
    y = np.asarray(y) + 0.0

    # check arguments
    if van.ndim != 2:
        raise RROMPyException("expected 2D vector for van")
    if van.size == 0:
        raise RROMPyException("expected non-empty vector for van")
    if y.ndim < 1 or y.ndim > 2:
        raise RROMPyException("expected 1D or 2D array for y")
    if len(van) != len(y):
        raise RROMPyException("expected van and y to have same length")
    order = van.shape[1]

    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
    if isinstance(w, (str, )) and w.upper() == "AUTO":
        # determine the norms of the design matrix rows
        if issubclass(van.dtype.type, np.complexfloating):
            w = np.sqrt((np.square(van.real) + np.square(van.imag)).sum(1))
        else:
            w = np.sqrt(np.square(van).sum(1))
        w[w == 0] = 1
        w = np.power(w, -1.)
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise RROMPyException("expected 1D vector for w")
        if len(van) != len(w):
            raise RROMPyException("expected van and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(van)*np.finfo(van.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] = 1

    # Solve the least squares problem.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        RROMPyWarning(msg, np.polynomial.polyutils.RankWarning,
                      stacklevel = 2)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
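A minimal usage sketch for customFit, assuming the import path shown in the diff header; the data and degree are illustrative. With w = "AUTO", each row of the Vandermonde matrix is weighted by the inverse of its norm, as implemented above.

import numpy as np
from rrompy.utilities.base.custom_fit import customFit

rng = np.random.default_rng(0)
x = np.linspace(-1., 1., 20)
van = np.polynomial.polynomial.polyvander(x, 2)         # shape (20, 3)
y = 1. + 2. * x - .5 * x ** 2. + 1e-3 * rng.standard_normal(20)

coef = customFit(van, y, w = "AUTO")                    # low-to-high order
coef2, (resid, rank, sv, rcond) = customFit(van, y, full = True)
print(coef, rank)   # coefficients close to [1., 2., -.5]; rank 3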
diff --git a/rrompy/utilities/parameter_sampling/quadrature_sampler.py b/rrompy/utilities/parameter_sampling/quadrature_sampler.py
index 40b2852..237e7a0 100644
--- a/rrompy/utilities/parameter_sampling/quadrature_sampler.py
+++ b/rrompy/utilities/parameter_sampling/quadrature_sampler.py
@@ -1,131 +1,147 @@
# Copyright (C) 2018 by the RROMPy authors
#
# This file is part of RROMPy.
#
# RROMPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RROMPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
#

import numpy as np
from rrompy.utilities.parameter_sampling.generic_sampler import GenericSampler
from rrompy.utilities.base.types import Np1D, Tuple, List
from rrompy.utilities.exception_manager import RROMPyException

__all__ = ['QuadratureSampler']

class QuadratureSampler(GenericSampler):
    """Generator of quadrature sample points."""

-    allowedKinds = ["UNIFORM", "CHEBYSHEV", "GAUSSLEGENDRE"]
+    allowedKinds = ["UNIFORM", "CHEBYSHEV", "GAUSSLEGENDRE",
+                    "CLENSHAWCURTIS"]

    def __init__(self, lims:Tuple[Np1D, Np1D], kind : str = "UNIFORM",
                 scaling : List[callable] = None,
                 scalingInv : List[callable] = None):
        super().__init__(lims = lims, scaling = scaling,
                         scalingInv = scalingInv)
        self.kind = kind

    def __str__(self) -> str:
        return "{}_{}".format(super().__str__(), self.kind)

    def __repr__(self) -> str:
        return self.__str__() + " at " + hex(id(self))

    @property
    def kind(self):
        """Value of kind."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        if kind.upper() not in self.allowedKinds:
            raise RROMPyException("Generator kind not recognized.")
        self._kind = kind.upper()

    def generatePoints(self, n:Np1D) -> Tuple[List[Np1D], Np1D]:
        """Array of quadrature points and array of weights."""
        super().generatePoints(n)
        d = len(self.lims[0])
        try:
            len(n)
        except:
            n = np.array([n])
        if len(n) != d:
            raise RROMPyException(("Numbers of points must have same "
                                   "dimension as limits."))
        for j in range(d):
            a, b = self.lims[0][j], self.lims[1][j]
            if self.scaling is not None:
                a, b = self.scaling[j](a), self.scaling[j](b)
            if self.kind == "UNIFORM":
                xj = np.linspace(a, b, n[j])[:, None]
                wj = np.abs(a - b) / n[j] * np.ones(n[j])
            elif self.kind == "CHEBYSHEV":
                nodes, weights = np.polynomial.chebyshev.chebgauss(n[j])
                xj = (a + b) / 2 + (a - b) / 2 * nodes[:, None]
                wj = np.abs(a - b) / np.pi * weights
            elif self.kind == "GAUSSLEGENDRE":
                nodes, weights = np.polynomial.legendre.leggauss(n[j])
                xj = (a + b) / 2 + (a - b) / 2 * nodes[:, None]
                wj = np.abs(a - b) * weights
+            elif self.kind == "CLENSHAWCURTIS":
+                # nodes are the n[j] Chebyshev extrema; weights follow the
+                # standard cosine-series formula
+                if n[j] == 1:
+                    nodes = np.zeros(1)
+                    weights = 2. * np.ones(1)
+                else:
+                    thetas = np.pi / (n[j] - 1) * np.arange(n[j])
+                    nodes = np.cos(thetas)
+                    weights = np.ones(n[j])
+                    jhi = (n[j] - 1) // 2
+                    for i in range(n[j]):
+                        # jj and bw (rather than j and b) avoid shadowing
+                        # the dimension index and the interval endpoint
+                        for jj in range(jhi):
+                            bw = 1. + 1. * (2 * (jj + 1) != n[j] - 1)
+                            weights[i] -= (bw
+                                        * np.cos(2. * (jj + 1) * thetas[i])
+                                        / (4. * jj * (jj + 2) + 3))
+                    weights /= (n[j] - 1)
+                    weights[1 : -1] *= 2.
+                xj = (a + b) / 2 + (a - b) / 2 * nodes[:, None]
+                wj = np.abs(a - b) / 2 * weights
            if self.scalingInv is not None:
                xj = self.scalingInv[j](xj)
            if j == 0:
                x = xj
                w = wj
                xsize = n[0]
            else:
                x = np.concatenate((np.kron(np.ones(n[j])[:, None], x),
                                    np.kron(xj, np.ones(xsize)[:, None])),
                                   axis = 1)
                w = np.multiply(np.kron(np.ones(n[j]), w),
                                np.kron(wj, np.ones(xsize)))
                xsize = xsize * n[j]
        return [y.flatten() for y in np.split(x, xsize)], w
    def refine(self, n:int) -> Tuple[List[Np1D], Np1D]:
        """
        Apply refinement. If points are not nested, equivalent to
        generatePoints([2 * x - 1 for x in n]).
        """
        if self.kind != "UNIFORM": return super().refine(n)
        super().generatePoints(n)
        d = len(self.lims[0])
        try:
            len(n)
        except:
            n = np.array([n])
        if len(n) != d:
            raise RROMPyException(("Numbers of points must have same "
                                   "dimension as limits."))
        for j in range(d):
            a, b = self.lims[0][j], self.lims[1][j]
            if self.scaling is not None:
                a, b = self.scaling[j](a), self.scaling[j](b)
            xj = np.linspace(a + (b - a) / 2. / (n[j] - 1),
                             b + (a - b) / 2. / (n[j] - 1),
                             n[j] - 1)[:, None]
            wj = np.abs(a - b) / (n[j] - 1) * np.ones(n[j] - 1)
            if self.scalingInv is not None:
                xj = self.scalingInv[j](xj)
            if j == 0:
                x = xj
                w = wj
                xsize = n[0] - 1
            else:
                x = np.concatenate((np.kron(np.ones(n[j] - 1)[:, None], x),
                                    np.kron(xj, np.ones(xsize)[:, None])),
                                   axis = 1)
                w = np.multiply(np.kron(np.ones(n[j] - 1), w),
                                np.kron(wj, np.ones(xsize)))
                xsize = xsize * (n[j] - 1)
        return [y.flatten() for y in np.split(x, xsize)], w
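A quick sanity check for the new "CLENSHAWCURTIS" kind; this is a sketch assuming the (lims[0], lims[1]) convention that generatePoints reads above. The nodes and weights on [0, 1] should reproduce the integral of exp(x), i.e. e - 1, to high accuracy.

import numpy as np
from rrompy.utilities.parameter_sampling import QuadratureSampler as QS

sampler = QS(lims = (np.array([0.]), np.array([1.])),
             kind = "CLENSHAWCURTIS")
points, weights = sampler.generatePoints(9)
# each point is a length-1 array, since the parameter space is 1D here
quad = sum(w * np.exp(p[0]) for p, w in zip(points, weights))
print(quad, np.exp(1.) - 1.)   # both approximately 1.7182818...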
""" allowedOutputsStandard = ["normHF", "normApprox", "normRes", "normResRel", "normErr", "normErrRel"] allowedOutputs = allowedOutputsStandard + ["HFFunc", "ApproxFunc", "ErrFunc", "ErrFuncRel"] allowedOutputsFull = allowedOutputs + ["poles"] def __init__(self, ROMEngine : ROMEng = None, mutars : Np1D = np.array([]), params : List[DictAny] = [{}], mostExpensive : str = "HF"): self.ROMEngine = ROMEngine self.mutars = mutars self.params = params self.mostExpensive = mostExpensive def name(self) -> str: return self.__class__.__name__ def __str__(self) -> str: return self.name() def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) @property def mostExpensive(self): """Value of mostExpensive.""" return self._mostExpensive @mostExpensive.setter def mostExpensive(self, mostExpensive:str): mostExpensive = mostExpensive.upper() if mostExpensive not in ["HF", "APPROX"]: RROMPyWarning(("Value of mostExpensive not recognized. Overriding " "to 'APPROX'.")) mostExpensive = "APPROX" self._mostExpensive = mostExpensive def checkValues(self) -> bool: """Check if sweep can be performed.""" if self.ROMEngine is None: raise RROMPyException("ROMEngine is missing. Aborting.") if len(self.mutars) == 0: raise RROMPyException("Empty target parameter vector. Aborting.") if len(self.params) == 0: raise RROMPyException("Empty method parameters vector. Aborting.") def sweep(self, filename : str = "out.dat", outputs : List[str] = [], verbose : int = 10, timestamp : bool = True): self.checkValues() try: if outputs.upper() == "ALL": outputs = self.allowedOutputsFull except: if len(outputs) == 0: outputs = self.allowedOutputsStandard outputs = purgeList(outputs, self.allowedOutputsFull, listname = self.name() + ".outputs", baselevel = 1) poles = ("poles" in outputs) if len(outputs) == 0: raise RROMPyException("Empty outputs. 
Aborting.") outParList = self.ROMEngine.parameterList Nparams = len(self.params) if poles: polesCheckList = [] allowedParams = self.ROMEngine.parameterList dotPos = filename.rfind('.') if dotPos in [-1, len(filename) - 1]: filename = getNewFilename(filename[:dotPos]) else: filename = getNewFilename(filename[:dotPos], filename[dotPos + 1:]) append_write = "w" initial_row = (outParList + ["muRe", "muIm"] + [x for x in self.allowedOutputs if x in outputs] + ["type"] + ["poles"] * poles) with open(filename, append_write, buffering = 1) as fout: writer = csv.writer(fout, delimiter=",") writer.writerow(initial_row) if self.mostExpensive == "HF": outerSet = self.mutars innerSet = self.params elif self.mostExpensive == "APPROX": outerSet = self.params innerSet = self.mutars for outerIdx, outerPar in enumerate(outerSet): if self.mostExpensive == "HF": i, mutar = outerIdx, outerPar elif self.mostExpensive == "APPROX": j, par = outerIdx, outerPar self.ROMEngine.approxParameters = {k: par[k] for k in\ par.keys() & allowedParams} self.ROMEngine.setupApprox() for innerIdx, innerPar in enumerate(innerSet): if self.mostExpensive == "APPROX": i, mutar = innerIdx, innerPar elif self.mostExpensive == "HF": j, par = innerIdx, innerPar self.ROMEngine.approxParameters = {k: par[k] for k in\ par.keys() & allowedParams} self.ROMEngine.setupApprox() if verbose >= 5: - verbosityDepth("INIT", ("Set {}/{}\tmu_{} = " - "{:.10f}").format(j + 1, - Nparams, i, - mutar), + verbtype = "MAIN" + if innerIdx == 0: + verbtype = "INIT" + verbosityDepth(verbtype, ("Set {}/{}\tmu_{} = " + "{:.10f}").format(j + 1, + Nparams, i, + mutar), timestamp = timestamp) outData = [] if "normHF" in outputs: valNorm = self.ROMEngine.normHF(mutar) outData = outData + [valNorm] if "normApprox" in outputs: val = self.ROMEngine.normApprox(mutar) outData = outData + [val] if "normRes" in outputs: valNRes = self.ROMEngine.normRes(mutar) outData = outData + [valNRes] if "normResRel" in outputs: if "normRes" not in outputs: valNRes = self.ROMEngine.normRes(mutar) val = self.ROMEngine.normRHS(mutar) outData = outData + [valNRes / val] if "normErr" in outputs: valNErr = self.ROMEngine.normErr(mutar) outData = outData + [valNErr] if "normErrRel" in outputs: if "normHF" not in outputs: valNorm = self.ROMEngine.normHF(mutar) if "normErr" not in outputs: valNErr = self.ROMEngine.normErr(mutar) outData = outData + [valNErr / valNorm] if "HFFunc" in outputs: valFunc = self.ROMEngine.HFEngine.functional( self.ROMEngine.getHF(mutar)) outData = outData + [valFunc] if "ApproxFunc" in outputs: valFApp = self.ROMEngine.HFEngine.functional( self.ROMEngine.getApprox(mutar)) outData = outData + [valFApp] if "ErrFunc" in outputs: if "HFFunc" not in outputs: valFunc = self.ROMEngine.HFEngine.functional( self.ROMEngine.getHF(mutar)) if "ApproxFunc" not in outputs: valFApp = self.ROMEngine.HFEngine.functional( self.ROMEngine.getApprox(mutar)) valFErr = np.abs(valFApp - valFunc) outData = outData + [valFErr] if "ErrFuncRel" in outputs: if not ("HFFunc" in outputs or "ErrFunc" in outputs): valFunc = self.ROMEngine.HFEngine.functional( self.ROMEngine.getHF(mutar)) if not ("AppFunc" in outputs or "ErrFunc" in outputs): valFApp = self.ROMEngine.HFEngine.functional( self.ROMEngine.getApprox(mutar)) val = np.nan if not np.isclose(valFunc, 0.): val = valFApp / valFunc outData = outData + [val] writeData = [] for parn in outParList: writeData = (writeData + [self.ROMEngine.approxParameters[parn]]) writeData = (writeData + [mutar.real, mutar.imag] + outData + 
    def read(self, filename:str, restrictions : DictAny = {},
             outputs : List[str] = []) -> DictAny:
        """
        Execute a query on a custom format CSV.

        Args:
            filename: CSV filename.
            restrictions(optional): Parameter configurations to output.
                Defaults to empty dictionary, i.e. output all.
            outputs(optional): Values to output. Defaults to empty list,
                i.e. no output.

        Returns:
            Dictionary of desired results, with a key for each entry of
            outputs, and a numpy 1D array as corresponding value.
        """
        with open(filename, 'r') as f:
            reader = csv.reader(f, delimiter=',')
            header = next(reader)
            restrIndices, outputIndices, outputData = {}, {}, {}
            for key in restrictions.keys():
                try:
                    restrIndices[key] = header.index(key)
                    if not isinstance(restrictions[key], list):
                        restrictions[key] = [restrictions[key]]
                    restrictions[key] = copy(restrictions[key])
                except:
                    RROMPyWarning("Ignoring key {} from restrictions."\
                                                                .format(key))
            for key in outputs:
                try:
                    outputIndices[key] = header.index(key)
                    outputData[key] = np.array([])
                except:
                    RROMPyWarning("Ignoring key {} from outputs."\
                                                                .format(key))
            for row in reader:
                restrTrue = True
                for key in restrictions.keys():
                    # string match against the (listified) restriction
                    # values; "==" would compare a string to a list and
                    # never match
                    if row[restrIndices[key]] in restrictions[key]:
                        continue
                    try:
                        if np.any(np.isclose(float(row[restrIndices[key]]),
                                    [float(x) for x in restrictions[key]])):
                            continue
                    except:
                        pass
                    restrTrue = False
                if restrTrue:
                    for key in outputIndices.keys():
                        try:
                            val = row[outputIndices[key]]
                            val = float(val)
                        except:
                            # keep non-numeric entries (e.g. poles) as
                            # strings instead of propagating ValueError
                            pass
                        finally:
                            outputData[key] = np.append(outputData[key],
                                                        val)
        return outputData
""" if save is not None: save = save.strip() zsVals = self.read(filename, outputs = zs) zs = list(zsVals.keys()) zss = None for key in zs: vals = np.unique(zsVals[key]) if zss is None: zss = copy(vals) else: zss = list(itertools.product(zss, vals)) lzs = len(zs) for z in zss: if lzs <= 1: constr = {zs[0] : z} else: constr = {zs[j] : z[j] for j in range(len(zs))} data = self.read(filename, restrictions = constr, outputs = xs+ys) if onePlot: for x in xs: xVals = data[x] p = plt.figure(**figspecs) logScale = False for y in ys: yVals = data[y] label = '{} vs {} for {}'.format(y, x, constr) if np.min(yVals) <= - np.finfo(float).eps: plt.plot(xVals, yVals, label = label) else: plt.plot(xVals, yVals, label = label) if np.log10(np.max(yVals) / np.min(yVals)) > 1.: logScale = True if logScale: ax = p.get_axes()[0] ax.set_yscale('log') plt.legend() plt.grid() if save is not None: prefix = "{}_{}_vs_{}_{}".format(save, ys, x, constr) plt.savefig(getNewFilename(prefix, saveFormat), format = saveFormat, dpi = saveDPI) plt.show() plt.close() else: for x, y in itertools.product(xs, ys): xVals, yVals = data[x], data[y] label = '{} vs {} for {}'.format(y, x, constr) p = plt.figure(**figspecs) if np.min(yVals) <= - np.finfo(float).eps: plt.plot(xVals, yVals, label = label) else: plt.plot(xVals, yVals, label = label) if np.log10(np.max(yVals) / np.min(yVals)) > 1.: ax = p.get_axes()[0] ax.set_yscale('log') plt.legend() plt.grid() if save is not None: prefix = "{}_{}_vs_{}_{}".format(save, y, x, constr) plt.savefig(getNewFilename(prefix, saveFormat), format = saveFormat, dpi = saveDPI) plt.show() plt.close() def plotCompare(self, filenames:List[str], xs:List[str], ys:List[str], zs:List[str], onePlot : bool = False, save : str = None, ylims : dict = None, saveFormat : str = "eps", saveDPI : int = 100, labels : List[str] = None, **figspecs): """ Perform plots from data in filename1 and filename2. Args: filenames: CSV filenames. xs: Values to put on x axes. ys: Values to put on y axes. zs: Meta-values for constraints. onePlot(optional): Whether to create a single figure per x. Defaults to False. save(optional): Where to save plot(s). Defaults to None, i.e. no saving. clip(optional): Custom y axis limits. If None, automatic values are kept. Defaults to None. saveFormat(optional): Format for saved plot(s). Defaults to "eps". saveDPI(optional): DPI for saved plot(s). Defaults to 100. labels: Label for each dataset. figspecs(optional key args): Optional arguments for matplotlib figure creation. 
""" nfiles = len(filenames) if save is not None: save = save.strip() if labels is None: labels = ["{}".format(j + 1) for j in range(nfiles)] zsVals = self.read(filenames[0], outputs = zs) zs = list(zsVals.keys()) zss = None for key in zs: vals = np.unique(zsVals[key]) if zss is None: zss = copy(vals) else: zss = list(itertools.product(zss, vals)) lzs = len(zs) for z in zss: if lzs <= 1: constr = {zs[0] : z} else: constr = {zs[j] : z[j] for j in range(len(zs))} data = [None] * nfiles for j in range(nfiles): data[j] = self.read(filenames[j], restrictions = constr, outputs = xs + ys) if onePlot: for x in xs: xVals = [None] * nfiles for j in range(nfiles): try: xVals[j] = data[j][x] except: pass p = plt.figure(**figspecs) logScale = False for y in ys: for j in range(nfiles): try: yVals = data[j][y] except: pass l = '{} vs {} for {}, {}'.format(y, x, constr, labels[j]) if np.min(yVals) <= - np.finfo(float).eps: plt.plot(xVals[j], yVals, label = l) else: plt.plot(xVals[j], yVals, label = l) if np.log10(np.max(yVals)/np.min(yVals)) > 1.: logScale = True if logScale: ax = p.get_axes()[0] ax.set_yscale('log') if ylims is not None: plt.ylim(**ylims) plt.legend() plt.grid() if save is not None: prefix = "{}_{}_vs_{}_{}".format(save, ys, x, constr) plt.savefig(getNewFilename(prefix, saveFormat), format = saveFormat, dpi = saveDPI) plt.show() plt.close() else: for x, y in itertools.product(xs, ys): p = plt.figure(**figspecs) logScale = False for j in range(nfiles): xVals, yVals = data[j][x], data[j][y] l = '{} vs {} for {}, {}'.format(y, x, constr, labels[j]) if np.min(yVals) <= - np.finfo(float).eps: plt.plot(xVals, yVals, label = l) else: plt.plot(xVals, yVals, label = l) if np.log10(np.max(yVals)/np.min(yVals)) > 1.: logScale = True if logScale: ax = p.get_axes()[0] ax.set_yscale('log') if ylims is not None: plt.ylim(**ylims) plt.legend() plt.grid() if save is not None: prefix = "{}_{}_vs_{}_{}".format(save, y, x, constr) plt.savefig(getNewFilename(prefix, saveFormat), format = saveFormat, dpi = saveDPI) plt.show() plt.close()