diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..9f8e9b6
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+1.0
\ No newline at end of file
diff --git a/bump_version.py b/bump_version.py
new file mode 100644
index 0000000..b5264fa
--- /dev/null
+++ b/bump_version.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2018 by the RROMPy authors
+#
+# This file is part of RROMPy.
+#
+# RROMPy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# RROMPy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import sys
+from warnings import warn
+
+filename = "./setup.py"
+filenameOut = filename[:-3] + "_out" + filename[-3:]
+
+if not os.path.exists(filename):
+    raise Exception(("Could not find setup.py file. Check if current folder "
+                     "is correct."))
+if len(sys.argv) > 2:
+    warn("Ignoring all arguments but the first.")
+
+nVersionsFound = 0
+with open(filename, 'r') as filein, open(filenameOut, 'w') as fileout:
+    for line in filein:
+        versionpos = line.find("version=\"")
+        if versionpos > -1:
+            nVersionsFound += 1
+            if nVersionsFound > 1:
+                break
+            commapos = line.find("\",")
+            if len(sys.argv) > 1:
+                version = sys.argv[1]
+            else:
+                version = line[versionpos + 9 : commapos]
+                try:
+                    if version[-1] in [str(x) for x in range(9)]:
+                        lastdigit = int(version[-1]) + 1
+                    elif version[-1] == "9":
+                        lastdigit = "X"
+                    else:
+                        lastdigit = "XX"
+                    version = "{}{}".format(version[:-1], lastdigit)
+                except IndexError:
+                    warn(("Could not read old version. Keeping old version. "
+                          "Try specifying new version manually."))
+            line = (line[: versionpos] + "version=\"" + version
+                    + line[commapos :])
+        fileout.write(line)
+
+if nVersionsFound != 1:
+    os.remove(filenameOut)
+    if nVersionsFound == 0:
+        raise Exception(("Found no occurrences of version in setup.py. "
+                         "Aborting."))
+    raise Exception(("Found multiple occurrences of version in setup.py. "
+                     "Aborting."))
+
+os.remove(filename)
+os.rename(filenameOut, filename)
+
+filenameVer = "./VERSION"
+with open(filenameVer, 'w') as filever:
+    filever.write(version)
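The bump rule above touches only the last character of the version string: digits 0 through 8 are incremented, a trailing 9 becomes "X", and anything else becomes "XX". A minimal standalone sketch of the same rule (the function name is illustrative, not part of the patch):

def bumpLastCharacter(version):
    # mirrors the digit logic in bump_version.py:
    # "1.0" -> "1.1", "1.9" -> "1.X", "1.X" -> "1.XX"
    if version[-1] in [str(x) for x in range(9)]:
        lastdigit = int(version[-1]) + 1
    elif version[-1] == "9":
        lastdigit = "X"
    else:
        lastdigit = "XX"
    return "{}{}".format(version[:-1], lastdigit)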
diff --git a/examples/diapason/greedy.py b/examples/diapason/greedy.py
index 9cc8a52..b85da41 100644
--- a/examples/diapason/greedy.py
+++ b/examples/diapason/greedy.py
@@ -1,177 +1,177 @@
 import numpy as np
 import fenics as fen
 import ufl
 from rrompy.hfengines.vector_linear_problem import \
     LinearElasticityHelmholtzProblemEngine as LEHPE
 from rrompy.hfengines.vector_linear_problem import \
     LinearElasticityHelmholtzProblemEngineDamped as LEHPED
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangePadeGreedy as Pade
 from rrompy.reduction_methods.lagrange_greedy import \
     ApproximantLagrangeRBGreedy as RB

 verb = 2
 timed = False
 algo = "Pade"
 #algo = "RB"
 polyBasis = "LEGENDRE"
 polyBasis = "CHEBYSHEV"
 #polyBasis = "MONOMIAL"
 if timed: verb = 0

 dampingEta = 0 * 1e4 / 2. / np.pi

 k0s = np.linspace(2.5e2, 7.5e3, 100)
 k0s = np.linspace(2.5e3, 1.5e4, 100)
 k0s = np.linspace(5.0e4, 1.0e5, 100)
 k0s = np.linspace(2.0e5, 2.5e5, 100)
 k0 = np.mean(np.power(k0s, 2.)) ** .5 # [Hz]
 kl, kr = min(k0s), max(k0s)
-params = {'muBounds':[kl, kr], 'nTrainingPoints':5e2, 'Delta':0,
-          'greedyTol':1e-2, 'nTestPoints':2, 'polybasis':polyBasis,
+params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0,
+          'greedyTol':1e-2, 'nTrainPoints':2, 'polybasis':polyBasis,
           'robustTol':2e-16, 'interpRcond':None, 'errorEstimatorKind':'EXACT'}

 theta = 20. * np.pi / 180.
 phi = 10. * np.pi / 180.

 mesh = fen.Mesh("../data/mesh/diapason_1.xml")
 subdomains = fen.MeshFunction("size_t", mesh,
                               "../data/mesh/diapason_1_physical_region.xml")
 meshBall = fen.SubMesh(mesh, subdomains, 2)
 meshFork = fen.SubMesh(mesh, subdomains, 1)
 Hball = np.max(meshBall.coordinates()[:, 1]) #.00257
 Ltot = np.max(mesh.coordinates()[:, 1]) #.1022
 Lhandle = np.max(meshFork.coordinates()[:, 1]) #.026
 Lrod = Ltot - Lhandle #.0762
 L2var = (Lrod / 4.) ** 2.
 Ehandle_ratio = 3.
 rhohandle_ratio = 1.5

 c = 3.e2
 rho = 8e3 * (2. * np.pi) ** 2.
 E = 1.93e11
 nu = .3
 T = 1e6
 lambda_ = E * nu / (1. + nu) / (1. - 2. * nu)
 mu_ = E / (1. + nu)

 kWave = (np.cos(theta) * np.cos(phi), np.sin(phi),
          np.sin(theta) * np.cos(phi))

 x, y, z = fen.SpatialCoordinate(mesh)[:]
 yCorr = y - Ltot
 compPlane = kWave[0] * x + kWave[1] * y + kWave[2] * z
 xPlane, yPlane, zPlane = (xx - compPlane * xx for xx in (x, y, z))
 xOrtho, yOrtho, zOrtho = (compPlane * xx for xx in (x, y, z))

 forcingBase = (T / (2. * np.pi * L2var)**.5
              * fen.exp(- (xPlane**2. + yPlane**2. + zPlane**2.) / (2.*L2var)))
 forcingWeight = np.real(k0) / c * (xOrtho + yOrtho + zOrtho)
 neumannDatum = [ufl.as_vector(
                     tuple(forcingBase * fen.cos(forcingWeight) * kWavedir
                           for kWavedir in kWave)),
                 ufl.as_vector(
                     tuple(forcingBase * fen.sin(forcingWeight) * kWavedir
                           for kWavedir in kWave))]
 lambda_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(lambda_),
                              fen.Constant(Ehandle_ratio * lambda_))
 mu_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(mu_),
                          fen.Constant(Ehandle_ratio * mu_))
 rho_eff = ufl.conditional(ufl.ge(y, Lhandle), fen.Constant(rho),
                           fen.Constant(rhohandle_ratio * rho))
 ###
 if dampingEta > 0:
     solver = LEHPED(degree_threshold = 8, verbosity = 0)
     solver.eta = dampingEta
 else:
     solver = LEHPE(degree_threshold = 8, verbosity = 0)
 solver.omega = np.real(k0)
 solver.lambda_ = lambda_eff
 solver.mu_ = mu_eff
 solver.rho_ = rho_eff
 solver.V = fen.VectorFunctionSpace(mesh, "P", 1)
 solver.DirichletBoundary = lambda x, on_b: on_b and x[1] < Hball
 solver.NeumannBoundary = "REST"
 solver.forcingTerm = neumannDatum

 if algo == "Pade":
     approx = Pade(solver, mu0 = k0, approxParameters = params,
                   verbosity = verb)
 else:
     params.pop("Delta")
     params.pop("polybasis")
     params.pop("robustTol")
     params.pop("interpRcond")
     params.pop("errorEstimatorKind")
     approx = RB(solver, mu0 = k0, approxParameters = params,
                 verbosity = verb)

 if timed:
     from time import clock
     start_time = clock()
     approx.greedy()
     print("Time: ", clock() - start_time)
 else:
     approx.greedy(True)

 polesApp = approx.getPoles()
 print("Poles:\n", polesApp)
 approx.samplingEngine.verbosity = 0
 approx.verbosity = 0
 kl, kr = np.real(kl), np.real(kr)
 from matplotlib import pyplot as plt
 normApp = np.zeros(len(k0s))
 norm = np.zeros_like(normApp)
 res = np.zeros_like(normApp)
 err = np.zeros_like(normApp)
 for j in range(len(k0s)):
     normApp[j] = approx.normApprox(k0s[j])
     norm[j] = approx.normHF(k0s[j])
     res[j] = (approx.estNormer.norm(approx.getRes(k0s[j])) /
approx.estNormer.norm(approx.getRHS(k0s[j]))) err[j] = approx.normErr(k0s[j]) / norm[j] resApp = approx.errorEstimator(k0s) res[res < 1e-5 * approx.greedyTol] = np.nan resApp[resApp < 1e-5 * approx.greedyTol] = np.nan err[err < 1e-8 * approx.greedyTol] = np.nan plt.figure() plt.semilogy(k0s, norm) plt.semilogy(k0s, normApp, '--') plt.semilogy(np.real(approx.mus), 1.05*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(np.real(approx.mus), approx.greedyTol*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesAppEff = polesApp[~mask] plt.figure() plt.plot(np.real(polesAppEff), np.imag(polesAppEff), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/elasticity (arch) greedy.py b/examples/elasticity (arch) greedy.py index 1e8c0ec..9cb59db 100644 --- a/examples/elasticity (arch) greedy.py +++ b/examples/elasticity (arch) greedy.py @@ -1,102 +1,102 @@ import numpy as np from rrompy.hfengines.vector_linear_problem import \ LinearElasticityHelmholtzArchwayFrequency as LEHAF from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB verb = 2 timed = True algo = "Pade" #algo = "RB" polyBasis = "LEGENDRE" #polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" errorEstimatorKind = "SIMPLIFIED" errorEstimatorKind = "EXACT" k0s = np.power(np.linspace(1e5, 3e5, 150), .5) k0 = np.mean(k0s) kl, kr = min(k0s), max(k0s) -params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, - 'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis, +params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0, + 'greedyTol':1e-2, 'nTrainPoints':2, 'basis':polyBasis, 'errorEstimatorKind':errorEstimatorKind} if timed: verb = 0 E = 1e6 nu = .47 lambda_ = E * nu / (1. + nu) / (1. - 2. * nu) mu_ = E / (1. 
+ nu) solver = LEHAF(kappa = k0, n = 200, rho_ = 1.5e3, T = 1e4, lambda_ = lambda_, mu_ = mu_, R = 1., r = .85, verbosity = verb) solver.omega = np.real(k0) if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb) else: approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) approx.samplingEngine.verbosity = 0 approx.verbosity = 0 from matplotlib import pyplot as plt normApp = np.zeros(len(k0s)) norm = np.zeros_like(normApp) res = np.zeros_like(normApp) err = np.zeros_like(normApp) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j])) / approx.estNormer.norm(approx.getRHS(k0s[j]))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) plt.figure() plt.semilogy(k0s, norm) plt.semilogy(k0s, normApp, '--') plt.semilogy(np.real(approx.mus), 1.25*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/from_papers/greedy_internalBox.py b/examples/from_papers/greedy_internalBox.py index dbaa219..2615fe7 100644 --- a/examples/from_papers/greedy_internalBox.py +++ b/examples/from_papers/greedy_internalBox.py @@ -1,110 +1,110 @@ import numpy as np import fenics as fen from rrompy.hfengines.linear_problem import HelmholtzProblemEngine as HPE from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB dim = 3 verb = 2 timed = False algo = "Pade" #algo = "RB" polyBasis = "LEGENDRE" #polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" k0s = np.power(np.linspace(500 ** 2., 2250 ** 2., 200), .5) k0 = np.mean(np.power(k0s, 2.)) ** .5 kl, kr = min(k0s), max(k0s) -params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, - 'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis} +params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0, + 'greedyTol':1e-2, 'nTrainPoints':2, 'basis':polyBasis} if dim == 2: mesh = fen.RectangleMesh(fen.Point(0., 0.), fen.Point(.1, .15), 10, 15) x, y = fen.SpatialCoordinate(mesh)[:] f = fen.exp(- 1e2 * (x + y)) else:#if dim == 3: mesh = fen.BoxMesh(fen.Point(0., 0., 0.), fen.Point(.1, .15, .25), 4, 6,10) x, y, z = fen.SpatialCoordinate(mesh)[:] f = fen.exp(- 1e2 * (x + y + z)) solver = HPE(verbosity = verb) solver.omega = np.real(k0) solver.V = fen.FunctionSpace(mesh, "P", 3) solver.refractionIndex = fen.Constant(1. 
/ 54.6) solver.forcingTerm = f solver.NeumannBoundary = "ALL" ######### if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb) else: approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) approx.samplingEngine.verbosity = 0 approx.verbosity = 0 kl, kr = np.real(kl), np.real(kr) from matplotlib import pyplot as plt normApp = np.zeros(len(k0s)) norm = np.zeros_like(normApp) res = np.zeros_like(normApp) err = np.zeros_like(normApp) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j])) / approx.estNormer.norm(approx.getRHS(k0s[j]))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) plt.figure() plt.plot(k0s, norm) plt.plot(k0s, normApp, '--') plt.plot(np.real(approx.mus), 1.05*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/from_papers/greedy_scatteringAirfoil.py b/examples/from_papers/greedy_scatteringAirfoil.py index 0a6e506..09e6a59 100644 --- a/examples/from_papers/greedy_scatteringAirfoil.py +++ b/examples/from_papers/greedy_scatteringAirfoil.py @@ -1,146 +1,146 @@ import numpy as np import fenics as fen import ufl from rrompy.hfengines.linear_problem import \ HelmholtzBoxScatteringProblemEngine as HSP from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB from rrompy.utilities.base.fenics import fenONE verb = 2 timed = False algo = "Pade" #algo = "RB" polyBasis = "LEGENDRE" #polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" homog = True homog = False k0s = np.linspace(5, 20, 25) k0 = np.mean(k0s) kl, kr = min(k0s), max(k0s) -params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, - 'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis} +params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0, + 'greedyTol':1e-2, 'nTrainPoints':2, 'basis':polyBasis} ######### PI = np.pi R = 2 def Dboundary(x, on_boundary): return on_boundary and (x[0]**2+x[1]**2)**.5 < .95 * R kappa = 10 theta = PI * - 45 / 180. mu = 1.1 epsilon = .1 mesh = fen.Mesh('../data/mesh/airfoil.xml') c, s = np.cos(theta), np.sin(theta) x, y = fen.SpatialCoordinate(mesh)[:] u0R = - fen.cos(kappa * (c * x + s * y)) u0I = - fen.sin(kappa * (c * x + s * y)) checkReal = x**2-x+y**2 rhop5 = ((x**2+y**2)/((x-1)**2+y**2))**.25 phiroot1 = fen.atan(-y/(x**2-x+y**2)) / 2 phiroot2 = fen.atan(-y/(x**2-x+y**2)) / 2 - PI * ufl.sign(-y/(x**2-x+y**2)) / 2 kappam1 = (((rhop5*fen.cos(phiroot1)+.5)**2.+(rhop5*fen.sin(phiroot1))**2.)/ ((rhop5*fen.cos(phiroot1)-1.)**2.+(rhop5*fen.sin(phiroot1))**2.) 
)**.5 - mu kappam2 = (((rhop5*fen.cos(phiroot2)+.5)**2.+(rhop5*fen.sin(phiroot2))**2.)/ ((rhop5*fen.cos(phiroot2)-1.)**2.+(rhop5*fen.sin(phiroot2))**2.) )**.5 - mu Heps1 = .9 * .5 * (1 + kappam1/epsilon + fen.sin(PI*kappam1/epsilon) / PI) + .1 Heps2 = .9 * .5 * (1 + kappam2/epsilon + fen.sin(PI*kappam2/epsilon) / PI) + .1 cTT = ufl.conditional(ufl.le(kappam1, epsilon), Heps1, fenONE) c_F = fen.Constant(.1) cFT = ufl.conditional(ufl.le(kappam2, epsilon), Heps2, fenONE) c_F = fen.Constant(.1) cT = ufl.conditional(ufl.ge(kappam1, - epsilon), cTT, c_F) cF = ufl.conditional(ufl.ge(kappam2, - epsilon), cFT, c_F) a = ufl.conditional(ufl.ge(checkReal, 0.), cT, cF) solver = HSP(R, np.real(k0), theta, n = 1, verbosity = verb, degree_threshold = 8) solver.omega = np.real(k0) solver.V = fen.FunctionSpace(mesh, "P", 3) solver.diffusivity = a solver.DirichletBoundary = Dboundary solver.RobinBoundary = "REST" solver.DirichletDatum = [u0R, u0I] ######### if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb, homogeneized = homog) else: approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb, homogeneized = homog) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) approx.samplingEngine.verbosity = 0 approx.verbosity = 0 kl, kr = np.real(kl), np.real(kr) from matplotlib import pyplot as plt normApp = np.zeros(len(k0s)) norm = np.zeros_like(normApp) res = np.zeros_like(normApp) err = np.zeros_like(normApp) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j], homogeneized=homog)) / approx.estNormer.norm(approx.getRHS(k0s[j], homogeneized=homog))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) plt.figure() plt.plot(k0s, norm) plt.plot(k0s, normApp, '--') plt.plot(np.real(approx.mus), 1.05*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/greedy/parametricDomain.py b/examples/greedy/parametricDomain.py index 375412b..8724384 100644 --- a/examples/greedy/parametricDomain.py +++ b/examples/greedy/parametricDomain.py @@ -1,99 +1,99 @@ import numpy as np from rrompy.hfengines.linear_problem import \ HelmholtzSquareBubbleDomainProblemEngine as HSBDPE from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB verb = 2 timed = True algo = "Pade" #algo = "RB" polyBasis = "LEGENDRE" #polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" errorEstimatorKind = "SIMPLIFIED" errorEstimatorKind = "EXACT" k0s = np.power(np.linspace(9, 19, 100), .5) k0 = np.mean(np.power(k0s, 2.)) ** .5 kl, kr = min(k0s), max(k0s) -params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, - 
'greedyTol':1e-2, 'nTestPoints':2, 'basis':polyBasis, +params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0, + 'greedyTol':1e-2, 'nTrainPoints':2, 'basis':polyBasis, 'errorEstimatorKind':errorEstimatorKind} if timed: verb = 0 solver = HSBDPE(kappa = 12 ** .5, theta = np.pi / 3, n = 20, mu0 = k0, degree_threshold = 15, verbosity = verb) solver.omega = np.real(k0) if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb) else: approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) approx.samplingEngine.verbosity = 0 approx.verbosity = 0 from matplotlib import pyplot as plt normApp = np.zeros(len(k0s)) norm = np.zeros_like(normApp) res = np.zeros_like(normApp) err = np.zeros_like(normApp) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j])) / approx.estNormer.norm(approx.getRHS(k0s[j]))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) plt.figure() plt.semilogy(k0s, norm) plt.semilogy(k0s, normApp, '--') plt.semilogy(np.real(approx.mus), 1.25*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/greedy/squareBubbleHomog.py b/examples/greedy/squareBubbleHomog.py index 4d580a5..21936f9 100644 --- a/examples/greedy/squareBubbleHomog.py +++ b/examples/greedy/squareBubbleHomog.py @@ -1,111 +1,111 @@ import numpy as np from rrompy.hfengines.linear_problem import \ HelmholtzSquareBubbleProblemEngine as HSBPE from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB from rrompy.utilities.base import squareResonances verb = 2 timed = False algo = "Pade" -algo = "RB" +#algo = "RB" polyBasis = "LEGENDRE" #polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" errorEstimatorKind = "SIMPLIFIED" errorEstimatorKind = "EXACT" k0s = np.power(np.linspace(95, 149, 250), .5) -k0s = np.power(np.linspace(95, 129, 100), .5) -k0s = np.power(np.linspace(9.5**2., 10.5**2., 100), .5) +#k0s = np.power(np.linspace(95, 129, 100), .5) +#k0s = np.power(np.linspace(9.5**2., 10.5**2., 100), .5) k0 = np.mean(np.power(k0s, 2.)) ** .5 kl, kr = min(k0s), max(k0s) polesexact = np.unique(np.power(squareResonances(kl**2., kr**2., False), .5)) -params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, - 'greedyTol':1e-2, 'nTestPoints':2, 'polybasis':polyBasis, +params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0, + 'greedyTol':1e-2, 'nTrainPoints':10, 'polybasis':polyBasis, 'errorEstimatorKind':errorEstimatorKind} if timed: verb = 0 solver = HSBPE(kappa = 12 ** .5, theta = np.pi / 3, n = 20, verbosity = verb) solver.omega = 
np.real(k0) if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb) else: approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) approx.samplingEngine.verbosity = 0 approx.trainedModel.verbosity = 0 approx.verbosity = 0 from matplotlib import pyplot as plt normApp = np.zeros_like(k0s) norm = np.zeros_like(k0s) res = np.zeros_like(k0s) err = np.zeros_like(k0s) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j])) / approx.estNormer.norm(approx.getRHS(k0s[j]))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) plt.figure() plt.semilogy(k0s, norm) plt.semilogy(k0s, normApp, '--') plt.semilogy(polesexact, 2.*np.max(norm)*np.ones_like(polesexact, dtype = float), 'm.') plt.semilogy(np.real(approx.mus), 4.*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(polesexact, 2.*np.max(resApp)*np.ones_like(polesexact, dtype = float), 'm.') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.semilogy(polesexact, 2.*np.max(err)*np.ones_like(polesexact, dtype = float), 'm.') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.plot(np.real(polesexact), np.imag(polesexact), 'm.') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/greedy/squareScatteringHomog.py b/examples/greedy/squareScatteringHomog.py index 89d2db6..64066e0 100644 --- a/examples/greedy/squareScatteringHomog.py +++ b/examples/greedy/squareScatteringHomog.py @@ -1,101 +1,101 @@ import numpy as np from rrompy.hfengines.linear_problem import \ HelmholtzCavityScatteringProblemEngine as HCSPE from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB verb = 20 timed = False algo = "Pade" algo = "RB" polyBasis = "LEGENDRE" polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" errorEstimatorKind = "SIMPLIFIED" errorEstimatorKind = "EXACT" k0s = np.linspace(10, 17, 100) k0 = np.mean(k0s) kl, kr = min(k0s), max(k0s) -params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, - 'greedyTol':1e-2, 'nTestPoints':2, 'polybasis':polyBasis, +params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0, + 'greedyTol':1e-2, 'nTrainPoints':2, 'polybasis':polyBasis, 'errorEstimatorKind':errorEstimatorKind} if timed: verb = 0 solver = HCSPE(kappa = 5, n = 20, verbosity = verb) solver.omega = np.real(k0) if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb) else: params.pop('Delta') params.pop('polybasis') params.pop('errorEstimatorKind') approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) 
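# The sweep below compares, over the whole k range: the high-fidelity and
# reduced-order solution norms (norm, normApp), the relative residual norm
# ||b(k) - A(k) uApp(k)|| / ||b(k)|| (res), the greedy error estimator
# (resApp), and the true relative error (err); these are the quantities
# plotted right after.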
approx.samplingEngine.verbosity = 0 approx.trainedModel.verbosity = 0 approx.verbosity = 0 from matplotlib import pyplot as plt normApp = np.zeros(len(k0s)) norm = np.zeros_like(normApp) res = np.zeros_like(normApp) err = np.zeros_like(normApp) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j])) / approx.estNormer.norm(approx.getRHS(k0s[j]))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) plt.figure() plt.plot(k0s, norm) plt.plot(k0s, normApp, '--') plt.plot(np.real(approx.mus), 1.25*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/greedy/squareTransmissionNonHomog.py b/examples/greedy/squareTransmissionNonHomog.py index 0e4255a..fe9b8c8 100644 --- a/examples/greedy/squareTransmissionNonHomog.py +++ b/examples/greedy/squareTransmissionNonHomog.py @@ -1,108 +1,108 @@ import numpy as np from rrompy.hfengines.linear_problem import \ HelmholtzSquareTransmissionProblemEngine as HSTPE from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangeRBGreedy as RB from rrompy.reduction_methods.lagrange_greedy import \ ApproximantLagrangePadeGreedy as Pade timed = False verb = 2 algo = "Pade" #algo = "RB" polyBasis = "LEGENDRE" #polyBasis = "CHEBYSHEV" #polyBasis = "MONOMIAL" homog = True #homog = False errorEstimatorKind = "SIMPLIFIED" errorEstimatorKind = "EXACT" k0s = np.power(np.linspace(4, 15, 100), .5) k0 = np.mean(np.power(k0s, 2.)) ** .5 kl, kr = min(k0s), max(k0s) rescaling = lambda x: np.power(x, 2.) 
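# The sweep variable is k, but the transmission problem depends on k**2 (k0s
# is built as the square root of a linspace in k**2); rescaling/rescalingInv
# appear to map between the two parametrizations for sampling purposes.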
rescalingInv = lambda x: np.power(x, .5) -params = {'muBounds':[kl, kr], 'nTrainingPoints':500, 'Delta':0, - 'greedyTol':1e-2, 'nTestPoints':5, 'basis':polyBasis, +params = {'muBounds':[kl, kr], 'nTestPoints':500, 'Delta':0, + 'greedyTol':1e-2, 'nTrainPoints':5, 'basis':polyBasis, 'errorEstimatorKind':errorEstimatorKind} solver = HSTPE(nT = 1, nB = 2, theta = np.pi * 20 / 180, kappa = 4., n = 20, verbosity = verb) solver.omega = np.real(k0) if algo == "Pade": approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb, homogeneized = homog) else: approx = RB(solver, mu0 = k0, approxParameters = params, verbosity = verb, homogeneized = homog) if timed: from time import clock start_time = clock() approx.greedy() print("Time: ", clock() - start_time) else: approx.greedy(True) approx.samplingEngine.verbosity = 0 approx.verbosity = 0 from matplotlib import pyplot as plt normApp = np.zeros(len(k0s)) norm = np.zeros_like(normApp) res = np.zeros_like(normApp) err = np.zeros_like(normApp) for j in range(len(k0s)): normApp[j] = approx.normApprox(k0s[j]) norm[j] = approx.normHF(k0s[j]) res[j] = (approx.estNormer.norm(approx.getRes(k0s[j], homogeneized=homog)) / approx.estNormer.norm(approx.getRHS(k0s[j], homogeneized=homog))) err[j] = approx.normErr(k0s[j]) / approx.normHF(k0s[j]) resApp = approx.errorEstimator(k0s) polesApp = approx.getPoles() polesApp = polesApp[np.abs(np.imag(polesApp)) < 1e-3] plt.figure() plt.semilogy(k0s, norm) plt.semilogy(k0s, normApp, '--') plt.semilogy(np.real(approx.mus), 4.*np.max(norm)*np.ones_like(approx.mus, dtype = float), 'rx') plt.semilogy(np.real(polesApp), 2.*np.max(norm)*np.ones_like(polesApp, dtype = float), 'k.') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, res) plt.semilogy(k0s, resApp, '--') plt.semilogy(np.real(approx.mus), 4.*np.max(resApp)*np.ones_like(approx.mus, dtype = float), 'rx') plt.semilogy(np.real(polesApp), 2.*np.max(resApp)*np.ones_like(polesApp, dtype = float), 'k.') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() plt.figure() plt.semilogy(k0s, err) plt.semilogy(np.real(polesApp), 2.*np.max(err)*np.ones_like(polesApp, dtype = float), 'k.') plt.xlim([kl, kr]) plt.grid() plt.show() plt.close() polesApp = approx.getPoles() mask = (np.real(polesApp) < kl) | (np.real(polesApp) > kr) print("Outliers:", polesApp[mask]) polesApp = polesApp[~mask] plt.figure() plt.plot(np.real(polesApp), np.imag(polesApp), 'kx') plt.axis('equal') plt.grid() plt.show() plt.close() diff --git a/examples/pod/PadeLagrange.py b/examples/pod/PadeLagrange.py index 03c5992..38b7190 100644 --- a/examples/pod/PadeLagrange.py +++ b/examples/pod/PadeLagrange.py @@ -1,122 +1,122 @@ import numpy as np from rrompy.hfengines.linear_problem import \ HelmholtzSquareBubbleProblemEngine as HSBPE from rrompy.hfengines.linear_problem import \ HelmholtzSquareTransmissionProblemEngine as HSTPE from rrompy.hfengines.linear_problem import \ HelmholtzBoxScatteringProblemEngine as HBSPE from rrompy.reduction_methods.lagrange import ApproximantLagrangePade as Pade from rrompy.utilities.parameter_sampling import QuadratureSampler as QS -testNo = -1 +testNo = 1 verb = 100 polyBasis = "CHEBYSHEV" polyBasis = "LEGENDRE" #polyBasis = "MONOMIAL" homog = True #homog = False loadName = "PadeLagrangeModel.pkl" if testNo in [1, -1]: if testNo > 0: k0s = np.power([10 + 0.j, 14 + 0.j], .5) k0 = np.mean(np.power(k0s, 2.)) ** .5 rescaling = lambda x: np.power(x, 2.) 
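        # In the params dicts of this example, 'S' is the number of sample
        # points, 'M' the numerator degree and 'N' the denominator degree of
        # the rational approximant (cf. 'Delta', "difference between M and N",
        # and the numerator/denominator fits in setupApprox further down); so
        # {'N':4, 'M':3, 'S':5} requests a [3/4] approximant from 5 samples.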
         rescalingInv = lambda x: np.power(x, .5)
     params = {'N':4, 'M':3, 'S':5, 'POD':True, 'polybasis':polyBasis}
     ktar = (11 + .5j) ** .5
     solver = HSBPE(kappa = 12 ** .5, theta = np.pi / 3, n = 40,
                    verbosity = verb)
     if testNo > 0:
         solver.omega = np.real(k0)
         approx = Pade(solver, mu0 = k0, approxParameters = params,
                       verbosity = verb)
         approx.sampler = QS(k0s, "CHEBYSHEV", rescaling, rescalingInv)
         approx.setupApprox()
 #        approx.plotSamples()
     else:
         approx = Pade(solver, mu0 = 0, verbosity = verb)
         approx.loadTrainedModel(loadName)
     approx.plotApprox(ktar, name = 'u_Pade''')
     approx.plotHF(ktar, name = 'u_HF')
     approx.plotErr(ktar, name = 'err')
     approx.plotRes(ktar, name = 'res')
     appErr, solNorm = approx.normErr(ktar), approx.normHF(ktar)
     resNorm, RHSNorm = approx.normRes(ktar), approx.normRHS(ktar)
     print(('SolNorm:\t{}\nErr:\t{}\nErrRel:\t{}').format(solNorm, appErr,
                                                  np.divide(appErr, solNorm)))
     print(('RHSNorm:\t{}\nRes:\t{}\nResRel:\t{}').format(RHSNorm, resNorm,
                                                 np.divide(resNorm, RHSNorm)))
     print('\nPoles Pade'':')
     print(approx.getPoles())
     if testNo > 0:
         approx.storeTrainedModel("PadeLagrangeModel", forceNewFile = False)
         print(approx.trainedModel.data.__dict__)

 ############
 elif testNo == 2:
     k0s = [3.85 + 0.j, 4.15 + 0.j]
     k0 = np.mean(k0s)
     ktar = 4 + 0.j
     rescaling = lambda x: np.power(x, 2.)
     rescalingInv = lambda x: np.power(x, .5)
     params = {'N':8, 'M':9, 'S':10, 'POD':True, 'polybasis':polyBasis}
     solver = HSTPE(nT = 2, nB = 1, theta = np.pi * 45/180, kappa = 4.,
                    n = 50, verbosity = verb)
     solver.omega = np.real(k0)
     approx = Pade(solver, mu0 = k0, approxParameters = params,
                   verbosity = verb, homogeneized = homog)
     approx.sampler = QS(k0s, "CHEBYSHEV", rescaling, rescalingInv)
     approx.setupApprox()
 #    approx.plotSamples()
     approx.plotApprox(ktar, name = 'u_Pade''')
     approx.plotHF(ktar, name = 'u_HF')
     approx.plotErr(ktar, name = 'err')
     approx.plotRes(ktar, name = 'res')
     appErr, solNorm = approx.normErr(ktar), approx.normHF(ktar)
     resNorm, RHSNorm = approx.normRes(ktar), approx.normRHS(ktar)
     print(('SolNorm:\t{}\nErr:\t{}\nErrRel:\t{}').format(solNorm, appErr,
                                                  np.divide(appErr, solNorm)))
     print(('RHSNorm:\t{}\nRes:\t{}\nResRel:\t{}').format(RHSNorm, resNorm,
                                                 np.divide(resNorm, RHSNorm)))
     print('\nPoles Pade'':')
     print(approx.getPoles())

 ############
 elif testNo == 3:
     k0s = [2, 5]
     k0 = np.mean(k0s)
     ktar = 4.5 - .1j
     params = {'N':10, 'M':10, 'S':11, 'POD':True, 'polybasis':polyBasis}
     solver = HBSPE(R = 7, kappa = 3, theta = - np.pi * 75 / 180, n = 40,
                    verbosity = verb)
     solver.omega = np.real(k0)
     approx = Pade(solver, mu0 = k0, approxParameters = params,
                   verbosity = verb, homogeneized = homog)
     approx.sampler = QS(k0s, "CHEBYSHEV")
     approx.setupApprox()
 #    approx.plotSamples()
     approx.plotApprox(ktar, name = 'u_Pade''')
     approx.plotHF(ktar, name = 'u_HF')
     approx.plotErr(ktar, name = 'err')
     approx.plotRes(ktar, name = 'res')
     appErr, solNorm = approx.normErr(ktar), approx.normHF(ktar)
     resNorm, RHSNorm = approx.normRes(ktar), approx.normRHS(ktar)
     print(('SolNorm:\t{}\nErr:\t{}\nErrRel:\t{}').format(solNorm, appErr,
                                                  np.divide(appErr, solNorm)))
     print(('RHSNorm:\t{}\nRes:\t{}\nResRel:\t{}').format(RHSNorm, resNorm,
                                                 np.divide(resNorm, RHSNorm)))
     print('\nPoles Pade'':')
     print(approx.getPoles())
diff --git a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
index 966a028..5202d40 100644
--- a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
+++ 
b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py @@ -1,518 +1,523 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import copy import numpy as np from .generic_approximant_lagrange_greedy import ( GenericApproximantLagrangeGreedy) from rrompy.reduction_methods.base.fit_utils import (polybases, polyvander, polydomcoeff, polyfitname) from rrompy.reduction_methods.lagrange import ApproximantLagrangePade from rrompy.reduction_methods.trained_model import TrainedModelPade as tModel from rrompy.reduction_methods.trained_model import TrainedModelData from rrompy.utilities.base.types import DictAny, List, HFEng from rrompy.utilities.base import purgeDict, verbosityDepth, customFit from rrompy.utilities.exception_manager import RROMPyWarning from rrompy.utilities.exception_manager import RROMPyException __all__ = ['ApproximantLagrangePadeGreedy'] class ApproximantLagrangePadeGreedy(GenericApproximantLagrangeGreedy, ApproximantLagrangePade): """ ROM greedy Pade' interpolant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'basis': type of basis for interpolation; allowed values include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to 'MONOMIAL'; - 'Delta': difference between M and N in rational approximant; defaults to 0; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'errorEstimatorKind': kind of error estimator; available values include 'EXACT' and 'SIMPLIFIED'; defaults to 'SIMPLIFIED'; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - - 'nTrainingPoints': number of training points; defaults to - maxIter / refinementRatio; - - 'nTestPoints': number of starting test points; defaults to 1; - - 'trainingSetGenerator': training sample points generator; - defaults to uniform sampler within muBounds; + - 'nTestPoints': number of test points; defaults to maxIter / + refinementRatio; + - 'nTrainPoints': number of starting training points; defaults to + 1; + - 'trainSetGenerator': training sample points generator; defaults + to Chebyshev sampler within muBounds; + - 'testSetGenerator': test sample points generator; defaults to + uniform sampler within muBounds; - 'interpRcond': tolerance for interpolation via numpy.polyfit; defaults to None; - 'robustTol': tolerance for robust Pade' denominator management; defaults to 0. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. 
             Defaults to 10.

     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         mus: Array of snapshot parameters.
         homogeneized: Whether to homogeneize Dirichlet BCs.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterList: Recognized keys of approximant parameters:
             - 'POD': whether to compute POD of snapshots;
             - 'muBounds': list of bounds for parameter values;
             - 'basis': type of basis for interpolation;
             - 'Delta': difference between M and N in rational approximant;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
             - 'errorEstimatorKind': kind of error estimator;
             - 'maxIter': maximum number of greedy steps;
             - 'refinementRatio': ratio of training points to be exhausted
                 before training set refinement;
-            - 'nTrainingPoints': number of training points;
-            - 'nTestPoints': number of starting test points;
-            - 'trainingSetGenerator': training sample points generator;
+            - 'nTestPoints': number of test points;
+            - 'nTrainPoints': number of starting training points;
+            - 'trainSetGenerator': training sample points generator;
+            - 'testSetGenerator': test sample points generator;
             - 'interpRcond': tolerance for interpolation via numpy.polyfit;
             - 'robustTol': tolerance for robust Pade' denominator management.
         extraApproxParameters: List of approxParameters keys in addition to
             mother class's.
         POD: whether to compute POD of snapshots.
         muBounds: list of bounds for parameter values.
         greedyTol: uniform error tolerance for greedy algorithm.
         errorEstimatorKind: kind of error estimator.
         maxIter: maximum number of greedy steps.
         refinementRatio: ratio of training points to be exhausted before
             training set refinement.
-        nTrainingPoints: number of training points.
-        nTestPoints: number of starting test points.
-        trainingSetGenerator: training sample points generator.
+        nTrainPoints: number of starting training points.
+        nTestPoints: number of test points.
+        trainSetGenerator: training sample points generator.
+        testSetGenerator: test sample points generator.
         interpRcond: Tolerance for interpolation via numpy.polyfit.
         robustTol: Tolerance for robust Pade' denominator management.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
             complex vector.
         lastSolvedHF: Wavenumber corresponding to last computed high fidelity
             solution.
         uApp: Last evaluated approximant as numpy complex vector.
     """

     def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
                  approxParameters : DictAny = {}, homogeneized : bool = False,
                  verbosity : int = 10, timestamp : bool = True):
         self._preInit()
         self._addParametersToList(["polybasis", "Delta", "errorEstimatorKind",
                                    "interpRcond", "robustTol"])
         super().__init__(HFEngine = HFEngine, mu0 = mu0,
                          approxParameters = approxParameters,
                          homogeneized = homogeneized, verbosity = verbosity,
                          timestamp = timestamp)
         if self.verbosity >= 7:
             verbosityDepth("INIT", "Computing Taylor blocks of system.",
                            timestamp = self.timestamp)
         nAs = self.HFEngine.nAs - 1
         nbs = max(self.HFEngine.nbs, (nAs + 1) * self.homogeneized)
         self.As = [self.HFEngine.A(self.mu0, j + 1) for j in range(nAs)]
         self.bs = [self.HFEngine.b(self.mu0, j, self.homogeneized)
                    for j in range(nbs)]
         if self.verbosity >= 7:
             verbosityDepth("DEL", "Done computing Taylor blocks.",
                            timestamp = self.timestamp)
         self._postInit()

     @property
     def approxParameters(self):
         """
         Value of approximant parameters. Its assignment may change robustTol.
""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["polybasis", "Delta", "errorEstimatorKind", "interpRcond", "robustTol"], True, True, baselevel = 1) if "Delta" in list(approxParameters.keys()): self._Delta = approxParameters["Delta"] elif not hasattr(self, "_Delta") or self._Delta is None: self._Delta = 0 GenericApproximantLagrangeGreedy.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) self.Delta = self.Delta if "polybasis" in keyList: self.polybasis = approxParameters["polybasis"] elif not hasattr(self, "_polybasis") or self._polybasis is None: self.polybasis = "MONOMIAL" if "errorEstimatorKind" in keyList: self.errorEstimatorKind = approxParameters["errorEstimatorKind"] elif (not hasattr(self, "_errorEstimatorKind") or self.errorEstimatorKind is None): self.errorEstimatorKind = "SIMPLIFIED" if "interpRcond" in keyList: self.interpRcond = approxParameters["interpRcond"] elif not hasattr(self, "interpRcond") or self.interpRcond is None: self.interpRcond = None if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif not hasattr(self, "_robustTol") or self._robustTol is None: self.robustTol = 0 @property def polybasis(self): """Value of polybasis.""" return self._polybasis @polybasis.setter def polybasis(self, polybasis): try: polybasis = polybasis.upper().strip().replace(" ","") if polybasis not in polybases: raise RROMPyException("Sample type not recognized.") self._polybasis = polybasis except: RROMPyWarning(("Prescribed polybasis not recognized. Overriding " "to 'MONOMIAL'.")) self._polybasis = "MONOMIAL" self._approxParameters["polybasis"] = self.polybasis @property def Delta(self): """Value of Delta.""" return self._Delta @Delta.setter def Delta(self, Delta): if not np.isclose(Delta, np.floor(Delta)): raise RROMPyException("Delta must be an integer.") if Delta < 0: RROMPyWarning(("Error estimator unreliable for Delta < 0. " "Overloading of errorEstimator is suggested.")) else: Deltamin = (max(self.HFEngine.nbs, self.HFEngine.nAs * self.homogeneized) - 1 - 1 * (self.HFEngine.nAs > 1)) if Delta < Deltamin: RROMPyWarning(("Method may be unreliable for selected Delta. " "Suggested minimal value of Delta: {}.").format( Deltamin)) self._Delta = Delta self._approxParameters["Delta"] = self.Delta @property def errorEstimatorKind(self): """Value of errorEstimatorKind.""" return self._errorEstimatorKind @errorEstimatorKind.setter def errorEstimatorKind(self, errorEstimatorKind): errorEstimatorKind = errorEstimatorKind.upper() if errorEstimatorKind not in ["EXACT", "SIMPLIFIED"]: RROMPyWarning(("Error estimator kind not recognized. Overriding " "to 'SIMPLIFIED'.")) errorEstimatorKind = "SIMPLIFIED" self._errorEstimatorKind = errorEstimatorKind self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= np.abs(self.Delta): RROMPyWarning(("nTestPoints must be at least abs(Delta) + 1. 
" "Increasing value to abs(Delta) + 1.")) nTestPoints = np.abs(self.Delta) + 1 if not np.isclose(nTestPoints, np.int(nTestPoints)): raise RROMPyException("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "_nTestPoints") and self.nTestPoints is not None: nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """Standard residual-based error estimator.""" self.setupApprox() PM = self.trainedModel.data.P[:, -1] if np.any(np.isnan(PM)) or np.any(np.isinf(PM)): err = np.empty(len(mus)) err[:] = np.inf return err nAs = self.HFEngine.nAs - 1 nbs = max(self.HFEngine.nbs - 1, nAs * self.homogeneized) radiusmus = self.radiusPade(mus) radiussmus = self.radiusPade(self.mus) musTile = np.tile(radiusmus.reshape(-1, 1), [1, self.S]) smusCol = radiussmus.reshape(1, -1) num = np.prod(musTile[:, : self.S] - smusCol, axis = 1) den = self.trainedModel.getQVal(mus) self.assembleReducedResidualBlocks() vanderBase = np.polynomial.polynomial.polyvander(radiusmus, max(nAs, nbs)).T radiusb0 = vanderBase[: nbs + 1, :] # 'ij,jk,ik->k', resbb, radiusb0, radiusb0.conj() b0resb0 = np.sum(self.trainedModel.data.resbb.dot(radiusb0) * radiusb0.conj(), axis = 0) RHSnorms = np.power(np.abs(b0resb0), .5) vanderBase = vanderBase[: -1, :] delta = self.S - len(self.trainedModel.data.Q) nbsEff = max(0, nbs - delta) if self.errorEstimatorKind == "SIMPLIFIED": radiusA = np.tensordot(PM, vanderBase[: nAs, :], 0) if delta == 0: radiusb = (np.abs(self.trainedModel.data.Q[-1]) * radiusb0[: -1, :]) else: #if self.errorEstimatorKind == "EXACT": momentQ = np.zeros(nbsEff, dtype = np.complex) momentQu = np.zeros((self.S, nAs), dtype = np.complex) radiusbTen = np.zeros((nbsEff, nbsEff, len(mus)), dtype = np.complex) radiusATen = np.zeros((nAs, nAs, len(mus)), dtype = np.complex) if nbsEff > 0: momentQ[0] = self.trainedModel.data.Q[-1] radiusbTen[0, :, :] = vanderBase[: nbsEff, :] momentQu[:, 0] = self.trainedModel.data.P[:, -1] radiusATen[0, :, :] = vanderBase[: nAs, :] Qvals = self.trainedModel.getQVal(self.mus) for k in range(1, max(nAs, nbs * (nbsEff > 0))): Qvals = Qvals * radiussmus if k > delta and k < nbs: momentQ[k - delta] = self._fitinv.dot(Qvals) radiusbTen[k - delta, k :, :] = ( radiusbTen[0, : delta - k, :]) if k < nAs: momentQu[:, k] = Qvals * self._fitinv radiusATen[k, k :, :] = radiusATen[0, : - k, :] if self.POD and nAs > 1: momentQu[:, 1 :] = self.samplingEngine.RPOD.dot( momentQu[:, 1 :]) radiusA = np.tensordot(momentQu, radiusATen, 1) if nbsEff > 0: radiusb = np.tensordot(momentQ, radiusbTen, 1) if ((self.errorEstimatorKind == "SIMPLIFIED" and delta == 0) or (self.errorEstimatorKind == "EXACT" and nbsEff > 0)): # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj() ff = np.sum(self.trainedModel.data.resbb[delta + 1 :, delta + 1 :]\ .dot(radiusb) * radiusb.conj(), axis = 0) # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj() Lf = np.sum(np.tensordot( self.trainedModel.data.resAb[delta :, :, :], radiusA, 2) * radiusb.conj(), axis = 0) else: ff, Lf = 0., 0. # 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj() LL = np.sum(np.tensordot(self.trainedModel.data.resAA, radiusA, 2) * radiusA.conj(), axis = (0, 1)) jOpt = np.power(np.abs(ff - 2. 
* np.real(Lf) + LL), .5) return (polydomcoeff[self.polybasis](self.S - 1) * jOpt * np.abs(num / den) / RHSnorms) def setupApprox(self): """ Compute Pade' interpolant. SVD-based robust eigenvalue management. """ if self.checkComputedApprox(): return if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". format(self.name()), timestamp = self.timestamp) self.computeScaleFactor() self.greedy() self._M = self.S - 1 self._N = self.S - 1 if self.Delta < 0: self._M += self.Delta else: self._N -= self.Delta if self.trainedModel is None: self.trainedModel = tModel() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data = TrainedModelData(self.trainedModel.name(), self.mu0, np.copy(self.samplingEngine.samples), self.HFEngine.rescalingExp) data.polytype = self.polybasis data.scaleFactor = self.scaleFactor data.mus = np.copy(self.mus) self.trainedModel.data = data else: self.trainedModel.data.projMat = np.copy( self.samplingEngine.samples) self.trainedModel.data.mus = np.copy(self.mus) if min(self.M, self.N) < 0: if self.verbosity >= 5: verbosityDepth("MAIN", "Minimal sample size not achieved.", timestamp = self.timestamp) Q = np.empty(max(self.N, 0) + 1, dtype = np.complex) P = np.empty((len(self.mus), max(self.M, 0) + 1), dtype = np.complex) Q[:] = np.nan P[:] = np.nan self.trainedModel.data.Q = np.copy(Q) self.trainedModel.data.P = np.copy(P) self.trainedModel.data.approxParameters = copy( self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Aborting computation of approximant.", timestamp = self.timestamp) return if self.N > 0: if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of denominator.", timestamp = self.timestamp) TS = polyvander[self.polybasis](self.radiusPade(self.mus), self.S - 1).T RHS = np.zeros(self.S) RHS[-1] = 1. fitOut = customFit(TS, RHS, full = True, rcond = self.interpRcond) if self.verbosity >= 2: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree {} " "through {}... Conditioning of " "system: {:.4e}.").format( self.S, self.S - 1, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] < self.S: RROMPyWarning(("Polyfit is poorly conditioned. Starting " "preemptive termination of computation of " "approximant.")) Q = np.empty(max(self.N, 0) + 1, dtype = np.complex) P = np.empty((len(self.mus), max(self.M, 0) + 1), dtype = np.complex) Q[:] = np.nan P[:] = np.nan self.trainedModel.data.Q = np.copy(Q) self.trainedModel.data.P = np.copy(P) self.trainedModel.data.approxParameters = copy( self.approxParameters) if self.verbosity >= 7: verbosityDepth("DEL", "Aborting computation of denominator.", timestamp = self.timestamp) if self.verbosity >= 5: verbosityDepth("DEL", "Aborting computation of approximant.", timestamp = self.timestamp) return self._fitinv = fitOut[0] while self.N > 0: Ghalf = (TS[: self.N + 1, :] * self._fitinv).T if self.POD: self.Ghalf = self.samplingEngine.RPOD.dot(Ghalf) ev, eV = self.findeveVGQR(2) else: self.Ghalf = self.samplingEngine.samples.dot(Ghalf) ev, eV = self.findeveVGQR(2) Nstable = np.sum(np.abs(ev) >= self.robustTol * np.linalg.norm(ev)) if self.N <= Nstable: break if self.verbosity >= 2: verbosityDepth("MAIN", ("Smallest {} eigenvalues below " "tolerance. 
Reducing N to {}.")\ .format(self.N - Nstable + 1, Nstable), timestamp = self.timestamp) self._N = Nstable if self.N <= 0: self._N = 0 eV = np.ones((1, 1)) Q = eV[:, 0] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.", timestamp = self.timestamp) else: Q = np.ones((1,), dtype = np.complex) self.trainedModel.data.Q = np.copy(Q) if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.", timestamp = self.timestamp) Qevaldiag = np.diag(self.trainedModel.getQVal(self.mus)) while self.M >= 0: fitVander = polyvander[self.polybasis](self.radiusPade(self.mus), self.M) w = None if self.M == self.S - 1: w = "AUTO" fitOut = customFit(fitVander, Qevaldiag, full = True, w = w, rcond = self.interpRcond) if self.verbosity >= 2: condfit = fitOut[1][2][0] / fitOut[1][2][-1] verbosityDepth("MAIN", ("Fitting {} samples with degree {} " "through {}... Conditioning of " "system: {:.4e}.").format( self.S, self.M, polyfitname[self.polybasis], condfit), timestamp = self.timestamp) if fitOut[1][1] == self.M + 1: P = fitOut[0].T break RROMPyWarning(("Polyfit is poorly conditioned. Reducing M from {} " "to {}. Exact snapshot interpolation not " "guaranteed.").format(self.M, fitOut[1][1] - 1)) self._M = fitOut[1][1] - 1 if self.M < 0: raise RROMPyException(("Instability in computation of numerator. " "Aborting.")) P = np.atleast_2d(P) if self.POD: P = self.samplingEngine.RPOD.dot(P) self.trainedModel.data.P = np.copy(P) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.", timestamp = self.timestamp) self.trainedModel.data.approxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.", timestamp = self.timestamp) def assembleReducedResidualBlocks(self): """Build affine blocks of reduced linear system through projections.""" computeResbb = not hasattr(self.trainedModel.data, "resbb") computeResAb = (not hasattr(self.trainedModel.data, "resAb") or self.trainedModel.data.resAb.shape[1] != self.S) computeResAA = (not hasattr(self.trainedModel.data, "resAA") or self.trainedModel.data.resAA.shape[0] != self.S) pMat = self.trainedModel.data.projMat scaling = self.trainedModel.data.scaleFactor if computeResbb or computeResAb or computeResAA: if self.verbosity >= 7: verbosityDepth("INIT", "Projecting Taylor terms of residual.", timestamp = self.timestamp) if computeResbb: self.assembleReducedResidualBlocksbb(self.bs, pMat, scaling) if computeResAb: self.assembleReducedResidualBlocksAb(self.As, self.bs[1 :], pMat, scaling) if computeResAA: self.assembleReducedResidualBlocksAA(self.As, pMat, scaling) if self.verbosity >= 7: verbosityDepth("DEL", ("Done setting up Taylor " "decomposition of residual."), timestamp = self.timestamp) diff --git a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py index 48283ac..4894c98 100644 --- a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py +++ b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_rb.py @@ -1,245 +1,250 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from copy import copy from .generic_approximant_lagrange_greedy import ( GenericApproximantLagrangeGreedy) from rrompy.reduction_methods.lagrange import ApproximantLagrangeRB from rrompy.reduction_methods.trained_model import TrainedModelRB as tModel from rrompy.reduction_methods.trained_model import TrainedModelData from rrompy.utilities.base.types import DictAny, HFEng, List from rrompy.utilities.base import verbosityDepth from rrompy.utilities.exception_manager import RROMPyException __all__ = ['ApproximantLagrangeRBGreedy'] class ApproximantLagrangeRBGreedy(GenericApproximantLagrangeGreedy, ApproximantLagrangeRB): """ ROM greedy RB approximant computation for parametric problems. Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; defaults to 0.2; - - 'nTrainingPoints': number of training points; defaults to - maxIter / refinementRatio; - - 'nTestPoints': number of starting test points; defaults to 1; - - 'trainingSetGenerator': training sample points generator; - defaults to uniform sampler within muBounds. + - 'nTestPoints': number of test points; defaults to maxIter / + refinementRatio; + - 'nTrainPoints': number of starting training points; defaults to + 1; + - 'trainSetGenerator': training sample points generator; defaults + to Chebyshev sampler within muBounds; + - 'testSetGenerator': test sample points generator; defaults to + uniform sampler within muBounds. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'greedyTol': uniform error tolerance for greedy algorithm; - 'maxIter': maximum number of greedy steps; - 'refinementRatio': ratio of training points to be exhausted before training set refinement; - - 'nTrainingPoints': number of training points; - - 'nTestPoints': number of starting test points; - - 'trainingSetGenerator': training sample points generator; + - 'nTestPoints': number of test points; + - 'nTrainPoints': number of starting training points; + - 'trainSetGenerator': training sample points generator; + - 'testSetGenerator': test sample points generator; - 'robustTol': tolerance for robust Pade' denominator management. 
extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. maxIter: maximum number of greedy steps. - refinementRatio: ratio of training points to be exhausted before - training set refinement. + refinementRatio: ratio of test points to be exhausted before test + set refinement. - nTrainingPoints: number of training points. - nTestPoints: number of starting test points. - trainingSetGenerator: training sample points generator. + nTestPoints: number of test points. + nTrainPoints: number of starting training points. + trainSetGenerator: training sample points generator. + testSetGenerator: test sample points generator. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. As: List of sparse matrices (in CSC format) representing coefficients of linear system matrix wrt theta(mu). bs: List of numpy vectors representing coefficients of linear system RHS wrt theta(mu). thetaAs: List of callables representing coefficients of linear system matrix wrt mu. thetabs: List of callables representing coefficients of linear system RHS wrt mu. ARBs: List of sparse matrices (in CSC format) representing coefficients of compressed linear system matrix wrt theta(mu). bRBs: List of numpy vectors representing coefficients of compressed linear system RHS wrt theta(mu). """ def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() super().__init__(HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) if self.verbosity >= 10: verbosityDepth("INIT", "Computing affine blocks of system.", timestamp = self.timestamp) self.As = self.HFEngine.affineLinearSystemA(self.mu0) self.bs = self.HFEngine.affineLinearSystemb(self.mu0, self.homogeneized) if self.verbosity >= 10: verbosityDepth("DEL", "Done computing affine blocks.", timestamp = self.timestamp) self._postInit() @property def R(self): """Value of R.""" return self._S @R.setter def R(self, R): raise RROMPyException(("R is used just to simplify inheritance, and " "its value cannot be changed from that of S.")) def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """ Standard residual-based error estimator. Unreliable for unstable problems (inf-sup constant is missing). """ self.setupApprox() self.assembleReducedResidualBlocks() nmus = len(mus) nAs = self.trainedModel.data.resAA.shape[1] nbs = self.trainedModel.data.resbb.shape[0] thetaAs = self.trainedModel.data.thetaAs thetabs = self.trainedModel.data.thetabs radiusA = np.empty((self.S, nAs, nmus), dtype = np.complex) radiusb = np.empty((nbs, nmus), dtype = np.complex) verb = self.trainedModel.verbosity self.trainedModel.verbosity = 0 if verb >= 5: mustr = mus if nmus > 2: mustr = "[{} ..({})..
{}]".format(mus[0], nmus - 2, mus[-1]) verbosityDepth("INIT", ("Computing RB solution at mu = " "{}.").format(mustr), timestamp = self.timestamp) for j in range(nmus): mu = mus[j] uApp = self.getApproxReduced(mu) for i in range(nAs): radiusA[:, i, j] = eval(thetaAs[i]) * uApp for i in range(nbs): radiusb[i, j] = eval(thetabs[i]) if verb >= 5: verbosityDepth("DEL", "Done computing RB solution.", timestamp = self.timestamp) self.trainedModel.verbosity = verb # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj() ff = np.sum(self.trainedModel.data.resbb.dot(radiusb) * radiusb.conj(), axis = 0) # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj() Lf = np.sum(np.tensordot(self.trainedModel.data.resAb, radiusA, 2) * radiusb.conj(), axis = 0) # 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj() LL = np.sum(np.tensordot(self.trainedModel.data.resAA, radiusA, 2) * radiusA.conj(), axis = (0, 1)) return np.abs((LL - 2. * np.real(Lf) + ff) / ff) ** .5 def setupApprox(self): """Compute RB projection matrix.""" if self.checkComputedApprox(): return if self.verbosity >= 5: verbosityDepth("INIT", "Setting up {}.". format(self.name()), timestamp = self.timestamp) self.greedy() if self.verbosity >= 7: verbosityDepth("INIT", "Computing projection matrix.", timestamp = self.timestamp) pMat = self.samplingEngine.samples if self.trainedModel is None: self.trainedModel = tModel() self.trainedModel.verbosity = self.verbosity self.trainedModel.timestamp = self.timestamp data = TrainedModelData(self.trainedModel.name(), self.mu0, np.copy(pMat), self.HFEngine.rescalingExp) data.thetaAs = self.HFEngine.affineWeightsA(self.mu0) data.thetabs = self.HFEngine.affineWeightsb(self.mu0, self.homogeneized) data.ARBs, data.bRBs = self.assembleReducedSystem(pMat) data.mus = np.copy(self.mus) self.trainedModel.data = data else: pMatOld = self.trainedModel.data.projMat Sold = pMatOld.shape[1] ARBs, bRBs = self.assembleReducedSystem(pMat[:, Sold :], pMatOld) self.trainedModel.data.ARBs = ARBs self.trainedModel.data.bRBs = bRBs self.trainedModel.data.projMat = np.copy(pMat) self.trainedModel.data.mus = np.copy(self.mus) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing projection matrix.", timestamp = self.timestamp) self.trainedModel.data.approxParameters = copy(self.approxParameters) if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.", timestamp = self.timestamp) def assembleReducedResidualBlocks(self): """Build affine blocks of RB linear system through projections.""" computeResbb = not hasattr(self.trainedModel.data, "resbb") computeResAb = (not hasattr(self.trainedModel.data, "resAb") or self.trainedModel.data.resAb.shape[1] != self.S) computeResAA = (not hasattr(self.trainedModel.data, "resAA") or self.trainedModel.data.resAA.shape[0] != self.S) if computeResbb or computeResAb or computeResAA: pMat = self.trainedModel.data.projMat if self.verbosity >= 7: verbosityDepth("INIT", "Projecting affine terms of residual.", timestamp = self.timestamp) if computeResbb: self.assembleReducedResidualBlocksbb(self.bs, pMat) if computeResAb: self.assembleReducedResidualBlocksAb(self.As, self.bs, pMat) if computeResAA: self.assembleReducedResidualBlocksAA(self.As, pMat) if self.verbosity >= 7: verbosityDepth("DEL", ("Done setting up affine decomposition " "of residual."), timestamp = self.timestamp) diff --git a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py index e2b7417..2bd2c07 100644 --- 
a/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py +++ b/rrompy/reduction_methods/lagrange_greedy/generic_approximant_lagrange_greedy.py @@ -1,625 +1,651 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from copy import deepcopy as copy import numpy as np from rrompy.reduction_methods.base.generic_approximant import ( GenericApproximant) from rrompy.reduction_methods.lagrange.generic_approximant_lagrange import ( GenericApproximantLagrange) from rrompy.utilities.base.types import Np1D, Np2D, DictAny, HFEng, Tuple, List from rrompy.utilities.base import purgeDict, verbosityDepth from rrompy.utilities.exception_manager import (RROMPyException, modeAssert, RROMPyWarning) __all__ = ['GenericApproximantLagrangeGreedy'] class GenericApproximantLagrangeGreedy(GenericApproximantLagrange): """ ROM greedy Lagrange interpolant computation for parametric problems (ABSTRACT). Args: HFEngine: HF problem solver. mu0(optional): Default parameter. Defaults to 0. approxParameters(optional): Dictionary containing values for main parameters of approximant. Recognized keys are: - 'POD': whether to compute POD of snapshots; defaults to True; - 'muBounds': list of bounds for parameter values; defaults to [[0], [1]]; - 'greedyTol': uniform error tolerance for greedy algorithm; defaults to 1e-2; - 'interactive': whether to interactively terminate greedy algorithm; defaults to False; - 'maxIter': maximum number of greedy steps; defaults to 1e2; - - 'refinementRatio': ratio of training points to be exhausted - before training set refinement; defaults to 0.2; - - 'nTrainingPoints': number of training points; defaults to - maxIter / refinementRatio; - - 'nTestPoints': number of starting test points; defaults to 1; - - 'trainingSetGenerator': training sample points generator; - defaults to uniform sampler within muBounds; - - 'robustTol': tolerance for robust Pade' denominator management; - defaults to 0. + - 'refinementRatio': ratio of test points to be exhausted before + test set refinement; defaults to 0.2; + - 'nTestPoints': number of test points; defaults to maxIter / + refinementRatio; + - 'nTrainPoints': number of starting training points; defaults to + 1; + - 'trainSetGenerator': training sample points generator; defaults + to Chebyshev sampler within muBounds; + - 'testSetGenerator': test sample points generator; defaults to + uniform sampler within muBounds. Defaults to empty dict. homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to False. verbosity(optional): Verbosity level. Defaults to 10. Attributes: HFEngine: HF problem solver. mu0: Default parameter. mus: Array of snapshot parameters. homogeneized: Whether to homogeneize Dirichlet BCs. approxParameters: Dictionary containing values for main parameters of approximant. Recognized keys are in parameterList. 
parameterList: Recognized keys of approximant parameters: - 'POD': whether to compute POD of snapshots; - 'muBounds': list of bounds for parameter values; - 'greedyTol': uniform error tolerance for greedy algorithm; + - 'interactive': whether to interactively terminate greedy + algorithm; - 'maxIter': maximum number of greedy steps; - - 'refinementRatio': ratio of training points to be exhausted - before training set refinement; - - 'nTrainingPoints': number of training points; - - 'nTestPoints': number of starting test points; - - 'trainingSetGenerator': training sample points generator. - - 'robustTol': tolerance for robust Pade' denominator management. + - 'refinementRatio': ratio of test points to be exhausted before + test set refinement; + - 'nTestPoints': number of test points; + - 'nTrainPoints': number of starting training points; + - 'trainSetGenerator': training sample points generator; + - 'testSetGenerator': test sample points generator. extraApproxParameters: List of approxParameters keys in addition to mother class's. POD: whether to compute POD of snapshots. muBounds: list of bounds for parameter values. greedyTol: uniform error tolerance for greedy algorithm. maxIter: maximum number of greedy steps. - refinementRatio: ratio of training points to be exhausted before - training set refinement. + refinementRatio: ratio of test points to be exhausted before test + set refinement. - nTrainingPoints: number of training points. - nTestPoints: number of starting test points. - trainingSetGenerator: training sample points generator. + nTestPoints: number of test points. + nTrainPoints: number of starting training points. + trainSetGenerator: training sample points generator. + testSetGenerator: test sample points generator. robustTol: tolerance for robust Pade' denominator management. samplingEngine: Sampling engine. uHF: High fidelity solution with wavenumber lastSolvedHF as numpy complex vector. lastSolvedHF: Wavenumber corresponding to last computed high fidelity solution. uApp: Last evaluated approximant as numpy complex vector. """ TOL_INSTABILITY = 1e-6 def __init__(self, HFEngine:HFEng, mu0 : complex = 0., approxParameters : DictAny = {}, homogeneized : bool = False, verbosity : int = 10, timestamp : bool = True): self._preInit() self._addParametersToList(["muBounds", "greedyTol", "interactive", "maxIter", "refinementRatio", - "nTrainingPoints", "nTestPoints", - "trainingSetGenerator"]) + "nTestPoints", "nTrainPoints", + "trainSetGenerator", "testSetGenerator"]) super(GenericApproximantLagrange, self).__init__( HFEngine = HFEngine, mu0 = mu0, approxParameters = approxParameters, homogeneized = homogeneized, verbosity = verbosity, timestamp = timestamp) self._postInit() @property def approxParameters(self): """Value of approximant parameters.
Its assignment may change S.""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["muBounds", "greedyTol", "interactive", "maxIter", - "refinementRatio", "nTrainingPoints", - "nTestPoints", - "trainingSetGenerator"], + "refinementRatio", "nTestPoints", + "nTrainPoints", "trainSetGenerator", + "testSetGenerator"], True, True, baselevel = 1) GenericApproximant.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) if "muBounds" in keyList: self.muBounds = approxParameters["muBounds"] elif not hasattr(self, "_muBounds") or self.muBounds is None: self.muBounds = [[0.], [1.]] if "greedyTol" in keyList: self.greedyTol = approxParameters["greedyTol"] elif not hasattr(self, "_greedyTol") or self.greedyTol is None: self.greedyTol = 1e-2 if "interactive" in keyList: self.interactive = approxParameters["interactive"] elif not hasattr(self, "interactive") or self.interactive is None: self.interactive = False if "maxIter" in keyList: self.maxIter = approxParameters["maxIter"] elif not hasattr(self, "_maxIter") or self.maxIter is None: self.maxIter = 1e2 if "refinementRatio" in keyList: self.refinementRatio = approxParameters["refinementRatio"] elif (not hasattr(self, "_refinementRatio") or self.refinementRatio is None): self.refinementRatio = 0.2 - if "nTrainingPoints" in keyList: - self.nTrainingPoints = approxParameters["nTrainingPoints"] - elif (not hasattr(self, "_nTrainingPoints") - or self.nTrainingPoints is None): - self.nTrainingPoints = np.int(np.ceil(self.maxIter - / self.refinementRatio)) if "nTestPoints" in keyList: self.nTestPoints = approxParameters["nTestPoints"] - elif not hasattr(self, "_nTestPoints") or self.nTestPoints is None: - self.nTestPoints = 1 - if "trainingSetGenerator" in keyList: - self.trainingSetGenerator = ( - approxParameters["trainingSetGenerator"]) - elif (not hasattr(self, "_trainingSetGenerator") - or self.trainingSetGenerator is None): + elif (not hasattr(self, "_nTestPoints") + or self.nTestPoints is None): + self.nTestPoints = np.int(np.ceil(self.maxIter + / self.refinementRatio)) + if "nTrainPoints" in keyList: + self.nTrainPoints = approxParameters["nTrainPoints"] + elif not hasattr(self, "_nTrainPoints") or self.nTrainPoints is None: + self.nTrainPoints = 1 + if "trainSetGenerator" in keyList: + self.trainSetGenerator = approxParameters["trainSetGenerator"] + elif (not hasattr(self, "_trainSetGenerator") + or self.trainSetGenerator is None): + from rrompy.utilities.parameter_sampling import QuadratureSampler + self.trainSetGenerator = QuadratureSampler(self.muBounds, + "CHEBYSHEV") + del QuadratureSampler + if "testSetGenerator" in keyList: + self.testSetGenerator = approxParameters["testSetGenerator"] + elif (not hasattr(self, "_testSetGenerator") + or self.testSetGenerator is None): from rrompy.utilities.parameter_sampling import QuadratureSampler - self.trainingSetGenerator = QuadratureSampler(self.muBounds, - "UNIFORM") + self.testSetGenerator = QuadratureSampler(self.muBounds, + "UNIFORM") del QuadratureSampler @property def S(self): """Value of S.""" if not hasattr(self, "_mus") or self.mus is None: return 0 return len(self.mus) @S.setter def S(self, S): raise RROMPyException(("S is used just to simplify inheritance, and " "its value cannot be changed.")) @property def mus(self): """Value of mus.""" 
return self._mus @mus.setter def mus(self, mus): self._mus = np.array(mus, dtype = np.complex) @property def muBounds(self): """Value of muBounds.""" return self._muBounds @muBounds.setter def muBounds(self, muBounds): if len(muBounds) != 2: raise RROMPyException("2 limits must be specified.") try: muBounds = muBounds.tolist() except: muBounds = list(muBounds) for j in range(2): try: len(muBounds[j]) except: muBounds[j] = np.array([muBounds[j]]) if len(muBounds[0]) != len(muBounds[1]): raise RROMPyException("The bounds must have the same length.") self._muBounds = muBounds @property def greedyTol(self): """Value of greedyTol.""" return self._greedyTol @greedyTol.setter def greedyTol(self, greedyTol): if greedyTol < 0: raise RROMPyException("greedyTol must be non-negative.") if hasattr(self, "_greedyTol") and self.greedyTol is not None: greedyTolold = self.greedyTol else: greedyTolold = -1 self._greedyTol = greedyTol self._approxParameters["greedyTol"] = self.greedyTol if greedyTolold != self.greedyTol: self.resetSamples() @property def maxIter(self): """Value of maxIter.""" return self._maxIter @maxIter.setter def maxIter(self, maxIter): if maxIter <= 0: raise RROMPyException("maxIter must be positive.") if hasattr(self, "_maxIter") and self.maxIter is not None: maxIterold = self.maxIter else: maxIterold = -1 self._maxIter = maxIter self._approxParameters["maxIter"] = self.maxIter if maxIterold != self.maxIter: self.resetSamples() @property def refinementRatio(self): """Value of refinementRatio.""" return self._refinementRatio @refinementRatio.setter def refinementRatio(self, refinementRatio): if refinementRatio <= 0. or refinementRatio > 1.: raise RROMPyException(("refinementRatio must be between 0 " "(excluded) and 1.")) if (hasattr(self, "_refinementRatio") and self.refinementRatio is not None): refinementRatioold = self.refinementRatio else: refinementRatioold = -1 self._refinementRatio = refinementRatio self._approxParameters["refinementRatio"] = self.refinementRatio if refinementRatioold != self.refinementRatio: self.resetSamples() - @property - def nTrainingPoints(self): - """Value of nTrainingPoints.""" - return self._nTrainingPoints - @nTrainingPoints.setter - def nTrainingPoints(self, nTrainingPoints): - if nTrainingPoints <= 1: - raise RROMPyException("nTrainingPoints must be greater than 1.") - if not np.isclose(nTrainingPoints, np.int(nTrainingPoints)): - raise RROMPyException("nTrainingPoints must be an integer.") - nTrainingPoints = np.int(nTrainingPoints) - if (hasattr(self, "_nTrainingPoints") - and self.nTrainingPoints is not None): - nTrainingPointsold = self.nTrainingPoints - else: - nTrainingPointsold = -1 - self._nTrainingPoints = nTrainingPoints - self._approxParameters["nTrainingPoints"] = self.nTrainingPoints - if nTrainingPointsold != self.nTrainingPoints: - self.resetSamples() - @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= 0: raise RROMPyException("nTestPoints must be positive.") if not np.isclose(nTestPoints, np.int(nTestPoints)): raise RROMPyException("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "_nTestPoints") and self.nTestPoints is not None: nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() @property - def trainingSetGenerator(self): - """Value of 
trainingSetGenerator.""" - return self._trainingSetGenerator - @trainingSetGenerator.setter - def trainingSetGenerator(self, trainingSetGenerator): - if 'generatePoints' not in dir(trainingSetGenerator): - raise RROMPyException("trainingSetGenerator type not recognized.") - if (hasattr(self, '_trainingSetGenerator') - and self.trainingSetGenerator is not None): - trainingSetGeneratorOld = self.trainingSetGenerator - self._trainingSetGenerator = trainingSetGenerator - self._approxParameters["trainingSetGenerator"] = ( - self.trainingSetGenerator) - if (not 'trainingSetGeneratorOld' in locals() - or trainingSetGeneratorOld != self.trainingSetGenerator): + def nTrainPoints(self): + """Value of nTrainPoints.""" + return self._nTrainPoints + @nTrainPoints.setter + def nTrainPoints(self, nTrainPoints): + if nTrainPoints <= 1: + raise RROMPyException("nTrainPoints must be greater than 1.") + if not np.isclose(nTrainPoints, np.int(nTrainPoints)): + raise RROMPyException("nTrainPoints must be an integer.") + nTrainPoints = np.int(nTrainPoints) + if (hasattr(self, "_nTrainPoints") + and self.nTrainPoints is not None): + nTrainPointsOld = self.nTrainPoints + else: + nTrainPointsOld = -1 + self._nTrainPoints = nTrainPoints + self._approxParameters["nTrainPoints"] = self.nTrainPoints + if nTrainPointsOld != self.nTrainPoints: + self.resetSamples() + + @property + def trainSetGenerator(self): + """Value of trainSetGenerator.""" + return self._trainSetGenerator + @trainSetGenerator.setter + def trainSetGenerator(self, trainSetGenerator): + if 'generatePoints' not in dir(trainSetGenerator): + raise RROMPyException("trainSetGenerator type not recognized.") + if (hasattr(self, '_trainSetGenerator') + and self.trainSetGenerator is not None): + trainSetGeneratorOld = self.trainSetGenerator + self._trainSetGenerator = trainSetGenerator + self._approxParameters["trainSetGenerator"] = self.trainSetGenerator + if (not 'trainSetGeneratorOld' in locals() + or trainSetGeneratorOld != self.trainSetGenerator): + self.resetSamples() + + @property + def testSetGenerator(self): + """Value of testSetGenerator.""" + return self._testSetGenerator + @testSetGenerator.setter + def testSetGenerator(self, testSetGenerator): + if 'generatePoints' not in dir(testSetGenerator): + raise RROMPyException("testSetGenerator type not recognized.") + if (hasattr(self, '_testSetGenerator') + and self.testSetGenerator is not None): + testSetGeneratorOld = self.testSetGenerator + self._testSetGenerator = testSetGenerator + self._approxParameters["testSetGenerator"] = self.testSetGenerator + if (not 'testSetGeneratorOld' in locals() + or testSetGeneratorOld != self.testSetGenerator): self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() self._mus = [] def initEstNormer(self): """Initialize estimator norm engine.""" if not hasattr(self, "estNormer"): from rrompy.hfengines.base import ProblemEngineBase as PEB self.estNormer = PEB() # L2 norm self.estNormer.V = self.HFEngine.V self.estNormer.buildEnergyNormForm() def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """ Standard residual-based error estimator with explicit residual computation. 
""" self.setupApprox() nmus = len(mus) err = np.empty(nmus) if self.HFEngine.nbs == 1: RHS = self.getRHS(mus[0], homogeneized = self.homogeneized) RHSNorm = self.estNormer.norm(RHS) for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / RHSNorm else: for j in range(nmus): res = self.getRes(mus[j], homogeneized = self.homogeneized) RHS = self.getRHS(mus[j], homogeneized = self.homogeneized) err[j] = self.estNormer.norm(res) / self.estNormer.norm(RHS) return np.abs(err) def getMaxErrorEstimator(self, mus, plot : bool = False)\ -> Tuple[Np1D, int, float]: """ Compute maximum of (and index of maximum of) error estimator over given parameters. """ - errorEstTrain = self.errorEstimator(mus) - idxMaxEst = np.argmax(errorEstTrain) - maxEst = errorEstTrain[idxMaxEst] - if plot and not np.all(np.isinf(errorEstTrain)): + errorEstTest = self.errorEstimator(mus) + idxMaxEst = np.argmax(errorEstTest) + maxEst = errorEstTest[idxMaxEst] + if plot and not np.all(np.isinf(errorEstTest)): from matplotlib import pyplot as plt onemus = np.ones(self.S) plt.figure() - plt.semilogy(np.real(mus), errorEstTrain, 'k') + plt.semilogy(np.real(mus), errorEstTest, 'k') plt.semilogy(np.real(mus[[0, -1]]), [self.greedyTol] * 2, 'r--') plt.semilogy(np.real(self.mus), 2. * self.greedyTol * onemus, '*m') plt.semilogy(np.real(mus[idxMaxEst]), maxEst, 'xr') plt.grid() plt.show() plt.close() - return errorEstTrain, idxMaxEst, maxEst + return errorEstTest, idxMaxEst, maxEst def greedyNextSample(self, muidx:int, plotEst : bool = False)\ -> Tuple[Np1D, int, float, complex]: """Compute next greedy snapshot of solution map.""" modeAssert(self._mode, message = "Cannot add greedy sample.") - mu = self.muTrain[muidx] + mu = self.muTest[muidx] if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} to " "training set.").format( self.samplingEngine.nsamples + 1, mu), timestamp = self.timestamp) self.mus = np.append(self.mus, mu) - idxs = np.arange(len(self.muTrain)) + idxs = np.arange(len(self.muTest)) mask = np.ones_like(idxs, dtype = bool) mask[muidx] = False idxs = idxs[mask] - self.muTrain = self.muTrain[idxs] + self.muTest = self.muTest[idxs] self.samplingEngine.nextSample(mu, homogeneized = self.homogeneized) - errorEstTrain, muidx, maxErrorEst = self.getMaxErrorEstimator( - self.muTrain, plotEst) - return errorEstTrain, muidx, maxErrorEst, self.muTrain[muidx] + errorEstTest, muidx, maxErrorEst = self.getMaxErrorEstimator( + self.muTest, plotEst) + return errorEstTest, muidx, maxErrorEst, self.muTest[muidx] def greedy(self, plotEst : bool = False): """Compute greedy snapshots of solution map.""" modeAssert(self._mode, message = "Cannot start greedy algorithm.") if self.samplingEngine.samples is not None: return if self.verbosity >= 2: verbosityDepth("INIT", "Starting computation of snapshots.", timestamp = self.timestamp) self.resetSamples() - self.mus, _ = self.trainingSetGenerator.generatePoints( - self.nTestPoints) + self.mus, _ = self.trainSetGenerator.generatePoints(self.nTrainPoints) self.mus = np.array([x[0] for x in self.mus], dtype = np.complex) - nTrain = self.nTrainingPoints - muTrainBase, _ = self.trainingSetGenerator.generatePoints(nTrain) - self.muTrain = np.empty(len(muTrainBase) + 1, dtype = np.complex) + nTest = self.nTestPoints + muTestBase, _ = np.sort(self.testSetGenerator.generatePoints(nTest)) + self.muTest = np.empty(len(muTestBase) + 1, dtype = np.complex) j = 0 - for mu in muTrainBase: + for mu in muTestBase: if not 
np.any(np.isclose(self.mus, mu)): - self.muTrain[j] = mu[0] + self.muTest[j] = mu[0] j += 1 - self.muTrain[j] = self.mus[-1] - self.muTrain = self.muTrain[: j + 1] + self.muTest[j] = self.mus[-1] + self.muTest = self.muTest[: j + 1] self.mus = self.mus[:-1] for j in range(len(self.mus)): if self.verbosity >= 2: verbosityDepth("MAIN", ("Adding {}-th sample point at {} to " "training set.").format( self.samplingEngine.nsamples + 1, self.mus[j]), timestamp = self.timestamp) self.samplingEngine.nextSample(self.mus[j], homogeneized = self.homogeneized) - errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample(-1, - plotEst) + errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample(-1, + plotEst) if self.verbosity >= 2: - verbosityDepth("MAIN", "Uniform error estimate {:.4e}.".format( - maxErrorEst), + verbosityDepth("MAIN", ("Uniform testing error estimate " + "{:.4e}.").format(maxErrorEst), timestamp = self.timestamp) trainedModelOld = copy(self.trainedModel) while (self.samplingEngine.nsamples < self.maxIter and maxErrorEst > self.greedyTol): - if (1. - self.refinementRatio) * nTrain > len(self.muTrain): - muTrainExtra, _ = self.trainingSetGenerator.refine(nTrain) - self.muTrain = np.sort(np.append(self.muTrain, muTrainExtra)) - nTrain += len(muTrainExtra) + if (1. - self.refinementRatio) * nTest > len(self.muTest): + muTestExtra, _ = self.testSetGenerator.refine(nTest) + self.muTest = np.sort(np.append(self.muTest, muTestExtra)) + nTest += len(muTestExtra) if self.verbosity >= 5: - verbosityDepth("MAIN", ("Enriching training set by {} " + verbosityDepth("MAIN", ("Enriching test set by {} " "elements.").format( - len(muTrainExtra)), + len(muTestExtra)), timestamp = self.timestamp) - muTrainOld, maxErrorEstOld = self.muTrain, maxErrorEst - errorEstTrain, muidx, maxErrorEst, mu = self.greedyNextSample( + muTestOld, maxErrorEstOld = self.muTest, maxErrorEst + errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample( muidx, plotEst) if self.verbosity >= 2: - verbosityDepth("MAIN", "Uniform error estimate {:.4e}.".format( - maxErrorEst), + verbosityDepth("MAIN", ("Uniform testing error estimate " + "{:.4e}.").format(maxErrorEst), timestamp = self.timestamp) if (np.isnan(maxErrorEst) or np.isinf(maxErrorEst) or maxErrorEstOld < maxErrorEst * self.TOL_INSTABILITY): RROMPyWarning(("Instability in a posteriori estimator. " "Starting preemptive greedy loop termination.")) maxErrorEst = maxErrorEstOld - self.muTrain = muTrainOld + self.muTest = muTestOld self.mus = self.mus[:-1] self.samplingEngine.popSample() self.trainedModel.data = copy(trainedModelOld.data) break trainedModelOld.data = copy(self.trainedModel.data) if (self.interactive and self.samplingEngine.nsamples >= self.maxIter): verbosityDepth("MAIN", ("Maximum number of iterations {} " "reached. Want to increase maxIter " "and continue? Y/N").format( self.maxIter), timestamp = self.timestamp, end = "") increasemaxIter = input() if increasemaxIter.upper() == "Y": verbosityDepth("MAIN", "Doubling value of maxIter...", timestamp = self.timestamp) self._maxIter *= 2 if (self.interactive and maxErrorEst <= self.greedyTol): verbosityDepth("MAIN", ("Required tolerance {} achieved. Want " "to decrease greedyTol and continue? 
" "Y/N").format(self.greedyTol), timestamp = self.timestamp, end = "") increasemaxIter = input() if increasemaxIter.upper() == "Y": verbosityDepth("MAIN", "Halving value of greedyTol...", timestamp = self.timestamp) self._greedyTol *= .5 if self.verbosity >= 2: verbosityDepth("DEL", ("Done computing snapshots (final snapshot " "count: {}).").format( self.samplingEngine.nsamples), timestamp = self.timestamp) def checkComputedApprox(self) -> bool: """ Check if setup of new approximant is not needed. Returns: True if new setup is not needed. False otherwise. """ return (super().checkComputedApprox() and self.S == self.trainedModel.data.projMat.shape[1]) def computeScaleFactor(self): """Compute parameter rescaling factor.""" modeAssert(self._mode, message = "Cannot compute rescaling factor.") - self.scaleFactor= np.abs(np.power(self.trainingSetGenerator.lims[0][0], + self.scaleFactor= np.abs(np.power(self.trainSetGenerator.lims[0][0], self.HFEngine.rescalingExp) - - np.power(self.trainingSetGenerator.lims[1][0], + - np.power(self.trainSetGenerator.lims[1][0], self.HFEngine.rescalingExp)) / 2. def assembleReducedResidualBlocksbb(self, bs:List[Np1D], pMat:Np2D, scaling : float = 1.): """ Build blocks (of type bb) of reduced linear system through projections. """ self.initEstNormer() nbs = len(bs) resbb = np.empty((nbs, nbs), dtype = np.complex) for i in range(nbs): Mbi = scaling ** i * bs[i] resbb[i, i] = self.estNormer.innerProduct(Mbi, Mbi) for j in range(i): Mbj = scaling ** j * bs[j] resbb[i, j] = self.estNormer.innerProduct(Mbj, Mbi) for i in range(nbs): for j in range(i + 1, nbs): resbb[i, j] = resbb[j, i].conj() self.trainedModel.data.resbb = resbb def assembleReducedResidualBlocksAb(self, As:List[Np2D], bs:List[Np1D], pMat:Np2D, scaling : float = 1.): """ Build blocks (of type Ab) of reduced linear system through projections. """ self.initEstNormer() nAs = len(As) nbs = len(bs) if (not hasattr(self.trainedModel.data, "resAb") or self.trainedModel.data.resAb is None): resAb = np.empty((nbs, self.S, nAs), dtype = np.complex) for j in range(nAs): MAj = scaling ** (j + 1) * As[j].dot(pMat) for i in range(nbs): Mbi = scaling ** (i + 1) * bs[i] resAb[i, :, j] = self.estNormer.innerProduct(MAj, Mbi) else: Sold = self.trainedModel.data.resAb.shape[1] if Sold > self.S: resAb = self.trainedModel.data.resAb[:, : self.S, :] else: resAb = np.empty((nbs, self.S, nAs), dtype = np.complex) resAb[:, : Sold, :] = self.trainedModel.data.resAb for j in range(nAs): MAj = scaling ** (j + 1) * As[j].dot(pMat[:, Sold :]) for i in range(nbs): Mbi = scaling ** (i + 1) * bs[i] resAb[i, Sold :, j] = self.estNormer.innerProduct(MAj, Mbi) self.trainedModel.data.resAb = resAb def assembleReducedResidualBlocksAA(self, As:List[Np2D], pMat:Np2D, scaling : float = 1.): """ Build blocks (of type AA) of reduced linear system through projections. 
""" self.initEstNormer() nAs = len(As) if (not hasattr(self.trainedModel.data, "resAA") or self.trainedModel.data.resAA is None): resAA = np.empty((self.S, nAs, self.S, nAs), dtype = np.complex) for i in range(nAs): MAi = scaling ** (i + 1) * As[i].dot(pMat) resAA[:, i, :, i] = self.estNormer.innerProduct(MAi, MAi) for j in range(i): MAj = scaling ** (j + 1) * As[j].dot(pMat) resAA[:, i, :, j] = self.estNormer.innerProduct(MAj, MAi) for i in range(nAs): for j in range(i + 1, nAs): resAA[:, i, :, j] = resAA[:, j, :, i].T.conj() else: Sold = self.trainedModel.data.resAA.shape[0] if Sold > self.S: resAA = self.trainedModel.data.resAA[: self.S, :, : self.S, :] else: resAA = np.empty((self.S, nAs, self.S, nAs), dtype = np.complex) resAA[: Sold, :, : Sold, :] = self.trainedModel.data.resAA for i in range(nAs): MAi = scaling ** (i + 1) * As[i].dot(pMat) resAA[: Sold, i, Sold :, i] = self.estNormer.innerProduct( MAi[:, Sold :], MAi[:, : Sold]) resAA[Sold :, i, : Sold, i] = resAA[: Sold, i, Sold :, i].T.conj() resAA[Sold :, i, Sold :, i] = self.estNormer.innerProduct( MAi[:, Sold :], MAi[:, Sold :]) for j in range(i): MAj = scaling ** (j + 1) * As[j].dot(pMat) resAA[: Sold, i, Sold :, j] = ( self.estNormer.innerProduct(MAj[:, Sold :], MAi[:, : Sold])) resAA[Sold :, i, : Sold, j] = ( self.estNormer.innerProduct(MAj[:, : Sold], MAi[:, Sold :])) resAA[Sold :, i, Sold :, j] = ( self.estNormer.innerProduct(MAj[:, Sold :], MAi[:, Sold :])) for i in range(nAs): for j in range(i + 1, nAs): resAA[: Sold, i, Sold :, j] = resAA[Sold :, j, : Sold, i].T.conj() resAA[Sold :, i, : Sold, j] = resAA[: Sold, j, Sold :, i].T.conj() resAA[Sold :, i, Sold :, j] = resAA[Sold :, j, Sold :, i].T.conj() self.trainedModel.data.resAA = resAA diff --git a/rrompy/utilities/base/__init__.py b/rrompy/utilities/base/__init__.py index f69c98e..7fe0fb3 100644 --- a/rrompy/utilities/base/__init__.py +++ b/rrompy/utilities/base/__init__.py @@ -1,46 +1,47 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # from .find_dict_str_key import findDictStrKey from .get_new_filename import getNewFilename from .get_timestamp import getTimestamp from .purge_dict import purgeDict from .purge_list import purgeList from .number_theory import (squareResonances, primeFactorize, getLowestPrimeFactor) from .sobol import sobolGenerate +from .low_discrepancy import vanderCorput, lowDiscrepancy from . 
import types as Types from .verbosity_depth import verbosityDepth from .custom_fit import customFit __all__ = [ 'findDictStrKey', 'getNewFilename', 'getTimestamp', 'purgeDict', 'purgeList', 'squareResonances', 'primeFactorize', 'getLowestPrimeFactor', 'sobolGenerate', + 'vanderCorput', + 'lowDiscrepancy', 'Types', 'verbosityDepth', 'customFit' ] diff --git a/rrompy/utilities/base/low_discrepancy.py b/rrompy/utilities/base/low_discrepancy.py new file mode 100644 index 0000000..bcbfe92 --- /dev/null +++ b/rrompy/utilities/base/low_discrepancy.py @@ -0,0 +1,48 @@ +# Copyright (C) 2018 by the RROMPy authors +# +# This file is part of RROMPy. +# +# RROMPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# RROMPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with RROMPy. If not, see . +# + +import numpy as np +from rrompy.utilities.base.types import Np1D +from rrompy.utilities.exception_manager import RROMPyException + +__all__ = ['vanderCorput', 'lowDiscrepancy'] + +def vanderCorput(n:int) -> Np1D: + """Indices 0, ..., n - 1 in bit-reversed (van der Corput) order.""" + if n <= 0: + raise RROMPyException("Only positive integers allowed.") + x = [0] * n + ln = int(np.ceil(np.log2(n))) + for j in range(n): + x[j] = int(np.binary_repr(j, width = ln)[::-1], 2) + return x + +def lowDiscrepancy(n:int) -> Np1D: + """Low-discrepancy permutation of range(n), based on bit reversal.""" + if n <= 0: + raise RROMPyException("Only positive integers allowed.") + x = [0] * n + max2Pow = np.binary_repr(n)[::-1].find('1') + max2Fac = 2 ** max2Pow + res2 = n // max2Fac + xBase = res2 * np.array(vanderCorput(max2Fac), dtype = np.int) + stride = ((n - 1) // 2 - 1) * (max2Pow == 0) + 1 + for i in range(res2): + x[i * max2Fac : (i + 1) * max2Fac] = (i * stride + xBase) % n + return x + diff --git a/rrompy/utilities/parameter_sampling/fft_sampler.py b/rrompy/utilities/parameter_sampling/fft_sampler.py index 7c87e68..d3b5059 100644 --- a/rrompy/utilities/parameter_sampling/fft_sampler.py +++ b/rrompy/utilities/parameter_sampling/fft_sampler.py @@ -1,98 +1,105 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see .
# import numpy as np from rrompy.utilities.parameter_sampling.generic_sampler import GenericSampler from rrompy.utilities.base.types import Np1D, List, Tuple from rrompy.utilities.exception_manager import RROMPyException +from rrompy.utilities.base import lowDiscrepancy __all__ = ['FFTSampler'] class FFTSampler(GenericSampler): """Generator of FFT-type sample points on scaled roots of unity.""" def generatePoints(self, n:Np1D) -> Tuple[List[Np1D], Np1D]: """Array of sample points and array of weights.""" super().generatePoints(n) d = len(self.lims[0]) try: len(n) except: n = np.array([n]) if len(n) != d: raise RROMPyException(("Numbers of points must have same " "dimension as limits.")) for j in range(d): a, b = self.lims[0][j], self.lims[1][j] if self.scaling is not None: a, b = self.scaling[j](a), self.scaling[j](b) c, r = (a + b) / 2., np.abs(a - b) / 2. xj = c + r * np.exp(1.j * np.linspace(0, 2 * np.pi, n[j] + 1)[:-1, None]) wj = r / n[j] * np.ones(n[j]) + fejerOrdering = lowDiscrepancy(len(xj)) + xj = xj[fejerOrdering] + wj = wj[fejerOrdering] if self.scalingInv is not None: xj = self.scalingInv[j](xj) if j == 0: x = xj w = wj xsize = n[0] else: x = np.concatenate((np.kron(np.ones(n[j])[:, None], x), np.kron(xj, np.ones(xsize)[:, None])), axis = 1) w = np.multiply(np.kron(np.ones(n[j]), w), np.kron(wj, np.ones(xsize))) xsize = xsize * n[j] return [y.flatten() for y in np.split(x, xsize)], w def refine(self, n:int) -> Tuple[List[Np1D], Np1D]: """ Apply refinement. If points are not nested, equivalent to generatePoints([2 * x for x in n]). """ super().generatePoints(n) d = len(self.lims[0]) try: len(n) except: n = np.array([n]) if len(n) != d: raise RROMPyException(("Numbers of points must have same " "dimension as limits.")) for j in range(d): a, b = self.lims[0][j], self.lims[1][j] if self.scaling is not None: a, b = self.scaling[j](a), self.scaling[j](b) c, r = (a + b) / 2., np.abs(a - b) / 2. xj = c + r * np.exp(1.j * (np.pi / n[j] + np.linspace(0, 2 * np.pi, n[j] + 1)[:-1, None])) wj = r / n[j] * np.ones(n[j]) + fejerOrdering = lowDiscrepancy(len(xj)) + xj = xj[fejerOrdering] + wj = wj[fejerOrdering] if self.scalingInv is not None: xj = self.scalingInv[j](xj) if j == 0: x = xj w = wj xsize = n[0] else: x = np.concatenate((np.kron(np.ones(n[j])[:, None], x), np.kron(xj, np.ones(xsize)[:, None])), axis = 1) w = np.multiply(np.kron(np.ones(n[j]), w), np.kron(wj, np.ones(xsize))) xsize = xsize * n[j] return [y.flatten() for y in np.split(x, xsize)], w diff --git a/rrompy/utilities/parameter_sampling/quadrature_sampler.py b/rrompy/utilities/parameter_sampling/quadrature_sampler.py index 237e7a0..2a97149 100644 --- a/rrompy/utilities/parameter_sampling/quadrature_sampler.py +++ b/rrompy/utilities/parameter_sampling/quadrature_sampler.py @@ -1,147 +1,156 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . 
# import numpy as np from rrompy.utilities.parameter_sampling.generic_sampler import GenericSampler from rrompy.utilities.base.types import Np1D, Tuple, List from rrompy.utilities.exception_manager import RROMPyException +from rrompy.utilities.base import lowDiscrepancy __all__ = ['QuadratureSampler'] class QuadratureSampler(GenericSampler): """Generator of quadrature sample points.""" allowedKinds = ["UNIFORM", "CHEBYSHEV", "GAUSSLEGENDRE", "CLENSHAWCURTIS"] def __init__(self, lims:Tuple[Np1D, Np1D], kind : str = "UNIFORM", scaling : List[callable] = None, scalingInv : List[callable] = None): super().__init__(lims = lims, scaling = scaling, scalingInv = scalingInv) self.kind = kind def __str__(self) -> str: return "{}_{}".format(super().__str__(), self.kind) def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) @property def kind(self): """Value of kind.""" return self._kind @kind.setter def kind(self, kind): if kind.upper() not in self.allowedKinds: raise RROMPyException("Generator kind not recognized.") self._kind = kind.upper() def generatePoints(self, n:Np1D) -> Tuple[List[Np1D], Np1D]: """Array of quadrature points and array of weights.""" super().generatePoints(n) d = len(self.lims[0]) try: len(n) except: n = np.array([n]) if len(n) != d: raise RROMPyException(("Numbers of points must have same " "dimension as limits.")) for j in range(d): a, b = self.lims[0][j], self.lims[1][j] if self.scaling is not None: a, b = self.scaling[j](a), self.scaling[j](b) if self.kind == "UNIFORM": xj = np.linspace(a, b, n[j])[:, None] wj = np.abs(a - b) / n[j] * np.ones(n[j]) elif self.kind == "CHEBYSHEV": nodes, weights = np.polynomial.chebyshev.chebgauss(n[j]) xj = (a + b) / 2 + (a - b) / 2 * nodes[:, None] wj = np.abs(a - b) / np.pi * weights elif self.kind == "GAUSSLEGENDRE": nodes, weights = np.polynomial.legendre.leggauss(n[j]) xj = (a + b) / 2 + (a - b) / 2 * nodes[:, None] wj = np.abs(a - b) * weights elif self.kind == "CLENSHAWCURTIS": if n[j] == 1: nodes = np.zeros(1) weights = 2. * np.ones(1) else: thetas = np.pi / (n[j] - 1) * np.arange(n[j]) nodes = np.cos(thetas) weights = np.ones(n[j]) for i in range(n[j]): khi = (n[j] - 1) // 2 for k in range(khi): bk = 1. + 1. * (2 * (k + 1) != n[j] - 1) weights[i] -= (bk * np.cos(2. * (k + 1) * thetas[i]) / (4. * k * (k + 2) + 3)) weights[:] /= (n[j] - 1) weights[1 : -1] *= 2. xj = (a + b) / 2 + (a - b) / 2 * nodes[:, None] wj = np.abs(a - b) / 2 * weights + if len(xj) > 1: + fejerOrdering = [len(xj) - 1] + lowDiscrepancy(len(xj) - 1) + xj = xj[fejerOrdering] + wj = wj[fejerOrdering] if self.scalingInv is not None: xj = self.scalingInv[j](xj) if j == 0: x = xj w = wj xsize = n[0] else: x = np.concatenate((np.kron(np.ones(n[j])[:, None], x), np.kron(xj, np.ones(xsize)[:, None])), axis = 1) w = np.multiply(np.kron(np.ones(n[j]), w), np.kron(wj, np.ones(xsize))) xsize = xsize * n[j] return [y.flatten() for y in np.split(x, xsize)], w def refine(self, n:int) -> Tuple[List[Np1D], Np1D]: """ Apply refinement. If points are not nested, equivalent to generatePoints([2 * x - 1 for x in n]). """ if self.kind != "UNIFORM": return super().refine(n) super().generatePoints(n) d = len(self.lims[0]) try: len(n) except: n = np.array([n]) if len(n) != d: raise RROMPyException(("Numbers of points must have same " "dimension as limits.")) for j in range(d): a, b = self.lims[0][j], self.lims[1][j] if self.scaling is not None: a, b = self.scaling[j](a), self.scaling[j](b) xj = np.linspace(a + (b - a) / 2. / (n[j] - 1), b + (a - b) / 2.
/ (n[j] - 1), n[j] - 1)[:, None] wj = np.abs(a - b) / (n[j] - 1) * np.ones(n[j] - 1) + if len(xj) > 1: + fejerOrdering = [len(xj) - 1] + lowDiscrepancy(len(xj) - 1) + xj = xj[fejerOrdering] + wj = wj[fejerOrdering] if self.scalingInv is not None: xj = self.scalingInv[j](xj) if j == 0: x = xj w = wj xsize = n[0] - 1 else: x = np.concatenate((np.kron(np.ones(n[j] - 1)[:, None], x), np.kron(xj, np.ones(xsize)[:, None])), axis = 1) w = np.multiply(np.kron(np.ones(n[j] - 1), w), np.kron(wj, np.ones(xsize))) xsize = xsize * (n[j] - 1) return [y.flatten() for y in np.split(x, xsize)], w diff --git a/rrompy/utilities/parameter_sampling/warping_sampler.py b/rrompy/utilities/parameter_sampling/warping_sampler.py index 39d27be..cf0c6ff 100644 --- a/rrompy/utilities/parameter_sampling/warping_sampler.py +++ b/rrompy/utilities/parameter_sampling/warping_sampler.py @@ -1,154 +1,163 @@ # Copyright (C) 2018 by the RROMPy authors # # This file is part of RROMPy. # # RROMPy is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RROMPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RROMPy. If not, see . # import numpy as np from rrompy.utilities.parameter_sampling.generic_sampler import GenericSampler from rrompy.utilities.base.types import Np1D, Tuple, List from rrompy.utilities.exception_manager import RROMPyException +from rrompy.utilities.base import lowDiscrepancy __all__ = ['WarpingSampler', 'WarpingFunction'] class WarpingFunction: """Wrapper for warping function.""" def __init__(self, other : "WarpingFunction" = None, **kwargs): if other is not None: self._call_ = other._call_ self._repr_ = other._repr_ else: self._call_ = kwargs["call"] self._repr_ = kwargs["repr"] if not callable(self._call_): self._call_ = lambda x, val = self._call_: val if callable(self._repr_): self._repr_ = self._repr_() def __call__(self, x): return self._call_(x) def __str__(self): return self._repr_ def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) class WarpingSampler(GenericSampler): """Generator of sample points from warping of uniform ones.""" def __init__(self, lims:Tuple[Np1D, Np1D], warping : List[callable], scaling : List[callable] = None, scalingInv : List[callable] = None): super().__init__(lims = lims, scaling = scaling, scalingInv = scalingInv) self.warping = warping def __str__(self) -> str: return "{}_{}".format(super().__str__(), self.warping) def __repr__(self) -> str: return self.__str__() + " at " + hex(id(self)) @property def warping(self): """Value of warping.""" return self._warping @warping.setter def warping(self, warping): if not isinstance(warping, (list, tuple,)): warping = [warping] * len(self.lims[0]) for j in range(len(warping)): if not isinstance(warping[j], (WarpingFunction,)): warping[j] = WarpingFunction(call = warping[j].__call__, repr = warping[j].__repr__) self._warping = warping def generatePoints(self, n:Np1D) -> Tuple[List[Np1D], Np1D]: """Array of quadrature points and array of weights.""" super().generatePoints(n) assert len(self.warping) == len(self.lims[0]) d = len(self.lims[0]) try: len(n) except: n =
np.array([n]) if len(n) != d: raise RROMPyException(("Numbers of points must have same " "dimension as limits.")) for j in range(d): a, b = self.lims[0][j], self.lims[1][j] if self.scaling is not None: a, b = self.scaling[j](a), self.scaling[j](b) x0, sigma = (a + b) / 2, (a - b) / 2 xj0 = np.linspace(- 1., 1., n[j])[:, None] xj = x0 + sigma * self.warping[j](xj0) wj = np.abs(a - b) / (n[j] - 1) * np.ones(n[j]) + if len(xj) > 1: + fejerOrdering = [len(xj) - 1] + lowDiscrepancy(len(xj) - 1) + xj = xj[fejerOrdering] + wj = wj[fejerOrdering] if self.scalingInv is not None: xj = self.scalingInv[j](xj) if j == 0: x = xj w = wj xsize = n[0] else: x = np.concatenate((np.kron(np.ones(n[j])[:, None], x), np.kron(xj, np.ones(xsize)[:, None])), axis = 1) w = np.multiply(np.kron(np.ones(n[j]), w), np.kron(wj, np.ones(xsize))) xsize = xsize * n[j] return [y.flatten() for y in np.split(x, xsize)], w def refine(self, n:int) -> Tuple[List[Np1D], Np1D]: """ Apply refinement. If points are not nested, equivalent to generatePoints([2 * x - 1 for x in n]). """ super().generatePoints(n) assert len(self.warping) == len(self.lims[0]) d = len(self.lims[0]) try: len(n) except: n = np.array([n]) if len(n) != d: raise RROMPyException(("Numbers of points must have same " "dimension as limits.")) for j in range(d): a, b = self.lims[0][j], self.lims[1][j] if self.scaling is not None: a, b = self.scaling[j](a), self.scaling[j](b) x0, sigma = (a + b) / 2, (a - b) / 2 xj0 = np.linspace(1. / (n[j] - 1) - 1., 1. - 1. / (n[j] - 1), n[j] - 1)[:, None] xj = x0 + sigma * self.warping[j](xj0) wj = np.abs(a - b) / (n[j] - 2) * np.ones(n[j] - 1) + if len(xj) > 1: + fejerOrdering = [len(xj) - 1] + lowDiscrepancy(len(xj) - 1) + xj = xj[fejerOrdering] + wj = wj[fejerOrdering] if self.scalingInv is not None: xj = self.scalingInv[j](xj) if j == 0: x = xj w = wj xsize = n[0] - 1 else: x = np.concatenate((np.kron(np.ones(n[j] - 1)[:, None], x), np.kron(xj, np.ones(xsize)[:, None])), axis = 1) w = np.multiply(np.kron(np.ones(n[j] - 1), w), np.kron(wj, np.ones(xsize))) xsize = xsize * (n[j] - 1) return [y.flatten() for y in np.split(x, xsize)], w diff --git a/setup.py b/setup.py index 52486f2..ae499b3 100644 --- a/setup.py +++ b/setup.py @@ -1,46 +1,46 @@ # Copyright (C) 2015-2018 by the RBniCS authors # # This file is part of RBniCS. # # RBniCS is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RBniCS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with RBniCS. If not, see . 
# import os from setuptools import find_packages, setup rrompy_directory = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) #rrompy_directory = os.path.join(rrompy_directory, 'rrompy') setup(name="RROMPy", description="Rational reduced order modelling in Python", long_description="Rational reduced order modelling in Python", author="Davide Pradovera", author_email="davide.pradovera@epfl.ch", - version="0.0.dev2", + version="1.0", license="GNU Library or Lesser General Public License (LGPL)", classifiers=[ - "Development Status :: 1 - Planning" + "Development Status :: 1 - Planning", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Software Development :: Libraries :: Python Modules", ], packages=find_packages(rrompy_directory), zip_safe=False )
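A note on the reduced-basis error estimator touched above (approximant_lagrange_greedy_rb.py): it never assembles the full residual. The three contractions commented in the code ('ij,jk,ik->k' for resbb, 'ijk,jkl,il->l' for resAb, 'ijkl,klt,ijt->t' for resAA) are the three terms of the expansion ||A(mu) u - b(mu)||^2 = (A u, A u) - 2 Re (A u, b) + (b, b), i.e. LL - 2 Re(Lf) + ff, evaluated through Gramians precomputed once per projection matrix. A minimal standalone check of that identity, with the Euclidean inner product standing in for estNormer.innerProduct (illustration only, not RROMPy API):

    import numpy as np

    np.random.seed(0)
    A = np.random.randn(5, 5) + 1j * np.random.randn(5, 5)  # stand-in for A(mu)
    u = np.random.randn(5) + 1j * np.random.randn(5)         # lifted RB solution
    b = np.random.randn(5) + 1j * np.random.randn(5)         # stand-in for b(mu)

    res = A.dot(u) - b
    LL = np.vdot(A.dot(u), A.dot(u)).real  # (A u, A u)
    Lf = np.vdot(b, A.dot(u))              # (A u, b)
    ff = np.vdot(b, b).real                # (b, b)
    assert np.isclose(np.vdot(res, res).real, LL - 2. * np.real(Lf) + ff)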
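The new low_discrepancy module drives the fejerOrdering reorderings added to the samplers: vanderCorput(n) lists 0, ..., n - 1 in bit-reversed order, and lowDiscrepancy(n) extends this to arbitrary n (bit reversal within the largest power-of-two factor, plus a coprime stride across the remaining odd factor), yielding a permutation of range(n) whose leading entries are already well spread over the index range. A quick sanity check, assuming the package is installed as laid out in these diffs (not part of the patch):

    from rrompy.utilities.base import vanderCorput, lowDiscrepancy

    print(vanderCorput(8))    # [0, 4, 2, 6, 1, 5, 3, 7], bit-reversal of 0..7
    print(lowDiscrepancy(8))  # same permutation, since 8 is a power of two
    print(lowDiscrepancy(9))  # [0, 4, 8, 3, 7, 2, 6, 1, 5], stride (9 - 1) // 2 = 4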
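On the user side, the renaming that runs through the greedy classes (nTrainingPoints, nTestPoints, trainingSetGenerator becoming nTestPoints, nTrainPoints, trainSetGenerator, testSetGenerator, with the train/test roles now matching their names) changes the approxParameters dictionary, as the examples/diapason/greedy.py hunk shows. A sketch of the new keys; the solver would be any HF engine set up as in that example, and the two generators merely restate the documented defaults (Chebyshev for training, uniform for testing):

    from rrompy.utilities.parameter_sampling import QuadratureSampler

    kl, kr = 2.0e5, 2.5e5
    params = {'muBounds': [kl, kr],
              'greedyTol': 1e-2,
              'nTrainPoints': 2,   # starting training (snapshot) points
              'nTestPoints': 500,  # test points scanned by the error estimator
              'trainSetGenerator': QuadratureSampler([[kl], [kr]], "CHEBYSHEV"),
              'testSetGenerator': QuadratureSampler([[kl], [kr]], "UNIFORM")}
    # then, as in examples/diapason/greedy.py:
    # approx = Pade(solver, mu0 = k0, approxParameters = params, verbosity = verb)
    # approx.greedy(True)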