diff --git a/examples/3_sector_angle/sector_angle.py b/examples/3_sector_angle/sector_angle.py
index 80002d2..244682b 100644
--- a/examples/3_sector_angle/sector_angle.py
+++ b/examples/3_sector_angle/sector_angle.py
@@ -1,107 +1,124 @@
 import numpy as np
 import matplotlib.pyplot as plt
 from sector_angle_engine import SectorAngleEngine as engine
 from rrompy.reduction_methods import (NearestNeighbor as NN,
+                             RationalInterpolantPivotedMatch as RIM,
+                             RationalInterpolantGreedyPivotedMatch as RIGM,
                              RationalInterpolantPivotedPoleMatch as RIP,
                              RationalInterpolantGreedyPivotedPoleMatch as RIGP)
 from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS,
                                                  EmptySampler as ES)
 
 ks, ts = [10., 15.], [.4, .6]
 k0, t0, n = np.mean(np.power(ks, 2.)) ** .5, np.mean(ts), 50
 solver = engine(k0, t0, n)
 
 murange = [[ks[0], ts[0]], [ks[-1], ts[-1]]]
 mu = [12., .535]
 
 fighandles = []
 for method in ["RI", "RI_GREEDY"]:
-    print("Testing {} method".format(method))
+    print("Testing {} method.".format(method))
 
     if method == "RI":
         params = {'S':20, "paramsMarginal":{"MMarginal": 3}, 'SMarginal':11,
                   'POD':True, 'polybasis':"CHEBYSHEV",
                   'polybasisMarginal':"MONOMIAL_GAUSSIAN",
                   'radialDirectionalWeightsMarginal': 100.,
                   'matchingWeight':1., 'samplerPivot':QS(ks, "CHEBYSHEV", 2.),
                   'samplerMarginal':QS(ts, "UNIFORM")}
-        algo = RIP
+        algoP, algoM = RIP, RIM
     if method == "RI_GREEDY":
         params = {'S':10, "paramsMarginal":{"MMarginal": 3}, 'SMarginal':11,
                   'POD':True, 'polybasis':"LEGENDRE",
                   'polybasisMarginal':"MONOMIAL_GAUSSIAN",
                   'radialDirectionalWeightsMarginal': 100.,
                   'matchingWeight':1., 'samplerPivot':QS(ks, "UNIFORM", 2.),
                   'greedyTol':1e-3, 'errorEstimatorKind':"LOOK_AHEAD_RES",
                   'samplerTrainSet':QS(ks, "CHEBYSHEV", 2.),
                   'samplerMarginal':QS(ts, "UNIFORM")}
-        algo = RIGP
+        algoP, algoM = RIGP, RIGM
         
-    approx = algo([0], solver, mu0 = [k0, t0], approxParameters = params,
-                  verbosity = 10, storeAllSamples = True)
-    if len(method) == 2:
-        approx.setupApprox()
+    approxP = algoP([0], solver, mu0 = [k0, t0], approxParameters = params,
+                    verbosity = 10, storeAllSamples = True)
+    if method == "RI":
+        approxP.setupApprox()
     else:
-        approx.setupApprox("LAST")
+        approxP.setupApprox("LAST")
+    
+    print("--- Pole-matching approximant ---")
+    approxP.plotApprox(mu, plotargs = {"name": 'u_app'})
+    approxP.plotHF(mu, plotargs = {"name": 'u_HF'})
+    approxP.plotErr(mu, plotargs = {"name": 'err_app'})
+    approxP.plotRes(mu, plotargs = {"name": 'res_app'})
+    normErr = approxP.normErr(mu)[0]
+    normSol = approxP.normHF(mu)[0]
+    normRes = approxP.normRes(mu)[0]
+    normRHS = approxP.normRHS(mu)[0]
+    print("SolNorm:\t{:.5e}\nErr_app: \t{:.5e}\nErrRel_app:\t{:.5e}".format(
+                                          normSol, normErr, normErr / normSol))
+    print("RHSNorm:\t{:.5e}\nRes_app: \t{:.5e}\nResRel_app:\t{:.5e}".format(
+                                          normRHS, normRes, normRes / normRHS))
     
-    print("--- Approximant ---")
-    approx.plotApprox(mu, plotargs = {"name": 'u_app'})
-    approx.plotHF(mu, plotargs = {"name": 'u_HF'})
-    approx.plotErr(mu, plotargs = {"name": 'err_app'})
-    approx.plotRes(mu, plotargs = {"name": 'res_app'})
-    normErr = approx.normErr(mu)[0]
-    normSol = approx.normHF(mu)[0]
-    normRes = approx.normRes(mu)[0]
-    normRHS = approx.normRHS(mu)[0]
+    print("--- Rational-matching approximant ---")
+    approxM = algoM([0], solver, mu0 = [k0, t0], approxParameters = params,
+                    verbosity = 0)
+    approxM.setTrainedModel(approxP)
+    approxM.plotApprox(mu, plotargs = {"name": 'u_app'})
+    approxM.plotHF(mu, plotargs = {"name": 'u_HF'})
+    approxM.plotErr(mu, plotargs = {"name": 'err_app'})
+    approxM.plotRes(mu, plotargs = {"name": 'res_app'})
+    normErr = approxM.normErr(mu)[0]
+    normSol = approxM.normHF(mu)[0]
+    normRes = approxM.normRes(mu)[0]
+    normRHS = approxM.normRHS(mu)[0]
     print("SolNorm:\t{:.5e}\nErr_app: \t{:.5e}\nErrRel_app:\t{:.5e}".format(
                                           normSol, normErr, normErr / normSol))
     print("RHSNorm:\t{:.5e}\nRes_app: \t{:.5e}\nResRel_app:\t{:.5e}".format(
                                           normRHS, normRes, normRes / normRHS))
     
     print("--- Closest snapshot ---")
-    paramsNN = {'S':len(approx.mus), 'POD':True, 'sampler':ES()}
+    paramsNN = {'S':len(approxP.mus), 'POD':True, 'sampler':ES()}
     approxNN = NN(solver, mu0 = [k0, t0], approxParameters = paramsNN,
                   verbosity = 0)
-    approxNN.setSamples(approx.storedSamplesFilenames)
-    approx.purgeStoredSamples()
+    approxNN.setSamples(approxP.storedSamplesFilenames)
+    approxP.purgeStoredSamples()
     approxNN.plotApprox(mu, plotargs = {"name": 'u_close'})
     approxNN.plotHF(mu, plotargs = {"name": 'u_HF'})
     approxNN.plotErr(mu, plotargs = {"name": 'err_close'})
     approxNN.plotRes(mu, plotargs = {"name": 'res_close'})
     normErr = approxNN.normErr(mu)[0]
     normSol = approxNN.normHF(mu)[0]
     normRes = approxNN.normRes(mu)[0]
     normRHS = approxNN.normRHS(mu)[0]
     print("SolNorm:\t{:.5e}\nErr_close:\t{:.5e}\nErrRel_close:\t{:.5e}".format(
                                           normSol, normErr, normErr / normSol))
     print("RHSNorm:\t{:.5e}\nRes_close:\t{:.5e}\nResRel_close:\t{:.5e}".format(
                                           normRHS, normRes, normRes / normRHS))
 
-    verb = approx.verbosity
-    approx.verbosity = 0
+    approxP.verbosity = approxM.verbosity = 0
     tspace = np.linspace(ts[0], ts[-1], 100)
-    for j, t in enumerate(tspace):
-        pls = approx.getPoles([None, t])
-        pls[np.abs(np.imag(pls ** 2.)) > 1e-5] = np.nan
-        if j == 0: poles = np.empty((len(tspace), len(pls)))
-        poles[j] = np.real(pls)
-    approx.verbosity = verb
-    fighandles += [plt.figure(figsize = (12, 5))]
-    ax1 = fighandles[-1].add_subplot(1, 2, 1)
-    ax2 = fighandles[-1].add_subplot(1, 2, 2)
-    ax1.plot(poles, tspace)
-    ax1.set_ylim(ts)
-    ax1.set_xlabel('mu_1')
-    ax1.set_ylabel('mu_2')
-    ax1.grid()
-    ax2.plot(poles, tspace)
-    for mm in approx.musMarginal:
-        ax2.plot(ks, [mm[0, 0]] * 2, 'k--', linewidth = 1)
-    ax2.set_xlim(ks)
-    ax2.set_ylim(ts)
-    ax2.set_xlabel('mu_1')
-    ax2.set_ylabel('mu_2')
-    ax2.grid()
+    poless = []
+    for app in [approxP, approxM]:
+        for j, t in enumerate(tspace):
+            pls = app.getPoles([None, t])
+            if j == 0: poles = np.empty((len(tspace), len(pls)))
+            poles[j] = np.real(pls)
+        poless += [poles]
+    fighandles += [plt.figure(figsize = (10, 7))]
+    axs = [fighandles[-1].add_subplot(2, 2, 1 + x) for x in range(4)]
+    for x, appname in enumerate(["POLE", "RATIONAL"]):
+        axs[2 * x].plot(poless[x], tspace, '.' * x, ms = 2.5)  # '' line / '.' dots
+        axs[2 * x].set_xlim(5., 20.), axs[2 * x].set_ylim(ts)
+        axs[2 * x].set_xlabel('mu_1'), axs[2 * x].set_ylabel('mu_2')
+        axs[2 * x].set_title(appname), axs[2 * x].grid()
+        axs[2 * x + 1].plot(poless[x], tspace, '.' * x, ms = 2.5)
+        for mm in approxP.musMarginal:
+            axs[2 * x + 1].plot(ks, [mm[0, 0]] * 2, 'k--', linewidth = 1)
+        axs[2 * x + 1].set_xlim(ks), axs[2 * x + 1].set_ylim(ts)
+        axs[2 * x + 1].set_xlabel('mu_1'), axs[2 * x + 1].set_ylabel('mu_2')
+        axs[2 * x + 1].set_title(appname), axs[2 * x + 1].grid()
+    plt.tight_layout()
     plt.show()
     
     print("\n")
diff --git a/examples/5_anisotropic_square/anisotropic_square.py b/examples/5_anisotropic_square/anisotropic_square.py
index 1c7e05a..479886e 100644
--- a/examples/5_anisotropic_square/anisotropic_square.py
+++ b/examples/5_anisotropic_square/anisotropic_square.py
@@ -1,81 +1,78 @@
 ### example from Smetana, Zahm, Patera. Randomized residual-based error
 ### estimators for parametrized equations.
 import numpy as np
 import matplotlib.pyplot as plt
-from itertools import product
 from anisotropic_square_engine import (AnisotropicSquareEngine as engine,
                                        AnisotropicSquareEnginePoles as plsEx)
 from rrompy.reduction_methods import (
                       RationalInterpolantGreedyPivotedGreedyPoleMatch as RIGPG)
 from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS,
                                                  SparseGridSampler as SGS)
 
 zs, Ls = [10., 50.], [.2, 1.2]
 z0, L0, n = np.mean(zs), np.mean(Ls), 50
 murange = [[zs[0], Ls[0]], [zs[-1], Ls[-1]]]
 np.random.seed(4020)
 mu = [zs[0] + np.random.rand() * (zs[-1] - zs[0]),
       Ls[0] + np.random.rand() * (Ls[-1] - Ls[0])]
 solver = engine(z0, L0, n)
+solver.cutOffPolesRMinRel, solver.cutOffPolesRMaxRel = -2, 2
 
 fighandles = []
 params = {"POD": True, "nTestPoints": 100, "greedyTol": 1e-4, "S": 3,
           "polybasisMarginal": "PIECEWISE_LINEAR_UNIFORM",
           "polybasis": "LEGENDRE", "samplerPivot":QS(zs, "UNIFORM"),
-          "samplerTrainSet":QS(zs, "UNIFORM"),
+          "samplerTrainSet":QS(zs, "UNIFORM"), "maxIterMarginal":50,
           "errorEstimatorKind":"LOOK_AHEAD_RES",
           "errorEstimatorKindMarginal":"LOOK_AHEAD_RECOVER",
           "SMarginal": 3, "paramsMarginal": {"MMarginal": 2,
                          "radialDirectionalWeightsMarginalAdapt": [1e9, 1e12]},
           "greedyTolMarginal": 1e-2, "samplerMarginal":SGS(Ls),
           "radialDirectionalWeightsMarginal": [4.], "matchingWeight": 1.,
           "badPoleCorrection": "POLYNOMIAL", "autoCollapse": 1}
 
-for shared, tol in product([1., 0.], [1., 3.]):
-    print("Testing cutoff tolerance {} with shared ratio {}.".format(tol,
-                                                                     shared))
-    solver.cutOffPolesRMinRel = - 1. - tol
-    solver.cutOffPolesRMaxRel = 1. + tol
+for shared in [1., .5, 0.]:
+    print("Testing shared ratio {}.".format(shared))
     params["matchingShared"] = shared
 
     approx = RIGPG([0], solver, mu0 = [z0, L0], approxParameters = params,
                    verbosity = 5)
     approx.setupApprox("ALL")
     verb = approx.verbosity
     approx.verbosity = 0
     tspace = np.linspace(Ls[0], Ls[-1], 100)
     for j, t in enumerate(tspace):
         plsE = plsEx(t, 0., zs[-1])
         pls = approx.getPoles([None, t])
         pls[np.abs(np.imag(pls)) > 1e-5] = np.nan
         if j == 0:
             polesE = np.empty((len(tspace), len(plsE)))
             poles = np.empty((len(tspace), len(pls)))
             polesE[:] = np.nan
         if len(plsE) > polesE.shape[1]:
             nanR = np.empty((len(tspace), len(plsE) - polesE.shape[1]))
             nanR[:] = np.nan
             polesE = np.hstack((polesE, nanR))
         polesE[j, : len(plsE)] = np.real(plsE)
         poles[j] = np.real(pls)
     approx.verbosity = verb
     fighandles += [plt.figure(figsize = (17, 5))]
     ax1 = fighandles[-1].add_subplot(1, 2, 1)
     ax2 = fighandles[-1].add_subplot(1, 2, 2)
     ax1.plot(poles, tspace)
     ax1.set_ylim(Ls)
     ax1.set_xlabel("mu_1")
     ax1.set_ylabel("mu_2")
     ax1.grid()
     ax2.plot(polesE, tspace, "k-.", linewidth = 1)
     ax2.plot(poles, tspace)
     for mm in approx.musMarginal:
         ax2.plot(zs, [mm[0, 0]] * 2, "k--", linewidth = 1)
     ax2.set_xlim(zs)
     ax2.set_ylim(Ls)
     ax2.set_xlabel("mu_1")
     ax2.set_ylabel("mu_2")
     ax2.grid()
     plt.show()
 
     print("\n")
diff --git a/rrompy/hfengines/base/scipy_engine_base.py b/rrompy/hfengines/base/scipy_engine_base.py
index 2d5db42..93d997c 100644
--- a/rrompy/hfengines/base/scipy_engine_base.py
+++ b/rrompy/hfengines/base/scipy_engine_base.py
@@ -1,127 +1,135 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 import numpy as np
 from matplotlib import pyplot as plt
 from .hfengine_base import HFEngineBase
 from rrompy.utilities.base.types import Np1D, strLst, List, Tuple, FigHandle
 from rrompy.utilities.base.data_structures import purgeList, getNewFilename
 from rrompy.utilities.exception_manager import RROMPyException
 from rrompy.utilities.parallel import masterCore, bcast
 
 __all__ = ['ScipyEngineBase', 'ScipyEngineBaseTensorized']
     
 def checknports(eng) -> int:
     if not hasattr(eng, "nports"):
         raise RROMPyException(("Engine.nports should be assigned before using "
                                "tensorized plotting functionalities."))
     return eng.nports
 
 class ScipyEngineBase(HFEngineBase):
     """Generic solver for parametric matricial problems."""
 
     def plot(self, u:Np1D, warping : List[callable] = None,
              is_state : bool = False, name : str = "u", save : str = None,
              what : strLst = 'all', forceNewFile : bool = True,
              saveFormat : str = "eps", saveDPI : int = 100, show : bool = True,
              colorMap : str = "jet", pyplotArgs : dict = {},
              **figspecs) -> Tuple[FigHandle, str]:
         """
         Do some nice plots of the complex-valued function with given dofs.
 
         Args:
             u: numpy complex array with function dofs.
             name(optional): Name to be shown as title of the plots. Defaults to
                 'u'.
             is_state(optional): whether given u is value before multiplication
                 by c. Defaults to False.
             save(optional): Where to save plot(s). Defaults to None, i.e. no
                 saving.
             what(optional): Which plots to do. If list, can contain 'ABS',
                 'PHASE', 'REAL', 'IMAG'. If str, same plus wildcard 'ALL'.
                 Defaults to 'ALL'.
             forceNewFile(optional): Whether to create new output file.
             saveFormat(optional): Format for saved plot(s). Defaults to "eps".
             saveDPI(optional): DPI for saved plot(s). Defaults to 100.
             show(optional): Whether to show figure. Defaults to True.
             colorMap(optional): Pyplot colormap. Defaults to 'jet'.
             pyplotArgs(optional): Optional arguments for pyplot.
             figspecs(optional key args): Optional arguments for matplotlib
                 figure creation.
 
         Returns:
             Output filename and figure handle.
         """
         if isinstance(what, (str,)):
             if what.upper() == 'ALL':
                 what = ['ABS', 'PHASE', 'REAL', 'IMAG']
             else:
                 what = [what]
         what = purgeList(what, ['ABS', 'PHASE', 'REAL', 'IMAG'],
                          listname = self.name() + ".what", baselevel = 1)
         if len(what) == 0: return
         out = None
         if masterCore():
             if 'figsize' not in figspecs.keys():
                 figspecs['figsize'] = plt.figaspect(1. / len(what))
     
             idxs = np.arange(len(u))
             if warping is not None:
                 idxs = warping[0](idxs)
             subplotidx = 0
             fig = plt.figure(**figspecs)
             plt.set_cmap(colorMap)
             if 'ABS' in what:
                 subplotidx = subplotidx + 1
                 ax = fig.add_subplot(1, len(what), subplotidx)
                 ax.plot(idxs, np.abs(u), **pyplotArgs)
                 ax.set_title("|{0}|".format(name))
+                ax.set_xlim(idxs[0], idxs[-1])
+                ax.grid()
             if 'PHASE' in what:
                 subplotidx = subplotidx + 1
                 ax = fig.add_subplot(1, len(what), subplotidx)
                 ax.plot(idxs, np.angle(u), **pyplotArgs)
                 ax.set_title("phase({0})".format(name))
+                ax.set_xlim(idxs[0], idxs[-1])
+                ax.grid()
             if 'REAL' in what:
                 subplotidx = subplotidx + 1
                 ax = fig.add_subplot(1, len(what), subplotidx)
                 ax.plot(idxs, np.real(u), **pyplotArgs)
                 ax.set_title("Re({0})".format(name))
+                ax.set_xlim(idxs[0], idxs[-1])
+                ax.grid()
             if 'IMAG' in what:
                 subplotidx = subplotidx + 1
                 ax = fig.add_subplot(1, len(what), subplotidx)
                 ax.plot(idxs, np.imag(u), **pyplotArgs)
                 ax.set_title("Im({0})".format(name))
+                ax.set_xlim(idxs[0], idxs[-1])
+                ax.grid()
             plt.tight_layout()
             if save is not None:
                 save = save.strip()
                 if forceNewFile:
                     fileOut = getNewFilename("{}_fig_".format(save),
                                              saveFormat)
                 else:
                     fileOut = "{}_fig.{}".format(save, saveFormat)
                 fig.savefig(fileOut, format = saveFormat, dpi = saveDPI)
             else: fileOut = None
             if show: plt.show()
             out = fig if fileOut is None else (fig, fileOut)
         return bcast(out)
 
 class ScipyEngineBaseTensorized(ScipyEngineBase):
     """The number of tensorized dimensions should be assigned to nports."""
 
     def plot(self, u:Np1D, *args, **kwargs) -> Tuple[FigHandle, str]:
         return super().plot(u.reshape(-1, checknports(self)), *args, **kwargs)
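
As a quick usage reference for the plotting API documented above, a hedged
sketch: `solver` stands for any concrete ScipyEngineBase subclass instance
(hypothetical here), and the dof vector is synthetic:

    import numpy as np
    u = np.exp(1j * np.linspace(0., 2. * np.pi, 200))  # illustrative complex dofs
    fig, fileOut = solver.plot(u, name = "u_test", what = ["ABS", "PHASE"],
                               save = "u_test", saveFormat = "png",
                               show = False, pyplotArgs = {"linewidth": 1.})

Since `save` is given, the call returns both the figure handle and the name of
the written file; with `save = None` only the figure handle is returned.
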
diff --git a/rrompy/reduction_methods/__init__.py b/rrompy/reduction_methods/__init__.py
index cc4c46c..b1e327e 100644
--- a/rrompy/reduction_methods/__init__.py
+++ b/rrompy/reduction_methods/__init__.py
@@ -1,42 +1,50 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from .standard import NearestNeighbor, RationalInterpolant, ReducedBasis
 from .standard.greedy import RationalInterpolantGreedy, ReducedBasisGreedy
 from .pivoted import (RationalInterpolantPivotedNoMatch,
+                      RationalInterpolantPivotedMatch,
                       RationalInterpolantPivotedPoleMatch,
                       RationalInterpolantGreedyPivotedNoMatch,
+                      RationalInterpolantGreedyPivotedMatch,
                       RationalInterpolantGreedyPivotedPoleMatch)
-from .pivoted.greedy import (RationalInterpolantPivotedGreedyPoleMatch,
+from .pivoted.greedy import (RationalInterpolantPivotedGreedyMatch,
+                             RationalInterpolantPivotedGreedyPoleMatch,
+                             RationalInterpolantGreedyPivotedGreedyMatch,
                              RationalInterpolantGreedyPivotedGreedyPoleMatch)
 
 __all__ = [
         'NearestNeighbor',
         'RationalInterpolant',
         'ReducedBasis',
         'RationalInterpolantGreedy',
         'ReducedBasisGreedy',
         'RationalInterpolantPivotedNoMatch',
+        'RationalInterpolantPivotedMatch',
         'RationalInterpolantPivotedPoleMatch',
         'RationalInterpolantGreedyPivotedNoMatch',
+        'RationalInterpolantGreedyPivotedMatch',
         'RationalInterpolantGreedyPivotedPoleMatch',
+        'RationalInterpolantPivotedGreedyMatch',
         'RationalInterpolantPivotedGreedyPoleMatch',
+        'RationalInterpolantGreedyPivotedGreedyMatch',
         'RationalInterpolantGreedyPivotedGreedyPoleMatch'
           ]
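
For reference, the complete matching-related import surface after this change
(names exactly as exported above):

    from rrompy.reduction_methods import (
                      RationalInterpolantPivotedMatch,
                      RationalInterpolantPivotedPoleMatch,
                      RationalInterpolantGreedyPivotedMatch,
                      RationalInterpolantGreedyPivotedPoleMatch,
                      RationalInterpolantPivotedGreedyMatch,
                      RationalInterpolantPivotedGreedyPoleMatch,
                      RationalInterpolantGreedyPivotedGreedyMatch,
                      RationalInterpolantGreedyPivotedGreedyPoleMatch)
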
 
 
diff --git a/rrompy/reduction_methods/pivoted/__init__.py b/rrompy/reduction_methods/pivoted/__init__.py
index ed82ffa..f714779 100644
--- a/rrompy/reduction_methods/pivoted/__init__.py
+++ b/rrompy/reduction_methods/pivoted/__init__.py
@@ -1,31 +1,35 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from .rational_interpolant_pivoted import (RationalInterpolantPivotedNoMatch,
+                                           RationalInterpolantPivotedMatch,
                                            RationalInterpolantPivotedPoleMatch)
 from .rational_interpolant_greedy_pivoted import (RationalInterpolantGreedyPivotedNoMatch,
+                                                  RationalInterpolantGreedyPivotedMatch,
                                                   RationalInterpolantGreedyPivotedPoleMatch)
 
 __all__ = [
         'RationalInterpolantPivotedNoMatch',
+        'RationalInterpolantPivotedMatch',
         'RationalInterpolantPivotedPoleMatch',
         'RationalInterpolantGreedyPivotedNoMatch',
+        'RationalInterpolantGreedyPivotedMatch',
         'RationalInterpolantGreedyPivotedPoleMatch'
           ]
 
 
diff --git a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py
index 29a319a..4e1c61f 100644
--- a/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py
+++ b/rrompy/reduction_methods/pivoted/generic_pivoted_approximant.py
@@ -1,812 +1,978 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from abc import abstractmethod
 from os import mkdir, remove, rmdir
 import numpy as np
 from collections.abc import Iterable
 from copy import deepcopy as copy
 from rrompy.reduction_methods.base.generic_approximant import (
                                                             GenericApproximant)
 from .trained_model.convert_trained_model_pivoted import (
                                                     convertTrainedModelPivoted)
 from rrompy.utilities.base.data_structures import purgeDict, getNewFilename
 from rrompy.utilities.poly_fitting.polynomial import polybases as ppb
 from rrompy.utilities.poly_fitting.radial_basis import polybases as rbpb
 from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk
 from rrompy.utilities.base.types import Np2D, paramList, List, ListAny
 from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.numerical.degree import reduceDegreeN
 from rrompy.utilities.exception_manager import RROMPyException, RROMPyWarning
 from rrompy.parameter import checkParameterList
 from rrompy.utilities.parallel import poolRank, bcast
 
 __all__ = ['GenericPivotedApproximantNoMatch',
+           'GenericPivotedApproximantMatch',
            'GenericPivotedApproximantPoleMatch']
 
 class GenericPivotedApproximantBase(GenericApproximant):
     def __init__(self, directionPivot:ListAny, *args,
                  storeAllSamples : bool = False, **kwargs):
         self._preInit()
         if len(directionPivot) > 1:
             raise RROMPyException(("Exactly 1 pivot parameter allowed in pole "
                                    "matching."))
         from rrompy.parameter.parameter_sampling import (EmptySampler as ES,
                                                        SparseGridSampler as SG)
         self._addParametersToList(["radialDirectionalWeightsMarginal"], [-1],
                                   ["samplerPivot", "SMarginal",
                                    "samplerMarginal"],
                                   [ES(), 1, SG([[-1.], [1.]])],
                                   toBeExcluded = ["sampler"])
         self._directionPivot = directionPivot
         self.storeAllSamples = storeAllSamples
         if not hasattr(self, "_output_lvl"): self._output_lvl = []
         self._output_lvl += [1 / 2]
         super().__init__(*args, **kwargs)
         self._postInit()
 
     def setupSampling(self): super().setupSampling(False)
 
     def initializeModelData(self, datadict):
         if "directionPivot" in datadict.keys():
             from .trained_model.trained_model_pivoted_data import (
                                                        TrainedModelPivotedData)
             data = TrainedModelPivotedData(datadict["mu0"], datadict["mus"],
                                            datadict.pop("projMat"),
                                            datadict["scaleFactor"],
                                            datadict.pop("parameterMap"),
                                            datadict["directionPivot"])
             return (data, ["mu0", "scaleFactor", "directionPivot", "mus"])
         else:
             return super().initializeModelData(datadict)
 
     @property
     def npar(self):
         """Number of parameters."""
         if hasattr(self, "_temporaryPivot"): return self.nparPivot
         return super().npar
 
     def checkParameterListPivot(self, mu:paramList,
                                 check_if_single : bool = False) -> paramList:
         return checkParameterList(mu, self.nparPivot, check_if_single)
 
     def checkParameterListMarginal(self, mu:paramList,
                                   check_if_single : bool = False) -> paramList:
         return checkParameterList(mu, self.nparMarginal, check_if_single)
 
     def mapParameterList(self, *args, **kwargs):
         if hasattr(self, "_temporaryPivot"):
             return self.mapParameterListPivot(*args, **kwargs)
         return super().mapParameterList(*args, **kwargs)
 
     def mapParameterListPivot(self, mu:paramList, direct : str = "F",
                               idx : List[int] = None):
         if idx is None:
             idx = self.directionPivot
         else:
             idx = [self.directionPivot[j] for j in idx]
         return super().mapParameterList(mu, direct, idx)
 
     def mapParameterListMarginal(self, mu:paramList, direct : str = "F",
                                  idx : List[int] = None):
         if idx is None:
             idx = self.directionMarginal
         else:
             idx = [self.directionMarginal[j] for j in idx]
         return super().mapParameterList(mu, direct, idx)
 
     @property
     def mu0(self):
         """Value of mu0."""
         if hasattr(self, "_temporaryPivot"):
             return self.checkParameterListPivot(self._mu0(self.directionPivot))
         return self._mu0
     @mu0.setter
     def mu0(self, mu0):
         GenericApproximant.mu0.fset(self, mu0)
 
     @property
     def mus(self):
         """Value of mus. Its assignment may reset snapshots."""
         return self._mus
     @mus.setter
     def mus(self, mus):
         mus = self.checkParameterList(mus)
         musOld =  copy(self.mus) if hasattr(self, '_mus') else None
         if (musOld is None or len(mus) != len(musOld) or not mus == musOld):
             self.resetSamples()
             self._mus = mus
 
     @property
     def musMarginal(self):
         """Value of musMarginal. Its assignment may reset snapshots."""
         return self._musMarginal
     @musMarginal.setter
     def musMarginal(self, musMarginal):
         musMarginal = self.checkParameterListMarginal(musMarginal)
         if hasattr(self, '_musMarginal'):
             musMOld =  copy(self.musMarginal)
         else:
             musMOld = None
         if (musMOld is None or len(musMarginal) != len(musMOld)
                             or not musMarginal == musMOld):
             self.resetSamples()
             self._musMarginal = musMarginal
 
     @property
     def SMarginal(self):
         """Value of SMarginal."""
         return self._SMarginal
     @SMarginal.setter
     def SMarginal(self, SMarginal):
         if SMarginal <= 0:
             raise RROMPyException("SMarginal must be positive.")
         if hasattr(self, "_SMarginal") and self._SMarginal is not None:
             Sold = self.SMarginal
         else: Sold = -1
         self._SMarginal = SMarginal
         self._approxParameters["SMarginal"] = self.SMarginal
         if Sold != self.SMarginal: self.resetSamples()
 
     @property
     def radialDirectionalWeightsMarginal(self):
         """Value of radialDirectionalWeightsMarginal."""
         return self._radialDirectionalWeightsMarginal
     @radialDirectionalWeightsMarginal.setter
     def radialDirectionalWeightsMarginal(self, radialDirWeightsMarg):
         if radialDirWeightsMarg == -1:
             radialDirWeightsMarg = [1.] * self.nparMarginal
         if isinstance(radialDirWeightsMarg, Iterable):
             radialDirWeightsMarg = list(radialDirWeightsMarg)
         else:
             radialDirWeightsMarg = [radialDirWeightsMarg]
         self._radialDirectionalWeightsMarginal = radialDirWeightsMarg
         self._approxParameters["radialDirectionalWeightsMarginal"] = (
                                          self.radialDirectionalWeightsMarginal)
 
     @property
     def directionPivot(self):
         """Value of directionPivot. Its assignment may reset snapshots."""
         return self._directionPivot
     @directionPivot.setter
     def directionPivot(self, directionPivot):
         if hasattr(self, '_directionPivot'):
             directionPivotOld = copy(self.directionPivot)
         else:
             directionPivotOld = None
         if (directionPivotOld is None
          or len(directionPivot) != len(directionPivotOld)
          or not directionPivot == directionPivotOld):
             self.resetSamples()
             self._directionPivot = directionPivot
 
     @property
     def directionMarginal(self):
         return [x for x in range(self.HFEngine.npar) \
                                                if x not in self.directionPivot]
 
     @property
     def nparPivot(self):
         return len(self.directionPivot)
 
     @property
     def nparMarginal(self):
         return self.npar - self.nparPivot
 
     @property
     def muBounds(self):
         """Value of muBounds."""
         return self.samplerPivot.lims
 
     @property
     def muBoundsMarginal(self):
         """Value of muBoundsMarginal."""
         return self.samplerMarginal.lims
 
     @property
     def sampler(self):
         """Proxy of samplerPivot."""
         return self._samplerPivot
 
     @property
     def samplerPivot(self):
         """Value of samplerPivot."""
         return self._samplerPivot
     @samplerPivot.setter
     def samplerPivot(self, samplerPivot):
         if 'generatePoints' not in dir(samplerPivot):
             raise RROMPyException("Pivot sampler type not recognized.")
         if hasattr(self, '_samplerPivot') and self._samplerPivot is not None:
             samplerOld = self.samplerPivot
         self._samplerPivot = samplerPivot
         self._approxParameters["samplerPivot"] = self.samplerPivot
         if not 'samplerOld' in locals() or samplerOld != self.samplerPivot:
             self.resetSamples()
     
     @property
     def samplerMarginal(self):
         """Value of samplerMarginal."""
         return self._samplerMarginal
     @samplerMarginal.setter
     def samplerMarginal(self, samplerMarginal):
         if 'generatePoints' not in dir(samplerMarginal):
             raise RROMPyException("Marginal sampler type not recognized.")
         if (hasattr(self, '_samplerMarginal')
         and self._samplerMarginal is not None):
             samplerOld = self.samplerMarginal
         self._samplerMarginal = samplerMarginal
         self._approxParameters["samplerMarginal"] = self.samplerMarginal
         if not 'samplerOld' in locals() or samplerOld != self.samplerMarginal:
             self.resetSamples()
     
     @property
     def matchState(self):
         """Utility value of matchState."""
         return False
     
     def computeScaleFactor(self):
         """Compute parameter rescaling factor."""
         self.scaleFactorPivot = .5 * np.abs((
                               self.mapParameterListPivot(self.muBounds[0])
                             - self.mapParameterListPivot(self.muBounds[1]))[0])
         self.scaleFactorMarginal = .5 * np.abs((
                    self.mapParameterListMarginal(self.muBoundsMarginal[0])
                  - self.mapParameterListMarginal(self.muBoundsMarginal[1]))[0])
         self.scaleFactor = np.empty(self.npar)
         self.scaleFactor[self.directionPivot] = self.scaleFactorPivot
         self.scaleFactor[self.directionMarginal] = self.scaleFactorMarginal
 
     def _setupTrainedModel(self, pMat:Np2D, pMatUpdate : bool = False,
                            pMatOld : Np2D = None, forceNew : bool = False):
         if forceNew or self.trainedModel is None:
             self.trainedModel = self.tModelType()
             self.trainedModel.verbosity = self.verbosity
             self.trainedModel.timestamp = self.timestamp
             datadict = {"mu0": self.mu0, "mus": copy(self.mus),
                         "projMat": pMat, "scaleFactor": self.scaleFactor,
                         "parameterMap": self.HFEngine.parameterMap,
                         "directionPivot": self.directionPivot}
             self.trainedModel.data = self.initializeModelData(datadict)[0]
         else:
             self.trainedModel = self.trainedModel
             if pMatUpdate:
                 self.trainedModel.data.projMat = np.hstack(
                                         (self.trainedModel.data.projMat, pMat))
             else:
                 self.trainedModel.data.projMat = copy(pMat)
             self.trainedModel.data.mus = copy(self.mus)
         self.trainedModel.data.musMarginal = copy(self.musMarginal)
 
     def addSamplePoints(self, mus:paramList):
         """Add global sample points to reduced model."""
         raise RROMPyException(("Cannot add global samples to pivoted reduced "
                                "model."))
 
     def normApprox(self, mu:paramList) -> float:
         _PODOld, self._POD = self.POD, 0
         result = super().normApprox(mu)
         self._POD = _PODOld
         return result
 
     @property
     def storedSamplesFilenames(self) -> List[str]:
         if not hasattr(self, "_sampleBaseFilename"): return []
         return [self._sampleBaseFilename
               + "{}_{}.pkl" .format(idx + 1, self.name())
                                        for idx in range(len(self.musMarginal))]
 
     def purgeStoredSamples(self):
         if not hasattr(self, "_sampleBaseFilename"): return
         for file in self.storedSamplesFilenames: remove(file)
         rmdir(self._sampleBaseFilename[: -8])
 
     def storeSamples(self, idx : int = None):
         """Store samples to file."""
         if not hasattr(self, "_sampleBaseFilename"):
             filenameBase = None
             if poolRank() == 0:
                 foldername = getNewFilename(self.name(), "samples")
                 mkdir(foldername)
                 filenameBase = foldername + "/sample_"
             self._sampleBaseFilename = bcast(filenameBase, force = True)
         if idx is not None:
             super().storeSamples(self._sampleBaseFilename + str(idx + 1),
                                  False)
 
     def loadTrainedModel(self, filename:str):
         """Load trained reduced model from file."""
         super().loadTrainedModel(filename)
         self._musMarginal = self.trainedModel.data.musMarginal
 
     def setTrainedModel(self, model):
         """Deepcopy approximation from trained model."""
         super().setTrainedModel(model)
         self.trainedModel = convertTrainedModelPivoted(self.trainedModel,
-                                                       self.tModelType, self,
-                                                       True)
+                                                       self.tModelType, True)
         self._preliminaryMarginalFinalization()
         self._finalizeMarginalization()
         self.trainedModel.data.approxParameters = self.approxParameters
 
 class GenericPivotedApproximantNoMatch(GenericPivotedApproximantBase):
     """
     ROM pivoted approximant (without pole matching) computation for parametric
         problems (ABSTRACT).
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
 
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
     """
 
     @property
     def tModelType(self):
         from .trained_model.trained_model_pivoted_rational_nomatch import (
                                             TrainedModelPivotedRationalNoMatch)
         return TrainedModelPivotedRationalNoMatch
 
     def _finalizeMarginalization(self):
         self.trainedModel.setupMarginalInterp(
                                        [self.radialDirectionalWeightsMarginal])
         self.trainedModel.data.approxParameters = copy(self.approxParameters)
 
     def _preliminaryMarginalFinalization(self):
         pass
 
-class GenericPivotedApproximantPoleMatch(GenericPivotedApproximantBase):
+class GenericPivotedApproximantMatch(GenericPivotedApproximantBase):
     """
-    ROM pivoted approximant (with pole matching) computation for parametric
+    ROM pivoted approximant (with generic matching) computation for parametric
         problems (ABSTRACT).
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'matchState': whether to match the system state rather than the
                 system output; defaults to False;
-            - 'matchingWeight': weight for pole matching optimization; defaults
-                to 1;
-            - 'matchingShared': required ratio of marginal points to share
-                resonance; defaults to 1.;
-            - 'badPoleCorrection': strategy for correction of bad poles;
-                available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL';
-                defaults to 'ERASE';
+            - 'matchingWeight': weight for matching; defaults to 1;
+            - 'matchingKind': kind of matching; allowed values include 'ROTATE'
+                and 'PROJECT'; defaults to 'ROTATE';
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator;
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation; allowed values include 'MONOMIAL_*',
                 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
                 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant; defaults to
                     'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                      defaults to 1; only for 'NEARESTNEIGHBOR';
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                     defaults to None; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights; only for
                     radial basis.
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
 
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'matchState': whether to match the system state rather than the
                 system output;
-            - 'matchingWeight': weight for pole matching optimization;
-            - 'matchingShared': required ratio of marginal points to share
-                resonance;
-            - 'badPoleCorrection': strategy for correction of bad poles;
+            - 'matchingWeight': weight for matching;
+            - 'matchingKind': kind of matching;
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation;
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant;
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal;
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights.
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         matchState: Whether to match the system state rather than the system
             output.
-        matchingWeight: Weight for pole matching optimization.
-        matchingShared: Required ratio of marginal points to share resonance.
-        badPoleCorrection: Strategy for correction of bad poles.
+        matchingWeight: Weight for matching.
+        matchingKind: Kind of matching.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator.
         polybasisMarginal: Type of polynomial basis for marginal interpolation.
         paramsMarginal: Dictionary of parameters for marginal interpolation.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
     """
 
     _allowedBadPoleCorrectionKinds = ["ERASE", "RATIONAL", "POLYNOMIAL"]
 
     def __init__(self, *args, **kwargs):
         self._preInit()
         self._addParametersToList(["matchState", "matchingWeight",
-                                   "matchingShared", "badPoleCorrection",
-                                   "polybasisMarginal", "paramsMarginal"],
-                                  [False, 1., 1., "ERASE", "MONOMIAL", {}])
+                                   "matchingKind", "polybasisMarginal",
+                                   "paramsMarginal"],
+                                  [False, 1., "ROTATE", "MONOMIAL", {}])
         self.parameterMarginalList = ["MMarginal", "nNeighborsMarginal",
                                       "polydegreetypeMarginal",
                                       "interpTolMarginal",
                                       "radialDirectionalWeightsMarginalAdapt"]
         super().__init__(*args, **kwargs)
         self._postInit()
 
     @property
     def tModelType(self):
-        from .trained_model.trained_model_pivoted_rational_polematch import (
-                                          TrainedModelPivotedRationalPoleMatch)
-        return TrainedModelPivotedRationalPoleMatch
+        from .trained_model.trained_model_pivoted_rational_match import (
+                                              TrainedModelPivotedRationalMatch)
+        return TrainedModelPivotedRationalMatch
 
     @property
     def matchState(self):
         """Value of matchState."""
         return self._matchState
     @matchState.setter
     def matchState(self, matchState):
         self._matchState = matchState
         self._approxParameters["matchState"] = self.matchState
 
     @property
     def matchingWeight(self):
         """Value of matchingWeight."""
         return self._matchingWeight
     @matchingWeight.setter
     def matchingWeight(self, matchingWeight):
         self._matchingWeight = matchingWeight
         self._approxParameters["matchingWeight"] = self.matchingWeight
 
     @property
-    def matchingShared(self):
-        """Value of matchingShared."""
-        return self._matchingShared
-    @matchingShared.setter
-    def matchingShared(self, matchingShared):
-        if matchingShared > 1.:
-            RROMPyWarning("Shared ratio too large. Clipping to 1.")
-            matchingShared = 1.
-        elif matchingShared < 0.:
-            RROMPyWarning("Shared ratio too small. Clipping to 0.")
-            matchingShared = 0.
-        self._matchingShared = matchingShared
-        self._approxParameters["matchingShared"] = self.matchingShared
-
-    @property
-    def badPoleCorrection(self):
-        """Value of badPoleCorrection."""
-        return self._badPoleCorrection
-    @badPoleCorrection.setter
-    def badPoleCorrection(self, badPoleC):
+    def matchingKind(self):
+        """Value of matchingKind."""
+        return self._matchingKind
+    @matchingKind.setter
+    def matchingKind(self, matchingKind):
         try:
-            badPoleC = badPoleC.upper().strip().replace(" ","")
-            if badPoleC not in self._allowedBadPoleCorrectionKinds:
-                raise RROMPyException(("Prescribed badPoleCorrection not "
-                                       "recognized."))
-            self._badPoleCorrection = badPoleC
+            matchingKind = matchingKind.upper().strip().replace(" ", "")
+            if matchingKind not in ["ROTATE", "PROJECT"]:
+                raise RROMPyException(
+                                    "Prescribed matching kind not recognized.")
+            self._matchingKind = matchingKind
         except:
-            RROMPyWarning(("Prescribed badPoleCorrection not recognized. "
-                           "Overriding to 'ERASE'."))
-            self._badPoleCorrection = "ERASE"
-        self._approxParameters["badPoleCorrection"] = self.badPoleCorrection
+            RROMPyWarning(("Prescribed matching kind not recognized. "
+                           "Overriding to 'ROTATE'."))
+            self._matchingKind = "ROTATE"
+        self._approxParameters["matchingKind"] = self.matchingKind
 
     @property
     def polybasisMarginal(self):
         """Value of polybasisMarginal."""
         return self._polybasisMarginal
     @polybasisMarginal.setter
     def polybasisMarginal(self, polybasisMarginal):
         try:
             polybasisMarginal = polybasisMarginal.upper().strip().replace(" ",
                                                                           "")
             if polybasisMarginal not in ppb + rbpb + ["NEARESTNEIGHBOR"] + sk:
                 raise RROMPyException(
                                "Prescribed marginal polybasis not recognized.")
             self._polybasisMarginal = polybasisMarginal
         except:
             RROMPyWarning(("Prescribed marginal polybasis not recognized. "
                            "Overriding to 'MONOMIAL'."))
             self._polybasisMarginal = "MONOMIAL"
         self._approxParameters["polybasisMarginal"] = self.polybasisMarginal
 
     @property
     def paramsMarginal(self):
         """Value of paramsMarginal."""
         return self._paramsMarginal
     @paramsMarginal.setter
     def paramsMarginal(self, paramsMarginal):
         paramsMarginal = purgeDict(paramsMarginal, self.parameterMarginalList,
                                    dictname = self.name() + ".paramsMarginal",
                                    baselevel = 1)
         keyList = list(paramsMarginal.keys())
         if not hasattr(self, "_paramsMarginal"): self._paramsMarginal = {}
         
         if "MMarginal" in keyList:
             MMarg = paramsMarginal["MMarginal"]
         elif ("MMarginal" in self.paramsMarginal
           and not hasattr(self, "_MMarginal_isauto")):
             MMarg = self.paramsMarginal["MMarginal"]
         else:
             MMarg = "AUTO"
         if isinstance(MMarg, str):
             MMarg = MMarg.strip().replace(" ","")
             if "-" not in MMarg: MMarg = MMarg + "-0"
             self._MMarginal_isauto = True
             self._MMarginal_shift = int(MMarg.split("-")[-1])
             MMarg = 0
         if MMarg < 0:
             raise RROMPyException("MMarginal must be non-negative.")
         self._paramsMarginal["MMarginal"] = MMarg
         
         if "nNeighborsMarginal" in keyList:
             self._paramsMarginal["nNeighborsMarginal"] = max(1,
                                           paramsMarginal["nNeighborsMarginal"])
         elif "nNeighborsMarginal" not in self.paramsMarginal:
             self._paramsMarginal["nNeighborsMarginal"] = 1
         
         if "polydegreetypeMarginal" in keyList:
             try:
                 polydegtypeM = paramsMarginal["polydegreetypeMarginal"]\
                                                .upper().strip().replace(" ","")
                 if polydegtypeM not in ["TOTAL", "FULL"]: 
                     raise RROMPyException(("Prescribed polydegreetypeMarginal "
                                            "not recognized."))
                 self._paramsMarginal["polydegreetypeMarginal"] = polydegtypeM
             except:
                 RROMPyWarning(("Prescribed polydegreetypeMarginal not "
                                "recognized. Overriding to 'TOTAL'."))
                 self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL"
         elif "polydegreetypeMarginal" not in self.paramsMarginal:
             self._paramsMarginal["polydegreetypeMarginal"] = "TOTAL"
 
         if "interpTolMarginal" in keyList:
             self._paramsMarginal["interpTolMarginal"] = (
                                            paramsMarginal["interpTolMarginal"])
         elif "interpTolMarginal" not in self.paramsMarginal:
             self._paramsMarginal["interpTolMarginal"] = -1
 
         if "radialDirectionalWeightsMarginalAdapt" in keyList:
             self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = (
                        paramsMarginal["radialDirectionalWeightsMarginalAdapt"])
         elif "radialDirectionalWeightsMarginalAdapt" not in self.paramsMarginal:
             self._paramsMarginal["radialDirectionalWeightsMarginalAdapt"] = [
                                                                       -1., -1.]
         self._approxParameters["paramsMarginal"] = self.paramsMarginal
 
     def _setMMarginalAuto(self):
         if (self.polybasisMarginal not in ppb + rbpb
          or "MMarginal" not in self.paramsMarginal
          or "polydegreetypeMarginal" not in self.paramsMarginal):
             raise RROMPyException(("Cannot set MMarginal if "
                                    "polybasisMarginal does not allow it."))
         self.paramsMarginal["MMarginal"] = max(0, reduceDegreeN(
                                  len(self.musMarginal), len(self.musMarginal),
                                  self.nparMarginal,
                                  self.paramsMarginal["polydegreetypeMarginal"])
                                                 - self._MMarginal_shift)
         vbMng(self, "MAIN", ("Automatically setting MMarginal to {}.").format(
                                          self.paramsMarginal["MMarginal"]), 25)
 
     def purgeparamsMarginal(self):
         self.paramsMarginal = {}
         paramsMbadkeys = []
         if self.polybasisMarginal in ppb + rbpb + sk:
             paramsMbadkeys += ["nNeighborsMarginal"]
         if self.polybasisMarginal not in rbpb:
             paramsMbadkeys += ["radialDirectionalWeightsMarginalAdapt"]
         if self.polybasisMarginal in ["NEARESTNEIGHBOR"] + sk:
             paramsMbadkeys += ["MMarginal", "polydegreetypeMarginal",
                                "interpTolMarginal"]
             if hasattr(self, "_MMarginal_isauto"): del self._MMarginal_isauto
             if hasattr(self, "_MMarginal_shift"): del self._MMarginal_shift
         for key in paramsMbadkeys:
             if key in self._paramsMarginal: del self._paramsMarginal[key]
         self._approxParameters["paramsMarginal"] = self.paramsMarginal
 
     def _finalizeMarginalization(self):
-        vbMng(self, "INIT", "Checking shared ratio.", 10)
-        msg = self.trainedModel.checkShared(self.matchingShared,
-                                           self.badPoleCorrection)
-        vbMng(self, "DEL", "Done checking. " + msg, 10)
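+        # Assemble the marginal interpolator options for the chosen basis
+        # family; the effective radial weights are the prescribed directional
+        # weights rescaled by the marginal scale factors.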
         if self.polybasisMarginal in rbpb + ["NEARESTNEIGHBOR"]:
             self.computeScaleFactor()
             rDWMEff = np.array([w * f for w, f in zip(
                                          self.radialDirectionalWeightsMarginal,
                                          self.scaleFactorMarginal)])
         if self.polybasisMarginal in ppb + rbpb + sk:
             interpPars = [self.polybasisMarginal]
             if self.polybasisMarginal in ppb + rbpb:
                 if self.polybasisMarginal in rbpb: interpPars += [rDWMEff]
                 interpPars += [self.verbosity >= 5,
                       self.paramsMarginal["polydegreetypeMarginal"] == "TOTAL"]
                 if self.polybasisMarginal in ppb:
                     interpPars += [{}]
                 else: # if self.polybasisMarginal in rbpb:
                     interpPars += [{"optimizeScalingBounds":self.paramsMarginal[
                                      "radialDirectionalWeightsMarginalAdapt"]}]
                 interpPars += [
                             {"rcond":self.paramsMarginal["interpTolMarginal"]}]
                 extraPar = hasattr(self, "_MMarginal_isauto")
             else: # if self.polybasisMarginal in sk:
                 idxEff = [x for x in range(self.samplerMarginal.npoints)
                                   if not hasattr(self.trainedModel, "_idxExcl")
                                   or x not in self.trainedModel._idxExcl]
                 extraPar = self.samplerMarginal.depth[idxEff]
         else: # if self.polybasisMarginal == "NEARESTNEIGHBOR":
             interpPars = [self.paramsMarginal["nNeighborsMarginal"], rDWMEff]
             extraPar = None
         self.trainedModel.setupMarginalInterp(self, interpPars, extraPar)
         self.trainedModel.data.approxParameters = copy(self.approxParameters)
 
     def _preliminaryMarginalFinalization(self):
-        vbMng(self, "INIT", "Compressing and matching poles.", 10)
+        vbMng(self, "INIT", "Matching rational functions.", 10)
         self.trainedModel.initializeFromRational(self.matchingWeight,
+                                                 self.matchingKind,
                                                  self.HFEngine,
                                                  self.matchState)
-        vbMng(self, "DEL", "Done compressing and matching poles.", 10)
+        vbMng(self, "DEL", "Done matching rational functions.", 10)
 
     def _postApplyC(self):
         if self.POD == 1 and not (
                         hasattr(self.HFEngine.C, "is_mu_independent")
                     and self.HFEngine.C.is_mu_independent in self._output_lvl):
             raise RROMPyException(("Cannot apply mu-dependent C to "
                                    "orthonormalized samples."))
         vbMng(self, "INIT", "Extracting system output from state.", 35)
         pMat = None
         for j, mu in enumerate(self.trainedModel.data.mus):
             pMatj = self.trainedModel.data.projMat[:, j]
             pMatj = np.expand_dims(self.HFEngine.applyC(pMatj, mu), -1)
             if pMat is None:
                 pMat = np.array(pMatj)
             else:
                 pMat = np.append(pMat, pMatj, axis = 1)
         vbMng(self, "DEL", "Done extracting system output.", 35)
         self.trainedModel.data.projMat = pMat
 
     @abstractmethod
     def setupApprox(self, *args, **kwargs) -> int:
         if self.checkComputedApprox(): return -1
         self.purgeparamsMarginal()
         setupOK = super().setupApprox(*args, **kwargs)
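+        # If matching was performed on the state, apply the output operator C
+        # to the stored snapshots so that the projection matrix maps to
+        # system outputs.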
         if self.matchState: self._postApplyC()
         return setupOK
+
+class GenericPivotedApproximantPoleMatch(GenericPivotedApproximantMatch):
+    """
+    ROM pivoted approximant (with pole matching) computation for parametric
+        problems (ABSTRACT).
+
+    Args:
+        HFEngine: HF problem solver.
+        mu0(optional): Default parameter. Defaults to 0.
+        directionPivot(optional): Pivot components. Defaults to [0].
+        approxParameters(optional): Dictionary containing values for main
+            parameters of approximant. Recognized keys are:
+            - 'POD': kind of snapshots orthogonalization; allowed values
+                include 0, 1/2, and 1; defaults to 1, i.e. POD;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+                defaults to 'AUTO';
+            - 'matchState': whether to match the system state rather than the
+                system output; defaults to False;
+            - 'matchingWeight': weight for pole matching optimization; defaults
+                to 1;
+            - 'matchingShared': required ratio of marginal points to share
+                resonance; defaults to 1.;
+            - 'badPoleCorrection': strategy for correction of bad poles;
+                available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL';
+                defaults to 'ERASE';
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator;
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation; allowed values include 'MONOMIAL_*',
+                'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
+                'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant; defaults to
+                    'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                     defaults to 1; only for 'NEARESTNEIGHBOR';
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                    defaults to None; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights; only for
+                    radial basis.
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant; defaults to 1.
+            Defaults to empty dict.
+        verbosity(optional): Verbosity level. Defaults to 10.
+
+    Attributes:
+        HFEngine: HF problem solver.
+        mu0: Default parameter.
+        directionPivot: Pivot components.
+        mus: Array of snapshot parameters.
+        musMarginal: Array of marginal snapshot parameters.
+        approxParameters: Dictionary containing values for main parameters of
+            approximant. Recognized keys are in parameterList.
+        parameterListSoft: Recognized keys of soft approximant parameters:
+            - 'POD': kind of snapshots orthogonalization;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+            - 'matchState': whether to match the system state rather than the
+                system output;
+            - 'matchingWeight': weight for pole matching optimization;
+            - 'matchingShared': required ratio of marginal points to share
+                resonance;
+            - 'badPoleCorrection': strategy for correction of bad poles;
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation;
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant;
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal;
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights.
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant.
+        parameterListCritical: Recognized keys of critical approximant
+            parameters:
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator.
+        verbosity: Verbosity level.
+        POD: Kind of snapshots orthogonalization.
+        scaleFactorDer: Scaling factors for derivative computation.
+        matchState: Whether to match the system state rather than the system
+            output.
+        matchingWeight: Weight for pole matching optimization.
+        matchingShared: Required ratio of marginal points to share resonance.
+        badPoleCorrection: Strategy for correction of bad poles.
+        S: Total number of pivot samples current approximant relies upon.
+        samplerPivot: Pivot sample point generator.
+        SMarginal: Total number of marginal samples current approximant relies
+            upon.
+        samplerMarginal: Marginal sample point generator.
+        polybasisMarginal: Type of polynomial basis for marginal interpolation.
+        paramsMarginal: Dictionary of parameters for marginal interpolation.
+        radialDirectionalWeightsMarginal: Radial basis weights for marginal
+            interpolant.
+        muBounds: list of bounds for pivot parameter values.
+        muBoundsMarginal: list of bounds for marginal parameter values.
+        samplingEngine: Sampling engine.
+        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
+            sampleList.
+        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
+            solution(s) as parameterList.
+        uApproxReduced: Reduced approximate solution(s) with parameter(s)
+            lastSolvedApprox as sampleList.
+        lastSolvedApproxReduced: Parameter(s) corresponding to last computed
+            reduced approximate solution(s) as parameterList.
+        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
+            sampleList.
+        lastSolvedApprox: Parameter(s) corresponding to last computed
+            approximate solution(s) as parameterList.
+    """
+
+    _allowedBadPoleCorrectionKinds = ["ERASE", "RATIONAL", "POLYNOMIAL"]
+
+    def __init__(self, *args, **kwargs):
+        self._preInit()
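+        # Pole matching replaces the generic "matchingKind" option with the
+        # "matchingShared" ratio and the "badPoleCorrection" strategy.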
+        self._addParametersToList(["matchingShared", "badPoleCorrection"],
+                                  [1., "ERASE"],
+                                  toBeExcluded = ["matchingKind"])
+        super().__init__(*args, **kwargs)
+        self._postInit()
+
+    @property
+    def tModelType(self):
+        from .trained_model.trained_model_pivoted_rational_polematch import (
+                                          TrainedModelPivotedRationalPoleMatch)
+        return TrainedModelPivotedRationalPoleMatch
+
+    @property
+    def matchingShared(self):
+        """Value of matchingShared."""
+        return self._matchingShared
+    @matchingShared.setter
+    def matchingShared(self, matchingShared):
+        if matchingShared > 1.:
+            RROMPyWarning("Shared ratio too large. Clipping to 1.")
+            matchingShared = 1.
+        elif matchingShared < 0.:
+            RROMPyWarning("Shared ratio too small. Clipping to 0.")
+            matchingShared = 0.
+        self._matchingShared = matchingShared
+        self._approxParameters["matchingShared"] = self.matchingShared
+
+    @property
+    def badPoleCorrection(self):
+        """Value of badPoleCorrection."""
+        return self._badPoleCorrection
+    @badPoleCorrection.setter
+    def badPoleCorrection(self, badPoleC):
+        try:
+            badPoleC = badPoleC.upper().strip().replace(" ","")
+            if badPoleC not in self._allowedBadPoleCorrectionKinds:
+                raise RROMPyException(("Prescribed badPoleCorrection not "
+                                       "recognized."))
+            self._badPoleCorrection = badPoleC
+        except:
+            RROMPyWarning(("Prescribed badPoleCorrection not recognized. "
+                           "Overriding to 'ERASE'."))
+            self._badPoleCorrection = "ERASE"
+        self._approxParameters["badPoleCorrection"] = self.badPoleCorrection
+
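+    # Before delegating to the generic finalization, check which poles are
+    # shared by a large enough ratio of marginal samples and correct the
+    # remaining ones according to badPoleCorrection.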
+    def _finalizeMarginalization(self):
+        vbMng(self, "INIT", "Checking shared ratio.", 10)
+        msg = self.trainedModel.checkShared(self.matchingShared,
+                                            self.badPoleCorrection)
+        vbMng(self, "DEL", "Done checking. " + msg, 10)
+        super()._finalizeMarginalization()
+
+    def _preliminaryMarginalFinalization(self):
+        vbMng(self, "INIT", "Matching poles and residues.", 10)
+        self.trainedModel.initializeFromRational(self.matchingWeight,
+                                                 self.HFEngine,
+                                                 self.matchState)
+        vbMng(self, "DEL", "Done matching poles and residues.", 10)
+
diff --git a/rrompy/reduction_methods/pivoted/greedy/__init__.py b/rrompy/reduction_methods/pivoted/greedy/__init__.py
index 80b0c7c..0552b2c 100644
--- a/rrompy/reduction_methods/pivoted/greedy/__init__.py
+++ b/rrompy/reduction_methods/pivoted/greedy/__init__.py
@@ -1,27 +1,31 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
-from .rational_interpolant_pivoted_greedy import RationalInterpolantPivotedGreedyPoleMatch
-from .rational_interpolant_greedy_pivoted_greedy import RationalInterpolantGreedyPivotedGreedyPoleMatch
+from .rational_interpolant_pivoted_greedy import (RationalInterpolantPivotedGreedyMatch,
+                                                  RationalInterpolantPivotedGreedyPoleMatch)
+from .rational_interpolant_greedy_pivoted_greedy import (RationalInterpolantGreedyPivotedGreedyMatch,
+                                                         RationalInterpolantGreedyPivotedGreedyPoleMatch)
 
 __all__ = [
+        'RationalInterpolantPivotedGreedyMatch',
         'RationalInterpolantPivotedGreedyPoleMatch',
+        'RationalInterpolantGreedyPivotedGreedyMatch',
         'RationalInterpolantGreedyPivotedGreedyPoleMatch'
           ]
 
 
diff --git a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py
index a82405f..f3f5cfc 100644
--- a/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py
+++ b/rrompy/reduction_methods/pivoted/greedy/generic_pivoted_greedy_approximant.py
@@ -1,666 +1,896 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from abc import abstractmethod
 from copy import deepcopy as copy
 import numpy as np
 from collections.abc import Iterable
 from matplotlib import pyplot as plt
 from rrompy.reduction_methods.pivoted.generic_pivoted_approximant import (
                                             GenericPivotedApproximantBase,
+                                            GenericPivotedApproximantMatch,
                                             GenericPivotedApproximantPoleMatch)
 from rrompy.reduction_methods.pivoted.gather_pivoted_approximant import (
                                                       gatherPivotedApproximant)
 from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal,
                                          paramList, ListAny)
 from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.numerical import dot
 from rrompy.utilities.numerical.point_matching import pointMatching
 from rrompy.utilities.numerical.point_distances import doubleDistanceMatrix
 from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
                                                 RROMPyWarning)
 from rrompy.parameter import emptyParameterList
 from rrompy.utilities.parallel import (masterCore, indicesScatter,
                                        arrayGatherv, isend)
 
-__all__ = ['GenericPivotedGreedyApproximantPoleMatch']
+__all__ = ['GenericPivotedGreedyApproximantMatch',
+           'GenericPivotedGreedyApproximantPoleMatch']
 
 class GenericPivotedGreedyApproximantBase(GenericPivotedApproximantBase):
     _allowedEstimatorKindsMarginal = ["LOOK_AHEAD", "LOOK_AHEAD_RECOVER",
                                       "NONE"]
 
     def __init__(self, *args, **kwargs):
         self._preInit()
         self._addParametersToList(["matchingWeightError",
+                                   "matchingErrorRelative",
                                    "errorEstimatorKindMarginal",
                                    "greedyTolMarginal", "maxIterMarginal",
                                    "autoCollapse"],
-                                  [0., "NONE", 1e-1, 1e2, False])
+                                  [0., False, "NONE", 1e-1, 1e2, False])
         super().__init__(*args, **kwargs)
         self._postInit()
 
     @property
     def scaleFactorDer(self):
         """Value of scaleFactorDer."""
         if self._scaleFactorDer == "NONE": return 1.
         if self._scaleFactorDer == "AUTO": return self._scaleFactorOldPivot
         return self._scaleFactorDer
     @scaleFactorDer.setter
     def scaleFactorDer(self, scaleFactorDer):
         if isinstance(scaleFactorDer, (str,)):
             scaleFactorDer = scaleFactorDer.upper()
         elif isinstance(scaleFactorDer, Iterable):
             scaleFactorDer = list(scaleFactorDer)
         self._scaleFactorDer = scaleFactorDer
         self._approxParameters["scaleFactorDer"] = self._scaleFactorDer
 
     @property
     def samplerMarginal(self):
         """Value of samplerMarginal."""
         return self._samplerMarginal
     @samplerMarginal.setter
     def samplerMarginal(self, samplerMarginal):
         if 'refine' not in dir(samplerMarginal):
             raise RROMPyException("Marginal sampler type not recognized.")
         GenericPivotedApproximantBase.samplerMarginal.fset(self,
                                                            samplerMarginal)
 
     @property
     def errorEstimatorKindMarginal(self):
         """Value of errorEstimatorKindMarginal."""
         return self._errorEstimatorKindMarginal
     @errorEstimatorKindMarginal.setter
     def errorEstimatorKindMarginal(self, errorEstimatorKindMarginal):
         errorEstimatorKindMarginal = errorEstimatorKindMarginal.upper()
         if errorEstimatorKindMarginal not in (
                                           self._allowedEstimatorKindsMarginal):
             RROMPyWarning(("Marginal error estimator kind not recognized. "
                            "Overriding to 'NONE'."))
             errorEstimatorKindMarginal = "NONE"
         self._errorEstimatorKindMarginal = errorEstimatorKindMarginal
         self._approxParameters["errorEstimatorKindMarginal"] = (
                                                self.errorEstimatorKindMarginal)
 
     @property
     def matchingWeightError(self):
         """Value of matchingWeightError."""
         return self._matchingWeightError
     @matchingWeightError.setter
     def matchingWeightError(self, matchingWeightError):
         self._matchingWeightError = matchingWeightError
         self._approxParameters["matchingWeightError"] = (
                                                       self.matchingWeightError)
 
+    @property
+    def matchingErrorRelative(self):
+        """Value of matchingErrorRelative."""
+        return self._matchingErrorRelative
+    @matchingErrorRelative.setter
+    def matchingErrorRelative(self, matchingErrorRelative):
+        self._matchingErrorRelative = matchingErrorRelative
+        self._approxParameters["matchingErrorRelative"] = (
+                                                    self.matchingErrorRelative)
+
     @property
     def greedyTolMarginal(self):
         """Value of greedyTolMarginal."""
         return self._greedyTolMarginal
     @greedyTolMarginal.setter
     def greedyTolMarginal(self, greedyTolMarginal):
         if greedyTolMarginal < 0:
             raise RROMPyException("greedyTolMarginal must be non-negative.")
         if (hasattr(self, "_greedyTolMarginal")
         and self.greedyTolMarginal is not None):
             greedyTolMarginalold = self.greedyTolMarginal
         else:
             greedyTolMarginalold = -1
         self._greedyTolMarginal = greedyTolMarginal
         self._approxParameters["greedyTolMarginal"] = self.greedyTolMarginal
         if greedyTolMarginalold != self.greedyTolMarginal:
             self.resetSamples()
 
     @property
     def maxIterMarginal(self):
         """Value of maxIterMarginal."""
         return self._maxIterMarginal
     @maxIterMarginal.setter
     def maxIterMarginal(self, maxIterMarginal):
         if maxIterMarginal <= 0:
             raise RROMPyException("maxIterMarginal must be positive.")
         if (hasattr(self, "_maxIterMarginal")
         and self.maxIterMarginal is not None):
             maxIterMarginalold = self.maxIterMarginal
         else:
             maxIterMarginalold = -1
         self._maxIterMarginal = maxIterMarginal
         self._approxParameters["maxIterMarginal"] = self.maxIterMarginal
         if maxIterMarginalold != self.maxIterMarginal:
             self.resetSamples()
 
     @property
     def autoCollapse(self):
         """Value of autoCollapse."""
         return self._autoCollapse
     @autoCollapse.setter
     def autoCollapse(self, autoCollapse):
         self._autoCollapse = autoCollapse
         self._approxParameters["autoCollapse"] = self.autoCollapse
 
     def resetSamples(self):
         """Reset samples."""
         super().resetSamples()
         if not hasattr(self, "_temporaryPivot"):
             self._mus = emptyParameterList()
             self._musMarginal = emptyParameterList()
             if hasattr(self, "samplerMarginal"): self.samplerMarginal.reset()
         if hasattr(self, "samplingEngine") and self.samplingEngine is not None:
             self.samplingEngine.resetHistory()
 
-    def _getDistanceApp(self, polesEx:Np1D, resEx:Np2D,
-                        muTest:paramVal) -> float:
-        polesAp = self.trainedModel.interpolateMarginalPoles(muTest)[0]
-        if self.matchingWeightError != 0:
-            resAp = self.trainedModel.interpolateMarginalCoeffs(muTest)[0][
-                                                             : len(polesAp), :]
-            resEx = dot(self.trainedModel.data.projMat, resEx)
-            resAp = dot(self.trainedModel.data.projMat, resAp)
-        else:
-            resAp = None
-        dist = doubleDistanceMatrix(polesEx, polesAp, self.matchingWeightError,
-                                    resEx, resAp, self.HFEngine, False)
-        pmR, pmC = pointMatching(dist)
-        return np.mean(dist[pmR, pmC])
-    
+    @abstractmethod
     def getErrorEstimatorMarginalLookAhead(self) -> Np1D:
-        if not hasattr(self.trainedModel, "_musMExcl"):
-            err = np.zeros(0)
-            err[:] = np.inf
-            self._musMarginalTestIdxs = np.zeros(0, dtype = int)
-            return err
-        self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl,
-                                             dtype = int)
-        idx, sizes = indicesScatter(len(self.trainedModel._musMExcl),
-                                    return_sizes = True)
-        err = []
-        if len(idx) > 0:
-            self.verbosity -= 25
-            self.trainedModel.verbosity -= 25
-            for j in idx:
-                muTest = self.trainedModel._musMExcl[j]
-                HITest = self.trainedModel._HIsExcl[j]
-                polesEx = HITest.poles
-                idxGood = np.isinf(polesEx) + np.isnan(polesEx) == False
-                polesEx = polesEx[idxGood]
-                if self.matchingWeightError != 0:
-                    resEx = HITest.coeffs[np.where(idxGood)[0]]
-                else:
-                    resEx = None
-                if len(polesEx) == 0:
-                    err += [0.]
-                    continue
-                err += [self._getDistanceApp(polesEx, resEx, muTest)]
-            self.verbosity += 25
-            self.trainedModel.verbosity += 25
-        return arrayGatherv(np.array(err), sizes)
+        pass
 
     def getErrorEstimatorMarginalNone(self) -> Np1D:
         nErr = len(self.trainedModel.data.musMarginal)
         self._musMarginalTestIdxs = np.arange(nErr)
         return (1. + self.greedyTolMarginal) * np.ones(nErr)
 
     def errorEstimatorMarginal(self, return_max : bool = False) -> Np1D:
         vbMng(self.trainedModel, "INIT",
               "Evaluating error estimator at mu = {}.".format(
                                        self.trainedModel.data.musMarginal), 10)
         if self.errorEstimatorKindMarginal == "NONE":
             nErr = len(self.trainedModel.data.musMarginal)
             self._musMarginalTestIdxs = np.arange(nErr)
             err = (1. + self.greedyTolMarginal) * np.ones(nErr)
         else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD":
             err = self.getErrorEstimatorMarginalLookAhead()
         vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10)
         if not return_max: return err
         idxMaxEst = np.where(err > self.greedyTolMarginal)[0]
         maxErr = err[idxMaxEst]
         if self.errorEstimatorKindMarginal == "NONE": maxErr = None
         return err, idxMaxEst, maxErr
 
     def plotEstimatorMarginal(self, est:Np1D, idxMax:List[int],
                               estMax:List[float]):
         if self.errorEstimatorKindMarginal == "NONE": return
         if (not (np.any(np.isnan(est)) or np.any(np.isinf(est)))
         and masterCore() and hasattr(self.trainedModel, "_musMExcl")):
             fig = plt.figure(figsize = plt.figaspect(1. / self.nparMarginal))
             for jpar in range(self.nparMarginal):
                 ax = fig.add_subplot(1, self.nparMarginal, 1 + jpar)
                 musre = np.real(self.trainedModel._musMExcl)
                 if len(idxMax) > 0 and estMax is not None:
                     maxrej = musre[idxMax, jpar]
                 errCP = copy(est)
                 idx = np.delete(np.arange(self.nparMarginal), jpar)
                 while len(musre) > 0:
                     if self.nparMarginal == 1:
                         currIdx = np.arange(len(musre))
                     else:
                         currIdx = np.where(np.isclose(np.sum(
                                      np.abs(musre[:, idx] - musre[0, idx]), 1),
                                                       0., atol = 1e-15))[0]
                     currIdxSorted = currIdx[np.argsort(musre[currIdx, jpar])]
                     ax.semilogy(musre[currIdxSorted, jpar],
                                 errCP[currIdxSorted], 'k.-', linewidth = 1)
                     musre = np.delete(musre, currIdx, 0)
                     errCP = np.delete(errCP, currIdx)
                 ax.semilogy(self.musMarginal.re(jpar),
                             (self.greedyTolMarginal,) * len(self.musMarginal),
                             '*m')
                 if len(idxMax) > 0 and estMax is not None:
                     ax.semilogy(maxrej, estMax, 'xr')
                 ax.set_xlim(*list(self.samplerMarginal.lims.re(jpar)))
                 ax.grid()
             plt.tight_layout()
             plt.show()
 
+    def _updateTrainedModelMarginalSamples(self, idx : ListAny = []):
+        self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight,
+                                                 self.HFEngine,
+                                                 self.matchState)
+
     def _addMarginalSample(self, mus:paramList):
         mus = self.checkParameterListMarginal(mus)
         if len(mus) == 0: return
         self._nmusOld, nmus = len(self.musMarginal), len(mus)
         if (hasattr(self, "trainedModel") and self.trainedModel is not None
         and hasattr(self.trainedModel, "_musMExcl")):
             self._nmusOld += len(self.trainedModel._musMExcl)
         vbMng(self, "MAIN",
               ("Adding marginal sample point{} no. {}{} at {} to training "
                "set.").format("s" * (nmus > 1), self._nmusOld + 1,
                               "--{}".format(self._nmusOld + nmus) * (nmus > 1),
                               mus), 3)
         self.musMarginal.append(mus)
         self.setupApproxPivoted(mus)
         self._preliminaryMarginalFinalization()
         del self._nmusOld
         if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD"
         and not self.firstGreedyIterM):
             ubRange = len(self.trainedModel.data.musMarginal)
             if hasattr(self.trainedModel, "_idxExcl"):
                 shRange = len(self.trainedModel._musMExcl)
             else:
                 shRange = 0
             testIdxs = list(range(ubRange + shRange - len(mus),
                                   ubRange + shRange))
             for j in testIdxs[::-1]:
                 self.musMarginal.pop(j - shRange)
             if hasattr(self.trainedModel, "_idxExcl"):
                 testIdxs = self.trainedModel._idxExcl + testIdxs
             self._updateTrainedModelMarginalSamples(testIdxs)
         self._finalizeMarginalization()
         self._SMarginal = len(self.musMarginal)
         self._approxParameters["SMarginal"] = self.SMarginal
         self.trainedModel.data.approxParameters["SMarginal"] = self.SMarginal
 
     def greedyNextSampleMarginal(self, muidx:List[int],
                                  plotEst : str = "NONE") \
                                     -> Tuple[Np1D, List[int], float, paramVal]:
         RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
         muidx = self._musMarginalTestIdxs[muidx]
         if (self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD"
         and not self.firstGreedyIterM):
             if not hasattr(self.trainedModel, "_idxExcl"):
                 raise RROMPyException(("Sample index to be added not present "
                                        "in trained model."))
             testIdxs = copy(self.trainedModel._idxExcl)
             skippedIdx = 0
             for cj, j in enumerate(self.trainedModel._idxExcl):
                 if j in muidx:
                     testIdxs.pop(skippedIdx)
                     self.musMarginal.insert(self.trainedModel._musMExcl[cj],
                                             j - skippedIdx)
                 else:
                     skippedIdx += 1
             if len(self.trainedModel._idxExcl) < (len(muidx)
                                                 + len(testIdxs)):
                 raise RROMPyException(("Sample index to be added not present "
                                        "in trained model."))
             self._updateTrainedModelMarginalSamples(testIdxs)
             self._SMarginal = len(self.musMarginal)
             self._approxParameters["SMarginal"] = self.SMarginal
             self.trainedModel.data.approxParameters["SMarginal"] = (
                                                                 self.SMarginal)
         self.firstGreedyIterM = False
         idxAdded = self.samplerMarginal.refine(muidx)[0]
         self._addMarginalSample(self.samplerMarginal.points[idxAdded])
         errorEstTest, muidx, maxErrorEst = self.errorEstimatorMarginal(True)
         if plotEst == "ALL":
             self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst)
         return (errorEstTest, muidx, maxErrorEst,
                 self.samplerMarginal.points[muidx])
                                               
     def _preliminaryTrainingMarginal(self):
         """Initialize starting snapshots of solution map."""
         RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
         if np.sum(self.samplingEngine.nsamples) > 0: return
         self.resetSamples()
         self._addMarginalSample(self.samplerMarginal.generatePoints(
                                                                self.SMarginal))
 
     def _preSetupApproxPivoted(self, mus:paramList) \
                                            -> Tuple[ListAny, ListAny, ListAny]:
         self.computeScaleFactor()
         if self.trainedModel is None:
             self._setupTrainedModel(np.zeros((0, 0)))
             self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], []
             self.trainedModel.data.Psupp = []
         self._trainedModelOld = copy(self.trainedModel)
         self._scaleFactorOldPivot = copy(self.scaleFactor)
         self.scaleFactor = self.scaleFactorPivot
         self._temporaryPivot = 1
         self._musLoc = copy(self.mus)
         idx, sizes = indicesScatter(len(mus), return_sizes = True)
         emptyCores = np.where(sizes == 0)[0]
         self.verbosity -= 10
         self.samplingEngine.verbosity -= 10
         return idx, sizes, emptyCores
 
     def _postSetupApproxPivoted(self, mus:Np2D, pMat:Np2D, Ps:ListAny,
                                 Qs:ListAny, sizes:ListAny):
         self.scaleFactor = self._scaleFactorOldPivot
         del self._scaleFactorOldPivot, self._temporaryPivot
         pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs,
                                                                mus, sizes,
                                                                self.polybasis)
         if len(self._musLoc) > 0:
             self._mus = self.checkParameterList(self._musLoc)
             self._mus.append(mus)
         else:
             self._mus = self.checkParameterList(mus)
         self.trainedModel = self._trainedModelOld
         del self._trainedModelOld
         if not self.matchState and self.autoCollapse:
             pMat, padLeft, suppNew = 1., 0, [0] * len(nsamples)
         else:
             padLeft = self.trainedModel.data.projMat.shape[1]
             suppNew = list(padLeft + np.append(0, np.cumsum(nsamples[: -1])))
         self._setupTrainedModel(pMat, padLeft > 0)
         if not self.matchState and self.autoCollapse:
             self.trainedModel.data._collapsed = True
         self.trainedModel.data.Qs += Qs
         self.trainedModel.data.Ps += Ps
         self.trainedModel.data.Psupp += suppNew
         self.trainedModel.data.approxParameters = copy(self.approxParameters)
         self.verbosity += 10
         self.samplingEngine.verbosity += 10
 
     def _localPivotedResult(self, pMat:Np2D, req:ListAny, emptyCores:ListAny,
                             mus:Np2D) -> Tuple[Np2D, ListAny, Np2D]:
         pMati = self.samplingEngine.projectionMatrix
         musi = self.samplingEngine.mus
         if not self.matchState:
             if self.POD == 1 and not (
                 hasattr(self.HFEngine.C, "is_mu_independent")
             and self.HFEngine.C.is_mu_independent in self._output_lvl):
                 raise RROMPyException(("Cannot apply mu-dependent C "
                                        "to orthonormalized samples."))
             vbMng(self, "INIT", "Extracting system output from state.", 35)
             pMatiEff = None
             for j, mu in enumerate(musi):
                 pMij = np.expand_dims(self.HFEngine.applyC(pMati[:, j], mu),
                                       -1)
                 if pMatiEff is None:
                     pMatiEff = np.array(pMij)
                 else:
                     pMatiEff = np.append(pMatiEff, pMij, axis = 1)
             pMati = pMatiEff
             vbMng(self, "DEL", "Done extracting system output.", 35)
         if pMat is None:
             mus = copy(musi.data)
             pMat = copy(pMati)
             if masterCore():
                 for dest in emptyCores:
                     req += [isend((len(pMat), pMat.dtype, mus.dtype),
                                   dest = dest, tag = dest)]
         else:
             mus = np.vstack((mus, musi.data))
             if not self.matchState and self.autoCollapse:
                 pMat = copy(pMati)
             else:
                 pMat = np.hstack((pMat, pMati))
         return pMat, req, mus
     
     @abstractmethod
     def setupApproxPivoted(self, mus:paramList) -> int:
         if self.checkComputedApproxPivoted(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
         vbMng(self, "INIT", "Setting up pivoted approximant.", 10)
         self._preSetupApproxPivoted()
         data = []
         pass
         self._postSetupApproxPivoted(mus, data)
         vbMng(self, "DEL", "Done setting up pivoted approximant.", 10)
         return 0
 
     def setupApprox(self, plotEst : str = "NONE") -> int:
         """Compute greedy snapshots of solution map."""
         if self.checkComputedApprox(): return -1
         RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
         vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5)
         vbMng(self, "INIT", "Starting computation of snapshots.", 5)
         max2ErrorEst, self.firstGreedyIterM = np.inf, True
         self._preliminaryTrainingMarginal()
         if self.errorEstimatorKindMarginal == "NONE":
             muidx = []
         else:#if self.errorEstimatorKindMarginal[: 10] == "LOOK_AHEAD":
             muidx = np.arange(len(self.trainedModel.data.musMarginal))
         self._musMarginalTestIdxs = np.array(muidx)
         while self.firstGreedyIterM or (max2ErrorEst > self.greedyTolMarginal
                       and self.samplerMarginal.npoints < self.maxIterMarginal):
             errorEstTest, muidx, maxErrorEst, mu = \
                                   self.greedyNextSampleMarginal(muidx, plotEst)
             if maxErrorEst is None:
                 max2ErrorEst = 1. + self.greedyTolMarginal
             else:
                 if len(maxErrorEst) > 0:
                     max2ErrorEst = np.max(maxErrorEst)
                 else:
                     max2ErrorEst = np.max(errorEstTest)
                 vbMng(self, "MAIN", ("Uniform testing error estimate "
                                      "{:.4e}.").format(max2ErrorEst), 5)
         if plotEst == "LAST":
             self.plotEstimatorMarginal(errorEstTest, muidx, maxErrorEst)
         vbMng(self, "DEL", ("Done computing snapshots (final snapshot count: "
                             "{}).").format(len(self.mus)), 5)
         if (self.errorEstimatorKindMarginal == "LOOK_AHEAD_RECOVER"
         and hasattr(self.trainedModel, "_idxExcl")
         and len(self.trainedModel._idxExcl) > 0):
             vbMng(self, "INIT", "Recovering {} test models.".format(
                                            len(self.trainedModel._idxExcl)), 7)
             for j, mu in zip(self.trainedModel._idxExcl,
                              self.trainedModel._musMExcl):
                 self.musMarginal.insert(mu, j)
             self._preliminaryMarginalFinalization()
             self._updateTrainedModelMarginalSamples()
             self._finalizeMarginalization()
             self._SMarginal = len(self.musMarginal)
             self._approxParameters["SMarginal"] = self.SMarginal
             self.trainedModel.data.approxParameters["SMarginal"] = (
                                                                 self.SMarginal)
             vbMng(self, "DEL", "Done recovering test models.", 7)
         vbMng(self, "DEL", "Done setting up approximant.", 5)
         return 0
 
     def checkComputedApproxPivoted(self) -> bool:
         return (super().checkComputedApprox()
           and len(self.musMarginal) == len(self.trainedModel.data.musMarginal))
 
+class GenericPivotedGreedyApproximantMatch(GenericPivotedGreedyApproximantBase,
+                                           GenericPivotedApproximantMatch):
+    """
+    ROM pivoted greedy interpolant computation for parametric problems (with
+        some matching) (ABSTRACT).
+
+    Args:
+        HFEngine: HF problem solver.
+        mu0(optional): Default parameter. Defaults to 0.
+        directionPivot(optional): Pivot components. Defaults to [0].
+        approxParameters(optional): Dictionary containing values for main
+            parameters of approximant. Recognized keys are:
+            - 'POD': kind of snapshots orthogonalization; allowed values
+                include 0, 1/2, and 1; defaults to 1, i.e. POD;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+                defaults to 'AUTO';
+            - 'matchState': whether to match the system state rather than the
+                system output; defaults to False;
+            - 'matchingWeight': weight for matching optimization; defaults
+                to 1;
+            - 'matchingKind': kind of matching; allowed values include 'ROTATE'
+                and 'PROJECT'; defaults to 'ROTATE';
+            - 'matchingWeightError': weight for matching in error estimation;
+                defaults to 0;
+            - 'matchingErrorRelative': whether error estimation is relative;
+                defaults to False, i.e. absolute error;
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': number of starting marginal samples;
+            - 'samplerMarginal': marginal sample point generator via sparse
+                grid;
+            - 'errorEstimatorKindMarginal': kind of marginal error estimator;
+                available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER',
+                and 'NONE'; defaults to 'NONE';
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation; allowed values include 'MONOMIAL_*',
+                'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
+                'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant; defaults to
+                    'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                     defaults to 1; only for 'NEARESTNEIGHBOR';
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                    defaults to None; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights; only for
+                    radial basis.
+            - 'greedyTolMarginal': uniform error tolerance for marginal greedy
+               algorithm; defaults to 1e-1;
+            - 'maxIterMarginal': maximum number of marginal greedy steps;
+               defaults to 1e2;
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant; defaults to 1;
+            - 'autoCollapse': whether to collapse trained reduced model as soon
+                as it is built; defaults to False.
+            Defaults to empty dict.
+        verbosity(optional): Verbosity level. Defaults to 10.
+
+    Attributes:
+        HFEngine: HF problem solver.
+        mu0: Default parameter.
+        directionPivot: Pivot components.
+        mus: Array of snapshot parameters.
+        musMarginal: Array of marginal snapshot parameters.
+        approxParameters: Dictionary containing values for main parameters of
+            approximant. Recognized keys are in parameterList.
+        parameterListSoft: Recognized keys of soft approximant parameters:
+            - 'POD': kind of snapshots orthogonalization;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+            - 'matchState': whether to match the system state rather than the
+                system output;
+            - 'matchingWeight': weight for matching;
+            - 'matchingKind': kind of matching;
+            - 'matchingWeightError': weight for matching in error estimation;
+            - 'matchingErrorRelative': whether error estimation is relative;
+            - 'errorEstimatorKindMarginal': kind of marginal error estimator;
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation;
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant;
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal;
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights.
+            - 'greedyTolMarginal': uniform error tolerance for marginal greedy
+                algorithm;
+            - 'maxIterMarginal': maximum number of marginal greedy steps;
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant;
+            - 'autoCollapse': whether to collapse trained reduced model as soon
+                as it is built.
+        parameterListCritical: Recognized keys of critical approximant
+            parameters:
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator via sparse
+                grid.
+        verbosity: Verbosity level.
+        POD: Kind of snapshots orthogonalization.
+        scaleFactorDer: Scaling factors for derivative computation.
+        matchState: Whether to match the system state rather than the system
+            output.
+        matchingWeight: Weight for matching optimization.
+        matchingKind: Kind of matching.
+        matchingWeightError: Weight for matching optimization in error
+            estimation.
+        matchingErrorRelative: Whether error estimation is relative.
+        S: Total number of pivot samples current approximant relies upon.
+        samplerPivot: Pivot sample point generator.
+        SMarginal: Total number of marginal samples current approximant relies
+            upon.
+        samplerMarginal: Marginal sample point generator via sparse grid.
+        errorEstimatorKindMarginal: Kind of marginal error estimator.
+        polybasisMarginal: Type of polynomial basis for marginal interpolation.
+        paramsMarginal: Dictionary of parameters for marginal interpolation.
+        greedyTolMarginal: Uniform error tolerance for marginal greedy
+            algorithm.
+        maxIterMarginal: Maximum number of marginal greedy steps.
+        radialDirectionalWeightsMarginal: Radial basis weights for marginal
+            interpolant.
+        autoCollapse: Whether to collapse trained reduced model as soon as it
+            is built.
+        muBounds: list of bounds for pivot parameter values.
+        muBoundsMarginal: list of bounds for marginal parameter values.
+        samplingEngine: Sampling engine.
+        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
+            sampleList.
+        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
+            solution(s) as parameterList.
+        uApproxReduced: Reduced approximate solution(s) with parameter(s)
+            lastSolvedApprox as sampleList.
+        lastSolvedApproxReduced: Parameter(s) corresponding to last computed
+            reduced approximate solution(s) as parameterList.
+        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
+            sampleList.
+        lastSolvedApprox: Parameter(s) corresponding to last computed
+            approximate solution(s) as parameterList.
+    """
+
+    def getErrorEstimatorMarginalLookAhead(self) -> Np1D:
+        if not hasattr(self.trainedModel, "_musMExcl"):
+            err = np.zeros(0)
+            err[:] = np.inf
+            self._musMarginalTestIdxs = np.zeros(0, dtype = int)
+            return err
+        self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl,
+                                             dtype = int)
+        idx, sizes = indicesScatter(len(self.trainedModel._musMExcl),
+                                    return_sizes = True)
+        err = []
+        if len(idx) > 0:
+            self.verbosity -= 25
+            self.trainedModel.verbosity -= 25
+            for j in idx:
+                muTest = self.trainedModel._musMExcl[j]
+                QEx = self.trainedModel._QsExcl[j].coeffs
+                QAp = self.trainedModel.interpolateMarginalQ(muTest)[0]
+                no2Ex = np.sum(np.abs(QEx) ** 2.)
+                no2Ap = np.sum(np.abs(QAp) ** 2.)
+                inner = np.sum([QEx[j] * QAp[j].conj()
+                                      for j in range(min(len(QEx), len(QAp)))])
+                if self.matchingWeightError != 0:
+                    PEx = self.trainedModel._PsExcl[j].coeffs
+                    PAp = self.trainedModel.interpolateMarginalP(muTest)[0]
+                    PEx = dot(self.trainedModel.data.projMat, PEx.T)
+                    PAp = dot(self.trainedModel.data.projMat, PAp.T)
+                    no2PEx = self.HFEngine.norm(PEx,
+                                              is_state = self.matchState) ** 2.
+                    no2PAp = self.HFEngine.norm(PAp,
+                                              is_state = self.matchState) ** 2.
+                    innerP = [self.HFEngine.innerProduct(PEx[:, j], PAp[:, j],
+                                                    is_state = self.matchState)
+                               for j in range(min(PEx.shape[1], PAp.shape[1]))]
+                    no2Ex = no2Ex + self.matchingWeightError * np.sum(no2PEx)
+                    no2Ap = no2Ap + self.matchingWeightError * np.sum(no2PAp)
+                    inner = inner + self.matchingWeightError * np.sum(innerP)
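+                # Squared mismatch ||Ex||^2 - |<Ex, Ap>|^2 / ||Ap||^2: the
+                # squared norm of the component of the exact coefficients that
+                # is orthogonal to the span of the interpolated ones.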
+                dist2 = no2Ex - np.abs(inner) ** 2. / no2Ap
+                if self.matchingErrorRelative:
+                    dist2 /= no2Ex
+                else:
+                    dist2 /= 1. + self.matchingWeightError
+                # guard against tiny negative values due to roundoff
+                err += [max(dist2, 0.) ** .5]
+            self.verbosity += 25
+            self.trainedModel.verbosity += 25
+        return arrayGatherv(np.array(err), sizes)
+
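+    # During the marginal greedy iterations, setupApprox temporarily replaces
+    # polybasisMarginal with the piecewise-linear basis matching the kind of
+    # the (sparse-grid) marginal sampler; the user-selected basis is restored
+    # once super().setupApprox has returned.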
+    def setupApprox(self, *args, **kwargs) -> int:
+        if self.checkComputedApprox(): return -1
+        self.purgeparamsMarginal()
+        _polybasisMarginal = self.polybasisMarginal
+        self._polybasisMarginal = ("PIECEWISE_LINEAR_"
+                                 + self.samplerMarginal.kind)
+        setupOK = super().setupApprox(*args, **kwargs)
+        self._polybasisMarginal = _polybasisMarginal
+        if self.matchState: self._postApplyC()
+        return setupOK
+
 class GenericPivotedGreedyApproximantPoleMatch(
-                                           GenericPivotedGreedyApproximantBase,
-                                           GenericPivotedApproximantPoleMatch):
+                                          GenericPivotedGreedyApproximantMatch,
+                                          GenericPivotedApproximantPoleMatch):
     """
     ROM pivoted greedy interpolant computation for parametric problems (with
         pole matching) (ABSTRACT).
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'matchState': whether to match the system state rather than the
                 system output; defaults to False;
             - 'matchingWeight': weight for pole matching optimization; defaults
                 to 1;
             - 'matchingShared': required ratio of marginal points to share
                 resonance; defaults to 1.;
             - 'badPoleCorrection': strategy for correction of bad poles;
                 available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL';
                 defaults to 'ERASE';
             - 'matchingWeightError': weight for pole matching optimization in
                 error estimation; defaults to 0;
+            - 'matchingErrorRelative': whether error estimation is relative;
+                defaults to False, i.e. absolute error;
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': number of starting marginal samples;
             - 'samplerMarginal': marginal sample point generator via sparse
                 grid;
             - 'errorEstimatorKindMarginal': kind of marginal error estimator;
                 available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER',
                 and 'NONE'; defaults to 'NONE';
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation; allowed values include 'MONOMIAL_*',
                 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
                 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant; defaults to
                     'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                      defaults to 1; only for 'NEARESTNEIGHBOR';
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                     defaults to None; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights; only for
                     radial basis.
             - 'greedyTolMarginal': uniform error tolerance for marginal greedy
                algorithm; defaults to 1e-1;
             - 'maxIterMarginal': maximum number of marginal greedy steps;
                defaults to 1e2;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1;
             - 'autoCollapse': whether to collapse trained reduced model as soon
                 as it is built; defaults to False.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
 
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'matchState': whether to match the system state rather than the
                 system output;
             - 'matchingWeight': weight for pole matching optimization;
             - 'matchingShared': required ratio of marginal points to share
                 resonance;
             - 'badPoleCorrection': strategy for correction of bad poles;
             - 'matchingWeightError': weight for pole matching optimization in
                 error estimation;
+            - 'matchingErrorRelative': whether error estimation is relative;
             - 'errorEstimatorKindMarginal': kind of marginal error estimator;
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation;
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant;
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal;
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights.
             - 'greedyTolMarginal': uniform error tolerance for marginal greedy
                 algorithm;
             - 'maxIterMarginal': maximum number of marginal greedy steps;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant;
             - 'autoCollapse': whether to collapse trained reduced model as soon
                 as it is built.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator via sparse
                 grid.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         matchState: Whether to match the system state rather than the system
             output.
         matchingWeight: Weight for pole matching optimization.
         matchingShared: Required ratio of marginal points to share resonance.
         badPoleCorrection: Strategy for correction of bad poles.
         matchingWeightError: Weight for pole matching optimization in error
             estimation.
+        matchingErrorRelative: Whether error estimation is relative.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator via sparse grid.
         errorEstimatorKindMarginal: Kind of marginal error estimator.
         polybasisMarginal: Type of polynomial basis for marginal interpolation.
         paramsMarginal: Dictionary of parameters for marginal interpolation.
         greedyTolMarginal: Uniform error tolerance for marginal greedy
             algorithm.
         maxIterMarginal: Maximum number of marginal greedy steps.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         autoCollapse: Whether to collapse trained reduced model as soon as it
             is built.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
     """
 
-    def _updateTrainedModelMarginalSamples(self, idx : ListAny = []):
-        self.trainedModel.updateEffectiveSamples(idx, self.matchingWeight,
-                                                 self.HFEngine, False)
-
-    def setupApprox(self, *args, **kwargs) -> int:
-        if self.checkComputedApprox(): return -1
-        self.purgeparamsMarginal()
-        _polybasisMarginal = self.polybasisMarginal
-        self._polybasisMarginal = ("PIECEWISE_LINEAR_"
-                                 + self.samplerMarginal.kind)
-        setupOK = super().setupApprox(*args, **kwargs)
-        self._polybasisMarginal = _polybasisMarginal
-        if self.matchState: self._postApplyC()
-        return setupOK
+    def getErrorEstimatorMarginalLookAhead(self) -> Np1D:
+        if not hasattr(self.trainedModel, "_musMExcl"):
+            err = np.zeros(0)
+            err[:] = np.inf
+            self._musMarginalTestIdxs = np.zeros(0, dtype = int)
+            return err
+        self._musMarginalTestIdxs = np.array(self.trainedModel._idxExcl,
+                                             dtype = int)
+        idx, sizes = indicesScatter(len(self.trainedModel._musMExcl),
+                                    return_sizes = True)
+        err = []
+        if len(idx) > 0:
+            self.verbosity -= 25
+            self.trainedModel.verbosity -= 25
+            for j in idx:
+                muTest = self.trainedModel._musMExcl[j]
+                HITest = self.trainedModel._HIsExcl[j]
+                polesEx = HITest.poles
+                idxGood = np.isfinite(polesEx)
+                polesEx = polesEx[idxGood]
+                if len(polesEx) == 0:
+                    err += [0.]
+                    continue
+                polesAp = self.trainedModel.interpolateMarginalPoles(muTest)[0]
+                if self.matchingWeightError != 0:
+                    resEx = HITest.coeffs[np.where(idxGood)[0]]
+                    resAp = self.trainedModel.interpolateMarginalCoeffs(
+                                                  muTest)[0][: len(polesAp), :]
+                    resEx = dot(self.trainedModel.data.projMat, resEx)
+                    resAp = dot(self.trainedModel.data.projMat, resAp)
+                else:
+                    resEx, resAp = None, None
+                #match Ap to Ex
+                distMat = doubleDistanceMatrix(polesEx, polesAp,
+                                               self.matchingWeightError,
+                                               resEx, resAp, self.HFEngine,
+                                               self.matchState)
+                pmR, pmC = pointMatching(distMat)
+                dist = np.linalg.norm(distMat[pmR, pmC].flatten())
+                if self.matchingErrorRelative:
+                    if self.matchingWeightError != 0:
+                        resEx0 = resEx[:, pmR]
+                        res0 = np.zeros_like(resEx[:, [0]])
+                    else:
+                        resEx0, res0 = None, None
+                    dist0 = doubleDistanceMatrix(polesEx[pmR], [0.],
+                                                 self.matchingWeightError,
+                                                 resEx0, res0, self.HFEngine,
+                                                 self.matchState).flatten()
+                    dist /= np.linalg.norm(dist0)
+                err += [dist]
+            self.verbosity += 25
+            self.trainedModel.verbosity += 25
+        return arrayGatherv(np.array(err), sizes)
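+
+# Editorial sketch, not part of RROMPy: a minimal standalone illustration of
+# the pole-based look-ahead distance computed above, for the special case
+# matchingWeightError == 0. scipy.optimize.linear_sum_assignment stands in
+# for RROMPy's pointMatching; the function name is hypothetical.
+def _sketchPoleMatchingDistance(polesEx, polesAp, relative = False):
+    import numpy as np
+    from scipy.optimize import linear_sum_assignment
+    polesEx, polesAp = np.asarray(polesEx), np.asarray(polesAp)
+    # discard non-finite exact poles, as done above through idxGood
+    polesEx = polesEx[np.isfinite(polesEx)]
+    if len(polesEx) == 0: return 0.
+    # pairwise pole distances and optimal exact-to-approximate pairing
+    distMat = np.abs(polesEx.reshape(-1, 1) - polesAp.reshape(1, -1))
+    pmR, pmC = linear_sum_assignment(distMat)
+    dist = np.linalg.norm(distMat[pmR, pmC])
+    if relative:
+        # normalize by the distance of the matched exact poles from 0.
+        dist /= np.linalg.norm(np.abs(polesEx[pmR]))
+    return dist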
diff --git a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py
index 9509dec..2ab282f 100644
--- a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py
+++ b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_greedy_pivoted_greedy.py
@@ -1,363 +1,551 @@
 #Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from copy import deepcopy as copy
 import numpy as np
 from .generic_pivoted_greedy_approximant import (
                                       GenericPivotedGreedyApproximantBase,
+                                      GenericPivotedGreedyApproximantMatch,
                                       GenericPivotedGreedyApproximantPoleMatch)
 from rrompy.reduction_methods.standard.greedy import RationalInterpolantGreedy
 from rrompy.reduction_methods.standard.greedy.generic_greedy_approximant \
                                                             import pruneSamples
 from rrompy.reduction_methods.pivoted import (
+                                     RationalInterpolantGreedyPivotedMatch,
                                      RationalInterpolantGreedyPivotedPoleMatch)
 from rrompy.utilities.base.types import Np1D, Tuple, paramVal, paramList
 from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.exception_manager import RROMPyAssert
 from rrompy.parameter import emptyParameterList
 from rrompy.utilities.parallel import poolRank, recv
 
-__all__ = ['RationalInterpolantGreedyPivotedGreedyPoleMatch']
+__all__ = ['RationalInterpolantGreedyPivotedGreedyMatch',
+           'RationalInterpolantGreedyPivotedGreedyPoleMatch']
 
 class RationalInterpolantGreedyPivotedGreedyBase(
                                           GenericPivotedGreedyApproximantBase):
-    @property
-    def sampleBatchSize(self):
-        """Value of sampleBatchSize."""
-        return 1
-
-    @property
-    def sampleBatchIdx(self):
-        """Value of sampleBatchIdx."""
-        return self.S
-
     def greedyNextSample(self, muidx:int, plotEst : str = "NONE")\
                                           -> Tuple[Np1D, int, float, paramVal]:
         """Compute next greedy snapshot of solution map."""
         RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
         mus = copy(self.muTest[muidx])
         self.muTest.pop(muidx)
         for j, mu in enumerate(mus):
             vbMng(self, "MAIN",
                   ("Adding sample point no. {} at {} to training "
                    "set.").format(len(self.mus) + 1, mu), 3)
             self.mus.append(mu)
             self._S = len(self.mus)
             self._approxParameters["S"] = self.S
             if (self.samplingEngine.nsamples <= len(mus) - j - 1
              or not np.allclose(mu, self.samplingEngine.mus[j - len(mus)])):
                 self.samplingEngine.nextSample(mu)
             if self._isLastSampleCollinear():
                 vbMng(self, "MAIN",
                       ("Collinearity above tolerance detected. Starting "
                        "preemptive greedy loop termination."), 3)
                 self._collinearityFlag = 1
                 errorEstTest = np.empty(len(self.muTest))
                 errorEstTest[:] = np.nan
                 return errorEstTest, [-1], np.nan, np.nan
         errorEstTest, muidx, maxErrorEst = self.errorEstimator(self.muTest,
                                                                True)
         if plotEst == "ALL":
             self.plotEstimator(errorEstTest, muidx, maxErrorEst)
         return errorEstTest, muidx, maxErrorEst, self.muTest[muidx]
 
-    def _setSampleBatch(self, maxS:int):
-        return self.S
-
     def _preliminaryTraining(self):
         """Initialize starting snapshots of solution map."""
         RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
         if self.samplingEngine.nsamples > 0: return
         self.resetSamples()
         self.samplingEngine.scaleFactor = self.scaleFactorDer
         musPivot = self.samplerTrainSet.generatePoints(self.S)
         while len(musPivot) > self.S: musPivot.pop()
         muTestBasePivot = self.samplerPivot.generatePoints(self.nTestPoints,
                                                            False)
         idxPop = pruneSamples(self.mapParameterListPivot(muTestBasePivot),
                               self.mapParameterListPivot(musPivot),
                               1e-10 * self.scaleFactorPivot[0])
         muTestBasePivot.pop(idxPop)
         self._mus = emptyParameterList()
         self.mus.reset((self.S - 1, self.HFEngine.npar))
         self.muTest = emptyParameterList()
         self.muTest.reset((len(muTestBasePivot) + 1, self.HFEngine.npar))
         self.mus.data[:, self.directionPivot] = musPivot[: -1]
         self.mus.data[:, self.directionMarginal] = np.repeat(self.muMargLoc,
                                                           self.S - 1, axis = 0)
         self.muTest.data[: -1, self.directionPivot] = muTestBasePivot.data
         self.muTest.data[-1, self.directionPivot] = musPivot[-1]
         self.muTest.data[:, self.directionMarginal] = np.repeat(self.muMargLoc,
                                                       len(muTestBasePivot) + 1,
                                                       axis = 0)
         if len(self.mus) > 0:
             vbMng(self, "MAIN", 
                   ("Adding first {} sample point{} at {} to training "
                    "set.").format(self.S - 1, "" + "s" * (self.S > 2),
                                   self.mus), 3)
             self.samplingEngine.iterSample(self.mus)
         self._S = len(self.mus)
         self._approxParameters["S"] = self.S
         self.M, self.N = ("AUTO",) * 2
 
     def setupApproxPivoted(self, mus:paramList) -> int:
         if self.checkComputedApproxPivoted(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
         vbMng(self, "INIT", "Setting up pivoted approximant.", 10)
         if not hasattr(self, "_plotEstPivot"): self._plotEstPivot = "NONE"
         idx, sizes, emptyCores = self._preSetupApproxPivoted(mus)
         S0 = copy(self.S)
         pMat, Ps, Qs, req, musA = None, [], [], [], None
         if len(idx) == 0:
             vbMng(self, "MAIN", "Idling.", 45)
             if self.storeAllSamples: self.storeSamples()
             pL, pT, mT = recv(source = 0, tag = poolRank())
             pMat = np.empty((pL, 0), dtype = pT)
             musA = np.empty((0, self.mu0.shape[1]), dtype = mT)
         else:
             for i in idx:
                 self.muMargLoc = mus[[i]]
                 vbMng(self, "MAIN", "Building marginal model no. {} at "
                                     "{}.".format(i + 1, self.muMargLoc[0]), 25)
                 self.samplingEngine.resetHistory()
                 self.trainedModel = None
                 self.verbosity -= 5
                 self.samplingEngine.verbosity -= 5
                 RationalInterpolantGreedy.setupApprox(self, self._plotEstPivot)
                 self.verbosity += 5
                 self.samplingEngine.verbosity += 5
                 if self.storeAllSamples: self.storeSamples(i + self._nmusOld)
                 pMat, req, musA = self._localPivotedResult(pMat, req,
                                                            emptyCores, musA)
                 Ps += [copy(self.trainedModel.data.P)]
                 Qs += [copy(self.trainedModel.data.Q)]
                 if not self.matchState and self.autoCollapse:
                     Ps[-1].postmultiplyTensorize(pMat.T)
                 self._S = S0
             del self.muMargLoc
         for r in req: r.wait()
         if not self.matchState and self.autoCollapse: pMat = pMat[:, : 0]
         self._postSetupApproxPivoted(musA, pMat, Ps, Qs, sizes)
         vbMng(self, "DEL", "Done setting up pivoted approximant.", 10)
         return 0
 
     def setupApprox(self, plotEst : str = "NONE") -> int:
         if self.checkComputedApprox(): return -1
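+        # plotEst combines the marginal and pivot plotting flags as
+        # "<MARGINAL>_<PIVOT>"; a value without an underscore applies to the
+        # marginal loop only, with pivot plotting defaulting to "NONE".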
         if '_' not in plotEst: plotEst = plotEst + "_NONE"
         plotEstM, self._plotEstPivot = plotEst.split("_")
         val = super().setupApprox(plotEstM)
         return val
 
+class RationalInterpolantGreedyPivotedGreedyMatch(
+                                    RationalInterpolantGreedyPivotedGreedyBase,
+                                    GenericPivotedGreedyApproximantMatch,
+                                    RationalInterpolantGreedyPivotedMatch):
+    """
+    ROM greedy pivoted greedy rational interpolant computation for parametric
+        problems (with some matching).
+
+    Args:
+        HFEngine: HF problem solver.
+        mu0(optional): Default parameter. Defaults to 0.
+        directionPivot(optional): Pivot components. Defaults to [0].
+        approxParameters(optional): Dictionary containing values for main
+            parameters of approximant. Recognized keys are:
+            - 'POD': kind of snapshots orthogonalization; allowed values
+                include 0, 1/2, and 1; defaults to 1, i.e. POD;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+                defaults to 'AUTO';
+            - 'matchState': whether to match the system state rather than the
+                system output; defaults to False;
+            - 'matchingWeight': weight for matching; defaults to 1;
+            - 'matchingKind': kind of matching; allowed values include 'ROTATE'
+                and 'PROJECT'; defaults to 'ROTATE';
+            - 'matchingWeightError': weight for matching in error estimation;
+                defaults to 0;
+            - 'matchingErrorRelative': whether error estimation is relative;
+                defaults to False, i.e. absolute error;
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': number of starting marginal samples;
+            - 'samplerMarginal': marginal sample point generator via sparse
+                grid;
+            - 'errorEstimatorKindMarginal': kind of marginal error estimator;
+                available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER',
+                and 'NONE'; defaults to 'NONE';
+            - 'polybasis': type of polynomial basis for pivot interpolation;
+                defaults to 'MONOMIAL';
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation; allowed values include 'MONOMIAL_*',
+                'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
+                'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant; defaults to
+                    'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                     defaults to 1; only for 'NEARESTNEIGHBOR';
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                    defaults to None; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights; only for
+                    radial basis.
+            - 'greedyTol': uniform error tolerance for greedy algorithm;
+                defaults to 1e-2;
+            - 'collinearityTol': collinearity tolerance for greedy algorithm;
+                defaults to 0.;
+            - 'maxIter': maximum number of greedy steps; defaults to 1e2;
+            - 'nTestPoints': number of test points; defaults to 5e2;
+            - 'samplerTrainSet': training sample points generator; defaults to
+                samplerPivot;
+            - 'errorEstimatorKind': kind of error estimator; available values
+                include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
+                'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE';
+            - 'greedyTolMarginal': uniform error tolerance for marginal greedy
+               algorithm; defaults to 1e-1;
+            - 'maxIterMarginal': maximum number of marginal greedy steps;
+               defaults to 1e2;
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant; defaults to 1;
+            - 'autoCollapse': whether to collapse trained reduced model as soon
+                as it is built; defaults to False;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional; allowed values include 'NORM', 'DOMINANT',
+                'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
+                main folder for explanation); defaults to 'NORM';
+            - 'interpTol': tolerance for pivot interpolation; defaults to None;
+            - 'QTol': tolerance for robust rational denominator management;
+                defaults to 0.
+            Defaults to empty dict.
+        verbosity(optional): Verbosity level. Defaults to 10.
+
+    Attributes:
+        HFEngine: HF problem solver.
+        mu0: Default parameter.
+        directionPivot: Pivot components.
+        mus: Array of snapshot parameters.
+        musMarginal: Array of marginal snapshot parameters.
+        approxParameters: Dictionary containing values for main parameters of
+            approximant. Recognized keys are in parameterList.
+        parameterListSoft: Recognized keys of soft approximant parameters:
+            - 'POD': kind of snapshots orthogonalization;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+            - 'matchState': whether to match the system state rather than the
+                system output;
+            - 'matchingWeight': weight for matching;
+            - 'matchingKind': kind of matching;
+            - 'matchingWeightError': weight for matching in error estimation;
+            - 'matchingErrorRelative': whether error estimation is relative;
+            - 'errorEstimatorKindMarginal': kind of marginal error estimator;
+            - 'polybasis': type of polynomial basis for pivot interpolation;
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation;
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant;
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal;
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights.
+            - 'greedyTol': uniform error tolerance for greedy algorithm;
+            - 'collinearityTol': collinearity tolerance for greedy algorithm;
+            - 'maxIter': maximum number of greedy steps;
+            - 'nTestPoints': number of test points;
+            - 'samplerTrainSet': training sample points generator;
+            - 'errorEstimatorKind': kind of error estimator;
+            - 'greedyTolMarginal': uniform error tolerance for marginal greedy
+                algorithm;
+            - 'maxIterMarginal': maximum number of marginal greedy steps;
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant;
+            - 'autoCollapse': whether to collapse trained reduced model as soon
+                as it is built;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional;
+            - 'interpTol': tolerance for pivot interpolation;
+            - 'QTol': tolerance for robust rational denominator management.
+        parameterListCritical: Recognized keys of critical approximant
+            parameters:
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator via sparse
+                grid.
+        verbosity: Verbosity level.
+        POD: Kind of snapshots orthogonalization.
+        scaleFactorDer: Scaling factors for derivative computation.
+        matchState: Whether to match the system state rather than the system
+            output.
+        matchingWeight: Weight for matching.
+        matchingKind: Kind of matching.
+        matchingWeightError: Weight for matching in error estimation.
+        matchingErrorRelative: Whether error estimation is relative.
+        S: Total number of pivot samples current approximant relies upon.
+        samplerPivot: Pivot sample point generator.
+        SMarginal: Total number of marginal samples current approximant relies
+            upon.
+        samplerMarginal: Marginal sample point generator via sparse grid.
+        errorEstimatorKindMarginal: Kind of marginal error estimator.
+        polybasis: Type of polynomial basis for pivot interpolation.
+        polybasisMarginal: Type of polynomial basis for marginal interpolation.
+        paramsMarginal: Dictionary of parameters for marginal interpolation.
+        greedyTol: uniform error tolerance for greedy algorithm.
+        collinearityTol: Collinearity tolerance for greedy algorithm.
+        maxIter: maximum number of greedy steps.
+        nTestPoints: number of starting training points.
+        samplerTrainSet: training sample points generator.
+        errorEstimatorKind: kind of error estimator.
+        greedyTolMarginal: Uniform error tolerance for marginal greedy
+            algorithm.
+        maxIterMarginal: Maximum number of marginal greedy steps.
+        radialDirectionalWeightsMarginal: Radial basis weights for marginal
+            interpolant.
+        autoCollapse: Whether to collapse trained reduced model as soon as it
+            is built.
+        functionalSolve: Strategy for minimization of denominator functional.
+        interpTol: Tolerance for pivot interpolation.
+        QTol: Tolerance for robust rational denominator management.
+        muBounds: list of bounds for pivot parameter values.
+        muBoundsMarginal: list of bounds for marginal parameter values.
+        samplingEngine: Sampling engine.
+        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
+            sampleList.
+        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
+            solution(s) as parameterList.
+        uApproxReduced: Reduced approximate solution(s) with parameter(s)
+            lastSolvedApprox as sampleList.
+        lastSolvedApproxReduced: Parameter(s) corresponding to last computed
+            reduced approximate solution(s) as parameterList.
+        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
+            sampleList.
+        lastSolvedApprox: Parameter(s) corresponding to last computed
+            approximate solution(s) as parameterList.
+    """
+
 class RationalInterpolantGreedyPivotedGreedyPoleMatch(
                                     RationalInterpolantGreedyPivotedGreedyBase,
                                     GenericPivotedGreedyApproximantPoleMatch,
                                     RationalInterpolantGreedyPivotedPoleMatch):
     """
     ROM greedy pivoted greedy rational interpolant computation for parametric
         problems (with pole matching).
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'matchState': whether to match the system state rather than the
                 system output; defaults to False;
             - 'matchingWeight': weight for pole matching optimization; defaults
                 to 1;
             - 'matchingShared': required ratio of marginal points to share
                 resonance; defaults to 1.;
             - 'badPoleCorrection': strategy for correction of bad poles;
                 available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL';
                 defaults to 'ERASE';
             - 'matchingWeightError': weight for pole matching optimization in
                 error estimation; defaults to 0;
+            - 'matchingErrorRelative': whether error estimation is relative;
+                defaults to False, i.e. absolute error;
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': number of starting marginal samples;
             - 'samplerMarginal': marginal sample point generator via sparse
                 grid;
             - 'errorEstimatorKindMarginal': kind of marginal error estimator;
                 available values include 'LOOK_AHEAD' and 'LOOK_AHEAD_RECOVER';
                 defaults to 'NONE';
             - 'polybasis': type of polynomial basis for pivot interpolation;
                 defaults to 'MONOMIAL';
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation; allowed values include 'MONOMIAL_*',
                 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
                 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant; defaults to
                     'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                      defaults to 1; only for 'NEARESTNEIGHBOR';
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                     defaults to None; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights; only for
                     radial basis.
             - 'greedyTol': uniform error tolerance for greedy algorithm;
                 defaults to 1e-2;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
                 defaults to 0.;
             - 'maxIter': maximum number of greedy steps; defaults to 1e2;
             - 'nTestPoints': number of test points; defaults to 5e2;
             - 'samplerTrainSet': training sample points generator; defaults to
                 samplerPivot;
             - 'errorEstimatorKind': kind of error estimator; available values
                 include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
                 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE';
             - 'greedyTolMarginal': uniform error tolerance for marginal greedy
                algorithm; defaults to 1e-1;
             - 'maxIterMarginal': maximum number of marginal greedy steps;
                defaults to 1e2;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1;
             - 'autoCollapse': whether to collapse trained reduced model as soon
                 as it is built; defaults to False;
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for pivot interpolation; defaults to None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'matchState': whether to match the system state rather than the
                 system output;
             - 'matchingWeight': weight for pole matching optimization;
             - 'matchingShared': required ratio of marginal points to share
                 resonance;
             - 'badPoleCorrection': strategy for correction of bad poles;
             - 'matchingWeightError': weight for pole matching optimization in
                 error estimation;
+            - 'matchingErrorRelative': whether error estimation is relative;
             - 'errorEstimatorKindMarginal': kind of marginal error estimator;
             - 'polybasis': type of polynomial basis for pivot interpolation;
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation;
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant;
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal;
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights.
             - 'greedyTol': uniform error tolerance for greedy algorithm;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
             - 'maxIter': maximum number of greedy steps;
             - 'nTestPoints': number of test points;
             - 'samplerTrainSet': training sample points generator;
             - 'errorEstimatorKind': kind of error estimator;
             - 'greedyTolMarginal': uniform error tolerance for marginal greedy
                 algorithm;
             - 'maxIterMarginal': maximum number of marginal greedy steps;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant;
             - 'autoCollapse': whether to collapse trained reduced model as soon
                 as it is built;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for pivot interpolation;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator via sparse
                 grid.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         matchState: Whether to match the system state rather than the system
             output.
         matchingWeight: Weight for pole matching optimization.
         matchingShared: Required ratio of marginal points to share resonance.
         badPoleCorrection: Strategy for correction of bad poles.
         matchingWeightError: Weight for pole matching optimization in error
             estimation.
+        matchingErrorRelative: Whether error estimation is relative.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator via sparse grid.
         errorEstimatorKindMarginal: Kind of marginal error estimator.
         polybasis: Type of polynomial basis for pivot interpolation.
         polybasisMarginal: Type of polynomial basis for marginal interpolation.
         paramsMarginal: Dictionary of parameters for marginal interpolation.
         greedyTol: uniform error tolerance for greedy algorithm.
         collinearityTol: Collinearity tolerance for greedy algorithm.
         maxIter: maximum number of greedy steps.
         nTestPoints: number of starting training points.
         samplerTrainSet: training sample points generator.
         errorEstimatorKind: kind of error estimator.
         greedyTolMarginal: Uniform error tolerance for marginal greedy
             algorithm.
         maxIterMarginal: Maximum number of marginal greedy steps.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         autoCollapse: Whether to collapse trained reduced model as soon as it
             is built.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: Tolerance for pivot interpolation.
         QTol: Tolerance for robust rational denominator management.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
     """
diff --git a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py
index 601a663..9e3c1d3 100644
--- a/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py
+++ b/rrompy/reduction_methods/pivoted/greedy/rational_interpolant_pivoted_greedy.py
@@ -1,302 +1,500 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from copy import deepcopy as copy
 import numpy as np
 from .generic_pivoted_greedy_approximant import (
                                       GenericPivotedGreedyApproximantBase,
+                                      GenericPivotedGreedyApproximantMatch,
                                       GenericPivotedGreedyApproximantPoleMatch)
 from rrompy.reduction_methods.standard import RationalInterpolant
 from rrompy.reduction_methods.pivoted import (
+                                           RationalInterpolantPivotedMatch,
                                            RationalInterpolantPivotedPoleMatch)
 from rrompy.utilities.base.types import paramList
 from rrompy.utilities.base import verbosityManager as vbMng
-from rrompy.utilities.exception_manager import RROMPyAssert
+from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert
 from rrompy.parameter import emptyParameterList
 from rrompy.utilities.parallel import poolRank, recv
 
-__all__ = ['RationalInterpolantPivotedGreedyPoleMatch']
+__all__ = ['RationalInterpolantPivotedGreedyMatch',
+           'RationalInterpolantPivotedGreedyPoleMatch']
 
 class RationalInterpolantPivotedGreedyBase(
                                           GenericPivotedGreedyApproximantBase):
     
     def computeSnapshots(self):
         """Compute snapshots of solution map."""
         RROMPyAssert(self._mode,
                      message = "Cannot start snapshot computation.")
         vbMng(self, "INIT", "Starting computation of snapshots.", 5)
         self.samplingEngine.scaleFactor = self.scaleFactorDer
         if not hasattr(self, "musPivot") or len(self.musPivot) != self.S:
             self.musPivot = self.samplerPivot.generatePoints(self.S)
             while len(self.musPivot) > self.S: self.musPivot.pop()
         musLoc = emptyParameterList()
         musLoc.reset((self.S, self.HFEngine.npar))
         self.samplingEngine.resetHistory()
         musLoc.data[:, self.directionPivot] = self.musPivot.data
         musLoc.data[:, self.directionMarginal] = np.repeat(self.muMargLoc,
                                                            self.S, axis = 0)        
         self.samplingEngine.iterSample(musLoc)
         vbMng(self, "DEL", "Done computing snapshots.", 5)
         self._m_selfmus = copy(musLoc)
         self._mus = self.musPivot
         self._m_HFEparameterMap = copy(self.HFEngine.parameterMap)
         self.HFEngine.parameterMap = {
                 "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]],
                 "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]}
 
     def addMarginalSamplePoints(self, musMarginal:paramList, *args, **kwargs):
         """Add marginal sample points to reduced model."""
         raise RROMPyException(("Cannot add marginal samples to marginal "
                                "greedy reduced model."))
 
     def setupApproxPivoted(self, mus:paramList) -> int:
         if self.checkComputedApproxPivoted(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
         vbMng(self, "INIT", "Setting up pivoted approximant.", 10)
         idx, sizes, emptyCores = self._preSetupApproxPivoted(mus)
         pMat, Ps, Qs, req, musA = None, [], [], [], None
         if len(idx) == 0:
             vbMng(self, "MAIN", "Idling.", 45)
             if self.storeAllSamples: self.storeSamples()
             pL, pT, mT = recv(source = 0, tag = poolRank())
             pMat = np.empty((pL, 0), dtype = pT)
             musA = np.empty((0, self.mu0.shape[1]), dtype = mT)
         else:
             for i in idx:
                 self.muMargLoc = mus[[i]]
                 vbMng(self, "MAIN", "Building marginal model no. {} at "
                                     "{}.".format(i + 1, self.muMargLoc[0]), 25)
                 self.samplingEngine.resetHistory()
                 self.trainedModel = None
                 self.verbosity -= 5
                 self.samplingEngine.verbosity -= 5
                 RationalInterpolant.setupApprox(self)
                 self.verbosity += 5
                 self.samplingEngine.verbosity += 5
                 self._mus = self._m_selfmus
                 self.HFEngine.parameterMap = self._m_HFEparameterMap
                 del self._m_selfmus, self._m_HFEparameterMap
                 if self.storeAllSamples: self.storeSamples(i + self._nmusOld)
                 pMat, req, musA = self._localPivotedResult(pMat, req,
                                                            emptyCores, musA)
                 Ps += [copy(self.trainedModel.data.P)]
                 Qs += [copy(self.trainedModel.data.Q)]
                 if not self.matchState and self.autoCollapse:
                     Ps[-1].postmultiplyTensorize(pMat.T)
             del self.muMargLoc
         for r in req: r.wait()
         if not self.matchState and self.autoCollapse: pMat = pMat[:, : 0]
         self._postSetupApproxPivoted(musA, pMat, Ps, Qs, sizes)
         vbMng(self, "DEL", "Done setting up pivoted approximant.", 10)
         return 0
 
+class RationalInterpolantPivotedGreedyMatch(
+                                          RationalInterpolantPivotedGreedyBase,
+                                          GenericPivotedGreedyApproximantMatch,
+                                          RationalInterpolantPivotedMatch):
+    """
+    ROM pivoted greedy rational interpolant computation for parametric
+        problems (with some matching).
+
+    Args:
+        HFEngine: HF problem solver.
+        mu0(optional): Default parameter. Defaults to 0.
+        directionPivot(optional): Pivot components. Defaults to [0].
+        approxParameters(optional): Dictionary containing values for main
+            parameters of approximant. Recognized keys are:
+            - 'POD': kind of snapshots orthogonalization; allowed values
+                include 0, 1/2, and 1; defaults to 1, i.e. POD;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+                defaults to 'AUTO';
+            - 'matchState': whether to match the system state rather than the
+                system output; defaults to False;
+            - 'matchingWeight': weight for matching; defaults to 1;
+            - 'matchingKind': kind of matching; allowed values include 'ROTATE'
+                and 'PROJECT'; defaults to 'ROTATE';
+            - 'matchingWeightError': weight for matching in error estimation;
+                defaults to 0;
+            - 'matchingErrorRelative': whether error estimation is relative;
+                defaults to False, i.e. absolute error;
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': number of starting marginal samples;
+            - 'samplerMarginal': marginal sample point generator via sparse
+                grid;
+            - 'errorEstimatorKindMarginal': kind of marginal error estimator;
+                available values include 'LOOK_AHEAD', 'LOOK_AHEAD_RECOVER',
+                and 'NONE'; defaults to 'NONE';
+            - 'polybasis': type of polynomial basis for pivot interpolation;
+                defaults to 'MONOMIAL';
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation; allowed values include 'MONOMIAL_*',
+                'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
+                'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant; defaults to
+                    'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                     defaults to 1; only for 'NEARESTNEIGHBOR';
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                    defaults to None; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights; only for
+                    radial basis.
+            - 'M': degree of rational interpolant numerator; defaults to
+                'AUTO', i.e. maximum allowed;
+            - 'N': degree of rational interpolant denominator; defaults to
+                'AUTO', i.e. maximum allowed;
+            - 'greedyTolMarginal': uniform error tolerance for marginal greedy
+               algorithm; defaults to 1e-1;
+            - 'maxIterMarginal': maximum number of marginal greedy steps;
+               defaults to 1e2;
+            - 'radialDirectionalWeights': radial basis weights for pivot
+                numerator; defaults to 1;
+            - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
+                radial basis weights; defaults to [-1, -1];
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant; defaults to 1;
+            - 'autoCollapse': whether to collapse trained reduced model as soon
+                as it is built; defaults to False;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional; allowed values include 'NORM', 'DOMINANT',
+                'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
+                main folder for explanation); defaults to 'NORM';
+            - 'interpTol': tolerance for pivot interpolation; defaults to None;
+            - 'QTol': tolerance for robust rational denominator management;
+                defaults to 0.
+            Defaults to empty dict.
+        verbosity(optional): Verbosity level. Defaults to 10.
+            
+    Attributes:
+        HFEngine: HF problem solver.
+        mu0: Default parameter.
+        directionPivot: Pivot components.
+        mus: Array of snapshot parameters.
+        musPivot: Array of pivot snapshot parameters.
+        musMarginal: Array of marginal snapshot parameters.
+        approxParameters: Dictionary containing values for main parameters of
+            approximant. Recognized keys are in parameterList.
+        parameterListSoft: Recognized keys of soft approximant parameters:
+            - 'POD': kind of snapshots orthogonalization;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+            - 'matchState': whether to match the system state rather than the
+                system output;
+            - 'matchingWeight': weight for matching;
+            - 'matchingKind': kind of matching;
+            - 'matchingWeightError': weight for matching in error estimation;
+            - 'matchingErrorRelative': whether error estimation is relative;
+            - 'errorEstimatorKindMarginal': kind of marginal error estimator;
+            - 'polybasis': type of polynomial basis for pivot interpolation;
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation;
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant;
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal;
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights.
+            - 'M': degree of rational interpolant numerator;
+            - 'N': degree of rational interpolant denominator;
+            - 'greedyTolMarginal': uniform error tolerance for marginal greedy
+                algorithm;
+            - 'maxIterMarginal': maximum number of marginal greedy steps;
+            - 'radialDirectionalWeights': radial basis weights for pivot
+                numerator;
+            - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
+                radial basis weights;
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant;
+            - 'autoCollapse': whether to collapse trained reduced model as soon
+                as it is built;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional;
+            - 'interpTol': tolerance for pivot interpolation;
+            - 'QTol': tolerance for robust rational denominator management.
+        parameterListCritical: Recognized keys of critical approximant
+            parameters:
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator via sparse
+                grid.
+        verbosity: Verbosity level.
+        POD: Kind of snapshots orthogonalization.
+        scaleFactorDer: Scaling factors for derivative computation.
+        matchState: Whether to match the system state rather than the system
+            output.
+        matchingWeight: Weight for matching.
+        matchingKind: Kind of matching.
+        matchingWeightError: Weight for matching in error estimation.
+        matchingErrorRelative: Whether error estimation is relative.
+        S: Total number of pivot samples current approximant relies upon.
+        samplerPivot: Pivot sample point generator.
+        SMarginal: Total number of marginal samples current approximant relies
+            upon.
+        samplerMarginal: Marginal sample point generator via sparse grid.
+        errorEstimatorKindMarginal: Kind of marginal error estimator.
+        polybasis: Type of polynomial basis for pivot interpolation.
+        polybasisMarginal: Type of polynomial basis for marginal interpolation.
+        paramsMarginal: Dictionary of parameters for marginal interpolation.
+        M: Degree of rational interpolant numerator.
+        N: Degree of rational interpolant denominator.
+        greedyTolMarginal: Uniform error tolerance for marginal greedy
+            algorithm.
+        maxIterMarginal: Maximum number of marginal greedy steps.
+        radialDirectionalWeights: Radial basis weights for pivot numerator.
+        radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial
+            basis weights.
+        radialDirectionalWeightsMarginal: Radial basis weights for marginal
+            interpolant.
+        autoCollapse: Whether to collapse trained reduced model as soon as it
+            is built.
+        functionalSolve: Strategy for minimization of denominator functional.
+        interpTol: Tolerance for pivot interpolation.
+        QTol: Tolerance for robust rational denominator management.
+        muBounds: list of bounds for pivot parameter values.
+        muBoundsMarginal: list of bounds for marginal parameter values.
+        samplingEngine: Sampling engine.
+        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
+            sampleList.
+        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
+            solution(s) as parameterList.
+        uApproxReduced: Reduced approximate solution(s) with parameter(s)
+            lastSolvedApprox as sampleList.
+        lastSolvedApproxReduced: Parameter(s) corresponding to last computed
+            reduced approximate solution(s) as parameterList.
+        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
+            sampleList.
+        lastSolvedApprox: Parameter(s) corresponding to last computed
+            approximate solution(s) as parameterList.
+    """
+
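Editorial illustration (not part of the patch): the docstring above documents the *Match flavor of the pivoted greedy approximant, whose class statement lies outside this hunk. Relative to the PoleMatch sibling defined next, its matching is steered by 'matchingKind' rather than 'matchingShared'/'badPoleCorrection'. A minimal sketch of the matching-related approxParameters keys, with placeholder values:

matchParams = {'matchingWeight': 1.,            # weight of the matching term
               'matchingKind': "ROTATE",        # alternative: "PROJECT"
               'matchingWeightError': 0.,       # matching weight in error estimation
               'matchingErrorRelative': False}  # absolute, not relative, error estimation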
 class RationalInterpolantPivotedGreedyPoleMatch(
                                       RationalInterpolantPivotedGreedyBase,
                                       GenericPivotedGreedyApproximantPoleMatch,
                                       RationalInterpolantPivotedPoleMatch):
     """
     ROM pivoted greedy rational interpolant computation for parametric
         problems (with pole matching).
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'matchState': whether to match the system state rather than the
                 system output; defaults to False;
             - 'matchingWeight': weight for pole matching optimization; defaults
                 to 1;
             - 'matchingShared': required ratio of marginal points to share
                 resonance; defaults to 1.;
             - 'badPoleCorrection': strategy for correction of bad poles;
                 available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL';
                 defaults to 'ERASE';
             - 'matchingWeightError': weight for pole matching optimization in
                 error estimation; defaults to 0;
+            - 'matchingErrorRelative': whether error estimation is relative;
+                defaults to False, i.e. absolute error;
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': number of starting marginal samples;
             - 'samplerMarginal': marginal sample point generator via sparse
                 grid;
             - 'errorEstimatorKindMarginal': kind of marginal error estimator;
                 available values include 'LOOK_AHEAD' and 'LOOK_AHEAD_RECOVER';
                 defaults to 'NONE';
             - 'polybasis': type of polynomial basis for pivot interpolation;
                 defaults to 'MONOMIAL';
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation; allowed values include 'MONOMIAL_*',
                 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
                 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant; defaults to
                     'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                      defaults to 1; only for 'NEARESTNEIGHBOR';
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                     defaults to None; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights; only for
                     radial basis.
             - 'M': degree of rational interpolant numerator; defaults to
                 'AUTO', i.e. maximum allowed;
             - 'N': degree of rational interpolant denominator; defaults to
                 'AUTO', i.e. maximum allowed;
             - 'greedyTolMarginal': uniform error tolerance for marginal greedy
                algorithm; defaults to 1e-1;
             - 'maxIterMarginal': maximum number of marginal greedy steps;
                defaults to 1e2;
             - 'radialDirectionalWeights': radial basis weights for pivot
                 numerator; defaults to 1;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights; defaults to [-1, -1];
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1;
             - 'autoCollapse': whether to collapse trained reduced model as soon
                 as it is built; defaults to False;
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for pivot interpolation; defaults to None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musPivot: Array of pivot snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'matchState': whether to match the system state rather than the
                 system output;
             - 'matchingWeight': weight for pole matching optimization;
             - 'matchingShared': required ratio of marginal points to share
                 resonance;
             - 'badPoleCorrection': strategy for correction of bad poles;
             - 'matchingWeightError': weight for pole matching optimization in
                 error estimation;
+            - 'matchingErrorRelative': whether error estimation is relative;
             - 'errorEstimatorKindMarginal': kind of marginal error estimator;
             - 'polybasis': type of polynomial basis for pivot interpolation;
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation;
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant;
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal;
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights.
             - 'M': degree of rational interpolant numerator;
             - 'N': degree of rational interpolant denominator;
             - 'greedyTolMarginal': uniform error tolerance for marginal greedy
                 algorithm;
             - 'maxIterMarginal': maximum number of marginal greedy steps;
             - 'radialDirectionalWeights': radial basis weights for pivot
                 numerator;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant;
             - 'autoCollapse': whether to collapse trained reduced model as soon
                 as it is built;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for pivot interpolation;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator via sparse
                 grid.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         matchState: Whether to match the system state rather than the system
             output.
         matchingWeight: Weight for pole matching optimization.
         matchingShared: Required ratio of marginal points to share resonance.
         badPoleCorrection: Strategy for correction of bad poles.
         matchingWeightError: Weight for pole matching optimization in error
             estimation.
+        matchingErrorRelative: Whether error estimation is relative.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator via sparse grid.
         errorEstimatorKindMarginal: Kind of marginal error estimator.
         polybasis: Type of polynomial basis for pivot interpolation.
         polybasisMarginal: Type of polynomial basis for marginal interpolation.
         paramsMarginal: Dictionary of parameters for marginal interpolation.
         M: Degree of rational interpolant numerator.
         N: Degree of rational interpolant denominator.
         greedyTolMarginal: Uniform error tolerance for marginal greedy
             algorithm.
         maxIterMarginal: Maximum number of marginal greedy steps.
         radialDirectionalWeights: Radial basis weights for pivot numerator.
         radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial
             basis weights.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         autoCollapse: Whether to collapse trained reduced model as soon as it
             is built.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: Tolerance for pivot interpolation.
         QTol: Tolerance for robust rational denominator management.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
     """
diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py
index 865173d..462040e 100644
--- a/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py
+++ b/rrompy/reduction_methods/pivoted/rational_interpolant_greedy_pivoted.py
@@ -1,613 +1,789 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from copy import deepcopy as copy
 import numpy as np
 from .generic_pivoted_approximant import (GenericPivotedApproximantBase,
                                           GenericPivotedApproximantNoMatch,
+                                          GenericPivotedApproximantMatch,
                                           GenericPivotedApproximantPoleMatch)
 from .gather_pivoted_approximant import gatherPivotedApproximant
 from rrompy.reduction_methods.standard.greedy.rational_interpolant_greedy \
                                                import RationalInterpolantGreedy
 from rrompy.reduction_methods.standard.greedy.generic_greedy_approximant \
                                                             import pruneSamples
 from rrompy.utilities.base.types import Np1D, paramList
 from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.poly_fitting.polynomial import polyvander as pv
 from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk
 from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
                                                 RROMPyWarning)
 from rrompy.parameter import emptyParameterList
 from rrompy.utilities.parallel import poolRank, indicesScatter, isend, recv
 
 __all__ = ['RationalInterpolantGreedyPivotedNoMatch',
+           'RationalInterpolantGreedyPivotedMatch',
            'RationalInterpolantGreedyPivotedPoleMatch']
 
 class RationalInterpolantGreedyPivotedBase(GenericPivotedApproximantBase,
                                            RationalInterpolantGreedy):
     def __init__(self, *args, **kwargs):
         self._preInit()
         super().__init__(*args, **kwargs)
         if self.nparPivot > 1: self.HFEngine._ignoreResidues = 1
         self._postInit()
 
     @property
     def tModelType(self):
         if hasattr(self, "_temporaryPivot"):
             return RationalInterpolantGreedy.tModelType.fget(self)
         return super().tModelType
 
     def _polyvanderAuxiliary(self, mus, deg, *args):
         degEff = [0] * self.npar
         degEff[self.directionPivot[0]] = deg
         return pv(mus, degEff, *args)
 
     def _marginalizeMiscellanea(self, forward:bool):
         if forward:
             self._m_selfmus = copy(self.mus)
             self._m_HFEparameterMap = copy(self.HFEngine.parameterMap)
             self._mus = self.checkParameterListPivot(
                                                  self.mus(self.directionPivot))
             self.HFEngine.parameterMap = {
                 "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]],
                 "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]}
         else:
             self._mus = self._m_selfmus
             self.HFEngine.parameterMap = self._m_HFEparameterMap
             del self._m_selfmus, self._m_HFEparameterMap
 
     def _marginalizeTrainedModel(self, forward:bool):
         if forward:
             del self._temporaryPivot
             self.trainedModel.data.mu0 = self.mu0
             self.trainedModel.data.scaleFactor = [1.] * self.npar
             self.trainedModel.data.scaleFactor[self.directionPivot[0]] = (
                                                            self.scaleFactor[0])
             self.trainedModel.data.parameterMap = self.HFEngine.parameterMap
             self._m_musUniqueCN = copy(self._musUniqueCN)
             musUniqueCNAux = np.zeros((self.S, self.npar),
                                       dtype = self._musUniqueCN.dtype)
             musUniqueCNAux[:, self.directionPivot[0]] = self._musUniqueCN(0)
             self._musUniqueCN = self.checkParameterList(musUniqueCNAux)
             self._m_derIdxs = copy(self._derIdxs)
             for j in range(len(self._derIdxs)):
                 for l in range(len(self._derIdxs[j])):
                     derjl = self._derIdxs[j][l][0]
                     self._derIdxs[j][l] = [0] * self.npar
                     self._derIdxs[j][l][self.directionPivot[0]] = derjl
             self.trainedModel.data.Q._dirPivot = self.directionPivot[0]
             self.trainedModel.data.P._dirPivot = self.directionPivot[0]
             # tell greedy error estimator that operator / RHS is pivot-affine
             if hasattr(self.HFEngine.A, "is_affine"):
                 self._A_is_affine = self.HFEngine.A.is_affine
             else:
                 self._A_is_affine = 0
             if hasattr(self.HFEngine.b, "is_affine"):
                 self._b_is_affine = self.HFEngine.b.is_affine
             else:
                 self._b_is_affine = 0
             if self._A_is_affine >= 1 / 2 and self._b_is_affine >= 1 / 2:
                 self._affine_lvl += [1 / 2]
         else:
             self._temporaryPivot = 1
             self.trainedModel.data.mu0 = self.checkParameterListPivot(
                                                  self.mu0(self.directionPivot))
             self.trainedModel.data.scaleFactor = self.scaleFactor
             self.trainedModel.data.parameterMap = {
                 "F": [self.HFEngine.parameterMap["F"][self.directionPivot[0]]],
                 "B": [self.HFEngine.parameterMap["B"][self.directionPivot[0]]]}
             self._musUniqueCN = copy(self._m_musUniqueCN)
             self._derIdxs = copy(self._m_derIdxs)
             del self._m_musUniqueCN, self._m_derIdxs
             del self.trainedModel.data.Q._dirPivot
             del self.trainedModel.data.P._dirPivot
             if self._A_is_affine >= 1 / 2 and self._b_is_affine >= 1 / 2:
                 self._affine_lvl.pop()
             del self._A_is_affine, self._b_is_affine
         self.trainedModel.data.npar = self.npar
 
     def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D:
         """Standard residual-based error estimator."""
         setupOK = self.setupApproxLocal()
         if setupOK > 0:
             err = np.empty(len(mus))
             err[:] = np.nan
             if not return_max: return err
             return err, [- setupOK], np.nan
         self._marginalizeTrainedModel(True)
         errRes = super().errorEstimator(mus, return_max)
         self._marginalizeTrainedModel(False)
         return errRes
 
     def _preliminaryTraining(self):
         """Initialize starting snapshots of solution map."""
         RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
-        self._S = self._setSampleBatch(self.S)
         self.resetSamples()
         self.samplingEngine.scaleFactor = self.scaleFactorDer
         musPivot = self.samplerTrainSet.generatePoints(self.S)
         while len(musPivot) > self.S: musPivot.pop()
         muTestPivot = self.samplerPivot.generatePoints(self.nTestPoints, False)
         idxPop = pruneSamples(self.mapParameterListPivot(muTestPivot),
                               self.mapParameterListPivot(musPivot),
                               1e-10 * self.scaleFactorPivot[0])
         muTestPivot.pop(idxPop)
         self._mus = emptyParameterList()
         self.mus.reset((self.S - 1, self.HFEngine.npar))
         self.muTest = emptyParameterList()
         self.muTest.reset((len(muTestPivot) + 1, self.HFEngine.npar))
         self.mus.data[:, self.directionPivot] = musPivot[: -1]
         self.mus.data[:, self.directionMarginal] = np.repeat(self.muMargLoc,
                                                           self.S - 1, axis = 0)
         self.muTest.data[: -1, self.directionPivot] = muTestPivot.data
         self.muTest.data[-1, self.directionPivot] = musPivot[-1]
         self.muTest.data[:, self.directionMarginal] = np.repeat(self.muMargLoc,
                                                           len(muTestPivot) + 1,
                                                           axis = 0)
         if len(self.mus) > 0:
             vbMng(self, "MAIN", 
                   ("Adding first {} sample point{} at {} to training "
                    "set.").format(self.S - 1, "" + "s" * (self.S > 2),
                                   self.mus), 3)
             self.samplingEngine.iterSample(self.mus)
         self._S = len(self.mus)
         self._approxParameters["S"] = self.S
         self.M, self.N = ("AUTO",) * 2
 
     def setupApproxLocal(self) -> int:
         """Compute rational interpolant."""
         self._marginalizeMiscellanea(True)
         setupOK = super().setupApproxLocal()
         self._marginalizeMiscellanea(False)
         return setupOK
 
     def addMarginalSamplePoints(self, musMarginal:paramList, *args,
                                 **kwargs) -> int:
         """Add marginal sample points to reduced model."""
         RROMPyAssert(self._mode, message = "Cannot add sample points.")
         musMarginal = self.checkParameterListMarginal(musMarginal)
         vbMng(self, "INIT",
               "Adding marginal sample point{} at {}.".format(
                                  "s" * (len(musMarginal) > 1), musMarginal), 5)
         if (self.SMarginal > 0 and hasattr(self, "polybasisMarginal")
         and self.polybasisMarginal in sk):
             RROMPyWarning(("Manually adding new samples with piecewise linear "
                            "marginal interpolation is dangerous. Sample depth "
                            "in samplerMarginal must be managed correctly."))
         _musOld = self.mus
         self._musMarginal.append(musMarginal)
         S0 = copy(self.S)
         idx, sizes = indicesScatter(len(musMarginal), return_sizes = True)
         _trainedModelOld = copy(self.trainedModel)
         _collapsed = (_trainedModelOld is not None
                   and _trainedModelOld.data._collapsed)
         pMat, Ps, Qs, mus = None, [], [], None
         req, emptyCores = [], np.where(sizes == 0)[0]
         if len(idx) == 0:
             vbMng(self, "MAIN", "Idling.", 25)
             if self.storeAllSamples: self.storeSamples()
             pL, pT, mT = recv(source = 0, tag = poolRank())
             pMat = np.empty((pL, 0), dtype = pT)
             mus = np.empty((0, self.mu0.shape[1]), dtype = mT)
         else:
             _scaleFactorOldPivot = copy(self.scaleFactor)
             self.scaleFactor = self.scaleFactorPivot
             self._temporaryPivot = 1
             for i in idx:
                 self.muMargLoc = self.musMarginal[[i + self.SMarginal]]
                 vbMng(self, "MAIN",
                       "Building marginal model no. {} at {}.".format(
                                       i + self.SMarginal + 1,
                                       self.musMarginal[i + self.SMarginal]), 5)
                 self.samplingEngine.resetHistory()
                 self.trainedModel = None
                 self.verbosity -= 5
                 self.samplingEngine.verbosity -= 5
                 RationalInterpolantGreedy.setupApprox(self, *args, **kwargs)
                 self.verbosity += 5
                 self.samplingEngine.verbosity += 5
                 if self.storeAllSamples: self.storeSamples(i + self.SMarginal)
                 musi = self.samplingEngine.mus
                 pMati = self.samplingEngine.projectionMatrix
                 if not self.matchState:
                     if self.POD == 1 and not (
                         hasattr(self.HFEngine.C, "is_mu_independent")
                     and self.HFEngine.C.is_mu_independent in self._output_lvl):
                         raise RROMPyException(("Cannot apply mu-dependent C "
                                                "to orthonormalized samples."))
                     vbMng(self, "INIT", "Extracting system output from state.",
                           35)
                     pMatiEff = None
                     for j, mu in enumerate(musi):
                         pMij = np.expand_dims(self.HFEngine.applyC(
                                                           pMati[:, j], mu), -1)
                         if pMatiEff is None:
                             pMatiEff = np.array(pMij)
                         else:
                             pMatiEff = np.append(pMatiEff, pMij, axis = 1)
                     pMati = pMatiEff
                     vbMng(self, "DEL", "Done extracting system output.", 35)
 
                 if pMat is None:
                     mus = copy(musi.data)
                     if i == 0:
                         for dest in emptyCores:
                             req += [isend((len(pMati), pMati.dtype, mus.dtype),
                                           dest = dest, tag = dest)]
                 else:
                     mus = np.vstack((mus, musi.data))
                 if _collapsed:
                     pMat = 1.
                 else:
                     if pMat is None:
                         pMat = copy(pMati)
                     else:
                         pMat = np.hstack((pMat, pMati))
                 Ps += [copy(self.trainedModel.data.P)]
                 Qs += [copy(self.trainedModel.data.Q)]
                 if _collapsed: Ps[-1].postmultiplyTensorize(pMati.T)
                 self._S = S0
             del self._temporaryPivot, self.muMargLoc
             self.scaleFactor = _scaleFactorOldPivot
         for r in req: r.wait()
         if _collapsed: pMat = pMati[:, : 0]
         pMat, Ps, Qs, mus, nsamples = gatherPivotedApproximant(pMat, Ps, Qs,
                                                     mus, sizes, self.polybasis)
         self._mus = _musOld
         self.mus.append(mus)
         Psupp = np.append(0, np.cumsum(nsamples[: -1]))
         if _trainedModelOld is None:
             self._setupTrainedModel(pMat, forceNew = True)
             self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], []
             self.trainedModel.data.Psupp = []
         else:
             self._trainedModel = _trainedModelOld
             if _collapsed:
                 self._setupTrainedModel(1.)
                 Psupp = [0] * len(musMarginal)
             else:
                 Psupp = Psupp + self.trainedModel.data.projMat.shape[1]
                 self._setupTrainedModel(pMat, 1)
         self._SMarginal += len(musMarginal)
         self.trainedModel.data.Qs += Qs
         self.trainedModel.data.Ps += Ps
         self.trainedModel.data.Psupp += list(Psupp)
         self._preliminaryMarginalFinalization()
         self._finalizeMarginalization()
         vbMng(self, "DEL", "Done setting up approximant.", 5)
         return 0
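# Editorial note (not part of the patch): addMarginalSamplePoints above is the
# incremental entry point. setupApprox below merely draws the SMarginal
# starting marginal points from samplerMarginal and delegates to it, which is
# why additional marginal models can be appended to an already trained
# approximant without rebuilding the existing ones.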
 
     def setupApprox(self, *args, **kwargs) -> int:
         """Compute rational interpolant."""
         if self.checkComputedApprox(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
         vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5)
         self.computeScaleFactor()
         self._mus = emptyParameterList()
         self._musMarginal = emptyParameterList()
         musMarginal = self.samplerMarginal.generatePoints(self.SMarginal)
         while len(musMarginal) > self.SMarginal: musMarginal.pop()
         self._SMarginal = 0
         val = self.addMarginalSamplePoints(musMarginal, *args, **kwargs)
         vbMng(self, "DEL", "Done setting up approximant.", 5)
         return val
 
 class RationalInterpolantGreedyPivotedNoMatch(
                                           RationalInterpolantGreedyPivotedBase,
                                           GenericPivotedApproximantNoMatch):
     """
     ROM pivoted rational interpolant (without pole matching) computation for
         parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation; defaults to 'MONOMIAL';
             - 'greedyTol': uniform error tolerance for greedy algorithm;
                 defaults to 1e-2;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
                 defaults to 0.;
             - 'maxIter': maximum number of greedy steps; defaults to 1e2;
             - 'nTestPoints': number of test points; defaults to 5e2;
             - 'samplerTrainSet': training sample points generator; defaults to
                 samplerPivot;
             - 'errorEstimatorKind': kind of error estimator; available values
                 include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
                 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE';
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1;
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for pivot interpolation; defaults to
                 None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
             - 'maxIter': maximum number of greedy steps;
             - 'nTestPoints': number of test points;
             - 'samplerTrainSet': training sample points generator;
             - 'errorEstimatorKind': kind of error estimator;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for pivot interpolation;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator.
         polybasis: Type of polynomial basis for pivot interpolation.
         greedyTol: uniform error tolerance for greedy algorithm.
         collinearityTol: Collinearity tolerance for greedy algorithm.
         maxIter: maximum number of greedy steps.
         nTestPoints: number of starting training points.
         samplerTrainSet: training sample points generator.
         errorEstimatorKind: kind of error estimator.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: Tolerance for pivot interpolation.
         QTol: Tolerance for robust rational denominator management.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D vector whose columns are FE dofs of coefficients of
             approximant numerator.
     """
 
+class RationalInterpolantGreedyPivotedMatch(
+                                          RationalInterpolantGreedyPivotedBase,
+                                          GenericPivotedApproximantMatch):
+    """
+    ROM pivoted rational interpolant (with matching) computation for
+        parametric problems.
+
+    Args:
+        HFEngine: HF problem solver.
+        mu0(optional): Default parameter. Defaults to 0.
+        directionPivot(optional): Pivot components. Defaults to [0].
+        approxParameters(optional): Dictionary containing values for main
+            parameters of approximant. Recognized keys are:
+            - 'POD': kind of snapshots orthogonalization; allowed values
+                include 0, 1/2, and 1; defaults to 1, i.e. POD;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+                defaults to 'AUTO';
+            - 'matchState': whether to match the system state rather than the
+                system output; defaults to False;
+            - 'matchingWeight': weight for matching; defaults to 1;
+            - 'matchingKind': kind of matching; allowed values include 'ROTATE'
+                and 'PROJECT'; defaults to 'ROTATE';
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator;
+            - 'polybasis': type of polynomial basis for pivot
+                interpolation; defaults to 'MONOMIAL';
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation; allowed values include 'MONOMIAL_*',
+                'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
+                'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant; defaults to
+                    'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                     defaults to 1; only for 'NEARESTNEIGHBOR';
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                    defaults to None; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights; only for
+                    radial basis.
+            - 'greedyTol': uniform error tolerance for greedy algorithm;
+                defaults to 1e-2;
+            - 'collinearityTol': collinearity tolerance for greedy algorithm;
+                defaults to 0.;
+            - 'maxIter': maximum number of greedy steps; defaults to 1e2;
+            - 'nTestPoints': number of test points; defaults to 5e2;
+            - 'samplerTrainSet': training sample points generator; defaults to
+                samplerPivot;
+            - 'errorEstimatorKind': kind of error estimator; available values
+                include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
+                'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE';
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant; defaults to 1;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional; allowed values include 'NORM', 'DOMINANT',
+                'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
+                main folder for explanation); defaults to 'NORM';
+            - 'interpTol': tolerance for pivot interpolation; defaults to None;
+            - 'QTol': tolerance for robust rational denominator management;
+                defaults to 0.
+            Defaults to empty dict.
+        verbosity(optional): Verbosity level. Defaults to 10.
+            
+    Attributes:
+        HFEngine: HF problem solver.
+        mu0: Default parameter.
+        directionPivot: Pivot components.
+        mus: Array of snapshot parameters.
+        musMarginal: Array of marginal snapshot parameters.
+        approxParameters: Dictionary containing values for main parameters of
+            approximant. Recognized keys are in parameterList.
+        parameterListSoft: Recognized keys of soft approximant parameters:
+            - 'POD': kind of snapshots orthogonalization;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+            - 'matchState': whether to match the system state rather than the
+                system output;
+            - 'matchingWeight': weight for matching;
+            - 'matchingKind': kind of matching;
+            - 'polybasis': type of polynomial basis for pivot
+                interpolation;
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation;
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant;
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal;
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights.
+            - 'greedyTol': uniform error tolerance for greedy algorithm;
+            - 'collinearityTol': collinearity tolerance for greedy algorithm;
+            - 'maxIter': maximum number of greedy steps;
+            - 'nTestPoints': number of test points;
+            - 'samplerTrainSet': training sample points generator;
+            - 'errorEstimatorKind': kind of error estimator;
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional;
+            - 'interpTol': tolerance for pivot interpolation;
+            - 'QTol': tolerance for robust rational denominator management.
+        parameterListCritical: Recognized keys of critical approximant
+            parameters:
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator.
+        verbosity: Verbosity level.
+        POD: Kind of snapshots orthogonalization.
+        scaleFactorDer: Scaling factors for derivative computation.
+        matchState: Whether to match the system state rather than the system
+            output.
+        matchingWeight: Weight for matching.
+        matchingKind: Kind of matching.
+        S: Total number of pivot samples current approximant relies upon.
+        samplerPivot: Pivot sample point generator.
+        SMarginal: Total number of marginal samples current approximant relies
+            upon.
+        samplerMarginal: Marginal sample point generator.
+        polybasis: Type of polynomial basis for pivot interpolation.
+        polybasisMarginal: Type of polynomial basis for marginal interpolation.
+        paramsMarginal: Dictionary of parameters for marginal interpolation.
+        greedyTol: uniform error tolerance for greedy algorithm.
+        collinearityTol: Collinearity tolerance for greedy algorithm.
+        maxIter: maximum number of greedy steps.
+        nTestPoints: number of starting training points.
+        samplerTrainSet: training sample points generator.
+        errorEstimatorKind: kind of error estimator.
+        radialDirectionalWeightsMarginal: Radial basis weights for marginal
+            interpolant.
+        functionalSolve: Strategy for minimization of denominator functional.
+        interpTol: Tolerance for pivot interpolation.
+        QTol: Tolerance for robust rational denominator management.
+        muBounds: list of bounds for pivot parameter values.
+        muBoundsMarginal: list of bounds for marginal parameter values.
+        samplingEngine: Sampling engine.
+        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
+            sampleList.
+        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
+            solution(s) as parameterList.
+        uApproxReduced: Reduced approximate solution(s) with parameter(s)
+            lastSolvedApprox as sampleList.
+        lastSolvedApproxReduced: Parameter(s) corresponding to last computed
+            reduced approximate solution(s) as parameterList.
+        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
+            sampleList.
+        lastSolvedApprox: Parameter(s) corresponding to last computed
+            approximate solution(s) as parameterList.
+        Q: Numpy 1D vector containing complex coefficients of approximant
+            denominator.
+        P: Numpy 2D vector whose columns are FE dofs of coefficients of
+            approximant numerator.
+    """
+
+    def setupApprox(self, *args, **kwargs) -> int:
+        """Compute rational interpolant."""
+        if self.checkComputedApprox(): return -1
+        self.purgeparamsMarginal()
+        setupOK = super().setupApprox(*args, **kwargs)
+        if self.matchState: self._postApplyC()
+        return setupOK
+
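Editorial note (not part of the patch): in the setupApprox override above, purgeparamsMarginal appears to drop paramsMarginal entries that do not apply to the chosen marginal basis, and, when matchState is enabled, _postApplyC presumably maps the matched states through the output operator after the generic setup. Construction mirrors the NoMatch sketch earlier; matching is switched on through keys such as (placeholder values):

matchParamsGreedy = {'matchState': False,        # match outputs rather than states
                     'matchingWeight': 1.,       # weight of the matching term
                     'matchingKind': "PROJECT"}  # alternative: "ROTATE"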
 class RationalInterpolantGreedyPivotedPoleMatch(
                                           RationalInterpolantGreedyPivotedBase,
                                           GenericPivotedApproximantPoleMatch):
     """
     ROM pivoted rational interpolant (with pole matching) computation for
         parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'matchState': whether to match the system state rather than the
                 system output; defaults to False;
             - 'matchingWeight': weight for pole matching optimization; defaults
                 to 1;
             - 'matchingShared': required ratio of marginal points to share
                 resonance; defaults to 1.;
             - 'badPoleCorrection': strategy for correction of bad poles;
                 available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL';
                 defaults to 'ERASE';
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation; defaults to 'MONOMIAL';
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation; allowed values include 'MONOMIAL_*',
                 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
                 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant; defaults to
                     'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                      defaults to 1; only for 'NEARESTNEIGHBOR';
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                     defaults to None; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights; only for
                     radial basis.
             - 'greedyTol': uniform error tolerance for greedy algorithm;
                 defaults to 1e-2;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
                 defaults to 0.;
             - 'maxIter': maximum number of greedy steps; defaults to 1e2;
             - 'nTestPoints': number of test points; defaults to 5e2;
             - 'samplerTrainSet': training sample points generator; defaults to
                 samplerPivot;
             - 'errorEstimatorKind': kind of error estimator; available values
                 include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
                 'LOOK_AHEAD_RES', and 'NONE'; defaults to 'NONE';
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1;
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for pivot interpolation; defaults to None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'matchState': whether to match the system state rather than the
                 system output;
             - 'matchingWeight': weight for pole matching optimization;
             - 'matchingShared': required ratio of marginal points to share
                 resonance;
             - 'badPoleCorrection': strategy for correction of bad poles;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation;
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation;
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant;
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal;
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights.
             - 'greedyTol': uniform error tolerance for greedy algorithm;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
             - 'maxIter': maximum number of greedy steps;
             - 'nTestPoints': number of test points;
             - 'samplerTrainSet': training sample points generator;
             - 'errorEstimatorKind': kind of error estimator;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for pivot interpolation;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         matchState: Whether to match the system state rather than the system
             output.
         matchingWeight: Weight for pole matching optimization.
         matchingShared: Required ratio of marginal points to share resonance.
         badPoleCorrection: Strategy for correction of bad poles.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator.
         polybasis: Type of polynomial basis for pivot interpolation.
         polybasisMarginal: Type of polynomial basis for marginal interpolation.
         paramsMarginal: Dictionary of parameters for marginal interpolation.
         greedyTol: Uniform error tolerance for greedy algorithm.
         collinearityTol: Collinearity tolerance for greedy algorithm.
         maxIter: Maximum number of greedy steps.
         nTestPoints: Number of starting training points.
         samplerTrainSet: Training sample points generator.
         errorEstimatorKind: Kind of error estimator.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: Tolerance for pivot interpolation.
         QTol: Tolerance for robust rational denominator management.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D vector whose columns are FE dofs of coefficients of
             approximant numerator.
     """
 
     def setupApprox(self, *args, **kwargs) -> int:
         if self.checkComputedApprox(): return -1
         self.purgeparamsMarginal()
         setupOK = super().setupApprox(*args, **kwargs)
         if self.matchState: self._postApplyC()
         return setupOK
diff --git a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py
index 449ebd9..29be09a 100644
--- a/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py
+++ b/rrompy/reduction_methods/pivoted/rational_interpolant_pivoted.py
@@ -1,523 +1,686 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 import numpy as np
 from collections.abc import Iterable
 from copy import deepcopy as copy
 from .generic_pivoted_approximant import (GenericPivotedApproximantBase,
                                           GenericPivotedApproximantNoMatch,
+                                          GenericPivotedApproximantMatch,
                                           GenericPivotedApproximantPoleMatch)
 from .gather_pivoted_approximant import gatherPivotedApproximant
 from rrompy.reduction_methods.standard.rational_interpolant import (
                                                            RationalInterpolant)
 from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.base.types import paramList
 from rrompy.utilities.numerical.hash_derivative import nextDerivativeIndices
 from rrompy.utilities.poly_fitting.piecewise_linear import sparsekinds as sk
 from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
                                                 RROMPyWarning)
 from rrompy.parameter import emptyParameterList
 from rrompy.utilities.parallel import poolRank, indicesScatter, isend, recv
 
 __all__ = ['RationalInterpolantPivotedNoMatch',
+           'RationalInterpolantPivotedMatch',
            'RationalInterpolantPivotedPoleMatch']
 
 class RationalInterpolantPivotedBase(GenericPivotedApproximantBase,
                                      RationalInterpolant):
     def __init__(self, *args, **kwargs):
         self._preInit()
-        self._addParametersToList(toBeExcluded = ["polydegreetype"])
         super().__init__(*args, **kwargs)
         if self.nparPivot > 1: self.HFEngine._ignoreResidues = 1
         self._postInit()
 
     @property
     def scaleFactorDer(self):
         """Value of scaleFactorDer."""
         if self._scaleFactorDer == "NONE": return 1.
         if self._scaleFactorDer == "AUTO": return self.scaleFactorPivot
         return self._scaleFactorDer
     @scaleFactorDer.setter
     def scaleFactorDer(self, scaleFactorDer):
         if isinstance(scaleFactorDer, (str,)):
             scaleFactorDer = scaleFactorDer.upper()
         elif isinstance(scaleFactorDer, Iterable):
             scaleFactorDer = list(scaleFactorDer)
         self._scaleFactorDer = scaleFactorDer
         self._approxParameters["scaleFactorDer"] = self._scaleFactorDer
 
-    @property
-    def polydegreetype(self):
-        """Value of polydegreetype."""
-        return "TOTAL"
-    @polydegreetype.setter
-    def polydegreetype(self, polydegreetype):
-        RROMPyWarning(("polydegreetype is used just to simplify inheritance, "
-                       "and its value cannot be changed from 'TOTAL'."))
-
     def _setupInterpolationIndices(self):
         """Setup parameters for polyvander."""
         RROMPyAssert(self._mode,
                      message = "Cannot setup interpolation indices.")
         if (self._musUniqueCN is None 
          or len(self._reorder) != len(self.musPivot)):
             try:
                 muPC = self.trainedModel.centerNormalizePivot(self.musPivot)
             except:
                 muPC = self.trainedModel.centerNormalize(self.musPivot)
             self._musUniqueCN, musIdxsTo, musIdxs, musCount = (muPC.unique(
                                     return_index = True, return_inverse = True,
                                     return_counts = True))
             self._musUnique = self.musPivot[musIdxsTo]
             self._derIdxs = [None] * len(self._musUniqueCN)
             self._reorder = np.empty(len(musIdxs), dtype = int)
             filled = 0
             for j, cnt in enumerate(musCount):
                 self._derIdxs[j] = nextDerivativeIndices([], self.nparPivot,
                                                          cnt)
                 jIdx = np.nonzero(musIdxs == j)[0]
                 self._reorder[jIdx] = np.arange(filled, filled + cnt)
                 filled += cnt
 
     def addMarginalSamplePoints(self, musMarginal:paramList) -> int:
         """Add marginal sample points to reduced model."""
         RROMPyAssert(self._mode, message = "Cannot add sample points.")
         musMarginal = self.checkParameterListMarginal(musMarginal)
         vbMng(self, "INIT",
               "Adding marginal sample point{} at {}.".format(
                                  "s" * (len(musMarginal) > 1), musMarginal), 5)
         if (self.SMarginal > 0 and hasattr(self, "polybasisMarginal")
         and self.polybasisMarginal in sk):
             RROMPyWarning(("Manually adding new samples with piecewise linear "
                            "marginal interpolation is dangerous. Sample depth "
                            "in samplerMarginal must be managed correctly."))
         mus = np.empty((self.S * len(musMarginal), self.HFEngine.npar),
                        dtype = np.complex)
         mus[:, self.directionPivot] = np.tile(self.musPivot.data,
                                               (len(musMarginal), 1))
         mus[:, self.directionMarginal] = np.repeat(musMarginal.data, self.S,
                                                    axis = 0)
         self._mus.append(mus)
         self._musMarginal.append(musMarginal)
         N0 = copy(self.N)
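         # Scatter the new marginal points across parallel ranks; idle ranks
         # only receive the projection matrix size and dtype below.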
         idx, sizes = indicesScatter(len(musMarginal), return_sizes = True)
         pMat, Ps, Qs = None, [], []
         req, emptyCores = [], np.where(sizes == 0)[0]
         if len(idx) == 0:
             vbMng(self, "MAIN", "Idling.", 30)
             if self.storeAllSamples: self.storeSamples()
             pL, pT = recv(source = 0, tag = poolRank())
             pMat = np.empty((pL, 0), dtype = pT)
         else:
             _scaleFactorOldPivot = copy(self.scaleFactor)
             self.scaleFactor = self.scaleFactorPivot
             self._temporaryPivot = 1
             for i in idx:
                 musi = self.mus[self.S * (i + self.SMarginal)
                               : self.S * (i + self.SMarginal + 1)]
                 vbMng(self, "MAIN",
                       "Building marginal model no. {} at {}.".format(
                                       i + self.SMarginal + 1,
                                       self.musMarginal[i + self.SMarginal]), 5)
                 vbMng(self, "INIT", "Starting computation of snapshots.", 10)
                 self.samplingEngine.resetHistory()
                 self.samplingEngine.iterSample(musi)
                 vbMng(self, "DEL", "Done computing snapshots.", 10)
                 self.verbosity -= 5
                 self.samplingEngine.verbosity -= 5
                 self._setupRational(self._setupDenominator())
                 self.verbosity += 5
                 self.samplingEngine.verbosity += 5
                 if self.storeAllSamples: self.storeSamples(i + self.SMarginal)
                 pMati = self.samplingEngine.projectionMatrix
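                 # When matching outputs rather than states, apply the output
                 # operator C to each snapshot before assembling the basis.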
                 if not self.matchState:
                     if self.POD == 1 and not (
                         hasattr(self.HFEngine.C, "is_mu_independent")
                     and self.HFEngine.C.is_mu_independent in self._output_lvl):
                         raise RROMPyException(("Cannot apply mu-dependent C "
                                                "to orthonormalized samples."))
                     vbMng(self, "INIT", "Extracting system output from state.",
                           35)
                     pMatiEff = None
                     for j, mu in enumerate(musi):
                         pMij = np.expand_dims(self.HFEngine.applyC(
                                                           pMati[:, j], mu), -1)
                         if pMatiEff is None:
                             pMatiEff = np.array(pMij)
                         else:
                             pMatiEff = np.append(pMatiEff, pMij, axis = 1)
                     pMati = pMatiEff
                     vbMng(self, "DEL", "Done extracting system output.", 35)
                 if pMat is None and i == 0:
                     for dest in emptyCores:
                         req += [isend((len(pMati), pMati.dtype), dest = dest,
                                       tag = dest)]
                 if self.trainedModel.data._collapsed:
                     pMat = 1.
                 else:
                     if pMat is None:
                         pMat = copy(pMati)
                     else:
                         pMat = np.hstack((pMat, pMati))
                 Ps += [copy(self.trainedModel.data.P)]
                 Qs += [copy(self.trainedModel.data.Q)]
                 if self.trainedModel.data._collapsed:
                     Ps[-1].postmultiplyTensorize(pMati.T)
                 del self.trainedModel.data.Q, self.trainedModel.data.P
                 self.N = N0
             del self._temporaryPivot
             self.scaleFactor = _scaleFactorOldPivot
         for r in req: r.wait()
         if self.trainedModel.data._collapsed: pMat = pMati[:, : 0]
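         # Gather local projection matrices and rational factors (Ps, Qs)
         # from all parallel ranks.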
         pMat, Ps, Qs, _, _ = gatherPivotedApproximant(pMat, Ps, Qs,
                                                       self.mus.data, sizes,
                                                       self.polybasis, False)
         if self.trainedModel.data._collapsed:
             self._setupTrainedModel(1.)
             Psupp = [0] * len(musMarginal)
         else:
             self._setupTrainedModel(pMat,
                                     len(self.trainedModel.data.projMat) > 0)
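             # Column offsets of each new local basis within the global
             # projection matrix.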
             Psupp = (self.SMarginal + np.arange(0, len(musMarginal))) * self.S
         self._SMarginal += len(musMarginal)
         self.trainedModel.data.Qs += Qs
         self.trainedModel.data.Ps += Ps
         self.trainedModel.data.Psupp += list(Psupp)
         self._preliminaryMarginalFinalization()
         self._finalizeMarginalization()
         vbMng(self, "DEL", "Done adding marginal sample points.", 5)
         return 0
 
     def setupApprox(self) -> int:
         """Compute rational interpolant."""
         if self.checkComputedApprox(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
         vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5)
         self.computeScaleFactor()
         self.resetSamples()
         self.samplingEngine.scaleFactor = self.scaleFactorDer
         self._mus = emptyParameterList()
         self._musMarginal = emptyParameterList()
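         # Generate pivot and marginal sample points, trimming any surplus
         # returned by the samplers.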
         self.musPivot = self.samplerPivot.generatePoints(self.S)
         while len(self.musPivot) > self.S: self.musPivot.pop()
         musMarginal = self.samplerMarginal.generatePoints(self.SMarginal)
         while len(musMarginal) > self.SMarginal: musMarginal.pop()
         self._setupTrainedModel(np.zeros((0, 0)), forceNew = True)
         self.trainedModel.data.Qs, self.trainedModel.data.Ps = [], []
         self.trainedModel.data.Psupp = []
         self._SMarginal = 0
         val = self.addMarginalSamplePoints(musMarginal)
         vbMng(self, "DEL", "Done setting up approximant.", 5)
         return val
 
 class RationalInterpolantPivotedNoMatch(RationalInterpolantPivotedBase,
                                         GenericPivotedApproximantNoMatch):
     """
     ROM pivoted rational interpolant (without pole matching) computation for
         parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation; defaults to 'MONOMIAL';
             - 'M': degree of rational interpolant numerator; defaults to
                 'AUTO', i.e. maximum allowed;
             - 'N': degree of rational interpolant denominator; defaults to
                 'AUTO', i.e. maximum allowed;
             - 'radialDirectionalWeights': radial basis weights for pivot
                 numerator; defaults to 1;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights; defaults to [-1, -1];
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1;
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for pivot interpolation; defaults to
                 None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musPivot: Array of pivot snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation;
             - 'M': degree of rational interpolant numerator;
             - 'N': degree of rational interpolant denominator;
             - 'radialDirectionalWeights': radial basis weights for pivot
                 numerator;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for pivot interpolation;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator.
         polybasis: Type of polynomial basis for pivot interpolation.
         M: Numerator degree of approximant.
         N: Denominator degree of approximant.
         radialDirectionalWeights: Radial basis weights for pivot numerator.
         radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial
             basis weights.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: Tolerance for pivot interpolation.
         QTol: Tolerance for robust rational denominator management.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D vector whose columns are FE dofs of coefficients of
             approximant numerator.
     """
     
+class RationalInterpolantPivotedMatch(RationalInterpolantPivotedBase,
+                                      GenericPivotedApproximantMatch):
+    """
+    ROM pivoted rational interpolant (with some matching) computation for
+        parametric problems.
+
+    Args:
+        HFEngine: HF problem solver.
+        mu0(optional): Default parameter. Defaults to 0.
+        directionPivot(optional): Pivot components. Defaults to [0].
+        approxParameters(optional): Dictionary containing values for main
+            parameters of approximant. Recognized keys are:
+            - 'POD': kind of snapshots orthogonalization; allowed values
+                include 0, 1/2, and 1; defaults to 1, i.e. POD;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+                defaults to 'AUTO';
+            - 'matchState': whether to match the system state rather than the
+                system output; defaults to False;
+            - 'matchingWeight': weight for matching; defaults to 1;
+            - 'matchingKind': kind of matching; allowed values include 'ROTATE'
+                and 'PROJECT'; defaults to 'ROTATE';
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator;
+            - 'polybasis': type of polynomial basis for pivot
+                interpolation; defaults to 'MONOMIAL';
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation; allowed values include 'MONOMIAL_*',
+                'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
+                'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant; defaults to
+                    'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                     defaults to 1; only for 'NEARESTNEIGHBOR';
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                    defaults to None; not for 'NEARESTNEIGHBOR' or
+                    'PIECEWISE_LINEAR_*';
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights; only for
+                    radial basis.
+            - 'M': degree of rational interpolant numerator; defaults to
+                'AUTO', i.e. maximum allowed;
+            - 'N': degree of rational interpolant denominator; defaults to
+                'AUTO', i.e. maximum allowed;
+            - 'radialDirectionalWeights': radial basis weights for pivot
+                numerator; defaults to 1;
+            - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
+                radial basis weights; defaults to [-1, -1];
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant; defaults to 1;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional; allowed values include 'NORM', 'DOMINANT',
+                'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
+                main folder for explanation); defaults to 'NORM';
+            - 'interpTol': tolerance for pivot interpolation; defaults to None;
+            - 'QTol': tolerance for robust rational denominator management;
+                defaults to 0.
+            Defaults to empty dict.
+        verbosity(optional): Verbosity level. Defaults to 10.
+            
+    Attributes:
+        HFEngine: HF problem solver.
+        mu0: Default parameter.
+        directionPivot: Pivot components.
+        mus: Array of snapshot parameters.
+        musPivot: Array of pivot snapshot parameters.
+        musMarginal: Array of marginal snapshot parameters.
+        approxParameters: Dictionary containing values for main parameters of
+            approximant. Recognized keys are in parameterList.
+        parameterListSoft: Recognized keys of soft approximant parameters:
+            - 'POD': kind of snapshots orthogonalization;
+            - 'scaleFactorDer': scaling factors for derivative computation;
+            - 'matchState': whether to match the system state rather than the
+                system output;
+            - 'matchingWeight': weight for matching;
+            - 'matchingKind': kind of matching;
+            - 'polybasis': type of polynomial basis for pivot
+                interpolation;
+            - 'polybasisMarginal': type of polynomial basis for marginal
+                interpolation;
+            - 'paramsMarginal': dictionary of parameters for marginal
+                interpolation; include:
+                . 'MMarginal': degree of marginal interpolant;
+                . 'nNeighborsMarginal': number of marginal nearest neighbors;
+                . 'polydegreetypeMarginal': type of polynomial degree for
+                    marginal;
+                . 'interpTolMarginal': tolerance for marginal interpolation;
+                . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
+                    rescaling of marginal radial basis weights.
+            - 'M': degree of rational interpolant numerator;
+            - 'N': degree of rational interpolant denominator;
+            - 'radialDirectionalWeights': radial basis weights for pivot
+                numerator;
+            - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
+                radial basis weights;
+            - 'radialDirectionalWeightsMarginal': radial basis weights for
+                marginal interpolant;
+            - 'functionalSolve': strategy for minimization of denominator
+                functional;
+            - 'interpTol': tolerance for pivot interpolation;
+            - 'QTol': tolerance for robust rational denominator management.
+        parameterListCritical: Recognized keys of critical approximant
+            parameters:
+            - 'S': total number of pivot samples current approximant relies
+                upon;
+            - 'samplerPivot': pivot sample point generator;
+            - 'SMarginal': total number of marginal samples current approximant
+                relies upon;
+            - 'samplerMarginal': marginal sample point generator.
+        verbosity: Verbosity level.
+        POD: Kind of snapshots orthogonalization.
+        scaleFactorDer: Scaling factors for derivative computation.
+        matchState: Whether to match the system state rather than the system
+            output.
+        matchingWeight: Weight for matching.
+        matchingKind: Kind of matching.
+        S: Total number of pivot samples current approximant relies upon.
+        samplerPivot: Pivot sample point generator.
+        SMarginal: Total number of marginal samples current approximant relies
+            upon.
+        samplerMarginal: Marginal sample point generator.
+        polybasis: Type of polynomial basis for pivot interpolation.
+        polybasisMarginal: Type of polynomial basis for marginal interpolation.
+        paramsMarginal: Dictionary of parameters for marginal interpolation.
+        M: Numerator degree of approximant.
+        N: Denominator degree of approximant.
+        radialDirectionalWeights: Radial basis weights for pivot numerator.
+        radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial
+            basis weights.
+        radialDirectionalWeightsMarginal: Radial basis weights for marginal
+            interpolant.
+        functionalSolve: Strategy for minimization of denominator functional.
+        interpTol: Tolerance for pivot interpolation.
+        QTol: Tolerance for robust rational denominator management.
+        muBounds: list of bounds for pivot parameter values.
+        muBoundsMarginal: list of bounds for marginal parameter values.
+        samplingEngine: Sampling engine.
+        uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
+            sampleList.
+        lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
+            solution(s) as parameterList.
+        uApproxReduced: Reduced approximate solution(s) with parameter(s)
+            lastSolvedApprox as sampleList.
+        lastSolvedApproxReduced: Parameter(s) corresponding to last computed
+            reduced approximate solution(s) as parameterList.
+        uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
+            sampleList.
+        lastSolvedApprox: Parameter(s) corresponding to last computed
+            approximate solution(s) as parameterList.
+        Q: Numpy 1D vector containing complex coefficients of approximant
+            denominator.
+        P: Numpy 2D vector whose columns are FE dofs of coefficients of
+            approximant numerator.
+    """
+
+    def setupApprox(self, *args, **kwargs) -> int:
+        if self.checkComputedApprox(): return -1
+        self.purgeparamsMarginal()
+        setupOK = super().setupApprox(*args, **kwargs)
+        if self.matchState: self._postApplyC()
+        return setupOK
+
 class RationalInterpolantPivotedPoleMatch(RationalInterpolantPivotedBase,
                                           GenericPivotedApproximantPoleMatch):
     """
     ROM pivoted rational interpolant (with pole matching) computation for
         parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         directionPivot(optional): Pivot components. Defaults to [0].
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'matchState': whether to match the system state rather than the
                 system output; defaults to False;
             - 'matchingWeight': weight for pole matching optimization; defaults
                 to 1;
             - 'matchingShared': required ratio of marginal points to share
                 resonance; defaults to 1.;
             - 'badPoleCorrection': strategy for correction of bad poles;
                 available values include 'ERASE', 'RATIONAL', and 'POLYNOMIAL';
                 defaults to 'ERASE';
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation; defaults to 'MONOMIAL';
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation; allowed values include 'MONOMIAL_*',
                 'CHEBYSHEV_*', 'LEGENDRE_*', 'NEARESTNEIGHBOR', and 
                 'PIECEWISE_LINEAR_*'; defaults to 'MONOMIAL';
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant; defaults to
                     'AUTO', i.e. maximum allowed; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                      defaults to 1; only for 'NEARESTNEIGHBOR';
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal; defaults to 'TOTAL'; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                     defaults to None; not for 'NEARESTNEIGHBOR' or
                     'PIECEWISE_LINEAR_*';
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights; only for
                     radial basis.
             - 'M': degree of rational interpolant numerator; defaults to
                 'AUTO', i.e. maximum allowed;
             - 'N': degree of rational interpolant denominator; defaults to
                 'AUTO', i.e. maximum allowed;
             - 'radialDirectionalWeights': radial basis weights for pivot
                 numerator; defaults to 1;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights; defaults to [-1, -1];
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant; defaults to 1;
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for pivot interpolation; defaults to None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         directionPivot: Pivot components.
         mus: Array of snapshot parameters.
         musPivot: Array of pivot snapshot parameters.
         musMarginal: Array of marginal snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'matchState': whether to match the system state rather than the
                 system output;
             - 'matchingWeight': weight for pole matching optimization;
             - 'matchingShared': required ratio of marginal points to share
                 resonance;
             - 'badPoleCorrection': strategy for correction of bad poles;
             - 'polybasis': type of polynomial basis for pivot
                 interpolation;
             - 'polybasisMarginal': type of polynomial basis for marginal
                 interpolation;
             - 'paramsMarginal': dictionary of parameters for marginal
                 interpolation; include:
                 . 'MMarginal': degree of marginal interpolant;
                 . 'nNeighborsMarginal': number of marginal nearest neighbors;
                 . 'polydegreetypeMarginal': type of polynomial degree for
                     marginal;
                 . 'interpTolMarginal': tolerance for marginal interpolation;
                 . 'radialDirectionalWeightsMarginalAdapt': bounds for adaptive
                     rescaling of marginal radial basis weights.
             - 'M': degree of rational interpolant numerator;
             - 'N': degree of rational interpolant denominator;
             - 'radialDirectionalWeights': radial basis weights for pivot
                 numerator;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights;
             - 'radialDirectionalWeightsMarginal': radial basis weights for
                 marginal interpolant;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for pivot interpolation;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of pivot samples current approximant relies
                 upon;
             - 'samplerPivot': pivot sample point generator;
             - 'SMarginal': total number of marginal samples current approximant
                 relies upon;
             - 'samplerMarginal': marginal sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         matchState: Whether to match the system state rather than the system
             output.
         matchingWeight: Weight for pole matching optimization.
         matchingShared: Required ratio of marginal points to share resonance.
         badPoleCorrection: Strategy for correction of bad poles.
         S: Total number of pivot samples current approximant relies upon.
         samplerPivot: Pivot sample point generator.
         SMarginal: Total number of marginal samples current approximant relies
             upon.
         samplerMarginal: Marginal sample point generator.
         polybasis: Type of polynomial basis for pivot interpolation.
         polybasisMarginal: Type of polynomial basis for marginal interpolation.
         paramsMarginal: Dictionary of parameters for marginal interpolation.
         M: Numerator degree of approximant.
         N: Denominator degree of approximant.
         radialDirectionalWeights: Radial basis weights for pivot numerator.
         radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial
             basis weights.
         radialDirectionalWeightsMarginal: Radial basis weights for marginal
             interpolant.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: Tolerance for pivot interpolation.
         QTol: Tolerance for robust rational denominator management.
         muBounds: list of bounds for pivot parameter values.
         muBoundsMarginal: list of bounds for marginal parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D vector whose columns are FE dofs of coefficients of
             approximant numerator.
     """
 
     def setupApprox(self, *args, **kwargs) -> int:
         if self.checkComputedApprox(): return -1
         self.purgeparamsMarginal()
         setupOK = super().setupApprox(*args, **kwargs)
         if self.matchState: self._postApplyC()
         return setupOK
diff --git a/rrompy/reduction_methods/pivoted/trained_model/convert_trained_model_pivoted.py b/rrompy/reduction_methods/pivoted/trained_model/convert_trained_model_pivoted.py
index f4ede6c..dbb76f9 100644
--- a/rrompy/reduction_methods/pivoted/trained_model/convert_trained_model_pivoted.py
+++ b/rrompy/reduction_methods/pivoted/trained_model/convert_trained_model_pivoted.py
@@ -1,68 +1,54 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from .trained_model_pivoted_rational_nomatch import (
                                             TrainedModelPivotedRationalNoMatch)
+from .trained_model_pivoted_rational_match import (
+                                              TrainedModelPivotedRationalMatch)
 from .trained_model_pivoted_rational_polematch import (
                                           TrainedModelPivotedRationalPoleMatch)
-from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.exception_manager import RROMPyException, RROMPyWarning
 
 __all__ = ['convertTrainedModelPivoted']
 
-def convertTrainedModelPivoted(model, outType, verbObj = None,
-                               muteWarnings : bool = False):
-    if isinstance(model, outType): return model
-    if ((isinstance(model, TrainedModelPivotedRationalNoMatch)
-     and outType == TrainedModelPivotedRationalPoleMatch)
-     or (isinstance(model, TrainedModelPivotedRationalPoleMatch)
-     and outType == TrainedModelPivotedRationalNoMatch)):
-        return convertTrainedModelPivotedMatchUnmatch(model, outType, verbObj,
-                                                      muteWarnings)
-    raise RROMPyException(("Model type or conversion output type not "
-                           "recognized."))
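+# Data fields specific to pole matching; when present, they are dropped from
+# the converted model and must be regenerated for the target type.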
+excludeDataPoleMatch = ["HIs", "suppEffPts", "suppEffIdx", "coeffsEff",
+                        "polesEff", "projGramian"]
 
-def convertTrainedModelPivotedMatchUnmatch(model, outType, verbObj = None, 
-                                           muteWarnings : bool = False):
-    if verbObj is not None:
-        sf, st = ["NoMatch", "PoleMatch"]
-        if outType == TrainedModelPivotedRationalPoleMatch:
-            msgw = "match poles, set up marginalInterp,"
-        else: #if outType == TrainedModelPivotedRationalNoMatch:
-            st, sf = sf, st
-            msgw = "set up marginalInterp"
-        vbMng(verbObj, "INIT",
-              "Starting model conversion from {} to {} model.".format(sf, st),
-              10)
+def convertTrainedModelPivoted(model, outType,
+                               muteWarnings : bool = False):
+    if outType not in [TrainedModelPivotedRationalNoMatch,
+                       TrainedModelPivotedRationalMatch,
+                       TrainedModelPivotedRationalPoleMatch]:
+        raise RROMPyException("Conversion output type not recognized.")
+    if outType == TrainedModelPivotedRationalNoMatch:
+        msgw = ""
+    elif outType == TrainedModelPivotedRationalMatch:
+        msgw = "match Qs, "
+    else:
+        msgw = "match poles, "
     excludeDataKey = ["marginalInterp", "approxParameters"]
-    if outType == TrainedModelPivotedRationalPoleMatch:
-        modelC = TrainedModelPivotedRationalPoleMatch()
-        msgw = "match poles, set up marginalInterp,"
-    else: #if outType == TrainedModelPivotedRationalNoMatch:
-        modelC = TrainedModelPivotedRationalNoMatch()
-        msgw = "set up marginalInterp"
-        excludeDataKey += ["HIs", "suppEffPts", "suppEffIdx", "coeffsEff",
-                           "polesEff", "projGramian"]
+    modelC = outType()
+    for key in excludeDataPoleMatch:
+        if hasattr(model, key): excludeDataKey += [key]
     for key in model.__dict__.keys(): setattr(modelC, key, model.__dict__[key])
     for key in excludeDataKey: delattr(modelC.data, key)
-    if verbObj is not None:
-        vbMng(verbObj, "DEL", "Finished model conversion.", 10)
     if not muteWarnings:
-        RROMPyWarning(("Model conversion result not yet fuctional: must stil "
-                       "{} and assign approxParameters.").format(msgw))
+        RROMPyWarning(("Model conversion result not yet fuctional: must still "
+                       "{}set up marginalInterp, and assign"
+                       "approxParameters.").format(msgw))
     return modelC
diff --git a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_match.py b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_match.py
new file mode 100644
index 0000000..ebad586
--- /dev/null
+++ b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_match.py
@@ -0,0 +1,339 @@
+# Copyright (C) 2018-2020 by the RROMPy authors
+#
+# This file is part of RROMPy.
+#
+# RROMPy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# RROMPy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import numpy as np
+from copy import deepcopy as copy
+from .trained_model_pivoted_rational_nomatch import (
+                                            TrainedModelPivotedRationalNoMatch)
+from rrompy.utilities.base.types import (Tuple, Np1D, Np2D, List, ListAny,
+                                         paramVal, paramList, sampList, HFEng)
+from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp
+from rrompy.utilities.numerical import dot
+from rrompy.utilities.numerical.point_matching import polynomialMatching
+from rrompy.utilities.numerical.degree import reduceDegreeN
+from rrompy.utilities.poly_fitting.polynomial import (polybases as ppb,
+                                                  PolynomialInterpolator as PI)
+from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb,
+                                                RadialBasisInterpolator as RBI)
+from rrompy.utilities.poly_fitting.heaviside import rational2heaviside
+from rrompy.utilities.poly_fitting.nearest_neighbor import (
+                                            NearestNeighborInterpolator as NNI)
+from rrompy.utilities.poly_fitting.piecewise_linear import (
+                                            PiecewiseLinearInterpolator as PLI)
+from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert
+from rrompy.sampling import sampleList
+
+__all__ = ['TrainedModelPivotedRationalMatch']
+
+class TrainedModelPivotedRationalMatch(TrainedModelPivotedRationalNoMatch):
+    """
+    ROM approximant evaluation for pivoted approximants based on interpolation
+        of rational approximants (with some matching).
+    
+    Attributes:
+        Data: dictionary with all that can be pickled.
+    """
+    
+    def centerNormalizeMarginal(self, mu : paramList = [],
+                                mu0 : paramVal = None) -> paramList:
+        """
+        Compute normalized parameter to be plugged into approximant.
+
+        Args:
+            mu: Parameter(s) 1.
+            mu0: Parameter(s) 2. If None, set to self.data.mu0Marginal.
+
+        Returns:
+            Normalized parameter.
+        """
+        mu = self.checkParameterListMarginal(mu)
+        if mu0 is None:
+            mu0 = self.data.mu0(self.data.directionMarginal)
+        return (self.mapParameterList(mu, idx = self.data.directionMarginal)
+              - self.mapParameterList(mu0, idx = self.data.directionMarginal)
+               ) / [self.data.scaleFactor[x]
+                                          for x in self.data.directionMarginal]
+
+    def setupMarginalInterp(self, approx, interpPars:ListAny, extraPar = None):
+        vbMng(self, "INIT", "Starting computation of marginal interpolator.",
+              12)
+        musMCN = self.centerNormalizeMarginal(self.data.musMarginal)
+        nM, pbM = len(musMCN), approx.polybasisMarginal
+        if pbM in ppb + rbpb:
+            if extraPar: approx._setMMarginalAuto()
+            _MMarginalEff = approx.paramsMarginal["MMarginal"]
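+        # Pick the marginal interpolator type from the requested basis:
+        # polynomial, radial basis, nearest neighbor, or piecewise linear.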
+        if pbM in ppb:
+            p = PI()
+        elif pbM in rbpb:
+            p = RBI()
+        else: # if pbM in sparsekinds + ["NEARESTNEIGHBOR"]:
+            if pbM == "NEARESTNEIGHBOR":
+                p = NNI()
+            else: # if pbM in sparsekinds:
+                pllims = [[-1.] * self.data.nparMarginal,
+                          [1.] * self.data.nparMarginal]
+                p = PLI()
+        if pbM in ppb + rbpb:
+            if not extraPar:
+                approx.paramsMarginal["MMarginal"] = reduceDegreeN(
+                            _MMarginalEff, len(musMCN), self.data.nparMarginal,
+                            approx.paramsMarginal["polydegreetypeMarginal"])
+            MMEff = approx.paramsMarginal["MMarginal"]
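+            # Decrease MMarginal until the interpolation problem is well
+            # conditioned; abort if no admissible degree remains.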
+            while MMEff >= 0:
+                wellCond, msg = p.setupByInterpolation(musMCN, np.eye(nM),
+                                                       MMEff, *interpPars)
+                vbMng(self, "MAIN", msg, 30)
+                if wellCond: break
+                vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing "
+                                     "MMarginal by 1."), 35)
+                MMEff -= 1
+            if MMEff < 0:
+                raise RROMPyException(("Instability in computation of "
+                                       "interpolant. Aborting."))
+            if (pbM in rbpb and len(interpPars) > 4
+            and "optimizeScalingBounds" in interpPars[4].keys()):
+                interpPars[4]["optimizeScalingBounds"] = [-1., -1.]
+        elif pbM == "NEARESTNEIGHBOR":
+            p.setupByInterpolation(musMCN, np.eye(nM), *interpPars)
+        else: # if pbM in sparsekinds:
+            p.setupByInterpolation(musMCN, np.eye(nM), pllims, extraPar,
+                                   *interpPars)
+        self.data.marginalInterp = p
+        if pbM in ppb + rbpb:
+            approx.paramsMarginal["MMarginal"] = _MMarginalEff
+        vbMng(self, "DEL", "Done computing marginal interpolator.", 12)
+
+    def updateEffectiveSamples(self, exclude:List[int], *args, **kwargs):
+        super().updateEffectiveSamples(exclude)
+        self.initializeFromRational(*args, **kwargs)
+
+    def initializeFromRational(self, matchingWeight:float, matchingKind:str,
+                               HFEngine:HFEng, is_state:bool):
+        """Initialize rational representation."""
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        Qs = [Q.coeffs for Q in self.data.Qs]
+        Ps = [P.coeffs for P in self.data.Ps]
+        N = len(Qs)
+        degQ = np.max([Q.shape[0] for Q in Qs])
+        degP = np.max([P.shape[0] for P in Ps])
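+        # Pad local numerators and denominators to common degrees so that
+        # they can be matched coefficient-wise across marginal points.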
+        for j in range(N):
+            if Qs[j].shape[0] < degQ:
+                Qs[j] = np.pad(Qs[j], (0, degQ - Qs[j].shape[0]), "constant")
+            if Ps[j].shape[0] < degP:
+                Ps[j] = np.pad(Ps[j], [(0, degP - Ps[j].shape[0]), (0, 0)],
+                               "constant")
+        Qs, Ps = polynomialMatching(Qs, Ps, self.data.musMarginal.data,
+                                    matchingWeight, self.data.Psupp,
+                                    self.data.projMat, HFEngine, is_state, 0,
+                                    matchingKind)
+        for j in range(N):
+            if not isinstance(self.data.Qs[j], PI):
+                q = PI()
+                q.npar = self.data.Qs[j].npar
+                q.polybasis = self.data.Qs[j].polybasis
+                self.data.Qs[j] = q
+            if not isinstance(self.data.Ps[j], PI):
+                p = PI()
+                p.npar = self.data.Ps[j].npar
+                p.polybasis = self.data.Ps[j].polybasis
+                self.data.Ps[j] = p
+            self.data.Qs[j].coeffs, self.data.Ps[j].coeffs = Qs[j], Ps[j]
+
+    def getApproxReduced(self, mu : paramList = []) -> sampList:
+        """
+        Evaluate reduced representation of approximant at arbitrary parameter.
+
+        Args:
+            mu: Target parameter.
+        """
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        mu = self.checkParameterList(mu)
+        if (not hasattr(self, "lastSolvedApproxReduced")
+         or self.lastSolvedApproxReduced != mu):
+            vbMng(self, "INIT",
+                  "Evaluating approximant at mu = {}.".format(mu), 12)
+            muP = mu(self.data.directionPivot)
+            muMC = self.centerNormalizeMarginal(
+                                               mu(self.data.directionMarginal))
+            mIvals = self.data.marginalInterp(muMC)
+            QV = self.getQVal(muP, mIvals = mIvals)
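+            # If mu hits a pole exactly, the denominator vanishes; replace the
+            # zero value with a tiny epsilon to avoid division by zero.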
+            QVzero = np.where(QV == 0.)[0]
+            if len(QVzero) > 0:
+                QV[QVzero] = np.finfo(complex).eps / (1.
+                                                      + self.data.Qs[0].deg[0])
+            self.uApproxReduced = self.getPVal(muP, mIvals = mIvals) / QV
+            vbMng(self, "DEL", "Done evaluating approximant.", 12)
+            self.lastSolvedApproxReduced = mu
+        return self.uApproxReduced
+
+    def interpolateMarginalP(self, mu : paramList = [],
+                             mIvals : Np2D = None) -> ListAny:
+        """Obtain interpolated approximant numerator."""
+        mu = self.checkParameterListMarginal(mu)
+        vbMng(self, "INIT", "Interpolating marginal P at mu = {}.".format(mu),
+              95)
+        if self.data._collapsed:
+            outShape = self.data.Ps[0].coeffs.shape
+        else:
+            outShape = (self.data.Ps[0].coeffs.shape[0],
+                        self.data.projMat.shape[1])
+        intMP = np.zeros((len(mu),) + outShape,
+                         dtype = self.data.Ps[0].coeffs.dtype)
+        if mIvals is None:
+            muC = self.centerNormalizeMarginal(mu)
+            mIvals = self.data.marginalInterp(muC)
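+        # Each local numerator contributes its coefficients, weighted by the
+        # marginal interpolation values, at its own support columns.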
+        for P, Psupp, mI in zip(self.data.Ps, self.data.Psupp, mIvals):
+            iL, iR = Psupp, Psupp + P.shape[0]
+            for j, m in enumerate(mI): intMP[j, :, iL : iR] += m * P.coeffs
+        vbMng(self, "DEL", "Done interpolating marginal P.", 95)
+        return intMP
+
+    def interpolateMarginalQ(self, mu : paramList = [],
+                             mIvals : Np2D = None) -> ListAny:
+        """Obtain interpolated approximant denominator."""
+        mu = self.checkParameterListMarginal(mu)
+        vbMng(self, "INIT", "Interpolating marginal Q at mu = {}.".format(mu),
+              95)
+        intMQ = np.zeros((len(mu),) + self.data.Qs[0].coeffs.shape,
+                         dtype = self.data.Qs[0].coeffs.dtype)
+        if mIvals is None:
+            muC = self.centerNormalizeMarginal(mu)
+            mIvals = self.data.marginalInterp(muC)
+        for Q, mI in zip(self.data.Qs, mIvals):
+            for j, m in enumerate(mI): intMQ[j] += m * Q.coeffs
+        vbMng(self, "DEL", "Done interpolating marginal Q.", 95)
+        return intMQ
+
+    def getPVal(self, mu : paramList = [],
+                mIvals : List[int] = None) -> sampList:
+        """
+        Evaluate rational numerator at arbitrary parameter.
+
+        Args:
+            mu: Target parameter.
+            mIvals(optional): Precomputed marginal interpolation weights. If
+                given, mu is interpreted as a pivot-only parameter.
+        """
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        if mIvals is None:
+            mu = self.checkParameterList(mu)
+            muP = self.centerNormalizePivot(mu(self.data.directionPivot))
+            muM = self.centerNormalizeMarginal(mu(self.data.directionMarginal))
+            mIvals = self.data.marginalInterp(muM)
+        else:
+            mu = self.checkParameterListPivot(mu)
+            muP = self.centerNormalizePivot(mu)
+        if self.data._collapsed:
+            outShape = self.data.Ps[0].shape
+        else:
+            outShape = (self.data.projMat.shape[1],)
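+        # Allocate the output, then accumulate the local numerator values
+        # weighted by the marginal interpolation coefficients.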
+        p = np.zeros(outShape + (len(mu),),
+                     dtype = self.data.Ps[0].coeffs.dtype)
+        for P, Psupp, mI in zip(self.data.Ps, self.data.Psupp, mIvals):
+            iL, iR = Psupp, Psupp + P.shape[0]
+            for j, m in enumerate(mI):
+                p[iL : iR, j] += m * P(muP[j])[:, 0]
+        return sampleList(p)
+
+    def getQVal(self, mu : paramList = [], der : List[int] = None,
+                scl : Np1D = None, mIvals : List[int] = None) -> Np1D:
+        """
+        Evaluate rational denominator at arbitrary parameter.
+
+        Args:
+            mu: Target parameter.
+            der(optional): Derivatives to take before evaluation.
+            scl(optional): Scaling factors for the derivatives.
+            mIvals(optional): Precomputed marginal interpolation weights. If
+                given, mu is interpreted as a pivot-only parameter.
+        """
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        if mIvals is None:
+            mu = self.checkParameterList(mu)
+            muP = self.centerNormalizePivot(mu(self.data.directionPivot))
+            muM = self.centerNormalizeMarginal(mu(self.data.directionMarginal))
+            mIvals = self.data.marginalInterp(muM)
+        else:
+            mu = self.checkParameterListPivot(mu)
+            muP = self.centerNormalizePivot(mu)
+        if der is None:
+            derP, derM = 0, [0]
+        else:
+            derP = der[self.data.directionPivot[0]]
+            derM = [der[x] for x in self.data.directionMarginal]
+        if np.any(np.array(derM) != 0):
+            raise RROMPyException(("Derivatives of Q with respect to marginal "
+                                    "parameters not allowed."))
+        sclP = 1 if scl is None else scl[self.data.directionPivot[0]]
+        q = np.zeros(len(mu), dtype = complex)
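+        # Accumulate the local denominator evaluations (optionally
+        # differentiated) with the same marginal interpolation weights.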
+        for Q, mI in zip(self.data.Qs, mIvals):
+            for j, m in enumerate(mI): q[j] = q[j] + m * Q(muP[j], derP, sclP)
+        return q
+
+    def getPoles(self, marginalVals : ListAny = [fp]) -> paramList:
+        """
+        Obtain approximant poles.
+
+        Returns:
+            Numpy complex vector of poles.
+        """
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        mVals = list(marginalVals)
+        rDim = mVals.index(fp)
+        if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]:
+            raise RROMPyException(("Exactly 1 'freepar' entry in "
+                                    "marginalVals must be provided."))
+        if rDim != self.data.directionPivot[0]:
+            raise RROMPyException(("'freepar' entry in marginalVals must "
+                                    "coincide with pivot direction."))
+        mVals[rDim] = self.data.mu0(rDim)[0]
+        mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim]
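+        # Interpolate the denominator at the fixed marginal values, then map
+        # its roots from the scaled pivot variable back to the original range.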
+        Q = copy(self.data.Qs[0])
+        Q.coeffs = self.interpolateMarginalQ(mMarg)[0]
+        roots = self.data.scaleFactor[rDim] * Q.roots()
+        return self.mapParameterList(self.mapParameterList(self.data.mu0(rDim),
+                                                            idx = [rDim])(0, 0)
+                                   + roots, "B", [rDim])(0)
+
+    def getResidues(self, marginalVals : ListAny = [fp]) -> Tuple[paramList,
+                                                                  Np2D]:
+        """
+        Obtain approximant residues.
+
+        Returns:
+            Numpy matrix with residues as columns.
+        """
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        mVals = list(marginalVals)
+        rDim = mVals.index(fp)
+        if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]:
+            raise RROMPyException(("Exactly 1 'freepar' entry in "
+                                    "marginalVals must be provided."))
+        if rDim != self.data.directionPivot[0]:
+            raise RROMPyException(("'freepar' entry in marginalVals must "
+                                    "coincide with pivot direction."))
+        mVals[rDim] = self.data.mu0(rDim)[0]
+        mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim]
+        P, Q = copy(self.data.Ps[0]), copy(self.data.Qs[0])
+        P.coeffs = self.interpolateMarginalP(mMarg)[0]
+        Q.coeffs = self.interpolateMarginalQ(mMarg)[0]
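+        # Convert the interpolated rational approximant to pole-residue
+        # (Heaviside) form to read off poles and residues.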
+        res, pls, _ = rational2heaviside(P, Q)
+        if len(pls) == 0:
+            return pls, np.empty((0, 0), dtype = self.data.Ps[0].coeffs.dtype)
+        res = res[: len(pls), :].T
+        pls = self.mapParameterList(self.mapParameterList(self.data.mu0(rDim),
+                                                          idx = [rDim])(0, 0)
+                                  + self.data.scaleFactor[rDim] * pls, "B",
+                                  [rDim])(0)
+        if not self.data._collapsed: res = dot(self.data.projMat, res).T
+        return pls, res
diff --git a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_nomatch.py b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_nomatch.py
index cc18e84..e7d5e2d 100644
--- a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_nomatch.py
+++ b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_nomatch.py
@@ -1,232 +1,272 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 import numpy as np
 from rrompy.reduction_methods.standard.trained_model.trained_model_rational \
                                                     import TrainedModelRational
 from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, ListAny,
                                          paramVal, paramList, sampList)
 from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp
 from rrompy.utilities.numerical import dot
 from rrompy.utilities.numerical.compress_matrix import compressMatrix
 from rrompy.utilities.poly_fitting.heaviside import rational2heaviside
 from rrompy.utilities.poly_fitting.nearest_neighbor import (
                                             NearestNeighborInterpolator as NNI)
 from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
                                                 RROMPyWarning)
 from rrompy.parameter import checkParameterList
 from rrompy.sampling import emptySampleList
 
 __all__ = ['TrainedModelPivotedRationalNoMatch']
 
 class TrainedModelPivotedRationalNoMatch(TrainedModelRational):
     """
     ROM approximant evaluation for pivoted approximants based on interpolation
         of rational approximants (without pole matching).
     
     Attributes:
         Data: dictionary with all that can be pickled.
     """
 
     def checkParameterListPivot(self, mu:paramList,
                                 check_if_single : bool = False) -> paramList:
         return checkParameterList(mu, self.data.nparPivot, check_if_single)
 
     def checkParameterListMarginal(self, mu:paramList,
                                   check_if_single : bool = False) -> paramList:
         return checkParameterList(mu, self.data.nparMarginal, check_if_single)
 
     def compress(self, collapse : bool = False, tol : float = 0.,
                  returnRMat : bool = False, **compressMatrixkwargs):
         if not collapse and tol <= 0.: return
         RMat = self.data.projMat
         if not collapse:
             if hasattr(self.data, "_compressTol"):
                 RROMPyWarning(("Recompressing already compressed model is "
                                "ineffective. Aborting."))
                 return
             self.data.projMat, RMat, _ = compressMatrix(RMat, tol,
                                                         **compressMatrixkwargs)
         if hasattr(self.data, "Ps"):
             for obj, suppj in zip(self.data.Ps, self.data.Psupp):
                 obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]])
         if hasattr(self, "_PsExcl"):
             for obj, suppj in zip(self._PsExcl, self._PsuppExcl):
                 obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]])
             self._PsuppExcl = [0] * len(self._PsuppExcl)
         self.data.Psupp = [0] * len(self.data.Psupp)
         super(TrainedModelRational, self).compress(collapse, tol)
         if returnRMat: return RMat
 
     def centerNormalizePivot(self, mu : paramList = [],
                              mu0 : paramVal = None) -> paramList:
         """
         Compute normalized parameter to be plugged into approximant.
 
         Args:
             mu: Parameter(s) 1.
             mu0: Parameter(s) 2. If None, set to self.data.mu0Pivot.
 
         Returns:
             Normalized parameter.
         """
         mu = self.checkParameterListPivot(mu)
         if mu0 is None:
-            mu0 = self.checkParameterListPivot(
-                                    self.data.mu0(0, self.data.directionPivot))
+            mu0 = self.data.mu0(self.data.directionPivot)
         return (self.mapParameterList(mu, idx = self.data.directionPivot)
               - self.mapParameterList(mu0, idx = self.data.directionPivot)
                ) / [self.data.scaleFactor[x] for x in self.data.directionPivot]
 
     def setupMarginalInterp(self, interpPars:ListAny):
         self.data.marginalInterp = NNI()
         self.data.marginalInterp.setupByInterpolation(self.data.musMarginal,
                                          np.arange(len(self.data.musMarginal)),
                                          1, *interpPars)
 
     def updateEffectiveSamples(self, exclude:List[int]):
         if hasattr(self, "_idxExcl"):
             for j, excl in enumerate(self._idxExcl):
                 self.data.musMarginal.insert(self._musMExcl[j], excl)
                 self.data.Ps.insert(excl, self._PsExcl[j])
                 self.data.Qs.insert(excl, self._QsExcl[j])
                 self.data.Psupp.insert(excl, self._PsuppExcl[j])
         self._idxExcl, self._musMExcl = list(np.sort(exclude)), []
         self._PsExcl, self._QsExcl, self._PsuppExcl = [], [], []
         for excl in self._idxExcl[::-1]:
             self._musMExcl = [self.data.musMarginal[excl]] + self._musMExcl
             self.data.musMarginal.pop(excl)
             self._PsExcl = [self.data.Ps.pop(excl)] + self._PsExcl
             self._QsExcl = [self.data.Qs.pop(excl)] + self._QsExcl
             self._PsuppExcl = [self.data.Psupp.pop(excl)] + self._PsuppExcl
 
-    def getPVal(self, mu : paramList = []) -> sampList:
+    def getApproxReduced(self, mu : paramList = []) -> sampList:
         """
-        Evaluate rational numerator at arbitrary parameter.
+        Evaluate reduced representation of approximant at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
         mu = self.checkParameterList(mu)
-        muP = self.centerNormalizePivot(mu(self.data.directionPivot))
-        muM = self.checkParameterListMarginal(mu(self.data.directionMarginal))
-        idxMUnique, idxMmap = np.unique(self.data.marginalInterp(muM),
-                                        return_inverse = True)
+        if (not hasattr(self, "lastSolvedApproxReduced")
+         or self.lastSolvedApproxReduced != mu):
+            vbMng(self, "INIT",
+                  "Evaluating approximant at mu = {}.".format(mu), 12)
+            muP = mu(self.data.directionPivot)
+            muM = mu(self.data.directionMarginal)
+            mIvals = self.data.marginalInterp(muM)
+            QV = self.getQVal(muP, mIvals = mIvals)
+            QVzero = np.where(QV == 0.)[0]
+            if len(QVzero) > 0:
+                QV[QVzero] = np.finfo(complex).eps / (1.
+                                                      + self.data.Qs[0].deg[0])
+            self.uApproxReduced = self.getPVal(muP, mIvals = mIvals) / QV
+            vbMng(self, "DEL", "Done evaluating approximant.", 12)
+            self.lastSolvedApproxReduced = mu
+        return self.uApproxReduced
+
+    def getPVal(self, mu : paramList = [],
+                mIvals : List[int] = None) -> sampList:
+        """
+        Evaluate rational numerator at arbitrary parameter.
+
+        Args:
+            mu: Target parameter.
+        """
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        if mIvals is None:
+            mu = self.checkParameterList(mu)
+            muP = self.centerNormalizePivot(mu(self.data.directionPivot))
+            muM = mu(self.data.directionMarginal)
+            mIvals = self.data.marginalInterp(muM)
+        else:
+            mu = self.checkParameterListPivot(mu)
+            muP = self.centerNormalizePivot(mu)
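+        # The marginal "interpolant" is nearest-neighbor, so mIvals holds the
+        # indices of the closest marginal samples; group evaluation points
+        # that share an index so each local model is evaluated only once.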
+        idxMUnique, idxMmap = np.unique(mIvals, return_inverse = True)
         idxMUnique = np.array(idxMUnique, dtype = int)
         p = emptySampleList()
         vbMng(self, "INIT", "Evaluating numerator at mu = {}.".format(mu), 17)
         for i, iM in enumerate(idxMUnique):
             idx = np.where(idxMmap == i)[0]
             Pval, supp = self.data.Ps[iM](muP[idx]), self.data.Psupp[iM]
             if i == 0:
                 if hasattr(self.data.projMat, "shape"):
                     plen = self.data.projMat.shape[1]
                 else:
                     plen = len(Pval)
                 p.reset((plen, len(mu)), dtype = Pval.dtype)
                 p.data[:] = 0.
             p.data[supp : supp + len(Pval), idx] = Pval
         vbMng(self, "DEL", "Done evaluating numerator.", 17)
         return p
 
     def getQVal(self, mu:Np1D, der : List[int] = None,
-                scl : Np1D = None) -> Np1D:
+                scl : Np1D = None, mIvals : List[int] = None) -> Np1D:
         """
         Evaluate rational denominator at arbitrary parameter.
 
         Args:
             mu: Target parameter.
             der(optional): Derivatives to take before evaluation.
         """
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
-        mu = self.checkParameterList(mu)
-        muP = self.centerNormalizePivot(mu(self.data.directionPivot))
-        muM = self.checkParameterListMarginal(mu(self.data.directionMarginal))
+        if mIvals is None:
+            mu = self.checkParameterList(mu)
+            muP = self.centerNormalizePivot(mu(self.data.directionPivot))
+            muM = mu(self.data.directionMarginal)
+            mIvals = self.data.marginalInterp(muM)
+        else:
+            mu = self.checkParameterListPivot(mu)
+            muP = self.centerNormalizePivot(mu)
         if der is None:
             derP, derM = 0, [0]
         else:
             derP = der[self.data.directionPivot[0]]
             derM = [der[x] for x in self.data.directionMarginal]
         if np.any(np.array(derM) != 0):
             raise RROMPyException(("Derivatives of Q with respect to marginal "
                                    "parameters not allowed."))
         sclP = 1 if scl is None else scl[self.data.directionPivot[0]]
-        idxMUnique, idxMmap = np.unique(self.data.marginalInterp(muM),
-                                        return_inverse = True)
+        idxMUnique, idxMmap = np.unique(mIvals, return_inverse = True)
         idxMUnique = np.array(idxMUnique, dtype = int)
         vbMng(self, "INIT", "Evaluating denominator at mu = {}.".format(mu),
               17)
         for i, iM in enumerate(idxMUnique):
             idx = np.where(idxMmap == i)[0]
             Qval = self.data.Qs[iM](muP[idx], derP, sclP)
             if i == 0: q = np.empty(len(mu), dtype = Qval.dtype)
             q[idx] = Qval
         vbMng(self, "DEL", "Done evaluating denominator.", 17)
         return q
 
     def getPoles(self, marginalVals : ListAny = [fp]) -> paramList:
         """
         Obtain approximant poles.
 
         Returns:
             Numpy complex vector of poles.
         """
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
         mVals = list(marginalVals)
         rDim = mVals.index(fp)
         if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]:
             raise RROMPyException(("Exactly 1 'freepar' entry in "
                                    "marginalVals must be provided."))
         if rDim != self.data.directionPivot[0]:
             raise RROMPyException(("'freepar' entry in marginalVals must "
                                    "coincide with pivot direction."))
         mVals[rDim] = self.data.mu0(rDim)[0]
         muM = self.checkParameterListMarginal([mVals[j]
                                       for j in range(len(mVals)) if j != rDim])
         iM = int(self.data.marginalInterp(muM))
         roots = self.data.scaleFactor[rDim] * self.data.Qs[iM].roots()
         return self.mapParameterList(self.mapParameterList(self.data.mu0(rDim),
                                                            idx = [rDim])(0, 0)
                                    + roots, "B", [rDim])(0)
 
-    def getResidues(self, *args, **kwargs) -> Tuple[paramList, Np2D]:
+    def getResidues(self, marginalVals : ListAny = [fp]) -> Tuple[paramList,
+                                                                  Np2D]:
         """
         Obtain approximant residues.
 
         Returns:
             Numpy matrix with residues as columns.
         """
-        pls = self.getPoles(*args, **kwargs)
-        if len(pls) == 0:
-            return pls, np.empty((0, 0), dtype = self.data.Ps[0].coeffs.dtype)
-        if len(args) == 1:
-            mVals = args[0]
-        elif len(args) == 0:
-            mVals = [fp]
-        else:
-            mVals = kwargs["marginalVals"]
+        RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
+        mVals = list(marginalVals)
         rDim = mVals.index(fp)
+        if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]:
+            raise RROMPyException(("Exactly 1 'freepar' entry in "
+                                   "marginalVals must be provided."))
+        if rDim != self.data.directionPivot[0]:
+            raise RROMPyException(("'freepar' entry in marginalVals must "
+                                   "coincide with pivot direction."))
         mVals[rDim] = self.data.mu0(rDim)[0]
         muM = self.checkParameterListMarginal([mVals[j]
                                       for j in range(len(mVals)) if j != rDim])
         iM = int(self.data.marginalInterp(muM))
-        res = rational2heaviside(self.data.Ps[iM], self.data.Qs[iM])[0]
+        res, pls, _ = rational2heaviside(self.data.Ps[iM], self.data.Qs[iM])
+        if len(pls) == 0:
+            return pls, np.empty((0, 0), dtype = self.data.Ps[0].coeffs.dtype)
         res = res[: len(pls), :].T
+        pls = self.mapParameterList(self.mapParameterList(self.data.mu0(rDim),
+                                                          idx = [rDim])(0, 0)
+                                  + self.data.scaleFactor[rDim] * pls, "B",
+                                  [rDim])(0)
         if not self.data._collapsed: res = dot(self.data.projMat, res).T
         return pls, res
diff --git a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py
index 214a359..ace4853 100644
--- a/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py
+++ b/rrompy/reduction_methods/pivoted/trained_model/trained_model_pivoted_rational_polematch.py
@@ -1,563 +1,537 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 import warnings
 import numpy as np
 from scipy.special import factorial as fact
 from scipy.sparse import csr_matrix, hstack, SparseEfficiencyWarning
-from collections.abc import Iterable
 from copy import deepcopy as copy
 from itertools import combinations
 from .trained_model_pivoted_rational_nomatch import (
                                             TrainedModelPivotedRationalNoMatch)
+from .trained_model_pivoted_rational_match import (
+                                              TrainedModelPivotedRationalMatch)
 from rrompy.utilities.base.types import (Tuple, Np1D, Np2D, List, ListAny,
-                                         paramVal, paramList, sampList, HFEng)
+                                         paramList, sampList, HFEng)
 from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp
 from rrompy.utilities.numerical import dot
 from rrompy.utilities.numerical.point_matching import rationalFunctionMatching
 from rrompy.utilities.numerical.degree import reduceDegreeN
 from rrompy.utilities.poly_fitting.polynomial import (polybases as ppb,
                                                   PolynomialInterpolator as PI)
 from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb,
                                                 RadialBasisInterpolator as RBI)
+from rrompy.utilities.poly_fitting.nearest_neighbor import (
+                                            NearestNeighborInterpolator as NNI)
 from rrompy.utilities.poly_fitting.heaviside import (rational2heaviside,
                                                    polyval as heavival,
                                                    heavisideUniformShape,
                                                    HeavisideInterpolator as HI)
-from rrompy.utilities.poly_fitting.nearest_neighbor import (
-                                            NearestNeighborInterpolator as NNI)
 from rrompy.utilities.poly_fitting.piecewise_linear import (sparsekinds,
                                             PiecewiseLinearInterpolator as PLI)
 from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
                                                 RROMPyWarning)
 from rrompy.sampling import sampleList, emptySampleList
 
 __all__ = ['TrainedModelPivotedRationalPoleMatch']
 
-class TrainedModelPivotedRationalPoleMatch(TrainedModelPivotedRationalNoMatch):
+class TrainedModelPivotedRationalPoleMatch(TrainedModelPivotedRationalMatch):
     """
     ROM approximant evaluation for pivoted approximants based on interpolation
         of rational approximants (with pole matching).
     
     Attributes:
         Data: dictionary with all that can be pickled.
     """
 
     def compress(self, collapse : bool = False, tol : float = 0.,
                  returnRMat : bool = False, **compressMatrixkwargs):
         Psupp = copy(self.data.Psupp)
         RMat = super().compress(collapse, tol, True, **compressMatrixkwargs)
         if RMat is None: return
         for j in range(len(self.data.coeffsEff)):
             self.data.coeffsEff[j] = dot(self.data.coeffsEff[j], RMat.T)
         for obj, suppj in zip(self.data.HIs, Psupp):
             obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]])
         if hasattr(self, "_HIsExcl"):
             for obj, suppj in zip(self._HIsExcl, Psupp):
                 obj.postmultiplyTensorize(RMat.T[suppj : suppj + obj.shape[0]])
             if not hasattr(self, "_PsExcl"):
                 self._PsuppExcl = [0] * len(self._PsuppExcl)
         if returnRMat: return RMat
     
-    def centerNormalizeMarginal(self, mu : paramList = [],
-                                mu0 : paramVal = None) -> paramList:
-        """
-        Compute normalized parameter to be plugged into approximant.
-
-        Args:
-            mu: Parameter(s) 1.
-            mu0: Parameter(s) 2. If None, set to self.data.mu0Marginal.
-
-        Returns:
-            Normalized parameter.
-        """
-        mu = self.checkParameterListMarginal(mu)
-        if mu0 is None:
-            mu0 = self.checkParameterListMarginal(
-                                 self.data.mu0(0, self.data.directionMarginal))
-        return (self.mapParameterList(mu, idx = self.data.directionMarginal)
-              - self.mapParameterList(mu0, idx = self.data.directionMarginal)
-               ) / [self.data.scaleFactor[x]
-                                          for x in self.data.directionMarginal]
-
     def setupMarginalInterp(self, approx, interpPars:ListAny, extraPar = None):
         vbMng(self, "INIT", "Starting computation of marginal interpolator.",
               12)
         musMCN = self.centerNormalizeMarginal(self.data.musMarginal)
         nM, pbM = len(musMCN), approx.polybasisMarginal
         if pbM in ppb + rbpb:
             if extraPar: approx._setMMarginalAuto()
             _MMarginalEff = approx.paramsMarginal["MMarginal"]
         if pbM in ppb:
             p = PI()
         elif pbM in rbpb:
             p = RBI()
         else: # if pbM in sparsekinds + ["NEARESTNEIGHBOR"]:
             if pbM == "NEARESTNEIGHBOR":
                 p = NNI()
             else: # if pbM in sparsekinds:
                 pllims = [[-1.] * self.data.nparMarginal,
                           [1.] * self.data.nparMarginal]
                 p = PLI()
         for ipts, pts in enumerate(self.data.suppEffPts):
             if len(pts) == 0:
                 raise RROMPyException("Empty list of support points.")
             musMCNEff, valsEff = musMCN[pts], np.eye(len(pts))
             if pbM in ppb + rbpb:
                 if extraPar:
                     if ipts > 0:
                         verb = approx.verbosity
                         approx.verbosity = 0
                         _musM = approx.musMarginal
                         approx.musMarginal = musMCNEff
                         approx._setMMarginalAuto()
                         approx.musMarginal = _musM
                         approx.verbosity = verb
                 else:
                     approx.paramsMarginal["MMarginal"] = reduceDegreeN(
                          _MMarginalEff, len(musMCNEff), self.data.nparMarginal,
                          approx.paramsMarginal["polydegreetypeMarginal"])
                 MMEff = approx.paramsMarginal["MMarginal"]
                 while MMEff >= 0:
                     wellCond, msg = p.setupByInterpolation(musMCNEff, valsEff,
                                                            MMEff, *interpPars)
                     vbMng(self, "MAIN", msg, 30)
                     if wellCond: break
                     vbMng(self, "MAIN",
                           ("Polyfit is poorly conditioned. Reducing "
                            "MMarginal by 1."), 35)
                     MMEff -= 1
                 if MMEff < 0:
                     raise RROMPyException(("Instability in computation of "
                                            "interpolant. Aborting."))
                 if (pbM in rbpb and len(interpPars) > 4
                 and "optimizeScalingBounds" in interpPars[4].keys()):
                     interpPars[4]["optimizeScalingBounds"] = [-1., -1.]
             elif pbM == "NEARESTNEIGHBOR":
                 if ipts > 0: interpPars[0] = 1
                 p.setupByInterpolation(musMCNEff, valsEff, *interpPars)
             elif ipts == 0: # and pbM in sparsekinds:
                 p.setupByInterpolation(musMCNEff, valsEff, pllims,
                                        extraPar[pts], *interpPars)
             if ipts == 0:
                 self.data.marginalInterp = copy(p)
                 self.data.coeffsEff, self.data.polesEff = [], []
                 N = len(self.data.suppEffIdx)
                 goodIdx = np.where(self.data.suppEffIdx != -1)[0]
                 for hi, sup in zip(self.data.HIs, self.data.Psupp):
                     pEff, cEff = hi.poles.reshape(-1, 1), hi.coeffs
                     cEffH = np.empty((cEff.shape[0], 0))
                     if (self.data._collapsed
                      or self.data.projMat.shape[1] == cEff.shape[1]):
                         cEff = np.hstack([cEff, cEffH])
                     else:
                         supC = self.data.projMat.shape[1] - sup - cEff.shape[1]
                         cEff = hstack((csr_matrix((len(cEff), sup)),
                                        csr_matrix(cEff),
                                        csr_matrix((len(cEff), supC)),
                                        cEffH), "csr")
                     goodIdxC = np.append(goodIdx, np.arange(N, cEff.shape[0]))
                     self.data.coeffsEff += [cEff[goodIdxC, :]]
                     self.data.polesEff += [pEff[goodIdx]]
             else:
                 ptsBad = [i for i in range(nM) if i not in pts]
                 idxBad = np.where(self.data.suppEffIdx[goodIdx] == ipts)[0]
                 warnings.simplefilter('ignore', SparseEfficiencyWarning)
                 if pbM in sparsekinds:
                     for ij, j in enumerate(ptsBad):
                         nearest = pts[np.argmin(np.sum(np.abs(musMCNEff.data
                                             - np.tile(musMCN[j], [len(pts), 1])
                                                 ), axis = 1).flatten())]
                         self.data.coeffsEff[j][idxBad] = copy(
                                           self.data.coeffsEff[nearest][idxBad])
                         self.data.polesEff[j][idxBad] = copy(
                                            self.data.polesEff[nearest][idxBad])
                 else:
                     if (self.data._collapsed
                      or self.data.projMat.shape[1] == cEff.shape[1]):
                         cfBase = np.zeros((len(idxBad), cEff.shape[1]),
                                           dtype = cEff.dtype)
                     else:
                         cfBase = csr_matrix((len(idxBad),
                                              self.data.coeffsEff[0].shape[1]), 
                                             dtype = cEff.dtype)
                     valMuMBad = p(musMCN[ptsBad])
                     for ijb, jb in enumerate(ptsBad):
                         self.data.coeffsEff[jb][idxBad] = copy(cfBase)
                         self.data.polesEff[jb][idxBad] = 0.
                         for ij, j in enumerate(pts):
                             val = valMuMBad[ij][ijb]
                             if not np.isclose(val, 0., atol = 1e-15):
                                 self.data.coeffsEff[jb][idxBad] += (val
                                               * self.data.coeffsEff[j][idxBad])
                                 self.data.polesEff[jb][idxBad] += (val
                                                * self.data.polesEff[j][idxBad])
                 warnings.filters.pop(0)
         if pbM in ppb + rbpb:
             approx.paramsMarginal["MMarginal"] = _MMarginalEff
         vbMng(self, "DEL", "Done computing marginal interpolator.", 12)
 
     def updateEffectiveSamples(self, exclude:List[int], *args, **kwargs):
         if hasattr(self, "_idxExcl"):
             for j, excl in enumerate(self._idxExcl):
                 self.data.HIs.insert(excl, self._HIsExcl[j])
-        super().updateEffectiveSamples(exclude)
+        TrainedModelPivotedRationalNoMatch.updateEffectiveSamples(self,
+                                                                  exclude)
         self._HIsExcl = []
         for excl in self._idxExcl[::-1]:
             self._HIsExcl = [self.data.HIs.pop(excl)] + self._HIsExcl
         poles = [hi.poles for hi in self.data.HIs]
         coeffs = [hi.coeffs for hi in self.data.HIs]
         self.initializeFromLists(poles, coeffs, self.data.Psupp,
                                  self.data.HIs[0].polybasis, *args, **kwargs)
 
     def initializeFromRational(self, *args, **kwargs):
         """Initialize Heaviside representation."""
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
         poles, coeffs = [], []
         for Q, P in zip(self.data.Qs, self.data.Ps):
             cfs, pls, basis = rational2heaviside(P, Q)
             poles += [pls]
             coeffs += [cfs]
         self.initializeFromLists(poles, coeffs, self.data.Psupp, basis, *args,
                                  **kwargs)
 
     def initializeFromLists(self, poles:ListAny, coeffs:ListAny, supps:ListAny,
                             basis:str, matchingWeight:float, HFEngine:HFEng,
                             is_state:bool):
         """Initialize Heaviside representation."""
         poles, coeffs = heavisideUniformShape(poles, coeffs)
-        N = len(poles[0])
         poles, coeffs = rationalFunctionMatching(poles, coeffs,
                                                  self.data.musMarginal.data,
                                                  matchingWeight, supps,
                                                  self.data.projMat, HFEngine,
                                                  is_state, None)
         self.data.HIs = []
         for pls, cfs in zip(poles, coeffs):
             hsi = HI()
             hsi.poles = pls
             if len(cfs) == len(pls):
                 cfs = np.pad(cfs, ((0, 1), (0, 0)), "constant")
             hsi.coeffs = cfs
             hsi.npar = 1
             hsi.polybasis = basis
             self.data.HIs += [hsi]
         self.data.suppEffPts = [np.arange(len(self.data.HIs))]
         self.data.suppEffIdx = np.zeros(len(poles[0]), dtype = int)
 
     def checkShared(self, shared:float, correction : str = "ERASE") -> str:
         N = len(self.data.HIs[0].poles)
         M = len(self.data.HIs)
         correction = correction.upper().strip().replace(" ","")
         if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]:
             RROMPyWarning(("Correction kind not recognized. Overriding to "
                            "'ERASE'."))
             correction = "ERASE"
         goodLocPoles = np.array([np.isinf(hi.poles) == False
                                                       for hi in self.data.HIs])
         self.data.suppEffPts = [np.arange(len(self.data.HIs))]
         self.data.suppEffIdx = - np.ones(N, dtype = int)
         goodGlobPoles = np.sum(goodLocPoles, axis = 0)
         goodEnoughPoles = goodGlobPoles >= max(1., 1. * shared * M)
         keepPole = np.where(goodEnoughPoles)[0]
         halfPole = np.where(goodEnoughPoles * (goodGlobPoles < M))[0]
         self.data.suppEffIdx[keepPole] = 0
         for idxR in halfPole:
             pts = np.where(goodLocPoles[:, idxR])[0]
             idxEff = len(self.data.suppEffPts)
             for idEff, prevPts in enumerate(self.data.suppEffPts):
                 if len(prevPts) == len(pts):
                     if np.allclose(prevPts, pts):
                         idxEff = idEff
                         break
             if idxEff == len(self.data.suppEffPts):
                 self.data.suppEffPts += [pts]
             self.data.suppEffIdx[idxR] = idxEff
         degBad = len(self.data.HIs[0].coeffs) - N - 1
         for pt in range(len(self.data.HIs)):
             idxR = np.where(goodLocPoles[pt] * (goodEnoughPoles == False))[0]
             self.removePoleResLocal(idxR, pt, degBad, correction, True)
         return ("Hard-erased {} pole".format(N - len(keepPole))
               + "s" * (N - len(keepPole) != 1)
               + " and soft-erased {} pole".format(len(halfPole))
               + "s" * (len(halfPole) != 1) + ".")
 
     def removePoleResLocal(self, badidx:List[int], margidx:int,
                            degcorr : int = None, correction : str = "ERASE",
                            hidden : bool = False):
         if not hasattr(badidx, "__len__"): badidx = [badidx]
         badidx = np.array(badidx)
         if len(badidx) == 0: return
         correction = correction.upper().strip().replace(" ","")
         if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]:
             RROMPyWarning(("Correction kind not recognized. Overriding to "
                            "'ERASE'."))
             correction = "ERASE"
         if hidden:
             N = len(self.data.HIs[margidx].poles)
         else:
             N = len(self.data.polesEff[margidx])
         goodidx = [j for j in range(N) if j not in badidx]
         if correction != "ERASE":
             if degcorr is None:
                 if hidden:
                     degcorr = len(self.data.HIs[margidx].coeffs) - N - 1
                 else:
                     degcorr = self.data.coeffsEff[margidx].shape[0] - N - 1
             muM, musEff = self.data.musMarginal[margidx], []
             polybasis = self.data.HIs[margidx].polybasis
             for mu in self.data.mus:
                 if np.allclose(mu(self.data.directionMarginal), muM):
                     musEff += [mu(self.data.directionPivot[0])]
             musEff = self.centerNormalizePivot(musEff)
             if hidden:
                 plsBad = self.data.HIs[margidx].poles[badidx]
             else:
                 plsBad = self.data.polesEff[margidx][badidx, 0]
             plsBadEff = np.isinf(plsBad) == False
             plsBad, badidx = plsBad[plsBadEff], badidx[plsBadEff]
             if hidden:
                 plsGood = self.data.HIs[margidx].poles[goodidx]
                 corrVals = heavival(musEff,
                                     self.data.HIs[margidx].coeffs[badidx],
                                     plsBad, polybasis).T
             else:
                 plsGood = self.data.polesEff[margidx][goodidx]
                 corrVals = heavival(musEff,
                                 self.data.coeffsEff[margidx].toarray()[badidx],
                                 plsBad, polybasis).T
             if correction == "RATIONAL":
                 hi = HI()
                 hi.setupByInterpolation(musEff, plsGood, corrVals, degcorr,
                                         polybasis)
                 if hidden:
                     self.data.HIs[margidx].coeffs[goodidx] += (
                                                      hi.coeffs[: len(goodidx)])
                 else:
                     self.data.coeffsEff[margidx][goodidx, :] += (
                                                      hi.coeffs[: len(goodidx)])
                 polyCorr = hi.coeffs[len(goodidx) :]
             elif correction == "POLYNOMIAL":
                 pi = PI()
                 pi.setupByInterpolation(musEff, corrVals, degcorr,
                                         polybasis.split("_")[0])
                 polyCorr = pi.coeffs
             if hidden:
                 self.data.HIs[margidx].coeffs[N : N + degcorr + 1] += polyCorr
             else:
                 self.data.coeffsEff[margidx][N : N + degcorr + 1, :] += (
                                                                       polyCorr)
         if hidden:
             self.data.HIs[margidx].poles[badidx] = np.inf
             self.data.HIs[margidx].coeffs[badidx] = 0.
         else:
             self.data.polesEff[margidx] = self.data.polesEff[margidx][goodidx]
             goodidx += list(range(N, self.data.coeffsEff[margidx].shape[0]))
             self.data.coeffsEff[margidx] = (
                                       self.data.coeffsEff[margidx][goodidx, :])
 
     def removePoleResGlobal(self, badidx:List[int], degcorr : int = None,
                             correction : str = "ERASE", hidden : bool = False):
         if not hasattr(badidx, "__len__"): badidx = [badidx]
         if len(badidx) == 0: return
         correction = correction.upper().strip().replace(" ","")
         if correction not in ["ERASE", "RATIONAL", "POLYNOMIAL"]:
             RROMPyWarning(("Correction kind not recognized. Overriding to "
                            "'ERASE'."))
             correction = "ERASE"
         for margidx in range(len(self.data.HIs)):
             self.removePoleResLocal(badidx, margidx, degcorr, correction,
                                     hidden)
 
     def getApproxReduced(self, mu : paramList = []) -> sampList:
         """
         Evaluate reduced representation of approximant at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
         mu = self.checkParameterList(mu)
         if (not hasattr(self, "lastSolvedApproxReduced")
          or self.lastSolvedApproxReduced != mu):
             vbMng(self, "INIT",
                   "Evaluating approximant at mu = {}.".format(mu), 12)
             muP = self.centerNormalizePivot(mu(self.data.directionPivot))
             muM = mu(self.data.directionMarginal)
             his = self.interpolateMarginalInterpolator(muM)
             for i, (mP, hi) in enumerate(zip(muP, his)):
                 uAppR = hi(mP)[:, 0]
                 if i == 0:
                     uApproxR = np.empty((len(uAppR), len(mu)),
                                         dtype = uAppR.dtype)
                 uApproxR[:, i] = uAppR
             self.uApproxReduced = sampleList(uApproxR)
             vbMng(self, "DEL", "Done evaluating approximant.", 12)
             self.lastSolvedApproxReduced = mu
         return self.uApproxReduced
 
     def interpolateMarginalInterpolator(self, mu : paramList = []) -> ListAny:
         """Obtain interpolated approximant interpolator."""
         mu = self.checkParameterListMarginal(mu)
         vbMng(self, "INIT",
               "Interpolating marginal models at mu = {}.".format(mu), 95)
         his = []
         muC = self.centerNormalizeMarginal(mu)
         mIvals = self.data.marginalInterp(muC)
         verb, self.verbosity = self.verbosity, 0
         poless = self.interpolateMarginalPoles(mu, mIvals)
         coeffss = self.interpolateMarginalCoeffs(mu, mIvals)
         self.verbosity = verb
         for j in range(len(mu)):
             his += [HI()]
             his[-1].poles = poless[j]
             his[-1].coeffs = coeffss[j]
             his[-1].npar = 1
             his[-1].polybasis = self.data.HIs[0].polybasis
         vbMng(self, "DEL", "Done interpolating marginal models.", 95)
         return his
 
     def interpolateMarginalPoles(self, mu : paramList = [],
                                  mIvals : Np2D = None) -> ListAny:
         """Obtain interpolated approximant poles."""
         mu = self.checkParameterListMarginal(mu)
         vbMng(self, "INIT",
               "Interpolating marginal poles at mu = {}.".format(mu), 95)
         intMPoles = np.zeros((len(mu),) + self.data.polesEff[0].shape,
                              dtype = self.data.polesEff[0].dtype)
         if mIvals is None:
             muC = self.centerNormalizeMarginal(mu)
             mIvals = self.data.marginalInterp(muC)
         for pEff, mI in zip(self.data.polesEff, mIvals):
             for j, m in enumerate(mI): intMPoles[j] += m * pEff
         vbMng(self, "DEL", "Done interpolating marginal poles.", 95)
         return intMPoles[..., 0]
 
     def interpolateMarginalCoeffs(self, mu : paramList = [],
                                   mIvals : Np2D = None) -> ListAny:
         """Obtain interpolated approximant coefficients."""
         mu = self.checkParameterListMarginal(mu)
         vbMng(self, "INIT",
               "Interpolating marginal coefficients at mu = {}.".format(mu), 95)
         intMCoeffs = np.zeros((len(mu),) + self.data.coeffsEff[0].shape,
                               dtype = self.data.coeffsEff[0].dtype)
         if mIvals is None:
             muC = self.centerNormalizeMarginal(mu)
             mIvals = self.data.marginalInterp(muC)
         for cEff, mI in zip(self.data.coeffsEff, mIvals):
             for j, m in enumerate(mI): intMCoeffs[j] += m * cEff
         vbMng(self, "DEL", "Done interpolating marginal coefficients.", 95)
         return intMCoeffs
 
     def getPVal(self, mu : paramList = []) -> sampList:
         """
         Evaluate rational numerator at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
         mu = self.checkParameterList(mu)
         p = emptySampleList()
         muP = self.centerNormalizePivot(mu(self.data.directionPivot))
         muM = mu(self.data.directionMarginal)
         his = self.interpolateMarginalInterpolator(muM)
         for i, (mP, hi) in enumerate(zip(muP, his)):
             Pval = hi(mP) * np.prod(mP[0] - hi.poles)
             if i == 0: p.reset((len(Pval), len(mu)), dtype = Pval.dtype)
             p[i] = Pval
         return p
 
     def getQVal(self, mu:Np1D, der : List[int] = None,
                 scl : Np1D = None) -> Np1D:
         """
         Evaluate rational denominator at arbitrary parameter.
 
         Args:
             mu: Target parameter.
             der(optional): Derivatives to take before evaluation.
         """
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
         mu = self.checkParameterList(mu)
         muP = self.centerNormalizePivot(mu(self.data.directionPivot))
         muM = mu(self.data.directionMarginal)
         if der is None:
             derP, derM = 0, [0]
         else:
             derP = der[self.data.directionPivot[0]]
             derM = [der[x] for x in self.data.directionMarginal]
         if np.any(np.array(derM) != 0):
             raise RROMPyException(("Derivatives of Q with respect to marginal "
                                    "parameters not allowed."))
         sclP = 1 if scl is None else scl[self.data.directionPivot[0]]
         derVal = np.zeros(len(mu), dtype = np.complex)
         pls = self.interpolateMarginalPoles(muM)
         for i, (mP, pl) in enumerate(zip(muP, pls)):
             N = len(pl)
             if derP == N: derVal[i] = 1.
             elif derP >= 0 and derP < N:
                 plDist = mP[0] - pl
                 for terms in combinations(np.arange(N), N - derP):
                     derVal[i] += np.prod(plDist[list(terms)])
         return sclP ** derP * fact(derP) * derVal
 
     def getPoles(self, marginalVals : ListAny = [fp]) -> paramList:
         """
         Obtain approximant poles.
 
         Returns:
             Numpy complex vector of poles.
         """
         RROMPyAssert(self.data.nparPivot, 1, "Number of pivot parameters")
         mVals = list(marginalVals)
         rDim = mVals.index(fp)
         if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]:
             raise RROMPyException(("Exactly 1 'freepar' entry in "
                                    "marginalVals must be provided."))
         if rDim != self.data.directionPivot[0]:
             raise RROMPyException(("'freepar' entry in marginalVals must "
                                    "coincide with pivot direction."))
         mVals[rDim] = self.data.mu0(rDim)[0]
         mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim]
         roots = (self.data.scaleFactor[rDim]
                * self.interpolateMarginalPoles(mMarg)[0])
         return self.mapParameterList(self.mapParameterList(self.data.mu0(rDim),
                                                            idx = [rDim])(0, 0)
                                    + roots, "B", [rDim])(0)
 
-    def getResidues(self, *args, **kwargs) -> Tuple[paramList, Np2D]:
+    def getResidues(self, marginalVals : ListAny = [fp]) -> Tuple[paramList,
+                                                                  Np2D]:
         """
         Obtain approximant residues.
 
         Returns:
             Numpy matrix with residues as columns.
         """
-        pls = self.getPoles(*args, **kwargs)
-        if len(args) == 1:
-            mVals = args[0]
-        elif len(args) == 0:
-            mVals = [None]
-        else:
-            mVals = kwargs["marginalVals"]
-        if not isinstance(mVals, Iterable): mVals = [mVals]
-        mVals = list(mVals)
+        mVals = list(marginalVals)
+        pls = self.getPoles(mVals)
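+        # getPoles already validates marginalVals (a single 'freepar' entry at
+        # the pivot direction) before residues are assembled below.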
         rDim = mVals.index(fp)
         mMarg = [mVals[j] for j in range(len(mVals)) if j != rDim]
         res = self.interpolateMarginalCoeffs(mMarg)[0][: len(pls), :].T
         if not self.data._collapsed: res = dot(self.data.projMat, res).T
         return pls, res
diff --git a/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py b/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py
index 2c105ad..6178753 100644
--- a/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py
+++ b/rrompy/reduction_methods/standard/greedy/generic_greedy_approximant.py
@@ -1,634 +1,633 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from abc import abstractmethod
 from copy import deepcopy as copy
 import numpy as np
 from matplotlib import pyplot as plt
 from rrompy.hfengines.base.linear_affine_engine import checkIfAffine
 from rrompy.reduction_methods.standard.generic_standard_approximant import (
                                                     GenericStandardApproximant)
 from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramVal,
                                          paramList, sampList)
 from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.numerical import dot
 from rrompy.utilities.expression import expressionEvaluator
 from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
                                                 RROMPyWarning)
 from rrompy.sampling.sample_list import sampleList
 from rrompy.parameter import emptyParameterList, parameterList
 from rrompy.utilities.parallel import masterCore
 
 __all__ = ['GenericGreedyApproximant']
 
 def localL2Distance(mus:Np2D, badmus:Np2D) -> Np2D:
     return np.linalg.norm(np.tile(mus[..., np.newaxis], [1, 1, len(badmus)])
                         - badmus[..., np.newaxis].T, axis = 1)
 
 def pruneSamples(mus:paramList, badmus:paramList,
                  tol : float = 1e-8) -> Np1D:
     """Remove from mus all the elements which are too close to badmus."""
     if isinstance(mus, (parameterList, sampleList)): mus = mus.data
     if isinstance(badmus, (parameterList, sampleList)): badmus = badmus.data
     if len(badmus) == 0: return np.arange(len(mus))
     proximity = np.min(localL2Distance(mus, badmus), axis = 1)
     return np.where(proximity <= tol)[0]
 
 class GenericGreedyApproximant(GenericStandardApproximant):
     """
     ROM greedy interpolant computation for parametric problems
         (ABSTRACT).
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'S': number of starting training points;
             - 'sampler': sample point generator;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
                 defaults to 1e-2;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
                 defaults to 0.;
             - 'maxIter': maximum number of greedy steps; defaults to 1e2;
             - 'nTestPoints': number of test points; defaults to 5e2;
             - 'samplerTrainSet': training sample points generator; defaults to
                 sampler.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
 
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         mus: Array of snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
             - 'maxIter': maximum number of greedy steps;
             - 'nTestPoints': number of test points;
             - 'samplerTrainSet': training sample points generator.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of samples current approximant relies upon;
             - 'sampler': sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         S: number of starting training points.
         sampler: Sample point generator.
         greedyTol: Uniform error tolerance for greedy algorithm.
         collinearityTol: Collinearity tolerance for greedy algorithm.
         maxIter: maximum number of greedy steps.
         nTestPoints: number of test points.
         samplerTrainSet: training sample points generator.
         muBounds: list of bounds for parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
     """
     
     def __init__(self, *args, **kwargs):
         self._preInit()
         if not hasattr(self, "_affine_lvl"): self._affine_lvl = []
         self._affine_lvl += [1]
         self._addParametersToList(["greedyTol", "collinearityTol", "maxIter",
                                    "nTestPoints", "samplerTrainSet"],
                                    [1e-2, 0., 1e2, 5e2, "AUTO"])
         super().__init__(*args, **kwargs)
         self._postInit()
 
     @property
     def greedyTol(self):
         """Value of greedyTol."""
         return self._greedyTol
     @greedyTol.setter
     def greedyTol(self, greedyTol):
         if greedyTol < 0:
             raise RROMPyException("greedyTol must be non-negative.")
         if hasattr(self, "_greedyTol") and self.greedyTol is not None:
             greedyTolold = self.greedyTol
         else:
             greedyTolold = -1
         self._greedyTol = greedyTol
         self._approxParameters["greedyTol"] = self.greedyTol
         if greedyTolold != self.greedyTol:
             self.resetSamples()
 
     @property
     def collinearityTol(self):
         """Value of collinearityTol."""
         return self._collinearityTol
     @collinearityTol.setter
     def collinearityTol(self, collinearityTol):
         if collinearityTol < 0:
             raise RROMPyException("collinearityTol must be non-negative.")
         if (hasattr(self, "_collinearityTol")
         and self.collinearityTol is not None):
             collinearityTolold = self.collinearityTol
         else:
             collinearityTolold = -1
         self._collinearityTol = collinearityTol
         self._approxParameters["collinearityTol"] = self.collinearityTol
         if collinearityTolold != self.collinearityTol:
             self.resetSamples()
 
     @property
     def maxIter(self):
         """Value of maxIter."""
         return self._maxIter
     @maxIter.setter
     def maxIter(self, maxIter):
         if maxIter <= 0: raise RROMPyException("maxIter must be positive.")
         if hasattr(self, "_maxIter") and self.maxIter is not None:
             maxIterold = self.maxIter
         else:
             maxIterold = -1
         self._maxIter = maxIter
         self._approxParameters["maxIter"] = self.maxIter
         if maxIterold != self.maxIter:
             self.resetSamples()
 
     @property
     def nTestPoints(self):
         """Value of nTestPoints."""
         return self._nTestPoints
     @nTestPoints.setter
     def nTestPoints(self, nTestPoints):
         if nTestPoints <= 0:
             raise RROMPyException("nTestPoints must be positive.")
         if not np.isclose(nTestPoints, np.int(nTestPoints)):
             raise RROMPyException("nTestPoints must be an integer.")
         nTestPoints = np.int(nTestPoints)
         if hasattr(self, "_nTestPoints") and self.nTestPoints is not None:
             nTestPointsold = self.nTestPoints
         else:
             nTestPointsold = -1
         self._nTestPoints = nTestPoints
         self._approxParameters["nTestPoints"] = self.nTestPoints
         if nTestPointsold != self.nTestPoints:
             self.resetSamples()
 
     @property
     def samplerTrainSet(self):
         """Value of samplerTrainSet."""
         return self._samplerTrainSet
     @samplerTrainSet.setter
     def samplerTrainSet(self, samplerTrainSet):
         if (isinstance(samplerTrainSet, (str,))
         and samplerTrainSet.upper() == "AUTO"):
             samplerTrainSet = self.sampler
         if 'generatePoints' not in dir(samplerTrainSet):
             raise RROMPyException("samplerTrainSet type not recognized.")
         if (hasattr(self, '_samplerTrainSet')
         and self.samplerTrainSet not in [None, "AUTO"]):
             samplerTrainSetOld = self.samplerTrainSet
         self._samplerTrainSet = samplerTrainSet
         self._approxParameters["samplerTrainSet"] = self.samplerTrainSet
         if (not 'samplerTrainSetOld' in locals()
          or samplerTrainSetOld != self.samplerTrainSet):
             self.resetSamples()
 
     def resetSamples(self):
         """Reset samples."""
         super().resetSamples()
         self._mus = emptyParameterList()
 
     def _affineResidualMatricesContraction(self, rb:Np2D, rA : Np2D = None) \
                                                     -> Tuple[Np1D, Np1D, Np1D]:
         self.assembleReducedResidualBlocks(full = rA is not None)
         # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj()
         ff = np.sum(self.trainedModel.data.resbb.dot(rb) * rb.conj(), axis = 0)
         if rA is None: return ff
         # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj()
         Lf = np.sum(np.tensordot(self.trainedModel.data.resAb, rA, 2)
                   * rb.conj(), axis = 0)
         # 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj()
         LL = np.sum(np.tensordot(self.trainedModel.data.resAA, rA, 2)
                   * rA.conj(), axis = (0, 1))
         return ff, Lf, LL
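         # Editor's note -- the three contractions above are equivalent (up to
         # memory layout) to the hedged einsum sketches
         #     ff = np.einsum('ij,jk,ik->k', resbb, rb, rb.conj())
         #     Lf = np.einsum('ijk,jkl,il->l', resAb, rA, rb.conj())
         #     LL = np.einsum('ijkl,klt,ijt->t', resAA, rA, rA.conj())
         # which are the quantities entering the residual-based estimator below.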
 
     def getErrorEstimatorAffine(self, mus:Np1D) -> Np1D:
         """Standard residual estimator."""
         checkIfAffine(self.HFEngine, "apply affinity-based error estimator",
                       False, self._affine_lvl)
         self.HFEngine.buildA()
         self.HFEngine.buildb()
         mus = self.checkParameterList(mus)
         tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0
         uApproxRs = self.getApproxReduced(mus).data
         self.trainedModel.verbosity = tMverb
         muTestEff = self.mapParameterList(mus)
         radiusA = np.empty((len(self.HFEngine.thAs), len(mus)),
                            dtype = np.complex)
         radiusb = np.empty((len(self.HFEngine.thbs), len(mus)),
                            dtype = np.complex)
         for j, thA in enumerate(self.HFEngine.thAs):
             radiusA[j] = expressionEvaluator(thA[0], muTestEff)
         for j, thb in enumerate(self.HFEngine.thbs):
             radiusb[j] = expressionEvaluator(thb[0], muTestEff)
         radiusA = np.expand_dims(uApproxRs, 1) * radiusA
         ff, Lf, LL = self._affineResidualMatricesContraction(radiusb, radiusA)
         err = np.abs((LL - 2. * np.real(Lf) + ff) / ff) ** .5
         return err
 
     def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D:
         setupOK = self.setupApproxLocal()
         if setupOK > 0:
             err = np.empty(len(mus))
             err[:] = np.nan
             if not return_max: return err
             return err, [- setupOK], np.nan
         mus = self.checkParameterList(mus)
         vbMng(self.trainedModel, "INIT",
               "Evaluating error estimator at mu = {}.".format(mus), 10)
         err = self.getErrorEstimatorAffine(mus)
         vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10)
         if not return_max: return err
         idxMaxEst = [np.argmax(err)]
         return err, idxMaxEst, err[idxMaxEst]
 
     def _isLastSampleCollinear(self) -> bool:
         """Check collinearity of last sample."""
         if self.collinearityTol <= 0.: return False
         if self.POD == 1:
             reff = self.samplingEngine.Rscale[:, -1]
         else:
             RROMPyWarning(("Repeated orthogonalization of the samples for "
                            "collinearity check. Consider setting POD to "
                            "True."))
             if not hasattr(self, "_PODEngine"):
                 from rrompy.sampling import PODEngine
                 self._PODEngine = PODEngine(self.HFEngine)
             reff = self._PODEngine.generalizedQR(self.samplingEngine.samples,
                                                  only_R = True,
                                                  is_state = True)[:, -1]
         cLevel = np.abs(reff[-1]) / np.linalg.norm(reff)
         cLevel = np.inf if np.isclose(cLevel, 0., atol = 1e-15) else 1 / cLevel
         vbMng(self, "MAIN", "Collinearity indicator {:.4e}.".format(cLevel), 3)
         return cLevel > self.collinearityTol
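         # Editor's note -- cLevel is the reciprocal of the relative size of
         # the component of the new sample orthogonal to the previous ones: for
         # instance, a new sample whose orthogonal component is only 1% of its
         # norm yields cLevel of about 1e2, and the sample is flagged as
         # collinear whenever cLevel exceeds collinearityTol.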
 
     def plotEstimator(self, est:Np1D, idxMax:List[int], estMax:List[float]):
         if (not (np.any(np.isnan(est)) or np.any(np.isinf(est)))
         and masterCore()):
             fig = plt.figure(figsize = plt.figaspect(1. / self.npar))
             for jpar in range(self.npar):
                 ax = fig.add_subplot(1, self.npar, 1 + jpar)
                 musre = np.array(self.muTest.re.data)
                 errCP = copy(est)
                 idx = np.delete(np.arange(self.npar), jpar)
                 while len(musre) > 0:
                     if self.npar == 1:
                         currIdx = np.arange(len(musre))
                     else:
                         currIdx = np.where(np.isclose(np.sum(
                                      np.abs(musre[:, idx] - musre[0, idx]), 1),
                                                       0., atol = 1e-15))[0]
                     ax.semilogy(musre[currIdx, jpar], errCP[currIdx], 'k',
                                 linewidth = 1)
                     musre = np.delete(musre, currIdx, 0)
                     errCP = np.delete(errCP, currIdx)
                 ax.semilogy([self.muBounds.re(0, jpar),
                              self.muBounds.re(-1, jpar)],
                             [self.greedyTol] * 2, 'r--')
                 ax.semilogy(self.mus.re(jpar),
                             2. * self.greedyTol * np.ones(len(self.mus)), '*m')
                 if len(idxMax) > 0 and estMax is not None:
                     ax.semilogy(self.muTest.re(idxMax, jpar), estMax, 'xr')
                 ax.set_xlim(*list(self.sampler.lims.re(jpar)))
                 ax.grid()
             plt.tight_layout()
             plt.show()
     
     def greedyNextSample(self, muidx:int, plotEst : str = "NONE")\
                                           -> Tuple[Np1D, int, float, paramVal]:
         """Compute next greedy snapshot of solution map."""
         RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
         mus = copy(self.muTest[muidx])
         self.muTest.pop(muidx)
         for j, mu in enumerate(mus):
             vbMng(self, "MAIN",
                   ("Adding sample point no. {} at {} to training "
                    "set.").format(len(self.mus) + 1, mu), 3)
             self.mus.append(mu)
             self._S = len(self.mus)
             self._approxParameters["S"] = self.S
             if (self.samplingEngine.nsamples <= len(mus) - j - 1
              or not np.allclose(mu, self.samplingEngine.mus[j - len(mus)])):
                 self.samplingEngine.nextSample(mu)
             if self._isLastSampleCollinear():
                 vbMng(self, "MAIN",
                       ("Collinearity above tolerance detected. Starting "
                        "preemptive greedy loop termination."), 3)
                 self._collinearityFlag = 1
                 errorEstTest = np.empty(len(self.muTest))
                 errorEstTest[:] = np.nan
                 return errorEstTest, [-1], np.nan, np.nan
         errorEstTest, muidx, maxErrorEst = self.errorEstimator(self.muTest,
                                                                True)
         if plotEst == "ALL":
             self.plotEstimator(errorEstTest, muidx, maxErrorEst)
         return errorEstTest, muidx, maxErrorEst, self.muTest[muidx]
 
     def _preliminaryTraining(self):
         """Initialize starting snapshots of solution map."""
         RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
         if self.samplingEngine.nsamples > 0: return
         self.resetSamples()
         self.computeScaleFactor()
         self.samplingEngine.scaleFactor = self.scaleFactorDer
         self.mus = self.samplerTrainSet.generatePoints(self.S)
         while len(self.mus) > self.S: self.mus.pop()
         muTestBase = self.sampler.generatePoints(self.nTestPoints, False)
         idxPop = pruneSamples(self.mapParameterList(muTestBase),
                               self.mapParameterList(self.mus),
                               1e-10 * self.scaleFactor[0])
         muTestBase.pop(idxPop)
         muLast = copy(self.mus[-1])
         self.mus.pop()
         if len(self.mus) > 0:
             vbMng(self, "MAIN", 
                   ("Adding first {} sample point{} at {} to training "
                    "set.").format(self.S - 1, "" + "s" * (self.S > 2),
                                   self.mus), 3)
             self.samplingEngine.iterSample(self.mus)
         self._S = len(self.mus)
         self._approxParameters["S"] = self.S
         self.muTest = emptyParameterList()
         self.muTest.reset((len(muTestBase) + 1, self.mus.shape[1]))
         self.muTest.data[: -1] = muTestBase.data
         self.muTest.data[-1] = muLast.data
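         # Editor's note -- the last of the S starting points is deliberately
         # held back above and appended at the end of muTest, so that the first
         # pass of the greedy loop (which starts from muidx = [len(muTest) - 1]
         # in setupApprox) adds it back as the first "greedy" snapshot.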
 
     @abstractmethod
     def setupApproxLocal(self) -> int:
         if self.checkComputedApprox(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
         vbMng(self, "INIT", "Setting up local approximant.", 5)
         pass
         vbMng(self, "DEL", "Done setting up local approximant.", 5)
         return 0
 
     def addSamplePoints(self, mus:paramList):
         """Add sample points to reduced model."""
         raise RROMPyException("Cannot add samples to greedy reduced model.")
 
-    _postGreedyRecover = 1
     def setupApprox(self, plotEst : str = "NONE") -> int:
         """Compute greedy snapshots of solution map."""
         if self.checkComputedApprox(): return -1
         RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
         vbMng(self, "INIT", "Setting up {}.". format(self.name()), 5)
         vbMng(self, "INIT", "Starting computation of snapshots.", 5)
         self._collinearityFlag = 0
         self._preliminaryTraining()
         muidx, self.firstGreedyIter = [len(self.muTest) - 1], True
         errorEstTest, maxErrorEst = [np.inf], np.inf
         max2ErrorEst, trainedModelOld = np.inf, None
         while self.firstGreedyIter or (len(self.muTest) > 0
                      and (maxErrorEst is None or max2ErrorEst > self.greedyTol)
                      and self.samplingEngine.nsamples < self.maxIter):
             muTestOld, errorEstTestOld = self.muTest, errorEstTest
             muidxOld, maxErrorEstOld = muidx, maxErrorEst
             errorEstTest, muidx, maxErrorEst, mu = self.greedyNextSample(muidx,
                                                                        plotEst)
             if maxErrorEst is not None and (np.any(np.isnan(maxErrorEst))
                                          or np.any(np.isinf(maxErrorEst))):
                 if self._collinearityFlag == 0 and not self.firstGreedyIter:
                     RROMPyWarning(("Instability in a posteriori "
                                    "estimator. Starting preemptive greedy "
                                    "loop termination."))
                 self.muTest, errorEstTest = muTestOld, errorEstTestOld
                 if self.firstGreedyIter and muidx[0] < 0:
                     self.trainedModel = None
                     if self._collinearityFlag:
                         raise RROMPyException(("Starting sample points too "
                                                "collinear. Aborting greedy "
                                                "iterations."))
                     raise RROMPyException(("Instability in approximant "
                                            "computation. Aborting greedy "
                                            "iterations."))
                 self._S = trainedModelOld.data.approxParameters["S"]
                 self._approxParameters["S"] = self.S
                 while self.samplingEngine.nsamples > self.S:
                     self.samplingEngine.popSample()
                 while len(self.mus) > self.S: self.mus.pop(-1)
                 muidx, maxErrorEst = muidxOld, maxErrorEstOld
                 break
             if maxErrorEst is not None:
                 max2ErrorEst = np.max(maxErrorEst)
                 vbMng(self, "MAIN", ("Uniform testing error estimate "
                                      "{:.4e}.").format(max2ErrorEst), 5)
             if self.firstGreedyIter:
                 trainedModelOld = copy(self.trainedModel)
             else:
                 trainedModelOld.data = copy(self.trainedModel.data)
             self.firstGreedyIter = False
         vbMng(self, "DEL", ("Done computing snapshots (final snapshot count: "
                             "{}).").format(self.samplingEngine.nsamples), 5)
         if (maxErrorEst is None or np.any(np.isnan(maxErrorEst))
                                 or np.any(np.isinf(maxErrorEst))):
             while self.samplingEngine.nsamples > self.S:
                 self.samplingEngine.popSample()
             while len(self.mus) > self.S: self.mus.pop(-1)
-        elif self._postGreedyRecover:
+        else:
             self._S = self.samplingEngine.nsamples
             while len(self.mus) < self.S:
                 self.mus.append(self.samplingEngine.mus[len(self.mus)])
             self.trainedModel = None
             self.setupApproxLocal()
         if plotEst == "LAST":
             self.plotEstimator(errorEstTest, muidx, maxErrorEst)
         vbMng(self, "DEL", "Done setting up approximant.", 5)
         return 0
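         # Editor's note -- a hedged driver sketch for a concrete subclass: the
         # whole greedy loop is triggered by a single call such as
         #     approx.setupApprox(plotEst = "LAST")
         # which keeps adding snapshots until the estimator drops below
         # greedyTol, maxIter snapshots are reached, or the test set is
         # exhausted, and then plots the final estimator if requested.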
 
     def assembleReducedResidualGramian(self, pMat:sampList):
         """
         Build residual gramian of reduced linear system through projections.
         """
         if (not hasattr(self.trainedModel.data, "gramian")
          or self.trainedModel.data.gramian is None):
             gramian = self.HFEngine.innerProduct(pMat, pMat, dual = True)
         else:
             Sold = self.trainedModel.data.gramian.shape[0]
             S = len(self.mus)
             if Sold > S:
                 gramian = self.trainedModel.data.gramian[: S, : S]
             else:
                 idxOld = list(range(Sold))
                 idxNew = list(range(Sold, S))
                 gramian = np.empty((S, S), dtype = np.complex)
                 gramian[: Sold, : Sold] = self.trainedModel.data.gramian
                 gramian[: Sold, Sold :] = self.HFEngine.innerProduct(
                                        pMat(idxNew), pMat(idxOld), dual = True)
                 gramian[Sold :, : Sold] = gramian[: Sold, Sold :].T.conj()
                 gramian[Sold :, Sold :] = self.HFEngine.innerProduct(
                                        pMat(idxNew), pMat(idxNew), dual = True)
         self.trainedModel.data.gramian = gramian
 
     def assembleReducedResidualBlocksbb(self, bs:List[Np1D]):
         """
         Build blocks (of type bb) of reduced linear system through projections.
         """
         nbs = len(bs)
         if (not hasattr(self.trainedModel.data, "resbb")
          or self.trainedModel.data.resbb is None):
             resbb = np.empty((nbs, nbs), dtype = np.complex)
             for i in range(nbs):
                 Mbi = bs[i]
                 resbb[i, i] = self.HFEngine.innerProduct(Mbi, Mbi, dual = True)
                 for j in range(i):
                     Mbj = bs[j]
                     resbb[i, j] = self.HFEngine.innerProduct(Mbj, Mbi,
                                                              dual = True)
             for i in range(nbs):
                 for j in range(i + 1, nbs):
                     resbb[i, j] = resbb[j, i].conj()
             self.trainedModel.data.resbb = resbb
 
     def assembleReducedResidualBlocksAb(self, As:List[Np2D], bs:List[Np1D],
                                         pMat:sampList):
         """
         Build blocks (of type Ab) of reduced linear system through projections.
         """
         nAs = len(As)
         nbs = len(bs)
         S = len(self.mus)
         if (not hasattr(self.trainedModel.data, "resAb")
          or self.trainedModel.data.resAb is None):
             if isinstance(pMat, (parameterList, sampleList)): pMat = pMat.data
             resAb = np.empty((nbs, S, nAs), dtype = np.complex)
             for j in range(nAs):
                 MAj = dot(As[j], pMat)
                 for i in range(nbs):
                     Mbi = bs[i]
                     resAb[i, :, j] = self.HFEngine.innerProduct(MAj, Mbi,
                                                                 dual = True)
         else:
             Sold = self.trainedModel.data.resAb.shape[1]
             if Sold == S: return
             if Sold > S:
                 resAb = self.trainedModel.data.resAb[:, : S, :]
             else:
                 if isinstance(pMat, (parameterList, sampleList)):
                     pMat = pMat.data
                 resAb = np.empty((nbs, S, nAs), dtype = np.complex)
                 resAb[:, : Sold, :] = self.trainedModel.data.resAb
                 for j in range(nAs):
                     MAj = dot(As[j], pMat[:, Sold :])
                     for i in range(nbs):
                         Mbi = bs[i]
                         resAb[i, Sold :, j] = self.HFEngine.innerProduct(
                                                          MAj, Mbi, dual = True)
         self.trainedModel.data.resAb = resAb
 
     def assembleReducedResidualBlocksAA(self, As:List[Np2D], pMat:sampList):
         """
         Build blocks (of type AA) of reduced linear system through projections.
         """
         nAs = len(As)
         S = len(self.mus)
         if (not hasattr(self.trainedModel.data, "resAA")
          or self.trainedModel.data.resAA is None):
             if isinstance(pMat, (parameterList, sampleList)): pMat = pMat.data
             resAA = np.empty((S, nAs, S, nAs), dtype = np.complex)
             for i in range(nAs):
                 MAi = dot(As[i], pMat)
                 resAA[:, i, :, i] = self.HFEngine.innerProduct(MAi, MAi,
                                                                dual = True)
                 for j in range(i):
                     MAj = dot(As[j], pMat)
                     resAA[:, i, :, j] = self.HFEngine.innerProduct(MAj, MAi,
                                                                    dual = True)
             for i in range(nAs):
                 for j in range(i + 1, nAs):
                     resAA[:, i, :, j] = resAA[:, j, :, i].T.conj()
         else:
             Sold = self.trainedModel.data.resAA.shape[0]
             if Sold == S: return
             if Sold > S:
                 resAA = self.trainedModel.data.resAA[: S, :, : S, :]
             else:
                 if isinstance(pMat, (parameterList, sampleList)):
                     pMat = pMat.data
                 resAA = np.empty((S, nAs, S, nAs), dtype = np.complex)
                 resAA[: Sold, :, : Sold, :] = self.trainedModel.data.resAA
                 for i in range(nAs):
                     MAi = dot(As[i], pMat)
                     resAA[: Sold, i, Sold :, i] = self.HFEngine.innerProduct(
                                    MAi[:, Sold :], MAi[:, : Sold], dual = True)
                     resAA[Sold :, i, : Sold, i] = resAA[: Sold, i,
                                                         Sold :, i].T.conj()
                     resAA[Sold :, i, Sold :, i] = self.HFEngine.innerProduct(
                                    MAi[:, Sold :], MAi[:, Sold :], dual = True)
                     for j in range(i):
                         MAj = dot(As[j], pMat)
                         resAA[: Sold, i, Sold :, j] = (
                                      self.HFEngine.innerProduct(MAj[:, Sold :],
                                                                 MAi[:, : Sold],
                                                                 dual = True))
                         resAA[Sold :, i, : Sold, j] = (
                                      self.HFEngine.innerProduct(MAj[:, : Sold],
                                                                 MAi[:, Sold :],
                                                                 dual = True))
                         resAA[Sold :, i, Sold :, j] = (
                                      self.HFEngine.innerProduct(MAj[:, Sold :],
                                                                 MAi[:, Sold :],
                                                                 dual = True))
                 for i in range(nAs):
                     for j in range(i + 1, nAs):
                         resAA[: Sold, i, Sold :, j] = (
                                           resAA[Sold :, j, : Sold, i].T.conj())
                         resAA[Sold :, i, : Sold, j] = (
                                           resAA[: Sold, j, Sold :, i].T.conj())
                         resAA[Sold :, i, Sold :, j] = (
                                           resAA[Sold :, j, Sold :, i].T.conj())
         self.trainedModel.data.resAA = resAA
 
     def assembleReducedResidualBlocks(self, full : bool = False):
         """Build affine blocks of affine decomposition of residual."""
         if full:
             checkIfAffine(self.HFEngine, "assemble reduced residual blocks",
                           False, self._affine_lvl)
         else:
             checkIfAffine(self.HFEngine, "assemble reduced RHS blocks", True,
                           self._affine_lvl)
         self.HFEngine.buildb()
         self.assembleReducedResidualBlocksbb(self.HFEngine.bs)
         if full:
             pMat = self.samplingEngine.projectionMatrix
             self.HFEngine.buildA()
             self.assembleReducedResidualBlocksAb(self.HFEngine.As,
                                                  self.HFEngine.bs, pMat)
             self.assembleReducedResidualBlocksAA(self.HFEngine.As, pMat)
diff --git a/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py b/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py
index 1a8af24..a929635 100644
--- a/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py
+++ b/rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py
@@ -1,503 +1,456 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from copy import deepcopy as copy
 import numpy as np
 from rrompy.hfengines.base.linear_affine_engine import checkIfAffine
 from .generic_greedy_approximant import GenericGreedyApproximant
 from rrompy.utilities.poly_fitting.polynomial import (polybases, polyfitname,
                                                   PolynomialInterpolator as PI,
                                                   polyvander)
 from rrompy.utilities.numerical import dot
-from rrompy.utilities.numerical.degree import totalDegreeN
 from rrompy.utilities.expression import expressionEvaluator
 from rrompy.reduction_methods.standard import RationalInterpolant
 from rrompy.utilities.base.types import Np1D, Tuple, paramVal, List
 from rrompy.utilities.base.verbosity_depth import verbosityManager as vbMng
 from rrompy.utilities.poly_fitting import customFit
 from rrompy.utilities.exception_manager import (RROMPyWarning, RROMPyException,
                                                 RROMPyAssert, RROMPy_FRAGILE)
 from rrompy.sampling import sampleList, emptySampleList
 
 __all__ = ['RationalInterpolantGreedy']
 
 class RationalInterpolantGreedy(GenericGreedyApproximant, RationalInterpolant):
     """
     ROM greedy rational interpolant computation for parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'S': number of starting training points;
             - 'sampler': sample point generator;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
                 defaults to 1e-2;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
                 defaults to 0.;
             - 'maxIter': maximum number of greedy steps; defaults to 1e2;
             - 'nTestPoints': number of test points; defaults to 5e2;
             - 'samplerTrainSet': training sample points generator; defaults to
                 sampler;
             - 'polybasis': type of basis for interpolation; defaults to
                 'MONOMIAL';
             - 'errorEstimatorKind': kind of error estimator; available values
                 include 'AFFINE', 'DISCREPANCY', 'LOOK_AHEAD',
                 'LOOK_AHEAD_RES', 'LOOK_AHEAD_OUTPUT', and 'NONE'; defaults to
                 'DISCREPANCY';
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for interpolation; defaults to None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         mus: Array of snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
             - 'collinearityTol': collinearity tolerance for greedy algorithm;
             - 'maxIter': maximum number of greedy steps;
             - 'nTestPoints': number of test points;
             - 'samplerTrainSet': training sample points generator;
             - 'errorEstimatorKind': kind of error estimator;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for interpolation;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of samples current approximant relies upon;
             - 'sampler': sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         S: number of starting training points.
         sampler: Sample point generator.
         greedyTol: uniform error tolerance for greedy algorithm.
         collinearityTol: Collinearity tolerance for greedy algorithm.
         maxIter: maximum number of greedy steps.
         nTestPoints: number of test points.
         samplerTrainSet: training sample points generator.
         errorEstimatorKind: kind of error estimator.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: tolerance for interpolation.
         QTol: tolerance for robust rational denominator management.
         muBounds: list of bounds for parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
     """
     
     _allowedEstimatorKinds = ["AFFINE", "DISCREPANCY", "LOOK_AHEAD",
                               "LOOK_AHEAD_RES", "LOOK_AHEAD_OUTPUT", "NONE"]
     
     def __init__(self, *args, **kwargs):
         self._preInit()
         self._addParametersToList(["errorEstimatorKind"], ["DISCREPANCY"],
-                                  toBeExcluded = ["M", "N", "polydegreetype", 
+                                  toBeExcluded = ["M", "N",
                                                   "radialDirectionalWeights"])
         super().__init__(*args, **kwargs)
         self._postInit()
 
     @property
     def E(self):
         """Value of E."""
-        self._E = self.sampleBatchIdx - 1
+        self._E = self.S - 1
         return self._E
     @E.setter
     def E(self, E):
         RROMPyWarning(("E is used just to simplify inheritance, and its value "
-                       "cannot be changed from that of sampleBatchIdx - 1."))
+                       "cannot be changed from that of S - 1."))
         
     def _setMAuto(self):
         self.M = self.E
 
     def _setNAuto(self):
         self.N = self.E
 
-    @property
-    def polydegreetype(self):
-        """Value of polydegreetype."""
-        return "TOTAL"
-    @polydegreetype.setter
-    def polydegreetype(self, polydegreetype):
-        RROMPyWarning(("polydegreetype is used just to simplify inheritance, "
-                       "and its value cannot be changed from 'TOTAL'."))
-        
     @property
     def polybasis(self):
         """Value of polybasis."""
         return self._polybasis
     @polybasis.setter
     def polybasis(self, polybasis):
         try:
             polybasis = polybasis.upper().strip().replace(" ","")
             if polybasis not in polybases: 
                 raise RROMPyException("Sample type not recognized.")
             self._polybasis = polybasis
         except:
             RROMPyWarning(("Prescribed polybasis not recognized. Overriding "
                            "to 'MONOMIAL'."))
             self._polybasis = "MONOMIAL"
         self._approxParameters["polybasis"] = self.polybasis
 
     @property
     def errorEstimatorKind(self):
         """Value of errorEstimatorKind."""
         return self._errorEstimatorKind
     @errorEstimatorKind.setter
     def errorEstimatorKind(self, errorEstimatorKind):
         errorEstimatorKind = errorEstimatorKind.upper()
         if errorEstimatorKind not in self._allowedEstimatorKinds:
             RROMPyWarning(("Error estimator kind not recognized. Overriding "
                            "to 'NONE'."))
             errorEstimatorKind = "NONE"
         self._errorEstimatorKind = errorEstimatorKind
         self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind
 
     def _polyvanderAuxiliary(self, mus, deg, *args):
         return polyvander(mus, deg, *args)
 
     def getErrorEstimatorDiscrepancy(self, mus:Np1D) -> Np1D:
         """Discrepancy-based residual estimator."""
         checkIfAffine(self.HFEngine, "apply discrepancy-based error estimator",
                       False, self._affine_lvl)
         mus = self.checkParameterList(mus)
         muCTest = self.trainedModel.centerNormalize(mus)
         tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0
         QTest = self.trainedModel.getQVal(mus)
         QTzero = np.where(QTest == 0.)[0]
         if len(QTzero) > 0:
             RROMPyWarning(("Adjusting estimator to avoid division by "
                            "numerically zero denominator."))
             QTest[QTzero] = np.finfo(np.complex).eps / (1. + self.N)
         self.HFEngine.buildA()
         self.HFEngine.buildb()
         nAs, nbs = self.HFEngine.nAs, self.HFEngine.nbs
         muTrainEff = self.mapParameterList(self.mus)
         muTestEff = self.mapParameterList(mus)
         PTrain = self.trainedModel.getPVal(self.mus).data.T
         QTrain = self.trainedModel.getQVal(self.mus)
         QTzero = np.where(QTrain == 0.)[0]
         if len(QTzero) > 0:
             RROMPyWarning(("Adjusting estimator to avoid division by "
                            "numerically zero denominator."))
             QTrain[QTzero] = np.finfo(np.complex).eps / (1. + self.N)
         PTest = self.trainedModel.getPVal(mus).data
         self.trainedModel.verbosity = tMverb
         radiusAbTrain = np.empty((self.S, nAs * self.S + nbs),
                                  dtype = np.complex)
         radiusA = np.empty((self.S, nAs, len(mus)), dtype = np.complex)
         radiusb = np.empty((nbs, len(mus)), dtype = np.complex)
         for j, thA in enumerate(self.HFEngine.thAs):
             idxs = j * self.S + np.arange(self.S)
             radiusAbTrain[:, idxs] = expressionEvaluator(thA[0], muTrainEff,
                                                          (self.S, 1)) * PTrain
             radiusA[:, j] = PTest * expressionEvaluator(thA[0], muTestEff,
                                                         (len(mus),))
         for j, thb in enumerate(self.HFEngine.thbs):
             idx = nAs * self.S + j
             radiusAbTrain[:, idx] = QTrain * expressionEvaluator(thb[0],
                                                          muTrainEff, (self.S,))
             radiusb[j] = QTest * expressionEvaluator(thb[0], muTestEff,
                                                      (len(mus),))
         QRHSNorm2 = self._affineResidualMatricesContraction(radiusb)
         vanTrain = self._polyvanderAuxiliary(self._musUniqueCN, self.E,
                                              self.polybasis0, self._derIdxs,
                                              self._reorder)
         interpPQ = customFit(vanTrain, radiusAbTrain, rcond = self.interpTol)
         vanTest = self._polyvanderAuxiliary(muCTest, self.E, self.polybasis0)
         DradiusAb = vanTest.dot(interpPQ)
         radiusA = (radiusA
                  - DradiusAb[:, : - nbs].reshape(len(mus), -1, self.S).T)
         radiusb = radiusb - DradiusAb[:, - nbs :].T
         ff, Lf, LL = self._affineResidualMatricesContraction(radiusb, radiusA)
         err = np.abs((LL - 2. * np.real(Lf) + ff) / QRHSNorm2) ** .5
         return err
     
     def getErrorEstimatorLookAhead(self, mus:Np1D,
                                    what : str = "") -> Tuple[Np1D, List[int]]:
         """Residual estimator based on look-ahead idea."""
         errTest, QTest, idxMaxEst = self._EIMStep(mus)
         mu_muTestS = mus[idxMaxEst]
         app_muTestSample = self.trainedModel.getApproxReduced(mu_muTestS)
         if self._mode == RROMPy_FRAGILE:
             if what == "RES" and not self.HFEngine.isCEye:
                 raise RROMPyException(("Cannot compute LOOK_AHEAD_RES "
                                        "estimator in fragile mode for "
                                        "non-scalar C."))
             app_muTestSample = dot(self.trainedModel.data.projMat[:,
                                                   : app_muTestSample.shape[0]],
                                    app_muTestSample)
         else:
             app_muTestSample = dot(self.samplingEngine.projectionMatrix,
                                    app_muTestSample)
         app_muTestSample = sampleList(app_muTestSample)
         if what == "RES":
             errmu = self.HFEngine.residual(mu_muTestS, app_muTestSample,
                                            post_c = False)
             solmu = self.HFEngine.residual(mu_muTestS, None, post_c = False)
             normSol = self.HFEngine.norm(solmu, dual = True)
             normErr = self.HFEngine.norm(errmu, dual = True)
         else:
             for j, mu in enumerate(mu_muTestS):
                 uEx = self.samplingEngine.nextSample(mu)
                 if what == "OUTPUT":
                     uEx = self.HFEngine.applyC(uEx, mu)
                     app_muTS = self.HFEngine.applyC(app_muTestSample[j], mu)
                     if j == 0:
                         app_muTestS = emptySampleList()
                         app_muTestS.reset((len(app_muTS), len(mu_muTestS)),
                                           dtype = app_muTS.dtype)
                     app_muTestS[j] = app_muTS
                 if j == 0:
                     solmu = emptySampleList()
                     solmu.reset((len(uEx), len(mu_muTestS)), dtype = uEx.dtype)
                 solmu[j] = uEx
             if what == "OUTPUT": app_muTestSample = app_muTestS
             errmu = solmu - app_muTestSample
             normSol = self.HFEngine.norm(solmu, is_state = what != "OUTPUT")
             normErr = self.HFEngine.norm(errmu, is_state = what != "OUTPUT")
         errsamples = normErr / normSol
         musT = copy(self.mus)
         musT.append(mu_muTestS)
         musT = self.trainedModel.centerNormalize(musT)
         musC = self.trainedModel.centerNormalize(mus)
         errT = np.zeros((len(musT), len(mu_muTestS)), dtype = np.complex)
         errT[np.arange(len(self.mus), len(musT)),
              np.arange(len(mu_muTestS))] = errsamples * QTest[idxMaxEst]
         vanT = self._polyvanderAuxiliary(musT, self.E + 1, self.polybasis)
         fitOut = customFit(vanT, errT, full = True, rcond = self.interpTol)
         vbMng(self, "MAIN",
               ("Fitting {} samples with degree {} through {}... Conditioning "
                "of LS system: {:.4e}.").format(len(vanT), self.E + 1,
                                        polyfitname(self.polybasis),
                                        fitOut[1][2][0] / fitOut[1][2][-1]), 15)
         vanC = self._polyvanderAuxiliary(musC, self.E + 1, self.polybasis)
         err = np.sum(np.abs(vanC.dot(fitOut[0])), axis = -1) / QTest
         return err, idxMaxEst
     
     def getErrorEstimatorNone(self, mus:Np1D) -> Np1D:
         """EIM-based residual estimator."""
         err = np.max(self._EIMStep(mus, True), axis = 1)
         err *= self.greedyTol / np.mean(err)
         return err
 
     def _EIMStep(self, mus:Np1D,
                  only_one : bool = False) -> Tuple[Np1D, Np1D, List[int]]:
-        """Residual estimator based on look-ahead idea."""
+        """EIM step to find next magic point."""
         mus = self.checkParameterList(mus)
         tMverb, self.trainedModel.verbosity = self.trainedModel.verbosity, 0
         QTest = self.trainedModel.getQVal(mus)
         QTzero = np.where(QTest == 0.)[0]
         if len(QTzero) > 0:
             RROMPyWarning(("Adjusting estimator to avoid division by "
                            "numerically zero denominator."))
             QTest[QTzero] = np.finfo(np.complex).eps / (1. + self.N)
         QTest = np.abs(QTest)
         muCTest = self.trainedModel.centerNormalize(mus)
         muCTrain = self.trainedModel.centerNormalize(self.mus)
         self.trainedModel.verbosity = tMverb
         vanTest = self._polyvanderAuxiliary(muCTest, self.E, self.polybasis)
         vanTestNext = self._polyvanderAuxiliary(muCTest, self.E + 1,
                                                 self.polybasis)[:,
                                                             vanTest.shape[1] :]
         idxsTest = np.arange(vanTestNext.shape[1])
         basis = np.zeros((len(idxsTest), 0), dtype = float)
         idxMaxEst = []
         while len(idxsTest) > 0:
             vanTrial = self._polyvanderAuxiliary(muCTrain, self.E,
                                                  self.polybasis)
             vanTrialNext = self._polyvanderAuxiliary(muCTrain, self.E + 1,
                                                      self.polybasis)[:,
                                                            vanTrial.shape[1] :]
             vanTrial = np.hstack((vanTrial, vanTrialNext.dot(basis).reshape(
                                            len(vanTrialNext), basis.shape[1])))
             valuesTrial = vanTrialNext[:, idxsTest]
             vanTestEff = np.hstack((vanTest, vanTestNext.dot(basis).reshape(
                                             len(vanTestNext), basis.shape[1])))
             vanTestNextEff = vanTestNext[:, idxsTest]
             coeffTest = np.linalg.solve(vanTrial, valuesTrial)
             errTest = (np.abs(vanTestNextEff - vanTestEff.dot(coeffTest))
                      / np.expand_dims(QTest, 1))
             if only_one: return errTest
             idxMaxErr = np.unravel_index(np.argmax(errTest), errTest.shape)
             idxMaxEst += [idxMaxErr[0]]
             muCTrain.append(muCTest[idxMaxErr[0]])
             basis = np.pad(basis, [(0, 0), (0, 1)], "constant")
             basis[idxsTest[idxMaxErr[1]], -1] = 1.
             idxsTest = np.delete(idxsTest, idxMaxErr[1])
         return errTest, QTest, idxMaxEst
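         # Editor's note -- a hedged reading of the loop above: at each pass
         # the (test point, next-degree basis direction) pair with the largest
         # denominator-weighted interpolation error is selected, the point is
         # promoted to the trial set and the direction is locked into basis,
         # until every next-degree direction has been assigned a "magic point".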
     
     def errorEstimator(self, mus:Np1D, return_max : bool = False) -> Np1D:
         """Standard residual-based error estimator."""
         setupOK = self.setupApproxLocal()
         if setupOK > 0:
             err = np.empty(len(mus))
             err[:] = np.nan
             if not return_max: return err
             return err, [- setupOK], np.nan
         mus = self.checkParameterList(mus)
         vbMng(self.trainedModel, "INIT",
               "Evaluating error estimator at mu = {}.".format(mus), 10)
         if self.errorEstimatorKind == "AFFINE":
             err = self.getErrorEstimatorAffine(mus)
         else:
             self._setupInterpolationIndices()
             if self.errorEstimatorKind == "DISCREPANCY":
                 err = self.getErrorEstimatorDiscrepancy(mus)
             elif self.errorEstimatorKind[: 10] == "LOOK_AHEAD":
                 err, idxMaxEst = self.getErrorEstimatorLookAhead(mus,
                                                  self.errorEstimatorKind[11 :])
             else: #if self.errorEstimatorKind == "NONE":
                 err = self.getErrorEstimatorNone(mus)
         vbMng(self.trainedModel, "DEL", "Done evaluating error estimator.", 10)
         if not return_max: return err
         if self.errorEstimatorKind[: 10] != "LOOK_AHEAD":
-            idxMaxEst = np.empty(self.sampleBatchSize, dtype = int)
-            errCP = copy(err)
-            for j in range(self.sampleBatchSize):
-                k = np.argmax(errCP)
-                idxMaxEst[j] = k
-                if j + 1 < self.sampleBatchSize:
-                    musZero = self.trainedModel.centerNormalize(mus, mus[k])
-                    errCP *= np.linalg.norm(musZero.data, axis = 1)
+            idxMaxEst = [np.argmax(err)]
         return err, idxMaxEst, err[idxMaxEst]
 
     _warnPlottingNormalization = 1
     def plotEstimator(self, *args, **kwargs):
         super().plotEstimator(*args, **kwargs)
         if (self.errorEstimatorKind == "NONE"
         and self._warnPlottingNormalization):
             RROMPyWarning(("Error estimator arbitrarily normalized before "
                            "plotting."))
             self._warnPlottingNormalization = 0
 
     def greedyNextSample(self, *args,
                          **kwargs) -> Tuple[Np1D, int, float, paramVal]:
         """Compute next greedy snapshot of solution map."""
         RROMPyAssert(self._mode, message = "Cannot add greedy sample.")
-        self.sampleBatchIdx += 1
-        self.sampleBatchSize = totalDegreeN(self.npar - 1, self.sampleBatchIdx)
         err, muidx, maxErr, muNext =  super().greedyNextSample(*args, **kwargs)
-        if maxErr is not None and (np.any(np.isnan(maxErr))
-                                or np.any(np.isinf(maxErr))):
-            self.sampleBatchIdx -= 1
-            self.sampleBatchSize = totalDegreeN(self.npar - 1,
-                                                self.sampleBatchIdx)
         if (self.errorEstimatorKind == "NONE" and not np.isnan(maxErr)
                                               and not np.isinf(maxErr)):
             maxErr = None
         return err, muidx, maxErr, muNext
 
-    def _setSampleBatch(self, maxS:int):
-        self.sampleBatchIdx, self.sampleBatchSize, S = -1, 0, 0
-        nextBatchSize = 1
-        while S + nextBatchSize <= maxS:
-            self.sampleBatchIdx += 1
-            self.sampleBatchSize = nextBatchSize
-            S += self.sampleBatchSize
-            nextBatchSize = totalDegreeN(self.npar - 1,
-                                         self.sampleBatchIdx + 1)
-        return S
-
     def _preliminaryTraining(self):
         """Initialize starting snapshots of solution map."""
         RROMPyAssert(self._mode, message = "Cannot start greedy algorithm.")
         if self.samplingEngine.nsamples > 0: return
-        self._S = self._setSampleBatch(self.S)
+        if self.npar > 1:
+            raise RROMPyException(("Cannot apply minimal rational interpolant "
+                                   "in multivariate case."))
         super()._preliminaryTraining()
         self.M, self.N = ("AUTO",) * 2
 
     def setupApproxLocal(self) -> int:
         """Compute rational interpolant."""
         if self.checkComputedApprox(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
         self.verbosity -= 10
         vbMng(self, "INIT", "Setting up local approximant.", 5)
         pMat = self.samplingEngine.projectionMatrix
         firstRun = self.trainedModel is None
         if not firstRun: pMat = pMat[:, len(self.trainedModel.data.mus) :]
         self._setupTrainedModel(pMat, not firstRun)
         unstable = 0
         if self.E > 0:
             Q = self._setupDenominator()
         else:
             Q = PI()
-            Q.coeffs = np.ones((1,) * self.npar, dtype = np.complex)
-            Q.npar = self.npar
-            Q.polybasis = self.polybasis
+            Q.coeffs = np.ones((1,), dtype = np.complex)
+            Q.npar, Q.polybasis = 1, self.polybasis
         if not unstable:
             self._setupRational(Q)
             if self.M < self.E:
                 RROMPyWarning(("Instability in numerator computation. "
                                "Aborting."))
                 unstable = 1
         if not unstable:
             self.trainedModel.data.approxParameters = copy(
                                                          self.approxParameters)
         vbMng(self, "DEL", "Done setting up local approximant.", 5)
         self.verbosity += 10
         return unstable
         
     def setupApprox(self, plotEst : str = "NONE") -> int:
-        self._postGreedyRecover = 0
         val = super().setupApprox(plotEst)
-        self._postGreedyRecover = 1
         if val == 0:
-            if len(self.mus) < self.samplingEngine.nsamples:
-                while len(self.mus) < self.samplingEngine.nsamples:
-                    self.mus.append(self.samplingEngine.mus[len(self.mus)])
-                self.trainedModel = None
-                self._S = self._setSampleBatch(len(self.mus) + 1) - 1
-                self.setupApproxLocal()
             self._setupRational(self.trainedModel.data.Q,
                                 self.trainedModel.data.P)
             self.trainedModel.data.approxParameters = copy(
                                                          self.approxParameters)
         return val
-        
-    def loadTrainedModel(self, filename:str):
-        """Load trained reduced model from file."""
-        super().loadTrainedModel(filename)
-        self._setSampleBatch(self.S + 1)
diff --git a/rrompy/reduction_methods/standard/rational_interpolant.py b/rrompy/reduction_methods/standard/rational_interpolant.py
index fbf3d3f..ff49ec9 100644
--- a/rrompy/reduction_methods/standard/rational_interpolant.py
+++ b/rrompy/reduction_methods/standard/rational_interpolant.py
@@ -1,721 +1,681 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from copy import deepcopy as copy
 import numpy as np
 from scipy.linalg import eig
 from collections.abc import Iterable
 from .generic_standard_approximant import GenericStandardApproximant
 from rrompy.utilities.poly_fitting.polynomial import (
                                             polybases as ppb, polyfitname,
                                             polyvander as pvP, polyTimes,
                                             PolynomialInterpolator as PI,
                                             PolynomialInterpolatorNodal as PIN)
 from rrompy.utilities.poly_fitting.heaviside import rational2heaviside
 from rrompy.utilities.poly_fitting.radial_basis import (polybases as rbpb,
                                                 RadialBasisInterpolator as RBI)
 from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, paramList,
                                          interpEng)
 from rrompy.utilities.base import verbosityManager as vbMng
 from rrompy.utilities.numerical import pseudoInverse, dot, baseDistanceMatrix
 from rrompy.utilities.numerical.factorials import multifactorial
 from rrompy.utilities.numerical.hash_derivative import (nextDerivativeIndices,
                                                   hashDerivativeToIdx as hashD,
                                                   hashIdxToDerivative as hashI)
-from rrompy.utilities.numerical.degree import (reduceDegreeN,
-                                               degreeTotalToFull,
-                                               fullDegreeMaxMask,
-                                               totalDegreeMaxMask)
 from rrompy.utilities.exception_manager import (RROMPyException, RROMPyAssert,
                                                 RROMPyWarning)
 
 __all__ = ['RationalInterpolant']
 
 def polyTimesTable(P:interpEng, mus:Np1D, reorder:List[int],
                    derIdxs:List[List[List[int]]], scl : Np1D = None) -> Np2D:
     """Table of polynomial products."""
     if not isinstance(P, PI):
         raise RROMPyException(("Polynomial to evaluate must be a polynomial "
                                "interpolator."))
     Pvals = [[0.] * len(derIdx) for derIdx in derIdxs]
     for j, derIdx in enumerate(derIdxs):
         nder = len(derIdx)
         for der in range(nder):
             derI = hashI(der, P.npar)
             Pvals[j][der] = P([mus[j]], derI, scl) / multifactorial(derI)
     return blockDiagDer(Pvals, reorder, derIdxs)
 
 def vanderInvTable(vanInv:Np2D, idxs:List[int], reorder:List[int],
                    derIdxs:List[List[List[int]]]) -> Np2D:
     """Table of Vandermonde pseudo-inverse."""
     S = len(reorder)
     Ts = [None] * len(idxs)
     for k in range(len(idxs)):
         invLocs = [None] * len(derIdxs)
         idxGlob = 0
         for j, derIdx in enumerate(derIdxs):
             nder = len(derIdx)
             idxGlob += nder
             idxLoc = np.arange(S)[(reorder >= idxGlob - nder)
                                 * (reorder < idxGlob)]
             invLocs[j] = vanInv[k, idxLoc]
         Ts[k] = blockDiagDer(invLocs, reorder, derIdxs, [2, 1, 0])
     return Ts
 
 def blockDiagDer(vals:List[Np1D], reorder:List[int],
                  derIdxs:List[List[List[int]]],
                  permute : List[int] = None) -> Np2D:
     """Table of derivative values for point confluence."""
     S = len(reorder)
     T = np.zeros((S, S), dtype = np.complex)
     if permute is None: permute = [0, 1, 2]
     idxGlob = 0
     for j, derIdx in enumerate(derIdxs):
         nder = len(derIdx)
         idxGlob += nder
         idxLoc = np.arange(S)[(reorder >= idxGlob - nder)
                             * (reorder < idxGlob)]
         val = vals[j]
         for derI, derIdxI in enumerate(derIdx):
             for derJ, derIdxJ in enumerate(derIdx):
                 diffIdx = [x - y for (x, y) in zip(derIdxI, derIdxJ)]
                 if all([x >= 0 for x in diffIdx]):
                     diffj = hashD(diffIdx)
                     i1, i2, i3 = np.array([derI, derJ, diffj])[permute]
                     T[idxLoc[i1], idxLoc[i2]] = val[i3]
     return T
 
 class RationalInterpolant(GenericStandardApproximant):
     """
     ROM rational interpolant computation for parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': kind of snapshots orthogonalization; allowed values
                 include 0, 1/2, and 1; defaults to 1, i.e. POD;
             - 'scaleFactorDer': scaling factors for derivative computation;
                 defaults to 'AUTO';
             - 'S': total number of samples current approximant relies upon;
             - 'sampler': sample point generator;
             - 'polybasis': type of polynomial basis for interpolation; defaults
                 to 'MONOMIAL';
             - 'M': degree of rational interpolant numerator; defaults to
                 'AUTO', i.e. maximum allowed;
             - 'N': degree of rational interpolant denominator; defaults to
                 'AUTO', i.e. maximum allowed;
-            - 'polydegreetype': type of polynomial degree; defaults to 'TOTAL';
             - 'radialDirectionalWeights': radial basis weights for interpolant
                 numerator; defaults to 1;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights; defaults to [-1, -1];
             - 'functionalSolve': strategy for minimization of denominator
                 functional; allowed values include 'NORM', 'DOMINANT',
                 'BARYCENTRIC[_NORM]', and 'BARYCENTRIC_AVERAGE' (check pdf in
                 main folder for explanation); defaults to 'NORM';
             - 'interpTol': tolerance for interpolation; defaults to None;
             - 'QTol': tolerance for robust rational denominator management;
                 defaults to 0.
             Defaults to empty dict.
         verbosity(optional): Verbosity level. Defaults to 10.
             
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         mus: Array of snapshot parameters.
         approxParameters: Dictionary containing values for main parameters of
             approximant. Recognized keys are in parameterList.
         parameterListSoft: Recognized keys of soft approximant parameters:
             - 'POD': kind of snapshots orthogonalization;
             - 'scaleFactorDer': scaling factors for derivative computation;
             - 'polybasis': type of polynomial basis for interpolation;
             - 'M': degree of rational interpolant numerator;
             - 'N': degree of rational interpolant denominator;
-            - 'polydegreetype': type of polynomial degree;
             - 'radialDirectionalWeights': radial basis weights for interpolant
                 numerator;
             - 'radialDirectionalWeightsAdapt': bounds for adaptive rescaling of
                 radial basis weights;
             - 'functionalSolve': strategy for minimization of denominator
                 functional;
             - 'interpTol': tolerance for interpolation via numpy.polyfit;
             - 'QTol': tolerance for robust rational denominator management.
         parameterListCritical: Recognized keys of critical approximant
             parameters:
             - 'S': total number of samples current approximant relies upon;
             - 'sampler': sample point generator.
         verbosity: Verbosity level.
         POD: Kind of snapshots orthogonalization.
         scaleFactorDer: Scaling factors for derivative computation.
         S: Number of solution snapshots that the current approximant is
             based upon.
         sampler: Sample point generator.
         polybasis: type of polynomial basis for interpolation.
         M: Numerator degree of approximant.
         N: Denominator degree of approximant.
-        polydegreetype: Type of polynomial degree.
         radialDirectionalWeights: Radial basis weights for interpolant
             numerator.
         radialDirectionalWeightsAdapt: Bounds for adaptive rescaling of radial
             basis weights.
         functionalSolve: Strategy for minimization of denominator functional.
         interpTol: Tolerance for interpolation via numpy.polyfit.
         QTol: Tolerance for robust rational denominator management.
         muBounds: list of bounds for parameter values.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution(s) with parameter(s) lastSolvedHF as
             sampleList.
         lastSolvedHF: Parameter(s) corresponding to last computed high fidelity
             solution(s) as parameterList.
         uApproxReduced: Reduced approximate solution(s) with parameter(s)
             lastSolvedApprox as sampleList.
         lastSolvedApproxReduced: Parameter(s) corresponding to last computed
             reduced approximate solution(s) as parameterList.
         uApprox: Approximate solution(s) with parameter(s) lastSolvedApprox as
             sampleList.
         lastSolvedApprox: Parameter(s) corresponding to last computed
             approximate solution(s) as parameterList.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D matrix whose columns are FE dofs of coefficients of
             approximant numerator.
     """
 
     _allowedFunctionalSolveKinds = ["NORM", "DOMINANT", "BARYCENTRIC_NORM",
                                     "BARYCENTRIC_AVERAGE"]
     
     def __init__(self, *args, **kwargs):
         self._preInit()
-        self._addParametersToList(["polybasis", "M", "N", "polydegreetype",
+        self._addParametersToList(["polybasis", "M", "N",
                                    "radialDirectionalWeights",
                                    "radialDirectionalWeightsAdapt",
                                    "functionalSolve", "interpTol", "QTol"],
-                                  ["MONOMIAL", "AUTO", "AUTO", "TOTAL", 1.,
-                                   [-1., -1.], "NORM", -1, 0.])
+                                  ["MONOMIAL", "AUTO", "AUTO", 1., [-1., -1.],
+                                   "NORM", -1, 0.])
         super().__init__(*args, **kwargs)
         self._postInit()
 
     @property
     def tModelType(self):
         from .trained_model.trained_model_rational import TrainedModelRational
         return TrainedModelRational
 
     @property
     def polybasis(self):
         """Value of polybasis."""
         return self._polybasis
     @polybasis.setter
     def polybasis(self, polybasis):
         try:
             polybasis = polybasis.upper().strip().replace(" ","")
             if polybasis not in ppb + rbpb: 
                 raise RROMPyException("Prescribed polybasis not recognized.")
             self._polybasis = polybasis
         except:
             RROMPyWarning(("Prescribed polybasis not recognized. Overriding "
                            "to 'MONOMIAL'."))
             self._polybasis = "MONOMIAL"
         self._approxParameters["polybasis"] = self.polybasis
 
     @property
     def polybasis0(self):
         if "_" in self.polybasis:
             return self.polybasis.split("_")[0]
         return self.polybasis
 
     @property
     def functionalSolve(self):
         """Value of functionalSolve."""
         return self._functionalSolve
     @functionalSolve.setter
     def functionalSolve(self, functionalSolve):
         try:
             functionalSolve = functionalSolve.upper().strip().replace(" ","")
             if functionalSolve == "BARYCENTRIC": functionalSolve += "_NORM"
             if functionalSolve not in self._allowedFunctionalSolveKinds:
                 raise RROMPyException(("Prescribed functionalSolve not "
                                        "recognized."))
             self._functionalSolve = functionalSolve
         except:
             RROMPyWarning(("Prescribed functionalSolve not recognized. "
                            "Overriding to 'NORM'."))
             self._functionalSolve = "NORM"
         self._approxParameters["functionalSolve"] = self.functionalSolve
 
     @property
     def interpTol(self):
         """Value of interpTol."""
         return self._interpTol
     @interpTol.setter
     def interpTol(self, interpTol):
         self._interpTol = interpTol
         self._approxParameters["interpTol"] = self.interpTol
 
     @property
     def radialDirectionalWeights(self):
         """Value of radialDirectionalWeights."""
         return self._radialDirectionalWeights
     @radialDirectionalWeights.setter
     def radialDirectionalWeights(self, radialDirectionalWeights):
         if isinstance(radialDirectionalWeights, Iterable):
             radialDirectionalWeights = list(radialDirectionalWeights)
         else:
             radialDirectionalWeights = [radialDirectionalWeights]
         self._radialDirectionalWeights = radialDirectionalWeights
         self._approxParameters["radialDirectionalWeights"] = (
                                                  self.radialDirectionalWeights)
 
     @property
     def radialDirectionalWeightsAdapt(self):
         """Value of radialDirectionalWeightsAdapt."""
         return self._radialDirectionalWeightsAdapt
     @radialDirectionalWeightsAdapt.setter
     def radialDirectionalWeightsAdapt(self, radialDirectionalWeightsAdapt):
         self._radialDirectionalWeightsAdapt = radialDirectionalWeightsAdapt
         self._approxParameters["radialDirectionalWeightsAdapt"] = (
                                             self.radialDirectionalWeightsAdapt)
 
     @property
     def M(self):
         """Value of M."""
         return self._M
     @M.setter
     def M(self, M):
         if isinstance(M, str):
             M = M.strip().replace(" ","")
             if "-" not in M: M = M + "-0"
             self._M_isauto, self._M_shift = True, int(M.split("-")[-1])
             M = 0
         if M < 0: raise RROMPyException("M must be non-negative.")
         self._M = M
         self._approxParameters["M"] = self.M
 
     def _setMAuto(self):
-        self.M = max(0, reduceDegreeN(self.S, self.S, self.npar,
-                                      self.polydegreetype) - self._M_shift)
+        self.M = max(0, self.S - self._M_shift - 1)
         vbMng(self, "MAIN", "Automatically setting M to {}.".format(self.M),
               25)
 
     @property
     def N(self):
         """Value of N."""
         return self._N
     @N.setter
     def N(self, N):
         if isinstance(N, str):
             N = N.strip().replace(" ","")
             if "-" not in N: N = N + "-0"
             self._N_isauto, self._N_shift = True, int(N.split("-")[-1])
             N = 0
         if N < 0: raise RROMPyException("N must be non-negative.")
         self._N = N
         self._approxParameters["N"] = self.N
 
     def _setNAuto(self):
-        self.N = max(0, reduceDegreeN(self.S, self.S, self.npar,
-                                      self.polydegreetype) - self._N_shift)
+        self.N = max(0, self.S - self._N_shift - 1)
         vbMng(self, "MAIN", "Automatically setting N to {}.".format(self.N),
               25)
 
-    @property
-    def polydegreetype(self):
-        """Value of polydegreetype."""
-        return self._polydegreetype
-    @polydegreetype.setter
-    def polydegreetype(self, polydegreetype):
-        try:
-            polydegreetype = polydegreetype.upper().strip().replace(" ","")
-            if polydegreetype not in ["TOTAL", "FULL"]: 
-                raise RROMPyException(("Prescribed polydegreetype not "
-                                       "recognized."))
-            self._polydegreetype = polydegreetype
-        except:
-            RROMPyWarning(("Prescribed polydegreetype not recognized. "
-                           "Overriding to 'TOTAL'."))
-            self._polydegreetype = "TOTAL"
-        self._approxParameters["polydegreetype"] = self.polydegreetype
-
     @property
     def QTol(self):
         """Value of tolerance for robust rational denominator management."""
         return self._QTol
     @QTol.setter
     def QTol(self, QTol):
         if QTol < 0.:
             RROMPyWarning(("Overriding prescribed negative robustness "
                            "tolerance to 0."))
             QTol = 0.
         self._QTol = QTol
         self._approxParameters["QTol"] = self.QTol
         
     def resetSamples(self):
         """Reset samples."""
         super().resetSamples()
         self._musUniqueCN = None
         self._derIdxs = None
         self._reorder = None
 
     def _setupInterpolationIndices(self):
         """Setup parameters for polyvander."""
         if self._musUniqueCN is None or len(self._reorder) != len(self.mus):
             self._musUniqueCN, musIdxsTo, musIdxs, musCount = (
                             self.trainedModel.centerNormalize(self.mus).unique(
                                     return_index = True, return_inverse = True,
                                     return_counts = True))
             self._musUnique = self.mus[musIdxsTo]
             self._derIdxs = [None] * len(self._musUniqueCN)
             self._reorder = np.empty(len(musIdxs), dtype = int)
             filled = 0
             for j, cnt in enumerate(musCount):
                 self._derIdxs[j] = nextDerivativeIndices([], self.mus.shape[1],
                                                          cnt)
                 jIdx = np.nonzero(musIdxs == j)[0]
                 self._reorder[jIdx] = np.arange(filled, filled + cnt)
                 filled += cnt
 
     def _setupDenominator(self):
         """Compute rational denominator."""
         RROMPyAssert(self._mode, message = "Cannot setup denominator.")
+        if self.npar > 1:
+            raise RROMPyException(("Cannot apply minimal rational interpolant "
+                                   "in multivariate case."))
         vbMng(self, "INIT", "Starting computation of denominator.", 7)
         if hasattr(self, "_N_isauto"):
             self._setNAuto()
         else:
-            N = reduceDegreeN(self.N, self.S, self.npar, self.polydegreetype)
-            if N < self.N:
+            if self.S - 1 < self.N:
                 RROMPyWarning(("N too large compared to S. Reducing N by "
-                               "{}").format(self.N - N))
-                self.N = N
+                               "{}").format(self.N - self.S + 1))
+                self.N = self.S - 1
         while self.N > 0:
-            if self.functionalSolve != "NORM" and self.npar > 1:
-                RROMPyWarning(("Strategy for functional optimization must be "
-                               "'NORM' for more than one parameter. "
-                               "Overriding to 'NORM'."))
-                self.functionalSolve = "NORM"
             if (self.functionalSolve[:11] == "BARYCENTRIC"
             and self.N + 1 < self.S):
                 RROMPyWarning(("Barycentric strategy cannot be applied with "
                                "Least Squares. Overriding to 'NORM'."))
                 self.functionalSolve = "NORM"
             if self.functionalSolve[:11] == "BARYCENTRIC":
                 invD, TN = None, None
                 self._setupInterpolationIndices()
                 if len(self._musUnique) != self.S:
                     RROMPyWarning(("Barycentric functional optimization "
                                    "cannot be applied to repeated samples. "
                                    "Overriding to 'NORM'."))
                     self.functionalSolve = "NORM"
             if self.functionalSolve[:11] != "BARYCENTRIC":
                 invD, TN = self._computeInterpolantInverseBlocks()
             if self.POD == 1:
                 sampleE = self.samplingEngine.Rscale
                 Rscaling = None
             elif self.POD == 1/2:
                 sampleE = self.samplingEngine.samples_normal
                 Rscaling = self.samplingEngine.Rscale
             else:
                 sampleE = self.samplingEngine.samples
                 Rscaling = None
             ev, eV = self.findeveVG(sampleE, invD, TN, Rscaling)
             if self.functionalSolve[:11] == "BARYCENTRIC": break
             nevBad = np.sum(np.abs(ev / ev[-1]) < self.QTol)
             if not nevBad: break
-            if self.npar == 1:
-                dN = nevBad
-            else: #if self.npar > 1 and self.functionalSolve == "NORM":
-                dN = self.N - reduceDegreeN(self.N, len(eV) - nevBad,
-                                            self.npar, self.polydegreetype)
+            dN = nevBad
             vbMng(self, "MAIN",
                   ("Smallest {} eigenvalue{} below tolerance. Reducing N by "
                    "{}.").format(nevBad, "s" * (nevBad > 1), dN), 10)
             self.N = self.N - dN
         if hasattr(self, "_gram"): del self._gram
         if self.N <= 0:
-            self.N, eV = 0, np.ones((1,) * self.npar, dtype = np.complex)
+            self.N, eV = 0, np.ones((1,), dtype = np.complex)
         if self.N > 0 and self.functionalSolve[:11] == "BARYCENTRIC":
             q = PIN()
             q.polybasis, q.nodes = self.polybasis0, eV
         else:
             q = PI()
-            q.npar, q.polybasis = self.npar, self.polybasis0
-            if self.polydegreetype == "TOTAL":
-                q.coeffs = degreeTotalToFull(tuple([self.N + 1] * self.npar),
-                                             self.npar, eV)
-            else:
-                q.coeffs = eV.reshape([self.N + 1] * self.npar)
+            q.npar, q.polybasis = 1, self.polybasis0
+            q.coeffs = eV.flatten()
         vbMng(self, "DEL", "Done computing denominator.", 7)
         return q
 
     def _setupNumerator(self):
         """Compute rational numerator."""
         RROMPyAssert(self._mode, message = "Cannot setup numerator.")
+        if self.npar > 1:
+            raise RROMPyException(("Cannot apply minimal rational interpolant "
+                                   "in multivariate case."))
         vbMng(self, "INIT", "Starting computation of numerator.", 7)
         self._setupInterpolationIndices()
         Qevaldiag = polyTimesTable(self.trainedModel.data.Q, self._musUniqueCN,
                                    self._reorder, self._derIdxs,
                                    self.scaleFactorRel)
         if self.POD == 1:
             Qevaldiag = Qevaldiag.dot(self.samplingEngine.Rscale.T)
         elif self.POD == 1/2:
             Qevaldiag = Qevaldiag * self.samplingEngine.Rscale
         if hasattr(self, "_M_isauto"):
             self._setMAuto()
-            M = self.M
         else:
-            M = reduceDegreeN(self.M, self.S, self.npar, self.polydegreetype)
-            if M < self.M:
+            if self.S - 1 < self.M:
                 RROMPyWarning(("M too large compared to S. Reducing M by "
-                               "{}").format(self.M - M))
-                self.M = M
+                               "{}").format(self.M - self.S + 1))
+                self.M = self.S - 1
+        M = self.M
         while self.M >= 0:
-            pParRest = [self.M, self.polybasis, self.verbosity >= 5,
-                        self.polydegreetype == "TOTAL",
+            pParRest = [self.M, self.polybasis, self.verbosity >= 5, 0,
                         {"derIdxs": self._derIdxs, "reorder": self._reorder,
                          "scl": self.scaleFactorRel}]
             if self.polybasis in ppb:
                 p = PI()
             else:
                 self.computeScaleFactor()
                 rDWEff = np.array([w * f for w, f in zip(
                                                  self.radialDirectionalWeights,
                                                  self.scaleFactor)])
                 pParRest = pParRest[: 2] + [rDWEff] + pParRest[2 :]
                 pParRest[-1]["optimizeScalingBounds"] = (
                                             self.radialDirectionalWeightsAdapt)
                 p = RBI()
             if self.polybasis in ppb + rbpb:
                 pParRest += [{"rcond": self.interpTol}]
             wellCond, msg = p.setupByInterpolation(self._musUniqueCN,
                                                    Qevaldiag, *pParRest)
             vbMng(self, "MAIN", msg, 5)
             if wellCond: break
             vbMng(self, "MAIN", ("Polyfit is poorly conditioned. Reducing M "
                                  "by 1."), 10)
             self.M = self.M - 1
         if self.M < 0:
             raise RROMPyException(("Instability in computation of numerator. "
                                    "Aborting."))
         self.M = M
         vbMng(self, "DEL", "Done computing numerator.", 7)
         return p
 
     def setupApprox(self) -> int:
         """Compute rational interpolant."""
         if self.checkComputedApprox(): return -1
         RROMPyAssert(self._mode, message = "Cannot setup approximant.")
+        if self.npar > 1:
+            raise RROMPyException(("Cannot apply minimal rational interpolant "
+                                   "in multivariate case."))
         vbMng(self, "INIT", "Setting up {}.".format(self.name()), 5)
         self.computeSnapshots()
         self._setupTrainedModel(self.samplingEngine.projectionMatrix)
         self._setupRational(self._setupDenominator())
         self.trainedModel.data.approxParameters = copy(self.approxParameters)
         vbMng(self, "DEL", "Done setting up approximant.", 5)
         return 0
 
     def _setupRational(self, Q:interpEng, P : interpEng = None):
         vbMng(self, "INIT", "Starting approximant finalization.", 5)
         self.trainedModel.data.Q = Q
         if P is None: P = self._setupNumerator()
-        while self.N > 0 and self.npar == 1:
+        while self.N > 0:
             if self.HFEngine._ignoreResidues:
                 pls = Q.roots()
                 cfs, projMat = None, None
             else:
                 cfs, pls, _ = rational2heaviside(P, Q)
                 cfs = cfs[: self.N].T
                 if self.POD != 1:
                     projMat = self.samplingEngine.projectionMatrix
                 else:
                     projMat = None
             foci = self.sampler.normalFoci()
             plsA = self.mapParameterList(self.mapParameterList(self.mu0)(0, 0)
                                        + self.scaleFactor * pls, "B")(0)
             idxBad = self.HFEngine.flagBadPolesResiduesAbsolute(plsA, cfs,
                                                                 projMat)
             if not self.HFEngine._ignoreResidues: cfs[:, idxBad] = 0.
             idxBad += self.HFEngine.flagBadPolesResiduesRelative(pls, cfs,
                                                                  projMat, foci)
             idxBad = idxBad > 0
             if not np.any(idxBad): break
             vbMng(self, "MAIN",
                   "Removing {} spurious pole{} out of {}.".format(
                        np.sum(idxBad), "s" * (np.sum(idxBad) > 1), self.N), 10)
             if isinstance(Q, PIN):
                 Q.nodes = Q.nodes[idxBad == False]
             else:
                 Q = PI()
-                Q.npar = self.npar
-                Q.polybasis = self.polybasis0
+                Q.npar, Q.polybasis = 1, self.polybasis0
                 Q.coeffs = np.ones(1, dtype = np.complex)
                 for pl in pls[idxBad == False]:
                     Q.coeffs = polyTimes(Q.coeffs, [- pl, 1.],
                                     Pbasis = Q.polybasis, Rbasis = Q.polybasis)
                 Q.coeffs /= np.linalg.norm(Q.coeffs)
             self.trainedModel.data.Q = Q
             self.N = Q.deg[0]
             P = self._setupNumerator()
         self.trainedModel.data.P = P
         vbMng(self, "DEL", "Terminated approximant finalization.", 5)
 
     def _computeInterpolantInverseBlocks(self) -> Tuple[List[Np2D], Np2D]:
         """
         Compute inverse factors for minimal interpolant target functional.
         """
         RROMPyAssert(self._mode, message = "Cannot solve eigenvalue problem.")
         self._setupInterpolationIndices()
         pvPPar = [self.polybasis0, self._derIdxs, self._reorder,
                   self.scaleFactorRel]
         full = self.N + 1 == self.S == len(self._musUniqueCN)
         if full:
             mus = self._musUniqueCN[self._reorder]
             dist = baseDistanceMatrix(mus, magnitude = False)[..., 0]
             dist[np.arange(self.N + 1),
                  np.arange(self.N + 1)] = multifactorial([self.N])
             fitinvE = np.prod(dist, axis = 1) ** -1
             vbMng(self, "MAIN",
                   ("Evaluating quasi-Lagrangian basis of degree {} at {} "
                    "sample points.").format(self.N, self.N + 1), 5)
             invD = [np.diag(fitinvE)]
             TN = pvP(self._musUniqueCN, self.N, *pvPPar)
         else:
             while self.N >= 0:
-                if self.polydegreetype == "TOTAL":
-                    Neff = self.N
-                    idxsB = totalDegreeMaxMask(self.N, self.npar)
-                else: #if self.polydegreetype == "FULL":
-                    Neff = [self.N] * self.npar
-                    idxsB = fullDegreeMaxMask(self.N, self.npar)
-                TN = pvP(self._musUniqueCN, Neff, *pvPPar)
+                TN = pvP(self._musUniqueCN, [self.N], *pvPPar)
                 fitOut = pseudoInverse(TN, rcond = self.interpTol, full = True)
                 vbMng(self, "MAIN",
                       ("Fitting {} samples with degree {} through {}... "
                        "Conditioning of pseudoinverse system: {:.4e}.").format(
                                         TN.shape[0], self.N,
                                         polyfitname(self.polybasis0),
                                         fitOut[1][1][0] / fitOut[1][1][-1]), 5)
                 if fitOut[1][0] == TN.shape[1]:
-                    fitinv = fitOut[0][idxsB, :]
+                    fitinv = fitOut[0][[self.N], :]
                     break
                 vbMng(self, "MAIN", 
                       "Polyfit is poorly conditioned. Reducing N by 1.", 10)
                 self.N = self.N - 1
             if self.N < 0:
                 raise RROMPyException(("Instability in computation of "
                                        "denominator. Aborting."))
-            invD = vanderInvTable(fitinv, idxsB, self._reorder, self._derIdxs)
+            invD = vanderInvTable(fitinv, [self.N], self._reorder,
+                                  self._derIdxs)
         return invD, TN
 
     def findeveVG(self, sampleE:Np2D, invD:List[Np2D], TN:Np2D,
                   Rscaling : Np1D = None) -> Tuple[Np1D, Np2D]:
         """
         Compute eigenvalues and eigenvectors of rational denominator matrix, or
             of its right chol factor if POD.
         """
         RROMPyAssert(self._mode, message = "Cannot solve spectral problem.")
         if self.POD == 1:
             if self.functionalSolve[:11] == "BARYCENTRIC":
                 Rstack = sampleE
             else:
                 vbMng(self, "INIT", "Building generalized half-gramian.",
                       10)
                 S, eWidth = sampleE.shape[0], len(invD)
                 Rstack = np.zeros((S * eWidth, TN.shape[1]),
                                   dtype = np.complex)
                 for k in range(eWidth):
                     Rstack[k * S : (k + 1) * S, :] = dot(sampleE, dot(invD[k],
                                                                       TN))
                 vbMng(self, "DEL", "Done building half-gramian.", 10)
             _, s, Vh = np.linalg.svd(Rstack, full_matrices = False)
             evG, eVG = s[::-1], Vh[::-1].T.conj()
             evExp, probKind = -2., "svd "
         else:
             if not hasattr(self, "_gram"):
                 vbMng(self, "INIT", "Building gramian matrix.", 10)
                 self._gram = self.HFEngine.innerProduct(sampleE, sampleE,
                                                         is_state = True)
                 if Rscaling is not None:
                     self._gram = (self._gram.T * Rscaling.conj()).T * Rscaling
                 vbMng(self, "DEL", "Done building gramian.", 10)
             if self.functionalSolve[:11] == "BARYCENTRIC":
                 G = self._gram
             else:
                 vbMng(self, "INIT", "Building generalized gramian.", 10)
                 G = np.zeros((TN.shape[1],) * 2, dtype = np.complex)
                 for k in range(len(invD)):
                     iDkN = dot(invD[k], TN)
                     G += dot(dot(self._gram, iDkN).T, iDkN.conj()).T
                 vbMng(self, "DEL", "Done building gramian.", 10)
             evG, eVG = np.linalg.eigh(G)
             evExp, probKind = -1., "eigen"
         if (self.functionalSolve in ["NORM", "BARYCENTRIC_NORM"]
          or np.sum(np.abs(evG) < np.finfo(float).eps * np.abs(evG[-1])
                                                      * len(evG)) == 1):
             eV = eVG[:, 0]
         elif self.functionalSolve == "BARYCENTRIC_AVERAGE":
             eV = eVG.dot(evG ** evExp * np.sum(eVG, axis = 0).conj())
         else:
             eV = eVG.dot(evG ** evExp * eVG[0].conj())
         vbMng(self, "MAIN",
               ("Solved {}problem of size {} with condition number "
                "{:.4e}.").format(probKind, len(evG) - 1, evG[-1] / evG[1]), 5)
         if self.functionalSolve[:11] == "BARYCENTRIC":
             S, mus = len(eV), self._musUniqueCN[self._reorder].flatten()
             arrow = np.zeros((S + 1,) * 2, dtype = np.complex)
             arrow[1 :, 0] = 1.
             arrow[0, 1 :] = eV
             arrow[np.arange(1, S + 1), np.arange(1, S + 1)] = mus
             active = np.eye(S + 1)
             active[0, 0] = 0.
             poles, qTm1 = eig(arrow, active)
             eVgood = np.isinf(poles) + np.isnan(poles) == False
             poles = poles[eVgood]
             self.N = len(poles)
             if self.QTol > 0:
                 # compare optimal score with self.N poles to those obtained
                 # by removing one of the poles
                 qTm1 = qTm1[1 :, eVgood].conj() ** -1.
                 dists = mus.reshape(-1, 1) - mus
                 dists[np.arange(S), np.arange(S)] = multifactorial([self.N])
                 dists = np.prod(dists, axis = 1).conj() ** -1.
                 qComp = np.empty((self.N + 1, S), dtype = np.complex)
                 qComp[0] = dists * np.prod(qTm1, axis = 1)
                 for j in range(self.N):
                     qTmj = np.prod(qTm1[:, np.arange(self.N) != j], axis = 1)
                     qComp[j + 1] = dists * qTmj
                 Lqs = qComp.dot(eVG)
                 scores = np.real(np.sum(Lqs * evG ** -evExp * Lqs.conj(),
                                         axis = 1))
                 evBad = scores[1 :] < self.QTol * scores[0]
                 nevBad = np.sum(evBad)
                 if nevBad:
                     vbMng(self, "MAIN",
                           ("Suboptimal pole{} detected. Reducing N by "
                            "{}.").format("s" * (nevBad > 1), nevBad), 10)
                     self.N = self.N - nevBad
                     poles = poles[evBad == False]
             eV = poles
         return evG[1 :], eV
         
     def getResidues(self, *args, **kwargs) -> Tuple[paramList, Np2D]:
         """
         Obtain approximant residues.
 
         Returns:
             Matrix with residues as columns.
         """
         return self.trainedModel.getResidues(*args, **kwargs)
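
With 'polydegreetype' removed and the approximant restricted to one parameter,
the automatic degrees reduce to M = N = S - 1 (minus the optional shift). In
the full-interpolation case (N + 1 = S distinct nodes, POD orthogonalization,
functionalSolve 'NORM'), the denominator is the right singular vector of the
smallest singular value of R @ diag(fitinv) @ V_N, as in
_computeInterpolantInverseBlocks and findeveVG under those assumptions. A
self-contained numerical sketch of that step, with random data standing in for
the POD coefficients (constant factors on the quasi-Lagrangian weights are
irrelevant, since only the direction of the singular vector matters):

    import numpy as np
    from numpy.polynomial.chebyshev import chebvander

    S = 6
    nodes = np.cos(np.pi * (np.arange(S) + .5) / S)   # Chebyshev nodes in [-1, 1]
    R = np.linalg.qr(np.random.randn(S, S))[0]        # stand-in for POD R factor
    dist = nodes.reshape(-1, 1) - nodes
    np.fill_diagonal(dist, 1.)                        # diagonal only rescales globally
    fitinv = np.prod(dist, axis = 1) ** -1.           # quasi-Lagrangian weights
    VN = chebvander(nodes, S - 1)                     # Vandermonde of degree N = S - 1
    _, sv, Vh = np.linalg.svd(R.dot(np.diag(fitinv)).dot(VN))
    q = Vh[-1].conj()                                 # denominator coefficients
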
diff --git a/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py b/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py
index 6b1822b..3505e59 100644
--- a/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py
+++ b/rrompy/reduction_methods/standard/trained_model/trained_model_rational.py
@@ -1,177 +1,170 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 import numpy as np
-from collections.abc import Iterable
 from rrompy.reduction_methods.base.trained_model.trained_model import (
                                                                   TrainedModel)
 from rrompy.utilities.numerical import dot
 from rrompy.utilities.numerical.compress_matrix import compressMatrix
 from rrompy.utilities.base.types import (Np1D, Np2D, Tuple, List, ListAny,
                                          paramVal, paramList, sampList)
 from rrompy.utilities.base import verbosityManager as vbMng, freepar as fp
 from rrompy.utilities.exception_manager import RROMPyException, RROMPyWarning
 from rrompy.parameter import emptyParameterList
 from rrompy.sampling import sampleList
 
 __all__ = ['TrainedModelRational']
 
 class TrainedModelRational(TrainedModel):
     """
     ROM approximant evaluation for rational approximant.
     
     Attributes:
         Data: dictionary with all that can be pickled.
     """
 
     def compress(self, collapse : bool = False, tol : float = 0., *args,
                  **kwargs):
         if not collapse and tol <= 0.: return
         RMat = self.data.projMat
         if not collapse:
             if hasattr(self.data, "_compressTol"):
                 RROMPyWarning(("Recompressing already compressed model is "
                                "ineffective. Aborting."))
                 return
             self.data.projMat, RMat, _ = compressMatrix(RMat, tol, *args,
                                                         **kwargs)
         self.data.P.postmultiplyTensorize(RMat.T)
         super().compress(collapse, tol)
 
     def centerNormalize(self, mu : paramList = [],
                         mu0 : paramVal = None) -> paramList:
         """
         Compute normalized parameter to be plugged into approximant.
 
         Args:
             mu: Parameter(s) 1.
             mu0: Parameter(s) 2. If None, set to self.data.mu0.
 
         Returns:
             Normalized parameter.
         """
         mu = self.checkParameterList(mu)
         if mu0 is None: mu0 = self.data.mu0
         return (self.mapParameterList(mu)
               - self.mapParameterList(mu0)) / self.data.scaleFactor
 
     def getPVal(self, mu : paramList = []) -> sampList:
         """
         Evaluate rational numerator at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         mu = self.checkParameterList(mu)
         vbMng(self, "INIT", "Evaluating numerator at mu = {}.".format(mu), 17)
         p = sampleList(self.data.P(self.centerNormalize(mu)))
         vbMng(self, "DEL", "Done evaluating numerator.", 17)
         return p
 
     def getQVal(self, mu:Np1D, der : List[int] = None,
                 scl : Np1D = None) -> Np1D:
         """
         Evaluate rational denominator at arbitrary parameter.
 
         Args:
             mu: Target parameter.
             der(optional): Derivatives to take before evaluation.
         """
         mu = self.checkParameterList(mu)
         vbMng(self, "INIT", "Evaluating denominator at mu = {}.".format(mu),
               17)
         q = self.data.Q(self.centerNormalize(mu), der, scl)
         vbMng(self, "DEL", "Done evaluating denominator.", 17)
         return q
 
     def getApproxReduced(self, mu : paramList = []) -> sampList:
         """
         Evaluate reduced representation of approximant at arbitrary parameter.
 
         Args:
             mu: Target parameter.
         """
         mu = self.checkParameterList(mu)
         if (not hasattr(self, "lastSolvedApproxReduced")
          or self.lastSolvedApproxReduced != mu):
             vbMng(self, "INIT",
                   "Evaluating approximant at mu = {}.".format(mu), 12)
             QV = self.getQVal(mu)
             QVzero = np.where(QV == 0.)[0]
             if len(QVzero) > 0:
                 QV[QVzero] = np.finfo(np.complex).eps / (1.
                                                       + self.data.Q.deg[0])
             self.uApproxReduced = self.getPVal(mu) / QV
             vbMng(self, "DEL", "Done evaluating approximant.", 12)
             self.lastSolvedApproxReduced = mu
         return self.uApproxReduced
     
     def getPoles(self, marginalVals : ListAny = [fp]) -> paramList:
         """
         Obtain approximant poles.
 
         Returns:
             Numpy complex vector of poles.
         """
         mVals = list(marginalVals)
         rDim = mVals.index(fp)
         if rDim < len(mVals) - 1 and fp in mVals[rDim + 1 :]:
             raise RROMPyException(("Exactly 1 'freepar' entry in "
                                    "marginalVals must be provided."))
         mVals[rDim] = self.data.mu0(rDim)
         mVals = list(self.centerNormalize(mVals).data.flatten())
         mVals[rDim] = fp
         roots = self.data.scaleFactor[rDim] * self.data.Q.roots(mVals)
         return self.mapParameterList(self.mapParameterList(self.data.mu0(rDim),
                                                            idx = [rDim])(0, 0)
                                    + roots, "B", [rDim])(0)
 
-    def getResidues(self, *args, **kwargs) -> Tuple[paramList, Np2D]:
+    def getResidues(self, marginalVals : ListAny = [fp]) -> Tuple[paramList,
+                                                                  Np2D]:
         """
         Obtain approximant residues.
 
         Returns:
             Numpy matrix with residues as columns.
         """
-        pls = self.getPoles(*args, **kwargs)
+        mVals = list(marginalVals)
+        pls = self.getPoles(mVals)
         if len(pls) == 0:
             return pls, np.empty((0, 0), dtype = self.data.P.coeffs.dtype)
-        if len(args) == 1:
-            mVals = args[0]
-        elif len(args) == 0:
-            mVals = [None]
-        else:
-            mVals = kwargs["marginalVals"]
-        if not isinstance(mVals, Iterable): mVals = [mVals]
-        mVals = list(mVals)
         rDim = mVals.index(fp)
         poles = emptyParameterList()
         poles.reset((len(pls), self.data.npar), dtype = pls.dtype)
         for k, pl in enumerate(pls):
             mValsLoc = list(mVals)
             mValsLoc[rDim] = pl
             poles[k] = mValsLoc
         QV = self.getQVal(poles, list(1 * (np.arange(self.data.npar) == rDim)))
         QVzero = np.where(QV == 0.)[0]
         if len(QVzero) > 0:
             RROMPyWarning(("Adjusting residues to avoid division by "
                            "numerically zero denominator."))
             QV[QVzero] = np.finfo(np.complex).eps / (1. + self.data.Q.deg[0])
         res = self.getPVal(poles).data / QV
         if not self.data._collapsed: res = dot(self.data.projMat, res).T
         return pls, res
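
Both getApproxReduced and getResidues replace numerically zero denominator
values before dividing, so that the evaluation P(mu) / Q(mu) stays finite. A
minimal sketch of that safeguard, with illustrative values:

    import numpy as np

    degQ = 4                                          # denominator degree
    QV = np.array([3e-2, 0., 1e-3], dtype = np.complex128)
    QV[np.where(QV == 0.)[0]] = np.finfo(np.complex128).eps / (1. + degQ)
    PV = np.ones((5, 3))                              # numerator values (dofs x parameters)
    uApp = PV / QV                                    # division is now well defined
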
diff --git a/rrompy/utilities/numerical/__init__.py b/rrompy/utilities/numerical/__init__.py
index 6785cfc..6ec38cd 100644
--- a/rrompy/utilities/numerical/__init__.py
+++ b/rrompy/utilities/numerical/__init__.py
@@ -1,45 +1,46 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from .compress_matrix import compressMatrix
 from .halton import haltonGenerate
 from .kroneckerer import kroneckerer
 from .low_discrepancy import lowDiscrepancy
 from .point_distances import baseDistanceMatrix
-from .point_matching import pointMatching, rationalFunctionMatching
+from .point_matching import pointMatching, polynomialMatching, rationalFunctionMatching
 from .potential import potential
 from .pseudo_inverse import pseudoInverse
 from .quadrature_points import quadraturePointsGenerate
 from .sobol import sobolGenerate
 from .tensor_la import dot, solve
 
 __all__ = [
         'compressMatrix',
         'haltonGenerate',
         'kroneckerer',
         'lowDiscrepancy',
         'baseDistanceMatrix',
         'pointMatching',
+        'polynomialMatching',
         'rationalFunctionMatching',
         'potential',
         'pseudoInverse',
         'quadraturePointsGenerate',
         'sobolGenerate',
         'dot',
         'solve'
            ]
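
The new polynomialMatching helper is exported alongside pointMatching and
rationalFunctionMatching. A hedged usage sketch with toy data (all argument
values are illustrative; two identical univariate models get aligned
trivially):

    import numpy as np
    from rrompy.utilities.numerical import polynomialMatching

    Qs = [np.ones(3, dtype = complex), np.ones(3, dtype = complex)]
    Ps = [np.ones((3, 2), dtype = complex), np.ones((3, 2), dtype = complex)]
    featPts = np.array([[0.], [1.]])                  # marginal parameters
    Qs, Ps = polynomialMatching(Qs, Ps, featPts, matchingWeight = 1.,
                                supps = [0, 0], projMat = np.eye(2))
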
diff --git a/rrompy/utilities/numerical/point_distances.py b/rrompy/utilities/numerical/point_distances.py
index c21f3f7..d500904 100644
--- a/rrompy/utilities/numerical/point_distances.py
+++ b/rrompy/utilities/numerical/point_distances.py
@@ -1,76 +1,76 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 import numpy as np
 from scipy.sparse import spmatrix
 from rrompy.utilities.base.types import List, Np1D, Np2D, HFEng
 
 __all__ = ['baseDistanceMatrix', 'vectorDistanceMatrix',
            'doubleDistanceMatrix']
 
 def baseDistanceMatrix(x:Np2D, y : Np2D = None, npar : int = None,
                        magnitude : bool = True, weights : Np1D = None) -> Np2D:
     if npar is None: npar = x.shape[1] if x.ndim > 1 else 1
     if y is None: y = x
     if x.ndim != 3 or x.shape[1] != npar: x = x.reshape(-1, 1, npar)
     if y.ndim != 2 or y.shape[1] != npar: y = y.reshape(-1, npar)
     dist = np.repeat(x, len(y), axis = 1) - y
     if weights is not None: dist *= np.array(weights).flatten()
     if magnitude:
         if dist.shape[2] == 1:
             dist = np.abs(dist)[..., 0]
         else:
             dist = np.sum(np.abs(dist) ** 2., axis = 2) ** .5
     return dist
 
 def vectorDistanceMatrix(X:Np2D, Y:Np2D, HFEngine : HFEng = None,
                          is_state : bool = True, Xbad : List[bool] = None,
                          Ybad : List[bool] = None) -> Np2D:
     if HFEngine is None:
         innerT = np.real(Y.T.conj().dot(X))
         if isinstance(X, (spmatrix,)):
             norm2X = np.sum(np.abs(X.todense()) ** 2., axis = 0)
         else:
             norm2X = np.sum(np.abs(X) ** 2., axis = 0)
         if isinstance(Y, (spmatrix,)):
             norm2Y = np.sum(np.abs(Y.todense()) ** 2., axis = 0)
         else:
             norm2Y = np.sum(np.abs(Y) ** 2., axis = 0)
     else:
         innerT = np.real(HFEngine.innerProduct(X, Y, is_state = is_state))
         norm2X = HFEngine.norm(X, is_state = is_state) ** 2.
         norm2Y = HFEngine.norm(Y, is_state = is_state) ** 2.
     if Xbad is None: Xbad = np.where(np.isinf(norm2X))[0]
     if Ybad is None: Ybad = np.where(np.isinf(norm2Y))[0]
     dist2T = (np.tile(norm2Y.reshape(-1, 1), len(norm2X))
             + norm2X.reshape(1, -1) - 2 * innerT)
     dist2T[:, Xbad], dist2T[Ybad, :] = np.inf, np.inf
     dist2T[np.ix_(Ybad, Xbad)] = 0.
     dist2T[dist2T < 0.] = 0.
     return dist2T.T ** .5
 
 def doubleDistanceMatrix(x:Np1D, y:Np1D, w : float = 0, X : Np2D = None,
                          Y : Np2D = None, HFEngine : HFEng = None,
                          is_state : bool = True) -> Np2D:
     Xbad, Ybad = np.where(np.isinf(x))[0], np.where(np.isinf(y))[0]
     dist = vectorDistanceMatrix(np.reshape(x, [1, -1]), np.reshape(y, [1, -1]),
                                 Xbad = Xbad, Ybad = Ybad)
     if w == 0: return dist
     distAdj = vectorDistanceMatrix(X, Y, HFEngine, is_state, Xbad = Xbad,
                                    Ybad = Ybad)
-    return (dist + w * distAdj) / (1. + w)
+    return ((dist ** 2. + w * distAdj ** 2.) / (1 + w)) ** .5
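
The updated doubleDistanceMatrix blends the pole distance and the residue
distance as a weighted root mean square instead of a plain convex combination.
A self-contained sketch of the blended distance feeding the assignment step
(Euclidean residue metric, illustrative data):

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    x = np.array([1. + 1.j, 2.])                      # poles of model 1
    y = np.array([1.1 + 1.j, 2.2, 3.])                # poles of model 2
    X, Y = np.random.randn(4, 2), np.random.randn(4, 3)  # residues as columns
    w = .5                                            # matching weight

    dist = np.abs(x.reshape(-1, 1) - y)                               # pole distances
    distAdj = np.linalg.norm(X[:, :, None] - Y[:, None, :], axis = 0) # residue distances
    blended = ((dist ** 2. + w * distAdj ** 2.) / (1 + w)) ** .5
    rows, cols = linear_sum_assignment(blended)       # pairing used by pointMatching
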
diff --git a/rrompy/utilities/numerical/point_matching.py b/rrompy/utilities/numerical/point_matching.py
index c014228..a5b5bf2 100644
--- a/rrompy/utilities/numerical/point_matching.py
+++ b/rrompy/utilities/numerical/point_matching.py
@@ -1,112 +1,192 @@
 # Copyright (C) 2018-2020 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from copy import deepcopy as copy
 import numpy as np
 from scipy.optimize import linear_sum_assignment as LSA
 from .tensor_la import dot
 from .point_distances import baseDistanceMatrix, doubleDistanceMatrix
 from rrompy.utilities.base.types import Tuple, List, ListAny, Np1D, Np2D, HFEng
-from rrompy.utilities.exception_manager import RROMPyAssert
+from rrompy.utilities.exception_manager import RROMPyException, RROMPyAssert
 
-__all__ = ['pointMatching', 'rationalFunctionMatching']
+__all__ = ['pointMatching', 'polynomialMatching', 'rationalFunctionMatching']
 
 def pointMatching(distMatrix:Np2D) -> Tuple[Np1D, Np1D]:
     return LSA(distMatrix)
 
+def polynomialMatching(Qs:List[Np1D], Ps:List[Np2D], featPts:Np2D,
+                       matchingWeight:float, supps:ListAny, projMat:Np2D,
+                       HFEngine : HFEng = None, is_state : bool = True,
+                       root : int = 0,
+                       kind : str = "ROTATE") -> Tuple[List[Np1D], List[Np2D]]:
+    """
+    Align denominator and numerator coefficients of rational functions.
+
+    Args:
+        Qs: List of denominator coefficients.
+        Ps: List of numerator coefficients.
+        featPts: Marginal parameters corresponding to rational models.
+        matchingWeight: Matching weight in distance computation.
+        supps: Support indices for projection matrix.
+        projMat: Projection matrix for numerator coefficients.
+        HFEngine(optional): Engine for distance evaluation. Defaults to None,
+            i.e. Euclidean metric.
+        is_state(optional): Whether residues are of system state. Defaults to
+            True.
+        root(optional): Root of search tree. Defaults to 0.
+        kind(optional): Kind of matching, either 'ROTATE' or 'PROJECT'.
+            Defaults to 'ROTATE'.
+
+    Returns:
+        Aligned lists of denominator and numerator coefficients.
+    """
+    M = len(featPts)
+    RROMPyAssert(len(Qs), M, "Number of rational functions to be matched")
+    RROMPyAssert(len(Ps), M, "Number of rational functions to be matched")
+    if M <= 1: return Qs, Ps
+    kind = kind.upper().strip().replace(" ","")
+    if kind not in ["ROTATE", "PROJECT"]:
+        raise RROMPyException("Matching kind not recognized.")
+    degQ = np.max([Q.shape[0] for Q in Qs])
+    degP = np.max([P.shape[0] for P in Ps])
+    for j in range(M):
+        if Qs[j].shape[0] < degQ:
+            Qs[j] = np.pad(Qs[j], (0, degQ - Qs[j].shape[0]), "constant")
+        if Ps[j].shape[0] < degP:
+            Ps[j] = np.pad(Ps[j], [(0, degP - Ps[j].shape[0]), (0, 0)],
+                           "constant")
+    featDist = baseDistanceMatrix(featPts)
+    free = list(range(M))
+    if matchingWeight != 0:
+        if hasattr(projMat, "shape"):
+            PsC = [dot(projMat[:, supps[j] : supps[j] + Ps[j].shape[1]],
+                       Ps[j].T) for j in range(M)]
+        else:
+            PsC = [dot(projMat, Ps[j].T) for j in range(M)]
+    fixed = [free.pop(root)]
+    for j in range(M - 1, 0, -1):
+        #find closest point
+        idx = np.argmin(featDist[np.ix_(fixed, free)].flatten())
+        Ifix = fixed[idx // j]
+        fixed += [free.pop(idx % j)]
+        Ifree = fixed[-1]
+        if kind == "PROJECT": norm2 = np.sum(np.abs(Qs[Ifree]) ** 2.)
+        inner = np.sum(Qs[Ifix] * Qs[Ifree].conj())
+        if matchingWeight != 0:
+            if HFEngine is None:
+                if kind == "PROJECT":
+                    norm2P = np.sum(np.abs(Ps[Ifree]) ** 2.)
+                innerP = np.sum(Ps[Ifix] * Ps[Ifree].conj())
+            else:
+                if kind == "PROJECT":
+                    norm2P = HFEngine.norm(PsC[Ifree], is_state = is_state)
+                    norm2P = np.sum(norm2P ** 2.)
+                innerP = [HFEngine.innerProduct(
+                                     PsC[Ifix][:, j], PsC[Ifree][:, j],
+                                     is_state = is_state) for j in range(degP)]
+                innerP = np.sum(innerP)
+            if kind == "PROJECT": norm2 = norm2 + matchingWeight * norm2P
+            inner = inner + matchingWeight * innerP
+        scale = np.abs(inner) if kind == "ROTATE" else norm2
+        if scale >= 1e-15:
+            w = inner / scale
+            Qs[Ifree], Ps[Ifree] = Qs[Ifree] * w, Ps[Ifree] * w
+            if matchingWeight != 0: PsC[Ifree] = PsC[Ifree] * w
+    return Qs, Ps
+
 def rationalFunctionMatching(poles:List[Np1D], coeffs:List[Np2D],
                              featPts:Np2D, matchingWeight:float, supps:ListAny,
                              projMat:Np2D, HFEngine : HFEng = None,
                              is_state : bool = True, root : int = None) \
                                               -> Tuple[List[Np1D], List[Np2D]]:
     """
     Match poles and residues of a set of rational functions.
 
     Args:
         poles: List of (lists of) poles.
         coeffs: List of (lists of) residues.
         featPts: Marginal parameters corresponding to rational models.
         matchingWeight: Matching weight in distance computation.
         supps: Support indices for projection matrix.
         projMat: Projection matrix for residues.        
         HFEngine(optional): Engine for distance evaluation. Defaults to None,
             i.e. Euclidean metric.
         is_state(optional): Whether residues are of system state. Defaults to
             True.
         root(optional): Root of search tree. Defaults to None, i.e.
             automatically chosen.
 
     Returns:
         Matched list of (lists of) poles and list of (lists of) residues.
     """
     M, N = len(featPts), len(poles[0])
     RROMPyAssert(len(poles), M, "Number of rational functions to be matched")
     RROMPyAssert(len(coeffs), M, "Number of rational functions to be matched")
     if M <= 1: return poles, coeffs
     featDist = baseDistanceMatrix(featPts)
     free = list(range(M))
     if root is None:
         #start from sample point with closest neighbor,
         #among those with no inf pole
         notInfPls = np.where([np.any(np.isinf(p)) == False for p in poles])[0]
         MEff = len(notInfPls)
         if MEff == 1:
             root = notInfPls[0]
         else:
             featDistEff = featDist[notInfPls][:, notInfPls]
             root = notInfPls[np.argpartition(featDistEff.flatten(),
                                              MEff)[MEff] % MEff]
     polesC = copy(poles)
     if matchingWeight != 0:
         if hasattr(projMat, "shape"):
             resC = [dot(projMat[:, supps[j] : supps[j] + coeffs[j].shape[1]],
                         coeffs[j][: N].T) for j in range(M)]
         else:
             resC = [dot(projMat, coeffs[j][: N].T) for j in range(M)]
     fixed = [free.pop(root)]
     for j in range(M - 1, 0, -1):
         #find closest point
         idx = np.argmin(featDist[np.ix_(fixed, free)].flatten())
         Ifix = fixed[idx // j]
         fixed += [free.pop(idx % j)]
         Ifree = fixed[-1]
         plsfix, plsfree = polesC[Ifix], polesC[Ifree]
         freeInf = np.where(np.isinf(plsfree))[0]
         freeNotInf = np.where(np.isinf(plsfree) == False)[0]
         plsfree = plsfree[freeNotInf]
         if matchingWeight == 0:
             resfix, resfree = None, None
         else:
             resfix, resfree = resC[Ifix], resC[Ifree][:, freeNotInf]
         #build assignment distance matrix
         distj = doubleDistanceMatrix(plsfree, plsfix, matchingWeight, resfree,
                                      resfix, HFEngine, is_state)
         reordering = pointMatching(distj)[1]
         reorderingInf = [x for x in range(N) if x not in reordering]
         #reorder good poles
         poles[Ifree][reordering], poles[Ifree][reorderingInf] = (
                                poles[Ifree][freeNotInf], poles[Ifree][freeInf])
         coeffs[Ifree][reordering], coeffs[Ifree][reorderingInf] = (
                              coeffs[Ifree][freeNotInf], coeffs[Ifree][freeInf])
         #transfer missing poles over
         polesC[Ifree][reordering], polesC[Ifree][reorderingInf] = (
                         polesC[Ifree][freeNotInf], polesC[Ifix][reorderingInf])
         if matchingWeight != 0:
             resC[Ifree][:, reordering], resC[Ifree][:, reorderingInf] = (
                       resC[Ifree][:, freeNotInf], resC[Ifix][:, reorderingInf])
     return poles, coeffs
diff --git a/tests/4_reduction_methods_multiD/rational_interpolant_2d.py b/tests/4_reduction_methods_multiD/rational_interpolant_2d.py
deleted file mode 100644
index 3148314..0000000
--- a/tests/4_reduction_methods_multiD/rational_interpolant_2d.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (C) 2018-2020 by the RROMPy authors
-#
-# This file is part of RROMPy.
-#
-# RROMPy is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# RROMPy is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import numpy as np
-from matrix_random import matrixRandom
-from rrompy.reduction_methods import RationalInterpolant as RI
-from rrompy.parameter.parameter_sampling import (QuadratureSampler as QS,
-                                                 ManualSampler as MS)
-
-def test_monomials(capsys):
-    mu = [5.05, 7.1]
-    mu0 = [5., 7.]
-    solver = matrixRandom()
-    uh = solver.solve(mu)[0]
-    params = {"POD": False, "S": 16, "QTol": 1e-6, "interpTol": 1e-3,
-              "polybasis": "MONOMIAL",
-              "sampler": QS([[4.9, 6.85], [5.1, 7.15]], "UNIFORM")}
-    approx = RI(solver, mu0, params, verbosity = 100)
-    approx.setupApprox()
-
-    uhP1 = approx.getApprox(mu)[0]
-    errP = approx.getErr(mu)[0]
-    errNP = approx.normErr(mu)[0]
-    myerrP = uhP1 - uh
-    assert np.allclose(np.abs(errP - myerrP), 0., rtol = 1e-3)
-    assert np.isclose(solver.norm(errP), errNP, rtol = 1e-3)
-    resP = approx.getRes(mu)[0]
-    resNP = approx.normRes(mu)
-    assert np.isclose(solver.norm(resP), resNP, rtol = 1e-3)
-    assert np.allclose(np.abs(resP - (solver.b(mu) - solver.A(mu).dot(uhP1))),
-                       0., rtol = 1e-3)
-    assert np.isclose(errNP / solver.norm(uh), 5.2667e-05, rtol = 1)
-
-    out, err = capsys.readouterr()
-    assert ("poorly conditioned. Reducing N " in out)
-    assert len(err) == 0
-    
-def test_well_cond():
-    mu = [5.05, 7.1]
-    mu0 = [5., 7.]
-    solver = matrixRandom()
-    params = {"POD": True, "M": 3, "N": 3, "S": 16,
-              "interpTol": 1e-10, "polybasis": "CHEBYSHEV",
-              "sampler": QS([[4.9, 6.85], [5.1, 7.15]], "UNIFORM")}
-    approx = RI(solver, mu0, params, verbosity = 0)
-    approx.setupApprox()
-    assert np.isclose(approx.normErr(mu)[0] / approx.normHF(mu)[0],
-                      5.98695e-05, rtol = 1e-1)
-
-def test_hermite():
-    mu = [5.05, 7.1]
-    mu0 = [5., 7.]
-    solver = matrixRandom()
-    sampler0 = QS([[4.9, 6.85], [5.1, 7.15]], "UNIFORM")
-    params = {"POD": True, "M": 3, "N": 3, "S": 25, "polybasis": "CHEBYSHEV",
-              "sampler": MS([[4.9, 6.85], [5.1, 7.15]],
-                            points = sampler0.generatePoints(9))}
-    approx = RI(solver, mu0, params, verbosity = 0)
-    approx.setupApprox()
-    assert np.isclose(approx.normErr(mu)[0] / approx.normHF(mu)[0],
-                      5.50053e-05, rtol = 5e-1)
-