diff --git a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
index da631f7..1596ba8 100644
--- a/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
+++ b/rrompy/reduction_methods/lagrange_greedy/approximant_lagrange_greedy_pade.py
@@ -1,592 +1,592 @@
 # Copyright (C) 2018 by the RROMPy authors
 #
 # This file is part of RROMPy.
 #
 # RROMPy is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # RROMPy is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with RROMPy. If not, see <http://www.gnu.org/licenses/>.
 #
 
 from copy import copy
 import numpy as np
 from rrompy.reduction_methods.base import (checkRobustTolerance,
                                            setupFitCallables)
 from .generic_approximant_lagrange_greedy import (
                                            GenericApproximantLagrangeGreedy)
 from rrompy.reduction_methods.lagrange import ApproximantLagrangePade
 from rrompy.utilities.base.types import DictAny, List, HFEng
 from rrompy.utilities.base import purgeDict, verbosityDepth, customFit
 from rrompy.utilities.warning_manager import warn
 
 __all__ = ['ApproximantLagrangePadeGreedy']
 
 class ApproximantLagrangePadeGreedy(GenericApproximantLagrangeGreedy,
                                     ApproximantLagrangePade):
     """
     ROM greedy Pade' interpolant computation for parametric problems.
 
     Args:
         HFEngine: HF problem solver.
         mu0(optional): Default parameter. Defaults to 0.
         approxParameters(optional): Dictionary containing values for main
             parameters of approximant. Recognized keys are:
             - 'POD': whether to compute POD of snapshots; defaults to True;
             - 'muBounds': list of bounds for parameter values; defaults to
                 [[0], [1]];
             - 'basis': type of basis for interpolation; allowed values
                 include 'MONOMIAL', 'CHEBYSHEV' and 'LEGENDRE'; defaults to
                 'MONOMIAL';
             - 'Delta': difference between M and N in rational approximant;
                 defaults to 0;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
                 defaults to 1e-2;
             - 'errorEstimatorKind': kind of error estimator; available
                 values include 'EXACT' and 'SIMPLIFIED'; defaults to
                 'SIMPLIFIED';
             - 'maxIter': maximum number of greedy steps; defaults to 1e2;
             - 'refinementRatio': ratio of training points to be exhausted
                 before training set refinement; defaults to 0.2;
             - 'nTrainingPoints': number of training points; defaults to
                 maxIter / refinementRatio;
             - 'nTestPoints': number of starting test points; defaults to 1;
             - 'trainingSetGenerator': training sample points generator;
                 defaults to uniform sampler within muBounds;
             - 'interpRcond': tolerance for interpolation via numpy.polyfit;
                 defaults to None;
             - 'robustTol': tolerance for robust Pade' denominator
                 management; defaults to 0.
             Defaults to empty dict.
         homogeneized: Whether to homogeneize Dirichlet BCs. Defaults to
             False.
         verbosity(optional): Verbosity level. Defaults to 10.
 
     Attributes:
         HFEngine: HF problem solver.
         mu0: Default parameter.
         mus: Array of snapshot parameters.
         homogeneized: Whether to homogeneize Dirichlet BCs.
         approxParameters: Dictionary containing values for main parameters
             of approximant. Recognized keys are in parameterList.
         parameterList: Recognized keys of approximant parameters:
             - 'POD': whether to compute POD of snapshots;
             - 'muBounds': list of bounds for parameter values;
             - 'basis': type of basis for interpolation;
             - 'Delta': difference between M and N in rational approximant;
             - 'greedyTol': uniform error tolerance for greedy algorithm;
             - 'errorEstimatorKind': kind of error estimator;
             - 'maxIter': maximum number of greedy steps;
             - 'refinementRatio': ratio of training points to be exhausted
                 before training set refinement;
             - 'nTrainingPoints': number of training points;
             - 'nTestPoints': number of starting test points;
             - 'trainingSetGenerator': training sample points generator;
             - 'interpRcond': tolerance for interpolation via numpy.polyfit;
             - 'robustTol': tolerance for robust Pade' denominator
                 management.
         extraApproxParameters: List of approxParameters keys in addition to
             mother class's.
         POD: whether to compute POD of snapshots.
         muBounds: list of bounds for parameter values.
         greedyTol: uniform error tolerance for greedy algorithm.
         errorEstimatorKind: kind of error estimator.
         maxIter: maximum number of greedy steps.
         refinementRatio: ratio of training points to be exhausted before
             training set refinement.
         nTrainingPoints: number of training points.
         nTestPoints: number of starting test points.
         trainingSetGenerator: training sample points generator.
         interpRcond: Tolerance for interpolation via numpy.polyfit.
         robustTol: Tolerance for robust Pade' denominator management.
         samplingEngine: Sampling engine.
         uHF: High fidelity solution with wavenumber lastSolvedHF as numpy
             complex vector.
         lastSolvedHF: Wavenumber corresponding to last computed high
             fidelity solution.
         Q: Numpy 1D vector containing complex coefficients of approximant
             denominator.
         P: Numpy 2D vector whose columns are FE dofs of coefficients of
             approximant numerator.
         uApp: Last evaluated approximant as numpy complex vector.
         lastApproxParameters: List of parameters corresponding to last
             computed approximant.
     """
 
     def __init__(self, HFEngine:HFEng, mu0 : complex = 0.,
                  approxParameters : DictAny = {},
                  homogeneized : bool = False, verbosity : int = 10):
         self._preInit()
         self._addParametersToList(["basis", "Delta", "errorEstimatorKind",
                                    "interpRcond", "robustTol"])
         super().__init__(HFEngine = HFEngine, mu0 = mu0,
                          approxParameters = approxParameters,
                          homogeneized = homogeneized,
                          verbosity = verbosity)
         self._postInit()
 
     @property
     def approxParameters(self):
         """
         Value of approximant parameters. Its assignment may change
         robustTol.
""" return self._approxParameters @approxParameters.setter def approxParameters(self, approxParams): approxParameters = purgeDict(approxParams, self.parameterList, dictname = self.name() + ".approxParameters", baselevel = 1) approxParametersCopy = purgeDict(approxParameters, ["basis", "Delta", "errorEstimatorKind", "interpRcond", "robustTol"], True, True, baselevel = 1) if "Delta" in list(approxParameters.keys()): self._Delta = approxParameters["Delta"] elif hasattr(self, "Delta"): self._Delta = self.Delta else: self._Delta = 0 GenericApproximantLagrangeGreedy.approxParameters.fset(self, approxParametersCopy) keyList = list(approxParameters.keys()) self.Delta = self.Delta if "basis" in keyList or not hasattr(self, "_val"): if "basis" in keyList: kind = approxParameters["basis"] else: kind = "MONOMIAL" setupFit = setupFitCallables(kind) for x in setupFit: super().__setattr__("_" + x, setupFit[x]) if "errorEstimatorKind" in keyList: self.errorEstimatorKind = approxParameters["errorEstimatorKind"] elif hasattr(self, "errorEstimatorKind"): self.errorEstimatorKind = self.errorEstimatorKind else: self.errorEstimatorKind = "SIMPLIFIED" if "interpRcond" in keyList: self.interpRcond = approxParameters["interpRcond"] elif hasattr(self, "interpRcond"): self.interpRcond = self.interpRcond else: self.interpRcond = None if "robustTol" in keyList: self.robustTol = approxParameters["robustTol"] elif hasattr(self, "robustTol"): self.robustTol = self.robustTol else: self.robustTol = 0 @property def Delta(self): """Value of Delta.""" return self._Delta @Delta.setter def Delta(self, Delta): if not np.isclose(Delta, np.floor(Delta)): raise ArithmeticError("Delta must be an integer.") if Delta < 0: warn(("Error estimator unreliable for Delta < 0. Overloading of " "errorEstimator is suggested.")) else: Deltamin = (max(self.HFEngine.nbs, self.HFEngine.nAs * self.homogeneized) - 1 - 1 * (self.HFEngine.nAs > 1)) if Delta < Deltamin: warn(("Method may be unreliable for selected Delta. Suggested " "minimal value of Delta: {}.").format(Deltamin)) self._Delta = Delta self._approxParameters["Delta"] = self.Delta @property def errorEstimatorKind(self): """Value of errorEstimatorKind.""" return self._errorEstimatorKind @errorEstimatorKind.setter def errorEstimatorKind(self, errorEstimatorKind): errorEstimatorKind = errorEstimatorKind.upper() if errorEstimatorKind not in ["EXACT", "SIMPLIFIED"]: warn(("Error estimator kind not recognized. Overriding to " "'SIMPLIFIED'.")) errorEstimatorKind = "SIMPLIFIED" self._errorEstimatorKind = errorEstimatorKind self._approxParameters["errorEstimatorKind"] = self.errorEstimatorKind @property def nTestPoints(self): """Value of nTestPoints.""" return self._nTestPoints @nTestPoints.setter def nTestPoints(self, nTestPoints): if nTestPoints <= np.abs(self.Delta): warn(("nTestPoints must be at least abs(Delta) + 1. 
Increasing " "value to abs(Delta) + 1.")) nTestPoints = np.abs(self.Delta) + 1 if not np.isclose(nTestPoints, np.int(nTestPoints)): raise ArithmeticError("nTestPoints must be an integer.") nTestPoints = np.int(nTestPoints) if hasattr(self, "nTestPoints"): nTestPointsold = self.nTestPoints else: nTestPointsold = -1 self._nTestPoints = nTestPoints self._approxParameters["nTestPoints"] = self.nTestPoints if nTestPointsold != self.nTestPoints: self.resetSamples() def resetSamples(self): """Reset samples.""" super().resetSamples() self.resbb = None self.resAb = None self.resAA = None self.As = None self.bs = None def errorEstimator(self, mus:List[np.complex]) -> List[np.complex]: """Standard residual-based error estimator.""" self.setupApprox() self.initEstNormer() PM = self.P[:, -1] if np.any(np.isnan(PM)) or np.any(np.isinf(PM)): err = np.empty(len(mus)) err[:] = np.inf return err nAs = self.HFEngine.nAs - 1 nbs = max(self.HFEngine.nbs - 1, nAs * self.homogeneized) radiusmus = self.radiusPade(mus) radiussmus = self.radiusPade(self.mus) musTile = np.tile(radiusmus.reshape(-1, 1), [1, self.S]) smusCol = radiussmus.reshape(1, -1) num = np.prod(musTile[:, : self.S] - smusCol, axis = 1) den = self.getQVal(mus) self.assembleReducedResidualBlocks() vanderBase = np.polynomial.polynomial.polyvander(radiusmus, max(nAs, nbs)).T radiusb0 = vanderBase[: nbs + 1, :] - RHSnorms = np.power(np.abs(np.einsum('ij,ij->j', - self.resbb.dot(radiusb0), - radiusb0.conj())), .5) + # 'ij,jk,ik->k', resbb, radiusb0, radiusb0.conj() + b0resb0 = np.sum(self.resbb.dot(radiusb0) * radiusb0.conj(), axis = 0) + RHSnorms = np.power(np.abs(b0resb0), .5) vanderBase = vanderBase[: -1, :] delta = self.S - self.N - 1 nbsEff = max(0, nbs - delta) - computeff = ((self.errorEstimatorKind == "SIMPLIFIED" and delta == 0) - or (self.errorEstimatorKind == "EXACT" and nbsEff > 0)) if self.errorEstimatorKind == "SIMPLIFIED": radiusA = np.tensordot(PM, vanderBase[: nAs, :], 0) if delta == 0: radiusb = np.abs(self.Q[-1]) * radiusb0[: -1, :] else: #if self.errorEstimatorKind == "EXACT": momentQ = np.zeros(nbsEff, dtype = np.complex) momentQu = np.zeros((self.S, nAs), dtype = np.complex) - radiusbTen = np.zeros((nbsEff, len(mus), nbsEff), + radiusbTen = np.zeros((nbsEff, nbsEff, len(mus)), dtype = np.complex) radiusATen = np.zeros((nAs, nAs, len(mus)), dtype = np.complex) if nbsEff > 0: momentQ[0] = self.Q[-1] - radiusbTen[:, :, 0] = vanderBase[: nbsEff, :] + radiusbTen[0, :, :] = vanderBase[: nbsEff, :] momentQu[:, 0] = self.P[:, -1] radiusATen[0, :, :] = vanderBase[: nAs, :] Qvals = self.getQVal(self.mus) - for k in range(1, max(nAs, nbs)): + for k in range(1, max(nAs, nbs * (nbsEff > 0))): Qvals = Qvals * radiussmus if k > delta and k < nbs: momentQ[k - delta] = self._fitinv.dot(Qvals) - radiusbTen[k :, :, k - delta] = radiusbTen[: delta - k, - :, 0] + radiusbTen[k - delta, k :, :] = ( + radiusbTen[0, : delta - k, :]) if k < nAs: momentQu[:, k] = Qvals * self._fitinv radiusATen[k, k :, :] = radiusATen[0, : - k, :] if self.POD and nAs > 1: - momentQu[:, 1 :] = self.samplingEngine.RPOD.dot(momentQu[:, - 1 :]) + momentQu[:, 1 :] = self.samplingEngine.RPOD.dot( + momentQu[:, 1 :]) radiusA = np.tensordot(momentQu, radiusATen, 1) if nbsEff > 0: - radiusb = radiusbTen.dot(momentQ) - if computeff: - ff = np.einsum('ij,ij->j', - self.resbb[delta + 1 :, delta + 1 :].dot(radiusb), - radiusb.conj()) - Lf = np.einsum('il,il->l', - np.tensordot(self.resAb[delta :, :, :], radiusA, 2), - radiusb.conj()) + radiusb = np.tensordot(momentQ, radiusbTen, 1) + if 
+                or (self.errorEstimatorKind == "EXACT" and nbsEff > 0)):
+            # 'ij,jk,ik->k', resbb, radiusb, radiusb.conj()
+            ff = np.sum(self.resbb[delta + 1 :, delta + 1 :].dot(radiusb)
+                        * radiusb.conj(), axis = 0)
+            # 'ijk,jkl,il->l', resAb, radiusA, radiusb.conj()
+            Lf = np.sum(np.tensordot(self.resAb[delta :, :, :], radiusA, 2)
+                        * radiusb.conj(), axis = 0)
         else:
             ff, Lf = 0., 0.
-        LL = np.einsum('ikt,kit->t', np.tensordot(self.resAA, radiusA, 2),
-                       radiusA.conj())
+        # 'ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj()
+        LL = np.sum(np.tensordot(self.resAA, radiusA, 2) * radiusA.conj(),
+                    axis = (0, 1))
         jOpt = np.power(np.abs(ff - 2. * np.real(Lf) + LL), .5)
         return (self._domcoeff(self.S - 1) * jOpt * np.abs(num / den)
               / RHSnorms)
 
     def setupApprox(self):
         """
         Compute Pade' interpolant. SVD-based robust eigenvalue management.
         """
         if not self.checkComputedApprox():
             if self.verbosity >= 5:
                 verbosityDepth("INIT", "Setting up {}.".format(self.name()))
             self.computeScaleFactor()
             self.S = len(self.mus)
             self._M = self.S - 1
             self._N = self.S - 1
             if self.Delta < 0:
                 self._M += self.Delta
             else:
                 self._N -= self.Delta
             if min(self.M, self.N) < 0:
                 if self.verbosity >= 5:
                     verbosityDepth("MAIN",
                                    "Minimal sample size not achieved.")
                 self.Q = np.empty(max(self.N, 0) + 1, dtype = np.complex)
                 self.P = np.empty((len(self.mus), max(self.M, 0) + 1),
                                   dtype = np.complex)
                 self.Q[:] = np.nan
                 self.P[:] = np.nan
                 self.lastApproxParameters = copy(self.approxParameters)
                 if self.verbosity >= 5:
                     verbosityDepth("DEL", ("Aborting computation of "
                                            "approximant.\n"))
                 return
             self.greedy()
             if self.N > 0:
                 if self.verbosity >= 7:
                     verbosityDepth("INIT", ("Starting computation of "
                                             "denominator."))
                 TS = self._vander(self.radiusPade(self.mus), self.S - 1)
                 while self.N > 0:
                     RHS = np.zeros(self.S)
                     RHS[-1] = 1.
                     fitOut = customFit(TS.T, RHS, full = True,
                                        rcond = self.interpRcond)
                     if self.verbosity >= 2:
                         verbosityDepth("MAIN", ("Fitting {} samples with "
                                                 "degree {} through {}... "
                                                 "Conditioning of system: "
                                                 "{:.4e}.").format(
                                        self.S, self.S - 1, self._fitname,
                                        fitOut[1][2][0] / fitOut[1][2][-1]))
                     if fitOut[1][1] < self.S:
                         warn(("Polyfit is poorly conditioned. "
Starting " "preemptive termination of computation of " "approximant.")) self.Q = np.empty(max(self.N, 0) + 1, dtype = np.complex) self.P = np.empty((len(self.mus), max(self.M, 0) + 1), dtype = np.complex) self.Q[:] = np.nan self.P[:] = np.nan self.lastApproxParameters = copy(self.approxParameters) if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp if self.verbosity >= 7: verbosityDepth("DEL", ("Aborting computation of " "denominator.")) if self.verbosity >= 5: verbosityDepth("DEL", ("Aborting computation of " "approximant.\n")) return self._fitinv = fitOut[0] G = (TS[:, : self.N + 1].T * fitOut[0]).T if self.POD: if self.verbosity >= 7: verbosityDepth("INIT", ("Solving svd for square " "root of gramian matrix.")) G = self.samplingEngine.RPOD.dot(G) _, s, eV = np.linalg.svd(G, full_matrices = False) ev = s[::-1] eV = eV[::-1, :].conj().T if self.verbosity >= 2: try: condev = s[0] / s[-1] except: condev = np.inf verbosityDepth("MAIN", ("Solved svd problem of " "size {} x {} with " "condition number " "{:.4e}.").format( self.S, self.N + 1, condev)) else: if self.verbosity >= 10: verbosityDepth("INIT", "Building gramian matrix.", end = "") G = self.samplingEngine.samples.dot(G) G2 = self.HFEngine.innerProduct(G, G) if self.verbosity >= 10: verbosityDepth("DEL", "Done building gramian.", inline = True) if self.verbosity >= 7: verbosityDepth("INIT", ("Solving eigenvalue " "problem for gramian " "matrix.")) ev, eV = np.linalg.eigh(G2) if self.verbosity >= 2: try: condev = ev[-1] / ev[0] except: condev = np.inf verbosityDepth("MAIN", ("Solved eigenvalue " "problem of size {} with " "condition number " "{:.4e}.").format( self.N + 1, condev)) if self.verbosity >= 7: verbosityDepth("DEL", ("Done solving eigenvalue " "problem.")) newParameters = checkRobustTolerance(ev, self.M, self.robustTol) if not newParameters: break self._N = newParameters["N"] self._M = newParameters["E"] if self.N <= 0: self._N = 0 eV = np.ones((1, 1)) self.Q = eV[:, 0] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing denominator.") else: self.Q = np.ones(1, dtype = np.complex) if self.verbosity >= 7: verbosityDepth("INIT", "Starting computation of numerator.") self.lastApproxParameters = copy(self.approxParameters) Qevaldiag = np.diag(self.getQVal(self.mus)) while self.M >= 0: fitVander = self._vander(self.radiusPade(self.mus), self.M) fitOut = customFit(fitVander, Qevaldiag, full = True, rcond = self.interpRcond) if fitOut[1][1] == self.M + 1: P = fitOut[0].T break warn(("Polyfit is poorly conditioned. Reducing M from {} to " "{}. Exact snapshot interpolation not guaranteed.")\ .format(self.M, fitOut[1][1] - 1)) self._M = fitOut[1][1] - 1 if self.M <= 0: raise Exception(("Instability in computation of numerator. 
" "Aborting.")) self.P = np.atleast_2d(P) if self.POD: self.P = self.samplingEngine.RPOD.dot(self.P) if self.verbosity >= 7: verbosityDepth("DEL", "Done computing numerator.") self.lastApproxParameters = copy(self.approxParameters) if hasattr(self, "lastSolvedApp"): del self.lastSolvedApp if self.verbosity >= 5: verbosityDepth("DEL", "Done setting up approximant.\n") def assembleReducedResidualBlocks(self): """Build affine blocks of reduced linear system through projections.""" self.initEstNormer() if self.As is None or self.bs is None: if self.verbosity >= 7: verbosityDepth("INIT", "Computing Taylor blocks of system.") nAs = self.HFEngine.nAs - 1 nbs = max(self.HFEngine.nbs, (nAs + 1) * self.homogeneized) self.As = [self.HFEngine.A(self.mu0, j + 1) for j in range(nAs)] self.bs = [self.HFEngine.b(self.mu0, j, self.homogeneized) for j in range(nbs)] if self.verbosity >= 7: verbosityDepth("DEL", "Done computing Taylor blocks.") computeResbb = self.resbb is None computeResAb = self.resAb is None or self.resAb.shape[1] != self.S - computeResAA = self.resAA is None or self.resAA.shape[1] != self.S + computeResAA = self.resAA is None or self.resAA.shape[0] != self.S samples = self.samplingEngine.samples if computeResbb or computeResAb or computeResAA: if self.verbosity >= 7: verbosityDepth("INIT", "Projecting Taylor terms of residual.") nAs = len(self.As) nbs = len(self.bs) - 1 if computeResbb: self.resbb = np.empty((nbs + 1, nbs + 1), dtype = np.complex) for i in range(nbs + 1): Mbi = self.scaleFactor ** i * self.bs[i] for j in range(i): Mbj = self.scaleFactor ** j * self.bs[j] self.resbb[i, j] = self.estNormer.innerProduct(Mbj, Mbi) self.resbb[i, i] = self.estNormer.innerProduct(Mbi, Mbi) for i in range(nbs + 1): for j in range(i + 1, nbs + 1): self.resbb[i, j] = self.resbb[j][i].conj() if computeResAb: if self.resAb is None: self.resAb = np.empty((nbs, self.S, nAs), dtype = np.complex) for i in range(nbs): Mbi = self.scaleFactor ** (i + 1) * self.bs[i + 1] for j in range(nAs): MAj = (self.scaleFactor ** (j + 1) * self.As[j].dot(samples)) self.resAb[i, :, j] = self.estNormer.innerProduct( MAj, Mbi) else: Sold = self.resAb.shape[1] if Sold > self.S: self.resAb = self.resAb[:, : self.S, :] else: resAbNew = np.empty((nbs, self.S, nAs), dtype = np.complex) resAbNew[:, : Sold, :] = self.resAb self.resAb = resAbNew for i in range(nbs): Mbi = self.scaleFactor ** (i + 1) * self.bs[i + 1] for j in range(nAs): MAj = (self.scaleFactor ** (j + 1) * self.As[j].dot(samples[:, Sold :])) self.resAb[i, Sold :, j] = ( self.estNormer.innerProduct(MAj, Mbi)) if computeResAA: if self.resAA is None: - self.resAA = np.empty((nAs, self.S, self.S, nAs), + self.resAA = np.empty((self.S, nAs, self.S, nAs), dtype = np.complex) for i in range(nAs): MAi = (self.scaleFactor ** (i + 1) * self.As[i].dot(samples)) for j in range(i): MAj = (self.scaleFactor ** (j + 1) * self.As[j].dot(samples)) - self.resAA[i, :, :, j] = ( + self.resAA[:, i, :, j] = ( self.estNormer.innerProduct(MAj, MAi)) - self.resAA[i, :, :, i] = self.estNormer.innerProduct( + self.resAA[:, i, :, i] = self.estNormer.innerProduct( MAi, MAi) for i in range(nAs): for j in range(i + 1, nAs): - self.resAA[i, :, :, j] = ( + self.resAA[:, i, :, j] = ( self.resAA[j, :, :, i].conj()) else: - Sold = self.resAA.shape[1] + Sold = self.resAA.shape[0] if Sold > self.S: - self.resAA = self.resAA[:, : self.S, : self.S, :] + self.resAA = self.resAA[: self.S, :, : self.S, :] else: - resAANew = np.empty((nAs, self.S, self.S, nAs), + resAANew = np.empty((self.S, nAs, self.S, 
                                             dtype = np.complex)
-                        resAANew[:, : Sold, : Sold, :] = self.resAA
+                        resAANew[: Sold, :, : Sold, :] = self.resAA
                         self.resAA = resAANew
                         for i in range(nAs):
                             MAi = (self.scaleFactor ** (i + 1)
                                  * self.As[i].dot(samples))
                             for j in range(i):
                                 MAj = (self.scaleFactor ** (j + 1)
                                      * self.As[j].dot(samples))
-                                self.resAA[i, : Sold, Sold :, j] = (
+                                self.resAA[: Sold, i, Sold :, j] = (
                                         self.estNormer.innerProduct(
                                                             MAj[:, Sold :],
                                                             MAi[:, : Sold]))
-                                self.resAA[i, Sold :, : Sold, j] = (
+                                self.resAA[Sold :, i, : Sold, j] = (
                                         self.estNormer.innerProduct(
                                                             MAj[:, : Sold],
                                                             MAi[:, Sold :]))
-                                self.resAA[i, Sold :, Sold :, j] = (
+                                self.resAA[Sold :, i, Sold :, j] = (
                                         self.estNormer.innerProduct(
                                                             MAj[:, Sold :],
                                                             MAi[:, Sold :]))
-                            self.resAA[i, : Sold, Sold :, i] = (
+                            self.resAA[: Sold, i, Sold :, i] = (
                                         self.estNormer.innerProduct(
                                                             MAi[:, Sold :],
                                                             MAi[:, : Sold]))
-                            self.resAA[i, Sold :, : Sold, i] = (
-                                    self.resAA[i, : Sold, Sold :, i].conj().T)
+                            self.resAA[Sold :, i, : Sold, i] = (
+                                    self.resAA[: Sold, i, Sold :, i].conj().T)
-                            self.resAA[i, Sold :, Sold :, i] = (
+                            self.resAA[Sold :, i, Sold :, i] = (
                                         self.estNormer.innerProduct(
                                                             MAi[:, Sold :],
                                                             MAi[:, Sold :]))
                     for i in range(nAs):
                         for j in range(i + 1, nAs):
-                            self.resAA[i, :, :, j] = (
-                                               self.resAA[j, :, :, i].conj())
+                            self.resAA[:, i, :, j] = (
+                                               self.resAA[:, j, :, i].conj())
             if self.verbosity >= 7:
                 verbosityDepth("DEL", ("Done setting up Taylor "
                                        "decomposition of residual."))
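
Note on the rewrites above: the patch replaces three-factor np.einsum contractions by a matrix product or np.tensordot followed by an elementwise product and an axis sum, and reorders the axes of resAA to (S, nAs, S, nAs) precisely so that np.tensordot(self.resAA, radiusA, 2) contracts the trailing (sample, term) axes of resAA against the leading axes of radiusA. The snippet below is a standalone sketch, not part of the patch, that checks the two contraction identities on random data; the array names and sizes (nb, nA, S, T) are illustrative only.

    import numpy as np

    nb, nA, S, T = 4, 3, 5, 7  # hypothetical block counts / sample sizes

    def crandn(*shape):
        # complex Gaussian test data
        return np.random.randn(*shape) + 1j * np.random.randn(*shape)

    # 'ij,jk,ik->k' as a matrix product, an elementwise product and a sum
    resbb, radiusb = crandn(nb, nb), crandn(nb, T)
    direct = np.einsum('ij,jk,ik->k', resbb, radiusb, radiusb.conj())
    split = np.sum(resbb.dot(radiusb) * radiusb.conj(), axis = 0)
    assert np.allclose(direct, split)

    # 'ijkl,klt,ijt->t': tensordot contracts the last two axes of resAA
    # against the first two of radiusA, leaving shape (S, nA, T); the
    # elementwise product and the sum over axes (0, 1) close the contraction
    resAA, radiusA = crandn(S, nA, S, nA), crandn(S, nA, T)
    direct = np.einsum('ijkl,klt,ijt->t', resAA, radiusA, radiusA.conj())
    split = np.sum(np.tensordot(resAA, radiusA, 2) * radiusA.conj(),
                   axis = (0, 1))
    assert np.allclose(direct, split)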