% Page MenuHomec4science
%
% rational_interpolation_method.tex
% No OneTemporary
%
% File Metadata
%
% Created
% Sat, Apr 20, 18:33
%
% rational_interpolation_method.tex

\documentclass[10pt,a4paper]{article}
\usepackage[left=1in,right=1in,top=1in,bottom=1in]{geometry}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{hyperref}
\usepackage{xcolor}
\setlength{\parindent}{0pt}
\newcommand{\code}[1]{{\color{blue}\texttt{#1}}}
\newcommand{\footpath}[1]{\footnote{\path{#1}}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\I}{\mathcal{I}}
\DeclareMathOperator*{\argmin}{arg\,min}
\newcommand{\inner}[2]{\left\langle#1,#2\right\rangle_V}
\newcommand{\norm}[1]{\left\|#1\right\|_V}
\title{\bf The \code{RROMPy} rational interpolation method}
\author{D. Pradovera, CSQI, EPF Lausanne -- \texttt{davide.pradovera@epfl.ch}}
\date{}
\hypersetup{ pdftitle={The RROMPy rational interpolation method}, pdfauthor={Davide Pradovera} }
\begin{document}
\maketitle
\section*{Introduction}
This document provides an explanation for the numerical methods provided by the class \code{Rational Interpolant}\footpath{./rrompy/reduction_methods/standard/rational_interpolant.py} and daughters, e.g. \code{Rational Interpolant Greedy}\footpath{./rrompy/reduction_methods/standard/greedy/rational_interpolant_greedy.py}, as well as most of the pivoted approximants\footpath{./rrompy/reduction_methods/pivoted/{,greedy/}rational_interpolant_*.py}.
Most of the focus will be dedicated to the impact of the (\code{rationalMode},\code{functionalSolve}) parameters, whose allowed values are
\begin{itemize}
\item (\code{MINIMAL},\code{NORM}) (default): see Section~\ref{sec:min:norm}; allows for repeated sample points.
\item (\code{MINIMAL},\code{DOMINANT}): see Section~\ref{sec:min:dominant}; allows for repeated sample points.
\item (\code{BARYCENTRIC},\code{NORM}): see Section~\ref{sec:min:barycentricnorm}; does not allow for a Least Squares approach; undefined for more than one parameter.
\item (\code{BARYCENTRIC},\code{DOMINANT}): see Section~\ref{sec:min:barycentricaverage}; does not allow for a Least Squares approach; undefined for more than one parameter.
\item (\code{STANDARD},\code{NORM}): see Section~\ref{sec:out:norm}; allows for repeated sample points.
\item (\code{STANDARD},\code{DOMINANT}): see Section~\ref{sec:out:dominant}; allows for repeated sample points.
\end{itemize}
We restrict the discussion to the single-parameter case. The main reference throughout the present document is \cite{Pradovera}.
\section{Aim of approximation}
We seek an approximation of $u:\C\to V$, with $(V,\inner{\cdot}{\cdot})$ a complex\footnote{The inner product is linear (resp. conjugate linear) in the first (resp. second) argument: $\inner{\alpha v}{\beta w}=\alpha\overline{\beta}\inner{v}{w}$.} Hilbert space (with induced norm $\norm{\cdot}$), of the form $\widehat{p}/\widehat{q}$, where $\widehat{p}:\C\to V$ and $\widehat{q}:\C\to\C$. The target $u$ might be high-dimensional (for instance, the solution of a PDE after Finite Element discretization) or low(er)-dimensional (for instance, a functional of the above-mentioned PDE solution). For a given denominator $\widehat{q}$, the numerator $\widehat{p}$ is found by interpolation of $\widehat{q}u$. Hence, here we focus on the computation of the denominator $\widehat{q}$.
Other than the choice of target function $u$, the parameters which affect the computation of $\widehat{q}$ are:
\begin{itemize}
\item $\code{mus}\subset\C$ ($\{\mu_j\}_{j=1}^S$ below); for \code{BARYCENTRIC}, the $S$ points must be distinct.
\item $\code{N}\in\N$ ($N$ below); for \code{BARYCENTRIC}, $N$ must equal $S-1$.
\item $\code{polybasis}\in\{\code{"CHEBYSHEV"}, \code{"LEGENDRE"}, \code{"MONOMIAL"}\}$.
\end{itemize}
To simplify the notation, we set $E=S-1$. For simplicity, we will consider only the case of $S$ distinct sample points. One can deal with the case of confluent sample points by extending the standard (Lagrange) interpolation steps to Hermite-Lagrange ones.
In the following, we will make use of the polynomial interpolation operator $\I^M:(\C\times W)^S\to\mathbb{P}^M(\C;W)$, where $M$ is an integer (either $N$ or $E$), and $W$ a Banach space (either $\C$ or $V$). We define its action on samples $\left((\mu_j,\psi_j)\right)_{j=1}^S\in(\C\times W)^S$ as
\begin{equation*}
\I^M\left(\big((\mu_j,\psi_j)\big)_{j=1}^S\right)=\argmin_{p\in\mathbb{P}^M(\C;W)}\sum_{j=1}^S\left\|p(\mu_j)-\psi_j\right\|_W^2,
\end{equation*}
where
\begin{equation*}
\mathbb{P}^M(\C;W)=\left\{\mu\mapsto\sum_{i=0}^M\alpha_i\mu^i\ :\ \alpha_0,\ldots,\alpha_M\in W\right\}.
\end{equation*}
In \code{RROMPy}, we compute interpolants by employing normal equations: given a basis $\{\phi_i\}_{i=0}^M$ of $\mathbb{P}^M(\C;\C)$, we expand
\begin{equation*}
p(\mu)=\sum_{i=0}^Mc_i\phi_i(\mu)
\end{equation*}
and observe that, for optimality, the coefficients $\{c_i\}_{i=0}^M\subset W$ must satisfy
\begin{equation*}
\sum_{j=1}^S\sum_{l=0}^M\overline{\phi_i(\mu_j)}\phi_l(\mu_j)c_l=\sum_{j=1}^S\overline{\phi_i(\mu_j)}\psi_j\quad\forall i=0,\ldots,M,
\end{equation*}
i.e., in matrix form\footnote{The superscript ${}^H$ denotes conjugate transposition, i.e. $A^H=\overline{A}^\top$.},
\begin{equation}\label{eq:normal}
\underbrace{[c_i]_{i=0}^M}_{\in W^{M+1}}=\bigg(\underbrace{\Big[\overline{\phi_i(\mu_j)}\Big]_{i=0,j=1}^{M,S}}_{=:\Phi^H\in\C^{(M+1)\times S}}\underbrace{\Big[\phi_i(\mu_j)\Big]_{j=1,i=0}^{S,M}}_{=:\Phi\in\C^{S\times(M+1)}}\bigg)^{-1}\underbrace{\Big[\overline{\phi_i(\mu_j)}\Big]_{i=0,j=1}^{M,S}}_{=:\Phi^H\in\C^{(M+1)\times S}}\underbrace{[\psi_j]_{j=1}^S}_{\in W^{S}}.
\end{equation}
In practice, the polynomial basis $\{\phi_i\}_{i=0}^M$ is determined by the value of $\code{polybasis}$:
\begin{itemize}
\item If $\code{polybasis}=\code{"CHEBYSHEV"}$, then $\phi_k(\mu)=\mu^k$ for $k\in\{0,1\}$ and $\phi_k(\mu)=2\mu\phi_{k-1}(\mu)-\phi_{k-2}(\mu)$ for $k\geq2$.
\item If $\code{polybasis}=\code{"LEGENDRE"}$, then $\phi_k(\mu)=\mu^k$ for $k\in\{0,1\}$ and $\phi_k(\mu)=(2-1/k)\mu\phi_{k-1}(\mu)-(1-1/k)\phi_{k-2}(\mu)$ for $k\geq2$.
\item If $\code{polybasis}=\code{"MONOMIAL"}$, then $\phi_k(\mu)=\mu^k$ for $k\geq0$.
\end{itemize}
Moreover, it will prove useful to define the ``snapshot rank'' $r=r(\{u(\mu_j)\}_{j=1}^S)=\text{dim}~\text{span}\{u(\mu_j)\}_{j=1}^S$. This quantity is bounded from above by $S$ and by the dimension of $V$ (e.g., $r=1$ if $V=\C$).
\section{\code{MINIMAL} Rational Interpolation (MRI)}
The main motivation behind the MRI method involves the modified approximation problem
\begin{equation}\label{eq:simple}
u\approx\I^E\left(\Big(\big(\mu_j,\widehat{q}(\mu_j)u(\mu_j)\big)\Big)_{j=1}^S\right)\Big/\widehat{q},
\end{equation}
where $\widehat{q}:\C\to\C$ is a polynomial of degree $\leq N\leq E$.
The denominator $\widehat{q}$ is found as
\begin{equation}\label{eq:glob1}
\widehat{q}=\argmin_{\substack{q\in\mathbb{P}^N(\C;\C)\\(\star)}}\norm{\frac{\textrm{d}^E}{\textrm{d}\mu^E}\I^E\left(\Big(\big(\mu_j,q(\mu_j)u(\mu_j)\big)\Big)_{j=1}^S\right)},
\end{equation}
where $(\star)$ is a normalization condition (which changes depending on \code{functionalSolve}) to exclude the trivial minimizer $\widehat{q}\equiv0$. The methods described differ in terms of the constraint $(\star)$, as well as of the degrees of freedom which are chosen to represent the denominator $q$.
For \eqref{eq:glob1} to be well-defined (unique optimizer up to unit scaling), in addition to the condition $N\leq S-1$, we must have (by balance of degrees of freedom vs. constraints) $N\leq r$.
\subsection{Polynomial coefficients as degrees of freedom}\label{sec:mri:poly}
If the polynomial basis $\{\phi_i\}_{i=0}^E$ in \eqref{eq:normal} is hierarchical (as the three ones above), then the $E$-th derivative of $\I^E$ is proportional to the coefficient $c_E$, and we have
\begin{equation}\label{eq:poly1}
\widehat{q}=\argmin_{\substack{q\in\mathbb{P}^N(\C;\C)\\(\star)}}\norm{[\underbrace{0,\ldots,0}_E,1]\big(\Phi^H\Phi\big)^{-1}\Phi^H[q(\mu_j)u(\mu_j)]_{j=1}^S}.
\end{equation}
Using the Kronecker delta ($\delta_{ij}=1$ if $i=j$ and $\delta_{ij}=0$ if $i\neq j$), the last term $[q(\mu_j)u(\mu_j)]_{j=1}^S\in V^S$ can be factored into
\begin{equation}\label{eq:poly2}
\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}[q(\mu_j)]_{j=1}^S=\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}\widetilde{\Phi}\mathbf{q},
\end{equation}
where $\widetilde{\Phi}$ is the $S\times(N+1)$ matrix obtained by extracting the first $N+1$ columns of $\Phi$. We remark that we have expanded the polynomial $q$ using the basis\footnote{In theory, nothing prevents us from using different bases for $\I^E$ and $q$, cf. Section~\ref{sec:observations}.} $\{\phi_i\}_{i=0}^N$: $q(\mu)=\sum_{i=0}^Nq_i\phi_i(\mu)$, with coefficients $\mathbf{q}=[q_i]_{i=0}^N\in\C^{N+1}$.
Combining \eqref{eq:poly1} and \eqref{eq:poly2}, it is useful to consider the $(N+1)\times(N+1)$ Hermitian matrix with entries ($0\leq i,i'\leq N$)
\begin{equation}\label{eq:poly3}
G_{ii'}=\sum_{j,j'=1}^S\left(\Phi\big(\Phi^H\Phi\big)^{-1}\right)_{j,E+1}\left(\big(\Phi^H\Phi\big)^{-1}\Phi^H\right)_{E+1,j'}\overline{\Phi}_{ji}\Phi_{j'i'}\inner{u(\mu_{j'})}{u(\mu_j)}.
\end{equation}
If $(\star)$ is quadratic (resp. linear) in $\mathbf{q}$, then we can cast the computation of the denominator as a quadratically (resp. linearly) constrained quadratic program involving $G$.
\subsubsection{Quadratic constraint}\label{sec:min:norm}
We constrain $\widehat{\mathbf{q}}$ to have unit (Euclidean) norm. The resulting optimization problem can be cast as a minimal (normalized) eigenvector problem for $G$ in \eqref{eq:poly3}. More explicitly,
\[\widehat{\mathbf{q}}=\argmin_{\substack{\mathbf{q}\in\C^{N+1}\\\|\mathbf{q}\|_2=1}}\mathbf{q}^HG\mathbf{q}.\]
\subsubsection{Linear constraint}\label{sec:min:dominant}
We constrain $\widehat{q}_N=1$, thus forcing $q$ to be monic, with degree exactly $N$. Given $G$ in \eqref{eq:poly3}, the resulting optimization problem can be solved rather easily as:
\[\widehat{\mathbf{q}}=\frac{G^{-1}\mathbf{e}_{N+1}}{\mathbf{e}_{N+1}^\top G^{-1}\mathbf{e}_{N+1}},\quad\text{with }\mathbf{e}_{N+1}=[0,\ldots,0,1]^\top\in\C^{N+1}.\]
\subsection{Barycentric coefficients as degrees of freedom}\label{sec:bary}
Here we assume that the sample points are distinct, and that $N=E=S-1$, so that, in particular, $\Phi=\widetilde{\Phi}$. Considering the constraint $N\leq r$, we can deduce that the approach presented in this section can only be applied if the rank of the snapshots is either full or defective by 1.
We can choose for convenience a non-hierarchical rational basis, dependent on the sample points, for $q$ and $\I^E$, taking inspiration from barycentric interpolation:
\begin{equation*}
\varphi_i(\mu)=\frac{1}{\mu-\mu_{i+1}}\quad\text{for }i=0,\ldots,N.
\end{equation*}
Evaluation of such basis at the sample points leads to unbounded results. To correct this, we symbolically weigh each row by the nodal polynomial $\prod_{j=1}^S(\cdot-\mu_j)$, causing cancellation of the unbounded terms. The resulting ``effective'' polynomial basis is
\begin{equation}\label{eq:bary1}
\phi_i(\mu)=\prod_{\substack{j=1\\j\neq i+1}}^S(\mu-\mu_j).
\end{equation}
Since all elements of the basis are monic and of degree exactly $N$, the minimization problem can be cast as
\begin{equation}\label{eq:bary2}
\widehat{\mathbf{q}}=\argmin_{\substack{\mathbf{q}\in\C^{N+1}\\(\star)}}\norm{[\underbrace{1,\ldots,1}_{S}]\big(\Phi^H\Phi\big)^{-1}\Phi^H\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}\Phi\mathbf{q}\,}.
\end{equation}
At the same time, it is easy to see from \eqref{eq:bary1} that the Vandermonde-like matrix $\Phi$ is diagonal, so that
\[\big(\Phi^H\Phi\big)^{-1}\Phi^H\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}\Phi=\big(\Phi^H\Phi\big)^{-1}\Phi^H\Phi\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}=\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S},\]
and
\begin{equation}\label{eq:bary3}
\widehat{\mathbf{q}}=\argmin_{\substack{\mathbf{q}\in\C^{N+1}\\(\star)}}\norm{\sum_{i=0}^Eu(\mu_{i+1})q_i}.
\end{equation}
Considering \eqref{eq:bary3}, it is useful to define the $S\times S$ Hermitian ``snapshot Gramian'' matrix with entries ($0\leq i,i'\leq N$)
\begin{equation}\label{eq:bary4}
G_{ii'}=\inner{u(\mu_{i'+1})}{u(\mu_{i+1})}.
\end{equation}
So, once again, if $(\star)$ is quadratic (resp. linear) in $\mathbf{q}$, then we can cast the computation of the denominator as a quadratically (resp. linearly) constrained quadratic program involving $G$.
Before specifying the kind of normalization enforced, it is important to make a remark on numerical stability. The basis in \eqref{eq:bary1} is actually just an ($i$-dependent) factor away from being the Lagrangian one (for which $\phi_i(\mu_j)$ would equal $\delta_{(i+1)j}$ instead of
\[\delta_{(i+1)j}\prod_{k\neq i+1}(\mu_j-\mu_k),\]
as it does in our case). As such, it is generally a bad idea to numerically evaluate $q$ starting from its expansion coefficients with respect to $\{\phi_i\}_{i=0}^N$. We get around this by exploiting the following trick, whose foundation is in \cite[Section 2.3.3]{Klein}: the roots of $\widehat{q}=\sum_{i=0}^N\widehat{q}_i\phi_i$ are the $N$ finite eigenvalues $\lambda$ of the generalized $(N+2)\times(N+2)$ eigenproblem
\begin{equation}\label{eq:bary5}
\textrm{Det}\left(\begin{bmatrix}
0 & \widehat{q}_0 & \cdots & \widehat{q}_N\\
1 & \mu_1 & & \\
\vdots & & \ddots & \\
1 & & & \mu_S
\end{bmatrix}-\begin{bmatrix}
0 & & & \\
& 1 & & \\
& & \ddots & \\
& & & 1
\end{bmatrix}\lambda\right)=0.
\end{equation}
This computation is numerically more stable than most other manipulations of a polynomial in the basis \eqref{eq:bary1}. Once the roots of $\widehat{q}$ have been computed, one can either convert it to nodal form
\begin{equation}\label{eq:pole1}
\widehat{q}(\mu)\propto\prod_{i=1}^N(\mu-\widehat{\lambda}_i),
\end{equation}
or forgo using $\widehat{q}$ completely, in favor of a Heaviside-like approximation involving the newly computed roots $\{\widehat{\lambda}_i\}_{i=1}^N$ as poles:
\[
\frac{\widehat{p}(\mu)}{\widehat{q}(\mu)}\quad\rightsquigarrow\quad\widehat{b}_0+\sum_{i=1}^N\frac{\widehat{b}_i}{\mu-\widehat{\lambda}_i}.
\]
See the final paragraph in Section~\ref{sec:observations} for a slightly more detailed motivation of why the Heaviside form of the approximant might be more useful than the standard rational one $\widehat{p}/\widehat{q}$ in practice.
\subsubsection{Quadratic constraint}\label{sec:min:barycentricnorm}
We constrain $\widehat{\mathbf{q}}$ to have unit (Euclidean) norm. The resulting optimization problem can be cast as a minimal (normalized) eigenvector problem for $G$ in \eqref{eq:bary4}. More explicitly,
\[\widehat{\mathbf{q}}=\argmin_{\substack{\mathbf{q}\in\C^{N+1}\\\|\mathbf{q}\|_2=1}}\mathbf{q}^HG\mathbf{q}.\]
\subsubsection{Linear constraint}\label{sec:min:barycentricaverage}
We constrain $\sum_{i=0}^N\widehat{q}_i=1$, so that the polynomial $\widehat{q}$ is monic. Given $G$ in \eqref{eq:bary4}, the resulting optimization problem can be solved rather easily as:
\[\widehat{\mathbf{q}}=\frac{G^{-1}\mathbf{1}_{S}}{\mathbf{1}_{S}^\top G^{-1}\mathbf{1}_{S}},\quad\text{with }\mathbf{1}_{S}=[1,\ldots,1]^\top\in\C^{S}.\]
\subsection{Minor observations for MRI}\label{sec:observations}
\begin{itemize}
\item If $N=E$, normal equations are not necessary to compute $\I^E$, since $\Phi$ is square and can be inverted directly. However, in practical applications, it may be useful to decrease the degree $E$ of the interpolant (which, in our presentation, we kept fixed to $S-1$ for simplicity) to overcome numerical instabilities which may arise in the (pseudo-)inversion of $\Phi$. If this happens, $\Phi$ becomes non-square, and normal equations are the only option.
\item For \code{BARYCENTRIC}, a specific choice of polynomial basis for $\I^E$ was used to diagonalize the functional. Under the assumptions that the sample points are distinct (and that $E+1=S$), one can employ the quasi-Lagrangian basis \eqref{eq:bary1} to expand $\I^E$ in the other approaches as well, thus simplifying significantly the structure of \eqref{eq:glob1}:
\begin{equation*}
\widehat{q}=\argmin_{\substack{q\in\mathbb{P}^N(\C;\C)\\(\star)}}\norm{\sum_{j=1}^Sq(\mu_j)u(\mu_j)\prod_{\substack{j'=1\\j'\neq j}}^S\frac1{\mu_j-\mu_{j'}}}.
\end{equation*}
This is independent of the basis used to expand $q$ and, numerically, has repercussions only on the computation of the term $\big(\Phi^H\Phi\big)^{-1}\Phi^H$ in \eqref{eq:poly3}.
\item In general, \code{NORM} can be expected to be more numerically stable than \code{DOMINANT}. This is due to the fact that the normalization is enforced in a more numerically robust fashion.
\item If the snapshots are orthonormalized via \code{POD}\footpath{./rrompy/sampling/engines/sampling_engine_pod.py}, a simple unitary transformation allows to replace $V$ with $\C^r$. As a consequence, all the $V$-inner products (resp. norms) can be recast as Euclidean inner products (resp. norms) involving the $R$ factor of the generalized ($V$-orthonormal) QR decomposition of the snapshots.
\item If a univariate rational surrogate is built in the scope of multivariate pole-matching-based pivoted approximation\footpath{./rrompy/reduction_methods/pivoted/*}, the rational approximant is converted into a Heaviside/nodal representation when different surrogates are combined. As such, the \code{BARYCENTRIC} approach may be preferable to avoid extra computations, as well as additional round-off artifacts.
\end{itemize}
\section{\code{STANDARD} Rational Interpolation (SRI)}\label{sec:out:sri}
The main motivation behind the (polynomial) SRI method involves the modified approximation problem
\begin{equation*}
u\approx\I^M\left(\Big(\big(\mu_j,\widehat{q}(\mu_j)u(\mu_j)\big)\Big)_{j=1}^S\right)\Big/\widehat{q},
\end{equation*}
where $\widehat{q}:\C\to\C$ is a polynomial of degree $\leq N\leq E$, and $M\in\N$ is such that $0\leq M\leq S-2$.
The denominator $\widehat{q}$ is found as
\begin{equation}\label{eq:glob2}
\widehat{q}=\argmin_{\substack{q\in\mathbb{P}^N(\C;\C)\\(\star)}}\sum_{\ell=1}^S\norm{q(\mu_\ell)u(\mu_\ell)-\I^M\left(\Big(\big(\mu_j,q(\mu_j)u(\mu_j)\big)\Big)_{j=1}^S\right)(\mu_\ell)}^2,
\end{equation}
where $(\star)$ is a normalization condition (which changes depending on \code{functionalSolve}) to exclude the trivial minimizer $\widehat{q}\equiv0$.
For \eqref{eq:glob2} to be well-defined (unique optimizer up to unit scaling), in addition to the conditions $N\leq S-1$ and $M\leq S-2$, we must have (by balance of degrees of freedom vs. constraints) $M+N/r+1\leq S$.
As before, we can expand the target functional by linear algebra
\begin{equation}\label{eq:poly51}
\widehat{\mathbf{q}}=\argmin_{\substack{\mathbf{q}\in\C^{N+1}\\(\star)}}\sum_{\ell=1}^S\norm{\mathbf{e}_\ell^\top\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}\widetilde{\Phi}\mathbf{q}-\mathbf{e}_\ell^\top\Phi\big(\Phi^H\Phi\big)^{-1}\Phi^H\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}\widetilde{\Phi}\mathbf{q}}^2,
\end{equation}
with $\Phi\in\C^{S\times(M+1)}$, $\widetilde{\Phi}\in\C^{S\times(N+1)}$, and
\begin{equation*}
\mathbf{e}_\ell=[\underbrace{0,\ldots,0}_{\ell-1},1,\underbrace{0,\ldots,0}_{S-\ell}]^\top\in\C^S.
\end{equation*}
We observe that $I-\Phi\big(\Phi^H\Phi\big)^{-1}\Phi^H$ (with $I$ the $S\times S$ identity matrix) corresponds to the (orthogonal) projection onto the orthogonal complement of the range of $\Phi$, so that we may express it as $\Psi\Psi^H$, with the $S-M-1$ columns of $\Psi$ being an orthonormal basis for the orthogonal complement of the range of $\Phi$ (which can be found by completion of the QR decomposition of $\Phi$). Hence, we have
\begin{equation}\label{eq:poly52}
\widehat{\mathbf{q}}=\argmin_{\substack{\mathbf{q}\in\C^{N+1}\\(\star)}}\sum_{\ell=1}^S\norm{\mathbf{e}_\ell^\top\Psi\Psi^H\Big[u(\mu_j)\delta_{jj'}\Big]_{j=1,j'=1}^{S,S}\widetilde{\Phi}\mathbf{q}}^2.
\end{equation}
As above, we can interpret \eqref{eq:poly52} as the minimization of a quadratic form represented by a $(N+1)\times(N+1)$ Hermitian matrix with entries ($0\leq i,i'\leq N$)
\begin{equation}\label{eq:poly53}
G_{ii'}=\sum_{j,j'=1}^S\inner{u(\mu_{j'})}{u(\mu_j)}\left(\Psi\Psi^H\right)_{jj'}\overline{\Phi}_{ji}\Phi_{j'i'}.
\end{equation}
The matrix $G$ above can also be expressed as $\widetilde{\Phi}^H\big(\mathring{G}\bullet\left(\Psi\Psi^H\right)\big)\widetilde{\Phi}$, with $\mathring{G}$ the snapshot Gramian \eqref{eq:bary4} and $\bullet$ the Hadamard product. If a Cholesky decomposition $\mathring{G}=\mathring{R}^H\mathring{R}$ is available, then $G=\widetilde{\Phi}^H(\mathring{R}\odot\Psi^H)^H(\mathring{R}\odot\Psi^H)\widetilde{\Phi}$, with $\odot$ the Khatri-Rao product.
If $(\star)$ is quadratic (resp. linear) in $\mathbf{q}$, then we can cast the computation of the denominator as a quadratically (resp. linearly) constrained quadratic program involving $G$.
\subsection{Quadratic constraint}\label{sec:out:norm}
We constrain $\widehat{\mathbf{q}}$ to have unit (Euclidean) norm. The resulting optimization problem can be cast as a minimal (normalized) eigenvector problem for $G$ in \eqref{eq:poly53}. More explicitly,
\[\widehat{\mathbf{q}}=\argmin_{\substack{\mathbf{q}\in\C^{N+1}\\\|\mathbf{q}\|_2=1}}\mathbf{q}^HG\mathbf{q}.\]
\subsection{Linear constraint}\label{sec:out:dominant}
We constrain $\widehat{q}_N=1$, thus forcing $q$ to be monic, with degree exactly $N$. Given $G$ in \eqref{eq:poly53}, the resulting optimization problem can be solved rather easily as:
\[\widehat{\mathbf{q}}=\frac{G^{-1}\mathbf{e}_{N+1}}{\mathbf{e}_{N+1}^\top G^{-1}\mathbf{e}_{N+1}},\quad\text{with }\mathbf{e}_{N+1}=[0,\ldots,0,1]^\top\in\C^{N+1}.\]
\begin{thebibliography}{1}
\bibitem{Pradovera}%
D.~Pradovera, Interpolatory rational model order reduction of parametric problems lacking uniform inf-sup stability, SIAM J. Numer. Anal. 58 (2020) 2265--2293. doi:10.1137/19M1269695.
\bibitem{Klein}%
G.~Klein, Applications of Linear Barycentric Rational Interpolation, PhD Thesis no. 1762, Universit\'e de Fribourg (2012).
\end{thebibliography}
\end{document}

% Event Timeline