\section{Our optimization framework \label{sec:AL algorithm}}
To solve the formulation presented in \eqref{eq:minmax}, we propose the inexact ALM (iALM), detailed in Algorithm~\ref{Algo:2}.
At iteration $k$, Step 2 of Algorithm~\ref{Algo:2} calls a solver that finds an approximate stationary point of the augmented Lagrangian $\L_{\b_k}(\cdot,y_k)$ with accuracy $\epsilon_{k+1}$, and this accuracy improves gradually in a controlled fashion.
The increasing sequence of penalty weights $\{\b_k\}_k$ and the dual update (Steps 4 and 5) are responsible for continuously enforcing the constraints in~\eqref{prob:01}. As we will see in the convergence analysis, the particular choice of the dual step size $\s_k$ in Algorithm~\ref{Algo:2} ensures that the dual variable $y_k$ remains bounded; see~\cite{bertsekas1976penalty} for a precedent in the ALM literature where a similar choice for $\sigma_k$ is considered.
Step 3 of Algorithm~\ref{Algo:2} removes pathological cases with divergent iterates. As an example, suppose that $g=\delta_\mathcal{X}$ in \eqref{prob:01} is the indicator function for a bounded convex set $\mathcal{X}\subset \RR^d$ and take $\rho' > \max_{x\in \mathcal{X}} \|x\|$. Then, for sufficiently large $k$, it is not difficult to verify that all the iterates of Algorithm~\ref{Algo:2} automatically satisfy $\|x_k\|\le \rho'$ without the need to execute Step 3.
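As a quick sanity check on this boundedness claim (a back-of-the-envelope calculation rather than the formal convergence analysis), note that Steps 4 and 5 of Algorithm~\ref{Algo:2} imply
\begin{align*}
\s_{k+1}\|A(x_{k+1})\| \le \frac{\s_1 \|A(x_1)\| \log^2 2}{(k+1)\log^2(k+2)},
\end{align*}
so each dual update perturbs the dual variable by a summable amount and, by the triangle inequality, the sequence $\{y_k\}_k$ indeed remains bounded.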
\begin{algorithm}[h!]
\begin{algorithmic}
\STATE \textbf{Input:} $\rho,\rho',\rho''>0$, a non-decreasing, positive, unbounded sequence $\{\b_k\}_{k\ge 1}$, and stopping thresholds $\tau_f$ and $\tau_s$.
\STATE \textbf{Initialization:} $x_{1}\in \RR^d$ such that $\|A(x_1)\|\le \rho$ and $\|x_1\|\le \rho'$, $y_0\in \RR^m$, and an initial dual step size $\s_1>0$.
\FOR{$k=1,2,\dots$}
\STATE \begin{enumerate}[leftmargin=*]
\item \textbf{(Update tolerance)} $\epsilon_{k+1} = 1/\b_k$.
\item \textbf{(Inexact primal solution)} Obtain $x_{k+1}\in \RR^d$ such that
\begin{equation*}
\dist(-\nabla_x \L_{\beta_k} (x_{k+1},y_k), \partial g(x_{k+1}) ) \le \epsilon_{k+1}
\end{equation*}
for first-order stationarity and, in addition,
\begin{equation*}
\lambda_{\text{min}}(\nabla _{xx}\mathcal{L}_{\beta_k}(x_{k+1}, y_k)) \ge -\epsilon_{k+1}
\end{equation*}
for second-order stationarity.
\item \textbf{(Control)} If necessary, project $x_{k+1}$ to ensure that $\|x_{k+1}\|\le \rho'$.
\item \textbf{(Update dual step size)}
\begin{align*}
\s_{k+1} & = \s_{1} \min\Big(
\frac{\|A(x_1)\| \log^2 2 }{\|A(x_{k+1})\| (k+1)\log^2(k+2)} ,1
\Big).
\end{align*}
\item \textbf{(Dual ascent)} $y_{k+1} = y_{k} + \sigma_{k+1}A(x_{k+1})$.
\item \textbf{(Stopping criterion)} If
\begin{align*}
& \dist(-\nabla_x \L_{\b_k}(x_{k+1},y_k),\partial g(x_{k+1})) \\
& \qquad + \s_{k+1} \|A(x_{k+1})\| \le \tau_f,
\end{align*}
(and $\lambda_{\text{min}}(\nabla _{xx}\mathcal{L}_{\beta_{k}}(x_{k+1}, y_k)) \geq -\tau_s$ if second-order stationarity is required),
then quit and return $x_{k+1}$ as an (approximate) stationary point of~\eqref{prob:01}.
\end{enumerate}
\ENDFOR
\end{algorithmic}
\caption{Inexact AL for solving~\eqref{prob:01}}
\label{Algo:2}
\end{algorithm}
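
For concreteness, the Python sketch below mirrors the steps of Algorithm~\ref{Algo:2}. It is only illustrative: the problem oracles \texttt{grad\_f}, \texttt{A}, \texttt{jac\_A}, \texttt{prox\_g}, the proximal-gradient inner loop standing in for the primal solver of Step 2, and the fixed inner step size \texttt{eta} are placeholder assumptions rather than the solvers analyzed in this paper.
\begin{verbatim}
import numpy as np

def ialm(grad_f, A, jac_A, prox_g, x1, y0, beta, sigma1,
         rho_prime, tau_f, max_outer=100, inner_steps=1000, eta=1e-2):
    """Illustrative sketch of the iALM loop (Algorithm 2).

    beta   : callable k -> beta_k, non-decreasing, positive, unbounded
    prox_g : callable (z, step) -> prox_{step * g}(z)
    """
    x, y = x1.astype(float), y0.astype(float)
    normA_x1 = np.linalg.norm(A(x1))
    for k in range(1, max_outer + 1):
        bk = beta(k)
        eps = 1.0 / bk                              # Step 1: update tolerance

        # Step 2: approximate primal step, here a plain proximal-gradient
        # loop on x -> f(x) + <y, A(x)> + (bk/2)||A(x)||^2, with g via prox_g.
        def grad_AL(z):
            return grad_f(z) + jac_A(z).T @ (y + bk * A(z))
        stat = np.inf
        for _ in range(inner_steps):
            x_new = prox_g(x - eta * grad_AL(x), eta)
            stat = np.linalg.norm(x_new - x) / eta  # crude proxy for dist(-grad, dg)
            x = x_new
            if stat <= eps:
                break

        # Step 3: control -- project back onto the ball of radius rho'
        nx = np.linalg.norm(x)
        if nx > rho_prime:
            x *= rho_prime / nx

        # Step 4: dual step size
        Ax = A(x)
        denom = max(np.linalg.norm(Ax) * (k + 1) * np.log(k + 2) ** 2, 1e-16)
        sigma = sigma1 * min(normA_x1 * np.log(2) ** 2 / denom, 1.0)

        # Step 5: dual ascent
        y = y + sigma * Ax

        # Step 6: stopping criterion (first-order part only)
        if stat + sigma * np.linalg.norm(Ax) <= tau_f:
            break
    return x, y
\end{verbatim}
In practice, the inner loop would be replaced by any first- or second-order method that certifies the inexactness conditions of Step 2; the remaining steps (tolerance update, control, dual step size, dual ascent, stopping test) do not depend on that choice.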
