Page MenuHomec4science

AL.tex
No OneTemporary

File Metadata

Created
Wed, Jun 12, 17:40
%!TEX root = ../main.tex
\vspace{-4mm}
\section{Algorithm}\label{sec:AL algorithm}
To solve the equivalent formulation of \eqref{prob:01} presented in \eqref{eq:minmax}, we propose the inexact ALM (iALM), detailed in Algorithm~\ref{Algo:2}.
At the $k^{\text{th}}$ iteration, Step 2 of Algorithm~\ref{Algo:2} calls a solver that finds an approximate stationary point of the augmented Lagrangian $\L_{\b_k}(\cdot,y_k)$ to the accuracy $\epsilon_{k+1}$, and this accuracy improves gradually in a controlled fashion as the tolerance $\epsilon_{k+1}$ decreases.
The increasing sequence of penalty weights $\{\b_k\}_k$ and the dual update (Steps 4 and 5) are responsible for continuously enforcing the constraints in~\eqref{prob:01}. The appropriate choice for $\{\b_k\}_k$ will be specified in Sections \ref{sec:first-o-opt} and \ref{sec:second-o-opt}.
The particular choice of the dual step sizes $\{\s_k\}_k$ in Algorithm~\ref{Algo:2} ensures that the dual variable $y_k$ remains bounded; see~\cite{bertsekas1976penalty} in the ALM literature, where a similar dual step size is considered.
%Step 3 of Algorithm~\ref{Algo:2} removes pathological cases with divergent iterates. As an example, suppose that $g=\delta_\mathcal{X}$ in \eqref{prob:01} is the indicator function for a bounded convex set $\mathcal{X}\subset \RR^d$ and take $\rho' > \max_{x\in \mathcal{X}} \|x\|$. Then, for sufficiently large $k$ \editf{[(mfs:)we may consider removing this "sufficiently large" phrase which appears a few times or clearly explain what we meant for it] }, it is not difficult to verify that all the iterates of Algorithm~\ref{Algo:2} automatically satisfy $\|x_k\|\le \rho'$ without the need to execute Step 3.
\begin{algorithm}[h!]
\begin{algorithmic}
\STATE \textbf{Input:} Non-decreasing, positive, unbounded sequence $\{\b_k\}_{k\ge 1}$, stopping thresholds $\tau_f, \tau_s > 0$. \vspace{2pt}
\STATE \textbf{Initialization:} Primal variable $x_{1}\in \RR^d$, dual variable $y_0\in \RR^m$, dual step size $\s_1>0$.
%For $k=1,2,\ldots$, execute\\
\vspace{2pt}
\FOR{$k=1,2,\dots$}
\STATE \begin{enumerate}%[leftmargin=*]
\item \textbf{(Update tolerance)} $\epsilon_{k+1} = 1/\b_k$.
%\begin{align}
%\beta_k = \frac{\beta_{1}}{\log 2}\sqrt{k}\log^2(k+1) .
%\end{align}
\item \textbf{(Inexact primal solution)} Obtain $x_{k+1}\in \RR^d$ such that
\begin{equation*}
\dist(-\nabla_x \L_{\beta_k} (x_{k+1},y_k), \partial g(x_{k+1}) ) \le \epsilon_{k+1}
\end{equation*}
for first-order stationarity, and additionally
\begin{equation*}
\lambda_{\text{min}}(\nabla _{xx}\mathcal{L}_{\beta_k}(x_{k+1}, y_k)) \ge -\epsilon_{k+1}
\end{equation*}
for second-order stationarity, if $g=0$ in \eqref{prob:01}.
%\item \textbf{(Control)} If necessary, project $x_{k+1}$ to ensure that $\|x_{k+1}\|\le \rho'$.\editf{[(mfs:) can we remove this step? If we append this to 2nd step of algo., can we still make sure is solution(or approx. solution) is within this ball?]}\\
\item \textbf{(Update dual step size)}
\begin{align*}
\s_{k+1} & = \s_{1} \min\Big(
\frac{\|A(x_1)\| \log^2 2 }{\|A(x_{k+1})\| (k+1)\log^2(k+2)} ,1
\Big).
\end{align*}
\item \textbf{(Dual ascent)} $y_{k+1} = y_{k} + \sigma_{k+1}A(x_{k+1})$.
\item \textbf{(Stopping criterion)} If
\begin{align*}
& \dist(-\nabla_x \L_{\b_k}(x_{k+1},y_k),\partial g(x_{k+1})) + \|A(x_{k+1})\| \le \tau_f,
\end{align*}
for first-order stationarity and if also
$\lambda_{\text{min}}(\nabla _{xx}\mathcal{L}_{\beta_{k}}(x_{k+1}, y_k)) \geq -\tau_s$ for second-order stationarity,
then quit and return $x_{k+1}$ as an (approximate) stationary point of~\eqref{prob:01}.
\end{enumerate}
\ENDFOR
\end{algorithmic}
\caption{Inexact ALM for solving~\eqref{prob:01}}
\label{Algo:2}
\end{algorithm}

Event Timeline